repo
stringlengths
1
99
file
stringlengths
13
215
code
stringlengths
12
59.2M
file_length
int64
12
59.2M
avg_line_length
float64
3.82
1.48M
max_line_length
int64
12
2.51M
extension_type
stringclasses
1 value
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Journal_repo/mainWindSampling.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Wind-field sampling & reconstruction experiment driver.

Monte Carlo study: repeatedly subsamples the wind-field dataset (1st sampling),
masks a fraction of the retained points (2nd sampling), and trains a TNN/MNN
(or their filter variants ftnn/fmnn) to reconstruct the masked values.
Aggregated MSE statistics are pickled under the results directory.

Usage: python mainWindSampling.py {tnn|ftnn|mnn|fmnn}

@author: Claudio Battiloro
"""
import sys
import pickle as pkl
import webbrowser

import numpy as np
import numpy.ma as ma
import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from tensorboard import program

from architecture import TNN, MNN
from data_util import WindSampling
from utils import get_laplacians, project_data, topk

device = torch.device("cuda" if torch.cuda.is_available() else torch.device("cpu"))

# Set seeds for reproducibility
np.random.seed(0)
pl.seed_everything(0)


class linear_act(torch.nn.Module):
    """Identity activation, used where a linear layer/readout is wanted."""

    def __init__(self):
        super(linear_act, self).__init__()

    def forward(self, x):
        return x


# Select architecture from the command line: tnn | ftnn | mnn | fmnn
tnn_or_mnn = sys.argv[1]

# %% Data importing
with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/data/windfields/data2016.pkl', 'rb') as file:
    data_all = pkl.load(file)
# In the sampling and reconstruction experiment we take a single day
data_all = data_all[0, :, :]
# Normalize the coordinates by the nominal earth radius to avoid numerical instability
R = 6356.8
data_all[:, :3] = data_all[:, :3]/R
# Scale the data to facilitate training
# NOTE(review): divides by the range without subtracting the min -- presumably
# intentional (signed wind components); confirm against the prediction script.
data_all[:, 3:] = (data_all[:, 3:])/(np.max(data_all[:, 3:])-np.min(data_all[:, 3:]))
n_max = data_all.shape[0]
p = 3  # ambient space dimension
d = 2  # manifold dimension

# Monte Carlo simulation parameters
outer_num_rel = 8
inner_num_rel = 8
# 1st sampling: reduce the initial dimensionality -> the complete dataset plays the role of the manifold
num_avg_samples_coll = [100, 150, 200, 300, 400]
# 2nd sampling: the actual reconstruction mask
avg_sample_pctg_coll = [.5, .7, .9]

# Architecture parameters
in_features = int((data_all.shape[1]-p)/d) if tnn_or_mnn == 'tnn' or tnn_or_mnn == 'ftnn' else data_all.shape[1]-p
features = [8, 4, 1]  # last entry = output features; length = number of layers
if tnn_or_mnn == "mnn" or tnn_or_mnn == "fmnn":
    # MNN outputs live in the ambient tangent coordinates -> d output channels
    features[-1] = features[-1]*d
dense = []
lr = 4e-4
if tnn_or_mnn == "fmnn" or tnn_or_mnn == "ftnn":
    readout_sigma = linear_act()
    sigma = linear_act()
else:
    readout_sigma = linear_act()
    sigma = torch.nn.Tanh()
kappa = [2]*len(features)
loss_function = torch.nn.MSELoss(reduction='sum')
weight_decay = 1e-3
max_epochs = 500
opt_step_per_epoch = 100  # total optimization steps = opt_step_per_epoch*max_epochs; split is useful for logging

# Logging parameters
string = "Wind_Reconstruction"  # experiment name
save_dir_ = '/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results'  # saving directory

# Sheaf Laplacian parameters
epsilon_pca = .8  # heuristic; theory suggests n**(-2/(d+1))
gamma = .8
epsilon = .5

open_tb = 0  # opens TensorBoard in the default browser when truthy
tracking_address = save_dir_+'/'+string  # TensorBoard tracking folder
res_path = save_dir_+'/'+string+'/res_'+tnn_or_mnn+'.pkl'  # aggregated results file


def _save_result(path, outer_key, inner_key, mse_coll):
    """Insert (or update) one aggregated-MSE entry in the pickled results dict."""
    entry = {"avg_mse": mse_coll.mean(), "std_mse": mse_coll.std(), "complete_coll": mse_coll}
    try:
        with open(path, 'rb') as file:
            mse_dic = pkl.load(file)
        print("Results file already exisisting... Updating!")
        # setdefault replaces the original try/except-on-KeyError dance
        mse_dic.setdefault(outer_key, {})[inner_key] = entry
        with open(path, 'wb') as file:
            pkl.dump(mse_dic, file)
        print("Updated!")
    except (FileNotFoundError, EOFError, pkl.UnpicklingError):
        # narrowed from a bare except: anything else (e.g. permission errors) should surface
        print("Results file not found... Creating!")
        mse_dic = {outer_key: {inner_key: entry}}
        with open(path, 'wb') as file:
            pkl.dump(mse_dic, file)
    print(mse_dic)


for num_avg_samples in num_avg_samples_coll:
    print()
    print("Testing with average number of points: "+str(num_avg_samples))
    print()
    p_samp = num_avg_samples/n_max
    for avg_sample_pctg in avg_sample_pctg_coll:
        print()
        print("Testing with masking propability: "+str(avg_sample_pctg))
        print()
        min_mse = np.zeros((outer_num_rel, inner_num_rel))
        # 1st sampling
        for outer_rel in range(outer_num_rel):
            sampling_set = np.random.binomial(1, p_samp, n_max) > 0
            data = data_all[sampling_set, -2:]
            coord = data_all[sampling_set, :3]
            n = coord.shape[0]
            print()
            print("Outer Realization number "+str(outer_rel)+": "+str(n) + " samples!")
            print()
            # Build the (sheaf) Laplacian; sheaf variants also project data onto tangent frames
            if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
                Delta_n_numpy, S, W, O_i_collection, d_hat, B_i_collection = get_laplacians(coord, epsilon, epsilon_pca, gamma, tnn_or_mnn)
                data_proj = project_data(data, O_i_collection)
            else:
                Delta_n_numpy = get_laplacians(coord, epsilon, epsilon_pca, gamma, tnn_or_mnn)
                data_proj = data
            # Replicate the Laplacian for each layer
            Delta_n = len(features)*[torch.from_numpy(Delta_n_numpy)]
            # Net parameters
            hparams = {'in_features': in_features,
                       'L': Delta_n,
                       'features': features,
                       'lr': lr,
                       'weight_decay': weight_decay,
                       'sigma': sigma,
                       'readout_sigma': readout_sigma,
                       'kappa': kappa,
                       'n': n,
                       'loss_function': loss_function,
                       'device': device}
            for inner_rel in range(inner_num_rel):
                # 2nd sampling: Bernoulli mask over the points, replicated over the d
                # tangent coordinates for the sheaf architectures
                bern = np.random.binomial(1, avg_sample_pctg, n)
                if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
                    mask = np.kron(np.ones((1, d)), np.expand_dims(bern, 1)).flatten() > 0
                else:
                    mask = bern > 0
                val_mask = mask == 0
                print()
                print("Inner Realization number "+str(inner_rel)+": "+str(sum(mask)) + " masked points!")
                print()
                # Data and net instantiating
                data_torch = WindSampling(data_proj, mask, opt_step_per_epoch, device)
                data_torch_val = WindSampling(data_proj, val_mask, 1, device)
                if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
                    net = TNN(**hparams).to(device)
                else:
                    net = MNN(**hparams).to(device)
                train_loader = torch.utils.data.DataLoader(
                    data_torch, batch_size=None, batch_sampler=None, shuffle=True, num_workers=0)
                val_loader = torch.utils.data.DataLoader(
                    data_torch_val, batch_size=None, batch_sampler=None, shuffle=False, num_workers=0)
                logger = pl.loggers.TensorBoardLogger(name=string, save_dir=save_dir_)
                early_stop_callback = EarlyStopping(monitor="test_mse", min_delta=1e-6, patience=5, verbose=False, mode="min")
                trainer = pl.Trainer(max_epochs=max_epochs, logger=logger, log_every_n_steps=1,
                                     accelerator='gpu', devices=1, auto_select_gpus=False,
                                     callbacks=[early_stop_callback])
                trainer.fit(net, train_loader, val_loader)
                min_mse[outer_rel, inner_rel] = net.min_mse_val
        # Remove eventual corrupted runs (divergent, outliers, etc...)
        min_mse = min_mse[~np.isnan(min_mse).any(axis=1), :]
        # Remove the worst 2 runs (redundant for divergent-but-not-NaN runs handled by results_aggregator.py)
        to_delete = topk(min_mse, 2)
        mask = np.logical_or(min_mse == to_delete[0], min_mse == to_delete[1])
        min_mse = ma.masked_array(min_mse, mask=mask)
        _save_result(res_path, "avg_points"+str(num_avg_samples), "avg_mask"+str(avg_sample_pctg), min_mse)

# TensorBoard monitoring
if open_tb:
    tb = program.TensorBoard()
    tb.configure(argv=[None, '--logdir', tracking_address])
    url = tb.launch()
    print(f"Tensorflow listening on {url}")
    webbrowser.open_new(url)
    input("Press Enter to Exit")
9,380
46.619289
171
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Journal_repo/data_util.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ @author: Clabat """ import torch import pickle import pandas as pd import numpy as np from collections import defaultdict import torch.nn.functional as F class WindSampling(torch.utils.data.Dataset): def __init__(self, data_proj_numpy, mask, step_per_epoch, device): self.device = device torch_data = torch.from_numpy(data_proj_numpy).to(self.device) self.X = torch.clone(torch_data) self.X[mask,:] = torch.mean(self.X[mask == 0,:]) self.y = torch.clone(torch_data) self.length = data_proj_numpy.shape[0] self.mask = torch.from_numpy(mask).to(self.device) self.step_per_epoch = step_per_epoch def __getitem__(self, index): return self.X, self.y, self.mask def __len__(self): # Returns length return self.step_per_epoch class TorusDenoising(torch.utils.data.Dataset): def __init__(self, data_clean,data_noisy, step_per_epoch, device): self.device = device torch_data_clean = torch.from_numpy(data_clean).to(self.device) torch_data_noisy = torch.from_numpy(data_noisy).to(self.device) self.X = torch.clone(torch_data_noisy) self.y = torch.clone(torch_data_clean) self.step_per_epoch = step_per_epoch def __getitem__(self, index): return self.X, self.y def __len__(self): # Returns length return self.step_per_epoch class WindPrediction(torch.utils.data.Dataset): def __init__(self, data_proj_numpy, time_window, device): self.device = device self.X = torch.from_numpy(data_proj_numpy).to(self.device) self.time_window = time_window def __getitem__(self, idx): x = self.X[idx:(idx+self.time_window),:,:] y = self.X[(idx+self.time_window):(idx+2*self.time_window),:,:] return x,y def __len__(self): return self.X.shape[0]-2*self.time_window
1,956
30.063492
71
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Journal_repo/layers.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Convolutional layers for manifold/tangent-bundle neural networks.

@author: Claudio Battiloro
"""
import torch
import torch.nn as nn


# Graph Convolutional Neural Network Layer
class GNNLayer(nn.Module):
    """Polynomial graph filter layer: sigma(sum_k L^(k+1) x W_k).

    Note: the bias `b` is allocated and initialized but never applied in
    forward; it is kept only for state-dict compatibility.
    """

    def __init__(self, F_in, F_out, L, kappa, device, sigma):
        """
        Parameters
        ----------
        F_in: number of input signals
        F_out: number of output signals
        L: shift operator (graph/sheaf Laplacian)
        kappa: filter order
        device: device
        sigma: non-linearity
        """
        super(GNNLayer, self).__init__()
        self.K = kappa
        self.F_in = F_in
        self.F_out = F_out
        self.sigma = sigma
        self.L = L
        # FIX: the original tested L.type() == 'torch.cuda.DoubleTensor', which
        # missed CPU double tensors and produced float32 filters for them.
        # Matching on dtype covers both CPU and CUDA double shift operators.
        if self.L.dtype == torch.float64:
            self.W = nn.Parameter(torch.empty(size=(self.K, F_in, F_out)).to(device).double())
            self.b = nn.Parameter(torch.empty(size=(1, 1)).to(device).double())
        else:
            self.W = nn.Parameter(torch.empty(size=(self.K, F_in, F_out)).to(device))
            self.b = nn.Parameter(torch.empty(size=(1, 1)).to(device))
        self.reset_parameters()
        self.device = device

    def reset_parameters(self):
        """Reinitialize learnable parameters."""
        gain = nn.init.calculate_gain('relu')
        nn.init.xavier_uniform_(self.W.data, gain=gain)
        nn.init.xavier_uniform_(self.b.data, gain=gain)

    def forward(self, x):
        alpha_zero = torch.clone(self.L)
        data = torch.clone(x)
        alpha_k = torch.clone(alpha_zero)
        try:
            z_i = alpha_k @ (data @ self.W[0])
        except RuntimeError:
            # L may live on a different device than the input; retry once after moving it.
            alpha_k = alpha_k.to(data.device)
            z_i = alpha_k @ (data @ self.W[0])
        for k in range(1, self.K):
            alpha_k = alpha_k @ alpha_zero  # alpha_k = L^(k+1)
            z_i = z_i + alpha_k @ data @ self.W[k]
        return self.sigma(z_i)


# Recurrent Graph Convolutional Neural Network Layer
class RGNNLayer(nn.Module):
    """Recurrent polynomial graph filter layer with a per-step hidden state.

    The hidden-state recursion requires F_in == F_out (the hidden state is fed
    back through H, which maps F_in -> F_out).  The bias `b` is allocated but
    never applied in forward; kept for state-dict compatibility.
    """

    def __init__(self, F_in, F_out, L, kappa, device, sigma, time_window):
        """
        Parameters
        ----------
        F_in: number of input signals
        F_out: number of output signals
        L: shift operator (graph/sheaf Laplacian)
        kappa: filter order
        device: device
        sigma: non-linearity
        time_window: prediction time window
        """
        super(RGNNLayer, self).__init__()
        self.K = kappa
        self.F_in = F_in
        self.F_out = F_out
        self.sigma = sigma
        self.time_window = time_window
        self.L = L
        # Same dtype fix as in GNNLayer (see above).
        if self.L.dtype == torch.float64:
            self.W = nn.Parameter(torch.empty(size=(self.K, F_in, F_out)).to(device).double())
            self.H = nn.Parameter(torch.empty(size=(self.K, F_in, F_out)).to(device).double())
            self.b = nn.Parameter(torch.empty(size=(1, 1)).to(device).double())
        else:
            self.W = nn.Parameter(torch.empty(size=(self.K, F_in, F_out)).to(device))
            self.H = nn.Parameter(torch.empty(size=(self.K, F_in, F_out)).to(device))
            self.b = nn.Parameter(torch.empty(size=(1, 1)).to(device))
        self.reset_parameters()
        self.device = device

    def reset_parameters(self):
        """Reinitialize learnable parameters."""
        gain = nn.init.calculate_gain('relu')
        nn.init.xavier_uniform_(self.W.data, gain=gain)
        nn.init.xavier_uniform_(self.H.data, gain=gain)
        nn.init.xavier_uniform_(self.b.data, gain=gain)

    def forward(self, x):
        # x is batch_size X how_many_time_slots X number_of_nodes X number_of_features
        alpha_zero = torch.clone(self.L)
        data = torch.clone(x).to(self.device).double()
        # FIX: the original allocated `out = torch.zeros(data.shape)` -- a CPU
        # float32 tensor with F_in channels -- and then wrote device/double
        # F_out-channel hidden states into it, which fails on GPU and whenever
        # F_in != F_out.  Allocate with the right device, dtype and channels.
        out = torch.zeros(data.shape[0], data.shape[1], data.shape[2], self.F_out,
                          device=data.device, dtype=data.dtype)
        for data_point in range(data.shape[0]):  # batch loop: inefficient, can be improved with PyTorch Geometric
            hidden_state = torch.zeros(data.shape[2], self.F_in, device=data.device, dtype=data.dtype)
            for t in range(self.time_window):  # time loop
                alpha_k = torch.clone(alpha_zero)
                try:
                    z_i = alpha_k @ (data[data_point, t, :, :] @ self.W[0]) + alpha_k @ (hidden_state @ self.H[0])
                except RuntimeError:
                    # L may live on a different device than the input; retry once after moving it.
                    alpha_k = alpha_k.to(data.device)
                    z_i = alpha_k @ (data[data_point, t, :, :] @ self.W[0]) + alpha_k @ (hidden_state @ self.H[0])
                for k in range(1, self.K):
                    alpha_k = alpha_k @ alpha_zero
                    z_i = z_i + alpha_k @ data[data_point, t, :, :] @ self.W[k] + alpha_k @ (hidden_state @ self.H[k])
                hidden_state = self.sigma(z_i)
                out[data_point, t, :, :] = hidden_state
        return out
4,752
37.959016
135
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Journal_repo/mainTorusDenoising.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Torus tangent-vector-field denoising experiment driver.

Monte Carlo study: samples points on a synthetic torus, corrupts a smooth
tangent vector field with Gaussian noise, and trains a TNN/MNN (or their
filter variants ftnn/fmnn) to denoise it.  Aggregated MSE statistics are
pickled under the results directory.

Usage: python mainTorusDenoising.py {tnn|ftnn|mnn|fmnn}

@author: Claudio Battiloro
"""
import sys
import pickle as pkl
import webbrowser

import numpy as np
import numpy.ma as ma
import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from tensorboard import program

from architecture import TNN, MNN
from data_util import TorusDenoising
from utils import get_laplacians, project_data, topk

device = torch.device("cuda" if torch.cuda.is_available() else torch.device("cpu"))

# Set seeds for reproducibility
np.random.seed(0)
pl.seed_everything(0)


class linear_act(torch.nn.Module):
    """Identity activation, used where a linear layer/readout is wanted."""

    def __init__(self):
        super(linear_act, self).__init__()

    def forward(self, x):
        return x


# Select architecture from the command line: tnn | ftnn | mnn | fmnn
# (the original assigned open_tb and tnn_or_mnn twice; duplicates removed)
tnn_or_mnn = sys.argv[1]

# %% Synthetic data generation
res = 100  # the torus is sampled on a regular grid of res^2 points
p = 3  # ambient space dimension
d = 2  # manifold dimension
# Torus sampling
phi = np.linspace(0, 2*np.pi, res)
theta = np.linspace(0, 2*np.pi, res)
phi, theta = np.meshgrid(phi, theta)
phi = phi.flatten()
theta = theta.flatten()
r = .1  # tube radius
b = .3  # center-line radius
x = np.expand_dims((r*np.cos(theta)+b)*np.cos(phi), 1)
y = np.expand_dims((r*np.cos(theta)+b)*np.sin(phi), 1)
z = np.expand_dims((r*np.sin(theta)), 1)
coord_max = np.concatenate((x, y, z), 1)
# Smooth tangent vector field on the torus
# NOTE(review): the field uses theta in the first two components -- looks
# deliberate (it is tangent along the tube direction), but confirm.
X = np.expand_dims(-np.sin(theta), 1)
Y = np.expand_dims(np.cos(theta), 1)
Z = np.expand_dims(np.zeros(len(theta)), 1)
data_all = np.concatenate((X, Y, Z), 1)
n_max = data_all.shape[0]

# Monte Carlo simulation parameters
outer_num_rel = 8
inner_num_rel = 8
# 1st sampling: reduce the initial dimensionality -> the complete dataset plays the role of the manifold
num_avg_samples_coll = [400]
# Noise standard deviations to sweep
noise_sds_coll = [7e-2, 1e-1, 3e-1]

# Architecture parameters
in_features = int(data_all.shape[1]/d) if tnn_or_mnn == 'tnn' or tnn_or_mnn == 'ftnn' else data_all.shape[1]
features = [8, 4, 1]  # last entry = output features; length = number of layers
if tnn_or_mnn == "mnn" or tnn_or_mnn == "fmnn":
    # MNN outputs live in the full ambient coordinates -> p output channels
    features[-1] = features[-1]*p
dense = []
lr = 1e-3
if tnn_or_mnn == "fmnn" or tnn_or_mnn == "ftnn":
    readout_sigma = linear_act()
    sigma = linear_act()
else:
    readout_sigma = linear_act()
    sigma = torch.nn.Tanh()
kappa = [2]*len(features)
loss_function = torch.nn.MSELoss(reduction='sum')
weight_decay = 0.0
step_per_epoch = 100
max_epochs = 500

# Logging parameters
string = "Torus_Denoising"  # experiment name
save_dir_ = '/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results'  # saving directory

# Sheaf Laplacian parameters
epsilon_pca = .8  # heuristic; theory suggests n**(-2/(d+1))
epsilon = .5
gamma = .8

open_tb = 0  # opens TensorBoard in the default browser when truthy
tracking_address = save_dir_+'/'+string  # TensorBoard tracking folder
res_path = save_dir_+'/'+string+'/res_'+tnn_or_mnn+'.pkl'  # aggregated results file


def _save_result(path, outer_key, inner_key, mse_coll):
    """Insert (or update) one aggregated-MSE entry in the pickled results dict."""
    entry = {"avg_mse": mse_coll.mean(), "std_mse": mse_coll.std(), "complete_coll": mse_coll}
    try:
        with open(path, 'rb') as file:
            mse_dic = pkl.load(file)
        print("Results file already exisisting... Updating!")
        # setdefault replaces the original try/except-on-KeyError dance
        mse_dic.setdefault(outer_key, {})[inner_key] = entry
        with open(path, 'wb') as file:
            pkl.dump(mse_dic, file)
        print("Updated!")
    except (FileNotFoundError, EOFError, pkl.UnpicklingError):
        # narrowed from a bare except: anything else (e.g. permission errors) should surface
        print("Results file not found... Creating!")
        mse_dic = {outer_key: {inner_key: entry}}
        with open(path, 'wb') as file:
            pkl.dump(mse_dic, file)
    print(mse_dic)


for num_avg_samples in num_avg_samples_coll:
    print()
    print("Testing with average number of points: "+str(num_avg_samples))
    print()
    p_samp = num_avg_samples/n_max
    for noise_sd in noise_sds_coll:
        print()
        print("Testing with noise variance: "+str(noise_sd))
        print()
        min_mse = np.zeros((outer_num_rel, inner_num_rel))
        # 1st sampling (ensures random sampling of the "manifold")
        for outer_rel in range(outer_num_rel):
            sampling_set = np.random.binomial(1, p_samp, n_max) > 0
            data = data_all[sampling_set, :]
            coord = coord_max[sampling_set, :]
            n = coord.shape[0]
            print()
            print("Outer Realization number "+str(outer_rel)+": "+str(n) + " samples!")
            print()
            for inner_rel in range(inner_num_rel):
                print()
                print("Inner Realization number "+str(inner_rel))
                print()
                # Adding noise to the data
                data_noisy = data + np.random.normal(0.0, noise_sd, size=data.shape)
                # Build the (sheaf) Laplacian; sheaf variants also project data onto tangent frames
                if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
                    Delta_n_numpy, S, W, O_i_collection, _, B_i_collection = get_laplacians(coord, epsilon, epsilon_pca, gamma, tnn_or_mnn)
                    data_proj = project_data(data, O_i_collection)
                    data_proj_noisy = project_data(data_noisy, O_i_collection)
                else:
                    Delta_n_numpy = get_laplacians(coord, epsilon, epsilon_pca, gamma, tnn_or_mnn)
                    data_proj = data
                    data_proj_noisy = data_noisy
                # Replicate the Laplacian for each layer
                Delta_n = len(features)*[torch.from_numpy(Delta_n_numpy)]
                # Net parameters
                hparams = {'in_features': in_features,
                           'L': Delta_n,
                           'features': features,
                           'lr': lr,
                           'weight_decay': weight_decay,
                           'sigma': sigma,
                           'readout_sigma': readout_sigma,
                           'kappa': kappa,
                           'n': n,
                           'loss_function': loss_function,
                           'device': device}
                # Data and net instantiating
                data_torch = TorusDenoising(data_proj, data_proj_noisy, step_per_epoch, device)
                if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
                    net = TNN(**hparams).to(device)
                else:
                    net = MNN(**hparams).to(device)
                train_loader = torch.utils.data.DataLoader(
                    data_torch, batch_size=None, batch_sampler=None, shuffle=True, num_workers=0)
                logger = pl.loggers.TensorBoardLogger(name=string, save_dir=save_dir_)
                early_stop_callback = EarlyStopping(monitor="train_mse", min_delta=1e-6, patience=5, verbose=False, mode="min")
                trainer = pl.Trainer(max_epochs=max_epochs, logger=logger, log_every_n_steps=1,
                                     accelerator='gpu', devices=1, auto_select_gpus=False,
                                     callbacks=[early_stop_callback])
                trainer.fit(net, train_loader)
                min_mse[outer_rel, inner_rel] = net.min_mse_train
        # Remove eventual corrupted runs (divergent, outliers, etc...)
        min_mse = min_mse[~np.isnan(min_mse).any(axis=1), :]
        # Remove the worst 2 runs (redundant for divergent-but-not-NaN runs handled by results_aggregator.py)
        to_delete = topk(min_mse, 2)
        mask = np.logical_or(min_mse == to_delete[0], min_mse == to_delete[1])
        min_mse = ma.masked_array(min_mse, mask=mask)
        _save_result(res_path, "avg_points"+str(num_avg_samples), "noise_sd"+str(noise_sd), min_mse)

# TensorBoard monitoring
if open_tb:
    tb = program.TensorBoard()
    tb.configure(argv=[None, '--logdir', tracking_address])
    url = tb.launch()
    print(f"Tensorflow listening on {url}")
    webbrowser.open_new(url)
    input("Press Enter to Exit")
9,188
45.64467
172
py
Tangent-Bundle-Neural-Networks
Tangent-Bundle-Neural-Networks-main/Journal_repo/mainWindPrediction.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Wind-field forecasting experiment driver.

Monte Carlo study: subsamples the 2016 wind-field dataset for training and the
2017 dataset for testing, then trains a recurrent TNN/MNN (or plain RNN
baseline) to predict future wind windows.  Aggregated MSE statistics are
pickled under the results directory.

Usage: python mainWindPrediction.py {tnn|ftnn|mnn|fmnn|rnn}

@author: Claudio Battiloro
"""
import warnings
import sys
import pickle as pkl
import webbrowser

import numpy as np
import numpy.ma as ma
import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from tensorboard import program

from architecture import RTNN, RMNN
from data_util import WindPrediction
from utils import get_laplacians, project_data, topk

device = torch.device("cuda" if torch.cuda.is_available() else torch.device("cpu"))

# Set seeds for reproducibility
np.random.seed(0)
pl.seed_everything(0)


class linear_act(torch.nn.Module):
    """Identity activation, used where a linear layer is wanted."""

    def __init__(self):
        super(linear_act, self).__init__()

    def forward(self, x):
        return x


# Select architecture from the command line: tnn | ftnn | mnn | fmnn | rnn
tnn_or_mnn = sys.argv[1]

# %% Data importing
# Train
with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/Journal_repo/data/windfields/data2016.pkl', 'rb') as file:
    data_all = pkl.load(file)
# Test
# FIX: the original path was '.../data/windfieldsdata2017.pkl' (missing '/'
# between the directory and the file name), which can never be found.
with open('/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/Journal_repo/data/windfields/data2017.pkl', 'rb') as file:
    data_all_test = pkl.load(file)
# Crop the data (the whole year would be slow)
how_many_days = 250
data_all = data_all[:how_many_days, :, :]
data_all_test = data_all_test[:how_many_days, :, :]
# Normalize the coordinates by the nominal earth radius to avoid numerical instability
R = 6356.8
data_all[:, :, :3] = data_all[:, :, :3]/R
data_all_test[:, :, :3] = data_all_test[:, :, :3]/R
# Scale the data for numerical stability
# NOTE(review): train scaling divides by the range only, while test scaling
# also subtracts the min -- the two sets end up on different scales; this
# looks unintentional but changing it would alter results, so it is only
# flagged here.  Confirm which normalization is intended.
data_all[:, :, 3:] = data_all[:, :, 3:]/(np.max(data_all[:, :, 3:])-np.min(data_all[:, :, 3:]))
data_all_test[:, :, 3:] = (data_all_test[:, :, 3:]-np.min(data_all_test[:, :, 3:]))/(np.max(data_all_test[:, :, 3:])-np.min(data_all_test[:, :, 3:]))
n_max = data_all.shape[1]
p = 3  # ambient space dimension
d = 2  # manifold dimension

# Monte Carlo simulation parameters
outer_num_rel = 8
# 1st sampling: reduce the initial dimensionality -> the complete dataset plays the role of the manifold
num_avg_samples_coll = [100, 200, 300, 400]
# Prediction window lengths to sweep
time_window_coll = [20, 50, 80]

# Architecture parameters
in_features = int((data_all.shape[2]-p)/d) if tnn_or_mnn == 'tnn' or tnn_or_mnn == 'ftnn' else data_all.shape[2]-p
n_layers = 3
in_features = [in_features]*n_layers  # one feature count per layer
dense = []
lr = 1e-3
if tnn_or_mnn == "fmnn" or tnn_or_mnn == "ftnn":
    sigma = linear_act()
else:
    sigma = torch.nn.Tanh()
kappa = [2]*n_layers
num_epochs = 70
batch_size_ = 1
loss_function = torch.nn.MSELoss(reduction='sum')
weight_decay = 1e-3

# Logging parameters
string = "Wind_Prediction"  # experiment name
save_dir_ = '/home/claudio/Desktop/Tangent-Bundle-Neural-Networks-main/Code_for_Journal/TNNs/results'  # saving directory

# Sheaf Laplacian parameters
epsilon_pca = .8  # heuristic; theory suggests n**(-2/(d+1))
gamma = .8
epsilon = .5

open_tb = 0  # opens TensorBoard in the default browser when truthy
tracking_address = save_dir_+'/'+string  # TensorBoard tracking folder
res_path = save_dir_+'/'+string+'/res_'+tnn_or_mnn+'.pkl'  # aggregated results file


def _save_result(path, outer_key, inner_key, mse_coll):
    """Insert (or update) one aggregated-MSE entry in the pickled results dict."""
    entry = {"avg_mse": mse_coll.mean(), "std_mse": mse_coll.std(), "complete_coll": mse_coll}
    try:
        with open(path, 'rb') as file:
            mse_dic = pkl.load(file)
        print("Results file already exisisting... Updating!")
        # setdefault replaces the original try/except-on-KeyError dance
        mse_dic.setdefault(outer_key, {})[inner_key] = entry
        with open(path, 'wb') as file:
            pkl.dump(mse_dic, file)
        print("Updated!")
    except (FileNotFoundError, EOFError, pkl.UnpicklingError):
        # narrowed from a bare except: anything else (e.g. permission errors) should surface
        print("Results file not found... Creating!")
        mse_dic = {outer_key: {inner_key: entry}}
        with open(path, 'wb') as file:
            pkl.dump(mse_dic, file)
    print(mse_dic)


for num_avg_samples in num_avg_samples_coll:
    print()
    print("Testing with average number of points: "+str(num_avg_samples))
    print()
    # 1st sampling probability (the complete dataset plays the role of the manifold)
    p_samp = num_avg_samples/n_max
    for time_window in time_window_coll:
        print()
        print("Testing with Time Window: "+str(time_window))
        print()
        min_mse = np.zeros((outer_num_rel,))
        # 1st sampling
        for outer_rel in range(outer_num_rel):
            sampling_set = np.random.binomial(1, p_samp, n_max) > 0
            data = data_all[:, sampling_set, -2:]
            data_test = data_all_test[:, sampling_set, -2:]
            coord = data_all[0, sampling_set, :3]
            n = coord.shape[0]
            # Build the shift operator for the selected architecture
            if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
                Delta_n_numpy, S, W, O_i_collection, d_hat, B_i_collection = get_laplacians(coord, epsilon, epsilon_pca, gamma, tnn_or_mnn)
                data_proj = np.array([project_data(data[el, :, :], O_i_collection) for el in range(data.shape[0])])
                data_proj_test = np.array([project_data(data_test[el, :, :], O_i_collection) for el in range(data_test.shape[0])])
            elif tnn_or_mnn == "mnn" or tnn_or_mnn == "fmnn":
                Delta_n_numpy = get_laplacians(coord, epsilon, epsilon_pca, gamma, tnn_or_mnn)
                data_proj = data
                data_proj_test = data_test
            elif tnn_or_mnn == "rnn":
                # Plain RNN baseline: identity shift operator
                Delta_n_numpy = np.eye(n)
                data_proj = data
                data_proj_test = data_test
            else:
                # FIX: the original fell through with Delta_n_numpy undefined
                # (a confusing NameError later); fail fast instead.
                raise ValueError("Unknown architecture: "+tnn_or_mnn)
            # Normalize Laplacians (disabled)
            # [lambdas, _] = np.linalg.eigh(Delta_n_numpy)
            # Delta_n_numpy = Delta_n_numpy/np.max(np.real(lambdas))
            Delta_n = len(in_features)*[torch.from_numpy(Delta_n_numpy)]
            data_torch = WindPrediction(data_proj, time_window, device)
            data_torch_val = WindPrediction(data_proj_test, time_window, device)
            hparams = {'in_features': in_features,
                       'L': Delta_n,
                       'lr': lr,
                       'weight_decay': weight_decay,
                       'sigma': sigma,
                       'kappa': kappa,
                       'time_window': time_window,
                       'loss_function': loss_function,
                       'device': device}
            if tnn_or_mnn == "tnn" or tnn_or_mnn == "ftnn":
                net = RTNN(**hparams).to(device)
            else:
                net = RMNN(**hparams).to(device)
            train_loader = torch.utils.data.DataLoader(
                data_torch, batch_size=batch_size_, batch_sampler=None, shuffle=True, num_workers=0)
            # Validation runs on the whole test sequence in one batch
            val_loader = torch.utils.data.DataLoader(
                data_torch_val, batch_size=how_many_days-2*time_window, batch_sampler=None, shuffle=False, num_workers=0)
            logger = pl.loggers.TensorBoardLogger(name=string, save_dir=save_dir_)
            early_stop_callback = EarlyStopping(monitor="test_mse", min_delta=1e-6, patience=5, verbose=False, mode="min")
            trainer = pl.Trainer(max_epochs=num_epochs, logger=logger, log_every_n_steps=1,
                                 accelerator='gpu', devices=1, auto_select_gpus=False,
                                 callbacks=[early_stop_callback])
            trainer.fit(net, train_loader, val_loader)
            min_mse[outer_rel] = net.min_mse_val
        # Remove eventual corrupted runs (divergent, outliers, etc...)
        min_mse = min_mse[~np.isnan(min_mse)]
        # Remove the worst 2 runs
        to_delete = topk(min_mse, 2)
        mask = np.logical_or(min_mse == to_delete[0], min_mse == to_delete[1])
        min_mse = ma.masked_array(min_mse, mask=mask)
        _save_result(res_path, "avg_points"+str(num_avg_samples), "time_window"+str(time_window), min_mse)

# TensorBoard monitoring
if open_tb:
    tb = program.TensorBoard()
    tb.configure(argv=[None, '--logdir', tracking_address])
    url = tb.launch()
    print(f"Tensorflow listening on {url}")
    webbrowser.open_new(url)
    input("Press Enter to Exit")
9,072
47.518717
191
py
AP-BSN
AP-BSN-master/test.py
import argparse, os import torch from src.util.config_parse import ConfigParser from src.trainer import get_trainer_class def main(): # parsing configuration args = argparse.ArgumentParser() args.add_argument('-s', '--session_name', default=None, type=str) args.add_argument('-c', '--config', default=None, type=str) args.add_argument('-e', '--ckpt_epoch', default=0, type=int) args.add_argument('-g', '--gpu', default=None, type=str) args.add_argument( '--pretrained', default=None, type=str) args.add_argument( '--thread', default=4, type=int) args.add_argument( '--self_en', action='store_true') args.add_argument( '--test_img', default=None, type=str) args.add_argument( '--test_dir', default=None, type=str) args = args.parse_args() assert args.config is not None, 'config file path is needed' if args.session_name is None: args.session_name = args.config # set session name to config file name cfg = ConfigParser(args) # device setting if cfg['gpu'] is not None: os.environ['CUDA_VISIBLE_DEVICES'] = cfg['gpu'] # intialize trainer trainer = get_trainer_class(cfg['trainer'])(cfg) # test trainer.test() if __name__ == '__main__': main()
1,335
30.069767
78
py
AP-BSN
AP-BSN-master/train.py
import argparse, os from importlib import import_module import torch from src.util.config_parse import ConfigParser from src.trainer import get_trainer_class def main(): # parsing configuration args = argparse.ArgumentParser() args.add_argument('-s', '--session_name', default=None, type=str) args.add_argument('-c', '--config', default=None, type=str) args.add_argument('-r', '--resume', action='store_true') args.add_argument('-g', '--gpu', default=None, type=str) args.add_argument( '--thread', default=4, type=int) args = args.parse_args() assert args.config is not None, 'config file path is needed' if args.session_name is None: args.session_name = args.config # set session name to config file name cfg = ConfigParser(args) # device setting if cfg['gpu'] is not None: os.environ['CUDA_VISIBLE_DEVICES'] = cfg['gpu'] # intialize trainer trainer = get_trainer_class(cfg['trainer'])(cfg) # train trainer.train() if __name__ == '__main__': main()
1,089
26.25
78
py
AP-BSN
AP-BSN-master/src/trainer/base.py
import os import math import time, datetime import cv2 import numpy as np import torch from torch import nn from torch import optim import torch.autograd as autograd from torch.utils.tensorboard import SummaryWriter from torch.utils.data import DataLoader from ..util.dnd_submission.bundle_submissions import bundle_submissions_srgb from ..util.dnd_submission.dnd_denoise import denoise_srgb from ..util.dnd_submission.pytorch_wrapper import pytorch_denoiser from ..loss import Loss from ..datahandler import get_dataset_class from ..util.file_manager import FileManager from ..util.logger import Logger from ..util.util import human_format, np2tensor, rot_hflip_img, psnr, ssim, tensor2np, imread_tensor from ..util.util import pixel_shuffle_down_sampling, pixel_shuffle_up_sampling status_len = 13 class BaseTrainer(): ''' Base trainer class to implement other trainer classes. below function should be implemented in each of trainer class. ''' def test(self): raise NotImplementedError('define this function for each trainer') def validation(self): raise NotImplementedError('define this function for each trainer') def _set_module(self): # return dict form with model name. raise NotImplementedError('define this function for each trainer') def _set_optimizer(self): # return dict form with each coresponding model name. raise NotImplementedError('define this function for each trainer') def _forward_fn(self, module, loss, data): # forward with model, loss function and data. # return output of loss function. 
raise NotImplementedError('define this function for each trainer') #----------------------------# # Train/Test functions # #----------------------------# def __init__(self, cfg): self.session_name = cfg['session_name'] self.checkpoint_folder = 'checkpoint' # get file manager and logger class self.file_manager = FileManager(self.session_name) self.logger = Logger() self.cfg = cfg self.train_cfg = cfg['training'] self.val_cfg = cfg['validation'] self.test_cfg = cfg['test'] self.ckpt_cfg = cfg['checkpoint'] def train(self): # initializing self._before_train() # warmup if self.epoch == 1 and self.train_cfg['warmup']: self._warmup() # training for self.epoch in range(self.epoch, self.max_epoch+1): self._before_epoch() self._run_epoch() self._after_epoch() self._after_train() def _warmup(self): self._set_status('warmup') # make dataloader iterable. self.train_dataloader_iter = {} for key in self.train_dataloader: self.train_dataloader_iter[key] = iter(self.train_dataloader[key]) warmup_iter = self.train_cfg['warmup_iter'] if warmup_iter > self.max_iter: self.logger.info('currently warmup support 1 epoch as maximum. warmup iter is replaced to 1 epoch iteration. %d -> %d' \ % (warmup_iter, self.max_iter)) warmup_iter = self.max_iter for self.iter in range(1, warmup_iter+1): self._adjust_warmup_lr(warmup_iter) self._before_step() self._run_step() self._after_step() def _before_test(self, dataset_load): # initialing self.module = self._set_module() self._set_status('test') # load checkpoint file ckpt_epoch = self._find_last_epoch() if self.cfg['ckpt_epoch'] == -1 else self.cfg['ckpt_epoch'] ckpt_name = self.cfg['pretrained'] if self.cfg['pretrained'] is not None else None self.load_checkpoint(ckpt_epoch, name=ckpt_name) self.epoch = self.cfg['ckpt_epoch'] # for print or saving file name. 
# test dataset loader if dataset_load: self.test_dataloader = self._set_dataloader(self.test_cfg, batch_size=1, shuffle=False, num_workers=self.cfg['thread']) # wrapping and device setting if self.cfg['gpu'] != 'None': # model to GPU self.model = {key: nn.DataParallel(self.module[key]).cuda() for key in self.module} else: self.model = {key: nn.DataParallel(self.module[key]) for key in self.module} # evaluation mode and set status self._eval_mode() self._set_status('test %03d'%self.epoch) # start message self.logger.highlight(self.logger.get_start_msg()) # set denoiser self._set_denoiser() # wrapping denoiser w/ self_ensemble if self.cfg['self_en']: # (warning) self_ensemble cannot be applied with multi-input model denoiser_fn = self.denoiser self.denoiser = lambda *input_data: self.self_ensemble(denoiser_fn, *input_data) # wrapping denoiser w/ crop test if 'crop' in self.cfg['test']: # (warning) self_ensemble cannot be applied with multi-input model denoiser_fn = self.denoiser self.denoiser = lambda *input_data: self.crop_test(denoiser_fn, *input_data, size=self.cfg['test']['crop']) def _before_train(self): # cudnn torch.backends.cudnn.benchmark = False # initialing self.module = self._set_module() # training dataset loader self.train_dataloader = self._set_dataloader(self.train_cfg, batch_size=self.train_cfg['batch_size'], shuffle=True, num_workers=self.cfg['thread']) # validation dataset loader if self.val_cfg['val']: self.val_dataloader = self._set_dataloader(self.val_cfg, batch_size=1, shuffle=False, num_workers=self.cfg['thread']) # other configuration self.max_epoch = self.train_cfg['max_epoch'] self.epoch = self.start_epoch = 1 max_len = self.train_dataloader['dataset'].dataset.__len__() # base number of iteration works for dataset named 'dataset' self.max_iter = math.ceil(max_len / self.train_cfg['batch_size']) self.loss = Loss(self.train_cfg['loss'], self.train_cfg['tmp_info']) self.loss_dict = {'count':0} self.tmp_info = {} self.loss_log = [] # set 
optimizer self.optimizer = self._set_optimizer() for opt in self.optimizer.values(): opt.zero_grad(set_to_none=True) # resume if self.cfg["resume"]: # find last checkpoint load_epoch = self._find_last_epoch() # load last checkpoint self.load_checkpoint(load_epoch) self.epoch = load_epoch+1 # logger initialization self.logger = Logger((self.max_epoch, self.max_iter), log_dir=self.file_manager.get_dir(''), log_file_option='a') else: # logger initialization self.logger = Logger((self.max_epoch, self.max_iter), log_dir=self.file_manager.get_dir(''), log_file_option='w') # tensorboard tboard_time = datetime.datetime.now().strftime('%m-%d-%H-%M') self.tboard = SummaryWriter(log_dir=self.file_manager.get_dir('tboard/%s'%tboard_time)) # wrapping and device setting if self.cfg['gpu'] != 'None': # model to GPU self.model = {key: nn.DataParallel(self.module[key]).cuda() for key in self.module} # optimizer to GPU for optim in self.optimizer.values(): for state in optim.state.values(): for k, v in state.items(): if isinstance(v, torch.Tensor): state[k] = v.cuda() else: self.model = {key: nn.DataParallel(self.module[key]) for key in self.module} # start message self.logger.info(self.summary()) self.logger.start((self.epoch-1, 0)) self.logger.highlight(self.logger.get_start_msg()) def _after_train(self): # finish message self.logger.highlight(self.logger.get_finish_msg()) def _before_epoch(self): self._set_status('epoch %03d/%03d'%(self.epoch, self.max_epoch)) # make dataloader iterable. 
self.train_dataloader_iter = {} for key in self.train_dataloader: self.train_dataloader_iter[key] = iter(self.train_dataloader[key]) # model training mode self._train_mode() def _run_epoch(self): for self.iter in range(1, self.max_iter+1): self._before_step() self._run_step() self._after_step() def _after_epoch(self): # save checkpoint if self.epoch >= self.ckpt_cfg['start_epoch']: if (self.epoch-self.ckpt_cfg['start_epoch'])%self.ckpt_cfg['interval_epoch'] == 0: self.save_checkpoint() # validation if self.val_cfg['val']: if self.epoch >= self.val_cfg['start_epoch'] and self.val_cfg['val']: if (self.epoch-self.val_cfg['start_epoch']) % self.val_cfg['interval_epoch'] == 0: self._eval_mode() self._set_status('val %03d'%self.epoch) self.validation() def _before_step(self): pass def _run_step(self): # get data (data should be dictionary of Tensors) data = {} for key in self.train_dataloader_iter: data[key] = next(self.train_dataloader_iter[key]) # to device if self.cfg['gpu'] != 'None': for dataset_key in data: for key in data[dataset_key]: data[dataset_key][key] = data[dataset_key][key].cuda() # forward, cal losses, backward) losses, tmp_info = self._forward_fn(self.model, self.loss, data) losses = {key: losses[key].mean() for key in losses} tmp_info = {key: tmp_info[key].mean() for key in tmp_info} # backward total_loss = sum(v for v in losses.values()) total_loss.backward() # optimizer step for opt in self.optimizer.values(): opt.step() # zero grad for opt in self.optimizer.values(): opt.zero_grad(set_to_none=True) # save losses and tmp_info for key in losses: if key != 'count': if key in self.loss_dict: self.loss_dict[key] += float(losses[key]) else: self.loss_dict[key] = float(losses[key]) for key in tmp_info: if key in self.tmp_info: self.tmp_info[key] += float(tmp_info[key]) else: self.tmp_info[key] = float(tmp_info[key]) self.loss_dict['count'] += 1 def _after_step(self): # adjust learning rate self._adjust_lr() # print loss if 
(self.iter%self.cfg['log']['interval_iter']==0 and self.iter!=0) or (self.iter == self.max_iter): self.print_loss() # print progress self.logger.print_prog_msg((self.epoch-1, self.iter-1)) def test_dataloader_process(self, dataloader, add_con=0., floor=False, img_save=True, img_save_path=None, info=True): ''' do test or evaluation process for each dataloader include following steps: 1. denoise image 2. calculate PSNR & SSIM 3. (optional) save denoised image Args: dataloader : dataloader to be tested. add_con : add constant to denoised image. floor : floor denoised image. (default range is [0, 255]) img_save : whether to save denoised and clean images. img_save_path (optional) : path to save denoised images. info (optional) : whether to print info. Returns: psnr : total PSNR score of dataloaer results or None (if clean image is not available) ssim : total SSIM score of dataloder results or None (if clean image is not available) ''' # make directory self.file_manager.make_dir(img_save_path) # test start psnr_sum = 0. ssim_sum = 0. 
count = 0 for idx, data in enumerate(dataloader): # to device if self.cfg['gpu'] != 'None': for key in data: data[key] = data[key].cuda() # forward input_data = [data[arg] for arg in self.cfg['model_input']] denoised_image = self.denoiser(*input_data) # add constant and floor (if floor is on) denoised_image += add_con if floor: denoised_image = torch.floor(denoised_image) # evaluation if 'clean' in data: psnr_value = psnr(denoised_image, data['clean']) ssim_value = ssim(denoised_image, data['clean']) psnr_sum += psnr_value ssim_sum += ssim_value count += 1 # image save if img_save: # to cpu if 'clean' in data: clean_img = data['clean'].squeeze(0).cpu() if 'real_noisy' in self.cfg['model_input']: noisy_img = data['real_noisy'] elif 'syn_noisy' in self.cfg['model_input']: noisy_img = data['syn_noisy'] elif 'noisy' in self.cfg['model_input']: noisy_img = data['noisy'] else: noisy_img = None if noisy_img is not None: noisy_img = noisy_img.squeeze(0).cpu() denoi_img = denoised_image.squeeze(0).cpu() # write psnr value on file name denoi_name = '%04d_DN_%.2f'%(idx, psnr_value) if 'clean' in data else '%04d_DN'%idx # imwrite if 'clean' in data: self.file_manager.save_img_tensor(img_save_path, '%04d_CL'%idx, clean_img) if noisy_img is not None: self.file_manager.save_img_tensor(img_save_path, '%04d_N'%idx, noisy_img) self.file_manager.save_img_tensor(img_save_path, denoi_name, denoi_img) # procedure log msg if info: if 'clean' in data: self.logger.note('[%s] testing... %04d/%04d. PSNR : %.2f dB'%(self.status, idx, dataloader.__len__(), psnr_value), end='\r') else: self.logger.note('[%s] testing... %04d/%04d.'%(self.status, idx, dataloader.__len__()), end='\r') # final log msg if count > 0: self.logger.val('[%s] Done! 
PSNR : %.2f dB, SSIM : %.3f'%(self.status, psnr_sum/count, ssim_sum/count)) else: self.logger.val('[%s] Done!'%self.status) # return if count == 0: return None, None else: return psnr_sum/count, ssim_sum/count def test_img(self, image_dir, save_dir='./'): ''' Inference a single image. ''' # load image noisy = np2tensor(cv2.imread(image_dir)) noisy = noisy.unsqueeze(0).float() # to device if self.cfg['gpu'] != 'None': noisy = noisy.cuda() # forward denoised = self.denoiser(noisy) # post-process denoised += self.test_cfg['add_con'] if self.test_cfg['floor']: denoised = torch.floor(denoised) # save image denoised = tensor2np(denoised) denoised = denoised.squeeze(0) name = image_dir.split('/')[-1].split('.')[0] cv2.imwrite(os.path.join(save_dir, name+'_DN.png'), denoised) # print message self.logger.note('[%s] saved : %s'%(self.status, os.path.join(save_dir, name+'_DN.png'))) def test_dir(self, direc): ''' Inference all images in the directory. ''' for ff in [f for f in os.listdir(direc) if os.path.isfile(os.path.join(direc, f))]: os.makedirs(os.path.join(direc, 'results'), exist_ok=True) self.test_img(os.path.join(direc, ff), os.path.join(direc, 'results')) def test_DND(self, img_save_path): ''' Benchmarking DND dataset. 
''' # make directories for .mat & image saving self.file_manager.make_dir(img_save_path) self.file_manager.make_dir(img_save_path + '/mat') if self.test_cfg['save_image']: self.file_manager.make_dir(img_save_path + '/img') def wrap_denoiser(Inoisy, nlf, idx, kidx): noisy = 255 * torch.from_numpy(Inoisy) # to device if self.cfg['gpu'] != 'None': noisy = noisy.cuda() noisy = autograd.Variable(noisy) # processing noisy = noisy.permute(2,0,1) noisy = self.test_dataloader['dataset'].dataset._pre_processing({'real_noisy': noisy})['real_noisy'] noisy = noisy.view(1,noisy.shape[0], noisy.shape[1], noisy.shape[2]) denoised = self.denoiser(noisy) denoised += self.test_cfg['add_con'] if self.test_cfg['floor']: denoised = torch.floor(denoised) denoised = denoised[0,...].cpu().numpy() denoised = np.transpose(denoised, [1,2,0]) # image save if self.test_cfg['save_image'] and False: self.file_manager.save_img_numpy(img_save_path+'/img', '%02d_%02d_N'%(idx, kidx), 255*Inoisy) self.file_manager.save_img_numpy(img_save_path+'/img', '%02d_%02d_DN'%(idx, kidx), denoised) return denoised / 255 denoise_srgb(wrap_denoiser, './dataset/DND/dnd_2017', self.file_manager.get_dir(img_save_path+'/mat')) bundle_submissions_srgb(self.file_manager.get_dir(img_save_path+'/mat')) # info self.logger.val('[%s] Done!'%self.status) def _set_denoiser(self): if hasattr(self.model['denoiser'].module, 'denoise'): self.denoiser = self.model['denoiser'].module.denoise else: self.denoiser = self.model['denoiser'].module @torch.no_grad() def crop_test(self, fn, x, size=512, overlap=0): ''' crop test image and inference due to memory problem ''' b,c,h,w = x.shape denoised = torch.zeros_like(x) for i in range(0,h,size-overlap): for j in range(0,w,size-overlap): end_i = min(i+size, h) end_j = min(j+size, w) x_crop = x[...,i:end_i,j:end_j] denoised_crop = fn(x_crop) start_i = overlap if i != 0 else 0 start_j = overlap if j != 0 else 0 denoised[..., i+start_i:end_i, j+start_j:end_j] = denoised_crop[..., start_i:, 
start_j:] return denoised @torch.no_grad() def self_ensemble(self, fn, x): ''' Geomery self-ensemble function Note that in this function there is no gradient calculation. Args: fn : denoiser function x : input image Return: result : self-ensembled image ''' result = torch.zeros_like(x) for i in range(8): tmp = fn(rot_hflip_img(x, rot_times=i%4, hflip=i//4)) tmp = rot_hflip_img(tmp, rot_times=4-i%4) result += rot_hflip_img(tmp, hflip=i//4) return result / 8 #----------------------------# # Utility functions # #----------------------------# def print_loss(self): temporal_loss = 0. for key in self.loss_dict: if key != 'count': temporal_loss += self.loss_dict[key]/self.loss_dict['count'] self.loss_log += [temporal_loss] if len(self.loss_log) > 100: self.loss_log.pop(0) # print status and learning rate loss_out_str = '[%s] %04d/%04d, lr:%s ∣ '%(self.status, self.iter, self.max_iter, "{:.1e}".format(self._get_current_lr())) global_iter = (self.epoch-1)*self.max_iter + self.iter # print losses avg_loss = np.mean(self.loss_log) loss_out_str += 'avg_100 : %.3f ∣ '%(avg_loss) self.tboard.add_scalar('loss/avg_100', avg_loss, global_iter) for key in self.loss_dict: if key != 'count': loss = self.loss_dict[key]/self.loss_dict['count'] loss_out_str += '%s : %.3f ∣ '%(key, loss) self.tboard.add_scalar('loss/%s'%key, loss, global_iter) self.loss_dict[key] = 0. # print temporal information if len(self.tmp_info) > 0: loss_out_str += '\t[' for key in self.tmp_info: loss_out_str += ' %s : %.2f'%(key, self.tmp_info[key]/self.loss_dict['count']) self.tmp_info[key] = 0. 
loss_out_str += ' ]' # reset self.loss_dict['count'] = 0 self.logger.info(loss_out_str) def save_checkpoint(self): checkpoint_name = self._checkpoint_name(self.epoch) torch.save({'epoch': self.epoch, 'model_weight': {key:self.model[key].module.state_dict() for key in self.model}, 'optimizer_weight': {key:self.optimizer[key].state_dict() for key in self.optimizer}}, os.path.join(self.file_manager.get_dir(self.checkpoint_folder), checkpoint_name)) def load_checkpoint(self, load_epoch=0, name=None): if name is None: # if scratch, return if load_epoch == 0: return # load from local checkpoint folder file_name = os.path.join(self.file_manager.get_dir(self.checkpoint_folder), self._checkpoint_name(load_epoch)) else: # load from global checkpoint folder file_name = os.path.join('./ckpt', name) # check file exist assert os.path.isfile(file_name), 'there is no checkpoint: %s'%file_name # load checkpoint (epoch, model_weight, optimizer_weight) saved_checkpoint = torch.load(file_name) self.epoch = saved_checkpoint['epoch'] for key in self.module: self.module[key].load_state_dict(saved_checkpoint['model_weight'][key]) if hasattr(self, 'optimizer'): for key in self.optimizer: self.optimizer[key].load_state_dict(saved_checkpoint['optimizer_weight'][key]) # print message self.logger.note('[%s] model loaded : %s'%(self.status, file_name)) def _checkpoint_name(self, epoch): return self.session_name + '_%03d'%epoch + '.pth' def _find_last_epoch(self): checkpoint_list = os.listdir(self.file_manager.get_dir(self.checkpoint_folder)) epochs = [int(ckpt.replace('%s_'%self.session_name, '').replace('.pth', '')) for ckpt in checkpoint_list] assert len(epochs) > 0, 'There is no resumable checkpoint on session %s.'%self.session_name return max(epochs) def _get_current_lr(self): for first_optim in self.optimizer.values(): for param_group in first_optim.param_groups: return param_group['lr'] def _set_dataloader(self, dataset_cfg, batch_size, shuffle, num_workers): dataloader = {} dataset_dict 
= dataset_cfg['dataset'] if not isinstance(dataset_dict, dict): dataset_dict = {'dataset': dataset_dict} for key in dataset_dict: args = dataset_cfg[key + '_args'] dataset = get_dataset_class(dataset_dict[key])(**args) dataloader[key] = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=False) return dataloader def _set_one_optimizer(self, opt, parameters, lr): lr = float(self.train_cfg['init_lr']) if opt['type'] == 'SGD': return optim.SGD(parameters, lr=lr, momentum=float(opt['SGD']['momentum']), weight_decay=float(opt['SGD']['weight_decay'])) elif opt['type'] == 'Adam': return optim.Adam(parameters, lr=lr, betas=opt['Adam']['betas']) elif opt['type'] == 'AdamW': return optim.Adam(parameters, lr=lr, betas=opt['AdamW']['betas'], weight_decay=float(opt['AdamW']['weight_decay'])) else: raise RuntimeError('ambiguious optimizer type: {}'.format(opt['type'])) def _adjust_lr(self): sched = self.train_cfg['scheduler'] if sched['type'] == 'step': ''' step decreasing scheduler Args: step_size: step size(epoch) to decay the learning rate gamma: decay rate ''' if self.iter == self.max_iter: args = sched['step'] if self.epoch % args['step_size'] == 0: for optimizer in self.optimizer.values(): lr_before = optimizer.param_groups[0]['lr'] for param_group in optimizer.param_groups: param_group["lr"] = lr_before * float(args['gamma']) elif sched['type'] == 'linear': ''' linear decreasing scheduler Args: step_size: step size(epoch) to decrease the learning rate gamma: decay rate for reset learning rate ''' args = sched['linear'] if not hasattr(self, 'reset_lr'): self.reset_lr = float(self.train_cfg['init_lr']) * float(args['gamma'])**((self.epoch-1)//args['step_size']) # reset lr to initial value if self.epoch % args['step_size'] == 0 and self.iter == self.max_iter: self.reset_lr = float(self.train_cfg['init_lr']) * float(args['gamma'])**(self.epoch//args['step_size']) for optimizer in self.optimizer.values(): for param_group in 
optimizer.param_groups: param_group["lr"] = self.reset_lr # linear decaying else: ratio = ((self.epoch + (self.iter)/self.max_iter - 1) % args['step_size']) / args['step_size'] curr_lr = (1-ratio) * self.reset_lr for optimizer in self.optimizer.values(): for param_group in optimizer.param_groups: param_group["lr"] = curr_lr else: raise RuntimeError('ambiguious scheduler type: {}'.format(sched['type'])) def _adjust_warmup_lr(self, warmup_iter): init_lr = float(self.train_cfg['init_lr']) warmup_lr = init_lr * self.iter / warmup_iter for optimizer in self.optimizer.values(): for param_group in optimizer.param_groups: param_group["lr"] = warmup_lr def _train_mode(self): for key in self.model: self.model[key].train() def _eval_mode(self): for key in self.model: self.model[key].eval() def _set_status(self, status:str): assert len(status) <= status_len, 'status string cannot exceed %d characters, (now %d)'%(status_len, len(status)) if len(status.split(' ')) == 2: s0, s1 = status.split(' ') self.status = '%s'%s0.rjust(status_len//2) + ' '\ '%s'%s1.ljust(status_len//2) else: sp = status_len - len(status) self.status = ''.ljust(sp//2) + status + ''.ljust((sp+1)//2) def summary(self): summary = '' summary += '-'*100 + '\n' # model for k, v in self.module.items(): # get parameter number param_num = sum(p.numel() for p in v.parameters()) # get information about architecture and parameter number summary += '[%s] paramters: %s -->'%(k, human_format(param_num)) + '\n' summary += str(v) + '\n\n' # optim # Hardware summary += '-'*100 + '\n' return summary
27,989
37.767313
155
py
AP-BSN
AP-BSN-master/src/trainer/trainer.py
import os import datetime import torch from . import regist_trainer from .base import BaseTrainer from ..model import get_model_class @regist_trainer class Trainer(BaseTrainer): def __init__(self, cfg): super().__init__(cfg) @torch.no_grad() def test(self): ''' initialization test setting ''' # initialization dataset_load = (self.cfg['test_img'] is None) and (self.cfg['test_dir'] is None) self._before_test(dataset_load=dataset_load) # set image save path for i in range(60): test_time = datetime.datetime.now().strftime('%m-%d-%H-%M') + '-%02d'%i img_save_path = 'img/test_%s_%03d_%s' % (self.cfg['test']['dataset'], self.epoch, test_time) if not self.file_manager.is_dir_exist(img_save_path): break # -- [ TEST Single Image ] -- # if self.cfg['test_img'] is not None: self.test_img(self.cfg['test_img']) exit() # -- [ TEST Image Directory ] -- # elif self.cfg['test_dir'] is not None: self.test_dir(self.cfg['test_dir']) exit() # -- [ TEST DND Benchmark ] -- # elif self.test_cfg['dataset'] == 'DND_benchmark': self.test_DND(img_save_path) exit() # -- [ Test Normal Dataset ] -- # else: psnr, ssim = self.test_dataloader_process( dataloader = self.test_dataloader['dataset'], add_con = 0. if not 'add_con' in self.test_cfg else self.test_cfg['add_con'], floor = False if not 'floor' in self.test_cfg else self.test_cfg['floor'], img_save_path = img_save_path, img_save = self.test_cfg['save_image']) # print out result as filename if psnr is not None and ssim is not None: with open(os.path.join(self.file_manager.get_dir(img_save_path), '_psnr-%.2f_ssim-%.3f.result'%(psnr, ssim)), 'w') as f: f.write('PSNR: %f\nSSIM: %f'%(psnr, ssim)) @torch.no_grad() def validation(self): # set denoiser self._set_denoiser() # make directories for image saving img_save_path = 'img/val_%03d' % self.epoch self.file_manager.make_dir(img_save_path) # validation psnr, ssim = self.test_dataloader_process( dataloader = self.val_dataloader['dataset'], add_con = 0. 
if not 'add_con' in self.val_cfg else self.val_cfg['add_con'], floor = False if not 'floor' in self.val_cfg else self.val_cfg['floor'], img_save_path = img_save_path, img_save = self.val_cfg['save_image']) def _set_module(self): module = {} if self.cfg['model']['kwargs'] is None: module['denoiser'] = get_model_class(self.cfg['model']['type'])() else: module['denoiser'] = get_model_class(self.cfg['model']['type'])(**self.cfg['model']['kwargs']) return module def _set_optimizer(self): optimizer = {} for key in self.module: optimizer[key] = self._set_one_optimizer(opt = self.train_cfg['optimizer'], parameters = self.module[key].parameters(), lr = float(self.train_cfg['init_lr'])) return optimizer def _forward_fn(self, module, loss, data): # forward input_data = [data['dataset'][arg] for arg in self.cfg['model_input']] denoised_img = module['denoiser'](*input_data) model_output = {'recon': denoised_img} # get losses losses, tmp_info = loss(input_data, model_output, data['dataset'], module, \ ratio=(self.epoch-1 + (self.iter-1)/self.max_iter)/self.max_epoch) return losses, tmp_info
4,319
44
184
py
AP-BSN
AP-BSN-master/src/util/summary_logging.py
import time from torch.utils.tensorboard import SummaryWriter import numpy as np class LossWriter(SummaryWriter): def __init__(self, log_dir=None, comment=''): if log_dir == None: log_dir = './logs/tensorboard/' + time.strftime('%Y-%m-%d--%H-%M-%S', time.localtime(time.time())) super(LossWriter, self).__init__(log_dir=log_dir, comment=comment) def write_loss(self, loss_name, scalar, n_iter): self.add_scalar('Loss/'+loss_name, scalar, n_iter) if __name__=='__main__': testwriter = LossWriter() for n_iter in range(100): testwriter.write_loss(np.random.random(), n_iter)
640
28.136364
110
py
AP-BSN
AP-BSN-master/src/util/util.py
from math import exp

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from skimage.metrics import peak_signal_noise_ratio, structural_similarity


def np2tensor(n:np.array):
    '''
    transform numpy array (image) to torch Tensor
    BGR -> RGB
    (h,w,c) -> (c,h,w)
    '''
    # gray
    if len(n.shape) == 2:
        # BUGFIX: a 2-D array cannot be transposed with 3 axes
        # (np.transpose(n, (2,0,1)) raised ValueError); add the channel axis.
        return torch.from_numpy(np.ascontiguousarray(np.expand_dims(n, axis=0)))
    # RGB -> BGR
    elif len(n.shape) == 3:
        return torch.from_numpy(np.ascontiguousarray(np.transpose(np.flip(n, axis=2), (2,0,1))))
    else:
        raise RuntimeError('wrong numpy dimensions : %s'%(n.shape,))

def tensor2np(t:torch.Tensor):
    '''
    transform torch Tensor to numpy having opencv image form.
    RGB -> BGR
    (c,h,w) -> (h,w,c)
    '''
    t = t.cpu().detach()

    # gray
    if len(t.shape) == 2:
        # BUGFIX: a 2-D tensor has no third dim to permute; return it as-is.
        return t.numpy()
    # RGB -> BGR
    elif len(t.shape) == 3:
        return np.flip(t.permute(1,2,0).numpy(), axis=2)
    # image batch
    elif len(t.shape) == 4:
        return np.flip(t.permute(0,2,3,1).numpy(), axis=3)
    else:
        raise RuntimeError('wrong tensor dimensions : %s'%(t.shape,))

def imwrite_tensor(t, name='test.png'):
    # save a tensor image as a file via opencv
    cv2.imwrite('./%s'%name, tensor2np(t.cpu()))

def imread_tensor(name='test'):
    # load an image file as a (c,h,w) tensor
    return np2tensor(cv2.imread('./%s'%name))

def rot_hflip_img(img:torch.Tensor, rot_times:int=0, hflip:int=0):
    '''
    rotate '90 x times degree' & horizontal flip image
    (shape of img: b,c,h,w or c,h,w)
    '''
    b=0 if len(img.shape)==3 else 1
    # no flip
    if hflip % 2 == 0:
        # 0 degrees
        if rot_times % 4 == 0:
            return img
        # 90 degrees
        elif rot_times % 4 == 1:
            return img.flip(b+1).transpose(b+1,b+2)
        # 180 degrees
        elif rot_times % 4 == 2:
            return img.flip(b+2).flip(b+1)
        # 270 degrees
        else:
            return img.flip(b+2).transpose(b+1,b+2)
    # horizontal flip
    else:
        # 0 degrees
        if rot_times % 4 == 0:
            return img.flip(b+2)
        # 90 degrees
        elif rot_times % 4 == 1:
            return img.flip(b+1).flip(b+2).transpose(b+1,b+2)
        # 180 degrees
        elif rot_times % 4 == 2:
            return img.flip(b+1)
        # 270 degrees
        else:
            return img.transpose(b+1,b+2)

def pixel_shuffle_down_sampling(x:torch.Tensor, f:int, pad:int=0, pad_value:float=0.):
    '''
    pixel-shuffle down-sampling (PD) from "When AWGN-denoiser meets real-world noise." (AAAI 2019)
    Args:
        x (Tensor) : input tensor
        f (int) : factor of PD
        pad (int) : number of pad between each down-sampled images
        pad_value (float) : padding value
    Return:
        pd_x (Tensor) : down-shuffled image tensor with pad or not
    '''
    # single image tensor
    if len(x.shape) == 3:
        c,w,h = x.shape
        unshuffled = F.pixel_unshuffle(x, f)
        if pad != 0: unshuffled = F.pad(unshuffled, (pad, pad, pad, pad), value=pad_value)
        return unshuffled.view(c,f,f,w//f+2*pad,h//f+2*pad).permute(0,1,3,2,4).reshape(c, w+2*f*pad, h+2*f*pad)
    # batched image tensor
    else:
        b,c,w,h = x.shape
        unshuffled = F.pixel_unshuffle(x, f)
        if pad != 0: unshuffled = F.pad(unshuffled, (pad, pad, pad, pad), value=pad_value)
        return unshuffled.view(b,c,f,f,w//f+2*pad,h//f+2*pad).permute(0,1,2,4,3,5).reshape(b,c,w+2*f*pad, h+2*f*pad)

def pixel_shuffle_up_sampling(x:torch.Tensor, f:int, pad:int=0):
    '''
    inverse of pixel-shuffle down-sampling (PD)
    see more details about PD in pixel_shuffle_down_sampling()
    Args:
        x (Tensor) : input tensor
        f (int) : factor of PD
        pad (int) : number of pad will be removed
    '''
    # single image tensor
    if len(x.shape) == 3:
        c,w,h = x.shape
        before_shuffle = x.view(c,f,w//f,f,h//f).permute(0,1,3,2,4).reshape(c*f*f,w//f,h//f)
        if pad != 0: before_shuffle = before_shuffle[..., pad:-pad, pad:-pad]
        return F.pixel_shuffle(before_shuffle, f)
    # batched image tensor
    else:
        b,c,w,h = x.shape
        before_shuffle = x.view(b,c,f,w//f,f,h//f).permute(0,1,2,4,3,5).reshape(b,c*f*f,w//f,h//f)
        if pad != 0: before_shuffle = before_shuffle[..., pad:-pad, pad:-pad]
        return F.pixel_shuffle(before_shuffle, f)

def human_format(num):
    '''Format a number with a K/M/G/T/P suffix (e.g. 1500 -> "1.5K").'''
    magnitude=0
    while abs(num)>=1000:
        magnitude+=1
        num/=1000.0
    return '%.1f%s'%(num,['','K','M','G','T','P'][magnitude])

def psnr(img1, img2):
    '''
    image value range : [0 - 255]
    clipping for model output
    '''
    if len(img1.shape) == 4:
        img1 = img1[0]
    if len(img2.shape) == 4:
        img2 = img2[0]

    # tensor to numpy
    if isinstance(img1, torch.Tensor):
        img1 = tensor2np(img1)
    if isinstance(img2, torch.Tensor):
        img2 = tensor2np(img2)

    # numpy value cliping & chnage type to uint8
    img1 = np.clip(img1, 0, 255)
    img2 = np.clip(img2, 0, 255)

    return peak_signal_noise_ratio(img1, img2, data_range=255)

def ssim(img1, img2):
    '''
    image value range : [0 - 255]
    clipping for model output
    '''
    if len(img1.shape) == 4:
        img1 = img1[0]
    if len(img2.shape) == 4:
        img2 = img2[0]

    # tensor to numpy
    if isinstance(img1, torch.Tensor):
        img1 = tensor2np(img1)
    if isinstance(img2, torch.Tensor):
        img2 = tensor2np(img2)

    # numpy value cliping
    img2 = np.clip(img2, 0, 255)
    img1 = np.clip(img1, 0, 255)

    return structural_similarity(img1, img2, multichannel=True, data_range=255)

def get_gaussian_2d_filter(window_size, sigma, channel=1, device=torch.device('cpu')):
    '''
    return 2d gaussian filter window as tensor form
    Arg:
        window_size : filter window size
        sigma : standard deviation
    '''
    gauss = torch.ones(window_size, device=device)
    for x in range(window_size):
        gauss[x] = exp(-(x - window_size//2)**2/float(2*sigma**2))
    gauss = gauss.unsqueeze(1)
    # outer product of the 1-D gaussian with itself gives the 2-D kernel
    filter2d = gauss.mm(gauss.t()).float()
    filter2d = (filter2d/filter2d.sum()).unsqueeze(0).unsqueeze(0)
    return filter2d.expand(channel, 1, window_size, window_size)

def get_mean_2d_filter(window_size, channel=1, device=torch.device('cpu')):
    '''
    return 2d mean filter as tensor form
    Args:
        window_size : filter window size
    '''
    window = torch.ones((window_size, window_size), device=device)
    window = (window/window.sum()).unsqueeze(0).unsqueeze(0)
    return window.expand(channel, 1, window_size, window_size)

def mean_conv2d(x, window_size=None, window=None, filter_type='gau', sigma=None, keep_sigma=False, padd=True):
    '''
    color channel-wise 2d mean or gaussian convolution
    Args:
        x : input image
        window_size : filter window size
        filter_type(opt) : 'gau' or 'mean'
        sigma : standard deviation of gaussian filter
    '''
    b_x = x.unsqueeze(0) if len(x.shape) == 3 else x

    if window is None:
        if sigma is None: sigma = (window_size-1)/6
        if filter_type == 'gau':
            window = get_gaussian_2d_filter(window_size, sigma=sigma, channel=b_x.shape[1], device=x.device)
        else:
            window = get_mean_2d_filter(window_size, channel=b_x.shape[1], device=x.device)
    else:
        window_size = window.shape[-1]

    if padd:
        pl = (window_size-1)//2
        b_x = F.pad(b_x, (pl,pl,pl,pl), 'reflect')

    # depthwise convolution (one filter per channel)
    m_b_x = F.conv2d(b_x, window, groups=b_x.shape[1])

    if keep_sigma:
        m_b_x /= (window**2).sum().sqrt()

    if len(x.shape) == 4:
        return m_b_x
    elif len(x.shape) == 3:
        return m_b_x.squeeze(0)
    else:
        raise ValueError('input image shape is not correct')
7,961
31.765432
132
py
AP-BSN
AP-BSN-master/src/util/file_manager.py
import os import cv2 import numpy as np import torch from .util import tensor2np class FileManager: def __init__(self, session_name:str): self.output_folder = "./output" if not os.path.isdir(self.output_folder): os.makedirs(self.output_folder) print("[WARNING] output folder is not exist, create new one") # init session self.session_name = session_name os.makedirs(os.path.join(self.output_folder, self.session_name), exist_ok=True) # mkdir for directory in ['checkpoint', 'img', 'tboard']: self.make_dir(directory) def is_dir_exist(self, dir_name:str) -> bool: return os.path.isdir(os.path.join(self.output_folder, self.session_name, dir_name)) def make_dir(self, dir_name:str) -> str: os.makedirs(os.path.join(self.output_folder, self.session_name, dir_name), exist_ok=True) def get_dir(self, dir_name:str) -> str: # -> './output/<session_name>/dir_name' return os.path.join(self.output_folder, self.session_name, dir_name) def save_img_tensor(self, dir_name:str, file_name:str, img:torch.Tensor, ext='png'): self.save_img_numpy(dir_name, file_name, tensor2np(img), ext) def save_img_numpy(self, dir_name:str, file_name:str, img:np.array, ext='png'): file_dir_name = os.path.join(self.get_dir(dir_name), '%s.%s'%(file_name, ext)) if np.shape(img)[2] == 1: cv2.imwrite(file_dir_name, np.squeeze(img, 2)) else: cv2.imwrite(file_dir_name, img)
1,563
35.372093
98
py
AP-BSN
AP-BSN-master/src/util/dnd_submission/pytorch_wrapper.py
# Author: Tobias Plötz, TU Darmstadt (tobias.ploetz@visinf.tu-darmstadt.de) # This file is part of the implementation as described in the CVPR 2017 paper: # Tobias Plötz and Stefan Roth, Benchmarking Denoising Algorithms with Real Photographs. # Please see the file LICENSE.txt for the license governing this code. import numpy as np import torch from torch.autograd import Variable def pytorch_denoiser(denoiser, use_cuda): def wrap_denoiser(Inoisy, nlf): noisy = torch.from_numpy(Inoisy) if len(noisy.shape) > 2: noisy = noisy.view(1,noisy.shape[2], noisy.shape[0], noisy.shape[1]) else: noisy = noisy.view(1,1, noisy.shape[0], noisy.shape[1]) if use_cuda: noisy = noisy.cuda() noisy = Variable(noisy) denoised = denoiser(noisy, nlf) denoised = denoised[0,...].cpu().numpy() denoised = np.transpose(denoised, [1,2,0]) return denoised return wrap_denoiser
984
31.833333
89
py
AP-BSN
AP-BSN-master/src/datahandler/DND.py
import os import torch import h5py from src.datahandler.denoise_dataset import DenoiseDataSet from . import regist_dataset @regist_dataset class DND(DenoiseDataSet): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def _scan(self): dataset_path = os.path.join(self.dataset_dir, 'DND/dnd_2017/images_srgb') assert os.path.exists(dataset_path), 'There is no dataset %s'%dataset_path for root, _, files in os.walk(dataset_path): for file_name in files: self.img_paths.append(os.path.join(root, file_name)) def _load_data(self, data_idx): with h5py.File(self.img_paths[data_idx], 'r') as img_file: noisy_img = img_file[list(img_file.keys())[0]][()]*255. return {'real_noisy': torch.from_numpy(noisy_img)} @regist_dataset class prep_DND(DenoiseDataSet): ''' dataset class for prepared DND dataset which is cropped with overlap. [using size 512x512 with 128 overlapping] ''' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def _scan(self): self.dataset_path = os.path.join(self.dataset_dir, 'prep/DND_s512_o128') assert os.path.exists(self.dataset_path), 'There is no dataset %s'%self.dataset_path for root, _, files in os.walk(os.path.join(self.dataset_path, 'RN')): self.img_paths = files def _load_data(self, data_idx): file_name = self.img_paths[data_idx] noisy_img = self._load_img(os.path.join(self.dataset_path, 'RN' , file_name)) return {'real_noisy': noisy_img} #'instances': instance } @regist_dataset class DND_benchmark(DenoiseDataSet): ''' dumpy dataset class for DND benchmark DND benchmarking code is implemented in the "trainer" directly ''' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def _scan(self): pass def _load_data(self, data_idx): pass
1,988
31.080645
92
py
AP-BSN
AP-BSN-master/src/datahandler/denoise_dataset.py
import random, os

import cv2
import numpy as np
from scipy.io import savemat
import torch
from torch.utils.data import Dataset

from ..util.util import rot_hflip_img, tensor2np, np2tensor, mean_conv2d


class DenoiseDataSet(Dataset):
    def __init__(self, add_noise:str=None, crop_size:list=None, aug:list=None, n_repeat:int=1, n_data:int=None, ratio_data:float=None) -> None:
        '''
        Base denoising dataset class for various dataset.

        To build a custom dataset class, implement in the inherited class:
            - self._scan(self) : scan image data & save its paths (to self.img_paths)
            - self._load_data(self, data_idx) : load single paired data as a dictionary

        Args:
            add_noise (str)     : configuration of additive noise (see _add_noise())
            crop_size (list)    : crop size, e.g. [W] or [H, W]; no crop if None
            aug (list)          : list of data augmentations (see _augmentation())
            n_repeat (int)      : number of repeats for each data
            n_data (int)        : number of data to be used (default: None = all data)
            ratio_data (float)  : ratio of data to be used (used when n_data=None)
        '''
        self.dataset_dir = './dataset'
        if not os.path.isdir(self.dataset_dir):
            raise Exception('dataset directory is not exist')

        # parse additive noise argument
        self.add_noise_type, self.add_noise_opt, self.add_noise_clamp = self._parse_add_noise(add_noise)

        # set parameters for dataset
        self.crop_size = crop_size
        self.aug = aug
        self.n_repeat = n_repeat

        # scan all data and fill in self.img_paths
        self.img_paths = []
        self._scan()
        if len(self.img_paths) > 0:
            # sort for deterministic ordering (only sortable scalar path types)
            if self.img_paths[0].__class__.__name__ in ['int', 'str', 'float']:
                self.img_paths.sort()

        # set data amount
        if n_data is not None:
            self.n_data = n_data
        elif ratio_data is not None:
            self.n_data = int(ratio_data * len(self.img_paths))
        else:
            self.n_data = len(self.img_paths)

    def __len__(self):
        return self.n_data * self.n_repeat

    def __getitem__(self, idx):
        '''
        final dictionary shape of data:
        {'clean', 'syn_noisy', 'real_noisy', 'noisy (real first, else syn)', etc}
        '''
        # calculate data index (repeat wraps around the dataset)
        data_idx = idx % self.n_data

        # load data
        data = self._load_data(data_idx)

        # pre-processing (currently only crop)
        data = self._pre_processing(data)

        # synthesize additive noise (prefers the clean image as the source)
        if self.add_noise_type is not None:
            if 'clean' in data:
                src_img = data['clean']
            elif 'real_noisy' in data:
                src_img = data['real_noisy']
            else:
                raise RuntimeError('there is no clean or real image to synthesize. (synthetic noise type: %s)'%self.add_noise_type)
            syn_noisy_img, nlf = self._add_noise(src_img, self.add_noise_type, self.add_noise_opt, self.add_noise_clamp)
            data['syn_noisy'] = syn_noisy_img
            data['nlf'] = nlf

        # data augmentation
        if self.aug is not None:
            data = self._augmentation(data, self.aug)

        # add general label 'noisy' to use any of real_noisy or syn_noisy (real first)
        if 'real_noisy' in data or 'syn_noisy' in data:
            data['noisy'] = data['real_noisy'] if 'real_noisy' in data else data['syn_noisy']

        return data

    def _scan(self):
        # TODO fill in self.img_paths (include path from project directory)
        raise NotImplementedError

    def _load_data(self, data_idx):
        # TODO load possible data as dictionary
        # dictionary key list :
        #   'clean'      : clean image without noise (gt or anything)
        #   'real_noisy' : real noisy image or already synthesized noisy image
        #   'instances'  : any other information of capturing situation
        raise NotImplementedError

    #----------------------------#
    #  Image handling functions  #
    #----------------------------#
    def _load_img(self, img_name, as_gray=False):
        img = cv2.imread(img_name, 1)
        assert img is not None, "failure on loading image - %s"%img_name
        return self._load_img_from_np(img, as_gray, RGBflip=True)

    def _load_img_from_np(self, img, as_gray=False, RGBflip=False):
        # if color
        if len(img.shape) != 2:
            if as_gray:
                # follows definition of sRBG in terms of the CIE 1931 linear luminance.
                # because calculation opencv color conversion and imread grayscale mode is a bit different.
                # https://en.wikipedia.org/wiki/Grayscale
                img = np.average(img, axis=2, weights=[0.0722, 0.7152, 0.2126])
                img = np.expand_dims(img, axis=0)
            else:
                if RGBflip:
                    img = np.flip(img, axis=2)
                img = np.transpose(img, (2,0,1))
        # if gray
        else:
            img = np.expand_dims(img, axis=0)
        return torch.from_numpy(np.ascontiguousarray(img).astype(np.float32))

    def _pre_processing(self, data):
        # get a patch from image data
        if self.crop_size != None:
            data = self._get_patch(self.crop_size, data)
        return data

    def _get_patch(self, crop_size, data, rnd=True):
        # check that both images have the same size
        if 'clean' in data and 'real_noisy' in data:
            # BUGFIX: the original assertion compared each shape with itself
            # (always true); compare clean against real_noisy instead.
            assert data['clean'].shape[1] == data['real_noisy'].shape[1] and data['clean'].shape[2] == data['real_noisy'].shape[2], \
                'img shape should be same. (%d, %d) != (%d, %d)' % (data['clean'].shape[1], data['clean'].shape[2], data['real_noisy'].shape[1], data['real_noisy'].shape[2])

        # get image shape and select random crop location
        if 'clean' in data:
            max_x = data['clean'].shape[2] - crop_size[0]
            max_y = data['clean'].shape[1] - crop_size[1]
        else:
            max_x = data['real_noisy'].shape[2] - crop_size[0]
            max_y = data['real_noisy'].shape[1] - crop_size[1]
        assert max_x >= 0
        assert max_y >= 0

        if rnd and max_x>0 and max_y>0:
            x = np.random.randint(0, max_x)
            y = np.random.randint(0, max_y)
        else:
            x, y = 0, 0

        # crop
        if 'clean' in data:
            data['clean'] = data['clean'][:, y:y+crop_size[1], x:x+crop_size[0]]
        if 'real_noisy' in data:
            data['real_noisy'] = data['real_noisy'][:, y:y+crop_size[1], x:x+crop_size[0]]

        return data

    def normalize_data(self, data, cuda=False):
        # for all image
        for key in data:
            if self._is_image_tensor(data[key]):
                data[key] = self.normalize(data[key], cuda)
        return data

    def inverse_normalize_data(self, data, cuda=False):
        # for all image
        for key in data:
            if self._is_image_tensor(data[key]):
                data[key] = self.inverse_normalize(data[key], cuda)
        return data

    def normalize(self, img, cuda=False):
        # NOTE(review): self.gray_stds/gray_means/color_stds/color_means are not
        # set anywhere in this class — presumably provided by a subclass or
        # monkey-patched elsewhere. TODO confirm before relying on normalize().
        if img.shape[0] == 1:
            stds = self.gray_stds
            means = self.gray_means
        elif img.shape[0] == 3:
            stds = self.color_stds
            means = self.color_means
        else:
            raise RuntimeError('undefined image channel length : %d'%img.shape[0])
        if cuda: means, stds = means.cuda(), stds.cuda()
        return (img-means) / stds

    def inverse_normalize(self, img, cuda=False):
        # see NOTE in normalize() about the mean/std attributes
        if img.shape[0] == 1:
            stds = self.gray_stds
            means = self.gray_means
        elif img.shape[0] == 3:
            stds = self.color_stds
            means = self.color_means
        else:
            raise RuntimeError('undefined image channel length : %d'%img.shape[0])
        if cuda: means, stds = means.cuda(), stds.cuda()
        return (img*stds) + means

    def _parse_add_noise(self, add_noise_str:str):
        '''
        parse noise string: noise_type-opt0:opt1:opt2-clamp
        '''
        if add_noise_str == 'bypass':
            return 'bypass', None, None
        elif add_noise_str != None:
            parts = add_noise_str.split('-')
            add_noise_type = parts[0]
            add_noise_opt = [float(v) for v in parts[1].split(':')]
            add_noise_clamp = len(parts) > 2 and parts[2] == 'clamp'
            return add_noise_type, add_noise_opt, add_noise_clamp
        else:
            return None, None, None

    def _add_noise(self, clean_img:torch.Tensor, add_noise_type:str, opt:list, clamp:bool=False) -> torch.Tensor:
        '''
        add various noise to clean image.
        Args:
            clean_img (Tensor) : clean image to synthesize on
            add_noise_type     : below types are available
            opt (list)         : args for synthesize noise
            clamp (bool)       : optional, clamp noisy image into [0,255]
        Return:
            (synthesized_img, nlf)

        Noise_types
            - bypass    : bypass clean image
            - uni       : uniform noise in -opt[0] ~ opt[0]
            - gau       : zero-mean gaussian noise with opt[0] std
            - gau_blind : blind gaussian, std uniformly selected from opt[0] ~ opt[1]
            - struc_gau : structured gaussian noise. opt[0] std, opt[1] window size, opt[2] filter sigma
            - het_gau   : heteroscedastic gaussian with indep weight opt[0], dep weight opt[1]
        '''
        nlf = None
        if add_noise_type == 'bypass':
            # bypass clean image
            synthesized_img = clean_img
        elif add_noise_type == 'uni':
            # add uniform noise
            synthesized_img = clean_img + 2*opt[0] * torch.rand(clean_img.shape) - opt[0]
        elif add_noise_type == 'gau':
            # add AWGN
            nlf = opt[0]
            synthesized_img = clean_img + torch.normal(mean=0., std=nlf, size=clean_img.shape)
        elif add_noise_type == 'gau_blind':
            # add blind gaussian noise
            nlf = random.uniform(opt[0], opt[1])
            synthesized_img = clean_img + torch.normal(mean=0., std=nlf, size=clean_img.shape)
        elif add_noise_type == 'struc_gau':
            # add structured gaussian noise (used in the paper "Noiser2Noise": https://arxiv.org/pdf/1910.11908.pdf)
            nlf = opt[0]
            gau_noise = torch.normal(mean=0., std=opt[0], size=clean_img.shape)
            struc_gau = mean_conv2d(gau_noise, window_size=int(opt[1]), sigma=opt[2], keep_sigma=True)
            synthesized_img = clean_img + struc_gau
        elif add_noise_type == 'het_gau':
            # add heteroscedastic guassian noise
            het_gau_std = (clean_img * (opt[0]**2) + torch.ones(clean_img.shape) * (opt[1]**2)).sqrt()
            nlf = het_gau_std
            synthesized_img = clean_img + torch.normal(mean=0., std=nlf)
        else:
            raise RuntimeError('undefined additive noise type : %s'%add_noise_type)

        if clamp:
            synthesized_img = torch.clamp(synthesized_img, 0, 255)

        return synthesized_img, nlf

    def _augmentation(self, data:dict, aug:list):
        '''
        Parsing augmentation list and apply it to the data images.
        '''
        # parsing augmentation
        rot, hflip = 0, 0
        for aug_name in aug:
            # aug : random rotation
            if aug_name == 'rot':
                rot = random.randint(0,3)
            # aug : random flip
            elif aug_name == 'hflip':
                hflip = random.randint(0,1)
            else:
                raise RuntimeError('undefined augmentation option : %s'%aug_name)

        # for every data (only image), apply rotation and flipping augmentation
        for key in data:
            if self._is_image_tensor(data[key]):
                # random rotation and flip
                if rot != 0 or hflip != 0:
                    data[key] = rot_hflip_img(data[key], rot, hflip)
        return data

    #----------------------------#
    #   Image saving functions   #
    #----------------------------#
    def save_all_image(self, dir, clean=False, syn_noisy=False, real_noisy=False):
        for idx in range(len(self.img_paths)):
            data = self.__getitem__(idx)
            if clean and 'clean' in data:
                cv2.imwrite(os.path.join(dir, '%04d_CL.png'%idx), tensor2np(data['clean']))
            if syn_noisy and 'syn_noisy' in data:
                cv2.imwrite(os.path.join(dir, '%04d_SN.png'%idx), tensor2np(data['syn_noisy']))
            if real_noisy and 'real_noisy' in data:
                cv2.imwrite(os.path.join(dir, '%04d_RN.png'%idx), tensor2np(data['real_noisy']))
            print('image %04d saved!'%idx)

    def prep_save(self, img_idx:int, img_size:int, overlap:int, clean:bool=False, syn_noisy:bool=False, real_noisy:bool=False):
        '''
        cropping an image into mini-size patches for efficient training.
        Args:
            img_idx (int)     : index of image
            img_size (int)    : size of image
            overlap (int)     : overlap between patches
            clean (bool)      : save clean image (default: False)
            syn_noisy (bool)  : save synthesized noisy image (default: False)
            real_noisy (bool) : save real noisy image (default: False)
        '''
        d_name = '%s_s%d_o%d'%(self.__class__.__name__, img_size, overlap)
        os.makedirs(os.path.join(self.dataset_dir, 'prep', d_name), exist_ok=True)
        assert overlap < img_size
        stride = img_size - overlap

        if clean:
            clean_dir = os.path.join(self.dataset_dir, 'prep', d_name, 'CL')
            os.makedirs(clean_dir, exist_ok=True)
        if syn_noisy:
            syn_noisy_dir = os.path.join(self.dataset_dir, 'prep', d_name, 'SN')
            os.makedirs(syn_noisy_dir, exist_ok=True)
        if real_noisy:
            real_noisy_dir = os.path.join(self.dataset_dir, 'prep', d_name, 'RN')
            os.makedirs(real_noisy_dir, exist_ok=True)

        data = self.__getitem__(img_idx)
        c,h,w = data['clean'].shape if 'clean' in data else data['real_noisy'].shape

        # BUGFIX: the width loop used (w-img_size+1)//stride + 1, which could
        # yield one extra (out-of-range, undersized) patch; match the height bound.
        for h_idx in range((h-img_size)//stride + 1):
            for w_idx in range((w-img_size)//stride + 1):
                hl, hr = h_idx*stride, h_idx*stride+img_size
                wl, wr = w_idx*stride, w_idx*stride+img_size
                if clean:      cv2.imwrite(os.path.join(clean_dir,      '%d_%d_%d.png'%(img_idx, h_idx, w_idx)), tensor2np(data['clean'][:,hl:hr,wl:wr]))
                if syn_noisy:  cv2.imwrite(os.path.join(syn_noisy_dir,  '%d_%d_%d.png'%(img_idx, h_idx, w_idx)), tensor2np(data['syn_noisy'][:,hl:hr,wl:wr]))
                if real_noisy: cv2.imwrite(os.path.join(real_noisy_dir, '%d_%d_%d.png'%(img_idx, h_idx, w_idx)), tensor2np(data['real_noisy'][:,hl:hr,wl:wr]))
        print('Cropped image %d / %d'%(img_idx, self.__len__()))

    #----------------------------#
    #            etc             #
    #----------------------------#
    def _is_image_tensor(self, x):
        '''
        return whether the input tensor has image shape (include batched image).
        '''
        if isinstance(x, torch.Tensor):
            if len(x.shape) == 3 or len(x.shape) == 4:
                if x.dtype != torch.bool:
                    return True
        return False


class ReturnMergedDataset():
    '''Factory wrapper that defers MergedDataset construction to call time.'''
    def __init__(self, d_list):
        self.d_list = d_list
    def __call__(self, *args, **kwargs):
        return MergedDataset(self.d_list, *args, **kwargs)

class MergedDataset(Dataset):
    def __init__(self, d_list, *args, **kwargs):
        '''
        Merged denoising dataset when you use multiple dataset combined.
        see more details of DenoiseDataSet
        '''
        from ..datahandler import get_dataset_object
        self.dataset_list = []
        for d in d_list:
            self.dataset_list.append(get_dataset_object(d)(*args, **kwargs))

        # only keys available in every sub-dataset are returned
        self.data_contents_flags = {'clean':True, 'noisy':True, 'real_noisy':True}
        self.dataset_length = []
        for d in self.dataset_list:
            self.dataset_length.append(d.__len__())
            data_sample = d.__getitem__(0)
            for key in self.data_contents_flags.keys():
                if not key in data_sample:
                    self.data_contents_flags[key] = False

    def __len__(self):
        return sum(self.dataset_length)

    def __getitem__(self, idx):
        # walk the sub-datasets until the residual index falls inside one
        t_idx = idx
        for d_idx, d in enumerate(self.dataset_list):
            if t_idx < self.dataset_length[d_idx]:
                data = d.__getitem__(t_idx)
                return_data = {}
                for key in self.data_contents_flags.keys():
                    if self.data_contents_flags[key]:
                        return_data[key] = data[key]
                return return_data
            t_idx -= self.dataset_length[d_idx]
        raise RuntimeError('index of merged dataset contains some bugs, total length %d, requiring idx %d'%(self.__len__(), idx))
17,823
41.539379
198
py
AP-BSN
AP-BSN-master/src/loss/recon.py
import torch import torch.nn as nn import torch.nn.functional as F from . import regist_loss eps = 1e-6 # ============================ # # Reconstruction loss # # ============================ # @regist_loss class L1(): def __call__(self, input_data, model_output, data, module): output = model_output['recon'] return F.l1_loss(output, data['clean']) @regist_loss class L2(): def __call__(self, input_data, model_output, data, module): output = model_output['recon'] return F.mse_loss(output, data['clean'])
563
20.692308
63
py
AP-BSN
AP-BSN-master/src/loss/__init__.py
import os
from importlib import import_module

import torch
import torch.nn as nn

loss_class_dict = {}

def regist_loss(loss_class):
    '''Decorator that registers a loss class under its class name.'''
    loss_name = loss_class.__name__
    assert not loss_name in loss_class_dict, 'there is already registered loss name: %s in loss_class_dict.' % loss_name
    loss_class_dict[loss_name] = loss_class
    return loss_class

'''
## default format of loss ##
@regist_loss
class ():
    def __call__(self, input_data, model_output, data, model):

## example of loss: L1 loss ##
@regist_loss
class L1():
    def __call__(self, input_data, model_output, data, module):
        output = model_output['recon']
        return F.l1_loss(output, data['clean'])
'''

# import all python files in model folder (so @regist_loss decorators run)
for module in os.listdir(os.path.dirname(__file__)):
    if module == '__init__.py' or module[-3:] != '.py':
        continue
    import_module('src.loss.{}'.format(module[:-3]))
del module

class Loss(nn.Module):
    def __init__(self, loss_string, tmp_info=None):
        '''Parse a loss specification like "1*L1+r0.5*self_L2".

        Each term is "<weight>*<name>"; an 'r' in the weight marks the term
        as ratio-scaled (multiplied by training progress at forward time).

        Args:
            loss_string : '+'-joined weighted loss terms
            tmp_info    : optional list of loss names logged but not optimized
        '''
        super().__init__()

        # BUGFIX (idiom): avoid mutable default argument
        if tmp_info is None:
            tmp_info = []

        loss_string = loss_string.replace(' ', '')

        # parse loss string
        self.loss_list = []
        for single_loss in loss_string.split('+'):
            weight, name = single_loss.split('*')
            ratio = 'r' in weight
            weight = float(weight.replace('r', ''))

            if name in loss_class_dict:
                self.loss_list.append({'name': name,
                                       'weight': weight,
                                       'func': loss_class_dict[name](),
                                       'ratio': ratio})
            else:
                raise RuntimeError('undefined loss term: {}'.format(name))

        # parse temporal information string
        self.tmp_info_list = []
        for name in tmp_info:
            if name in loss_class_dict:
                self.tmp_info_list.append({'name': name,
                                           'func': loss_class_dict[name]()})
            else:
                raise RuntimeError('undefined loss term: {}'.format(name))

    def forward(self, input_data, model_output, data, module, loss_name=None, change_name=None, ratio=1.0):
        '''
        forward all loss and return as dict format.
        Args
            input_data   : input of the network (also in the data)
            model_output : output of the network
            data         : entire batch of data
            module       : dictionary of modules (for another network forward)
            loss_name    : (optional) choose specific loss with name
            change_name  : (optional) replace name of chosen loss
            ratio        : (optional) percentage of learning procedure for increase weight during training
        Return
            losses       : dictionary of loss
        '''
        loss_arg = (input_data, model_output, data, module)

        # calculate only specific loss 'loss_name' and change its name to 'change_name'
        if loss_name is not None:
            for single_loss in self.loss_list:
                if loss_name == single_loss['name']:
                    loss = single_loss['weight'] * single_loss['func'](*loss_arg)
                    if single_loss['ratio']:
                        loss *= ratio
                    if change_name is not None:
                        return {change_name: loss}
                    return {single_loss['name']: loss}
            raise RuntimeError('there is no such loss in training losses: {}'.format(loss_name))

        # normal case: calculate all training losses at one time
        losses = {}
        for single_loss in self.loss_list:
            losses[single_loss['name']] = single_loss['weight'] * single_loss['func'](*loss_arg)
            if single_loss['ratio']:
                losses[single_loss['name']] *= ratio

        # calculate temporal information (logged only, so no gradient needed)
        tmp_info = {}
        for single_tmp_info in self.tmp_info_list:
            with torch.no_grad():
                tmp_info[single_tmp_info['name']] = single_tmp_info['func'](*loss_arg)

        return losses, tmp_info
4,173
36.267857
120
py
AP-BSN
AP-BSN-master/src/loss/recon_self.py
import torch import torch.nn as nn import torch.nn.functional as F from . import regist_loss eps = 1e-6 # ============================ # # Self-reconstruction loss # # ============================ # @regist_loss class self_L1(): def __call__(self, input_data, model_output, data, module): output = model_output['recon'] target_noisy = data['syn_noisy'] if 'syn_noisy' in data else data['real_noisy'] return F.l1_loss(output, target_noisy) @regist_loss class self_L2(): def __call__(self, input_data, model_output, data, module): output = model_output['recon'] target_noisy = data['syn_noisy'] if 'syn_noisy' in data else data['real_noisy'] return F.mse_loss(output, target_noisy)
750
24.896552
87
py
AP-BSN
AP-BSN-master/src/model/DBSNl.py
import torch import torch.nn as nn import torch.nn.functional as F from . import regist_model @regist_model class DBSNl(nn.Module): ''' Dilated Blind-Spot Network (cutomized light version) self-implemented version of the network from "Unpaired Learning of Deep Image Denoising (ECCV 2020)" and several modificaions are included. see our supple for more details. ''' def __init__(self, in_ch=3, out_ch=3, base_ch=128, num_module=9): ''' Args: in_ch : number of input channel out_ch : number of output channel base_ch : number of base channel num_module : number of modules in the network ''' super().__init__() assert base_ch%2 == 0, "base channel should be divided with 2" ly = [] ly += [ nn.Conv2d(in_ch, base_ch, kernel_size=1) ] ly += [ nn.ReLU(inplace=True) ] self.head = nn.Sequential(*ly) self.branch1 = DC_branchl(2, base_ch, num_module) self.branch2 = DC_branchl(3, base_ch, num_module) ly = [] ly += [ nn.Conv2d(base_ch*2, base_ch, kernel_size=1) ] ly += [ nn.ReLU(inplace=True) ] ly += [ nn.Conv2d(base_ch, base_ch//2, kernel_size=1) ] ly += [ nn.ReLU(inplace=True) ] ly += [ nn.Conv2d(base_ch//2, base_ch//2, kernel_size=1) ] ly += [ nn.ReLU(inplace=True) ] ly += [ nn.Conv2d(base_ch//2, out_ch, kernel_size=1) ] self.tail = nn.Sequential(*ly) def forward(self, x): x = self.head(x) br1 = self.branch1(x) br2 = self.branch2(x) x = torch.cat([br1, br2], dim=1) return self.tail(x) def _initialize_weights(self): # Liyong version for m in self.modules(): if isinstance(m, nn.Conv2d): # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, (2 / (9.0 * 64)) ** 0.5) class DC_branchl(nn.Module): def __init__(self, stride, in_ch, num_module): super().__init__() ly = [] ly += [ CentralMaskedConv2d(in_ch, in_ch, kernel_size=2*stride-1, stride=1, padding=stride-1) ] ly += [ nn.ReLU(inplace=True) ] ly += [ nn.Conv2d(in_ch, in_ch, kernel_size=1) ] ly += [ nn.ReLU(inplace=True) ] ly += [ nn.Conv2d(in_ch, in_ch, kernel_size=1) ] ly += [ nn.ReLU(inplace=True) ] ly 
+= [ DCl(stride, in_ch) for _ in range(num_module) ] ly += [ nn.Conv2d(in_ch, in_ch, kernel_size=1) ] ly += [ nn.ReLU(inplace=True) ] self.body = nn.Sequential(*ly) def forward(self, x): return self.body(x) class DCl(nn.Module): def __init__(self, stride, in_ch): super().__init__() ly = [] ly += [ nn.Conv2d(in_ch, in_ch, kernel_size=3, stride=1, padding=stride, dilation=stride) ] ly += [ nn.ReLU(inplace=True) ] ly += [ nn.Conv2d(in_ch, in_ch, kernel_size=1) ] self.body = nn.Sequential(*ly) def forward(self, x): return x + self.body(x) class CentralMaskedConv2d(nn.Conv2d): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.register_buffer('mask', self.weight.data.clone()) _, _, kH, kW = self.weight.size() self.mask.fill_(1) self.mask[:, :, kH//2, kH//2] = 0 def forward(self, x): self.weight.data *= self.mask return super().forward(x)
3,510
30.630631
104
py
AP-BSN
AP-BSN-master/src/model/APBSN.py
import torch import torch.nn as nn import torch.nn.functional as F from ..util.util import pixel_shuffle_down_sampling, pixel_shuffle_up_sampling from . import regist_model from .DBSNl import DBSNl @regist_model class APBSN(nn.Module): ''' Asymmetric PD Blind-Spot Network (AP-BSN) ''' def __init__(self, pd_a=5, pd_b=2, pd_pad=2, R3=True, R3_T=8, R3_p=0.16, bsn='DBSNl', in_ch=3, bsn_base_ch=128, bsn_num_module=9): ''' Args: pd_a : 'PD stride factor' during training pd_b : 'PD stride factor' during inference pd_pad : pad size between sub-images by PD process R3 : flag of 'Random Replacing Refinement' R3_T : number of masks for R3 R3_p : probability of R3 bsn : blind-spot network type in_ch : number of input image channel bsn_base_ch : number of bsn base channel bsn_num_module : number of module ''' super().__init__() # network hyper-parameters self.pd_a = pd_a self.pd_b = pd_b self.pd_pad = pd_pad self.R3 = R3 self.R3_T = R3_T self.R3_p = R3_p # define network if bsn == 'DBSNl': self.bsn = DBSNl(in_ch, in_ch, bsn_base_ch, bsn_num_module) else: raise NotImplementedError('bsn %s is not implemented'%bsn) def forward(self, img, pd=None): ''' Foward function includes sequence of PD, BSN and inverse PD processes. Note that denoise() function is used during inference time (for differenct pd factor and R3). ''' # default pd factor is training factor (a) if pd is None: pd = self.pd_a # do PD if pd > 1: pd_img = pixel_shuffle_down_sampling(img, f=pd, pad=self.pd_pad) else: p = self.pd_pad pd_img = F.pad(img, (p,p,p,p)) # forward blind-spot network pd_img_denoised = self.bsn(pd_img) # do inverse PD if pd > 1: img_pd_bsn = pixel_shuffle_up_sampling(pd_img_denoised, f=pd, pad=self.pd_pad) else: p = self.pd_pad img_pd_bsn = pd_img_denoised[:,:,p:-p,p:-p] return img_pd_bsn def denoise(self, x): ''' Denoising process for inference. 
''' b,c,h,w = x.shape # pad images for PD process if h % self.pd_b != 0: x = F.pad(x, (0, 0, 0, self.pd_b - h%self.pd_b), mode='constant', value=0) if w % self.pd_b != 0: x = F.pad(x, (0, self.pd_b - w%self.pd_b, 0, 0), mode='constant', value=0) # forward PD-BSN process with inference pd factor img_pd_bsn = self.forward(img=x, pd=self.pd_b) # Random Replacing Refinement if not self.R3: ''' Directly return the result (w/o R3) ''' return img_pd_bsn[:,:,:h,:w] else: denoised = torch.empty(*(x.shape), self.R3_T, device=x.device) for t in range(self.R3_T): indice = torch.rand_like(x) mask = indice < self.R3_p tmp_input = torch.clone(img_pd_bsn).detach() tmp_input[mask] = x[mask] p = self.pd_pad tmp_input = F.pad(tmp_input, (p,p,p,p), mode='reflect') if self.pd_pad == 0: denoised[..., t] = self.bsn(tmp_input) else: denoised[..., t] = self.bsn(tmp_input)[:,:,p:-p,p:-p] return torch.mean(denoised, dim=-1) ''' elif self.R3 == 'PD-refinement': s = 2 denoised = torch.empty(*(x.shape), s**2, device=x.device) for i in range(s): for j in range(s): tmp_input = torch.clone(x_mean).detach() tmp_input[:,:,i::s,j::s] = x[:,:,i::s,j::s] p = self.pd_pad tmp_input = F.pad(tmp_input, (p,p,p,p), mode='reflect') if self.pd_pad == 0: denoised[..., i*s+j] = self.bsn(tmp_input) else: denoised[..., i*s+j] = self.bsn(tmp_input)[:,:,p:-p,p:-p] return_denoised = torch.mean(denoised, dim=-1) else: raise RuntimeError('post-processing type not supported') '''
4,559
34.625
101
py
CoTr
CoTr-main/nnUNet/nnunet/training/model_restore.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import nnunet import torch from batchgenerators.utilities.file_and_folder_operations import * import importlib import pkgutil from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer def recursive_find_python_class(folder, trainer_name, current_module): tr = None for importer, modname, ispkg in pkgutil.iter_modules(folder): # print(modname, ispkg) if not ispkg: m = importlib.import_module(current_module + "." + modname) if hasattr(m, trainer_name): tr = getattr(m, trainer_name) break if tr is None: for importer, modname, ispkg in pkgutil.iter_modules(folder): if ispkg: next_current_module = current_module + "." + modname tr = recursive_find_python_class([join(folder[0], modname)], trainer_name, current_module=next_current_module) if tr is not None: break return tr def restore_model(pkl_file, checkpoint=None, train=False, fp16=None): """ This is a utility function to load any nnUNet trainer from a pkl. It will recursively search nnunet.trainig.network_training for the file that contains the trainer and instantiate it with the arguments saved in the pkl file. If checkpoint is specified, it will furthermore load the checkpoint file in train/test mode (as specified by train). The pkl file required here is the one that will be saved automatically when calling nnUNetTrainer.save_checkpoint. 
:param pkl_file: :param checkpoint: :param train: :param fp16: if None then we take no action. If True/False we overwrite what the model has in its init :return: """ info = load_pickle(pkl_file) init = info['init'] name = info['name'] search_in = join(nnunet.__path__[0], "training", "network_training") tr = recursive_find_python_class([search_in], name, current_module="nnunet.training.network_training") if tr is None: """ Fabian only. This will trigger searching for trainer classes in other repositories as well """ try: import meddec search_in = join(meddec.__path__[0], "model_training") tr = recursive_find_python_class([search_in], name, current_module="meddec.model_training") except ImportError: pass if tr is None: raise RuntimeError("Could not find the model trainer specified in checkpoint in nnunet.trainig.network_training. If it " "is not located there, please move it or change the code of restore_model. Your model " "trainer can be located in any directory within nnunet.trainig.network_training (search is recursive)." "\nDebug info: \ncheckpoint file: %s\nName of trainer: %s " % (checkpoint, name)) assert issubclass(tr, nnUNetTrainer), "The network trainer was found but is not a subclass of nnUNetTrainer. " \ "Please make it so!" # this is now deprecated """if len(init) == 7: print("warning: this model seems to have been saved with a previous version of nnUNet. Attempting to load it " "anyways. Expect the unexpected.") print("manually editing init args...") init = [init[i] for i in range(len(init)) if i != 2]""" # ToDo Fabian make saves use kwargs, please... 
trainer = tr(*init) # We can hack fp16 overwriting into the trainer without changing the init arguments because nothing happens with # fp16 in the init, it just saves it to a member variable if fp16 is not None: trainer.fp16 = fp16 trainer.process_plans(info['plans']) if checkpoint is not None: trainer.load_checkpoint(checkpoint, train) return trainer def load_best_model_for_inference(folder): checkpoint = join(folder, "model_best.model") pkl_file = checkpoint + ".pkl" return restore_model(pkl_file, checkpoint, False) def load_model_and_checkpoint_files(folder, folds=None, mixed_precision=None, checkpoint_name="model_best"): """ used for if you need to ensemble the five models of a cross-validation. This will restore the model from the checkpoint in fold 0, load all parameters of the five folds in ram and return both. This will allow for fast switching between parameters (as opposed to loading them form disk each time). This is best used for inference and test prediction :param folder: :param folds: :param mixed_precision: if None then we take no action. If True/False we overwrite what the model has in its init :return: """ if isinstance(folds, str): folds = [join(folder, "all")] assert isdir(folds[0]), "no output folder for fold %s found" % folds elif isinstance(folds, (list, tuple)): if len(folds) == 1 and folds[0] == "all": folds = [join(folder, "all")] else: folds = [join(folder, "fold_%d" % i) for i in folds] assert all([isdir(i) for i in folds]), "list of folds specified but not all output folders are present" elif isinstance(folds, int): folds = [join(folder, "fold_%d" % folds)] assert all([isdir(i) for i in folds]), "output folder missing for fold %d" % folds elif folds is None: print("folds is None so we will automatically look for output folders (not using \'all\'!)") folds = subfolders(folder, prefix="fold") print("found the following folds: ", folds) else: raise ValueError("Unknown value for folds. Type: %s. 
Expected: list of int, int, str or None", str(type(folds))) trainer = restore_model(join(folds[0], "%s.model.pkl" % checkpoint_name), fp16=mixed_precision) trainer.output_folder = folder trainer.output_folder_base = folder trainer.update_fold(0) trainer.initialize(False) all_best_model_files = [join(i, "%s.model" % checkpoint_name) for i in folds] print("using the following model files: ", all_best_model_files) all_params = [torch.load(i, map_location=torch.device('cpu')) for i in all_best_model_files] return trainer, all_params if __name__ == "__main__": pkl = "/home/fabian/PhD/results/nnUNetV2/nnUNetV2_3D_fullres/Task004_Hippocampus/fold0/model_best.model.pkl" checkpoint = pkl[:-4] train = False trainer = restore_model(pkl, checkpoint, train)
7,125
44.679487
149
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNetTrainerV2_DDP.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil from collections import OrderedDict from multiprocessing import Pool from time import sleep from typing import Tuple import numpy as np import torch import torch.distributed as dist from torch.cuda.amp import autocast from torch.nn.parallel import DistributedDataParallel as DDP from batchgenerators.utilities.file_and_folder_operations import maybe_mkdir_p, join, subfiles, isfile, load_pickle, \ save_json from nnunet.configuration import default_num_threads from nnunet.evaluation.evaluator import aggregate_scores from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.postprocessing.connected_components import determine_postprocessing from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation from nnunet.training.dataloading.dataset_loading import unpack_dataset from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss from nnunet.training.loss_functions.dice_loss import get_tp_fp_fn_tn from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.distributed import awesome_allgather_function from nnunet.utilities.nd_softmax import 
softmax_helper from nnunet.utilities.tensor_utilities import sum_tensor from nnunet.utilities.to_torch import to_cuda, maybe_to_torch from torch import nn, distributed from torch.nn.utils import clip_grad_norm_ from torch.optim.lr_scheduler import _LRScheduler class nnUNetTrainerV2_DDP(nnUNetTrainerV2): def __init__(self, plans_file, fold, local_rank, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, distribute_batch_size=False, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.init_args = ( plans_file, fold, local_rank, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, distribute_batch_size, fp16) self.distribute_batch_size = distribute_batch_size np.random.seed(local_rank) torch.manual_seed(local_rank) if torch.cuda.is_available(): torch.cuda.manual_seed_all(local_rank) self.local_rank = local_rank if torch.cuda.is_available(): torch.cuda.set_device(local_rank) dist.init_process_group(backend='nccl', init_method='env://') self.loss = None self.ce_loss = RobustCrossEntropyLoss() self.global_batch_size = None # we need to know this to properly steer oversample def set_batch_size_and_oversample(self): batch_sizes = [] oversample_percents = [] world_size = dist.get_world_size() my_rank = dist.get_rank() if self.distribute_batch_size: self.global_batch_size = self.batch_size else: self.global_batch_size = self.batch_size * world_size batch_size_per_GPU = np.ceil(self.batch_size / world_size).astype(int) for rank in range(world_size): if self.distribute_batch_size: if (rank + 1) * batch_size_per_GPU > self.batch_size: batch_size = batch_size_per_GPU - ((rank + 1) * batch_size_per_GPU - self.batch_size) else: batch_size = batch_size_per_GPU else: batch_size = self.batch_size batch_sizes.append(batch_size) sample_id_low = 0 if len(batch_sizes) == 0 else np.sum(batch_sizes[:-1]) sample_id_high 
= np.sum(batch_sizes) if sample_id_high / self.global_batch_size < (1 - self.oversample_foreground_percent): oversample_percents.append(0.0) elif sample_id_low / self.global_batch_size > (1 - self.oversample_foreground_percent): oversample_percents.append(1.0) else: percent_covered_by_this_rank = sample_id_high / self.global_batch_size - sample_id_low / self.global_batch_size oversample_percent_here = 1 - (((1 - self.oversample_foreground_percent) - sample_id_low / self.global_batch_size) / percent_covered_by_this_rank) oversample_percents.append(oversample_percent_here) print("worker", my_rank, "oversample", oversample_percents[my_rank]) print("worker", my_rank, "batch_size", batch_sizes[my_rank]) self.batch_size = batch_sizes[my_rank] self.oversample_foreground_percent = oversample_percents[my_rank] def save_checkpoint(self, fname, save_optimizer=True): if self.local_rank == 0: super().save_checkpoint(fname, save_optimizer) def plot_progress(self): if self.local_rank == 0: super().plot_progress() def print_to_log_file(self, *args, also_print_to_console=True): if self.local_rank == 0: super().print_to_log_file(*args, also_print_to_console=also_print_to_console) def process_plans(self, plans): super().process_plans(plans) self.set_batch_size_and_oversample() def initialize(self, training=True, force_load_plans=False): """ :param training: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: if self.local_rank == 0: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") distributed.barrier() else: print( "INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you " "will wait all winter for your model to finish!") # setting weights for deep supervision losses net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights seeds_train = np.random.random_integers(0, 99999, self.data_aug_params.get('num_threads')) seeds_val = np.random.random_integers(0, 99999, max(self.data_aug_params.get('num_threads') // 2, 1)) print("seeds train", seeds_train) print("seeds_val", seeds_val) self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, seeds_train=seeds_train, seeds_val=seeds_val, pin_memory=self.pin_memory) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() self.network = DDP(self.network, device_ids=[self.local_rank]) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False): data_dict = next(data_generator) data = data_dict['data'] target = data_dict['target'] data = maybe_to_torch(data) target = maybe_to_torch(target) if torch.cuda.is_available(): data = to_cuda(data, gpu_id=None) target = 
to_cuda(target, gpu_id=None) self.optimizer.zero_grad() if self.fp16: with autocast(): output = self.network(data) del data l = self.compute_loss(output, target) if do_backprop: self.amp_grad_scaler.scale(l).backward() self.amp_grad_scaler.unscale_(self.optimizer) torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) self.amp_grad_scaler.step(self.optimizer) self.amp_grad_scaler.update() else: output = self.network(data) del data l = self.compute_loss(output, target) if do_backprop: l.backward() torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) self.optimizer.step() if run_online_evaluation: self.run_online_evaluation(output, target) del target return l.detach().cpu().numpy() def compute_loss(self, output, target): total_loss = None for i in range(len(output)): # Starting here it gets spicy! axes = tuple(range(2, len(output[i].size()))) # network does not do softmax. We need to do softmax for dice output_softmax = softmax_helper(output[i]) # get the tp, fp and fn terms we need tp, fp, fn, _ = get_tp_fp_fn_tn(output_softmax, target[i], axes, mask=None) # for dice, compute nominator and denominator so that we have to accumulate only 2 instead of 3 variables # do_bg=False in nnUNetTrainer -> [:, 1:] nominator = 2 * tp[:, 1:] denominator = 2 * tp[:, 1:] + fp[:, 1:] + fn[:, 1:] if self.batch_dice: # for DDP we need to gather all nominator and denominator terms from all GPUS to do proper batch dice nominator = awesome_allgather_function.apply(nominator) denominator = awesome_allgather_function.apply(denominator) nominator = nominator.sum(0) denominator = denominator.sum(0) else: pass ce_loss = self.ce_loss(output[i], target[i][:, 0].long()) # we smooth by 1e-5 to penalize false positives if tp is 0 dice_loss = (- (nominator + 1e-5) / (denominator + 1e-5)).mean() if total_loss is None: total_loss = self.ds_loss_weights[i] * (ce_loss + dice_loss) else: total_loss += self.ds_loss_weights[i] * (ce_loss + dice_loss) return total_loss def 
run_online_evaluation(self, output, target): with torch.no_grad(): num_classes = output[0].shape[1] output_seg = output[0].argmax(1) target = target[0][:, 0] axes = tuple(range(1, len(target.shape))) tp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index) fp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index) fn_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index) for c in range(1, num_classes): tp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target == c).float(), axes=axes) fp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target != c).float(), axes=axes) fn_hard[:, c - 1] = sum_tensor((output_seg != c).float() * (target == c).float(), axes=axes) # tp_hard, fp_hard, fn_hard = get_tp_fp_fn((output_softmax > (1 / num_classes)).float(), target, # axes, None) # print_if_rank0("before allgather", tp_hard.shape) tp_hard = tp_hard.sum(0, keepdim=False)[None] fp_hard = fp_hard.sum(0, keepdim=False)[None] fn_hard = fn_hard.sum(0, keepdim=False)[None] tp_hard = awesome_allgather_function.apply(tp_hard) fp_hard = awesome_allgather_function.apply(fp_hard) fn_hard = awesome_allgather_function.apply(fn_hard) tp_hard = tp_hard.detach().cpu().numpy().sum(0) fp_hard = fp_hard.detach().cpu().numpy().sum(0) fn_hard = fn_hard.detach().cpu().numpy().sum(0) self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8))) self.online_eval_tp.append(list(tp_hard)) self.online_eval_fp.append(list(fp_hard)) self.online_eval_fn.append(list(fn_hard)) def run_training(self): """ if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first continued epoch with self.initial_lr we also need to make sure deep supervision in the network is enabled for training, thus the wrapper :return: """ self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we # want 
at the start of the training if isinstance(self.network, DDP): net = self.network.module else: net = self.network ds = net.do_ds net.do_ds = True ret = nnUNetTrainer.run_training(self) net.do_ds = ds return ret def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, segmentation_export_kwargs: dict = None): if isinstance(self.network, DDP): net = self.network.module else: net = self.network ds = net.do_ds net.do_ds = False current_mode = self.network.training self.network.eval() assert self.was_initialized, "must initialize, ideally with checkpoint (or train first)" if self.dataset_val is None: self.load_dataset() self.do_split() if segmentation_export_kwargs is None: if 'segmentation_export_params' in self.plans.keys(): force_separate_z = self.plans['segmentation_export_params']['force_separate_z'] interpolation_order = self.plans['segmentation_export_params']['interpolation_order'] interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z'] else: force_separate_z = None interpolation_order = 1 interpolation_order_z = 0 else: force_separate_z = segmentation_export_kwargs['force_separate_z'] interpolation_order = segmentation_export_kwargs['interpolation_order'] interpolation_order_z = segmentation_export_kwargs['interpolation_order_z'] # predictions as they come from the network go here output_folder = join(self.output_folder, validation_folder_name) maybe_mkdir_p(output_folder) # this is for debug purposes my_input_args = {'do_mirroring': do_mirroring, 'use_sliding_window': use_sliding_window, 'step_size': step_size, 'save_softmax': save_softmax, 'use_gaussian': use_gaussian, 'overwrite': overwrite, 'validation_folder_name': validation_folder_name, 'debug': debug, 'all_in_gpu': all_in_gpu, 
'segmentation_export_kwargs': segmentation_export_kwargs, } save_json(my_input_args, join(output_folder, "validation_args.json")) if do_mirroring: if not self.data_aug_params['do_mirror']: raise RuntimeError( "We did not train with mirroring so you cannot do inference with mirroring enabled") mirror_axes = self.data_aug_params['mirror_axes'] else: mirror_axes = () pred_gt_tuples = [] export_pool = Pool(default_num_threads) results = [] all_keys = list(self.dataset_val.keys()) my_keys = all_keys[self.local_rank::dist.get_world_size()] # we cannot simply iterate over all_keys because we need to know pred_gt_tuples and valid_labels of all cases # for evaluation (which is done by local rank 0) for k in my_keys: properties = load_pickle(self.dataset[k]['properties_file']) fname = properties['list_of_data_files'][0].split("/")[-1][:-12] pred_gt_tuples.append([join(output_folder, fname + ".nii.gz"), join(self.gt_niftis_folder, fname + ".nii.gz")]) if k in my_keys: if overwrite or (not isfile(join(output_folder, fname + ".nii.gz"))) or \ (save_softmax and not isfile(join(output_folder, fname + ".npz"))): data = np.load(self.dataset[k]['data_file'])['data'] print(k, data.shape) data[-1][data[-1] == -1] = 0 softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data[:-1], do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, use_gaussian=use_gaussian, all_in_gpu=all_in_gpu, mixed_precision=self.fp16)[1] softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in self.transpose_backward]) if save_softmax: softmax_fname = join(output_folder, fname + ".npz") else: softmax_fname = None """There is a problem with python process communication that prevents us from communicating obejcts larger than 2 GB between processes (basically when the length of the pickle string that will be sent is communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long enough strings 
(lol). This could be fixed by changing i to l (for long) but that would require manually patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either filename or np.ndarray and will handle this automatically""" if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85): # *0.85 just to be save np.save(join(output_folder, fname + ".npy"), softmax_pred) softmax_pred = join(output_folder, fname + ".npy") results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax, ((softmax_pred, join(output_folder, fname + ".nii.gz"), properties, interpolation_order, self.regions_class_order, None, None, softmax_fname, None, force_separate_z, interpolation_order_z), ) ) ) _ = [i.get() for i in results] self.print_to_log_file("finished prediction") distributed.barrier() if self.local_rank == 0: # evaluate raw predictions self.print_to_log_file("evaluation of raw predictions") task = self.dataset_directory.split("/")[-1] job_name = self.experiment_name _ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)), json_output_file=join(output_folder, "summary.json"), json_name=job_name + " val tiled %s" % (str(use_sliding_window)), json_author="Fabian", json_task=task, num_threads=default_num_threads) # in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything # except the largest connected component for each class. To see if this improves results, we do this for all # classes and then rerun the evaluation. 
Those classes for which this resulted in an improved dice score will # have this applied during inference as well self.print_to_log_file("determining postprocessing") determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name, final_subf_name=validation_folder_name + "_postprocessed", debug=debug) # after this the final predictions for the vlaidation set can be found in validation_folder_name_base + "_postprocessed" # They are always in that folder, even if no postprocessing as applied! # detemining postprocesing on a per-fold basis may be OK for this fold but what if another fold finds another # postprocesing to be better? In this case we need to consolidate. At the time the consolidation is going to be # done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to # be used later gt_nifti_folder = join(self.output_folder_base, "gt_niftis") maybe_mkdir_p(gt_nifti_folder) for f in subfiles(self.gt_niftis_folder, suffix=".nii.gz"): success = False attempts = 0 e = None while not success and attempts < 10: try: shutil.copy(f, gt_nifti_folder) success = True except OSError as e: attempts += 1 sleep(1) if not success: print("Could not copy gt nifti file %s into folder %s" % (f, gt_nifti_folder)) if e is not None: raise e self.network.train(current_mode) net.do_ds = ds def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True, mirror_axes: Tuple[int] = None, use_sliding_window: bool = True, step_size: float = 0.5, use_gaussian: bool = True, pad_border_mode: str = 'constant', pad_kwargs: dict = None, all_in_gpu: bool = False, verbose: bool = True, mixed_precision=True) -> Tuple[ np.ndarray, np.ndarray]: if pad_border_mode == 'constant' and pad_kwargs is None: pad_kwargs = {'constant_values': 0} if do_mirroring and mirror_axes is None: mirror_axes = self.data_aug_params['mirror_axes'] if do_mirroring: assert self.data_aug_params["do_mirror"], "Cannot 
do mirroring as test time augmentation when training " \ "was done without mirroring" valid = list((SegmentationNetwork, nn.DataParallel, DDP)) assert isinstance(self.network, tuple(valid)) if isinstance(self.network, DDP): net = self.network.module else: net = self.network ds = net.do_ds net.do_ds = False ret = net.predict_3D(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, patch_size=self.patch_size, regions_class_order=self.regions_class_order, use_gaussian=use_gaussian, pad_border_mode=pad_border_mode, pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose, mixed_precision=mixed_precision) net.do_ds = ds return ret def load_checkpoint_ram(self, checkpoint, train=True): """ used for if the checkpoint is already in ram :param checkpoint: :param train: :return: """ if not self.was_initialized: self.initialize(train) new_state_dict = OrderedDict() curr_state_dict_keys = list(self.network.state_dict().keys()) # if state dict comes form nn.DataParallel but we use non-parallel model here then the state dict keys do not # match. 
Use heuristic to make it match for k, value in checkpoint['state_dict'].items(): key = k if key not in curr_state_dict_keys: print("duh") key = key[7:] new_state_dict[key] = value if self.fp16: self._maybe_init_amp() if 'amp_grad_scaler' in checkpoint.keys(): self.amp_grad_scaler.load_state_dict(checkpoint['amp_grad_scaler']) self.network.load_state_dict(new_state_dict) self.epoch = checkpoint['epoch'] if train: optimizer_state_dict = checkpoint['optimizer_state_dict'] if optimizer_state_dict is not None: self.optimizer.load_state_dict(optimizer_state_dict) if self.lr_scheduler is not None and hasattr(self.lr_scheduler, 'load_state_dict') and checkpoint[ 'lr_scheduler_state_dict'] is not None: self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict']) if issubclass(self.lr_scheduler.__class__, _LRScheduler): self.lr_scheduler.step(self.epoch) self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics = checkpoint[ 'plot_stuff'] # after the training is done, the epoch is incremented one more time in my old code. This results in # self.epoch = 1001 for old trained models when the epoch is actually 1000. This causes issues because # len(self.all_tr_losses) = 1000 and the plot function will fail. We can easily detect and correct that here if self.epoch != len(self.all_tr_losses): self.print_to_log_file("WARNING in loading checkpoint: self.epoch != len(self.all_tr_losses). This is " "due to an old bug and should only appear when you are loading old models. New " "models should have this fixed! self.epoch is now set to len(self.all_tr_losses)") self.epoch = len(self.all_tr_losses) self.all_tr_losses = self.all_tr_losses[:self.epoch] self.all_val_losses = self.all_val_losses[:self.epoch] self.all_val_losses_tr_mode = self.all_val_losses_tr_mode[:self.epoch] self.all_val_eval_metrics = self.all_val_eval_metrics[:self.epoch]
30,456
50.447635
132
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNetTrainerV2_CascadeFullRes.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from multiprocessing.pool import Pool from time import sleep import matplotlib from nnunet.configuration import default_num_threads from nnunet.postprocessing.connected_components import determine_postprocessing from nnunet.training.data_augmentation.default_data_augmentation import get_default_augmentation, \ get_moreDA_augmentation from nnunet.training.dataloading.dataset_loading import DataLoader3D, unpack_dataset from nnunet.evaluation.evaluator import aggregate_scores from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.paths import network_training_output_dir from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax from batchgenerators.utilities.file_and_folder_operations import * import numpy as np from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2 from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.one_hot_encoding import to_one_hot import shutil from torch import nn matplotlib.use("agg") class nnUNetTrainerV2CascadeFullRes(nnUNetTrainerV2): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, previous_trainer="nnUNetTrainerV2", fp16=False): super().__init__(plans_file, fold, 
output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, previous_trainer, fp16) if self.output_folder is not None: task = self.output_folder.split("/")[-3] plans_identifier = self.output_folder.split("/")[-2].split("__")[-1] folder_with_segs_prev_stage = join(network_training_output_dir, "3d_lowres", task, previous_trainer + "__" + plans_identifier, "pred_next_stage") self.folder_with_segs_from_prev_stage = folder_with_segs_prev_stage # Do not put segs_prev_stage into self.output_folder as we need to unpack them for performance and we # don't want to do that in self.output_folder because that one is located on some network drive. else: self.folder_with_segs_from_prev_stage = None def do_split(self): super().do_split() for k in self.dataset: self.dataset[k]['seg_from_prev_stage_file'] = join(self.folder_with_segs_from_prev_stage, k + "_segFromPrevStage.npz") assert isfile(self.dataset[k]['seg_from_prev_stage_file']), \ "seg from prev stage missing: %s" % (self.dataset[k]['seg_from_prev_stage_file']) for k in self.dataset_val: self.dataset_val[k]['seg_from_prev_stage_file'] = join(self.folder_with_segs_from_prev_stage, k + "_segFromPrevStage.npz") for k in self.dataset_tr: self.dataset_tr[k]['seg_from_prev_stage_file'] = join(self.folder_with_segs_from_prev_stage, k + "_segFromPrevStage.npz") def get_basic_generators(self): self.load_dataset() self.do_split() if self.threeD: dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size, True, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides) dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, True, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides) else: raise 
NotImplementedError("2D has no cascade") return dl_tr, dl_val def process_plans(self, plans): super().process_plans(plans) self.num_input_channels += (self.num_classes - 1) # for seg from prev stage def setup_DA_params(self): super().setup_DA_params() self.data_aug_params["num_cached_per_thread"] = 2 self.data_aug_params['move_last_seg_chanel_to_data'] = True self.data_aug_params['cascade_do_cascade_augmentations'] = True self.data_aug_params['cascade_random_binary_transform_p'] = 0.4 self.data_aug_params['cascade_random_binary_transform_p_per_label'] = 1 self.data_aug_params['cascade_random_binary_transform_size'] = (1, 8) self.data_aug_params['cascade_remove_conn_comp_p'] = 0.2 self.data_aug_params['cascade_remove_conn_comp_max_size_percent_threshold'] = 0.15 self.data_aug_params['cascade_remove_conn_comp_fill_with_other_class_p'] = 0.0 # we have 2 channels now because the segmentation from the previous stage is stored in 'seg' as well until it # is moved to 'data' at the end self.data_aug_params['selected_seg_channels'] = [0, 1] # needed for converting the segmentation from the previous stage to one hot self.data_aug_params['all_segmentation_labels'] = list(range(1, self.num_classes)) def initialize(self, training=True, force_load_plans=False): """ For prediction of test cases just set training=False, this will prevent loading of training data and training batchgenerator initialization :param training: :return: """ if not self.was_initialized: if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in 
range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights # now wrap the loss self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights) ################# END ################### self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: if not isdir(self.folder_with_segs_from_prev_stage): raise RuntimeError( "Cannot run final stage of cascade. Run corresponding 3d_lowres first and predict the " "segmentations for the next stage") self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") else: print( "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, pin_memory=self.pin_memory) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 
'validation_raw', debug: bool = False, all_in_gpu: bool = False, segmentation_export_kwargs: dict = None): assert self.was_initialized, "must initialize, ideally with checkpoint (or train first)" current_mode = self.network.training self.network.eval() # save whether network is in deep supervision mode or not ds = self.network.do_ds # disable deep supervision self.network.do_ds = False if segmentation_export_kwargs is None: if 'segmentation_export_params' in self.plans.keys(): force_separate_z = self.plans['segmentation_export_params']['force_separate_z'] interpolation_order = self.plans['segmentation_export_params']['interpolation_order'] interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z'] else: force_separate_z = None interpolation_order = 1 interpolation_order_z = 0 else: force_separate_z = segmentation_export_kwargs['force_separate_z'] interpolation_order = segmentation_export_kwargs['interpolation_order'] interpolation_order_z = segmentation_export_kwargs['interpolation_order_z'] if self.dataset_val is None: self.load_dataset() self.do_split() output_folder = join(self.output_folder, validation_folder_name) maybe_mkdir_p(output_folder) # this is for debug purposes my_input_args = {'do_mirroring': do_mirroring, 'use_sliding_window': use_sliding_window, 'step': step_size, 'save_softmax': save_softmax, 'use_gaussian': use_gaussian, 'overwrite': overwrite, 'validation_folder_name': validation_folder_name, 'debug': debug, 'all_in_gpu': all_in_gpu, 'segmentation_export_kwargs': segmentation_export_kwargs, } save_json(my_input_args, join(output_folder, "validation_args.json")) if do_mirroring: if not self.data_aug_params['do_mirror']: raise RuntimeError("We did not train with mirroring so you cannot do inference with mirroring enabled") mirror_axes = self.data_aug_params['mirror_axes'] else: mirror_axes = () pred_gt_tuples = [] export_pool = Pool(default_num_threads) results = [] for k in self.dataset_val.keys(): properties = 
load_pickle(self.dataset[k]['properties_file']) fname = properties['list_of_data_files'][0].split("/")[-1][:-12] if overwrite or (not isfile(join(output_folder, fname + ".nii.gz"))) or \ (save_softmax and not isfile(join(output_folder, fname + ".npz"))): data = np.load(self.dataset[k]['data_file'])['data'] # concat segmentation of previous step seg_from_prev_stage = np.load(join(self.folder_with_segs_from_prev_stage, k + "_segFromPrevStage.npz"))['data'][None] print(k, data.shape) data[-1][data[-1] == -1] = 0 data_for_net = np.concatenate((data[:-1], to_one_hot(seg_from_prev_stage[0], range(1, self.num_classes)))) softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data_for_net, do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, use_gaussian=use_gaussian, all_in_gpu=all_in_gpu, mixed_precision=self.fp16)[1] softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in self.transpose_backward]) if save_softmax: softmax_fname = join(output_folder, fname + ".npz") else: softmax_fname = None """There is a problem with python process communication that prevents us from communicating obejcts larger than 2 GB between processes (basically when the length of the pickle string that will be sent is communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will then be read (and finally deleted) by the Process. 
save_segmentation_nifti_from_softmax can take either filename or np.ndarray and will handle this automatically""" if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85): # *0.85 just to be save np.save(join(output_folder, fname + ".npy"), softmax_pred) softmax_pred = join(output_folder, fname + ".npy") results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax, ((softmax_pred, join(output_folder, fname + ".nii.gz"), properties, interpolation_order, None, None, None, softmax_fname, None, force_separate_z, interpolation_order_z), ) ) ) pred_gt_tuples.append([join(output_folder, fname + ".nii.gz"), join(self.gt_niftis_folder, fname + ".nii.gz")]) _ = [i.get() for i in results] self.print_to_log_file("finished prediction") # evaluate raw predictions self.print_to_log_file("evaluation of raw predictions") task = self.dataset_directory.split("/")[-1] job_name = self.experiment_name _ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)), json_output_file=join(output_folder, "summary.json"), json_name=job_name + " val tiled %s" % (str(use_sliding_window)), json_author="Fabian", json_task=task, num_threads=default_num_threads) # in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything # except the largest connected component for each class. To see if this improves results, we do this for all # classes and then rerun the evaluation. Those classes for which this resulted in an improved dice score will # have this applied during inference as well self.print_to_log_file("determining postprocessing") determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name, final_subf_name=validation_folder_name + "_postprocessed", debug=debug) # after this the final predictions for the vlaidation set can be found in validation_folder_name_base + "_postprocessed" # They are always in that folder, even if no postprocessing as applied! 
# detemining postprocesing on a per-fold basis may be OK for this fold but what if another fold finds another # postprocesing to be better? In this case we need to consolidate. At the time the consolidation is going to be # done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to # be used later gt_nifti_folder = join(self.output_folder_base, "gt_niftis") maybe_mkdir_p(gt_nifti_folder) for f in subfiles(self.gt_niftis_folder, suffix=".nii.gz"): success = False attempts = 0 e = None while not success and attempts < 10: try: shutil.copy(f, gt_nifti_folder) success = True except OSError as e: attempts += 1 sleep(1) if not success: print("Could not copy gt nifti file %s into folder %s" % (f, gt_nifti_folder)) if e is not None: raise e # restore network deep supervision mode self.network.train(current_mode) self.network.do_ds = ds
19,421
54.176136
128
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNetTrainerV2.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import OrderedDict from typing import Tuple import numpy as np import torch from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2 from nnunet.utilities.to_torch import maybe_to_torch, to_cuda from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.training.data_augmentation.default_data_augmentation import default_2D_augmentation_params, \ get_patch_size, default_3D_augmentation_params from nnunet.training.dataloading.dataset_loading import unpack_dataset from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer from nnunet.utilities.nd_softmax import softmax_helper from sklearn.model_selection import KFold from torch import nn from torch.cuda.amp import autocast from nnunet.training.learning_rate.poly_lr import poly_lr from batchgenerators.utilities.file_and_folder_operations import * class nnUNetTrainerV2(nnUNetTrainer): """ Info for Fabian: same as internal nnUNetTrainerV2_2 """ def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, 
deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.max_num_epochs = 1000 self.initial_lr = 1e-2 self.deep_supervision_scales = None self.ds_loss_weights = None self.pin_memory = True def initialize(self, training=True, force_load_plans=False): """ - replaced get_default_augmentation with get_moreDA_augmentation - enforce to only run this code once - loss function wrapper for deep supervision :param training: :param force_load_plans: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights # now wrap the loss self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights) ################# END ################### self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") else: print( "INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_moreDA_augmentation( self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, pin_memory=self.pin_memory ) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def initialize_network(self): """ - momentum 0.99 - SGD instead of Adam - self.lr_scheduler = None because we do poly_lr - deep supervision = True - i am sure I forgot something here Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though :return: """ if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper def initialize_optimizer_and_scheduler(self): assert 
self.network is not None, "self.initialize_network must be called first" self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, momentum=0.99, nesterov=True) self.lr_scheduler = None def run_online_evaluation(self, output, target): """ due to deep supervision the return value and the reference are now lists of tensors. We only need the full resolution output because this is what we are interested in in the end. The others are ignored :param output: :param target: :return: """ target = target[0] output = output[0] return super().run_online_evaluation(output, target) def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, segmentation_export_kwargs: dict = None): """ We need to wrap this because we need to enforce self.network.do_ds = False for prediction """ ds = self.network.do_ds self.network.do_ds = False ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian, overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug, all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs) self.network.do_ds = ds return ret def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True, mirror_axes: Tuple[int] = None, use_sliding_window: bool = True, step_size: float = 0.5, use_gaussian: bool = True, pad_border_mode: str = 'constant', pad_kwargs: dict = None, all_in_gpu: bool = False, verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]: """ We need to wrap this because we need to enforce self.network.do_ds = False for prediction """ ds = self.network.do_ds self.network.do_ds = False ret = 
super().predict_preprocessed_data_return_seg_and_softmax(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, use_gaussian=use_gaussian, pad_border_mode=pad_border_mode, pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose, mixed_precision=mixed_precision) self.network.do_ds = ds return ret def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False): """ gradient clipping improves training stability :param data_generator: :param do_backprop: :param run_online_evaluation: :return: """ data_dict = next(data_generator) data = data_dict['data'] target = data_dict['target'] data = maybe_to_torch(data) target = maybe_to_torch(target) if torch.cuda.is_available(): data = to_cuda(data) target = to_cuda(target) self.optimizer.zero_grad() if self.fp16: with autocast(): output = self.network(data) del data l = self.loss(output, target) if do_backprop: self.amp_grad_scaler.scale(l).backward() self.amp_grad_scaler.unscale_(self.optimizer) torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) self.amp_grad_scaler.step(self.optimizer) self.amp_grad_scaler.update() else: output = self.network(data) del data l = self.loss(output, target) if do_backprop: l.backward() torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) self.optimizer.step() if run_online_evaluation: self.run_online_evaluation(output, target) del target return l.detach().cpu().numpy() def do_split(self): """ The default split is a 5 fold CV on all available training cases. nnU-Net will create a split (it is seeded, so always the same) and save it as splits_final.pkl file in the preprocessed data directory. Sometimes you may want to create your own split for various reasons. For this you will need to create your own splits_final.pkl file. If this file is present, nnU-Net is going to use it and whatever splits are defined in it. You can create as many splits in this file as you want. 
Note that if you define only 4 splits (fold 0-3) and then set fold=4 when training (that would be the fifth split), nnU-Net will print a warning and proceed to use a random 80:20 data split. :return: """ if self.fold == "all": # if fold==all then we use all images for training and validation tr_keys = val_keys = list(self.dataset.keys()) else: splits_file = join(self.dataset_directory, "splits_final.pkl") # if the split file does not exist we need to create it if not isfile(splits_file): self.print_to_log_file("Creating new split...") splits = [] all_keys_sorted = np.sort(list(self.dataset.keys())) kfold = KFold(n_splits=5, shuffle=True, random_state=12345) for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)): train_keys = np.array(all_keys_sorted)[train_idx] test_keys = np.array(all_keys_sorted)[test_idx] splits.append(OrderedDict()) splits[-1]['train'] = train_keys splits[-1]['val'] = test_keys save_pickle(splits, splits_file) splits = load_pickle(splits_file) if self.fold < len(splits): tr_keys = splits[self.fold]['train'] val_keys = splits[self.fold]['val'] else: self.print_to_log_file("INFO: Requested fold %d but split file only has %d folds. I am now creating a " "random 80:20 split!" 
% (self.fold, len(splits))) # if we request a fold that is not in the split file, create a random 80:20 split rnd = np.random.RandomState(seed=12345 + self.fold) keys = np.sort(list(self.dataset.keys())) idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False) idx_val = [i for i in range(len(keys)) if i not in idx_tr] tr_keys = [keys[i] for i in idx_tr] val_keys = [keys[i] for i in idx_val] tr_keys.sort() val_keys.sort() self.dataset_tr = OrderedDict() for i in tr_keys: self.dataset_tr[i] = self.dataset[i] self.dataset_val = OrderedDict() for i in val_keys: self.dataset_val[i] = self.dataset[i] def setup_DA_params(self): """ - we increase roation angle from [-15, 15] to [-30, 30] - scale range is now (0.7, 1.4), was (0.85, 1.25) - we don't do elastic deformation anymore :return: """ self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod( np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1] if self.threeD: self.data_aug_params = default_3D_augmentation_params self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) if self.do_dummy_2D_aug: self.data_aug_params["dummy_2D"] = True self.print_to_log_file("Using dummy2d data augmentation") self.data_aug_params["elastic_deform_alpha"] = \ default_2D_augmentation_params["elastic_deform_alpha"] self.data_aug_params["elastic_deform_sigma"] = \ default_2D_augmentation_params["elastic_deform_sigma"] self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"] else: self.do_dummy_2D_aug = False if max(self.patch_size) / min(self.patch_size) > 1.5: default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. 
* np.pi) self.data_aug_params = default_2D_augmentation_params self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size)) patch_size_for_spatialtransform = self.patch_size[1:] else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) patch_size_for_spatialtransform = self.patch_size self.data_aug_params["scale_range"] = (0.7, 1.4) self.data_aug_params["do_elastic"] = False self.data_aug_params['selected_seg_channels'] = [0] self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform self.data_aug_params["num_cached_per_thread"] = 2 def maybe_update_lr(self, epoch=None): """ if epoch is not None we overwrite epoch. Else we use epoch = self.epoch + 1 (maybe_update_lr is called in on_epoch_end which is called before epoch is incremented. herefore we need to do +1 here) :param epoch: :return: """ if epoch is None: ep = self.epoch + 1 else: ep = epoch self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9) self.print_to_log_file("lr:", np.round(self.optimizer.param_groups[0]['lr'], decimals=6)) def on_epoch_end(self): """ overwrite patient-based early stopping. Always run to 1000 epochs :return: """ super().on_epoch_end() continue_training = self.epoch < self.max_num_epochs # it can rarely happen that the momentum of nnUNetTrainerV2 is too high for some dataset. 
If at epoch 100 the # estimated validation Dice is still 0 then we reduce the momentum from 0.99 to 0.95 if self.epoch == 100: if self.all_val_eval_metrics[-1] == 0: self.optimizer.param_groups[0]["momentum"] = 0.95 self.network.apply(InitWeights_He(1e-2)) self.print_to_log_file("At epoch 100, the mean foreground Dice was 0. This can be caused by a too " "high momentum. High momentum (0.99) is good for datasets where it works, but " "sometimes causes issues such as this one. Momentum has now been reduced to " "0.95 and network weights have been reinitialized") return continue_training def run_training(self): """ if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first continued epoch with self.initial_lr we also need to make sure deep supervision in the network is enabled for training, thus the wrapper :return: """ self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we # want at the start of the training ds = self.network.do_ds self.network.do_ds = True ret = super().run_training() self.network.do_ds = ds return ret
21,273
48.018433
134
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNetTrainerV2_DP.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from batchgenerators.utilities.file_and_folder_operations import * from nnunet.network_architecture.generic_UNet_DP import Generic_UNet_DP from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.to_torch import maybe_to_torch, to_cuda from nnunet.network_architecture.initialization import InitWeights_He from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.training.dataloading.dataset_loading import unpack_dataset from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer from nnunet.utilities.nd_softmax import softmax_helper from torch import nn from torch.cuda.amp import autocast from torch.nn.parallel.data_parallel import DataParallel from torch.nn.utils import clip_grad_norm_ class nnUNetTrainerV2_DP(nnUNetTrainerV2): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, num_gpus=1, distribute_batch_size=False, fp16=False): super(nnUNetTrainerV2_DP, self).__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.init_args = (plans_file, fold, 
output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, num_gpus, distribute_batch_size, fp16) self.num_gpus = num_gpus self.distribute_batch_size = distribute_batch_size self.dice_smooth = 1e-5 self.dice_do_BG = False self.loss = None self.loss_weights = None def setup_DA_params(self): super(nnUNetTrainerV2_DP, self).setup_DA_params() self.data_aug_params['num_threads'] = 8 * self.num_gpus def process_plans(self, plans): super(nnUNetTrainerV2_DP, self).process_plans(plans) if not self.distribute_batch_size: self.batch_size = self.num_gpus * self.plans['plans_per_stage'][self.stage]['batch_size'] else: if self.batch_size < self.num_gpus: print("WARNING: self.batch_size < self.num_gpus. Will not be able to use the GPUs well") elif self.batch_size % self.num_gpus != 0: print("WARNING: self.batch_size % self.num_gpus != 0. Will not be able to use the GPUs well") def initialize(self, training=True, force_load_plans=False): """ - replaced get_default_augmentation with get_moreDA_augmentation - only run this code once - loss function wrapper for deep supervision :param training: :param force_load_plans: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here configure the loss for deep supervision ############ net_numpool = len(self.net_num_pool_op_kernel_sizes) weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.loss_weights = weights ################# END ################### self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset") 
unpack_dataset(self.folder_with_preprocessed_data) print("done") else: print( "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, pin_memory=self.pin_memory) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() assert isinstance(self.network, (SegmentationNetwork, DataParallel)) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def initialize_network(self): """ replace genericUNet with the implementation of above for super speeds """ if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet_DP(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper def initialize_optimizer_and_scheduler(self): assert self.network is not None, "self.initialize_network must 
be called first" self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, momentum=0.99, nesterov=True) self.lr_scheduler = None def run_training(self): self.maybe_update_lr(self.epoch) # amp must be initialized before DP ds = self.network.do_ds self.network.do_ds = True self.network = DataParallel(self.network, tuple(range(self.num_gpus)), ) ret = nnUNetTrainer.run_training(self) self.network = self.network.module self.network.do_ds = ds return ret def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False): data_dict = next(data_generator) data = data_dict['data'] target = data_dict['target'] data = maybe_to_torch(data) target = maybe_to_torch(target) if torch.cuda.is_available(): data = to_cuda(data) target = to_cuda(target) self.optimizer.zero_grad() if self.fp16: with autocast(): ret = self.network(data, target, return_hard_tp_fp_fn=run_online_evaluation) if run_online_evaluation: ces, tps, fps, fns, tp_hard, fp_hard, fn_hard = ret self.run_online_evaluation(tp_hard, fp_hard, fn_hard) else: ces, tps, fps, fns = ret del data, target l = self.compute_loss(ces, tps, fps, fns) if do_backprop: self.amp_grad_scaler.scale(l).backward() self.amp_grad_scaler.unscale_(self.optimizer) torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) self.amp_grad_scaler.step(self.optimizer) self.amp_grad_scaler.update() else: ret = self.network(data, target, return_hard_tp_fp_fn=run_online_evaluation) if run_online_evaluation: ces, tps, fps, fns, tp_hard, fp_hard, fn_hard = ret self.run_online_evaluation(tp_hard, fp_hard, fn_hard) else: ces, tps, fps, fns = ret del data, target l = self.compute_loss(ces, tps, fps, fns) if do_backprop: l.backward() torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) self.optimizer.step() return l.detach().cpu().numpy() def run_online_evaluation(self, tp_hard, fp_hard, fn_hard): tp_hard = tp_hard.detach().cpu().numpy().mean(0) fp_hard = 
fp_hard.detach().cpu().numpy().mean(0) fn_hard = fn_hard.detach().cpu().numpy().mean(0) self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8))) self.online_eval_tp.append(list(tp_hard)) self.online_eval_fp.append(list(fp_hard)) self.online_eval_fn.append(list(fn_hard)) def compute_loss(self, ces, tps, fps, fns): # we now need to effectively reimplement the loss loss = None for i in range(len(ces)): if not self.dice_do_BG: tp = tps[i][:, 1:] fp = fps[i][:, 1:] fn = fns[i][:, 1:] else: tp = tps[i] fp = fps[i] fn = fns[i] if self.batch_dice: tp = tp.sum(0) fp = fp.sum(0) fn = fn.sum(0) else: pass nominator = 2 * tp + self.dice_smooth denominator = 2 * tp + fp + fn + self.dice_smooth dice_loss = (- nominator / denominator).mean() if loss is None: loss = self.loss_weights[i] * (ces[i].mean() + dice_loss) else: loss += self.loss_weights[i] * (ces[i].mean() + dice_loss) ########### return loss
11,682
44.459144
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/network_trainer.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from _warnings import warn from typing import Tuple import matplotlib from batchgenerators.utilities.file_and_folder_operations import * from nnunet.network_architecture.neural_network import SegmentationNetwork from sklearn.model_selection import KFold from torch import nn from torch.cuda.amp import GradScaler, autocast from torch.optim.lr_scheduler import _LRScheduler matplotlib.use("agg") from time import time, sleep import torch import numpy as np from torch.optim import lr_scheduler import matplotlib.pyplot as plt import sys from collections import OrderedDict import torch.backends.cudnn as cudnn from abc import abstractmethod from datetime import datetime from tqdm import trange from nnunet.utilities.to_torch import maybe_to_torch, to_cuda class NetworkTrainer(object): def __init__(self, deterministic=True, fp16=False): """ A generic class that can train almost any neural network (RNNs excluded). It provides basic functionality such as the training loop, tracking of training and validation losses (and the target metric if you implement it) Training can be terminated early if the validation loss (or the target metric if implemented) do not improve anymore. This is based on a moving average (MA) of the loss/metric instead of the raw values to get more smooth results. 
What you need to override: - __init__ - initialize - run_online_evaluation (optional) - finish_online_evaluation (optional) - validate - predict_test_case """ self.fp16 = fp16 self.amp_grad_scaler = None if deterministic: np.random.seed(12345) torch.manual_seed(12345) if torch.cuda.is_available(): torch.cuda.manual_seed_all(12345) cudnn.deterministic = True torch.backends.cudnn.benchmark = False else: cudnn.deterministic = False torch.backends.cudnn.benchmark = True ################# SET THESE IN self.initialize() ################################### self.network: Tuple[SegmentationNetwork, nn.DataParallel] = None self.optimizer = None self.lr_scheduler = None self.tr_gen = self.val_gen = None self.was_initialized = False ################# SET THESE IN INIT ################################################ self.output_folder = None self.fold = None self.loss = None self.dataset_directory = None ################# SET THESE IN LOAD_DATASET OR DO_SPLIT ############################ self.dataset = None # these can be None for inference mode self.dataset_tr = self.dataset_val = None # do not need to be used, they just appear if you are using the suggested load_dataset_and_do_split ################# THESE DO NOT NECESSARILY NEED TO BE MODIFIED ##################### self.patience = 50 self.val_eval_criterion_alpha = 0.9 # alpha * old + (1-alpha) * new # if this is too low then the moving average will be too noisy and the training may terminate early. 
If it is # too high the training will take forever self.train_loss_MA_alpha = 0.93 # alpha * old + (1-alpha) * new self.train_loss_MA_eps = 5e-4 # new MA must be at least this much better (smaller) self.max_num_epochs = 1000 self.num_batches_per_epoch = 250 self.num_val_batches_per_epoch = 50 self.also_val_in_tr_mode = False self.lr_threshold = 1e-6 # the network will not terminate training if the lr is still above this threshold ################# LEAVE THESE ALONE ################################################ self.val_eval_criterion_MA = None self.train_loss_MA = None self.best_val_eval_criterion_MA = None self.best_MA_tr_loss_for_patience = None self.best_epoch_based_on_MA_tr_loss = None self.all_tr_losses = [] self.all_val_losses = [] self.all_val_losses_tr_mode = [] self.all_val_eval_metrics = [] # does not have to be used self.epoch = 0 self.log_file = None self.deterministic = deterministic self.use_progress_bar = False if 'nnunet_use_progress_bar' in os.environ.keys(): self.use_progress_bar = bool(int(os.environ['nnunet_use_progress_bar'])) ################# Settings for saving checkpoints ################################## self.save_every = 50 self.save_latest_only = True # if false it will not store/overwrite _latest but separate files each # time an intermediate checkpoint is created self.save_intermediate_checkpoints = True # whether or not to save checkpoint_latest self.save_best_checkpoint = True # whether or not to save the best checkpoint according to self.best_val_eval_criterion_MA self.save_final_checkpoint = True # whether or not to save the final checkpoint @abstractmethod def initialize(self, training=True): """ create self.output_folder modify self.output_folder if you are doing cross-validation (one folder per fold) set self.tr_gen and self.val_gen call self.initialize_network and self.initialize_optimizer_and_scheduler (important!) 
finally set self.was_initialized to True :param training: :return: """ @abstractmethod def load_dataset(self): pass def do_split(self): """ This is a suggestion for if your dataset is a dictionary (my personal standard) :return: """ splits_file = join(self.dataset_directory, "splits_final.pkl") if not isfile(splits_file): self.print_to_log_file("Creating new split...") splits = [] all_keys_sorted = np.sort(list(self.dataset.keys())) kfold = KFold(n_splits=5, shuffle=True, random_state=12345) for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)): train_keys = np.array(all_keys_sorted)[train_idx] test_keys = np.array(all_keys_sorted)[test_idx] splits.append(OrderedDict()) splits[-1]['train'] = train_keys splits[-1]['val'] = test_keys save_pickle(splits, splits_file) splits = load_pickle(splits_file) if self.fold == "all": tr_keys = val_keys = list(self.dataset.keys()) else: tr_keys = splits[self.fold]['train'] val_keys = splits[self.fold]['val'] tr_keys.sort() val_keys.sort() self.dataset_tr = OrderedDict() for i in tr_keys: self.dataset_tr[i] = self.dataset[i] self.dataset_val = OrderedDict() for i in val_keys: self.dataset_val[i] = self.dataset[i] def plot_progress(self): """ Should probably by improved :return: """ try: font = {'weight': 'normal', 'size': 18} matplotlib.rc('font', **font) fig = plt.figure(figsize=(30, 24)) ax = fig.add_subplot(111) ax2 = ax.twinx() x_values = list(range(self.epoch + 1)) ax.plot(x_values, self.all_tr_losses, color='b', ls='-', label="loss_tr") ax.plot(x_values, self.all_val_losses, color='r', ls='-', label="loss_val, train=False") if len(self.all_val_losses_tr_mode) > 0: ax.plot(x_values, self.all_val_losses_tr_mode, color='g', ls='-', label="loss_val, train=True") if len(self.all_val_eval_metrics) == len(x_values): ax2.plot(x_values, self.all_val_eval_metrics, color='g', ls='--', label="evaluation metric") ax.set_xlabel("epoch") ax.set_ylabel("loss") ax2.set_ylabel("evaluation metric") ax.legend() 
ax2.legend(loc=9) fig.savefig(join(self.output_folder, "progress.png")) plt.close() except IOError: self.print_to_log_file("failed to plot: ", sys.exc_info()) def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True): timestamp = time() dt_object = datetime.fromtimestamp(timestamp) if add_timestamp: args = ("%s:" % dt_object, *args) if self.log_file is None: maybe_mkdir_p(self.output_folder) timestamp = datetime.now() self.log_file = join(self.output_folder, "training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt" % (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute, timestamp.second)) with open(self.log_file, 'w') as f: f.write("Starting... \n") successful = False max_attempts = 5 ctr = 0 while not successful and ctr < max_attempts: try: with open(self.log_file, 'a+') as f: for a in args: f.write(str(a)) f.write(" ") f.write("\n") successful = True except IOError: print("%s: failed to log: " % datetime.fromtimestamp(timestamp), sys.exc_info()) sleep(0.5) ctr += 1 if also_print_to_console: print(*args) def save_checkpoint(self, fname, save_optimizer=True): start_time = time() state_dict = self.network.state_dict() for key in state_dict.keys(): state_dict[key] = state_dict[key].cpu() lr_sched_state_dct = None if self.lr_scheduler is not None and hasattr(self.lr_scheduler, 'state_dict'): # not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau): lr_sched_state_dct = self.lr_scheduler.state_dict() # WTF is this!? 
# for key in lr_sched_state_dct.keys(): # lr_sched_state_dct[key] = lr_sched_state_dct[key] if save_optimizer: optimizer_state_dict = self.optimizer.state_dict() else: optimizer_state_dict = None self.print_to_log_file("saving checkpoint...") save_this = { 'epoch': self.epoch + 1, 'state_dict': state_dict, 'optimizer_state_dict': optimizer_state_dict, 'lr_scheduler_state_dict': lr_sched_state_dct, 'plot_stuff': (self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics), 'best_stuff' : (self.best_epoch_based_on_MA_tr_loss, self.best_MA_tr_loss_for_patience, self.best_val_eval_criterion_MA)} if self.amp_grad_scaler is not None: save_this['amp_grad_scaler'] = self.amp_grad_scaler.state_dict() torch.save(save_this, fname) self.print_to_log_file("done, saving took %.2f seconds" % (time() - start_time)) def load_best_checkpoint(self, train=True): if self.fold is None: raise RuntimeError("Cannot load best checkpoint if self.fold is None") if isfile(join(self.output_folder, "model_best.model")): self.load_checkpoint(join(self.output_folder, "model_best.model"), train=train) else: self.print_to_log_file("WARNING! model_best.model does not exist! Cannot load best checkpoint. 
Falling " "back to load_latest_checkpoint") self.load_latest_checkpoint(train) def load_latest_checkpoint(self, train=True): if isfile(join(self.output_folder, "model_final_checkpoint.model")): return self.load_checkpoint(join(self.output_folder, "model_final_checkpoint.model"), train=train) if isfile(join(self.output_folder, "model_latest.model")): return self.load_checkpoint(join(self.output_folder, "model_latest.model"), train=train) if isfile(join(self.output_folder, "model_best.model")): return self.load_best_checkpoint(train) raise RuntimeError("No checkpoint found") def load_checkpoint(self, fname, train=True): self.print_to_log_file("loading checkpoint", fname, "train=", train) if not self.was_initialized: self.initialize(train) # saved_model = torch.load(fname, map_location=torch.device('cuda', torch.cuda.current_device())) saved_model = torch.load(fname, map_location=torch.device('cpu')) self.load_checkpoint_ram(saved_model, train) @abstractmethod def initialize_network(self): """ initialize self.network here :return: """ pass @abstractmethod def initialize_optimizer_and_scheduler(self): """ initialize self.optimizer and self.lr_scheduler (if applicable) here :return: """ pass def load_checkpoint_ram(self, checkpoint, train=True): """ used for if the checkpoint is already in ram :param checkpoint: :param train: :return: """ if not self.was_initialized: self.initialize(train) new_state_dict = OrderedDict() curr_state_dict_keys = list(self.network.state_dict().keys()) # if state dict comes form nn.DataParallel but we use non-parallel model here then the state dict keys do not # match. 
Use heuristic to make it match for k, value in checkpoint['state_dict'].items(): key = k if key not in curr_state_dict_keys and key.startswith('module.'): key = key[7:] new_state_dict[key] = value if self.fp16: self._maybe_init_amp() if 'amp_grad_scaler' in checkpoint.keys(): self.amp_grad_scaler.load_state_dict(checkpoint['amp_grad_scaler']) self.network.load_state_dict(new_state_dict) self.epoch = checkpoint['epoch'] if train: optimizer_state_dict = checkpoint['optimizer_state_dict'] if optimizer_state_dict is not None: self.optimizer.load_state_dict(optimizer_state_dict) if self.lr_scheduler is not None and hasattr(self.lr_scheduler, 'load_state_dict') and checkpoint[ 'lr_scheduler_state_dict'] is not None: self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict']) if issubclass(self.lr_scheduler.__class__, _LRScheduler): self.lr_scheduler.step(self.epoch) self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics = checkpoint[ 'plot_stuff'] # load best loss (if present) if 'best_stuff' in checkpoint.keys(): self.best_epoch_based_on_MA_tr_loss, self.best_MA_tr_loss_for_patience, self.best_val_eval_criterion_MA = checkpoint[ 'best_stuff'] # after the training is done, the epoch is incremented one more time in my old code. This results in # self.epoch = 1001 for old trained models when the epoch is actually 1000. This causes issues because # len(self.all_tr_losses) = 1000 and the plot function will fail. We can easily detect and correct that here if self.epoch != len(self.all_tr_losses): self.print_to_log_file("WARNING in loading checkpoint: self.epoch != len(self.all_tr_losses). This is " "due to an old bug and should only appear when you are loading old models. New " "models should have this fixed! 
self.epoch is now set to len(self.all_tr_losses)") self.epoch = len(self.all_tr_losses) self.all_tr_losses = self.all_tr_losses[:self.epoch] self.all_val_losses = self.all_val_losses[:self.epoch] self.all_val_losses_tr_mode = self.all_val_losses_tr_mode[:self.epoch] self.all_val_eval_metrics = self.all_val_eval_metrics[:self.epoch] self._maybe_init_amp() def _maybe_init_amp(self): if self.fp16 and self.amp_grad_scaler is None and torch.cuda.is_available(): self.amp_grad_scaler = GradScaler() def plot_network_architecture(self): """ can be implemented (see nnUNetTrainer) but does not have to. Not implemented here because it imposes stronger assumptions on the presence of class variables :return: """ pass def run_training(self): _ = self.tr_gen.next() _ = self.val_gen.next() if torch.cuda.is_available(): torch.cuda.empty_cache() self._maybe_init_amp() maybe_mkdir_p(self.output_folder) self.plot_network_architecture() if cudnn.benchmark and cudnn.deterministic: warn("torch.backends.cudnn.deterministic is True indicating a deterministic training is desired. " "But torch.backends.cudnn.benchmark is True as well and this will prevent deterministic training! 
" "If you want deterministic then set benchmark=False") if not self.was_initialized: self.initialize(True) while self.epoch < self.max_num_epochs: self.print_to_log_file("\nepoch: ", self.epoch) epoch_start_time = time() train_losses_epoch = [] # train one epoch self.network.train() if self.use_progress_bar: with trange(self.num_batches_per_epoch) as tbar: for b in tbar: tbar.set_description("Epoch {}/{}".format(self.epoch+1, self.max_num_epochs)) l = self.run_iteration(self.tr_gen, True) tbar.set_postfix(loss=l) train_losses_epoch.append(l) else: for _ in range(self.num_batches_per_epoch): l = self.run_iteration(self.tr_gen, True) train_losses_epoch.append(l) self.all_tr_losses.append(np.mean(train_losses_epoch)) self.print_to_log_file("train loss : %.4f" % self.all_tr_losses[-1]) with torch.no_grad(): # validation with train=False self.network.eval() val_losses = [] for b in range(self.num_val_batches_per_epoch): l = self.run_iteration(self.val_gen, False, True) val_losses.append(l) self.all_val_losses.append(np.mean(val_losses)) self.print_to_log_file("validation loss: %.4f" % self.all_val_losses[-1]) if self.also_val_in_tr_mode: self.network.train() # validation with train=True val_losses = [] for b in range(self.num_val_batches_per_epoch): l = self.run_iteration(self.val_gen, False) val_losses.append(l) self.all_val_losses_tr_mode.append(np.mean(val_losses)) self.print_to_log_file("validation loss (train=True): %.4f" % self.all_val_losses_tr_mode[-1]) self.update_train_loss_MA() # needed for lr scheduler and stopping of training continue_training = self.on_epoch_end() epoch_end_time = time() if not continue_training: # allows for early stopping break self.epoch += 1 self.print_to_log_file("This epoch took %f s\n" % (epoch_end_time - epoch_start_time)) self.epoch -= 1 # if we don't do this we can get a problem with loading model_final_checkpoint. 
if self.save_final_checkpoint: self.save_checkpoint(join(self.output_folder, "model_final_checkpoint.model")) # now we can delete latest as it will be identical with final if isfile(join(self.output_folder, "model_latest.model")): os.remove(join(self.output_folder, "model_latest.model")) if isfile(join(self.output_folder, "model_latest.model.pkl")): os.remove(join(self.output_folder, "model_latest.model.pkl")) def maybe_update_lr(self): # maybe update learning rate if self.lr_scheduler is not None: assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler)) if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau): # lr scheduler is updated with moving average val loss. should be more robust self.lr_scheduler.step(self.train_loss_MA) else: self.lr_scheduler.step(self.epoch + 1) self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr'])) def maybe_save_checkpoint(self): """ Saves a checkpoint every save_ever epochs. 
:return: """ if self.save_intermediate_checkpoints and (self.epoch % self.save_every == (self.save_every - 1)): self.print_to_log_file("saving scheduled checkpoint file...") if not self.save_latest_only: self.save_checkpoint(join(self.output_folder, "model_ep_%03.0d.model" % (self.epoch + 1))) self.save_checkpoint(join(self.output_folder, "model_latest.model")) self.print_to_log_file("done") def update_eval_criterion_MA(self): """ If self.all_val_eval_metrics is unused (len=0) then we fall back to using -self.all_val_losses for the MA to determine early stopping (not a minimization, but a maximization of a metric and therefore the - in the latter case) :return: """ if self.val_eval_criterion_MA is None: if len(self.all_val_eval_metrics) == 0: self.val_eval_criterion_MA = - self.all_val_losses[-1] else: self.val_eval_criterion_MA = self.all_val_eval_metrics[-1] else: if len(self.all_val_eval_metrics) == 0: """ We here use alpha * old - (1 - alpha) * new because new in this case is the vlaidation loss and lower is better, so we need to negate it. 
""" self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA - ( 1 - self.val_eval_criterion_alpha) * \ self.all_val_losses[-1] else: self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA + ( 1 - self.val_eval_criterion_alpha) * \ self.all_val_eval_metrics[-1] def manage_patience(self): # update patience continue_training = True if self.patience is not None: # if best_MA_tr_loss_for_patience and best_epoch_based_on_MA_tr_loss were not yet initialized, # initialize them if self.best_MA_tr_loss_for_patience is None: self.best_MA_tr_loss_for_patience = self.train_loss_MA if self.best_epoch_based_on_MA_tr_loss is None: self.best_epoch_based_on_MA_tr_loss = self.epoch if self.best_val_eval_criterion_MA is None: self.best_val_eval_criterion_MA = self.val_eval_criterion_MA # check if the current epoch is the best one according to moving average of validation criterion. If so # then save 'best' model # Do not use this for validation. This is intended for test set prediction only. #self.print_to_log_file("current best_val_eval_criterion_MA is %.4f0" % self.best_val_eval_criterion_MA) #self.print_to_log_file("current val_eval_criterion_MA is %.4f" % self.val_eval_criterion_MA) if self.val_eval_criterion_MA > self.best_val_eval_criterion_MA: self.best_val_eval_criterion_MA = self.val_eval_criterion_MA #self.print_to_log_file("saving best epoch checkpoint...") if self.save_best_checkpoint: self.save_checkpoint(join(self.output_folder, "model_best.model")) # Now see if the moving average of the train loss has improved. 
If yes then reset patience, else # increase patience if self.train_loss_MA + self.train_loss_MA_eps < self.best_MA_tr_loss_for_patience: self.best_MA_tr_loss_for_patience = self.train_loss_MA self.best_epoch_based_on_MA_tr_loss = self.epoch #self.print_to_log_file("New best epoch (train loss MA): %03.4f" % self.best_MA_tr_loss_for_patience) else: pass #self.print_to_log_file("No improvement: current train MA %03.4f, best: %03.4f, eps is %03.4f" % # (self.train_loss_MA, self.best_MA_tr_loss_for_patience, self.train_loss_MA_eps)) # if patience has reached its maximum then finish training (provided lr is low enough) if self.epoch - self.best_epoch_based_on_MA_tr_loss > self.patience: if self.optimizer.param_groups[0]['lr'] > self.lr_threshold: #self.print_to_log_file("My patience ended, but I believe I need more time (lr > 1e-6)") self.best_epoch_based_on_MA_tr_loss = self.epoch - self.patience // 2 else: #self.print_to_log_file("My patience ended") continue_training = False else: pass #self.print_to_log_file( # "Patience: %d/%d" % (self.epoch - self.best_epoch_based_on_MA_tr_loss, self.patience)) return continue_training def on_epoch_end(self): self.finish_online_evaluation() # does not have to do anything, but can be used to update self.all_val_eval_ # metrics self.plot_progress() self.maybe_update_lr() self.maybe_save_checkpoint() self.update_eval_criterion_MA() continue_training = self.manage_patience() return continue_training def update_train_loss_MA(self): if self.train_loss_MA is None: self.train_loss_MA = self.all_tr_losses[-1] else: self.train_loss_MA = self.train_loss_MA_alpha * self.train_loss_MA + (1 - self.train_loss_MA_alpha) * \ self.all_tr_losses[-1] def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False): data_dict = next(data_generator) data = data_dict['data'] target = data_dict['target'] data = maybe_to_torch(data) target = maybe_to_torch(target) if torch.cuda.is_available(): data = to_cuda(data) target = 
to_cuda(target) self.optimizer.zero_grad() if self.fp16: with autocast(): output = self.network(data) del data l = self.loss(output, target) if do_backprop: self.amp_grad_scaler.scale(l).backward() self.amp_grad_scaler.step(self.optimizer) self.amp_grad_scaler.update() else: output = self.network(data) del data l = self.loss(output, target) if do_backprop: l.backward() self.optimizer.step() if run_online_evaluation: self.run_online_evaluation(output, target) del target return l.detach().cpu().numpy() def run_online_evaluation(self, *args, **kwargs): """ Can be implemented, does not have to :param output_torch: :param target_npy: :return: """ pass def finish_online_evaluation(self): """ Can be implemented, does not have to :return: """ pass @abstractmethod def validate(self, *args, **kwargs): pass def find_lr(self, num_iters=1000, init_value=1e-6, final_value=10., beta=0.98): """ stolen and adapted from here: https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html :param num_iters: :param init_value: :param final_value: :param beta: :return: """ import math self._maybe_init_amp() mult = (final_value / init_value) ** (1 / num_iters) lr = init_value self.optimizer.param_groups[0]['lr'] = lr avg_loss = 0. best_loss = 0. losses = [] log_lrs = [] for batch_num in range(1, num_iters + 1): # +1 because this one here is not designed to have negative loss... 
loss = self.run_iteration(self.tr_gen, do_backprop=True, run_online_evaluation=False).data.item() + 1 # Compute the smoothed loss avg_loss = beta * avg_loss + (1 - beta) * loss smoothed_loss = avg_loss / (1 - beta ** batch_num) # Stop if the loss is exploding if batch_num > 1 and smoothed_loss > 4 * best_loss: break # Record the best loss if smoothed_loss < best_loss or batch_num == 1: best_loss = smoothed_loss # Store the values losses.append(smoothed_loss) log_lrs.append(math.log10(lr)) # Update the lr for the next step lr *= mult self.optimizer.param_groups[0]['lr'] = lr import matplotlib.pyplot as plt lrs = [10 ** i for i in log_lrs] fig = plt.figure() plt.xscale('log') plt.plot(lrs[10:-5], losses[10:-5]) plt.savefig(join(self.output_folder, "lr_finder.png")) plt.close() return log_lrs, losses
30,849
41.376374
150
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNetTrainer.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil from collections import OrderedDict from multiprocessing import Pool from time import sleep from typing import Tuple, List import matplotlib import nnunet import numpy as np import torch from batchgenerators.utilities.file_and_folder_operations import * from nnunet.configuration import default_num_threads from nnunet.evaluation.evaluator import aggregate_scores from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.postprocessing.connected_components import determine_postprocessing from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \ default_2D_augmentation_params, get_default_augmentation, get_patch_size from nnunet.training.dataloading.dataset_loading import load_dataset, DataLoader3D, DataLoader2D, unpack_dataset from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss from nnunet.training.network_training.network_trainer import NetworkTrainer from nnunet.utilities.nd_softmax import softmax_helper from nnunet.utilities.tensor_utilities import sum_tensor from torch import nn from torch.optim 
import lr_scheduler matplotlib.use("agg") class nnUNetTrainer(NetworkTrainer): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): """ :param deterministic: :param fold: can be either [0 ... 5) for cross-validation, 'all' to train on all available training data or None if you wish to load some checkpoint and do inference only :param plans_file: the pkl file generated by preprocessing. This file will determine all design choices :param subfolder_with_preprocessed_data: must be a subfolder of dataset_directory (just the name of the folder, not the entire path). This is where the preprocessed data lies that will be used for network training. We made this explicitly available so that differently preprocessed data can coexist and the user can choose what to use. Can be None if you are doing inference only. :param output_folder: where to store parameters, plot progress and to the validation :param dataset_directory: the parent directory in which the preprocessed Task data is stored. This is required because the split information is stored in this directory. For running prediction only this input is not required and may be set to None :param batch_dice: compute dice loss for each sample and average over all samples in the batch or pretend the batch is a pseudo volume? :param stage: The plans file may contain several stages (used for lowres / highres / pyramid). Stage must be specified for training: if stage 1 exists then stage 1 is the high resolution stage, otherwise it's 0 :param unpack_data: if False, npz preprocessed data will not be unpacked to npy. This consumes less space but is considerably slower! Running unpack_data=False with 2d should never be done! IMPORTANT: If you inherit from nnUNetTrainer and the init args change then you need to redefine self.init_args in your init accordingly. Otherwise checkpoints won't load properly! 
""" super(nnUNetTrainer, self).__init__(deterministic, fp16) self.unpack_data = unpack_data self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) # set through arguments from init self.stage = stage self.experiment_name = self.__class__.__name__ self.plans_file = plans_file self.output_folder = output_folder self.dataset_directory = dataset_directory self.output_folder_base = self.output_folder self.fold = fold self.plans = None # if we are running inference only then the self.dataset_directory is set (due to checkpoint loading) but it # irrelevant if self.dataset_directory is not None and isdir(self.dataset_directory): self.gt_niftis_folder = join(self.dataset_directory, "gt_segmentations") else: self.gt_niftis_folder = None self.folder_with_preprocessed_data = None # set in self.initialize() self.dl_tr = self.dl_val = None self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \ self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = \ self.net_num_pool_op_kernel_sizes = self.net_conv_kernel_sizes = None # loaded automatically from plans_file self.basic_generator_patch_size = self.data_aug_params = self.transpose_forward = self.transpose_backward = None self.batch_dice = batch_dice self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}) self.online_eval_foreground_dc = [] self.online_eval_tp = [] self.online_eval_fp = [] self.online_eval_fn = [] self.classes = self.do_dummy_2D_aug = self.use_mask_for_norm = self.only_keep_largest_connected_component = \ self.min_region_size_per_class = self.min_size_per_class = None self.inference_pad_border_mode = "constant" self.inference_pad_kwargs = {'constant_values': 0} self.update_fold(fold) self.pad_all_sides = None self.lr_scheduler_eps = 1e-3 self.lr_scheduler_patience = 30 self.initial_lr = 3e-4 self.weight_decay = 3e-5 
self.oversample_foreground_percent = 0.33 self.conv_per_stage = None self.regions_class_order = None def update_fold(self, fold): """ used to swap between folds for inference (ensemble of models from cross-validation) DO NOT USE DURING TRAINING AS THIS WILL NOT UPDATE THE DATASET SPLIT AND THE DATA AUGMENTATION GENERATORS :param fold: :return: """ if fold is not None: if isinstance(fold, str): assert fold == "all", "if self.fold is a string then it must be \'all\'" if self.output_folder.endswith("%s" % str(self.fold)): self.output_folder = self.output_folder_base self.output_folder = join(self.output_folder, "%s" % str(fold)) else: if self.output_folder.endswith("fold_%s" % str(self.fold)): self.output_folder = self.output_folder_base self.output_folder = join(self.output_folder, "fold_%s" % str(fold)) self.fold = fold def setup_DA_params(self): if self.threeD: self.data_aug_params = default_3D_augmentation_params if self.do_dummy_2D_aug: self.data_aug_params["dummy_2D"] = True self.print_to_log_file("Using dummy2d data augmentation") self.data_aug_params["elastic_deform_alpha"] = \ default_2D_augmentation_params["elastic_deform_alpha"] self.data_aug_params["elastic_deform_sigma"] = \ default_2D_augmentation_params["elastic_deform_sigma"] self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"] else: self.do_dummy_2D_aug = False if max(self.patch_size) / min(self.patch_size) > 1.5: default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. 
* np.pi) self.data_aug_params = default_2D_augmentation_params self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size)) patch_size_for_spatialtransform = self.patch_size[1:] else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) patch_size_for_spatialtransform = self.patch_size self.data_aug_params['selected_seg_channels'] = [0] self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform def initialize(self, training=True, force_load_plans=False): """ For prediction of test cases just set training=False, this will prevent loading of training data and training batchgenerator initialization :param training: :return: """ maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() if training: self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: self.print_to_log_file("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) self.print_to_log_file("done") else: self.print_to_log_file( "INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_default_augmentation(self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() # assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) self.was_initialized = True def initialize_network(self): """ This is specific to the U-Net and must be adapted for other network architectures :return: """ # self.print_to_log_file(self.net_num_pool_op_kernel_sizes) # self.print_to_log_file(self.net_conv_kernel_sizes) net_numpool = len(self.net_num_pool_op_kernel_sizes) if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, net_numpool, self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) self.network.inference_apply_nonlin = softmax_helper if torch.cuda.is_available(): self.network.cuda() def initialize_optimizer_and_scheduler(self): assert self.network is not None, "self.initialize_network must be called first" self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, amsgrad=True) 
self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2, patience=self.lr_scheduler_patience, verbose=True, threshold=self.lr_scheduler_eps, threshold_mode="abs") def plot_network_architecture(self): try: from batchgenerators.utilities.file_and_folder_operations import join import hiddenlayer as hl if torch.cuda.is_available(): g = hl.build_graph(self.network, torch.rand((1, self.num_input_channels, *self.patch_size)).cuda(), transforms=None) else: g = hl.build_graph(self.network, torch.rand((1, self.num_input_channels, *self.patch_size)), transforms=None) g.save(join(self.output_folder, "network_architecture.pdf")) del g except Exception as e: self.print_to_log_file("Unable to plot network architecture:") self.print_to_log_file(e) self.print_to_log_file("\nprinting the network instead:\n") self.print_to_log_file(self.network) self.print_to_log_file("\n") finally: if torch.cuda.is_available(): torch.cuda.empty_cache() def run_training(self): dct = OrderedDict() for k in self.__dir__(): if not k.startswith("__"): if not callable(getattr(self, k)): dct[k] = str(getattr(self, k)) del dct['plans'] del dct['intensity_properties'] del dct['dataset'] del dct['dataset_tr'] del dct['dataset_val'] save_json(dct, join(self.output_folder, "debug.json")) import shutil shutil.copy(self.plans_file, join(self.output_folder_base, "plans.pkl")) super(nnUNetTrainer, self).run_training() def load_plans_file(self): """ This is what actually configures the entire experiment. The plans file is generated by experiment planning :return: """ self.plans = load_pickle(self.plans_file) def process_plans(self, plans): if self.stage is None: assert len(list(plans['plans_per_stage'].keys())) == 1, \ "If self.stage is None then there can be only one stage in the plans file. That seems to not be the " \ "case. 
Please specify which stage of the cascade must be trained" self.stage = list(plans['plans_per_stage'].keys())[0] self.plans = plans stage_plans = self.plans['plans_per_stage'][self.stage] self.batch_size = stage_plans['batch_size'] self.net_pool_per_axis = stage_plans['num_pool_per_axis'] self.patch_size = np.array(stage_plans['patch_size']).astype(int) self.do_dummy_2D_aug = stage_plans['do_dummy_2D_data_aug'] if 'pool_op_kernel_sizes' not in stage_plans.keys(): assert 'num_pool_per_axis' in stage_plans.keys() self.print_to_log_file("WARNING! old plans file with missing pool_op_kernel_sizes. Attempting to fix it...") self.net_num_pool_op_kernel_sizes = [] for i in range(max(self.net_pool_per_axis)): curr = [] for j in self.net_pool_per_axis: if (max(self.net_pool_per_axis) - j) <= i: curr.append(2) else: curr.append(1) self.net_num_pool_op_kernel_sizes.append(curr) else: self.net_num_pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes'] if 'conv_kernel_sizes' not in stage_plans.keys(): self.print_to_log_file("WARNING! old plans file with missing conv_kernel_sizes. Attempting to fix it...") self.net_conv_kernel_sizes = [[3] * len(self.net_pool_per_axis)] * (max(self.net_pool_per_axis) + 1) else: self.net_conv_kernel_sizes = stage_plans['conv_kernel_sizes'] self.pad_all_sides = None # self.patch_size self.intensity_properties = plans['dataset_properties']['intensityproperties'] self.normalization_schemes = plans['normalization_schemes'] self.base_num_features = plans['base_num_features'] self.num_input_channels = plans['num_modalities'] self.num_classes = plans['num_classes'] + 1 # background is no longer in num_classes self.classes = plans['all_classes'] self.use_mask_for_norm = plans['use_mask_for_norm'] self.only_keep_largest_connected_component = plans['keep_only_largest_region'] self.min_region_size_per_class = plans['min_region_size_per_class'] self.min_size_per_class = None # DONT USE THIS. 
plans['min_size_per_class'] if plans.get('transpose_forward') is None or plans.get('transpose_backward') is None: print("WARNING! You seem to have data that was preprocessed with a previous version of nnU-Net. " "You should rerun preprocessing. We will proceed and assume that both transpose_foward " "and transpose_backward are [0, 1, 2]. If that is not correct then weird things will happen!") plans['transpose_forward'] = [0, 1, 2] plans['transpose_backward'] = [0, 1, 2] self.transpose_forward = plans['transpose_forward'] self.transpose_backward = plans['transpose_backward'] if len(self.patch_size) == 2: self.threeD = False elif len(self.patch_size) == 3: self.threeD = True else: raise RuntimeError("invalid patch size in plans file: %s" % str(self.patch_size)) if "conv_per_stage" in plans.keys(): # this ha sbeen added to the plans only recently self.conv_per_stage = plans['conv_per_stage'] else: self.conv_per_stage = 2 def load_dataset(self): self.dataset = load_dataset(self.folder_with_preprocessed_data) def get_basic_generators(self): self.load_dataset() self.do_split() if self.threeD: dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size, False, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r') dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r') else: dl_tr = DataLoader2D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r') dl_val = DataLoader2D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, oversample_foreground_percent=self.oversample_foreground_percent, 
pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r') return dl_tr, dl_val def preprocess_patient(self, input_files): """ Used to predict new unseen data. Not used for the preprocessing of the training/test data :param input_files: :return: """ from nnunet.training.model_restore import recursive_find_python_class preprocessor_name = self.plans.get('preprocessor_name') if preprocessor_name is None: if self.threeD: preprocessor_name = "GenericPreprocessor" else: preprocessor_name = "PreprocessorFor2D" print("using preprocessor", preprocessor_name) preprocessor_class = recursive_find_python_class([join(nnunet.__path__[0], "preprocessing")], preprocessor_name, current_module="nnunet.preprocessing") assert preprocessor_class is not None, "Could not find preprocessor %s in nnunet.preprocessing" % \ preprocessor_name preprocessor = preprocessor_class(self.normalization_schemes, self.use_mask_for_norm, self.transpose_forward, self.intensity_properties) d, s, properties = preprocessor.preprocess_test_case(input_files, self.plans['plans_per_stage'][self.stage][ 'current_spacing']) return d, s, properties def preprocess_predict_nifti(self, input_files: List[str], output_file: str = None, softmax_ouput_file: str = None, mixed_precision: bool = True) -> None: """ Use this to predict new data :param input_files: :param output_file: :param softmax_ouput_file: :param mixed_precision: :return: """ print("preprocessing...") d, s, properties = self.preprocess_patient(input_files) print("predicting...") pred = self.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=self.data_aug_params["do_mirror"], mirror_axes=self.data_aug_params['mirror_axes'], use_sliding_window=True, step_size=0.5, use_gaussian=True, pad_border_mode='constant', pad_kwargs={'constant_values': 0}, verbose=True, all_in_gpu=False, mixed_precision=mixed_precision)[1] pred = pred.transpose([0] + [i + 1 for i in self.transpose_backward]) if 'segmentation_export_params' in self.plans.keys(): 
force_separate_z = self.plans['segmentation_export_params']['force_separate_z'] interpolation_order = self.plans['segmentation_export_params']['interpolation_order'] interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z'] else: force_separate_z = None interpolation_order = 1 interpolation_order_z = 0 print("resampling to original spacing and nifti export...") save_segmentation_nifti_from_softmax(pred, output_file, properties, interpolation_order, self.regions_class_order, None, None, softmax_ouput_file, None, force_separate_z=force_separate_z, interpolation_order_z=interpolation_order_z) print("done") def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True, mirror_axes: Tuple[int] = None, use_sliding_window: bool = True, step_size: float = 0.5, use_gaussian: bool = True, pad_border_mode: str = 'constant', pad_kwargs: dict = None, all_in_gpu: bool = False, verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]: """ :param data: :param do_mirroring: :param mirror_axes: :param use_sliding_window: :param step_size: :param use_gaussian: :param pad_border_mode: :param pad_kwargs: :param all_in_gpu: :param verbose: :return: """ if pad_border_mode == 'constant' and pad_kwargs is None: pad_kwargs = {'constant_values': 0} if do_mirroring and mirror_axes is None: mirror_axes = self.data_aug_params['mirror_axes'] if do_mirroring: assert self.data_aug_params["do_mirror"], "Cannot do mirroring as test time augmentation when training " \ "was done without mirroring" valid = list((SegmentationNetwork, nn.DataParallel)) assert isinstance(self.network, tuple(valid)) current_mode = self.network.training self.network.eval() ret = self.network.predict_3D(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, patch_size=self.patch_size, regions_class_order=self.regions_class_order, use_gaussian=use_gaussian, 
pad_border_mode=pad_border_mode, pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose, mixed_precision=mixed_precision) self.network.train(current_mode) return ret def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, segmentation_export_kwargs: dict = None): """ if debug=True then the temporary files generated for postprocessing determination will be kept """ current_mode = self.network.training self.network.eval() assert self.was_initialized, "must initialize, ideally with checkpoint (or train first)" if self.dataset_val is None: self.load_dataset() self.do_split() if segmentation_export_kwargs is None: if 'segmentation_export_params' in self.plans.keys(): force_separate_z = self.plans['segmentation_export_params']['force_separate_z'] interpolation_order = self.plans['segmentation_export_params']['interpolation_order'] interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z'] else: force_separate_z = None interpolation_order = 1 interpolation_order_z = 0 else: force_separate_z = segmentation_export_kwargs['force_separate_z'] interpolation_order = segmentation_export_kwargs['interpolation_order'] interpolation_order_z = segmentation_export_kwargs['interpolation_order_z'] # predictions as they come from the network go here output_folder = join(self.output_folder, validation_folder_name) maybe_mkdir_p(output_folder) # this is for debug purposes my_input_args = {'do_mirroring': do_mirroring, 'use_sliding_window': use_sliding_window, 'step_size': step_size, 'save_softmax': save_softmax, 'use_gaussian': use_gaussian, 'overwrite': overwrite, 'validation_folder_name': validation_folder_name, 'debug': debug, 'all_in_gpu': all_in_gpu, 'segmentation_export_kwargs': segmentation_export_kwargs, } 
save_json(my_input_args, join(output_folder, "validation_args.json")) if do_mirroring: if not self.data_aug_params['do_mirror']: raise RuntimeError("We did not train with mirroring so you cannot do inference with mirroring enabled") mirror_axes = self.data_aug_params['mirror_axes'] else: mirror_axes = () pred_gt_tuples = [] export_pool = Pool(default_num_threads) results = [] for k in self.dataset_val.keys(): properties = load_pickle(self.dataset[k]['properties_file']) fname = properties['list_of_data_files'][0].split("/")[-1][:-12] if overwrite or (not isfile(join(output_folder, fname + ".nii.gz"))) or \ (save_softmax and not isfile(join(output_folder, fname + ".npz"))): data = np.load(self.dataset[k]['data_file'])['data'] print(k, data.shape) data[-1][data[-1] == -1] = 0 softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data[:-1], do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, use_gaussian=use_gaussian, all_in_gpu=all_in_gpu, mixed_precision=self.fp16)[1] softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in self.transpose_backward]) if save_softmax: softmax_fname = join(output_folder, fname + ".npz") else: softmax_fname = None """There is a problem with python process communication that prevents us from communicating obejcts larger than 2 GB between processes (basically when the length of the pickle string that will be sent is communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will then be read (and finally deleted) by the Process. 
save_segmentation_nifti_from_softmax can take either filename or np.ndarray and will handle this automatically""" if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85): # *0.85 just to be save np.save(join(output_folder, fname + ".npy"), softmax_pred) softmax_pred = join(output_folder, fname + ".npy") results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax, ((softmax_pred, join(output_folder, fname + ".nii.gz"), properties, interpolation_order, self.regions_class_order, None, None, softmax_fname, None, force_separate_z, interpolation_order_z), ) ) ) pred_gt_tuples.append([join(output_folder, fname + ".nii.gz"), join(self.gt_niftis_folder, fname + ".nii.gz")]) _ = [i.get() for i in results] self.print_to_log_file("finished prediction") # evaluate raw predictions self.print_to_log_file("evaluation of raw predictions") task = self.dataset_directory.split("/")[-1] job_name = self.experiment_name _ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)), json_output_file=join(output_folder, "summary.json"), json_name=job_name + " val tiled %s" % (str(use_sliding_window)), json_author="Fabian", json_task=task, num_threads=default_num_threads) # in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything # except the largest connected component for each class. To see if this improves results, we do this for all # classes and then rerun the evaluation. Those classes for which this resulted in an improved dice score will # have this applied during inference as well self.print_to_log_file("determining postprocessing") determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name, final_subf_name=validation_folder_name + "_postprocessed", debug=debug) # after this the final predictions for the vlaidation set can be found in validation_folder_name_base + "_postprocessed" # They are always in that folder, even if no postprocessing as applied! 
# detemining postprocesing on a per-fold basis may be OK for this fold but what if another fold finds another # postprocesing to be better? In this case we need to consolidate. At the time the consolidation is going to be # done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to # be used later gt_nifti_folder = join(self.output_folder_base, "gt_niftis") maybe_mkdir_p(gt_nifti_folder) for f in subfiles(self.gt_niftis_folder, suffix=".nii.gz"): success = False attempts = 0 e = None while not success and attempts < 10: try: shutil.copy(f, gt_nifti_folder) success = True except OSError as e: attempts += 1 sleep(1) if not success: print("Could not copy gt nifti file %s into folder %s" % (f, gt_nifti_folder)) if e is not None: raise e self.network.train(current_mode) def run_online_evaluation(self, output, target): with torch.no_grad(): num_classes = output.shape[1] output_softmax = softmax_helper(output) output_seg = output_softmax.argmax(1) target = target[:, 0] axes = tuple(range(1, len(target.shape))) tp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index) fp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index) fn_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index) for c in range(1, num_classes): tp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target == c).float(), axes=axes) fp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target != c).float(), axes=axes) fn_hard[:, c - 1] = sum_tensor((output_seg != c).float() * (target == c).float(), axes=axes) tp_hard = tp_hard.sum(0, keepdim=False).detach().cpu().numpy() fp_hard = fp_hard.sum(0, keepdim=False).detach().cpu().numpy() fn_hard = fn_hard.sum(0, keepdim=False).detach().cpu().numpy() self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8))) self.online_eval_tp.append(list(tp_hard)) 
self.online_eval_fp.append(list(fp_hard)) self.online_eval_fn.append(list(fn_hard)) def finish_online_evaluation(self): self.online_eval_tp = np.sum(self.online_eval_tp, 0) self.online_eval_fp = np.sum(self.online_eval_fp, 0) self.online_eval_fn = np.sum(self.online_eval_fn, 0) global_dc_per_class = [i for i in [2 * i / (2 * i + j + k) for i, j, k in zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)] if not np.isnan(i)] self.all_val_eval_metrics.append(np.mean(global_dc_per_class)) self.print_to_log_file("Average global foreground Dice:", str(global_dc_per_class)) self.print_to_log_file("(interpret this as an estimate for the Dice of the different classes. This is not " "exact.)") self.online_eval_foreground_dc = [] self.online_eval_tp = [] self.online_eval_fp = [] self.online_eval_fn = [] def save_checkpoint(self, fname, save_optimizer=True): super(nnUNetTrainer, self).save_checkpoint(fname, save_optimizer) info = OrderedDict() info['init'] = self.init_args info['name'] = self.__class__.__name__ info['class'] = str(self.__class__) info['plans'] = self.plans write_pickle(info, fname + ".pkl")
39,650
53.094134
142
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/competitions_with_custom_Trainers/BraTS2020/nnUNetTrainerV2BraTSRegions.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from time import sleep import numpy as np import torch from batchgenerators.utilities.file_and_folder_operations import * from torch import nn from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.utils import clip_grad_norm_ from nnunet.evaluation.region_based_evaluation import evaluate_regions, get_brats_regions from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation from nnunet.training.dataloading.dataset_loading import unpack_dataset from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2 from nnunet.training.loss_functions.dice_loss import DC_and_BCE_loss, get_tp_fp_fn_tn, SoftDiceLoss from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.training.network_training.nnUNetTrainerV2_DDP import nnUNetTrainerV2_DDP from nnunet.utilities.distributed import awesome_allgather_function from nnunet.utilities.to_torch import maybe_to_torch, to_cuda class nnUNetTrainerV2BraTSRegions_BN(nnUNetTrainerV2): def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = 
nn.BatchNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.BatchNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = torch.nn.Softmax(1) class nnUNetTrainerV2BraTSRegions(nnUNetTrainerV2): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.regions = get_brats_regions() self.regions_class_order = (1, 2, 3) self.loss = DC_and_BCE_loss({}, {'batch_dice': False, 'do_bg': True, 'smooth': 0}) def process_plans(self, plans): super().process_plans(plans) """ The network has as many outputs as we have regions """ self.num_classes = len(self.regions) def initialize_network(self): """inference_apply_nonlin to sigmoid""" super().initialize_network() self.network.inference_apply_nonlin = nn.Sigmoid() def initialize(self, training=True, force_load_plans=False): """ this is a copy of nnUNetTrainerV2's initialize. 
We only add the regions to the data augmentation :param training: :param force_load_plans: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights # now wrap the loss self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights) ################# END ################### self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") else: print( "INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, regions=self.regions) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: int = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, segmentation_export_kwargs: dict = None): super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian, overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug, all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs) # run brats specific validation output_folder = join(self.output_folder, validation_folder_name) evaluate_regions(output_folder, self.gt_niftis_folder, self.regions) def run_online_evaluation(self, output, target): output = output[0] target = target[0] with torch.no_grad(): out_sigmoid = torch.sigmoid(output) out_sigmoid = (out_sigmoid > 0.5).float() if self.threeD: axes = (0, 2, 3, 4) else: axes = (0, 2, 3) tp, fp, fn, _ = get_tp_fp_fn_tn(out_sigmoid, target, axes=axes) tp_hard = tp.detach().cpu().numpy() fp_hard = fp.detach().cpu().numpy() fn_hard 
= fn.detach().cpu().numpy() self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8))) self.online_eval_tp.append(list(tp_hard)) self.online_eval_fp.append(list(fp_hard)) self.online_eval_fn.append(list(fn_hard)) class nnUNetTrainerV2BraTSRegions_Dice(nnUNetTrainerV2BraTSRegions): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.loss = SoftDiceLoss(apply_nonlin=torch.sigmoid, **{'batch_dice': False, 'do_bg': True, 'smooth': 0}) class nnUNetTrainerV2BraTSRegions_DDP(nnUNetTrainerV2_DDP): def __init__(self, plans_file, fold, local_rank, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, distribute_batch_size=False, fp16=False): super().__init__(plans_file, fold, local_rank, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, distribute_batch_size, fp16) self.regions = get_brats_regions() self.regions_class_order = (1, 2, 3) self.loss = None self.ce_loss = nn.BCEWithLogitsLoss() def process_plans(self, plans): super().process_plans(plans) """ The network has as many outputs as we have regions """ self.num_classes = len(self.regions) def initialize_network(self): """inference_apply_nonlin to sigmoid""" super().initialize_network() self.network.inference_apply_nonlin = nn.Sigmoid() def initialize(self, training=True, force_load_plans=False): """ this is a copy of nnUNetTrainerV2's initialize. 
We only add the regions to the data augmentation :param training: :param force_load_plans: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: if self.local_rank == 0: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") else: # we need to wait until worker 0 has finished unpacking npz_files = subfiles(self.folder_with_preprocessed_data, suffix=".npz", join=False) case_ids = [i[:-4] for i in npz_files] all_present = all( [isfile(join(self.folder_with_preprocessed_data, i + ".npy")) for i in case_ids]) while not all_present: print("worker", self.local_rank, "is waiting for unpacking") sleep(3) all_present = all( [isfile(join(self.folder_with_preprocessed_data, i + ".npy")) for i in case_ids]) # there is some slight chance that there may arise some error because dataloader are loading a file # that is still being written by worker 0. We ignore this for now an address it only if it becomes # relevant # (this can occur because while worker 0 writes the file is technically present so the other workers # will proceed and eventually try to read it) else: print( "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you " "will wait all winter for your model to finish!") # setting weights for deep supervision losses net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. 
Normalize weights so that they sum to 1 mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights seeds_train = np.random.random_integers(0, 99999, self.data_aug_params.get('num_threads')) seeds_val = np.random.random_integers(0, 99999, max(self.data_aug_params.get('num_threads') // 2, 1)) print("seeds train", seeds_train) print("seeds_val", seeds_val) self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, seeds_train=seeds_train, seeds_val=seeds_val, pin_memory=self.pin_memory, regions=self.regions) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() self._maybe_init_amp() self.network = DDP(self.network, self.local_rank) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: int = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, segmentation_export_kwargs: dict = None): super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian, overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug, all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs) # run brats specific validation output_folder = join(self.output_folder, validation_folder_name) 
evaluate_regions(output_folder, self.gt_niftis_folder, self.regions) def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False): raise NotImplementedError("this class has not been changed to work with pytorch amp yet!") data_dict = next(data_generator) data = data_dict['data'] target = data_dict['target'] data = maybe_to_torch(data) target = maybe_to_torch(target) if torch.cuda.is_available(): data = to_cuda(data, gpu_id=None) target = to_cuda(target, gpu_id=None) self.optimizer.zero_grad() output = self.network(data) del data total_loss = None for i in range(len(output)): # Starting here it gets spicy! axes = tuple(range(2, len(output[i].size()))) # network does not do softmax. We need to do softmax for dice output_softmax = torch.sigmoid(output[i]) # get the tp, fp and fn terms we need tp, fp, fn, _ = get_tp_fp_fn_tn(output_softmax, target[i], axes, mask=None) # for dice, compute nominator and denominator so that we have to accumulate only 2 instead of 3 variables # do_bg=False in nnUNetTrainer -> [:, 1:] nominator = 2 * tp[:, 1:] denominator = 2 * tp[:, 1:] + fp[:, 1:] + fn[:, 1:] if self.batch_dice: # for DDP we need to gather all nominator and denominator terms from all GPUS to do proper batch dice nominator = awesome_allgather_function.apply(nominator) denominator = awesome_allgather_function.apply(denominator) nominator = nominator.sum(0) denominator = denominator.sum(0) else: pass ce_loss = self.ce_loss(output[i], target[i]) # we smooth by 1e-5 to penalize false positives if tp is 0 dice_loss = (- (nominator + 1e-5) / (denominator + 1e-5)).mean() if total_loss is None: total_loss = self.ds_loss_weights[i] * (ce_loss + dice_loss) else: total_loss += self.ds_loss_weights[i] * (ce_loss + dice_loss) if run_online_evaluation: with torch.no_grad(): output = output[0] target = target[0] out_sigmoid = torch.sigmoid(output) out_sigmoid = (out_sigmoid > 0.5).float() if self.threeD: axes = (2, 3, 4) else: axes = (2, 3) tp, fp, fn, _ = 
get_tp_fp_fn_tn(out_sigmoid, target, axes=axes) tp_hard = awesome_allgather_function.apply(tp) fp_hard = awesome_allgather_function.apply(fp) fn_hard = awesome_allgather_function.apply(fn) # print_if_rank0("after allgather", tp_hard.shape) # print_if_rank0("after sum", tp_hard.shape) self.run_online_evaluation(tp_hard.detach().cpu().numpy().sum(0), fp_hard.detach().cpu().numpy().sum(0), fn_hard.detach().cpu().numpy().sum(0)) del target if do_backprop: if not self.fp16 or amp is None or not torch.cuda.is_available(): total_loss.backward() else: with amp.scale_loss(total_loss, self.optimizer) as scaled_loss: scaled_loss.backward() _ = clip_grad_norm_(self.network.parameters(), 12) self.optimizer.step() return total_loss.detach().cpu().numpy() def run_online_evaluation(self, tp, fp, fn): self.online_eval_foreground_dc.append(list((2 * tp) / (2 * tp + fp + fn + 1e-8))) self.online_eval_tp.append(list(tp)) self.online_eval_fp.append(list(fp)) self.online_eval_fn.append(list(fn))
21,055
49.252983
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/competitions_with_custom_Trainers/BraTS2020/nnUNetTrainerV2BraTSRegions_moreDA.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from batchgenerators.utilities.file_and_folder_operations import * from torch import nn from nnunet.evaluation.region_based_evaluation import evaluate_regions, get_brats_regions from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \ default_2D_augmentation_params, get_patch_size from nnunet.training.dataloading.dataset_loading import unpack_dataset from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2 from nnunet.training.loss_functions.dice_loss import DC_and_BCE_loss, get_tp_fp_fn_tn from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.training.network_training.nnUNet_variants.data_augmentation.nnUNetTrainerV2_DA3 import \ nnUNetTrainerV2_DA3_BN, get_insaneDA_augmentation2 class nnUNetTrainerV2BraTSRegions_DA3_BN(nnUNetTrainerV2_DA3_BN): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, 
batch_dice, stage, unpack_data, deterministic, fp16) self.regions = get_brats_regions() self.regions_class_order = (1, 2, 3) self.loss = DC_and_BCE_loss({}, {'batch_dice': False, 'do_bg': True, 'smooth': 0}) def process_plans(self, plans): super().process_plans(plans) """ The network has as many outputs as we have regions """ self.num_classes = len(self.regions) def initialize_network(self): """inference_apply_nonlin to sigmoid""" super().initialize_network() self.network.inference_apply_nonlin = nn.Sigmoid() def initialize(self, training=True, force_load_plans=False): if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights # now wrap the loss self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights) ################# END ################### self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") else: print( "INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_insaneDA_augmentation2( self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, pin_memory=self.pin_memory, regions=self.regions ) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: int = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, segmentation_export_kwargs: dict = None): super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian, overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug, all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs) # run brats specific validation output_folder = join(self.output_folder, validation_folder_name) evaluate_regions(output_folder, self.gt_niftis_folder, self.regions) def run_online_evaluation(self, output, target): output = output[0] target = target[0] with torch.no_grad(): out_sigmoid = torch.sigmoid(output) out_sigmoid = (out_sigmoid > 0.5).float() if self.threeD: axes = (0, 2, 3, 4) else: axes = (0, 2, 3) tp, fp, fn, _ = get_tp_fp_fn_tn(out_sigmoid, target, axes=axes) tp_hard = tp.detach().cpu().numpy() fp_hard = 
fp.detach().cpu().numpy() fn_hard = fn.detach().cpu().numpy() self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8))) self.online_eval_tp.append(list(tp_hard)) self.online_eval_fp.append(list(fp_hard)) self.online_eval_fn.append(list(fn_hard)) class nnUNetTrainerV2BraTSRegions_DA3(nnUNetTrainerV2BraTSRegions_DA3_BN): def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = nn.Sigmoid() class nnUNetTrainerV2BraTSRegions_DA3_BD(nnUNetTrainerV2BraTSRegions_DA3): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.loss = DC_and_BCE_loss({}, {'batch_dice': True, 'do_bg': True, 'smooth': 0}) class nnUNetTrainerV2BraTSRegions_DA3_BN_BD(nnUNetTrainerV2BraTSRegions_DA3_BN): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, 
stage, unpack_data, deterministic, fp16) self.loss = DC_and_BCE_loss({}, {'batch_dice': True, 'do_bg': True, 'smooth': 0}) class nnUNetTrainerV2BraTSRegions_DA4_BN(nnUNetTrainerV2BraTSRegions_DA3_BN): def setup_DA_params(self): nnUNetTrainerV2.setup_DA_params(self) self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod( np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1] if self.threeD: self.data_aug_params = default_3D_augmentation_params self.data_aug_params['rotation_x'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. * np.pi) self.data_aug_params['rotation_y'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. * np.pi) self.data_aug_params['rotation_z'] = (-90. / 360 * 2. * np.pi, 90. / 360 * 2. * np.pi) if self.do_dummy_2D_aug: self.data_aug_params["dummy_2D"] = True self.print_to_log_file("Using dummy2d data augmentation") self.data_aug_params["elastic_deform_alpha"] = \ default_2D_augmentation_params["elastic_deform_alpha"] self.data_aug_params["elastic_deform_sigma"] = \ default_2D_augmentation_params["elastic_deform_sigma"] self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"] else: self.do_dummy_2D_aug = False if max(self.patch_size) / min(self.patch_size) > 1.5: default_2D_augmentation_params['rotation_x'] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. 
* np.pi) self.data_aug_params = default_2D_augmentation_params self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size)) patch_size_for_spatialtransform = self.patch_size[1:] else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) patch_size_for_spatialtransform = self.patch_size self.data_aug_params['selected_seg_channels'] = [0] self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform self.data_aug_params["p_rot"] = 0.3 self.data_aug_params["scale_range"] = (0.65, 1.6) self.data_aug_params["p_scale"] = 0.3 self.data_aug_params["independent_scale_factor_for_each_axis"] = True self.data_aug_params["p_independent_scale_per_axis"] = 0.3 self.data_aug_params["do_elastic"] = True self.data_aug_params["p_eldef"] = 0.2 self.data_aug_params["eldef_deformation_scale"] = (0, 0.25) self.data_aug_params["do_additive_brightness"] = True self.data_aug_params["additive_brightness_mu"] = 0 self.data_aug_params["additive_brightness_sigma"] = 0.2 self.data_aug_params["additive_brightness_p_per_sample"] = 0.3 self.data_aug_params["additive_brightness_p_per_channel"] = 0.5 self.data_aug_params['gamma_range'] = (0.5, 1.6) self.data_aug_params['num_cached_per_thread'] = 4 class nnUNetTrainerV2BraTSRegions_DA4_BN_BD(nnUNetTrainerV2BraTSRegions_DA4_BN): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): 
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.loss = DC_and_BCE_loss({}, {'batch_dice': True, 'do_bg': True, 'smooth': 0})
14,362
51.805147
119
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py
import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNet_variants.data_augmentation.nnUNetTrainerV2_insaneDA import \ nnUNetTrainerV2_insaneDA from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_MMS(nnUNetTrainerV2_insaneDA): def setup_DA_params(self): super().setup_DA_params() self.data_aug_params["p_rot"] = 0.7 self.data_aug_params["p_eldef"] = 0.1 self.data_aug_params["p_scale"] = 0.3 self.data_aug_params["independent_scale_factor_for_each_axis"] = True self.data_aug_params["p_independent_scale_per_axis"] = 0.3 self.data_aug_params["do_additive_brightness"] = True self.data_aug_params["additive_brightness_mu"] = 0 self.data_aug_params["additive_brightness_sigma"] = 0.2 self.data_aug_params["additive_brightness_p_per_sample"] = 0.3 self.data_aug_params["additive_brightness_p_per_channel"] = 1 self.data_aug_params["elastic_deform_alpha"] = (0., 300.) self.data_aug_params["elastic_deform_sigma"] = (9., 15.) 
self.data_aug_params['gamma_range'] = (0.5, 1.6) def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.BatchNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.BatchNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper """def run_training(self): from batchviewer import view_batch a = next(self.tr_gen) view_batch(a['data']) import IPython;IPython.embed()"""
2,662
42.655738
117
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/nnUNetTrainerNoDA.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import matplotlib from batchgenerators.utilities.file_and_folder_operations import maybe_mkdir_p, join from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.training.data_augmentation.default_data_augmentation import get_no_augmentation from nnunet.training.dataloading.dataset_loading import unpack_dataset, DataLoader3D, DataLoader2D from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer from torch import nn matplotlib.use("agg") class nnUNetTrainerNoDA(nnUNetTrainer): def get_basic_generators(self): self.load_dataset() self.do_split() if self.threeD: dl_tr = DataLoader3D(self.dataset_tr, self.patch_size, self.patch_size, self.batch_size, False, oversample_foreground_percent=self.oversample_foreground_percent , pad_mode="constant", pad_sides=self.pad_all_sides) dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides) else: dl_tr = DataLoader2D(self.dataset_tr, self.patch_size, self.patch_size, self.batch_size, transpose=self.plans.get('transpose_forward'), oversample_foreground_percent=self.oversample_foreground_percent , pad_mode="constant", pad_sides=self.pad_all_sides) dl_val = DataLoader2D(self.dataset_val, 
self.patch_size, self.patch_size, self.batch_size, transpose=self.plans.get('transpose_forward'), oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides) return dl_tr, dl_val def initialize(self, training=True, force_load_plans=False): """ For prediction of test cases just set training=False, this will prevent loading of training data and training batchgenerator initialization :param training: :return: """ maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") else: print("INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_no_augmentation(self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) self.was_initialized = True self.data_aug_params['mirror_axes'] = ()
4,742
50.554348
117
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/profiling/nnUNetTrainerV2_dummyLoad.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Tuple

import torch
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.training.network_training.nnUNet_variants.architectural_variants.nnUNetTrainerV2_noDeepSupervision import \
    nnUNetTrainerV2_noDeepSupervision
from nnunet.training.network_training.nnUNet_variants.profiling.nnUNetTrainerV2_2epochs import nnUNetTrainerV2_5epochs
from torch.cuda.amp import autocast
from torch.nn.utils import clip_grad_norm_
import numpy as np
from torch import nn


class nnUNetTrainerV2_5epochs_dummyLoad(nnUNetTrainerV2_5epochs):
    """Profiling trainer: feeds one fixed random batch instead of real data.

    Bypasses the dataloading pipeline entirely so that GPU/compute time can
    be measured in isolation from I/O and augmentation.
    """

    def initialize(self, training=True, force_load_plans=False):
        """Set up as usual, then pre-allocate one random input batch and one
        random ground-truth pyramid (one tensor per deep-supervision scale)
        directly on the GPU."""
        super().initialize(training, force_load_plans)
        self.some_batch = torch.rand((self.batch_size, self.num_input_channels, *self.patch_size)).float().cuda()
        # one target per deep-supervision resolution; spatial dims scaled by the per-axis factors in k
        self.some_gt = [torch.round(torch.rand((self.batch_size, 1,
                                                *[int(i * j) for i, j in zip(self.patch_size, k)])) *
                                    (self.num_classes - 1)).float().cuda() for k in self.deep_supervision_scales]

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One forward/backward pass on the cached dummy batch.

        data_generator is ignored (signature kept for API compatibility).
        Returns the detached loss as a numpy scalar.
        """
        data = self.some_batch
        target = self.some_gt

        self.optimizer.zero_grad()

        if self.fp16:
            with autocast():
                output = self.network(data)
                del data
                l = self.loss(output, target)

            if do_backprop:
                self.amp_grad_scaler.scale(l).backward()
                self.amp_grad_scaler.unscale_(self.optimizer)
                # gradient clipping at norm 12 (nnU-Net default)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            l = self.loss(output, target)

            if do_backprop:
                l.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()

        if run_online_evaluation:
            self.run_online_evaluation(output, target)

        del target

        return l.detach().cpu().numpy()


class nnUNetTrainerV2_5epochs_dummyLoadCEnoDS(nnUNetTrainerV2_noDeepSupervision):
    """Profiling trainer: dummy batch, plain cross-entropy loss, no deep supervision.

    Validation, prediction and checkpointing are stubbed out because this
    class exists only for timing runs.
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.max_num_epochs = 5
        self.loss = RobustCrossEntropyLoss()

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs=None):
        # intentionally disabled for profiling runs
        pass

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True,
                                                         mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        # intentionally disabled for profiling runs
        pass

    def save_checkpoint(self, fname, save_optimizer=True):
        # intentionally disabled for profiling runs
        pass

    def initialize(self, training=True, force_load_plans=False):
        """Set up as usual, then pre-allocate one random input batch and one
        random class-index target (long tensor, no channel dim) on the GPU."""
        super().initialize(training, force_load_plans)
        self.some_batch = torch.rand((self.batch_size, self.num_input_channels, *self.patch_size)).float().cuda()
        self.some_gt = torch.round(
            torch.rand((self.batch_size, *self.patch_size)) * (self.num_classes - 1)).long().cuda()

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One forward/backward pass on the cached dummy batch with plain CE loss."""
        data = self.some_batch
        target = self.some_gt

        self.optimizer.zero_grad()

        output = self.network(data)
        del data
        loss = self.loss(output, target)

        if run_online_evaluation:
            self.run_online_evaluation(output, target)
        del target

        if do_backprop:
            # NOTE(review): `amp` (NVIDIA apex) is never imported in this file, so
            # evaluating `amp is None` raises NameError as soon as self.fp16 is True
            # and CUDA is available. Looks like leftover apex-era code that should
            # use the torch.cuda.amp path like the class above — confirm intent.
            if not self.fp16 or amp is None or not torch.cuda.is_available():
                loss.backward()
            else:
                with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                    scaled_loss.backward()
            _ = clip_grad_norm_(self.network.parameters(), 12)
            self.optimizer.step()

        return loss.detach().cpu().numpy()

    def run_online_evaluation(self, output, target):
        # no online evaluation during profiling
        pass

    def finish_online_evaluation(self):
        # no online evaluation during profiling
        pass
5,758
42.300752
199
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/profiling/nnUNetTrainerV2_2epochs.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Tuple

import numpy as np
import torch
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from nnunet.training.network_training.nnUNetTrainerV2_DDP import nnUNetTrainerV2_DDP
from nnunet.training.network_training.nnUNet_variants.architectural_variants.nnUNetTrainerV2_noDeepSupervision import \
    nnUNetTrainerV2_noDeepSupervision
from nnunet.utilities.to_torch import maybe_to_torch, to_cuda
from torch.cuda.amp import autocast


class nnUNetTrainerV2_2epochs(nnUNetTrainerV2):
    """Profiling variant: trains for only 2 epochs; validation, prediction
    and checkpointing are disabled."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.max_num_epochs = 2

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs=None):
        # intentionally disabled for profiling runs
        pass

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True,
                                                         mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        # intentionally disabled for profiling runs
        pass

    def save_checkpoint(self, fname, save_optimizer=True):
        # intentionally disabled for profiling runs
        pass


class nnUNetTrainerV2_5epochs(nnUNetTrainerV2):
    """Profiling variant: trains for only 5 epochs; validation, prediction
    and checkpointing are disabled."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.max_num_epochs = 5

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs=None):
        # intentionally disabled for profiling runs
        pass

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True,
                                                         mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        # intentionally disabled for profiling runs
        pass

    def save_checkpoint(self, fname, save_optimizer=True):
        # intentionally disabled for profiling runs
        pass


class nnUNetTrainerV2_5epochs_CEnoDS(nnUNetTrainerV2_noDeepSupervision):
    """5-epoch profiling variant with plain cross-entropy loss and no deep
    supervision. Targets are converted to long class indices (channel dim
    stripped) as required by CE."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.max_num_epochs = 5
        self.loss = RobustCrossEntropyLoss()

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs=None):
        # intentionally disabled for profiling runs
        pass

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True,
                                                         mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        # intentionally disabled for profiling runs
        pass

    def save_checkpoint(self, fname, save_optimizer=True):
        # intentionally disabled for profiling runs
        pass

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One training iteration on real data with CE loss.

        Returns the detached loss as a numpy scalar.
        """
        data_dict = next(data_generator)
        data = data_dict['data']
        target = data_dict['target']

        data = maybe_to_torch(data)
        # CE expects class indices: cast to long and drop the channel dimension
        target = maybe_to_torch(target).long()[:, 0]

        if torch.cuda.is_available():
            data = to_cuda(data)
            target = to_cuda(target)

        self.optimizer.zero_grad()

        if self.fp16:
            with autocast():
                output = self.network(data)
                del data
                l = self.loss(output, target)

            if do_backprop:
                self.amp_grad_scaler.scale(l).backward()
                self.amp_grad_scaler.unscale_(self.optimizer)
                # gradient clipping at norm 12 (nnU-Net default)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            l = self.loss(output, target)

            if do_backprop:
                l.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()

        if run_online_evaluation:
            self.run_online_evaluation(output, target)

        del target

        return l.detach().cpu().numpy()

    def run_online_evaluation(self, output, target):
        # no online evaluation during profiling
        pass

    def finish_online_evaluation(self):
        # no online evaluation during profiling
        pass


class nnUNetTrainerV2_5epochs_noDS(nnUNetTrainerV2_noDeepSupervision):
    """5-epoch profiling variant without deep supervision (default loss kept,
    so targets keep their channel dimension)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.max_num_epochs = 5

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs=None):
        # intentionally disabled for profiling runs
        pass

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True,
                                                         mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        # intentionally disabled for profiling runs
        pass

    def save_checkpoint(self, fname, save_optimizer=True):
        # intentionally disabled for profiling runs
        pass

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One training iteration on real data; same as the CE variant but the
        target is passed to the loss unmodified."""
        data_dict = next(data_generator)
        data = data_dict['data']
        target = data_dict['target']

        data = maybe_to_torch(data)
        target = maybe_to_torch(target)

        if torch.cuda.is_available():
            data = to_cuda(data)
            target = to_cuda(target)

        self.optimizer.zero_grad()

        if self.fp16:
            with autocast():
                output = self.network(data)
                del data
                l = self.loss(output, target)

            if do_backprop:
                self.amp_grad_scaler.scale(l).backward()
                self.amp_grad_scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            l = self.loss(output, target)

            if do_backprop:
                l.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()

        if run_online_evaluation:
            self.run_online_evaluation(output, target)

        del target

        return l.detach().cpu().numpy()

    def run_online_evaluation(self, output, target):
        # no online evaluation during profiling
        pass

    def finish_online_evaluation(self):
        # no online evaluation during profiling
        pass


class nnUNetTrainerV2_DDP_5epochs(nnUNetTrainerV2_DDP):
    """DDP profiling variant: 5 epochs; validation, prediction and
    checkpointing disabled."""

    def __init__(self, plans_file, fold, local_rank, output_folder=None, dataset_directory=None, batch_dice=True,
                 stage=None, unpack_data=True, deterministic=True, distribute_batch_size=False, fp16=False):
        super().__init__(plans_file, fold, local_rank, output_folder, dataset_directory, batch_dice, stage,
                         unpack_data, deterministic, distribute_batch_size, fp16)
        self.max_num_epochs = 5

    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs=None):
        # intentionally disabled for profiling runs
        pass

    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True,
                                                         mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        # intentionally disabled for profiling runs
        pass

    def save_checkpoint(self, fname, save_optimizer=True):
        # intentionally disabled for profiling runs
        pass


class nnUNetTrainerV2_DDP_5epochs_dummyLoad(nnUNetTrainerV2_DDP_5epochs):
    """DDP profiling variant feeding one fixed random batch (no dataloading).

    Uses self.compute_loss (the DDP trainer's loss entry point) instead of
    self.loss.
    """

    def initialize(self, training=True, force_load_plans=False):
        """Set up as usual, then pre-allocate one random input batch and one
        random ground-truth pyramid (one tensor per deep-supervision scale)
        on the GPU."""
        super().initialize(training, force_load_plans)
        self.some_batch = torch.rand((self.batch_size, self.num_input_channels, *self.patch_size)).float().cuda()
        self.some_gt = [torch.round(torch.rand((self.batch_size, 1,
                                                *[int(i * j) for i, j in zip(self.patch_size, k)])) * (
                                            self.num_classes - 1)).float().cuda() for k in
                        self.deep_supervision_scales]

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One forward/backward pass on the cached dummy batch.

        data_generator is ignored (signature kept for API compatibility).
        Returns the detached loss as a numpy scalar.
        """
        data = self.some_batch
        target = self.some_gt

        self.optimizer.zero_grad()

        if self.fp16:
            with autocast():
                output = self.network(data)
                del data
                l = self.compute_loss(output, target)

            if do_backprop:
                self.amp_grad_scaler.scale(l).backward()
                self.amp_grad_scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            l = self.compute_loss(output, target)

            if do_backprop:
                l.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()

        if run_online_evaluation:
            self.run_online_evaluation(output, target)

        del target

        return l.detach().cpu().numpy()
13,888
46.40273
134
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum09.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 class nnUNetTrainerV2_momentum09(nnUNetTrainerV2): def initialize_optimizer_and_scheduler(self): assert self.network is not None, "self.initialize_network must be called first" self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, momentum=0.9, nesterov=True) self.lr_scheduler = None
1,192
43.185185
116
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_reduceMomentumDuringTraining.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 class nnUNetTrainerV2_reduceMomentumDuringTraining(nnUNetTrainerV2): """ This implementation will not work with LR scheduler!!!!!!!!!! After epoch 800, linearly decrease momentum from 0.99 to 0.9 """ def initialize_optimizer_and_scheduler(self): current_momentum = 0.99 min_momentum = 0.9 if self.epoch > 800: current_momentum = current_momentum - (current_momentum - min_momentum) / 200 * (self.epoch - 800) self.print_to_log_file("current momentum", current_momentum) assert self.network is not None, "self.initialize_network must be called first" if self.optimizer is None: self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, momentum=0.99, nesterov=True) else: # can't reinstantiate because that would break NVIDIA AMP self.optimizer.param_groups[0]["momentum"] = current_momentum self.lr_scheduler = None def on_epoch_end(self): self.initialize_optimizer_and_scheduler() return super().on_epoch_end()
1,947
40.446809
120
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_SGD_ReduceOnPlateau.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from torch.optim import lr_scheduler


class nnUNetTrainerV2_SGD_ReduceOnPlateau(nnUNetTrainerV2):
    """nnUNetTrainerV2 with SGD + ReduceLROnPlateau instead of the poly LR
    schedule (i.e. the LR schedule of the original nnUNetTrainer)."""

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)

    def initialize_optimizer_and_scheduler(self):
        """SGD (momentum 0.99, nesterov) with a ReduceLROnPlateau scheduler
        driven by the moving-average training loss."""
        self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
                                         momentum=0.99, nesterov=True)
        self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,
                                                           patience=self.lr_scheduler_patience,
                                                           verbose=True, threshold=self.lr_scheduler_eps,
                                                           threshold_mode="abs")

    def maybe_update_lr(self, epoch=None):
        """Step the scheduler and log the resulting learning rate.

        :param epoch: unused for ReduceLROnPlateau; non-plateau schedulers are
            stepped with self.epoch + 1.
        """
        # maybe update learning rate
        if self.lr_scheduler is not None:
            assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))

            if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
                # lr scheduler is updated with moving average val loss. should be more robust
                # BUGFIX/consistency: also require train_loss_MA to be set, matching
                # nnUNetTrainerV2_Adam_ReduceOnPlateau; stepping the plateau scheduler
                # with None would fail inside ReduceLROnPlateau.
                if self.epoch > 0 and self.train_loss_MA is not None:  # otherwise self.train_loss_MA is None
                    self.lr_scheduler.step(self.train_loss_MA)
            else:
                self.lr_scheduler.step(self.epoch + 1)
        self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))

    def on_epoch_end(self):
        """Use nnUNetTrainer's epoch-end logic (it drives the plateau scheduler)."""
        return nnUNetTrainer.on_epoch_end(self)
2,707
52.098039
116
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum09in2D.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 class nnUNetTrainerV2_momentum09in2D(nnUNetTrainerV2): def initialize_optimizer_and_scheduler(self): if self.threeD: momentum = 0.99 else: momentum = 0.9 assert self.network is not None, "self.initialize_network must be called first" self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, momentum=momentum, nesterov=True) self.lr_scheduler = None
1,293
42.133333
116
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum095.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 class nnUNetTrainerV2_momentum095(nnUNetTrainerV2): def initialize_optimizer_and_scheduler(self): assert self.network is not None, "self.initialize_network must be called first" self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, momentum=0.95, nesterov=True) self.lr_scheduler = None
1,194
43.259259
116
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Adam_ReduceOnPlateau.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from torch.optim import lr_scheduler


class nnUNetTrainerV2_Adam_ReduceOnPlateau(nnUNetTrainerV2):
    """
    Same schedule as nnUNetTrainer
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        # Adam needs a much smaller LR than nnU-Net's SGD default
        self.initial_lr = 3e-4

    def initialize_optimizer_and_scheduler(self):
        """Adam (amsgrad) with a ReduceLROnPlateau scheduler driven by the
        moving-average training loss."""
        assert self.network is not None, "self.initialize_network must be called first"
        self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr,
                                          weight_decay=self.weight_decay, amsgrad=True)
        self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,
                                                           patience=self.lr_scheduler_patience,
                                                           verbose=True, threshold=self.lr_scheduler_eps,
                                                           threshold_mode="abs")

    def maybe_update_lr(self, epoch=None):
        """Step the scheduler and log the resulting learning rate.

        :param epoch: unused for ReduceLROnPlateau; non-plateau schedulers are
            stepped with self.epoch + 1.
        """
        # maybe update learning rate
        if self.lr_scheduler is not None:
            assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))

            if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
                # lr scheduler is updated with moving average val loss. should be more robust
                # (the None check protects the first call, before a moving average exists)
                if self.epoch > 0 and self.train_loss_MA is not None:  # otherwise self.train_loss_MA is None
                    self.lr_scheduler.step(self.train_loss_MA)
            else:
                self.lr_scheduler.step(self.epoch + 1)
        self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))

    def on_epoch_end(self):
        # use nnUNetTrainer's epoch-end logic, which drives the plateau scheduler
        return nnUNetTrainer.on_epoch_end(self)
2,899
50.785714
117
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_momentum098.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 class nnUNetTrainerV2_momentum098(nnUNetTrainerV2): def initialize_optimizer_and_scheduler(self): assert self.network is not None, "self.initialize_network must be called first" self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, momentum=0.98, nesterov=True) self.lr_scheduler = None
1,194
43.259259
116
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/optimizer_and_lr/nnUNetTrainerV2_Adam.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 class nnUNetTrainerV2_Adam(nnUNetTrainerV2): def initialize_optimizer_and_scheduler(self): self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, amsgrad=True) self.lr_scheduler = None nnUNetTrainerV2_Adam_copy1 = nnUNetTrainerV2_Adam nnUNetTrainerV2_Adam_copy2 = nnUNetTrainerV2_Adam nnUNetTrainerV2_Adam_copy3 = nnUNetTrainerV2_Adam nnUNetTrainerV2_Adam_copy4 = nnUNetTrainerV2_Adam
1,245
39.193548
131
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_insaneDA.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
from batchgenerators.utilities.file_and_folder_operations import join, maybe_mkdir_p
from nnunet.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \
    default_2D_augmentation_params, get_patch_size, get_insaneDA_augmentation
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
from torch import nn


class nnUNetTrainerV2_insaneDA(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant with much more aggressive data augmentation
    (wider rotations/scaling, elastic deformation enabled, wider gamma)."""

    def setup_DA_params(self):
        """Configure deep-supervision scales and the aggressive augmentation
        parameters, then compute the generator patch size that leaves room
        for the spatial transforms."""
        # one scale entry per resolution level; cumulative downsampling factors, lowest level dropped
        self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
            np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]

        if self.threeD:
            self.data_aug_params = default_3D_augmentation_params
            # +-30 degrees around every axis (expressed in radians)
            self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            if self.do_dummy_2D_aug:
                # anisotropic data: do the spatial transforms in-plane only
                self.data_aug_params["dummy_2D"] = True
                self.print_to_log_file("Using dummy2d data augmentation")
                self.data_aug_params["elastic_deform_alpha"] = \
                    default_2D_augmentation_params["elastic_deform_alpha"]
                self.data_aug_params["elastic_deform_sigma"] = \
                    default_2D_augmentation_params["elastic_deform_sigma"]
                self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
        else:
            self.do_dummy_2D_aug = False
            # strongly anisotropic 2D patches get the full +-180 degree in-plane rotation
            if max(self.patch_size) / min(self.patch_size) > 1.5:
                default_2D_augmentation_params['rotation_x'] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi)
            self.data_aug_params = default_2D_augmentation_params
        self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm

        if self.do_dummy_2D_aug:
            # compute the in-plane generator patch size, then prepend the untouched first axis
            self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
                                                             self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
            patch_size_for_spatialtransform = self.patch_size[1:]
        else:
            self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            patch_size_for_spatialtransform = self.patch_size

        # the "insane" part: wider scaling, elastic deformation on, wider gamma
        self.data_aug_params["scale_range"] = (0.65, 1.6)
        self.data_aug_params["do_elastic"] = True
        self.data_aug_params["elastic_deform_alpha"] = (0., 1300.)
        self.data_aug_params["elastic_deform_sigma"] = (9., 15.)
        self.data_aug_params["p_eldef"] = 0.2

        self.data_aug_params['selected_seg_channels'] = [0]

        self.data_aug_params['gamma_range'] = (0.6, 2)

        self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform

    def initialize(self, training=True, force_load_plans=False):
        """Set up plans, deep-supervision loss wrapping, dataloaders and the
        insaneDA augmentation pipeline.

        :param training: if False, only the network is initialized (for inference)
        :param force_load_plans: reload the plans file even if already loaded
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)

            if force_load_plans or (self.plans is None):
                self.load_plans_file()

            self.process_plans(self.plans)

            self.setup_DA_params()

            ################# Here we wrap the loss for deep supervision ############
            # we need to know the number of outputs of the network
            net_numpool = len(self.net_num_pool_op_kernel_sizes)

            # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
            # this gives higher resolution outputs more weight in the loss
            weights = np.array([1 / (2 ** i) for i in range(net_numpool)])

            # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
            # NOTE(review): the mask below zeroes only the single lowest output
            # (i == net_numpool - 1), not the lowest two as the comment says — confirm intent.
            mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)])
            weights[~mask] = 0
            weights = weights / weights.sum()
            # now wrap the loss
            self.loss = MultipleOutputLoss2(self.loss, weights)
            ################# END ###################

            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print("done")
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")

                self.tr_gen, self.val_gen = get_insaneDA_augmentation(self.dl_tr, self.dl_val,
                                                                      self.data_aug_params[
                                                                          'patch_size_for_spatialtransform'],
                                                                      self.data_aug_params,
                                                                      deep_supervision_scales=self.deep_supervision_scales,
                                                                      pin_memory=self.pin_memory)
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                pass

            self.initialize_network()
            self.initialize_optimizer_and_scheduler()

            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True
7,799
54.319149
123
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_noDA.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Tuple import numpy as np from batchgenerators.utilities.file_and_folder_operations import join, maybe_mkdir_p from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.training.data_augmentation.default_data_augmentation import get_no_augmentation from nnunet.training.dataloading.dataset_loading import unpack_dataset, DataLoader3D, DataLoader2D from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2 from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from torch import nn class nnUNetTrainerV2_noDataAugmentation(nnUNetTrainerV2): def setup_DA_params(self): super().setup_DA_params() # important because we need to know in validation and inference that we did not mirror in training self.data_aug_params["do_mirror"] = False self.data_aug_params["mirror_axes"] = tuple() def get_basic_generators(self): self.load_dataset() self.do_split() if self.threeD: dl_tr = DataLoader3D(self.dataset_tr, self.patch_size, self.patch_size, self.batch_size, False, oversample_foreground_percent=self.oversample_foreground_percent , pad_mode="constant", pad_sides=self.pad_all_sides) dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False, oversample_foreground_percent=self.oversample_foreground_percent, 
pad_mode="constant", pad_sides=self.pad_all_sides) else: dl_tr = DataLoader2D(self.dataset_tr, self.patch_size, self.patch_size, self.batch_size, transpose=self.plans.get('transpose_forward'), oversample_foreground_percent=self.oversample_foreground_percent , pad_mode="constant", pad_sides=self.pad_all_sides) dl_val = DataLoader2D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, transpose=self.plans.get('transpose_forward'), oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides) return dl_tr, dl_val def initialize(self, training=True, force_load_plans=False): if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() # now wrap the loss self.loss = MultipleOutputLoss2(self.loss, weights) ################# END ################### self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") else: print( "INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_no_augmentation(self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, pin_memory=self.pin_memory) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, segmentation_export_kwargs: dict = None): """ We need to wrap this because we need to enforce self.network.do_ds = False for prediction """ ds = self.network.do_ds if do_mirroring: print("WARNING! do_mirroring was True but we cannot do that because we trained without mirroring. 
" "do_mirroring was set to False") do_mirroring = False self.network.do_ds = False ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian, overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug, all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs) self.network.do_ds = ds return ret nnUNetTrainerV2_noDataAugmentation_copy1 = nnUNetTrainerV2_noDataAugmentation nnUNetTrainerV2_noDataAugmentation_copy2 = nnUNetTrainerV2_noDataAugmentation nnUNetTrainerV2_noDataAugmentation_copy3 = nnUNetTrainerV2_noDataAugmentation nnUNetTrainerV2_noDataAugmentation_copy4 = nnUNetTrainerV2_noDataAugmentation
7,886
53.770833
121
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/data_augmentation/nnUNetTrainerV2_DA3.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from batchgenerators.dataloading import MultiThreadedAugmenter, SingleThreadedAugmenter from batchgenerators.transforms import Compose, MirrorTransform, GammaTransform, BrightnessTransform, \ SimulateLowResolutionTransform, ContrastAugmentationTransform, BrightnessMultiplicativeTransform, \ GaussianBlurTransform, GaussianNoiseTransform, SegChannelSelectionTransform, \ DataChannelSelectionTransform from batchgenerators.transforms.spatial_transforms import SpatialTransform_2 from batchgenerators.transforms.utility_transforms import RemoveLabelTransform, NumpyToTensor, RenameTransform from batchgenerators.utilities.file_and_folder_operations import join from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.training.data_augmentation.custom_transforms import ConvertSegmentationToRegionsTransform, MaskTransform, \ Convert2DTo3DTransform, Convert3DTo2DTransform from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \ default_2D_augmentation_params, get_patch_size from nnunet.training.data_augmentation.downsampling import DownsampleSegForDSTransform3, DownsampleSegForDSTransform2 from 
nnunet.training.data_augmentation.pyramid_augmentations import \ RemoveRandomConnectedComponentFromOneHotEncodingTransform, ApplyRandomBinaryOperatorTransform, MoveSegAsOneHotToData from nnunet.training.dataloading.dataset_loading import unpack_dataset from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2 from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2, maybe_mkdir_p from nnunet.utilities.nd_softmax import softmax_helper from torch import nn import numpy as np def get_insaneDA_augmentation2(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params, border_val_seg=-1, seeds_train=None, seeds_val=None, order_seg=1, order_data=3, deep_supervision_scales=None, soft_ds=False, classes=None, pin_memory=True, regions=None): assert params.get('mirror') is None, "old version of params, use new keyword do_mirror" tr_transforms = [] if params.get("selected_data_channels") is not None: tr_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels"))) if params.get("selected_seg_channels") is not None: tr_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels"))) # don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!! 
if params.get("dummy_2D") is not None and params.get("dummy_2D"): ignore_axes = (0,) tr_transforms.append(Convert3DTo2DTransform()) else: ignore_axes = None tr_transforms.append(SpatialTransform_2( patch_size, patch_center_dist_from_border=None, do_elastic_deform=params.get("do_elastic"), deformation_scale=params.get("eldef_deformation_scale"), do_rotation=params.get("do_rotation"), angle_x=params.get("rotation_x"), angle_y=params.get("rotation_y"), angle_z=params.get("rotation_z"), do_scale=params.get("do_scaling"), scale=params.get("scale_range"), border_mode_data=params.get("border_mode_data"), border_cval_data=0, order_data=order_data, border_mode_seg="constant", border_cval_seg=border_val_seg, order_seg=order_seg, random_crop=params.get("random_crop"), p_el_per_sample=params.get("p_eldef"), p_scale_per_sample=params.get("p_scale"), p_rot_per_sample=params.get("p_rot"), independent_scale_for_each_axis=params.get("independent_scale_factor_for_each_axis"), p_independent_scale_per_axis=params.get("p_independent_scale_per_axis") )) if params.get("dummy_2D"): tr_transforms.append(Convert2DTo3DTransform()) # we need to put the color augmentations after the dummy 2d part (if applicable). 
Otherwise the overloaded color # channel gets in the way tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.15)) tr_transforms.append(GaussianBlurTransform((0.5, 1.5), different_sigma_per_channel=True, p_per_sample=0.2, p_per_channel=0.5)) tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.70, 1.3), p_per_sample=0.15)) tr_transforms.append(ContrastAugmentationTransform(contrast_range=(0.65, 1.5), p_per_sample=0.15)) tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True, p_per_channel=0.5, order_downsample=0, order_upsample=3, p_per_sample=0.25, ignore_axes=ignore_axes)) tr_transforms.append( GammaTransform(params.get("gamma_range"), True, True, retain_stats=params.get("gamma_retain_stats"), p_per_sample=0.15)) # inverted gamma if params.get("do_additive_brightness"): tr_transforms.append(BrightnessTransform(params.get("additive_brightness_mu"), params.get("additive_brightness_sigma"), True, p_per_sample=params.get("additive_brightness_p_per_sample"), p_per_channel=params.get("additive_brightness_p_per_channel"))) if params.get("do_gamma"): tr_transforms.append( GammaTransform(params.get("gamma_range"), False, True, retain_stats=params.get("gamma_retain_stats"), p_per_sample=params["p_gamma"])) if params.get("do_mirror") or params.get("mirror"): tr_transforms.append(MirrorTransform(params.get("mirror_axes"))) if params.get("mask_was_used_for_normalization") is not None: mask_was_used_for_normalization = params.get("mask_was_used_for_normalization") tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0)) tr_transforms.append(RemoveLabelTransform(-1, 0)) if params.get("move_last_seg_chanel_to_data") is not None and params.get("move_last_seg_chanel_to_data"): tr_transforms.append(MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"), 'seg', 'data')) if params.get("cascade_do_cascade_augmentations") and not None and params.get( 
"cascade_do_cascade_augmentations"): if params.get("cascade_random_binary_transform_p") > 0: tr_transforms.append(ApplyRandomBinaryOperatorTransform( channel_idx=list(range(-len(params.get("all_segmentation_labels")), 0)), p_per_sample=params.get("cascade_random_binary_transform_p"), key="data", strel_size=params.get("cascade_random_binary_transform_size"))) if params.get("cascade_remove_conn_comp_p") > 0: tr_transforms.append( RemoveRandomConnectedComponentFromOneHotEncodingTransform( channel_idx=list(range(-len(params.get("all_segmentation_labels")), 0)), key="data", p_per_sample=params.get("cascade_remove_conn_comp_p"), fill_with_other_class_p=params.get("cascade_remove_conn_comp_max_size_percent_threshold"), dont_do_if_covers_more_than_X_percent=params.get( "cascade_remove_conn_comp_fill_with_other_class_p"))) tr_transforms.append(RenameTransform('seg', 'target', True)) if regions is not None: tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target')) if deep_supervision_scales is not None: if soft_ds: assert classes is not None tr_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes)) else: tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target', output_key='target')) tr_transforms.append(NumpyToTensor(['data', 'target'], 'float')) tr_transforms = Compose(tr_transforms) batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'), params.get("num_cached_per_thread"), seeds=seeds_train, pin_memory=pin_memory) #batchgenerator_train = SingleThreadedAugmenter(dataloader_train, tr_transforms) val_transforms = [] val_transforms.append(RemoveLabelTransform(-1, 0)) if params.get("selected_data_channels") is not None: val_transforms.append(DataChannelSelectionTransform(params.get("selected_data_channels"))) if params.get("selected_seg_channels") is not None: 
val_transforms.append(SegChannelSelectionTransform(params.get("selected_seg_channels"))) if params.get("move_last_seg_chanel_to_data") is not None and params.get("move_last_seg_chanel_to_data"): val_transforms.append(MoveSegAsOneHotToData(1, params.get("all_segmentation_labels"), 'seg', 'data')) val_transforms.append(RenameTransform('seg', 'target', True)) if regions is not None: val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target')) if deep_supervision_scales is not None: if soft_ds: assert classes is not None val_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes)) else: val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, 0, input_key='target', output_key='target')) val_transforms.append(NumpyToTensor(['data', 'target'], 'float')) val_transforms = Compose(val_transforms) batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms, max(params.get('num_threads') // 2, 1), params.get("num_cached_per_thread"), seeds=seeds_val, pin_memory=pin_memory) return batchgenerator_train, batchgenerator_val class nnUNetTrainerV2_DA3(nnUNetTrainerV2): def setup_DA_params(self): super().setup_DA_params() self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod( np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1] if self.threeD: self.data_aug_params = default_3D_augmentation_params self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. 
* np.pi) if self.do_dummy_2D_aug: self.data_aug_params["dummy_2D"] = True self.print_to_log_file("Using dummy2d data augmentation") self.data_aug_params["elastic_deform_alpha"] = \ default_2D_augmentation_params["elastic_deform_alpha"] self.data_aug_params["elastic_deform_sigma"] = \ default_2D_augmentation_params["elastic_deform_sigma"] self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"] else: self.do_dummy_2D_aug = False if max(self.patch_size) / min(self.patch_size) > 1.5: default_2D_augmentation_params['rotation_x'] = (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi) self.data_aug_params = default_2D_augmentation_params self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size)) patch_size_for_spatialtransform = self.patch_size[1:] else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) patch_size_for_spatialtransform = self.patch_size self.data_aug_params['selected_seg_channels'] = [0] self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform self.data_aug_params["p_rot"] = 0.3 self.data_aug_params["scale_range"] = (0.65, 1.6) self.data_aug_params["p_scale"] = 0.3 self.data_aug_params["independent_scale_factor_for_each_axis"] = True self.data_aug_params["p_independent_scale_per_axis"] = 0.3 self.data_aug_params["do_elastic"] = True self.data_aug_params["p_eldef"] = 0.3 self.data_aug_params["eldef_deformation_scale"] = (0, 0.25) self.data_aug_params["do_additive_brightness"] = True 
self.data_aug_params["additive_brightness_mu"] = 0 self.data_aug_params["additive_brightness_sigma"] = 0.2 self.data_aug_params["additive_brightness_p_per_sample"] = 0.3 self.data_aug_params["additive_brightness_p_per_channel"] = 1 self.data_aug_params['gamma_range'] = (0.5, 1.6) self.data_aug_params['num_cached_per_thread'] = 4 def initialize(self, training=True, force_load_plans=False): if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights # now wrap the loss self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights) ################# END ################### self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") else: print( "INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_insaneDA_augmentation2( self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, pin_memory=self.pin_memory ) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True """def run_training(self): from batchviewer import view_batch a = next(self.tr_gen) view_batch(a['data'][:, 0], width=512, height=512) import IPython;IPython.embed()""" class nnUNetTrainerV2_DA3_BN(nnUNetTrainerV2_DA3): def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.BatchNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.BatchNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
19,740
55.402857
120
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_lReLU_biasInSegOutput.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_lReLU_biasInSegOutput(nnUNetTrainerV2): def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True, seg_output_use_bias=True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
2,288
47.702128
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_allConv3x3.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_allConv3x3(nnUNetTrainerV2): def initialize_network(self): """ - momentum 0.99 - SGD instead of Adam - self.lr_scheduler = None because we do poly_lr - deep supervision = True - i am sure I forgot something here Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though :return: """ if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d for s in range(len(self.net_conv_kernel_sizes)): for i in range(len(self.net_conv_kernel_sizes[s])): self.net_conv_kernel_sizes[s][i] = 3 norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, 
dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
2,727
43.721311
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_lReLU_convlReLUIN.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet, ConvDropoutNonlinNorm from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_lReLU_convReLUIN(nnUNetTrainerV2): def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'inplace': True, 'negative_slope': 1e-2} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True, basic_block=ConvDropoutNonlinNorm) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
2,318
48.340426
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_NoNormalization.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.network_architecture.custom_modules.helperModules import Identity from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_NoNormalization(nnUNetTrainerV2): def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = Identity else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = Identity norm_op_kwargs = {} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
2,256
47.021277
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_BN.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_BN(nnUNetTrainerV2): def initialize_network(self): """ changed deep supervision to False :return: """ if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.BatchNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.BatchNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper nnUNetTrainerV2_BN_copy1 = nnUNetTrainerV2_BN nnUNetTrainerV2_BN_copy2 = 
nnUNetTrainerV2_BN nnUNetTrainerV2_BN_copy3 = nnUNetTrainerV2_BN nnUNetTrainerV2_BN_copy4 = nnUNetTrainerV2_BN
2,473
43.178571
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ReLU.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_ReLU(nnUNetTrainerV2): def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.ReLU net_nonlin_kwargs = {'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
2,180
46.413043
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ResencUNet.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Tuple import numpy as np import torch from nnunet.network_architecture.generic_modular_residual_UNet import FabiansUNet, get_default_network_config from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper class nnUNetTrainerV2_ResencUNet(nnUNetTrainerV2): def initialize_network(self): if self.threeD: cfg = get_default_network_config(3, None, norm_type="in") else: cfg = get_default_network_config(1, None, norm_type="in") stage_plans = self.plans['plans_per_stage'][self.stage] conv_kernel_sizes = stage_plans['conv_kernel_sizes'] blocks_per_stage_encoder = stage_plans['num_blocks_encoder'] blocks_per_stage_decoder = stage_plans['num_blocks_decoder'] pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes'] self.network = FabiansUNet(self.num_input_channels, self.base_num_features, blocks_per_stage_encoder, 2, pool_op_kernel_sizes, conv_kernel_sizes, cfg, self.num_classes, blocks_per_stage_decoder, True, False, 320, InitWeights_He(1e-2)) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper def setup_DA_params(self): """ 
net_num_pool_op_kernel_sizes is different in resunet """ super().setup_DA_params() self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod( np.vstack(self.net_num_pool_op_kernel_sizes[1:]), axis=0))[:-1] def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0, segmentation_export_kwargs: dict = None): ds = self.network.decoder.deep_supervision self.network.decoder.deep_supervision = False ret = nnUNetTrainer.validate(self, do_mirroring, use_sliding_window, step_size, save_softmax, use_gaussian, overwrite, validation_folder_name, debug, all_in_gpu, segmentation_export_kwargs) self.network.decoder.deep_supervision = ds return ret def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True, mirror_axes: Tuple[int] = None, use_sliding_window: bool = True, step_size: float = 0.5, use_gaussian: bool = True, pad_border_mode: str = 'constant', pad_kwargs: dict = None, all_in_gpu: bool = False, verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]: ds = self.network.decoder.deep_supervision self.network.decoder.deep_supervision = False ret = nnUNetTrainer.predict_preprocessed_data_return_seg_and_softmax(self, data, do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, use_gaussian=use_gaussian, pad_border_mode=pad_border_mode, pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose, mixed_precision=mixed_precision) self.network.decoder.deep_supervision = ds return ret def run_training(self): self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we # want at the start of 
the training ds = self.network.decoder.deep_supervision self.network.decoder.deep_supervision = True ret = nnUNetTrainer.run_training(self) self.network.decoder.deep_supervision = ds return ret nnUNetTrainerV2_ResencUNet_copy1 = nnUNetTrainerV2_ResencUNet nnUNetTrainerV2_ResencUNet_copy2 = nnUNetTrainerV2_ResencUNet nnUNetTrainerV2_ResencUNet_copy3 = nnUNetTrainerV2_ResencUNet nnUNetTrainerV2_ResencUNet_copy4 = nnUNetTrainerV2_ResencUNet
5,977
57.038835
134
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ReLU_convReLUIN.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet, ConvDropoutNonlinNorm from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_ReLU_convReLUIN(nnUNetTrainerV2): def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.ReLU net_nonlin_kwargs = {'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True, basic_block=ConvDropoutNonlinNorm) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
2,285
47.638298
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_GN.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.network_architecture.custom_modules.helperModules import MyGroupNorm from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_GN(nnUNetTrainerV2): def initialize_network(self): """ changed deep supervision to False :return: """ if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = MyGroupNorm else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = MyGroupNorm norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'num_groups': 8} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = 
softmax_helper
2,379
45.666667
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_softDeepSupervision.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from batchgenerators.utilities.file_and_folder_operations import join, maybe_mkdir_p try: from meddec.model_training.ablation_studies.new_nnUNet_candidates.nnUNetTrainerCandidate23_softDeepSupervision4 import \ MyDSLoss4 except ImportError: MyDSLoss4 = None from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation from nnunet.training.dataloading.dataset_loading import unpack_dataset from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from torch import nn import numpy as np class nnUNetTrainerV2_softDeepSupervision(nnUNetTrainerV2): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.loss = None # we take care of that later def initialize(self, training=True, force_load_plans=False): """ - replaced get_default_augmentation with get_moreDA_augmentation - only run this code once - loss function wrapper for deep supervision :param training: :param force_load_plans: :return: """ if not 
self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True if i < net_numpool - 1 else False for i in range(net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() # now wrap the loss if MyDSLoss4 is None: raise RuntimeError("This aint ready for prime time yet") self.loss = MyDSLoss4(self.batch_dice, weights) #self.loss = MultipleOutputLoss2(self.loss, weights) ################# END ################### self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") else: print( "INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, soft_ds=True, classes=[0] + list(self.classes), pin_memory=self.pin_memory) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def run_online_evaluation(self, output, target): """ due to deep supervision the return value and the reference are now lists of tensors. We only need the full resolution output because this is what we are interested in in the end. The others are ignored :param output: :param target: :return: """ target = target[0][:, None] # we need to restore color channel dimension here to be compatible with previous code output = output[0] return nnUNetTrainer.run_online_evaluation(self, output, target)
6,237
47.734375
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_GeLU.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn try: from torch.nn.functional import gelu except ImportError: gelu = None class GeLU(nn.Module): def __init__(self): super().__init__() if gelu is None: raise ImportError('You need to have at least torch==1.7.0 to use GeLUs') def forward(self, x): return gelu(x) class nnUNetTrainerV2_GeLU(nnUNetTrainerV2): def initialize_network(self): """ - momentum 0.99 - SGD instead of Adam - self.lr_scheduler = None because we do poly_lr - deep supervision = True - ReLU - i am sure I forgot something here Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though :return: """ if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = GeLU net_nonlin_kwargs = {} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), 
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
2,829
37.767123
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_ReLU_biasInSegOutput.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_ReLU_biasInSegOutput(nnUNetTrainerV2): def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.ReLU net_nonlin_kwargs = {'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True, seg_output_use_bias=True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
2,258
47.06383
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_3ConvPerStage.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_3ConvPerStage(nnUNetTrainerV2): def initialize_network(self): self.base_num_features = 24 # otherwise we run out of VRAM if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), 3, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
2,271
47.340426
117
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_noDeepSupervision.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from batchgenerators.utilities.file_and_folder_operations import * from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \ default_2D_augmentation_params, get_patch_size, get_moreDA_augmentation from nnunet.training.dataloading.dataset_loading import unpack_dataset from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn import torch class nnUNetTrainerV2_noDeepSupervision(nnUNetTrainerV2): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}) def setup_DA_params(self): """ we leave out the 
creation of self.deep_supervision_scales, so it remains None :return: """ if self.threeD: self.data_aug_params = default_3D_augmentation_params self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) if self.do_dummy_2D_aug: self.data_aug_params["dummy_2D"] = True self.print_to_log_file("Using dummy2d data augmentation") self.data_aug_params["elastic_deform_alpha"] = \ default_2D_augmentation_params["elastic_deform_alpha"] self.data_aug_params["elastic_deform_sigma"] = \ default_2D_augmentation_params["elastic_deform_sigma"] self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"] else: self.do_dummy_2D_aug = False if max(self.patch_size) / min(self.patch_size) > 1.5: default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi) self.data_aug_params = default_2D_augmentation_params self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size)) patch_size_for_spatialtransform = self.patch_size[1:] else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) patch_size_for_spatialtransform = self.patch_size self.data_aug_params["scale_range"] = (0.7, 1.4) self.data_aug_params["do_elastic"] = False self.data_aug_params['selected_seg_channels'] = [0] 
self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform def initialize(self, training=True, force_load_plans=False): """ removed deep supervision :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") else: print( "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you " "will wait all winter for your model to finish!") assert self.deep_supervision_scales is None self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, classes=None, pin_memory=self.pin_memory) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def initialize_network(self): """ changed deep supervision to False :return: """ if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU 
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper def run_online_evaluation(self, output, target): return nnUNetTrainer.run_online_evaluation(self, output, target)
8,908
52.668675
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_LReLU_slope_2en1.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_LReLU_slope_2en1(nnUNetTrainerV2): def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'inplace': True, 'negative_slope': 2e-1} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
2,221
47.304348
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_Mish.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn from nnunet.network_architecture.custom_modules.mish import Mish class nnUNetTrainerV2_Mish(nnUNetTrainerV2): def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = Mish net_nonlin_kwargs = {} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(0), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
2,228
45.4375
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_3ConvPerStage_samefilters.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn class nnUNetTrainerV2_3ConvPerStageSameFilters(nnUNetTrainerV2): def initialize_network(self): if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), 3, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper
2,214
47.152174
117
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/architectural_variants/nnUNetTrainerV2_FRN.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nnunet.network_architecture.custom_modules.feature_response_normalization import FRN3D from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from nnunet.utilities.nd_softmax import softmax_helper from torch import nn from nnunet.network_architecture.custom_modules.helperModules import Identity import torch class nnUNetTrainerV2_FRN(nnUNetTrainerV2): def initialize_network(self): """ changed deep supervision to False :return: """ if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = FRN3D else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d raise NotImplementedError norm_op = nn.BatchNorm2d norm_op_kwargs = {'eps': 1e-6} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = Identity net_nonlin_kwargs = {} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) if torch.cuda.is_available(): self.network.cuda() 
self.network.inference_apply_nonlin = softmax_helper
2,430
43.2
124
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/miscellaneous/nnUNetTrainerV2_fullEvals.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from multiprocessing.pool import Pool from time import time import numpy as np import torch from nnunet.configuration import default_num_threads from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from batchgenerators.utilities.file_and_folder_operations import * from nnunet.evaluation.region_based_evaluation import evaluate_regions, get_brats_regions class nnUNetTrainerV2_fullEvals(nnUNetTrainerV2): """ this trainer only works for brats and nothing else """ def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.validate_every = 1 self.evaluation_regions = get_brats_regions() self.num_val_batches_per_epoch = 0 # we dont need this because this does not evaluate on full images def finish_online_evaluation(self): pass def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, 
force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0): """ disable nnunet postprocessing. this would just waste computation time and does not benefit brats !!!We run this with use_sliding_window=False per default (see on_epoch_end). This triggers fully convolutional inference. THIS ONLY MAKES SENSE WHEN TRAINING ON FULL IMAGES! Make sure use_sliding_window=True when running with default patch size (128x128x128)!!! per default this does not use test time data augmentation (mirroring). The reference implementation, however, does. I disabled it here because this eats up a lot of computation time """ validation_start = time() current_mode = self.network.training self.network.eval() assert self.was_initialized, "must initialize, ideally with checkpoint (or train first)" if self.dataset_val is None: self.load_dataset() self.do_split() # predictions as they come from the network go here output_folder = join(self.output_folder, validation_folder_name) maybe_mkdir_p(output_folder) # this is for debug purposes my_input_args = {'do_mirroring': do_mirroring, 'use_sliding_window': use_sliding_window, 'step_size': step_size, 'save_softmax': save_softmax, 'use_gaussian': use_gaussian, 'overwrite': overwrite, 'validation_folder_name': validation_folder_name, 'debug': debug, 'all_in_gpu': all_in_gpu, 'force_separate_z': force_separate_z, 'interpolation_order': interpolation_order, 'interpolation_order_z': interpolation_order_z, } save_json(my_input_args, join(output_folder, "validation_args.json")) if do_mirroring: if not self.data_aug_params['do_mirror']: raise RuntimeError("We did not train with mirroring so you cannot do inference with mirroring enabled") mirror_axes = self.data_aug_params['mirror_axes'] else: mirror_axes = () export_pool = Pool(default_num_threads) results = [] for k in self.dataset_val.keys(): properties = load_pickle(self.dataset[k]['properties_file']) fname = properties['list_of_data_files'][0].split("/")[-1][:-12] if 
overwrite or (not isfile(join(output_folder, fname + ".nii.gz"))) or \ (save_softmax and not isfile(join(output_folder, fname + ".npz"))): data = np.load(self.dataset[k]['data_file'])['data'] #print(k, data.shape) softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data[:-1], do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, use_gaussian=use_gaussian, all_in_gpu=all_in_gpu, verbose=False, mixed_precision=self.fp16)[1] # this does not do anything in brats -> remove this line # softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in self.transpose_backward]) if save_softmax: softmax_fname = join(output_folder, fname + ".npz") else: softmax_fname = None results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax, ((softmax_pred, join(output_folder, fname + ".nii.gz"), properties, interpolation_order, None, None, None, softmax_fname, None, force_separate_z, interpolation_order_z, False), ) ) ) _ = [i.get() for i in results] self.print_to_log_file("finished prediction") # evaluate raw predictions self.print_to_log_file("evaluation of raw predictions") # this writes a csv file into output_folder evaluate_regions(output_folder, self.gt_niftis_folder, self.evaluation_regions) csv_file = np.loadtxt(join(output_folder, 'summary.csv'), skiprows=1, dtype=str, delimiter=',')[:, 1:] # these are the values that are compute with np.nanmean aggregation whole, core, enhancing = csv_file[-4, :].astype(float) # do some cleanup if torch.cuda.is_available(): torch.cuda.empty_cache() self.network.train(current_mode) validation_end = time() self.print_to_log_file('Running the validation took %f seconds' % (validation_end - validation_start)) self.print_to_log_file('(the time needed for validation is included in the total epoch time!)') return whole, core, enhancing def on_epoch_end(self): return_value = True # on epoch end is called before the epoch counter is incremented, so we need to do 
that here to get the correct epoch number if (self.epoch + 1) % self.validate_every == 0: whole, core, enhancing = self.validate(do_mirroring=False, use_sliding_window=True, step_size=0.5, save_softmax=False, use_gaussian=True, overwrite=True, validation_folder_name='validation_after_ep_%04.0d' % self.epoch, debug=False, all_in_gpu=True) here = np.mean((whole, core, enhancing)) self.print_to_log_file("After epoch %d: whole %0.4f core %0.4f enhancing: %0.4f" % (self.epoch, whole, core, enhancing)) self.print_to_log_file("Mean: %0.4f" % here) # now we need to figure out if we are done fully_trained_nnunet = (0.911, 0.8739, 0.7848) mean_dice = np.mean(fully_trained_nnunet) target = 0.97 * mean_dice self.all_val_eval_metrics.append(here) self.print_to_log_file("Target mean: %0.4f" % target) if here >= target: self.print_to_log_file("I am done!") self.save_checkpoint(join(self.output_folder, "model_final_checkpoint.model")) return_value = False # this triggers early stopping ret_old = super().on_epoch_end() # if we do not achieve the target accuracy in 1000 epochs then we need to stop the training. This is not built # to run longer than 1000 epochs if not ret_old: return_value = ret_old return return_value
9,982
49.933673
132
py
CoTr
CoTr-main/nnUNet/nnunet/training/network_training/nnUNet_variants/loss_function/nnUNetTrainerV2_focalLoss.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2 from functools import partial import torch.nn.functional as F from torch.nn.modules.loss import _Loss def sigmoid_focal_loss( outputs: torch.Tensor, targets: torch.Tensor, gamma: float = 2.0, alpha: float = 0.25, reduction: str = "mean" ): """ Compute binary focal loss between target and output logits. Source https://github.com/BloodAxe/pytorch-toolbelt See :class:`~pytorch_toolbelt.losses` for details. Args: outputs: Tensor of arbitrary shape targets: Tensor of the same shape as input reduction (string, optional): Specifies the reduction to apply to the output: "none" | "mean" | "sum" | "batchwise_mean". "none": no reduction will be applied, "mean": the sum of the output will be divided by the number of elements in the output, "sum": the output will be summed. 
See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/loss/losses.py # noqa: E501 """ targets = targets.type(outputs.type()) logpt = -F.binary_cross_entropy_with_logits( outputs, targets, reduction="none" ) pt = torch.exp(logpt) # compute the loss loss = -((1 - pt).pow(gamma)) * logpt if alpha is not None: loss = loss * (alpha * targets + (1 - alpha) * (1 - targets)) if reduction == "mean": loss = loss.mean() if reduction == "sum": loss = loss.sum() if reduction == "batchwise_mean": loss = loss.sum(0) return loss def reduced_focal_loss( outputs: torch.Tensor, targets: torch.Tensor, threshold: float = 0.5, gamma: float = 2.0, reduction="mean" ): """ Compute reduced focal loss between target and output logits. Source https://github.com/BloodAxe/pytorch-toolbelt See :class:`~pytorch_toolbelt.losses` for details. Args: outputs: Tensor of arbitrary shape targets: Tensor of the same shape as input reduction (string, optional): Specifies the reduction to apply to the output: "none" | "mean" | "sum" | "batchwise_mean". "none": no reduction will be applied, "mean": the sum of the output will be divided by the number of elements in the output, "sum": the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. "batchwise_mean" computes mean loss per sample in batch. Default: "mean" See https://arxiv.org/abs/1903.01347 """ targets = targets.type(outputs.type()) logpt = -F.binary_cross_entropy_with_logits( outputs, targets, reduction="none" ) pt = torch.exp(logpt) # compute the loss focal_reduction = ((1. 
- pt) / threshold).pow(gamma) focal_reduction[pt < threshold] = 1 loss = -focal_reduction * logpt if reduction == "mean": loss = loss.mean() if reduction == "sum": loss = loss.sum() if reduction == "batchwise_mean": loss = loss.sum(0) return loss class FocalLossBinary(_Loss): def __init__( self, ignore: int = None, reduced: bool = False, gamma: float = 2.0, alpha: float = 0.25, threshold: float = 0.5, reduction: str = "mean", ): """ Compute focal loss for binary classification problem. """ super().__init__() self.ignore = ignore if reduced: self.loss_fn = partial( reduced_focal_loss, gamma=gamma, threshold=threshold, reduction=reduction ) else: self.loss_fn = partial( sigmoid_focal_loss, gamma=gamma, alpha=alpha, reduction=reduction ) def forward(self, logits, targets): """ Args: logits: [bs; ...] targets: [bs; ...] """ targets = targets.view(-1) logits = logits.view(-1) if self.ignore is not None: # Filter predictions with ignore label from loss computation not_ignored = targets != self.ignore logits = logits[not_ignored] targets = targets[not_ignored] loss = self.loss_fn(logits, targets) return loss class FocalLossMultiClass(FocalLossBinary): """ Compute focal loss for multi-class problem. Ignores targets having -1 label """ def forward(self, logits, targets): """ Args: logits: [bs; num_classes; ...] targets: [bs; ...] 
""" num_classes = logits.size(1) loss = 0 targets = targets.view(-1) logits = logits.view(-1, num_classes) # Filter anchors with -1 label from loss computation if self.ignore is not None: not_ignored = targets != self.ignore for cls in range(num_classes): cls_label_target = (targets == (cls + 0)).long() cls_label_input = logits[..., cls] if self.ignore is not None: cls_label_target = cls_label_target[not_ignored] cls_label_input = cls_label_input[not_ignored] loss += self.loss_fn(cls_label_input, cls_label_target) return loss class nnUNetTrainerV2_focalLoss(nnUNetTrainerV2): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.loss = FocalLossMultiClass()
6,748
30.834906
114
py
CoTr
CoTr-main/nnUNet/nnunet/training/data_augmentation/downsampling.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from batchgenerators.augmentations.utils import convert_seg_image_to_one_hot_encoding_batched, resize_segmentation from batchgenerators.transforms import AbstractTransform from torch.nn.functional import avg_pool2d, avg_pool3d import numpy as np class DownsampleSegForDSTransform3(AbstractTransform): ''' returns one hot encodings of the segmentation maps if downsampling has occured (no one hot for highest resolution) downsampled segmentations are smooth, not 0/1 returns torch tensors, not numpy arrays! always uses seg channel 0!! you should always give classes! 
Otherwise weird stuff may happen ''' def __init__(self, ds_scales=(1, 0.5, 0.25), input_key="seg", output_key="seg", classes=None): self.classes = classes self.output_key = output_key self.input_key = input_key self.ds_scales = ds_scales def __call__(self, **data_dict): data_dict[self.output_key] = downsample_seg_for_ds_transform3(data_dict[self.input_key][:, 0], self.ds_scales, self.classes) return data_dict def downsample_seg_for_ds_transform3(seg, ds_scales=((1, 1, 1), (0.5, 0.5, 0.5), (0.25, 0.25, 0.25)), classes=None): output = [] one_hot = torch.from_numpy(convert_seg_image_to_one_hot_encoding_batched(seg, classes)) # b, c, for s in ds_scales: if all([i == 1 for i in s]): output.append(torch.from_numpy(seg)) else: kernel_size = tuple(int(1 / i) for i in s) stride = kernel_size pad = tuple((i-1) // 2 for i in kernel_size) if len(s) == 2: pool_op = avg_pool2d elif len(s) == 3: pool_op = avg_pool3d else: raise RuntimeError() pooled = pool_op(one_hot, kernel_size, stride, pad, count_include_pad=False, ceil_mode=False) output.append(pooled) return output class DownsampleSegForDSTransform2(AbstractTransform): ''' data_dict['output_key'] will be a list of segmentations scaled according to ds_scales ''' def __init__(self, ds_scales=(1, 0.5, 0.25), order=0, cval=0, input_key="seg", output_key="seg", axes=None): self.axes = axes self.output_key = output_key self.input_key = input_key self.cval = cval self.order = order self.ds_scales = ds_scales def __call__(self, **data_dict): data_dict[self.output_key] = downsample_seg_for_ds_transform2(data_dict[self.input_key], self.ds_scales, self.order, self.cval, self.axes) return data_dict def downsample_seg_for_ds_transform2(seg, ds_scales=((1, 1, 1), (0.5, 0.5, 0.5), (0.25, 0.25, 0.25)), order=0, cval=0, axes=None): if axes is None: axes = list(range(2, len(seg.shape))) output = [] for s in ds_scales: if all([i == 1 for i in s]): output.append(seg) else: new_shape = np.array(seg.shape).astype(float) for i, a in 
enumerate(axes): new_shape[a] *= s[i] new_shape = np.round(new_shape).astype(int) out_seg = np.zeros(new_shape, dtype=seg.dtype) for b in range(seg.shape[0]): for c in range(seg.shape[1]): out_seg[b, c] = resize_segmentation(seg[b, c], new_shape[2:], order, cval) output.append(out_seg) return output
4,164
38.292453
132
py
CoTr
CoTr-main/nnUNet/nnunet/training/optimizer/ranger.py
############ # https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer # This code was taken from the repo above and was not created by me (Fabian)! Full credit goes to the original authors ############ import math import torch from torch.optim.optimizer import Optimizer class Ranger(Optimizer): def __init__(self, params, lr=1e-3, alpha=0.5, k=6, N_sma_threshhold=5, betas=(.95, 0.999), eps=1e-5, weight_decay=0): # parameter checks if not 0.0 <= alpha <= 1.0: raise ValueError(f'Invalid slow update rate: {alpha}') if not 1 <= k: raise ValueError(f'Invalid lookahead steps: {k}') if not lr > 0: raise ValueError(f'Invalid Learning Rate: {lr}') if not eps > 0: raise ValueError(f'Invalid eps: {eps}') # parameter comments: # beta1 (momentum) of .95 seems to work better than .90... # N_sma_threshold of 5 seems better in testing than 4. # In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you. # prep defaults and init torch.optim base defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold, eps=eps, weight_decay=weight_decay) super().__init__(params, defaults) # adjustable threshold self.N_sma_threshhold = N_sma_threshhold # now we can get to work... # removed as we now use step from RAdam...no need for duplicate step counting # for group in self.param_groups: # group["step_counter"] = 0 # print("group step counter init") # look ahead params self.alpha = alpha self.k = k # radam buffer for state self.radam_buffer = [[None, None, None] for ind in range(10)] # self.first_run_check=0 # lookahead weights # 9/2/19 - lookahead param tensors have been moved to state storage. # This should resolve issues with load/save where weights were left in GPU memory from first load, slowing down future runs. 
# self.slow_weights = [[p.clone().detach() for p in group['params']] # for group in self.param_groups] # don't use grad for lookahead weights # for w in it.chain(*self.slow_weights): # w.requires_grad = False def __setstate__(self, state): print("set state called") super(Ranger, self).__setstate__(state) def step(self, closure=None): loss = None # note - below is commented out b/c I have other work that passes back the loss as a float, and thus not a callable closure. # Uncomment if you need to use the actual closure... # if closure is not None: # loss = closure() # Evaluate averages and grad, update param tensors for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data.float() if grad.is_sparse: raise RuntimeError('Ranger optimizer does not support sparse gradients') p_data_fp32 = p.data.float() state = self.state[p] # get state dict for this param if len(state) == 0: # if first time to run...init dictionary with our desired entries # if self.first_run_check==0: # self.first_run_check=1 # print("Initializing slow buffer...should not see this at load from saved model!") state['step'] = 0 state['exp_avg'] = torch.zeros_like(p_data_fp32) state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) # look ahead weight storage now in state dict state['slow_buffer'] = torch.empty_like(p.data) state['slow_buffer'].copy_(p.data) else: state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) # begin computations exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] # compute variance mov avg exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) # compute mean moving avg exp_avg.mul_(beta1).add_(1 - beta1, grad) state['step'] += 1 buffered = self.radam_buffer[int(state['step'] % 10)] if state['step'] == buffered[0]: N_sma, step_size = buffered[1], buffered[2] else: buffered[0] = state['step'] beta2_t = beta2 ** state['step'] N_sma_max = 
2 / (1 - beta2) - 1 N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) buffered[1] = N_sma if N_sma > self.N_sma_threshhold: step_size = math.sqrt( (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / ( N_sma_max - 2)) / (1 - beta1 ** state['step']) else: step_size = 1.0 / (1 - beta1 ** state['step']) buffered[2] = step_size if group['weight_decay'] != 0: p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) if N_sma > self.N_sma_threshhold: denom = exp_avg_sq.sqrt().add_(group['eps']) p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom) else: p_data_fp32.add_(-step_size * group['lr'], exp_avg) p.data.copy_(p_data_fp32) # integrated look ahead... # we do it at the param level instead of group level if state['step'] % group['k'] == 0: slow_p = state['slow_buffer'] # get access to slow param tensor slow_p.add_(self.alpha, p.data - slow_p) # (fast weights - slow weights) * alpha p.data.copy_(slow_p) # copy interpolated weights to RAdam param tensor return loss
6,465
41.261438
132
py
CoTr
CoTr-main/nnUNet/nnunet/training/loss_functions/dice_loss.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.training.loss_functions.TopK_loss import TopKLoss from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss from nnunet.utilities.nd_softmax import softmax_helper from nnunet.utilities.tensor_utilities import sum_tensor from torch import nn import numpy as np class GDL(nn.Module): def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1., square=False, square_volumes=False): """ square_volumes will square the weight term. 
The paper recommends square_volumes=True; I don't (just an intuition) """ super(GDL, self).__init__() self.square_volumes = square_volumes self.square = square self.do_bg = do_bg self.batch_dice = batch_dice self.apply_nonlin = apply_nonlin self.smooth = smooth def forward(self, x, y, loss_mask=None): shp_x = x.shape shp_y = y.shape if self.batch_dice: axes = [0] + list(range(2, len(shp_x))) else: axes = list(range(2, len(shp_x))) if len(shp_x) != len(shp_y): y = y.view((shp_y[0], 1, *shp_y[1:])) if all([i == j for i, j in zip(x.shape, y.shape)]): # if this is the case then gt is probably already a one hot encoding y_onehot = y else: gt = y.long() y_onehot = torch.zeros(shp_x) if x.device.type == "cuda": y_onehot = y_onehot.cuda(x.device.index) y_onehot.scatter_(1, gt, 1) if self.apply_nonlin is not None: x = self.apply_nonlin(x) if not self.do_bg: x = x[:, 1:] y_onehot = y_onehot[:, 1:] tp, fp, fn, _ = get_tp_fp_fn_tn(x, y_onehot, axes, loss_mask, self.square) # GDL weight computation, we use 1/V volumes = sum_tensor(y_onehot, axes) + 1e-6 # add some eps to prevent div by zero if self.square_volumes: volumes = volumes ** 2 # apply weights tp = tp / volumes fp = fp / volumes fn = fn / volumes # sum over classes if self.batch_dice: axis = 0 else: axis = 1 tp = tp.sum(axis, keepdim=False) fp = fp.sum(axis, keepdim=False) fn = fn.sum(axis, keepdim=False) # compute dice dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth) dc = dc.mean() return -dc def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False): """ net_output must be (b, c, x, y(, z))) gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z)) if mask is provided it must have shape (b, 1, x, y(, z))) :param net_output: :param gt: :param axes: can be (, ) = no summation :param mask: mask must be 1 for valid pixels and 0 for invalid pixels :param square: if True then fp, tp and fn will be squared before summation :return: """ if axes 
is None: axes = tuple(range(2, len(net_output.size()))) shp_x = net_output.shape shp_y = gt.shape with torch.no_grad(): if len(shp_x) != len(shp_y): gt = gt.view((shp_y[0], 1, *shp_y[1:])) if all([i == j for i, j in zip(net_output.shape, gt.shape)]): # if this is the case then gt is probably already a one hot encoding y_onehot = gt else: gt = gt.long() y_onehot = torch.zeros(shp_x) if net_output.device.type == "cuda": y_onehot = y_onehot.cuda(net_output.device.index) y_onehot.scatter_(1, gt, 1) tp = net_output * y_onehot fp = net_output * (1 - y_onehot) fn = (1 - net_output) * y_onehot tn = (1 - net_output) * (1 - y_onehot) if mask is not None: tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1) fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1) fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1) tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1) if square: tp = tp ** 2 fp = fp ** 2 fn = fn ** 2 tn = tn ** 2 if len(axes) > 0: tp = sum_tensor(tp, axes, keepdim=False) fp = sum_tensor(fp, axes, keepdim=False) fn = sum_tensor(fn, axes, keepdim=False) tn = sum_tensor(tn, axes, keepdim=False) return tp, fp, fn, tn class SoftDiceLoss(nn.Module): def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.): """ """ super(SoftDiceLoss, self).__init__() self.do_bg = do_bg self.batch_dice = batch_dice self.apply_nonlin = apply_nonlin self.smooth = smooth def forward(self, x, y, loss_mask=None): shp_x = x.shape if self.batch_dice: axes = [0] + list(range(2, len(shp_x))) else: axes = list(range(2, len(shp_x))) if self.apply_nonlin is not None: x = self.apply_nonlin(x) tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, axes, loss_mask, False) nominator = 2 * tp + self.smooth denominator = 2 * tp + fp + fn + self.smooth dc = nominator / (denominator + 1e-8) if not self.do_bg: if self.batch_dice: dc = dc[1:] else: dc = dc[:, 1:] dc = dc.mean() 
return -dc class MCCLoss(nn.Module): def __init__(self, apply_nonlin=None, batch_mcc=False, do_bg=True, smooth=0.0): """ based on matthews correlation coefficient https://en.wikipedia.org/wiki/Matthews_correlation_coefficient Does not work. Really unstable. F this. """ super(MCCLoss, self).__init__() self.smooth = smooth self.do_bg = do_bg self.batch_mcc = batch_mcc self.apply_nonlin = apply_nonlin def forward(self, x, y, loss_mask=None): shp_x = x.shape voxels = np.prod(shp_x[2:]) if self.batch_mcc: axes = [0] + list(range(2, len(shp_x))) else: axes = list(range(2, len(shp_x))) if self.apply_nonlin is not None: x = self.apply_nonlin(x) tp, fp, fn, tn = get_tp_fp_fn_tn(x, y, axes, loss_mask, False) tp /= voxels fp /= voxels fn /= voxels tn /= voxels nominator = tp * tn - fp * fn + self.smooth denominator = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5 + self.smooth mcc = nominator / denominator if not self.do_bg: if self.batch_mcc: mcc = mcc[1:] else: mcc = mcc[:, 1:] mcc = mcc.mean() return -mcc class SoftDiceLossSquared(nn.Module): def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.): """ squares the terms in the denominator as proposed by Milletari et al. 
""" super(SoftDiceLossSquared, self).__init__() self.do_bg = do_bg self.batch_dice = batch_dice self.apply_nonlin = apply_nonlin self.smooth = smooth def forward(self, x, y, loss_mask=None): shp_x = x.shape shp_y = y.shape if self.batch_dice: axes = [0] + list(range(2, len(shp_x))) else: axes = list(range(2, len(shp_x))) if self.apply_nonlin is not None: x = self.apply_nonlin(x) with torch.no_grad(): if len(shp_x) != len(shp_y): y = y.view((shp_y[0], 1, *shp_y[1:])) if all([i == j for i, j in zip(x.shape, y.shape)]): # if this is the case then gt is probably already a one hot encoding y_onehot = y else: y = y.long() y_onehot = torch.zeros(shp_x) if x.device.type == "cuda": y_onehot = y_onehot.cuda(x.device.index) y_onehot.scatter_(1, y, 1).float() intersect = x * y_onehot # values in the denominator get smoothed denominator = x ** 2 + y_onehot ** 2 # aggregation was previously done in get_tp_fp_fn, but needs to be done here now (needs to be done after # squaring) intersect = sum_tensor(intersect, axes, False) + self.smooth denominator = sum_tensor(denominator, axes, False) + self.smooth dc = 2 * intersect / denominator if not self.do_bg: if self.batch_dice: dc = dc[1:] else: dc = dc[:, 1:] dc = dc.mean() return -dc class DC_and_CE_loss(nn.Module): def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False, weight_ce=1, weight_dice=1, log_dice=False, ignore_label=None): """ CAREFUL. Weights for CE and Dice do not need to sum to one. You can set whatever you want. 
:param soft_dice_kwargs: :param ce_kwargs: :param aggregate: :param square_dice: :param weight_ce: :param weight_dice: """ super(DC_and_CE_loss, self).__init__() if ignore_label is not None: assert not square_dice, 'not implemented' ce_kwargs['reduction'] = 'none' self.log_dice = log_dice self.weight_dice = weight_dice self.weight_ce = weight_ce self.aggregate = aggregate self.ce = RobustCrossEntropyLoss(**ce_kwargs) self.ignore_label = ignore_label if not square_dice: self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs) else: self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs) def forward(self, net_output, target): """ target must be b, c, x, y(, z) with c=1 :param net_output: :param target: :return: """ if self.ignore_label is not None: assert target.shape[1] == 1, 'not implemented for one hot encoding' mask = target != self.ignore_label target[~mask] = 0 mask = mask.float() else: mask = None dc_loss = self.dc(net_output, target, loss_mask=mask) if self.weight_dice != 0 else 0 if self.log_dice: dc_loss = -torch.log(-dc_loss) ce_loss = self.ce(net_output, target[:, 0].long()) if self.weight_ce != 0 else 0 if self.ignore_label is not None: ce_loss *= mask[:, 0] ce_loss = ce_loss.sum() / mask.sum() if self.aggregate == "sum": result = self.weight_ce * ce_loss + self.weight_dice * dc_loss else: raise NotImplementedError("nah son") # reserved for other stuff (later) return result class DC_and_BCE_loss(nn.Module): def __init__(self, bce_kwargs, soft_dice_kwargs, aggregate="sum"): """ DO NOT APPLY NONLINEARITY IN YOUR NETWORK! 
THIS LOSS IS INTENDED TO BE USED FOR BRATS REGIONS ONLY :param soft_dice_kwargs: :param bce_kwargs: :param aggregate: """ super(DC_and_BCE_loss, self).__init__() self.aggregate = aggregate self.ce = nn.BCEWithLogitsLoss(**bce_kwargs) self.dc = SoftDiceLoss(apply_nonlin=torch.sigmoid, **soft_dice_kwargs) def forward(self, net_output, target): ce_loss = self.ce(net_output, target) dc_loss = self.dc(net_output, target) if self.aggregate == "sum": result = ce_loss + dc_loss else: raise NotImplementedError("nah son") # reserved for other stuff (later) return result class GDL_and_CE_loss(nn.Module): def __init__(self, gdl_dice_kwargs, ce_kwargs, aggregate="sum"): super(GDL_and_CE_loss, self).__init__() self.aggregate = aggregate self.ce = RobustCrossEntropyLoss(**ce_kwargs) self.dc = GDL(softmax_helper, **gdl_dice_kwargs) def forward(self, net_output, target): dc_loss = self.dc(net_output, target) ce_loss = self.ce(net_output, target) if self.aggregate == "sum": result = ce_loss + dc_loss else: raise NotImplementedError("nah son") # reserved for other stuff (later) return result class DC_and_topk_loss(nn.Module): def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False): super(DC_and_topk_loss, self).__init__() self.aggregate = aggregate self.ce = TopKLoss(**ce_kwargs) if not square_dice: self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs) else: self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs) def forward(self, net_output, target): dc_loss = self.dc(net_output, target) ce_loss = self.ce(net_output, target) if self.aggregate == "sum": result = ce_loss + dc_loss else: raise NotImplementedError("nah son") # reserved for other stuff (later?) return result
14,049
31.903981
121
py
CoTr
CoTr-main/nnUNet/nnunet/training/loss_functions/TopK_loss.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss class TopKLoss(RobustCrossEntropyLoss): """ Network has to have NO LINEARITY! """ def __init__(self, weight=None, ignore_index=-100, k=10): self.k = k super(TopKLoss, self).__init__(weight, False, ignore_index, reduce=False) def forward(self, inp, target): target = target[:, 0].long() res = super(TopKLoss, self).forward(inp, target) num_voxels = np.prod(res.shape, dtype=np.int64) res, _ = torch.topk(res.view((-1, )), int(num_voxels * self.k / 100), sorted=False) return res.mean()
1,364
39.147059
114
py
CoTr
CoTr-main/nnUNet/nnunet/training/loss_functions/crossentropy.py
from torch import nn, Tensor class RobustCrossEntropyLoss(nn.CrossEntropyLoss): """ this is just a compatibility layer because my target tensor is float and has an extra dimension """ def forward(self, input: Tensor, target: Tensor) -> Tensor: if len(target.shape) == len(input.shape): assert target.shape[1] == 1 target = target[:, 0] return super().forward(input, target.long())
438
35.583333
99
py
CoTr
CoTr-main/nnUNet/nnunet/training/loss_functions/deep_supervision.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from torch import nn class MultipleOutputLoss2(nn.Module): def __init__(self, loss, weight_factors=None): """ use this if you have several outputs and ground truth (both list of same len) and the loss should be computed between them (x[0] and y[0], x[1] and y[1] etc) :param loss: :param weight_factors: """ super(MultipleOutputLoss2, self).__init__() self.weight_factors = weight_factors self.loss = loss def forward(self, x, y): assert isinstance(x, (tuple, list)), "x must be either tuple or list" assert isinstance(y, (tuple, list)), "y must be either tuple or list" if self.weight_factors is None: weights = [1] * len(x) else: weights = self.weight_factors l = weights[0] * self.loss(x[0], y[0]) for i in range(1, len(x)): if weights[i] != 0: l += weights[i] * self.loss(x[i], y[i]) return l
1,679
37.181818
117
py
CoTr
CoTr-main/nnUNet/nnunet/network_architecture/generic_UNet_DP.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss from nnunet.training.loss_functions.dice_loss import get_tp_fp_fn_tn from nnunet.utilities.nd_softmax import softmax_helper from nnunet.utilities.tensor_utilities import sum_tensor from torch import nn class Generic_UNet_DP(Generic_UNet): def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2, feat_map_mul_on_downscale=2, conv_op=nn.Conv2d, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None, conv_kernel_sizes=None, upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False, max_num_features=None): """ As opposed to the Generic_UNet, this class will compute parts of the loss function in the forward pass. This is useful for GPU parallelization. The batch DICE loss, if used, must be computed over the whole batch. 
Therefore, in a naive implementation, all softmax outputs must be copied to a single GPU which will then do the loss computation all by itself. In the context of 3D Segmentation, this results in a lot of overhead AND is inefficient because the DICE computation is also kinda expensive (Think 8 GPUs with a result of shape 2x4x128x128x128 each.). The DICE is a global metric, but its parts can be computed locally (TP, FP, FN). Thus, this implementation will compute all the parts of the loss function in the forward pass (and thus in a parallelized way). The results are very small (batch_size x num_classes for TP, FN and FP, respectively; scalar for CE) and copied easily. Also the final steps of the loss function (computing batch dice and average CE values) are easy and very quick on the one GPU they need to run on. BAM. final_nonlin is lambda x:x here! """ super(Generic_UNet_DP, self).__init__(input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage, feat_map_mul_on_downscale, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, nonlin, nonlin_kwargs, deep_supervision, dropout_in_localization, lambda x: x, weightInitializer, pool_op_kernel_sizes, conv_kernel_sizes, upscale_logits, convolutional_pooling, convolutional_upsampling, max_num_features) self.ce_loss = RobustCrossEntropyLoss() def forward(self, x, y=None, return_hard_tp_fp_fn=False): res = super(Generic_UNet_DP, self).forward(x) # regular Generic_UNet forward pass if y is None: return res else: # compute ce loss if self._deep_supervision and self.do_ds: ce_losses = [self.ce_loss(res[0], y[0]).unsqueeze(0)] tps = [] fps = [] fns = [] res_softmax = softmax_helper(res[0]) tp, fp, fn, _ = get_tp_fp_fn_tn(res_softmax, y[0]) tps.append(tp) fps.append(fp) fns.append(fn) for i in range(1, len(y)): ce_losses.append(self.ce_loss(res[i], y[i]).unsqueeze(0)) res_softmax = softmax_helper(res[i]) tp, fp, fn, _ = get_tp_fp_fn_tn(res_softmax, y[i]) tps.append(tp) fps.append(fp) fns.append(fn) 
ret = ce_losses, tps, fps, fns else: ce_loss = self.ce_loss(res, y).unsqueeze(0) # tp fp and fn need the output to be softmax res_softmax = softmax_helper(res) tp, fp, fn, _ = get_tp_fp_fn_tn(res_softmax, y) ret = ce_loss, tp, fp, fn if return_hard_tp_fp_fn: if self._deep_supervision and self.do_ds: output = res[0] target = y[0] else: target = y output = res with torch.no_grad(): num_classes = output.shape[1] output_softmax = softmax_helper(output) output_seg = output_softmax.argmax(1) target = target[:, 0] axes = tuple(range(1, len(target.shape))) tp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index) fp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index) fn_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index) for c in range(1, num_classes): tp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target == c).float(), axes=axes) fp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target != c).float(), axes=axes) fn_hard[:, c - 1] = sum_tensor((output_seg != c).float() * (target == c).float(), axes=axes) tp_hard = tp_hard.sum(0, keepdim=False)[None] fp_hard = fp_hard.sum(0, keepdim=False)[None] fn_hard = fn_hard.sum(0, keepdim=False)[None] ret = *ret, tp_hard, fp_hard, fn_hard return ret
6,839
53.72
131
py
CoTr
CoTr-main/nnUNet/nnunet/network_architecture/neural_network.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from batchgenerators.augmentations.utils import pad_nd_image from nnunet.utilities.random_stuff import no_op from nnunet.utilities.to_torch import to_cuda, maybe_to_torch from torch import nn import torch from scipy.ndimage.filters import gaussian_filter from typing import Union, Tuple, List from torch.cuda.amp import autocast class NeuralNetwork(nn.Module): def __init__(self): super(NeuralNetwork, self).__init__() def get_device(self): if next(self.parameters()).device == "cpu": return "cpu" else: return next(self.parameters()).device.index def set_device(self, device): if device == "cpu": self.cpu() else: self.cuda(device) def forward(self, x): raise NotImplementedError class SegmentationNetwork(NeuralNetwork): def __init__(self): super(NeuralNetwork, self).__init__() # if we have 5 pooling then our patch size must be divisible by 2**5 self.input_shape_must_be_divisible_by = None # for example in a 2d network that does 5 pool in x and 6 pool # in y this would be (32, 64) # we need to know this because we need to know if we are a 2d or a 3d netowrk self.conv_op = None # nn.Conv2d or nn.Conv3d # this tells us how many channely we have in the output. 
Important for preallocation in inference self.num_classes = None # number of channels in the output # depending on the loss, we do not hard code a nonlinearity into the architecture. To aggregate predictions # during inference, we need to apply the nonlinearity, however. So it is important to let the newtork know what # to apply in inference. For the most part this will be softmax self.inference_apply_nonlin = lambda x: x # softmax_helper # This is for saving a gaussian importance map for inference. It weights voxels higher that are closer to the # center. Prediction at the borders are often less accurate and are thus downweighted. Creating these Gaussians # can be expensive, so it makes sense to save and reuse them. self._gaussian_3d = self._patch_size_for_gaussian_3d = None self._gaussian_2d = self._patch_size_for_gaussian_2d = None def predict_3D(self, x: np.ndarray, do_mirroring: bool, mirror_axes: Tuple[int, ...] = (0, 1, 2), use_sliding_window: bool = False, step_size: float = 0.5, patch_size: Tuple[int, ...] = None, regions_class_order: Tuple[int, ...] = None, use_gaussian: bool = False, pad_border_mode: str = "constant", pad_kwargs: dict = None, all_in_gpu: bool = False, verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]: """ Use this function to predict a 3D image. It does not matter whether the network is a 2D or 3D U-Net, it will detect that automatically and run the appropriate code. When running predictions, you need to specify whether you want to run fully convolutional of sliding window based inference. We very strongly recommend you use sliding window with the default settings. It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If the network is not in eval mode it will print a warning. :param x: Your input data. Must be a nd.ndarray of shape (c, x, y, z). 
:param do_mirroring: If True, use test time data augmentation in the form of mirroring :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three axes :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default :param step_size: When running sliding window prediction, the step size determines the distance between adjacent predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between predictions. step_size cannot be larger than 1! :param patch_size: The patch size that was used for training the network. Do not use different patch sizes here, this will either crash or give potentially less accurate segmentations :param regions_class_order: Fabian only :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting to weigh predictions closer to the center of the current patch higher than those at the borders. The reason behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True :param pad_border_mode: leave this alone :param pad_kwargs: leave this alone :param all_in_gpu: experimental. You probably want to leave this as is it :param verbose: Do you want a wall of text? If yes then set this to True :param mixed_precision: if True, will run inference in mixed precision with autocast() :return: """ torch.cuda.empty_cache() assert step_size <= 1, 'step_size must be smaller than 1. Otherwise there will be a gap between consecutive ' \ 'predictions' if verbose: print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes) assert self.get_device() != "cpu", "CPU not implemented" if pad_kwargs is None: pad_kwargs = {'constant_values': 0} # A very long time ago the mirror axes were (2, 3, 4) for a 3d network. 
This is just to intercept any old # code that uses this convention if len(mirror_axes): if self.conv_op == nn.Conv2d: if max(mirror_axes) > 1: raise ValueError("mirror axes. duh") if self.conv_op == nn.Conv3d: if max(mirror_axes) > 2: raise ValueError("mirror axes. duh") if self.training: print('WARNING! Network is in train mode during inference. This may be intended, or not...') assert len(x.shape) == 4, "data must have shape (c,x,y,z)" if mixed_precision: context = autocast else: context = no_op with context(): with torch.no_grad(): if self.conv_op == nn.Conv3d: if use_sliding_window: res = self._internal_predict_3D_3Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian, pad_border_mode, pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose) else: res = self._internal_predict_3D_3Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs, verbose=verbose) elif self.conv_op == nn.Conv2d: if use_sliding_window: res = self._internal_predict_3D_2Dconv_tiled(x, patch_size, do_mirroring, mirror_axes, step_size, regions_class_order, use_gaussian, pad_border_mode, pad_kwargs, all_in_gpu, False) else: res = self._internal_predict_3D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, all_in_gpu, False) else: raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is") return res def predict_2D(self, x, do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), use_sliding_window: bool = False, step_size: float = 0.5, patch_size: tuple = None, regions_class_order: tuple = None, use_gaussian: bool = False, pad_border_mode: str = "constant", pad_kwargs: dict = None, all_in_gpu: bool = False, verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]: """ Use this function to predict a 2D image. 
If this is a 3D U-Net it will crash because you cannot predict a 2D image with that (you dummy). When running predictions, you need to specify whether you want to run fully convolutional of sliding window based inference. We very strongly recommend you use sliding window with the default settings. It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If the network is not in eval mode it will print a warning. :param x: Your input data. Must be a nd.ndarray of shape (c, x, y). :param do_mirroring: If True, use test time data augmentation in the form of mirroring :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three axes :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default :param step_size: When running sliding window prediction, the step size determines the distance between adjacent predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between predictions. step_size cannot be larger than 1! :param patch_size: The patch size that was used for training the network. Do not use different patch sizes here, this will either crash or give potentially less accurate segmentations :param regions_class_order: Fabian only :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting to weigh predictions closer to the center of the current patch higher than those at the borders. The reason behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True :param pad_border_mode: leave this alone :param pad_kwargs: leave this alone :param all_in_gpu: experimental. You probably want to leave this as is it :param verbose: Do you want a wall of text? 
If yes then set this to True :return: """ torch.cuda.empty_cache() assert step_size <= 1, 'step_size must be smaler than 1. Otherwise there will be a gap between consecutive ' \ 'predictions' if self.conv_op == nn.Conv3d: raise RuntimeError("Cannot predict 2d if the network is 3d. Dummy.") if verbose: print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes) assert self.get_device() != "cpu", "CPU not implemented" if pad_kwargs is None: pad_kwargs = {'constant_values': 0} # A very long time ago the mirror axes were (2, 3) for a 2d network. This is just to intercept any old # code that uses this convention if len(mirror_axes): if max(mirror_axes) > 1: raise ValueError("mirror axes. duh") if self.training: print('WARNING! Network is in train mode during inference. This may be intended, or not...') assert len(x.shape) == 3, "data must have shape (c,x,y)" if mixed_precision: context = autocast else: context = no_op with context(): with torch.no_grad(): if self.conv_op == nn.Conv2d: if use_sliding_window: res = self._internal_predict_2D_2Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian, pad_border_mode, pad_kwargs, all_in_gpu, verbose) else: res = self._internal_predict_2D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose) else: raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is") return res @staticmethod def _get_gaussian(patch_size, sigma_scale=1. 
/ 8) -> np.ndarray: tmp = np.zeros(patch_size) center_coords = [i // 2 for i in patch_size] sigmas = [i * sigma_scale for i in patch_size] tmp[tuple(center_coords)] = 1 gaussian_importance_map = gaussian_filter(tmp, sigmas, 0, mode='constant', cval=0) gaussian_importance_map = gaussian_importance_map / np.max(gaussian_importance_map) * 1 gaussian_importance_map = gaussian_importance_map.astype(np.float32) # gaussian_importance_map cannot be 0, otherwise we may end up with nans! gaussian_importance_map[gaussian_importance_map == 0] = np.min( gaussian_importance_map[gaussian_importance_map != 0]) return gaussian_importance_map @staticmethod def _compute_steps_for_sliding_window(patch_size: Tuple[int, ...], image_size: Tuple[int, ...], step_size: float) -> List[List[int]]: assert [i >= j for i, j in zip(image_size, patch_size)], "image size must be as large or larger than patch_size" assert 0 < step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1' # our step width is patch_size*step_size at most, but can be narrower. 
For example if we have image size of # 110, patch size of 32 and step_size of 0.5, then we want to make 4 steps starting at coordinate 0, 27, 55, 78 target_step_sizes_in_voxels = [i * step_size for i in patch_size] num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels, patch_size)] steps = [] for dim in range(len(patch_size)): # the highest step value for this dimension is max_step_value = image_size[dim] - patch_size[dim] if num_steps[dim] > 1: actual_step_size = max_step_value / (num_steps[dim] - 1) else: actual_step_size = 99999999999 # does not matter because there is only one step at 0 steps_here = [int(np.round(actual_step_size * i)) for i in range(num_steps[dim])] steps.append(steps_here) return steps def _internal_predict_3D_3Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple, patch_size: tuple, regions_class_order: tuple, use_gaussian: bool, pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool, verbose: bool) -> Tuple[np.ndarray, np.ndarray]: # better safe than sorry assert len(x.shape) == 4, "x must be (c, x, y, z)" assert self.get_device() != "cpu" if verbose: print("step_size:", step_size) if verbose: print("do mirror:", do_mirroring) assert patch_size is not None, "patch_size cannot be None for tiled prediction" # for sliding window inference the image must at least be as large as the patch size. 
It does not matter # whether the shape is divisible by 2**num_pool as long as the patch size is data, slicer = pad_nd_image(x, patch_size, pad_border_mode, pad_kwargs, True, None) data_shape = data.shape # still c, x, y, z # compute the steps for sliding window steps = self._compute_steps_for_sliding_window(patch_size, data_shape[1:], step_size) num_tiles = len(steps[0]) * len(steps[1]) * len(steps[2]) if verbose: print("data shape:", data_shape) print("patch size:", patch_size) print("steps (x, y, and z):", steps) print("number of tiles:", num_tiles) # we only need to compute that once. It can take a while to compute this due to the large sigma in # gaussian_filter if use_gaussian and num_tiles > 1: if self._gaussian_3d is None or not all( [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_3d)]): if verbose: print('computing Gaussian') gaussian_importance_map = self._get_gaussian(patch_size, sigma_scale=1. / 8) self._gaussian_3d = gaussian_importance_map self._patch_size_for_gaussian_3d = patch_size else: if verbose: print("using precomputed Gaussian") gaussian_importance_map = self._gaussian_3d gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(), non_blocking=True) else: gaussian_importance_map = None if all_in_gpu: # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU if use_gaussian and num_tiles > 1: # half precision for the outputs should be good enough. 
If the outputs here are half, the # gaussian_importance_map should be as well gaussian_importance_map = gaussian_importance_map.half() # make sure we did not round anything to 0 gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[ gaussian_importance_map != 0].min() add_for_nb_of_preds = gaussian_importance_map else: add_for_nb_of_preds = torch.ones(data.shape[1:], device=self.get_device()) if verbose: print("initializing result array (on GPU)") aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half, device=self.get_device()) if verbose: print("moving data to GPU") data = torch.from_numpy(data).cuda(self.get_device(), non_blocking=True) if verbose: print("initializing result_numsamples (on GPU)") aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half, device=self.get_device()) else: if use_gaussian and num_tiles > 1: add_for_nb_of_preds = self._gaussian_3d else: add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32) aggregated_results = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32) aggregated_nb_of_predictions = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32) for x in steps[0]: lb_x = x ub_x = x + patch_size[0] for y in steps[1]: lb_y = y ub_y = y + patch_size[1] for z in steps[2]: lb_z = z ub_z = z + patch_size[2] predicted_patch = self._internal_maybe_mirror_and_pred_3D( data[None, :, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z], mirror_axes, do_mirroring, gaussian_importance_map)[0] if all_in_gpu: predicted_patch = predicted_patch.half() else: predicted_patch = predicted_patch.cpu().numpy() aggregated_results[:, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z] += predicted_patch aggregated_nb_of_predictions[:, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z] += add_for_nb_of_preds # we reverse the padding here (remeber that we padded the input to be at least as large as the patch size slicer = tuple( [slice(0, aggregated_results.shape[i]) 
for i in range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:]) aggregated_results = aggregated_results[slicer] aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer] # computing the class_probabilities by dividing the aggregated result with result_numsamples class_probabilities = aggregated_results / aggregated_nb_of_predictions if regions_class_order is None: predicted_segmentation = class_probabilities.argmax(0) else: if all_in_gpu: class_probabilities_here = class_probabilities.detach().cpu().numpy() else: class_probabilities_here = class_probabilities predicted_segmentation = np.zeros(class_probabilities_here.shape[1:], dtype=np.float32) for i, c in enumerate(regions_class_order): predicted_segmentation[class_probabilities_here[i] > 0.5] = c if all_in_gpu: if verbose: print("copying results to CPU") if regions_class_order is None: predicted_segmentation = predicted_segmentation.detach().cpu().numpy() class_probabilities = class_probabilities.detach().cpu().numpy() if verbose: print("prediction done") return predicted_segmentation, class_probabilities def _internal_predict_2D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None, pad_border_mode: str = "constant", pad_kwargs: dict = None, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]: """ This one does fully convolutional inference. 
No sliding window """ assert len(x.shape) == 3, "x must be (c, x, y)" assert self.get_device() != "cpu" assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \ 'run _internal_predict_2D_2Dconv' if verbose: print("do mirror:", do_mirroring) data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True, self.input_shape_must_be_divisible_by) predicted_probabilities = self._internal_maybe_mirror_and_pred_2D(data[None], mirror_axes, do_mirroring, None)[0] slicer = tuple( [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) - (len(slicer) - 1))] + slicer[1:]) predicted_probabilities = predicted_probabilities[slicer] if regions_class_order is None: predicted_segmentation = predicted_probabilities.argmax(0) predicted_segmentation = predicted_segmentation.detach().cpu().numpy() predicted_probabilities = predicted_probabilities.detach().cpu().numpy() else: predicted_probabilities = predicted_probabilities.detach().cpu().numpy() predicted_segmentation = np.zeros(predicted_probabilities.shape[1:], dtype=np.float32) for i, c in enumerate(regions_class_order): predicted_segmentation[predicted_probabilities[i] > 0.5] = c return predicted_segmentation, predicted_probabilities def _internal_predict_3D_3Dconv(self, x: np.ndarray, min_size: Tuple[int, ...], do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None, pad_border_mode: str = "constant", pad_kwargs: dict = None, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]: """ This one does fully convolutional inference. 
No sliding window """ assert len(x.shape) == 4, "x must be (c, x, y, z)" assert self.get_device() != "cpu" assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \ 'run _internal_predict_3D_3Dconv' if verbose: print("do mirror:", do_mirroring) data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True, self.input_shape_must_be_divisible_by) predicted_probabilities = self._internal_maybe_mirror_and_pred_3D(data[None], mirror_axes, do_mirroring, None)[0] slicer = tuple( [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) - (len(slicer) - 1))] + slicer[1:]) predicted_probabilities = predicted_probabilities[slicer] if regions_class_order is None: predicted_segmentation = predicted_probabilities.argmax(0) predicted_segmentation = predicted_segmentation.detach().cpu().numpy() predicted_probabilities = predicted_probabilities.detach().cpu().numpy() else: predicted_probabilities = predicted_probabilities.detach().cpu().numpy() predicted_segmentation = np.zeros(predicted_probabilities.shape[1:], dtype=np.float32) for i, c in enumerate(regions_class_order): predicted_segmentation[predicted_probabilities[i] > 0.5] = c return predicted_segmentation, predicted_probabilities def _internal_maybe_mirror_and_pred_3D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple, do_mirroring: bool = True, mult: np.ndarray or torch.tensor = None) -> torch.tensor: assert len(x.shape) == 5, 'x must be (b, c, x, y, z)' # everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here # we now return a cuda tensor! Not numpy array! 
x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device()) result_torch = torch.zeros([1, self.num_classes] + list(x.shape[2:]), dtype=torch.float).cuda(self.get_device(), non_blocking=True) if mult is not None: mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device()) if do_mirroring: mirror_idx = 8 num_results = 2 ** len(mirror_axes) else: mirror_idx = 1 num_results = 1 for m in range(mirror_idx): if m == 0: pred = self.inference_apply_nonlin(self(x)) result_torch += 1 / num_results * pred if m == 1 and (2 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (4, )))) result_torch += 1 / num_results * torch.flip(pred, (4,)) if m == 2 and (1 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (3, )))) result_torch += 1 / num_results * torch.flip(pred, (3,)) if m == 3 and (2 in mirror_axes) and (1 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 3)))) result_torch += 1 / num_results * torch.flip(pred, (4, 3)) if m == 4 and (0 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (2, )))) result_torch += 1 / num_results * torch.flip(pred, (2,)) if m == 5 and (0 in mirror_axes) and (2 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 2)))) result_torch += 1 / num_results * torch.flip(pred, (4, 2)) if m == 6 and (0 in mirror_axes) and (1 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2)))) result_torch += 1 / num_results * torch.flip(pred, (3, 2)) if m == 7 and (0 in mirror_axes) and (1 in mirror_axes) and (2 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 3, 2)))) result_torch += 1 / num_results * torch.flip(pred, (4, 3, 2)) if mult is not None: result_torch[:, :] *= mult return result_torch def _internal_maybe_mirror_and_pred_2D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple, do_mirroring: bool = True, mult: np.ndarray or torch.tensor = None) -> torch.tensor: # everything in here takes place 
on the GPU. If x and mult are not yet on GPU this will be taken care of here # we now return a cuda tensor! Not numpy array! assert len(x.shape) == 4, 'x must be (b, c, x, y)' x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device()) result_torch = torch.zeros([x.shape[0], self.num_classes] + list(x.shape[2:]), dtype=torch.float).cuda(self.get_device(), non_blocking=True) if mult is not None: mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device()) if do_mirroring: mirror_idx = 4 num_results = 2 ** len(mirror_axes) else: mirror_idx = 1 num_results = 1 for m in range(mirror_idx): if m == 0: pred = self.inference_apply_nonlin(self(x)) result_torch += 1 / num_results * pred if m == 1 and (1 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (3, )))) result_torch += 1 / num_results * torch.flip(pred, (3, )) if m == 2 and (0 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (2, )))) result_torch += 1 / num_results * torch.flip(pred, (2, )) if m == 3 and (0 in mirror_axes) and (1 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2)))) result_torch += 1 / num_results * torch.flip(pred, (3, 2)) if mult is not None: result_torch[:, :] *= mult return result_torch def _internal_predict_2D_2Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple, patch_size: tuple, regions_class_order: tuple, use_gaussian: bool, pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool, verbose: bool) -> Tuple[np.ndarray, np.ndarray]: # better safe than sorry assert len(x.shape) == 3, "x must be (c, x, y)" assert self.get_device() != "cpu" if verbose: print("step_size:", step_size) if verbose: print("do mirror:", do_mirroring) assert patch_size is not None, "patch_size cannot be None for tiled prediction" # for sliding window inference the image must at least be as large as the patch size. 
It does not matter # whether the shape is divisible by 2**num_pool as long as the patch size is data, slicer = pad_nd_image(x, patch_size, pad_border_mode, pad_kwargs, True, None) data_shape = data.shape # still c, x, y # compute the steps for sliding window steps = self._compute_steps_for_sliding_window(patch_size, data_shape[1:], step_size) num_tiles = len(steps[0]) * len(steps[1]) if verbose: print("data shape:", data_shape) print("patch size:", patch_size) print("steps (x, y, and z):", steps) print("number of tiles:", num_tiles) # we only need to compute that once. It can take a while to compute this due to the large sigma in # gaussian_filter if use_gaussian and num_tiles > 1: if self._gaussian_2d is None or not all( [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_2d)]): if verbose: print('computing Gaussian') gaussian_importance_map = self._get_gaussian(patch_size, sigma_scale=1. / 8) self._gaussian_2d = gaussian_importance_map self._patch_size_for_gaussian_2d = patch_size else: if verbose: print("using precomputed Gaussian") gaussian_importance_map = self._gaussian_2d gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(), non_blocking=True) else: gaussian_importance_map = None if all_in_gpu: # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU if use_gaussian and num_tiles > 1: # half precision for the outputs should be good enough. 
If the outputs here are half, the # gaussian_importance_map should be as well gaussian_importance_map = gaussian_importance_map.half() # make sure we did not round anything to 0 gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[ gaussian_importance_map != 0].min() add_for_nb_of_preds = gaussian_importance_map else: add_for_nb_of_preds = torch.ones(data.shape[1:], device=self.get_device()) if verbose: print("initializing result array (on GPU)") aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half, device=self.get_device()) if verbose: print("moving data to GPU") data = torch.from_numpy(data).cuda(self.get_device(), non_blocking=True) if verbose: print("initializing result_numsamples (on GPU)") aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half, device=self.get_device()) else: if use_gaussian and num_tiles > 1: add_for_nb_of_preds = self._gaussian_2d else: add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32) aggregated_results = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32) aggregated_nb_of_predictions = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32) for x in steps[0]: lb_x = x ub_x = x + patch_size[0] for y in steps[1]: lb_y = y ub_y = y + patch_size[1] predicted_patch = self._internal_maybe_mirror_and_pred_2D( data[None, :, lb_x:ub_x, lb_y:ub_y], mirror_axes, do_mirroring, gaussian_importance_map)[0] if all_in_gpu: predicted_patch = predicted_patch.half() else: predicted_patch = predicted_patch.cpu().numpy() aggregated_results[:, lb_x:ub_x, lb_y:ub_y] += predicted_patch aggregated_nb_of_predictions[:, lb_x:ub_x, lb_y:ub_y] += add_for_nb_of_preds # we reverse the padding here (remeber that we padded the input to be at least as large as the patch size slicer = tuple( [slice(0, aggregated_results.shape[i]) for i in range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:]) 
aggregated_results = aggregated_results[slicer] aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer] # computing the class_probabilities by dividing the aggregated result with result_numsamples class_probabilities = aggregated_results / aggregated_nb_of_predictions if regions_class_order is None: predicted_segmentation = class_probabilities.argmax(0) else: if all_in_gpu: class_probabilities_here = class_probabilities.detach().cpu().numpy() else: class_probabilities_here = class_probabilities predicted_segmentation = np.zeros(class_probabilities_here.shape[1:], dtype=np.float32) for i, c in enumerate(regions_class_order): predicted_segmentation[class_probabilities_here[i] > 0.5] = c if all_in_gpu: if verbose: print("copying results to CPU") if regions_class_order is None: predicted_segmentation = predicted_segmentation.detach().cpu().numpy() class_probabilities = class_probabilities.detach().cpu().numpy() if verbose: print("prediction done") return predicted_segmentation, class_probabilities def _internal_predict_3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool, mirror_axes: tuple = (0, 1), regions_class_order: tuple = None, pad_border_mode: str = "constant", pad_kwargs: dict = None, all_in_gpu: bool = False, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]: if all_in_gpu: raise NotImplementedError assert len(x.shape) == 4, "data must be c, x, y, z" predicted_segmentation = [] softmax_pred = [] for s in range(x.shape[1]): pred_seg, softmax_pres = self._internal_predict_2D_2Dconv( x[:, s], min_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose) predicted_segmentation.append(pred_seg[None]) softmax_pred.append(softmax_pres[None]) predicted_segmentation = np.vstack(predicted_segmentation) softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3)) return predicted_segmentation, softmax_pred def predict_3D_pseudo3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], 
do_mirroring: bool, mirror_axes: tuple = (0, 1), regions_class_order: tuple = None, pseudo3D_slices: int = 5, all_in_gpu: bool = False, pad_border_mode: str = "constant", pad_kwargs: dict = None, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]: if all_in_gpu: raise NotImplementedError assert len(x.shape) == 4, "data must be c, x, y, z" assert pseudo3D_slices % 2 == 1, "pseudo3D_slices must be odd" extra_slices = (pseudo3D_slices - 1) // 2 shp_for_pad = np.array(x.shape) shp_for_pad[1] = extra_slices pad = np.zeros(shp_for_pad, dtype=np.float32) data = np.concatenate((pad, x, pad), 1) predicted_segmentation = [] softmax_pred = [] for s in range(extra_slices, data.shape[1] - extra_slices): d = data[:, (s - extra_slices):(s + extra_slices + 1)] d = d.reshape((-1, d.shape[-2], d.shape[-1])) pred_seg, softmax_pres = \ self._internal_predict_2D_2Dconv(d, min_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose) predicted_segmentation.append(pred_seg[None]) softmax_pred.append(softmax_pres[None]) predicted_segmentation = np.vstack(predicted_segmentation) softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3)) return predicted_segmentation, softmax_pred def _internal_predict_3D_2Dconv_tiled(self, x: np.ndarray, patch_size: Tuple[int, int], do_mirroring: bool, mirror_axes: tuple = (0, 1), step_size: float = 0.5, regions_class_order: tuple = None, use_gaussian: bool = False, pad_border_mode: str = "edge", pad_kwargs: dict =None, all_in_gpu: bool = False, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]: if all_in_gpu: raise NotImplementedError assert len(x.shape) == 4, "data must be c, x, y, z" predicted_segmentation = [] softmax_pred = [] for s in range(x.shape[1]): pred_seg, softmax_pres = self._internal_predict_2D_2Dconv_tiled( x[:, s], step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian, pad_border_mode, pad_kwargs, all_in_gpu, verbose) predicted_segmentation.append(pred_seg[None]) 
softmax_pred.append(softmax_pres[None]) predicted_segmentation = np.vstack(predicted_segmentation) softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3)) return predicted_segmentation, softmax_pred if __name__ == '__main__': print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (162, 529, 529), 0.5)) print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (162, 529, 529), 1)) print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (162, 529, 529), 0.1)) print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (60, 448, 224), 1)) print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (60, 448, 224), 0.5)) print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (30, 224, 224), 1)) print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (30, 224, 224), 0.125)) print(SegmentationNetwork._compute_steps_for_sliding_window((123, 54, 123), (246, 162, 369), 0.25))
43,801
51.964933
137
py
CoTr
CoTr-main/nnUNet/nnunet/network_architecture/initialization.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from torch import nn

# All (transposed-)convolution module types these initializers act on.
_CONV_TYPES = (nn.Conv3d, nn.Conv2d, nn.ConvTranspose2d, nn.ConvTranspose3d)


class InitWeights_He(object):
    """Kaiming (He) normal initializer for conv / transposed-conv layers.

    Instances are callables meant for ``network.apply(InitWeights_He(...))``:
    the weight of every Conv2d/Conv3d/ConvTranspose2d/ConvTranspose3d is
    re-drawn with ``nn.init.kaiming_normal_`` and the bias (if present) is
    zeroed.  All other module types are left untouched.
    """

    def __init__(self, neg_slope=1e-2):
        # Negative slope of the LeakyReLU assumed to follow the convolution;
        # forwarded to kaiming_normal_ as its ``a`` argument.
        self.neg_slope = neg_slope

    def __call__(self, module):
        if isinstance(module, _CONV_TYPES):
            # In-place initializer; the ``_``-suffixed init functions return
            # their input tensor, so re-assigning module.weight (as the old
            # code did) was redundant.
            nn.init.kaiming_normal_(module.weight, a=self.neg_slope)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)


class InitWeights_XavierUniform(object):
    """Xavier/Glorot uniform initializer for conv / transposed-conv layers.

    Same usage as :class:`InitWeights_He` (pass an instance to
    ``network.apply``); weights are drawn with ``nn.init.xavier_uniform_``
    and biases are zeroed.
    """

    def __init__(self, gain=1):
        # Scaling factor forwarded to xavier_uniform_.
        self.gain = gain

    def __call__(self, module):
        if isinstance(module, _CONV_TYPES):
            nn.init.xavier_uniform_(module.weight, self.gain)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
1,673
41.923077
158
py
CoTr
CoTr-main/nnUNet/nnunet/network_architecture/generic_UNet.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from copy import deepcopy from nnunet.utilities.nd_softmax import softmax_helper from torch import nn import torch import numpy as np from nnunet.network_architecture.initialization import InitWeights_He from nnunet.network_architecture.neural_network import SegmentationNetwork import torch.nn.functional class ConvDropoutNormNonlin(nn.Module): """ fixes a bug in ConvDropoutNormNonlin where lrelu was used regardless of nonlin. Bad. 
""" def __init__(self, input_channels, output_channels, conv_op=nn.Conv2d, conv_kwargs=None, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None): super(ConvDropoutNormNonlin, self).__init__() if nonlin_kwargs is None: nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} if dropout_op_kwargs is None: dropout_op_kwargs = {'p': 0.5, 'inplace': True} if norm_op_kwargs is None: norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1} if conv_kwargs is None: conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True} self.nonlin_kwargs = nonlin_kwargs self.nonlin = nonlin self.dropout_op = dropout_op self.dropout_op_kwargs = dropout_op_kwargs self.norm_op_kwargs = norm_op_kwargs self.conv_kwargs = conv_kwargs self.conv_op = conv_op self.norm_op = norm_op self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs) if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[ 'p'] > 0: self.dropout = self.dropout_op(**self.dropout_op_kwargs) else: self.dropout = None self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs) self.lrelu = self.nonlin(**self.nonlin_kwargs) def forward(self, x): x = self.conv(x) if self.dropout is not None: x = self.dropout(x) return self.lrelu(self.instnorm(x)) class ConvDropoutNonlinNorm(ConvDropoutNormNonlin): def forward(self, x): x = self.conv(x) if self.dropout is not None: x = self.dropout(x) return self.instnorm(self.lrelu(x)) class StackedConvLayers(nn.Module): def __init__(self, input_feature_channels, output_feature_channels, num_convs, conv_op=nn.Conv2d, conv_kwargs=None, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None, basic_block=ConvDropoutNormNonlin): ''' stacks ConvDropoutNormLReLU layers. 
initial_stride will only be applied to first layer in the stack. The other parameters affect all layers :param input_feature_channels: :param output_feature_channels: :param num_convs: :param dilation: :param kernel_size: :param padding: :param dropout: :param initial_stride: :param conv_op: :param norm_op: :param dropout_op: :param inplace: :param neg_slope: :param norm_affine: :param conv_bias: ''' self.input_channels = input_feature_channels self.output_channels = output_feature_channels if nonlin_kwargs is None: nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} if dropout_op_kwargs is None: dropout_op_kwargs = {'p': 0.5, 'inplace': True} if norm_op_kwargs is None: norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1} if conv_kwargs is None: conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True} self.nonlin_kwargs = nonlin_kwargs self.nonlin = nonlin self.dropout_op = dropout_op self.dropout_op_kwargs = dropout_op_kwargs self.norm_op_kwargs = norm_op_kwargs self.conv_kwargs = conv_kwargs self.conv_op = conv_op self.norm_op = norm_op if first_stride is not None: self.conv_kwargs_first_conv = deepcopy(conv_kwargs) self.conv_kwargs_first_conv['stride'] = first_stride else: self.conv_kwargs_first_conv = conv_kwargs super(StackedConvLayers, self).__init__() self.blocks = nn.Sequential( *([basic_block(input_feature_channels, output_feature_channels, self.conv_op, self.conv_kwargs_first_conv, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs)] + [basic_block(output_feature_channels, output_feature_channels, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)])) def forward(self, x): return self.blocks(x) def print_module_training_status(module): if isinstance(module, nn.Conv2d) or isinstance(module, nn.Conv3d) or isinstance(module, 
nn.Dropout3d) or \ isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout) or isinstance(module, nn.InstanceNorm3d) \ or isinstance(module, nn.InstanceNorm2d) or isinstance(module, nn.InstanceNorm1d) \ or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or isinstance(module, nn.BatchNorm1d): print(str(module), module.training) class Upsample(nn.Module): def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False): super(Upsample, self).__init__() self.align_corners = align_corners self.mode = mode self.scale_factor = scale_factor self.size = size def forward(self, x): return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners) class Generic_UNet(SegmentationNetwork): DEFAULT_BATCH_SIZE_3D = 2 DEFAULT_PATCH_SIZE_3D = (64, 192, 160) SPACING_FACTOR_BETWEEN_STAGES = 2 BASE_NUM_FEATURES_3D = 30 MAX_NUMPOOL_3D = 999 MAX_NUM_FILTERS_3D = 320 DEFAULT_PATCH_SIZE_2D = (256, 256) BASE_NUM_FEATURES_2D = 30 DEFAULT_BATCH_SIZE_2D = 50 MAX_NUMPOOL_2D = 999 MAX_FILTERS_2D = 480 use_this_for_batch_size_computation_2D = 19739648 use_this_for_batch_size_computation_3D = 520000000 # 505789440 def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2, feat_map_mul_on_downscale=2, conv_op=nn.Conv2d, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False, final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None, conv_kernel_sizes=None, upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False, max_num_features=None, basic_block=ConvDropoutNormNonlin, seg_output_use_bias=False): """ basically more flexible than v1, architecture is the same Does this look complicated? Nah bro. 
Functionality > usability This does everything you need, including world peace. Questions? -> f.isensee@dkfz.de """ super(Generic_UNet, self).__init__() self.convolutional_upsampling = convolutional_upsampling self.convolutional_pooling = convolutional_pooling self.upscale_logits = upscale_logits if nonlin_kwargs is None: nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} if dropout_op_kwargs is None: dropout_op_kwargs = {'p': 0.5, 'inplace': True} if norm_op_kwargs is None: norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1} self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True} self.nonlin = nonlin self.nonlin_kwargs = nonlin_kwargs self.dropout_op_kwargs = dropout_op_kwargs self.norm_op_kwargs = norm_op_kwargs self.weightInitializer = weightInitializer self.conv_op = conv_op self.norm_op = norm_op self.dropout_op = dropout_op self.num_classes = num_classes self.final_nonlin = final_nonlin self._deep_supervision = deep_supervision self.do_ds = deep_supervision if conv_op == nn.Conv2d: upsample_mode = 'bilinear' pool_op = nn.MaxPool2d transpconv = nn.ConvTranspose2d if pool_op_kernel_sizes is None: pool_op_kernel_sizes = [(2, 2)] * num_pool if conv_kernel_sizes is None: conv_kernel_sizes = [(3, 3)] * (num_pool + 1) elif conv_op == nn.Conv3d: upsample_mode = 'trilinear' pool_op = nn.MaxPool3d transpconv = nn.ConvTranspose3d if pool_op_kernel_sizes is None: pool_op_kernel_sizes = [(2, 2, 2)] * num_pool if conv_kernel_sizes is None: conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1) else: raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op)) self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0, dtype=np.int64) self.pool_op_kernel_sizes = pool_op_kernel_sizes self.conv_kernel_sizes = conv_kernel_sizes self.conv_pad_sizes = [] for krnl in self.conv_kernel_sizes: self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl]) if max_num_features is None: if self.conv_op == nn.Conv3d: 
self.max_num_features = self.MAX_NUM_FILTERS_3D else: self.max_num_features = self.MAX_FILTERS_2D else: self.max_num_features = max_num_features self.conv_blocks_context = [] self.conv_blocks_localization = [] self.td = [] self.tu = [] self.seg_outputs = [] output_features = base_num_features input_features = input_channels for d in range(num_pool): # determine the first stride if d != 0 and self.convolutional_pooling: first_stride = pool_op_kernel_sizes[d - 1] else: first_stride = None self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d] self.conv_kwargs['padding'] = self.conv_pad_sizes[d] # add convolutions self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, first_stride, basic_block=basic_block)) if not self.convolutional_pooling: self.td.append(pool_op(pool_op_kernel_sizes[d])) input_features = output_features output_features = int(np.round(output_features * feat_map_mul_on_downscale)) output_features = min(output_features, self.max_num_features) # now the bottleneck. # determine the first stride if self.convolutional_pooling: first_stride = pool_op_kernel_sizes[-1] else: first_stride = None # the output of the last conv must match the number of features from the skip connection if we are not using # convolutional upsampling. 
If we use convolutional upsampling then the reduction in feature maps will be # done by the transposed conv if self.convolutional_upsampling: final_num_features = output_features else: final_num_features = self.conv_blocks_context[-1].output_channels self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool] self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool] self.conv_blocks_context.append(nn.Sequential( StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, first_stride, basic_block=basic_block), StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block))) # if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here if not dropout_in_localization: old_dropout_p = self.dropout_op_kwargs['p'] self.dropout_op_kwargs['p'] = 0.0 # now lets build the localization pathway for u in range(num_pool): nfeatures_from_down = final_num_features nfeatures_from_skip = self.conv_blocks_context[ -(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2 n_features_after_tu_and_concat = nfeatures_from_skip * 2 # the first conv reduces the number of features to match those of skip # the following convs work on that number of features # if not convolutional upsampling then the final conv reduces the num of features again if u != num_pool - 1 and not self.convolutional_upsampling: final_num_features = self.conv_blocks_context[-(3 + u)].output_channels else: final_num_features = nfeatures_from_skip if not self.convolutional_upsampling: self.tu.append(Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode)) else: self.tu.append(transpconv(nfeatures_from_down, 
nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)], pool_op_kernel_sizes[-(u + 1)], bias=False)) self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[- (u + 1)] self.conv_kwargs['padding'] = self.conv_pad_sizes[- (u + 1)] self.conv_blocks_localization.append(nn.Sequential( StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block), StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block) )) for ds in range(len(self.conv_blocks_localization)): self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes, 1, 1, 0, 1, 1, seg_output_use_bias)) self.upscale_logits_ops = [] cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1] for usl in range(num_pool - 1): if self.upscale_logits: self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]), mode=upsample_mode)) else: self.upscale_logits_ops.append(lambda x: x) if not dropout_in_localization: self.dropout_op_kwargs['p'] = old_dropout_p # register all modules properly self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization) self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context) self.td = nn.ModuleList(self.td) self.tu = nn.ModuleList(self.tu) self.seg_outputs = nn.ModuleList(self.seg_outputs) if self.upscale_logits: self.upscale_logits_ops = nn.ModuleList( self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here if self.weightInitializer is not None: self.apply(self.weightInitializer) # self.apply(print_module_training_status) def forward(self, x): skips = [] seg_outputs = [] for d in 
range(len(self.conv_blocks_context) - 1): x = self.conv_blocks_context[d](x) skips.append(x) if not self.convolutional_pooling: x = self.td[d](x) x = self.conv_blocks_context[-1](x) for u in range(len(self.tu)): x = self.tu[u](x) x = torch.cat((x, skips[-(u + 1)]), dim=1) x = self.conv_blocks_localization[u](x) seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x))) if self._deep_supervision and self.do_ds: return tuple([seg_outputs[-1]] + [i(j) for i, j in zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])]) else: return seg_outputs[-1] @staticmethod def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features, num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False, conv_per_stage=2): """ This only applies for num_conv_per_stage and convolutional_upsampling=True not real vram consumption. just a constant term to which the vram consumption will be approx proportional (+ offset for parameter storage) :param deep_supervision: :param patch_size: :param num_pool_per_axis: :param base_num_features: :param max_num_features: :param num_modalities: :param num_classes: :param pool_op_kernel_sizes: :return: """ if not isinstance(num_pool_per_axis, np.ndarray): num_pool_per_axis = np.array(num_pool_per_axis) npool = len(pool_op_kernel_sizes) map_size = np.array(patch_size) tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features + num_modalities * np.prod(map_size, dtype=np.int64) + num_classes * np.prod(map_size, dtype=np.int64)) num_feat = base_num_features for p in range(npool): for pi in range(len(num_pool_per_axis)): map_size[pi] /= pool_op_kernel_sizes[p][pi] num_feat = min(num_feat * 2, max_num_features) num_blocks = (conv_per_stage * 2 + 1) if p < (npool - 1) else conv_per_stage # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat if 
deep_supervision and p < (npool - 2): tmp += np.prod(map_size, dtype=np.int64) * num_classes # print(p, map_size, num_feat, tmp) return tmp
20,989
45.644444
180
py
CoTr
CoTr-main/nnUNet/nnunet/network_architecture/generic_modular_residual_UNet.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from nnunet.network_architecture.custom_modules.conv_blocks import BasicResidualBlock, ResidualLayer from nnunet.network_architecture.generic_UNet import Upsample from nnunet.network_architecture.generic_modular_UNet import PlainConvUNetDecoder, get_default_network_config from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss from torch import nn from torch.optim import SGD from torch.backends import cudnn class ResidualUNetEncoder(nn.Module): def __init__(self, input_channels, base_num_features, num_blocks_per_stage, feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes, props, default_return_skips=True, max_num_features=480, block=BasicResidualBlock): """ Following UNet building blocks can be added by utilizing the properties this class exposes (TODO) this one includes the bottleneck layer! 
:param input_channels: :param base_num_features: :param num_blocks_per_stage: :param feat_map_mul_on_downscale: :param pool_op_kernel_sizes: :param conv_kernel_sizes: :param props: """ super(ResidualUNetEncoder, self).__init__() self.default_return_skips = default_return_skips self.props = props self.stages = [] self.stage_output_features = [] self.stage_pool_kernel_size = [] self.stage_conv_op_kernel_size = [] assert len(pool_op_kernel_sizes) == len(conv_kernel_sizes) num_stages = len(conv_kernel_sizes) if not isinstance(num_blocks_per_stage, (list, tuple)): num_blocks_per_stage = [num_blocks_per_stage] * num_stages else: assert len(num_blocks_per_stage) == num_stages self.num_blocks_per_stage = num_blocks_per_stage # decoder may need this self.initial_conv = props['conv_op'](input_channels, base_num_features, 3, padding=1, **props['conv_op_kwargs']) self.initial_norm = props['norm_op'](base_num_features, **props['norm_op_kwargs']) self.initial_nonlin = props['nonlin'](**props['nonlin_kwargs']) current_input_features = base_num_features for stage in range(num_stages): current_output_features = min(base_num_features * feat_map_mul_on_downscale ** stage, max_num_features) current_kernel_size = conv_kernel_sizes[stage] current_pool_kernel_size = pool_op_kernel_sizes[stage] current_stage = ResidualLayer(current_input_features, current_output_features, current_kernel_size, props, self.num_blocks_per_stage[stage], current_pool_kernel_size, block) self.stages.append(current_stage) self.stage_output_features.append(current_output_features) self.stage_conv_op_kernel_size.append(current_kernel_size) self.stage_pool_kernel_size.append(current_pool_kernel_size) # update current_input_features current_input_features = current_output_features self.stages = nn.ModuleList(self.stages) def forward(self, x, return_skips=None): """ :param x: :param return_skips: if none then self.default_return_skips is used :return: """ skips = [] x = 
self.initial_nonlin(self.initial_norm(self.initial_conv(x))) for s in self.stages: x = s(x) if self.default_return_skips: skips.append(x) if return_skips is None: return_skips = self.default_return_skips if return_skips: return skips else: return x @staticmethod def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_modalities, pool_op_kernel_sizes, num_conv_per_stage_encoder, feat_map_mul_on_downscale, batch_size): npool = len(pool_op_kernel_sizes) - 1 current_shape = np.array(patch_size) tmp = (num_conv_per_stage_encoder[0] * 2 + 1) * np.prod(current_shape) * base_num_features \ + num_modalities * np.prod(current_shape) num_feat = base_num_features for p in range(1, npool + 1): current_shape = current_shape / np.array(pool_op_kernel_sizes[p]) num_feat = min(num_feat * feat_map_mul_on_downscale, max_num_features) num_convs = num_conv_per_stage_encoder[p] * 2 + 1 # + 1 for conv in skip in first block print(p, num_feat, num_convs, current_shape) tmp += num_convs * np.prod(current_shape) * num_feat return tmp * batch_size class ResidualUNetDecoder(nn.Module): def __init__(self, previous, num_classes, num_blocks_per_stage=None, network_props=None, deep_supervision=False, upscale_logits=False, block=BasicResidualBlock): super(ResidualUNetDecoder, self).__init__() self.num_classes = num_classes self.deep_supervision = deep_supervision """ We assume the bottleneck is part of the encoder, so we can start with upsample -> concat here """ previous_stages = previous.stages previous_stage_output_features = previous.stage_output_features previous_stage_pool_kernel_size = previous.stage_pool_kernel_size previous_stage_conv_op_kernel_size = previous.stage_conv_op_kernel_size if network_props is None: self.props = previous.props else: self.props = network_props if self.props['conv_op'] == nn.Conv2d: transpconv = nn.ConvTranspose2d upsample_mode = "bilinear" elif self.props['conv_op'] == nn.Conv3d: transpconv = nn.ConvTranspose3d upsample_mode = 
"trilinear" else: raise ValueError("unknown convolution dimensionality, conv op: %s" % str(self.props['conv_op'])) if num_blocks_per_stage is None: num_blocks_per_stage = previous.num_blocks_per_stage[:-1][::-1] assert len(num_blocks_per_stage) == len(previous.num_blocks_per_stage) - 1 self.stage_pool_kernel_size = previous_stage_pool_kernel_size self.stage_output_features = previous_stage_output_features self.stage_conv_op_kernel_size = previous_stage_conv_op_kernel_size num_stages = len(previous_stages) - 1 # we have one less as the first stage here is what comes after the # bottleneck self.tus = [] self.stages = [] self.deep_supervision_outputs = [] # only used for upsample_logits cum_upsample = np.cumprod(np.vstack(self.stage_pool_kernel_size), axis=0).astype(int) for i, s in enumerate(np.arange(num_stages)[::-1]): features_below = previous_stage_output_features[s + 1] features_skip = previous_stage_output_features[s] self.tus.append(transpconv(features_below, features_skip, previous_stage_pool_kernel_size[s + 1], previous_stage_pool_kernel_size[s + 1], bias=False)) # after we tu we concat features so now we have 2xfeatures_skip self.stages.append(ResidualLayer(2 * features_skip, features_skip, previous_stage_conv_op_kernel_size[s], self.props, num_blocks_per_stage[i], None, block)) if deep_supervision and s != 0: seg_layer = self.props['conv_op'](features_skip, num_classes, 1, 1, 0, 1, 1, False) if upscale_logits: upsample = Upsample(scale_factor=cum_upsample[s], mode=upsample_mode) self.deep_supervision_outputs.append(nn.Sequential(seg_layer, upsample)) else: self.deep_supervision_outputs.append(seg_layer) self.segmentation_output = self.props['conv_op'](features_skip, num_classes, 1, 1, 0, 1, 1, False) self.tus = nn.ModuleList(self.tus) self.stages = nn.ModuleList(self.stages) self.deep_supervision_outputs = nn.ModuleList(self.deep_supervision_outputs) def forward(self, skips): # skips come from the encoder. 
They are sorted so that the bottleneck is last in the list # what is maybe not perfect is that the TUs and stages here are sorted the other way around # so let's just reverse the order of skips skips = skips[::-1] seg_outputs = [] x = skips[0] # this is the bottleneck for i in range(len(self.tus)): x = self.tus[i](x) x = torch.cat((x, skips[i + 1]), dim=1) x = self.stages[i](x) if self.deep_supervision and (i != len(self.tus) - 1): seg_outputs.append(self.deep_supervision_outputs[i](x)) segmentation = self.segmentation_output(x) if self.deep_supervision: seg_outputs.append(segmentation) return seg_outputs[ ::-1] # seg_outputs are ordered so that the seg from the highest layer is first, the seg from # the bottleneck of the UNet last else: return segmentation @staticmethod def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_classes, pool_op_kernel_sizes, num_blocks_per_stage_decoder, feat_map_mul_on_downscale, batch_size): """ This only applies for num_conv_per_stage and convolutional_upsampling=True not real vram consumption. 
just a constant term to which the vram consumption will be approx proportional (+ offset for parameter storage) :param patch_size: :param num_pool_per_axis: :param base_num_features: :param max_num_features: :return: """ npool = len(pool_op_kernel_sizes) - 1 current_shape = np.array(patch_size) tmp = (num_blocks_per_stage_decoder[-1] * 2 + 1) * np.prod( current_shape) * base_num_features + num_classes * np.prod(current_shape) num_feat = base_num_features for p in range(1, npool): current_shape = current_shape / np.array(pool_op_kernel_sizes[p]) num_feat = min(num_feat * feat_map_mul_on_downscale, max_num_features) num_convs = num_blocks_per_stage_decoder[-(p + 1)] * 2 + 1 + 1 # +1 for transpconv and +1 for conv in skip print(p, num_feat, num_convs, current_shape) tmp += num_convs * np.prod(current_shape) * num_feat return tmp * batch_size class ResidualUNet(SegmentationNetwork): use_this_for_batch_size_computation_2D = 858931200.0 # 1167982592.0 use_this_for_batch_size_computation_3D = 727842816.0 # 1152286720.0 default_base_num_features = 24 default_conv_per_stage = (2, 2, 2, 2, 2, 2, 2, 2) def __init__(self, input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes, props, num_classes, num_blocks_per_stage_decoder, deep_supervision=False, upscale_logits=False, max_features=512, initializer=None, block=BasicResidualBlock): super(ResidualUNet, self).__init__() self.conv_op = props['conv_op'] self.num_classes = num_classes self.encoder = ResidualUNetEncoder(input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes, props, default_return_skips=True, max_num_features=max_features, block=block) self.decoder = ResidualUNetDecoder(self.encoder, num_classes, num_blocks_per_stage_decoder, props, deep_supervision, upscale_logits, block=block) if initializer is not None: self.apply(initializer) def forward(self, x): skips = 
self.encoder(x) return self.decoder(skips) @staticmethod def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_modalities, num_classes, pool_op_kernel_sizes, num_conv_per_stage_encoder, num_conv_per_stage_decoder, feat_map_mul_on_downscale, batch_size): enc = ResidualUNetEncoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_modalities, pool_op_kernel_sizes, num_conv_per_stage_encoder, feat_map_mul_on_downscale, batch_size) dec = ResidualUNetDecoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_classes, pool_op_kernel_sizes, num_conv_per_stage_decoder, feat_map_mul_on_downscale, batch_size) return enc + dec class FabiansUNet(SegmentationNetwork): """ Residual Encoder, Plain conv decoder """ use_this_for_2D_configuration = 1244233721.0 # 1167982592.0 use_this_for_3D_configuration = 1230348801.0 default_blocks_per_stage_encoder = (1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4) default_blocks_per_stage_decoder = (1, 1, 1, 1, 1, 1, 1, 1, 1, 1) default_min_batch_size = 2 # this is what works with the numbers above def __init__(self, input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes, props, num_classes, num_blocks_per_stage_decoder, deep_supervision=False, upscale_logits=False, max_features=512, initializer=None, block=BasicResidualBlock, props_decoder=None): super().__init__() self.conv_op = props['conv_op'] self.num_classes = num_classes self.encoder = ResidualUNetEncoder(input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes, props, default_return_skips=True, max_num_features=max_features, block=block) props['dropout_op_kwargs']['p'] = 0 if props_decoder is None: props_decoder = props self.decoder = PlainConvUNetDecoder(self.encoder, num_classes, num_blocks_per_stage_decoder, props_decoder, deep_supervision, 
upscale_logits) if initializer is not None: self.apply(initializer) def forward(self, x): skips = self.encoder(x) return self.decoder(skips) @staticmethod def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_modalities, num_classes, pool_op_kernel_sizes, num_conv_per_stage_encoder, num_conv_per_stage_decoder, feat_map_mul_on_downscale, batch_size): enc = ResidualUNetEncoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_modalities, pool_op_kernel_sizes, num_conv_per_stage_encoder, feat_map_mul_on_downscale, batch_size) dec = PlainConvUNetDecoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_classes, pool_op_kernel_sizes, num_conv_per_stage_decoder, feat_map_mul_on_downscale, batch_size) return enc + dec def find_3d_configuration(): # lets compute a reference for 3D # we select hyperparameters here so that we get approximately the same patch size as we would get with the # regular unet. This is just my choice. 
You can do whatever you want # These default hyperparemeters will then be used by the experiment planner # since this is more parameter intensive than the UNet, we will test a configuration that has a lot of parameters # herefore we copy the UNet configuration for Task005_Prostate cudnn.deterministic = False cudnn.benchmark = True patch_size = (20, 320, 256) max_num_features = 320 num_modalities = 2 num_classes = 3 batch_size = 2 # now we fiddle with the network specific hyperparameters until everything just barely fits into a titanx blocks_per_stage_encoder = FabiansUNet.default_blocks_per_stage_encoder blocks_per_stage_decoder = FabiansUNet.default_blocks_per_stage_decoder initial_num_features = 32 # we neeed to add a [1, 1, 1] for the res unet because in this implementation all stages of the encoder can have a stride pool_op_kernel_sizes = [[1, 1, 1], [1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]] conv_op_kernel_sizes = [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]] unet = FabiansUNet(num_modalities, initial_num_features, blocks_per_stage_encoder[:len(conv_op_kernel_sizes)], 2, pool_op_kernel_sizes, conv_op_kernel_sizes, get_default_network_config(3, dropout_p=None), num_classes, blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], False, False, max_features=max_num_features).cuda() optimizer = SGD(unet.parameters(), lr=0.1, momentum=0.95) loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {}) dummy_input = torch.rand((batch_size, num_modalities, *patch_size)).cuda() dummy_gt = (torch.rand((batch_size, 1, *patch_size)) * num_classes).round().clamp_(0, 2).cuda().long() for _ in range(20): optimizer.zero_grad() skips = unet.encoder(dummy_input) print([i.shape for i in skips]) output = unet.decoder(skips) l = loss(output, dummy_gt) l.backward() optimizer.step() if _ == 0: torch.cuda.empty_cache() # that should do. 
Now take the network hyperparameters and insert them in FabiansUNet.compute_approx_vram_consumption # whatever number this spits out, save it to FabiansUNet.use_this_for_batch_size_computation_3D print(FabiansUNet.compute_approx_vram_consumption(patch_size, initial_num_features, max_num_features, num_modalities, num_classes, pool_op_kernel_sizes, blocks_per_stage_encoder[:len(conv_op_kernel_sizes)], blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], 2, batch_size)) # the output is 1230348800.0 for me # I increment that number by 1 to allow this configuration be be chosen def find_2d_configuration(): # lets compute a reference for 3D # we select hyperparameters here so that we get approximately the same patch size as we would get with the # regular unet. This is just my choice. You can do whatever you want # These default hyperparemeters will then be used by the experiment planner # since this is more parameter intensive than the UNet, we will test a configuration that has a lot of parameters # herefore we copy the UNet configuration for Task003_Liver cudnn.deterministic = False cudnn.benchmark = True patch_size = (512, 512) max_num_features = 512 num_modalities = 1 num_classes = 3 batch_size = 12 # now we fiddle with the network specific hyperparameters until everything just barely fits into a titanx blocks_per_stage_encoder = FabiansUNet.default_blocks_per_stage_encoder blocks_per_stage_decoder = FabiansUNet.default_blocks_per_stage_decoder initial_num_features = 30 # we neeed to add a [1, 1, 1] for the res unet because in this implementation all stages of the encoder can have a stride pool_op_kernel_sizes = [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]] conv_op_kernel_sizes = [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]] unet = FabiansUNet(num_modalities, initial_num_features, blocks_per_stage_encoder[:len(conv_op_kernel_sizes)], 2, pool_op_kernel_sizes, conv_op_kernel_sizes, get_default_network_config(2, dropout_p=None), 
num_classes, blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], False, False, max_features=max_num_features).cuda() optimizer = SGD(unet.parameters(), lr=0.1, momentum=0.95) loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {}) dummy_input = torch.rand((batch_size, num_modalities, *patch_size)).cuda() dummy_gt = (torch.rand((batch_size, 1, *patch_size)) * num_classes).round().clamp_(0, 2).cuda().long() for _ in range(20): optimizer.zero_grad() skips = unet.encoder(dummy_input) print([i.shape for i in skips]) output = unet.decoder(skips) l = loss(output, dummy_gt) l.backward() optimizer.step() if _ == 0: torch.cuda.empty_cache() # that should do. Now take the network hyperparameters and insert them in FabiansUNet.compute_approx_vram_consumption # whatever number this spits out, save it to FabiansUNet.use_this_for_batch_size_computation_2D print(FabiansUNet.compute_approx_vram_consumption(patch_size, initial_num_features, max_num_features, num_modalities, num_classes, pool_op_kernel_sizes, blocks_per_stage_encoder[:len(conv_op_kernel_sizes)], blocks_per_stage_decoder[:len(conv_op_kernel_sizes)-1], 2, batch_size)) # the output is 1244233728.0 for me # I increment that number by 1 to allow this configuration be be chosen # This will not fit with 32 filters, but so will the regular U-net. We still use 32 filters in training. # This does not matter because we are using mixed precision training now, so a rough memory approximation is OK if __name__ == "__main__": pass
24,392
46.829412
125
py
CoTr
CoTr-main/nnUNet/nnunet/network_architecture/generic_modular_UNet.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from nnunet.network_architecture.custom_modules.conv_blocks import StackedConvLayers from nnunet.network_architecture.generic_UNet import Upsample from nnunet.network_architecture.neural_network import SegmentationNetwork from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss from torch import nn import numpy as np from torch.optim import SGD """ The idea behind this modular U-net ist that we decouple encoder and decoder and thus make things a) a lot more easy to combine and b) enable easy swapping between segmentation or classification mode of the same architecture """ def get_default_network_config(dim=2, dropout_p=None, nonlin="LeakyReLU", norm_type="bn"): """ returns a dictionary that contains pointers to conv, nonlin and norm ops and the default kwargs I like to use :return: """ props = {} if dim == 2: props['conv_op'] = nn.Conv2d props['dropout_op'] = nn.Dropout2d elif dim == 3: props['conv_op'] = nn.Conv3d props['dropout_op'] = nn.Dropout3d else: raise NotImplementedError if norm_type == "bn": if dim == 2: props['norm_op'] = nn.BatchNorm2d elif dim == 3: props['norm_op'] = nn.BatchNorm3d props['norm_op_kwargs'] = {'eps': 1e-5, 'affine': True} elif norm_type == "in": if dim == 2: props['norm_op'] = nn.InstanceNorm2d elif dim == 3: props['norm_op'] = nn.InstanceNorm3d props['norm_op_kwargs'] = 
{'eps': 1e-5, 'affine': True} else: raise NotImplementedError if dropout_p is None: props['dropout_op'] = None props['dropout_op_kwargs'] = {'p': 0, 'inplace': True} else: props['dropout_op_kwargs'] = {'p': dropout_p, 'inplace': True} props['conv_op_kwargs'] = {'stride': 1, 'dilation': 1, 'bias': True} # kernel size will be set by network! if nonlin == "LeakyReLU": props['nonlin'] = nn.LeakyReLU props['nonlin_kwargs'] = {'negative_slope': 1e-2, 'inplace': True} elif nonlin == "ReLU": props['nonlin'] = nn.ReLU props['nonlin_kwargs'] = {'inplace': True} else: raise ValueError return props class PlainConvUNetEncoder(nn.Module): def __init__(self, input_channels, base_num_features, num_blocks_per_stage, feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes, props, default_return_skips=True, max_num_features=480): """ Following UNet building blocks can be added by utilizing the properties this class exposes (TODO) this one includes the bottleneck layer! :param input_channels: :param base_num_features: :param num_blocks_per_stage: :param feat_map_mul_on_downscale: :param pool_op_kernel_sizes: :param conv_kernel_sizes: :param props: """ super(PlainConvUNetEncoder, self).__init__() self.default_return_skips = default_return_skips self.props = props self.stages = [] self.stage_output_features = [] self.stage_pool_kernel_size = [] self.stage_conv_op_kernel_size = [] assert len(pool_op_kernel_sizes) == len(conv_kernel_sizes) num_stages = len(conv_kernel_sizes) if not isinstance(num_blocks_per_stage, (list, tuple)): num_blocks_per_stage = [num_blocks_per_stage] * num_stages else: assert len(num_blocks_per_stage) == num_stages self.num_blocks_per_stage = num_blocks_per_stage # decoder may need this current_input_features = input_channels for stage in range(num_stages): current_output_features = min(int(base_num_features * feat_map_mul_on_downscale ** stage), max_num_features) current_kernel_size = conv_kernel_sizes[stage] current_pool_kernel_size = 
pool_op_kernel_sizes[stage] current_stage = StackedConvLayers(current_input_features, current_output_features, current_kernel_size, props, num_blocks_per_stage[stage], current_pool_kernel_size) self.stages.append(current_stage) self.stage_output_features.append(current_output_features) self.stage_conv_op_kernel_size.append(current_kernel_size) self.stage_pool_kernel_size.append(current_pool_kernel_size) # update current_input_features current_input_features = current_output_features self.stages = nn.ModuleList(self.stages) def forward(self, x, return_skips=None): """ :param x: :param return_skips: if none then self.default_return_skips is used :return: """ skips = [] for s in self.stages: x = s(x) if self.default_return_skips: skips.append(x) if return_skips is None: return_skips = self.default_return_skips if return_skips: return skips else: return x @staticmethod def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_modalities, pool_op_kernel_sizes, num_blocks_per_stage_encoder, feat_map_mul_on_downscale, batch_size): npool = len(pool_op_kernel_sizes) - 1 current_shape = np.array(patch_size) tmp = num_blocks_per_stage_encoder[0] * np.prod(current_shape) * base_num_features \ + num_modalities * np.prod(current_shape) num_feat = base_num_features for p in range(1, npool + 1): current_shape = current_shape / np.array(pool_op_kernel_sizes[p]) num_feat = min(num_feat * feat_map_mul_on_downscale, max_num_features) num_convs = num_blocks_per_stage_encoder[p] print(p, num_feat, num_convs, current_shape) tmp += num_convs * np.prod(current_shape) * num_feat return tmp * batch_size class PlainConvUNetDecoder(nn.Module): def __init__(self, previous, num_classes, num_blocks_per_stage=None, network_props=None, deep_supervision=False, upscale_logits=False): super(PlainConvUNetDecoder, self).__init__() self.num_classes = num_classes self.deep_supervision = deep_supervision """ We assume the bottleneck is part of the encoder, so we can start 
with upsample -> concat here """ previous_stages = previous.stages previous_stage_output_features = previous.stage_output_features previous_stage_pool_kernel_size = previous.stage_pool_kernel_size previous_stage_conv_op_kernel_size = previous.stage_conv_op_kernel_size if network_props is None: self.props = previous.props else: self.props = network_props if self.props['conv_op'] == nn.Conv2d: transpconv = nn.ConvTranspose2d upsample_mode = "bilinear" elif self.props['conv_op'] == nn.Conv3d: transpconv = nn.ConvTranspose3d upsample_mode = "trilinear" else: raise ValueError("unknown convolution dimensionality, conv op: %s" % str(self.props['conv_op'])) if num_blocks_per_stage is None: num_blocks_per_stage = previous.num_blocks_per_stage[:-1][::-1] assert len(num_blocks_per_stage) == len(previous.num_blocks_per_stage) - 1 self.stage_pool_kernel_size = previous_stage_pool_kernel_size self.stage_output_features = previous_stage_output_features self.stage_conv_op_kernel_size = previous_stage_conv_op_kernel_size num_stages = len(previous_stages) - 1 # we have one less as the first stage here is what comes after the # bottleneck self.tus = [] self.stages = [] self.deep_supervision_outputs = [] # only used for upsample_logits cum_upsample = np.cumprod(np.vstack(self.stage_pool_kernel_size), axis=0).astype(int) for i, s in enumerate(np.arange(num_stages)[::-1]): features_below = previous_stage_output_features[s + 1] features_skip = previous_stage_output_features[s] self.tus.append(transpconv(features_below, features_skip, previous_stage_pool_kernel_size[s + 1], previous_stage_pool_kernel_size[s + 1], bias=False)) # after we tu we concat features so now we have 2xfeatures_skip self.stages.append(StackedConvLayers(2 * features_skip, features_skip, previous_stage_conv_op_kernel_size[s], self.props, num_blocks_per_stage[i])) if deep_supervision and s != 0: seg_layer = self.props['conv_op'](features_skip, num_classes, 1, 1, 0, 1, 1, False) if upscale_logits: upsample = 
Upsample(scale_factor=cum_upsample[s], mode=upsample_mode) self.deep_supervision_outputs.append(nn.Sequential(seg_layer, upsample)) else: self.deep_supervision_outputs.append(seg_layer) self.segmentation_output = self.props['conv_op'](features_skip, num_classes, 1, 1, 0, 1, 1, False) self.tus = nn.ModuleList(self.tus) self.stages = nn.ModuleList(self.stages) self.deep_supervision_outputs = nn.ModuleList(self.deep_supervision_outputs) def forward(self, skips, gt=None, loss=None): # skips come from the encoder. They are sorted so that the bottleneck is last in the list # what is maybe not perfect is that the TUs and stages here are sorted the other way around # so let's just reverse the order of skips skips = skips[::-1] seg_outputs = [] x = skips[0] # this is the bottleneck for i in range(len(self.tus)): x = self.tus[i](x) x = torch.cat((x, skips[i + 1]), dim=1) x = self.stages[i](x) if self.deep_supervision and (i != len(self.tus) - 1): tmp = self.deep_supervision_outputs[i](x) if gt is not None: tmp = loss(tmp, gt) seg_outputs.append(tmp) segmentation = self.segmentation_output(x) if self.deep_supervision: tmp = segmentation if gt is not None: tmp = loss(tmp, gt) seg_outputs.append(tmp) return seg_outputs[::-1] # seg_outputs are ordered so that the seg from the highest layer is first, the seg from # the bottleneck of the UNet last else: return segmentation @staticmethod def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_classes, pool_op_kernel_sizes, num_blocks_per_stage_decoder, feat_map_mul_on_downscale, batch_size): """ This only applies for num_blocks_per_stage and convolutional_upsampling=True not real vram consumption. 
just a constant term to which the vram consumption will be approx proportional (+ offset for parameter storage) :param patch_size: :param num_pool_per_axis: :param base_num_features: :param max_num_features: :return: """ npool = len(pool_op_kernel_sizes) - 1 current_shape = np.array(patch_size) tmp = (num_blocks_per_stage_decoder[-1] + 1) * np.prod(current_shape) * base_num_features + num_classes * np.prod(current_shape) num_feat = base_num_features for p in range(1, npool): current_shape = current_shape / np.array(pool_op_kernel_sizes[p]) num_feat = min(num_feat * feat_map_mul_on_downscale, max_num_features) num_convs = num_blocks_per_stage_decoder[-(p+1)] + 1 print(p, num_feat, num_convs, current_shape) tmp += num_convs * np.prod(current_shape) * num_feat return tmp * batch_size class PlainConvUNet(SegmentationNetwork): use_this_for_batch_size_computation_2D = 1167982592.0 use_this_for_batch_size_computation_3D = 1152286720.0 def __init__(self, input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes, props, num_classes, num_blocks_per_stage_decoder, deep_supervision=False, upscale_logits=False, max_features=512, initializer=None): super(PlainConvUNet, self).__init__() self.conv_op = props['conv_op'] self.num_classes = num_classes self.encoder = PlainConvUNetEncoder(input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes, props, default_return_skips=True, max_num_features=max_features) self.decoder = PlainConvUNetDecoder(self.encoder, num_classes, num_blocks_per_stage_decoder, props, deep_supervision, upscale_logits) if initializer is not None: self.apply(initializer) def forward(self, x): skips = self.encoder(x) return self.decoder(skips) @staticmethod def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_modalities, num_classes, pool_op_kernel_sizes, 
num_blocks_per_stage_encoder, num_blocks_per_stage_decoder, feat_map_mul_on_downscale, batch_size): enc = PlainConvUNetEncoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_modalities, pool_op_kernel_sizes, num_blocks_per_stage_encoder, feat_map_mul_on_downscale, batch_size) dec = PlainConvUNetDecoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_classes, pool_op_kernel_sizes, num_blocks_per_stage_decoder, feat_map_mul_on_downscale, batch_size) return enc + dec @staticmethod def compute_reference_for_vram_consumption_3d(): patch_size = (160, 128, 128) pool_op_kernel_sizes = ((1, 1, 1), (2, 2, 2), (2, 2, 2), (2, 2, 2), (2, 2, 2), (2, 2, 2)) conv_per_stage_encoder = (2, 2, 2, 2, 2, 2) conv_per_stage_decoder = (2, 2, 2, 2, 2) return PlainConvUNet.compute_approx_vram_consumption(patch_size, 32, 512, 4, 3, pool_op_kernel_sizes, conv_per_stage_encoder, conv_per_stage_decoder, 2, 2) @staticmethod def compute_reference_for_vram_consumption_2d(): patch_size = (256, 256) pool_op_kernel_sizes = ( (1, 1), # (256, 256) (2, 2), # (128, 128) (2, 2), # (64, 64) (2, 2), # (32, 32) (2, 2), # (16, 16) (2, 2), # (8, 8) (2, 2) # (4, 4) ) conv_per_stage_encoder = (2, 2, 2, 2, 2, 2, 2) conv_per_stage_decoder = (2, 2, 2, 2, 2, 2) return PlainConvUNet.compute_approx_vram_consumption(patch_size, 32, 512, 4, 3, pool_op_kernel_sizes, conv_per_stage_encoder, conv_per_stage_decoder, 2, 56) if __name__ == "__main__": conv_op_kernel_sizes = ((3, 3), (3, 3), (3, 3), (3, 3), (3, 3), (3, 3), (3, 3)) pool_op_kernel_sizes = ((1, 1), (2, 2), (2, 2), (2, 2), (2, 2), (2, 2), (2, 2)) patch_size = (256, 256) batch_size = 56 unet = PlainConvUNet(4, 32, (2, 2, 2, 2, 2, 2, 2), 2, pool_op_kernel_sizes, conv_op_kernel_sizes, get_default_network_config(2, dropout_p=None), 4, (2, 2, 2, 2, 2, 2), False, False, max_features=512).cuda() optimizer = SGD(unet.parameters(), lr=0.1, momentum=0.95) 
unet.compute_reference_for_vram_consumption_3d() unet.compute_reference_for_vram_consumption_2d() dummy_input = torch.rand((batch_size, 4, *patch_size)).cuda() dummy_gt = (torch.rand((batch_size, 1, *patch_size)) * 4).round().clamp_(0, 3).cuda().long() optimizer.zero_grad() skips = unet.encoder(dummy_input) print([i.shape for i in skips]) loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'smooth_in_nom': True, 'do_bg': False, 'rebalance_weights': None, 'background_weight': 1}, {}) output = unet.decoder(skips) l = loss(output, dummy_gt) l.backward() optimizer.step() import hiddenlayer as hl g = hl.build_graph(unet, dummy_input) g.save("/home/fabian/test.pdf") """conv_op_kernel_sizes = ((3, 3, 3), (3, 3, 3), (3, 3, 3), (3, 3, 3), (3, 3, 3), (3, 3, 3)) pool_op_kernel_sizes = ((1, 1, 1), (2, 2, 2), (2, 2, 2), (2, 2, 2), (2, 2, 2), (2, 2, 2)) patch_size = (160, 128, 128) unet = PlainConvUNet(4, 32, (2, 2, 2, 2, 2, 2), 2, pool_op_kernel_sizes, conv_op_kernel_sizes, get_default_network_config(3, dropout_p=None), 4, (2, 2, 2, 2, 2), False, False, max_features=512).cuda() optimizer = SGD(unet.parameters(), lr=0.1, momentum=0.95) unet.compute_reference_for_vram_consumption_3d() unet.compute_reference_for_vram_consumption_2d() dummy_input = torch.rand((2, 4, *patch_size)).cuda() dummy_gt = (torch.rand((2, 1, *patch_size)) * 4).round().clamp_(0, 3).cuda().long() optimizer.zero_grad() skips = unet.encoder(dummy_input) print([i.shape for i in skips]) loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'smooth_in_nom': True, 'do_bg': False, 'rebalance_weights': None, 'background_weight': 1}, {}) output = unet.decoder(skips) l = loss(output, dummy_gt) l.backward() optimizer.step() import hiddenlayer as hl g = hl.build_graph(unet, dummy_input) g.save("/home/fabian/test.pdf")"""
19,951
41.451064
136
py
CoTr
CoTr-main/nnUNet/nnunet/network_architecture/custom_modules/helperModules.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from torch import nn class Identity(nn.Module): def __init__(self, *args, **kwargs): super().__init__() def forward(self, input): return input class MyGroupNorm(nn.GroupNorm): def __init__(self, num_channels, eps=1e-5, affine=True, num_groups=8): super(MyGroupNorm, self).__init__(num_groups, num_channels, eps, affine)
1,051
34.066667
114
py
CoTr
CoTr-main/nnUNet/nnunet/network_architecture/custom_modules/conv_blocks.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from copy import deepcopy from nnunet.network_architecture.custom_modules.helperModules import Identity from torch import nn class ConvDropoutNormReLU(nn.Module): def __init__(self, input_channels, output_channels, kernel_size, network_props): """ if network_props['dropout_op'] is None then no dropout if network_props['norm_op'] is None then no norm :param input_channels: :param output_channels: :param kernel_size: :param network_props: """ super(ConvDropoutNormReLU, self).__init__() network_props = deepcopy(network_props) # network_props is a dict and mutable, so we deepcopy to be safe. 
self.conv = network_props['conv_op'](input_channels, output_channels, kernel_size, padding=[(i - 1) // 2 for i in kernel_size], **network_props['conv_op_kwargs']) # maybe dropout if network_props['dropout_op'] is not None: self.do = network_props['dropout_op'](**network_props['dropout_op_kwargs']) else: self.do = Identity() if network_props['norm_op'] is not None: self.norm = network_props['norm_op'](output_channels, **network_props['norm_op_kwargs']) else: self.norm = Identity() self.nonlin = network_props['nonlin'](**network_props['nonlin_kwargs']) self.all = nn.Sequential(self.conv, self.do, self.norm, self.nonlin) def forward(self, x): return self.all(x) class StackedConvLayers(nn.Module): def __init__(self, input_channels, output_channels, kernel_size, network_props, num_convs, first_stride=None): """ if network_props['dropout_op'] is None then no dropout if network_props['norm_op'] is None then no norm :param input_channels: :param output_channels: :param kernel_size: :param network_props: """ super(StackedConvLayers, self).__init__() network_props = deepcopy(network_props) # network_props is a dict and mutable, so we deepcopy to be safe. 
network_props_first = deepcopy(network_props) if first_stride is not None: network_props_first['conv_op_kwargs']['stride'] = first_stride self.convs = nn.Sequential( ConvDropoutNormReLU(input_channels, output_channels, kernel_size, network_props_first), *[ConvDropoutNormReLU(output_channels, output_channels, kernel_size, network_props) for _ in range(num_convs - 1)] ) def forward(self, x): return self.convs(x) class BasicResidualBlock(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, props, stride=None): """ This is the conv bn nonlin conv bn nonlin kind of block :param in_planes: :param out_planes: :param props: :param override_stride: """ super().__init__() self.kernel_size = kernel_size props['conv_op_kwargs']['stride'] = 1 self.stride = stride self.props = props self.out_planes = out_planes self.in_planes = in_planes if stride is not None: kwargs_conv1 = deepcopy(props['conv_op_kwargs']) kwargs_conv1['stride'] = stride else: kwargs_conv1 = props['conv_op_kwargs'] self.conv1 = props['conv_op'](in_planes, out_planes, kernel_size, padding=[(i - 1) // 2 for i in kernel_size], **kwargs_conv1) self.norm1 = props['norm_op'](out_planes, **props['norm_op_kwargs']) self.nonlin1 = props['nonlin'](**props['nonlin_kwargs']) if props['dropout_op_kwargs']['p'] != 0: self.dropout = props['dropout_op'](**props['dropout_op_kwargs']) else: self.dropout = Identity() self.conv2 = props['conv_op'](out_planes, out_planes, kernel_size, padding=[(i - 1) // 2 for i in kernel_size], **props['conv_op_kwargs']) self.norm2 = props['norm_op'](out_planes, **props['norm_op_kwargs']) self.nonlin2 = props['nonlin'](**props['nonlin_kwargs']) if (self.stride is not None and any((i != 1 for i in self.stride))) or (in_planes != out_planes): stride_here = stride if stride is not None else 1 self.downsample_skip = nn.Sequential(props['conv_op'](in_planes, out_planes, 1, stride_here, bias=False), props['norm_op'](out_planes, **props['norm_op_kwargs'])) else: self.downsample_skip = 
lambda x: x def forward(self, x): residual = x out = self.dropout(self.conv1(x)) out = self.nonlin1(self.norm1(out)) out = self.norm2(self.conv2(out)) residual = self.downsample_skip(residual) out += residual return self.nonlin2(out) class ResidualBottleneckBlock(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, props, stride=None): """ This is the conv bn nonlin conv bn nonlin kind of block :param in_planes: :param out_planes: :param props: :param override_stride: """ super().__init__() if props['dropout_op_kwargs'] is None and props['dropout_op_kwargs'] > 0: raise NotImplementedError("ResidualBottleneckBlock does not yet support dropout!") self.kernel_size = kernel_size props['conv_op_kwargs']['stride'] = 1 self.stride = stride self.props = props self.out_planes = out_planes self.in_planes = in_planes self.bottleneck_planes = out_planes // 4 if stride is not None: kwargs_conv1 = deepcopy(props['conv_op_kwargs']) kwargs_conv1['stride'] = stride else: kwargs_conv1 = props['conv_op_kwargs'] self.conv1 = props['conv_op'](in_planes, self.bottleneck_planes, [1 for _ in kernel_size], padding=[0 for i in kernel_size], **kwargs_conv1) self.norm1 = props['norm_op'](self.bottleneck_planes, **props['norm_op_kwargs']) self.nonlin1 = props['nonlin'](**props['nonlin_kwargs']) self.conv2 = props['conv_op'](self.bottleneck_planes, self.bottleneck_planes, kernel_size, padding=[(i - 1) // 2 for i in kernel_size], **props['conv_op_kwargs']) self.norm2 = props['norm_op'](self.bottleneck_planes, **props['norm_op_kwargs']) self.nonlin2 = props['nonlin'](**props['nonlin_kwargs']) self.conv3 = props['conv_op'](self.bottleneck_planes, out_planes, [1 for _ in kernel_size], padding=[0 for i in kernel_size], **props['conv_op_kwargs']) self.norm3 = props['norm_op'](out_planes, **props['norm_op_kwargs']) self.nonlin3 = props['nonlin'](**props['nonlin_kwargs']) if (self.stride is not None and any((i != 1 for i in self.stride))) or (in_planes != out_planes): stride_here = stride 
if stride is not None else 1 self.downsample_skip = nn.Sequential(props['conv_op'](in_planes, out_planes, 1, stride_here, bias=False), props['norm_op'](out_planes, **props['norm_op_kwargs'])) else: self.downsample_skip = lambda x: x def forward(self, x): residual = x out = self.nonlin1(self.norm1(self.conv1(x))) out = self.nonlin2(self.norm2(self.conv2(out))) out = self.norm3(self.conv3(out)) residual = self.downsample_skip(residual) out += residual return self.nonlin3(out) class ResidualLayer(nn.Module): def __init__(self, input_channels, output_channels, kernel_size, network_props, num_blocks, first_stride=None, block=BasicResidualBlock): super().__init__() network_props = deepcopy(network_props) # network_props is a dict and mutable, so we deepcopy to be safe. self.convs = nn.Sequential( block(input_channels, output_channels, kernel_size, network_props, first_stride), *[block(output_channels, output_channels, kernel_size, network_props) for _ in range(num_blocks - 1)] ) def forward(self, x): return self.convs(x)
9,127
38.860262
143
py
CoTr
CoTr-main/nnUNet/nnunet/network_architecture/custom_modules/feature_response_normalization.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nnunet.utilities.tensor_utilities import mean_tensor from torch import nn import torch from torch.nn.parameter import Parameter import torch.jit class FRN3D(nn.Module): def __init__(self, num_features: int, eps=1e-6, **kwargs): super().__init__() self.eps = eps self.num_features = num_features self.weight = Parameter(torch.ones(1, num_features, 1, 1, 1), True) self.bias = Parameter(torch.zeros(1, num_features, 1, 1, 1), True) self.tau = Parameter(torch.zeros(1, num_features, 1, 1, 1), True) def forward(self, x: torch.Tensor): x = x * torch.rsqrt(mean_tensor(x * x, [2, 3, 4], keepdim=True) + self.eps) return torch.max(self.weight * x + self.bias, self.tau) if __name__ == "__main__": tmp = torch.rand((3, 32, 16, 16, 16)) frn = FRN3D(32) out = frn(tmp)
1,547
34.181818
114
py
CoTr
CoTr-main/nnUNet/nnunet/network_architecture/custom_modules/mish.py
############ # https://github.com/lessw2020/mish/blob/master/mish.py # This code was taken from the repo above and was not created by me (Fabian)! Full credit goes to the original authors ############ import torch import torch.nn as nn import torch.nn.functional as F # Mish - "Mish: A Self Regularized Non-Monotonic Neural Activation Function" # https://arxiv.org/abs/1908.08681v1 # implemented for PyTorch / FastAI by lessw2020 # github: https://github.com/lessw2020/mish class Mish(nn.Module): def __init__(self): super().__init__() def forward(self, x): # inlining this saves 1 second per epoch (V100 GPU) vs having a temp x and then returning x(!) return x * (torch.tanh(F.softplus(x)))
730
29.458333
118
py
CoTr
CoTr-main/nnUNet/nnunet/utilities/nd_softmax.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch import nn import torch.nn.functional as F softmax_helper = lambda x: F.softmax(x, 1)
801
35.454545
114
py
CoTr
CoTr-main/nnUNet/nnunet/utilities/tensor_utilities.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from torch import nn def sum_tensor(inp, axes, keepdim=False): axes = np.unique(axes).astype(int) if keepdim: for ax in axes: inp = inp.sum(int(ax), keepdim=True) else: for ax in sorted(axes, reverse=True): inp = inp.sum(int(ax)) return inp def mean_tensor(inp, axes, keepdim=False): axes = np.unique(axes).astype(int) if keepdim: for ax in axes: inp = inp.mean(int(ax), keepdim=True) else: for ax in sorted(axes, reverse=True): inp = inp.mean(int(ax)) return inp def flip(x, dim): """ flips the tensor at dimension dim (mirroring!) :param x: :param dim: :return: """ indices = [slice(None)] * x.dim() indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, dtype=torch.long, device=x.device) return x[tuple(indices)]
1,624
28.545455
114
py
CoTr
CoTr-main/nnUNet/nnunet/utilities/to_torch.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch def maybe_to_torch(d): if isinstance(d, list): d = [maybe_to_torch(i) if not isinstance(i, torch.Tensor) else i for i in d] elif not isinstance(d, torch.Tensor): d = torch.from_numpy(d).float() return d def to_cuda(data, non_blocking=True, gpu_id=0): if isinstance(data, list): data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data] else: data = data.cuda(gpu_id, non_blocking=True) return data
1,167
35.5
114
py
CoTr
CoTr-main/nnUNet/nnunet/utilities/distributed.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch import distributed from torch import autograd from torch.nn.parallel import DistributedDataParallel as DDP def print_if_rank0(*args): if distributed.get_rank() == 0: print(*args) class awesome_allgather_function(autograd.Function): @staticmethod def forward(ctx, input): world_size = distributed.get_world_size() # create a destination list for the allgather. I'm assuming you're gathering from 3 workers. 
allgather_list = [torch.empty_like(input) for _ in range(world_size)] #if distributed.get_rank() == 0: # import IPython;IPython.embed() distributed.all_gather(allgather_list, input) return torch.cat(allgather_list, dim=0) @staticmethod def backward(ctx, grad_output): #print_if_rank0("backward grad_output len", len(grad_output)) #print_if_rank0("backward grad_output shape", grad_output.shape) grads_per_rank = grad_output.shape[0] // distributed.get_world_size() rank = distributed.get_rank() # We'll receive gradients for the entire catted forward output, so to mimic DataParallel, # return only the slice that corresponds to this process's input: sl = slice(rank * grads_per_rank, (rank + 1) * grads_per_rank) #print("worker", rank, "backward slice", sl) return grad_output[sl] if __name__ == "__main__": import torch.distributed as dist import argparse from torch import nn from torch.optim import Adam argumentparser = argparse.ArgumentParser() argumentparser.add_argument("--local_rank", type=int) args = argumentparser.parse_args() torch.cuda.set_device(args.local_rank) dist.init_process_group(backend='nccl', init_method='env://') rnd = torch.rand((5, 2)).cuda() rnd_gathered = awesome_allgather_function.apply(rnd) print("gathering random tensors\nbefore\b", rnd, "\nafter\n", rnd_gathered) # so far this works as expected print("now running a DDP model") c = nn.Conv2d(2, 3, 3, 1, 1, 1, 1, True).cuda() c = DDP(c) opt = Adam(c.parameters()) bs = 5 if dist.get_rank() == 0: bs = 4 inp = torch.rand((bs, 2, 5, 5)).cuda() out = c(inp) print("output_shape", out.shape) out_gathered = awesome_allgather_function.apply(out) print("output_shape_after_gather", out_gathered.shape) # this also works loss = out_gathered.sum() loss.backward() opt.step()
3,172
34.255556
114
py
CoTr
CoTr-main/nnUNet/nnunet/inference/predict_simple.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import torch from nnunet.inference.predict import predict_from_folder from nnunet.paths import default_plans_identifier, network_training_output_dir, default_cascade_trainer, default_trainer from batchgenerators.utilities.file_and_folder_operations import join, isdir from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name def main(): parser = argparse.ArgumentParser() parser.add_argument("-i", '--input_folder', help="Must contain all modalities for each patient in the correct" " order (same as training). Files must be named " "CASENAME_XXXX.nii.gz where XXXX is the modality " "identifier (0000, 0001, etc)", required=True) parser.add_argument('-o', "--output_folder", required=True, help="folder for saving predictions") parser.add_argument('-t', '--task_name', help='task name or task ID, required.', default=default_plans_identifier, required=True) parser.add_argument('-tr', '--trainer_class_name', help='Name of the nnUNetTrainer used for 2D U-Net, full resolution 3D U-Net and low resolution ' 'U-Net. The default is %s. If you are running inference with the cascade and the folder ' 'pointed to by --lowres_segmentations does not contain the segmentation maps generated by ' 'the low resolution U-Net then the low resolution segmentation maps will be automatically ' 'generated. 
For this case, make sure to set the trainer class here that matches your ' '--cascade_trainer_class_name (this part can be ignored if defaults are used).' % default_trainer, required=False, default=default_trainer) parser.add_argument('-ctr', '--cascade_trainer_class_name', help="Trainer class name used for predicting the 3D full resolution U-Net part of the cascade." "Default is %s" % default_cascade_trainer, required=False, default=default_cascade_trainer) parser.add_argument('-m', '--model', help="2d, 3d_lowres, 3d_fullres or 3d_cascade_fullres. Default: 3d_fullres", default="3d_fullres", required=False) parser.add_argument('-p', '--plans_identifier', help='do not touch this unless you know what you are doing', default=default_plans_identifier, required=False) parser.add_argument('-f', '--folds', nargs='+', default='None', help="folds to use for prediction. Default is None which means that folds will be detected " "automatically in the model output folder") parser.add_argument('-z', '--save_npz', required=False, action='store_true', help="use this if you want to ensemble these predictions with those of other models. Softmax " "probabilities will be saved as compressed numpy arrays in output_folder and can be " "merged between output_folders with nnUNet_ensemble_predictions") parser.add_argument('-l', '--lowres_segmentations', required=False, default='None', help="if model is the highres stage of the cascade then you can use this folder to provide " "predictions from the low resolution 3D U-Net. If this is left at default, the " "predictions will be generated automatically (provided that the 3D low resolution U-Net " "network weights are present") parser.add_argument("--part_id", type=int, required=False, default=0, help="Used to parallelize the prediction of " "the folder over several GPUs. If you " "want to use n GPUs to predict this " "folder you need to run this command " "n times with --part_id=0, ... 
n-1 and " "--num_parts=n (each with a different " "GPU (for example via " "CUDA_VISIBLE_DEVICES=X)") parser.add_argument("--num_parts", type=int, required=False, default=1, help="Used to parallelize the prediction of " "the folder over several GPUs. If you " "want to use n GPUs to predict this " "folder you need to run this command " "n times with --part_id=0, ... n-1 and " "--num_parts=n (each with a different " "GPU (via " "CUDA_VISIBLE_DEVICES=X)") parser.add_argument("--num_threads_preprocessing", required=False, default=6, type=int, help= "Determines many background processes will be used for data preprocessing. Reduce this if you " "run into out of memory (RAM) problems. Default: 6") parser.add_argument("--num_threads_nifti_save", required=False, default=2, type=int, help= "Determines many background processes will be used for segmentation export. Reduce this if you " "run into out of memory (RAM) problems. Default: 2") parser.add_argument("--disable_tta", required=False, default=False, action="store_true", help="set this flag to disable test time data augmentation via mirroring. Speeds up inference " "by roughly factor 4 (2D) or 8 (3D)") parser.add_argument("--overwrite_existing", required=False, default=False, action="store_true", help="Set this flag if the target folder contains predictions that you would like to overwrite") parser.add_argument("--mode", type=str, default="normal", required=False, help="Hands off!") parser.add_argument("--all_in_gpu", type=str, default="None", required=False, help="can be None, False or True. " "Do not touch.") parser.add_argument("--step_size", type=float, default=0.5, required=False, help="don't touch") # parser.add_argument("--interp_order", required=False, default=3, type=int, # help="order of interpolation for segmentations, has no effect if mode=fastest. Do not touch this.") # parser.add_argument("--interp_order_z", required=False, default=0, type=int, # help="order of interpolation along z is z is done differently. 
Do not touch this.") # parser.add_argument("--force_separate_z", required=False, default="None", type=str, # help="force_separate_z resampling. Can be None, True or False, has no effect if mode=fastest. " # "Do not touch this.") parser.add_argument('-chk', help='checkpoint name, default: model_final_checkpoint', required=False, default='model_final_checkpoint') parser.add_argument('--disable_mixed_precision', default=False, action='store_true', required=False, help='Predictions are done with mixed precision by default. This improves speed and reduces ' 'the required vram. If you want to disable mixed precision you can set this flag. Note ' 'that yhis is not recommended (mixed precision is ~2x faster!)') args = parser.parse_args() input_folder = args.input_folder output_folder = args.output_folder part_id = args.part_id num_parts = args.num_parts folds = args.folds save_npz = args.save_npz lowres_segmentations = args.lowres_segmentations num_threads_preprocessing = args.num_threads_preprocessing num_threads_nifti_save = args.num_threads_nifti_save disable_tta = args.disable_tta step_size = args.step_size # interp_order = args.interp_order # interp_order_z = args.interp_order_z # force_separate_z = args.force_separate_z overwrite_existing = args.overwrite_existing mode = args.mode all_in_gpu = args.all_in_gpu model = args.model trainer_class_name = args.trainer_class_name cascade_trainer_class_name = args.cascade_trainer_class_name task_name = args.task_name if not task_name.startswith("Task"): task_id = int(task_name) task_name = convert_id_to_task_name(task_id) assert model in ["2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"], "-m must be 2d, 3d_lowres, 3d_fullres or " \ "3d_cascade_fullres" # if force_separate_z == "None": # force_separate_z = None # elif force_separate_z == "False": # force_separate_z = False # elif force_separate_z == "True": # force_separate_z = True # else: # raise ValueError("force_separate_z must be None, True or False. 
Given: %s" % force_separate_z) if lowres_segmentations == "None": lowres_segmentations = None if isinstance(folds, list): if folds[0] == 'all' and len(folds) == 1: pass else: folds = [int(i) for i in folds] elif folds == "None": folds = None else: raise ValueError("Unexpected value for argument folds") assert all_in_gpu in ['None', 'False', 'True'] if all_in_gpu == "None": all_in_gpu = None elif all_in_gpu == "True": all_in_gpu = True elif all_in_gpu == "False": all_in_gpu = False # we need to catch the case where model is 3d cascade fullres and the low resolution folder has not been set. # In that case we need to try and predict with 3d low res first if model == "3d_cascade_fullres" and lowres_segmentations is None: print("lowres_segmentations is None. Attempting to predict 3d_lowres first...") assert part_id == 0 and num_parts == 1, "if you don't specify a --lowres_segmentations folder for the " \ "inference of the cascade, custom values for part_id and num_parts " \ "are not supported. If you wish to have multiple parts, please " \ "run the 3d_lowres inference first (separately)" model_folder_name = join(network_training_output_dir, "3d_lowres", task_name, trainer_class_name + "__" + args.plans_identifier) assert isdir(model_folder_name), "model output folder not found. 
Expected: %s" % model_folder_name lowres_output_folder = join(output_folder, "3d_lowres_predictions") predict_from_folder(model_folder_name, input_folder, lowres_output_folder, folds, False, num_threads_preprocessing, num_threads_nifti_save, None, part_id, num_parts, not disable_tta, overwrite_existing=overwrite_existing, mode=mode, overwrite_all_in_gpu=all_in_gpu, mixed_precision=not args.disable_mixed_precision, step_size=step_size) lowres_segmentations = lowres_output_folder torch.cuda.empty_cache() print("3d_lowres done") if model == "3d_cascade_fullres": trainer = cascade_trainer_class_name else: trainer = trainer_class_name model_folder_name = join(network_training_output_dir, model, task_name, trainer + "__" + args.plans_identifier) print("using model stored in ", model_folder_name) assert isdir(model_folder_name), "model output folder not found. Expected: %s" % model_folder_name predict_from_folder(model_folder_name, input_folder, output_folder, folds, save_npz, num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations, part_id, num_parts, not disable_tta, overwrite_existing=overwrite_existing, mode=mode, overwrite_all_in_gpu=all_in_gpu, mixed_precision=not args.disable_mixed_precision, step_size=step_size, checkpoint_name=args.chk) if __name__ == "__main__": main()
13,593
59.150442
125
py
CoTr
CoTr-main/nnUNet/nnunet/inference/predict.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from copy import deepcopy from typing import Tuple, Union, List import numpy as np from batchgenerators.augmentations.utils import resize_segmentation from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax, save_segmentation_nifti from batchgenerators.utilities.file_and_folder_operations import * from multiprocessing import Process, Queue import torch import SimpleITK as sitk import shutil from multiprocessing import Pool from nnunet.postprocessing.connected_components import load_remove_save, load_postprocessing from nnunet.training.model_restore import load_model_and_checkpoint_files from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer from nnunet.utilities.one_hot_encoding import to_one_hot def preprocess_save_to_queue(preprocess_fn, q, list_of_lists, output_files, segs_from_prev_stage, classes, transpose_forward): # suppress output # sys.stdout = open(os.devnull, 'w') errors_in = [] for i, l in enumerate(list_of_lists): try: output_file = output_files[i] print("preprocessing", output_file) d, _, dct = preprocess_fn(l) # print(output_file, dct) if segs_from_prev_stage[i] is not None: assert isfile(segs_from_prev_stage[i]) and segs_from_prev_stage[i].endswith( ".nii.gz"), "segs_from_prev_stage" \ " must point to a " \ "segmentation file" seg_prev = 
sitk.GetArrayFromImage(sitk.ReadImage(segs_from_prev_stage[i])) # check to see if shapes match img = sitk.GetArrayFromImage(sitk.ReadImage(l[0])) assert all([i == j for i, j in zip(seg_prev.shape, img.shape)]), "image and segmentation from previous " \ "stage don't have the same pixel array " \ "shape! image: %s, seg_prev: %s" % \ (l[0], segs_from_prev_stage[i]) seg_prev = seg_prev.transpose(transpose_forward) seg_reshaped = resize_segmentation(seg_prev, d.shape[1:], order=1, cval=0) seg_reshaped = to_one_hot(seg_reshaped, classes) d = np.vstack((d, seg_reshaped)).astype(np.float32) """There is a problem with python process communication that prevents us from communicating obejcts larger than 2 GB between processes (basically when the length of the pickle string that will be sent is communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either filename or np.ndarray and will handle this automatically""" print(d.shape) if np.prod(d.shape) > (2e9 / 4 * 0.85): # *0.85 just to be save, 4 because float32 is 4 bytes print( "This output is too large for python process-process communication. 
" "Saving output temporarily to disk") np.save(output_file[:-7] + ".npy", d) d = output_file[:-7] + ".npy" q.put((output_file, (d, dct))) except KeyboardInterrupt: raise KeyboardInterrupt except Exception as e: print("error in", l) print(e) q.put("end") if len(errors_in) > 0: print("There were some errors in the following cases:", errors_in) print("These cases were ignored.") else: print("This worker has ended successfully, no errors to report") # restore output # sys.stdout = sys.__stdout__ def preprocess_multithreaded(trainer, list_of_lists, output_files, num_processes=2, segs_from_prev_stage=None): if segs_from_prev_stage is None: segs_from_prev_stage = [None] * len(list_of_lists) num_processes = min(len(list_of_lists), num_processes) classes = list(range(1, trainer.num_classes)) assert isinstance(trainer, nnUNetTrainer) q = Queue(1) processes = [] for i in range(num_processes): pr = Process(target=preprocess_save_to_queue, args=(trainer.preprocess_patient, q, list_of_lists[i::num_processes], output_files[i::num_processes], segs_from_prev_stage[i::num_processes], classes, trainer.plans['transpose_forward'])) pr.start() processes.append(pr) try: end_ctr = 0 while end_ctr != num_processes: item = q.get() if item == "end": end_ctr += 1 continue else: yield item finally: for p in processes: if p.is_alive(): p.terminate() # this should not happen but better safe than sorry right p.join() q.close() def predict_cases(model, list_of_lists, output_filenames, folds, save_npz, num_threads_preprocessing, num_threads_nifti_save, segs_from_prev_stage=None, do_tta=True, mixed_precision=True, overwrite_existing=False, all_in_gpu=False, step_size=0.5, checkpoint_name="model_final_checkpoint", segmentation_export_kwargs: dict = None): """ :param segmentation_export_kwargs: :param model: folder where the model is saved, must contain fold_x subfolders :param list_of_lists: [[case0_0000.nii.gz, case0_0001.nii.gz], [case1_0000.nii.gz, case1_0001.nii.gz], ...] 
:param output_filenames: [output_file_case0.nii.gz, output_file_case1.nii.gz, ...] :param folds: default: (0, 1, 2, 3, 4) (but can also be 'all' or a subset of the five folds, for example use (0, ) for using only fold_0 :param save_npz: default: False :param num_threads_preprocessing: :param num_threads_nifti_save: :param segs_from_prev_stage: :param do_tta: default: True, can be set to False for a 8x speedup at the cost of a reduced segmentation quality :param overwrite_existing: default: True :param mixed_precision: if None then we take no action. If True/False we overwrite what the model has in its init :return: """ assert len(list_of_lists) == len(output_filenames) if segs_from_prev_stage is not None: assert len(segs_from_prev_stage) == len(output_filenames) pool = Pool(num_threads_nifti_save) results = [] cleaned_output_files = [] for o in output_filenames: dr, f = os.path.split(o) if len(dr) > 0: maybe_mkdir_p(dr) if not f.endswith(".nii.gz"): f, _ = os.path.splitext(f) f = f + ".nii.gz" cleaned_output_files.append(join(dr, f)) if not overwrite_existing: print("number of cases:", len(list_of_lists)) not_done_idx = [i for i, j in enumerate(cleaned_output_files) if not isfile(j)] cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx] list_of_lists = [list_of_lists[i] for i in not_done_idx] if segs_from_prev_stage is not None: segs_from_prev_stage = [segs_from_prev_stage[i] for i in not_done_idx] print("number of cases that still need to be predicted:", len(cleaned_output_files)) print("emptying cuda cache") torch.cuda.empty_cache() print("loading parameters for folds,", folds) trainer, params = load_model_and_checkpoint_files(model, folds, mixed_precision=mixed_precision, checkpoint_name=checkpoint_name) if segmentation_export_kwargs is None: if 'segmentation_export_params' in trainer.plans.keys(): force_separate_z = trainer.plans['segmentation_export_params']['force_separate_z'] interpolation_order = 
trainer.plans['segmentation_export_params']['interpolation_order'] interpolation_order_z = trainer.plans['segmentation_export_params']['interpolation_order_z'] else: force_separate_z = None interpolation_order = 1 interpolation_order_z = 0 else: force_separate_z = segmentation_export_kwargs['force_separate_z'] interpolation_order = segmentation_export_kwargs['interpolation_order'] interpolation_order_z = segmentation_export_kwargs['interpolation_order_z'] print("starting preprocessing generator") preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing, segs_from_prev_stage) print("starting prediction...") all_output_files = [] for preprocessed in preprocessing: output_filename, (d, dct) = preprocessed all_output_files.append(all_output_files) if isinstance(d, str): data = np.load(d) os.remove(d) d = data print("predicting", output_filename) softmax = [] for p in params: trainer.load_checkpoint_ram(p, False) softmax.append(trainer.predict_preprocessed_data_return_seg_and_softmax( d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params['mirror_axes'], use_sliding_window=True, step_size=step_size, use_gaussian=True, all_in_gpu=all_in_gpu, mixed_precision=mixed_precision)[1][None]) softmax = np.vstack(softmax) softmax_mean = np.mean(softmax, 0) transpose_forward = trainer.plans.get('transpose_forward') if transpose_forward is not None: transpose_backward = trainer.plans.get('transpose_backward') softmax_mean = softmax_mean.transpose([0] + [i + 1 for i in transpose_backward]) if save_npz: npz_file = output_filename[:-7] + ".npz" else: npz_file = None if hasattr(trainer, 'regions_class_order'): region_class_order = trainer.regions_class_order else: region_class_order = None """There is a problem with python process communication that prevents us from communicating obejcts larger than 2 GB between processes (basically when the length of the pickle string that will be sent is communicated by the 
multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either filename or np.ndarray and will handle this automatically""" bytes_per_voxel = 4 if all_in_gpu: bytes_per_voxel = 2 # if all_in_gpu then the return value is half (float16) if np.prod(softmax_mean.shape) > (2e9 / bytes_per_voxel * 0.85): # * 0.85 just to be save print( "This output is too large for python process-process communication. Saving output temporarily to disk") np.save(output_filename[:-7] + ".npy", softmax_mean) softmax_mean = output_filename[:-7] + ".npy" results.append(pool.starmap_async(save_segmentation_nifti_from_softmax, ((softmax_mean, output_filename, dct, interpolation_order, region_class_order, None, None, npz_file, None, force_separate_z, interpolation_order_z),) )) print("inference done. Now waiting for the segmentation export to finish...") _ = [i.get() for i in results] # now apply postprocessing # first load the postprocessing properties if they are present. Else raise a well visible warning results = [] pp_file = join(model, "postprocessing.json") if isfile(pp_file): print("postprocessing...") shutil.copy(pp_file, os.path.abspath(os.path.dirname(output_filenames[0]))) # for_which_classes stores for which of the classes everything but the largest connected component needs to be # removed for_which_classes, min_valid_obj_size = load_postprocessing(pp_file) results.append(pool.starmap_async(load_remove_save, zip(output_filenames, output_filenames, [for_which_classes] * len(output_filenames), [min_valid_obj_size] * len(output_filenames)))) _ = [i.get() for i in results] else: print("WARNING! 
Cannot run postprocessing because the postprocessing file is missing. Make sure to run " "consolidate_folds in the output folder of the model first!\nThe folder you need to run this in is " "%s" % model) pool.close() pool.join() def predict_cases_fast(model, list_of_lists, output_filenames, folds, num_threads_preprocessing, num_threads_nifti_save, segs_from_prev_stage=None, do_tta=True, mixed_precision=True, overwrite_existing=False, all_in_gpu=False, step_size=0.5, checkpoint_name="model_final_checkpoint", segmentation_export_kwargs: dict = None): assert len(list_of_lists) == len(output_filenames) if segs_from_prev_stage is not None: assert len(segs_from_prev_stage) == len(output_filenames) pool = Pool(num_threads_nifti_save) results = [] cleaned_output_files = [] for o in output_filenames: dr, f = os.path.split(o) if len(dr) > 0: maybe_mkdir_p(dr) if not f.endswith(".nii.gz"): f, _ = os.path.splitext(f) f = f + ".nii.gz" cleaned_output_files.append(join(dr, f)) if not overwrite_existing: print("number of cases:", len(list_of_lists)) not_done_idx = [i for i, j in enumerate(cleaned_output_files) if not isfile(j)] cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx] list_of_lists = [list_of_lists[i] for i in not_done_idx] if segs_from_prev_stage is not None: segs_from_prev_stage = [segs_from_prev_stage[i] for i in not_done_idx] print("number of cases that still need to be predicted:", len(cleaned_output_files)) print("emptying cuda cache") torch.cuda.empty_cache() print("loading parameters for folds,", folds) trainer, params = load_model_and_checkpoint_files(model, folds, mixed_precision=mixed_precision, checkpoint_name=checkpoint_name) if segmentation_export_kwargs is None: if 'segmentation_export_params' in trainer.plans.keys(): force_separate_z = trainer.plans['segmentation_export_params']['force_separate_z'] interpolation_order = trainer.plans['segmentation_export_params']['interpolation_order'] interpolation_order_z = 
trainer.plans['segmentation_export_params']['interpolation_order_z'] else: force_separate_z = None interpolation_order = 1 interpolation_order_z = 0 else: force_separate_z = segmentation_export_kwargs['force_separate_z'] interpolation_order = segmentation_export_kwargs['interpolation_order'] interpolation_order_z = segmentation_export_kwargs['interpolation_order_z'] print("starting preprocessing generator") preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing, segs_from_prev_stage) print("starting prediction...") for preprocessed in preprocessing: print("getting data from preprocessor") output_filename, (d, dct) = preprocessed print("got something") if isinstance(d, str): print("what I got is a string, so I need to load a file") data = np.load(d) os.remove(d) d = data # preallocate the output arrays # same dtype as the return value in predict_preprocessed_data_return_seg_and_softmax (saves time) softmax_aggr = None # np.zeros((trainer.num_classes, *d.shape[1:]), dtype=np.float16) all_seg_outputs = np.zeros((len(params), *d.shape[1:]), dtype=int) print("predicting", output_filename) for i, p in enumerate(params): trainer.load_checkpoint_ram(p, False) res = trainer.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params['mirror_axes'], use_sliding_window=True, step_size=step_size, use_gaussian=True, all_in_gpu=all_in_gpu, mixed_precision=mixed_precision) if len(params) > 1: # otherwise we dont need this and we can save ourselves the time it takes to copy that print("aggregating softmax") if softmax_aggr is None: softmax_aggr = res[1] else: softmax_aggr += res[1] all_seg_outputs[i] = res[0] print("obtaining segmentation map") if len(params) > 1: # we dont need to normalize the softmax by 1 / len(params) because this would not change the outcome of the argmax seg = softmax_aggr.argmax(0) else: seg = all_seg_outputs[0] print("applying transpose_backward") 
transpose_forward = trainer.plans.get('transpose_forward') if transpose_forward is not None: transpose_backward = trainer.plans.get('transpose_backward') seg = seg.transpose([i for i in transpose_backward]) print("initializing segmentation export") results.append(pool.starmap_async(save_segmentation_nifti, ((seg, output_filename, dct, interpolation_order, force_separate_z, interpolation_order_z),) )) print("done") print("inference done. Now waiting for the segmentation export to finish...") _ = [i.get() for i in results] # now apply postprocessing # first load the postprocessing properties if they are present. Else raise a well visible warning results = [] pp_file = join(model, "postprocessing.json") if isfile(pp_file): print("postprocessing...") shutil.copy(pp_file, os.path.dirname(output_filenames[0])) # for_which_classes stores for which of the classes everything but the largest connected component needs to be # removed for_which_classes, min_valid_obj_size = load_postprocessing(pp_file) results.append(pool.starmap_async(load_remove_save, zip(output_filenames, output_filenames, [for_which_classes] * len(output_filenames), [min_valid_obj_size] * len(output_filenames)))) _ = [i.get() for i in results] else: print("WARNING! Cannot run postprocessing because the postprocessing file is missing. 
Make sure to run " "consolidate_folds in the output folder of the model first!\nThe folder you need to run this in is " "%s" % model) pool.close() pool.join() def predict_cases_fastest(model, list_of_lists, output_filenames, folds, num_threads_preprocessing, num_threads_nifti_save, segs_from_prev_stage=None, do_tta=True, mixed_precision=True, overwrite_existing=False, all_in_gpu=True, step_size=0.5, checkpoint_name="model_final_checkpoint"): assert len(list_of_lists) == len(output_filenames) if segs_from_prev_stage is not None: assert len(segs_from_prev_stage) == len(output_filenames) pool = Pool(num_threads_nifti_save) results = [] cleaned_output_files = [] for o in output_filenames: dr, f = os.path.split(o) if len(dr) > 0: maybe_mkdir_p(dr) if not f.endswith(".nii.gz"): f, _ = os.path.splitext(f) f = f + ".nii.gz" cleaned_output_files.append(join(dr, f)) if not overwrite_existing: print("number of cases:", len(list_of_lists)) not_done_idx = [i for i, j in enumerate(cleaned_output_files) if not isfile(j)] cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx] list_of_lists = [list_of_lists[i] for i in not_done_idx] if segs_from_prev_stage is not None: segs_from_prev_stage = [segs_from_prev_stage[i] for i in not_done_idx] print("number of cases that still need to be predicted:", len(cleaned_output_files)) print("emptying cuda cache") torch.cuda.empty_cache() print("loading parameters for folds,", folds) trainer, params = load_model_and_checkpoint_files(model, folds, mixed_precision=mixed_precision, checkpoint_name=checkpoint_name) print("starting preprocessing generator") preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing, segs_from_prev_stage) print("starting prediction...") for preprocessed in preprocessing: print("getting data from preprocessor") output_filename, (d, dct) = preprocessed print("got something") if isinstance(d, str): print("what I got is a string, so I need to load a 
file") data = np.load(d) os.remove(d) d = data # preallocate the output arrays # same dtype as the return value in predict_preprocessed_data_return_seg_and_softmax (saves time) all_softmax_outputs = np.zeros((len(params), trainer.num_classes, *d.shape[1:]), dtype=np.float16) all_seg_outputs = np.zeros((len(params), *d.shape[1:]), dtype=int) print("predicting", output_filename) for i, p in enumerate(params): trainer.load_checkpoint_ram(p, False) res = trainer.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params['mirror_axes'], use_sliding_window=True, step_size=step_size, use_gaussian=True, all_in_gpu=all_in_gpu, mixed_precision=mixed_precision) if len(params) > 1: # otherwise we dont need this and we can save ourselves the time it takes to copy that all_softmax_outputs[i] = res[1] all_seg_outputs[i] = res[0] print("aggregating predictions") if len(params) > 1: softmax_mean = np.mean(all_softmax_outputs, 0) seg = softmax_mean.argmax(0) else: seg = all_seg_outputs[0] print("applying transpose_backward") transpose_forward = trainer.plans.get('transpose_forward') if transpose_forward is not None: transpose_backward = trainer.plans.get('transpose_backward') seg = seg.transpose([i for i in transpose_backward]) print("initializing segmentation export") results.append(pool.starmap_async(save_segmentation_nifti, ((seg, output_filename, dct, 0, None),) )) print("done") print("inference done. Now waiting for the segmentation export to finish...") _ = [i.get() for i in results] # now apply postprocessing # first load the postprocessing properties if they are present. 
Else raise a well visible warning results = [] pp_file = join(model, "postprocessing.json") if isfile(pp_file): print("postprocessing...") shutil.copy(pp_file, os.path.dirname(output_filenames[0])) # for_which_classes stores for which of the classes everything but the largest connected component needs to be # removed for_which_classes, min_valid_obj_size = load_postprocessing(pp_file) results.append(pool.starmap_async(load_remove_save, zip(output_filenames, output_filenames, [for_which_classes] * len(output_filenames), [min_valid_obj_size] * len(output_filenames)))) _ = [i.get() for i in results] else: print("WARNING! Cannot run postprocessing because the postprocessing file is missing. Make sure to run " "consolidate_folds in the output folder of the model first!\nThe folder you need to run this in is " "%s" % model) pool.close() pool.join() def check_input_folder_and_return_caseIDs(input_folder, expected_num_modalities): print("This model expects %d input modalities for each image" % expected_num_modalities) files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True) maybe_case_ids = np.unique([i[:-12] for i in files]) remaining = deepcopy(files) missing = [] assert len(files) > 0, "input folder did not contain any images (expected to find .nii.gz file endings)" # now check if all required files are present and that no unexpected files are remaining for c in maybe_case_ids: for n in range(expected_num_modalities): expected_output_file = c + "_%04.0d.nii.gz" % n if not isfile(join(input_folder, expected_output_file)): missing.append(expected_output_file) else: remaining.remove(expected_output_file) print("Found %d unique case ids, here are some examples:" % len(maybe_case_ids), np.random.choice(maybe_case_ids, min(len(maybe_case_ids), 10))) print("If they don't look right, make sure to double check your filenames. They must end with _0000.nii.gz etc") if len(remaining) > 0: print("found %d unexpected remaining files in the folder. 
Here are some examples:" % len(remaining), np.random.choice(remaining, min(len(remaining), 10))) if len(missing) > 0: print("Some files are missing:") print(missing) raise RuntimeError("missing files in input_folder") return maybe_case_ids def predict_from_folder(model: str, input_folder: str, output_folder: str, folds: Union[Tuple[int], List[int]], save_npz: bool, num_threads_preprocessing: int, num_threads_nifti_save: int, lowres_segmentations: Union[str, None], part_id: int, num_parts: int, tta: bool, mixed_precision: bool = True, overwrite_existing: bool = True, mode: str = 'normal', overwrite_all_in_gpu: bool = None, step_size: float = 0.5, checkpoint_name: str = "model_final_checkpoint", segmentation_export_kwargs: dict = None): """ here we use the standard naming scheme to generate list_of_lists and output_files needed by predict_cases :param model: :param input_folder: :param output_folder: :param folds: :param save_npz: :param num_threads_preprocessing: :param num_threads_nifti_save: :param lowres_segmentations: :param part_id: :param num_parts: :param tta: :param mixed_precision: :param overwrite_existing: if not None then it will be overwritten with whatever is in there. 
None is default (no overwrite) :return: """ maybe_mkdir_p(output_folder) shutil.copy(join(model, 'plans.pkl'), output_folder) assert isfile(join(model, "plans.pkl")), "Folder with saved model weights must contain a plans.pkl file" expected_num_modalities = load_pickle(join(model, "plans.pkl"))['num_modalities'] # check input folder integrity case_ids = check_input_folder_and_return_caseIDs(input_folder, expected_num_modalities) output_files = [join(output_folder, i + ".nii.gz") for i in case_ids] all_files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True) list_of_lists = [[join(input_folder, i) for i in all_files if i[:len(j)].startswith(j) and len(i) == (len(j) + 12)] for j in case_ids] if lowres_segmentations is not None: assert isdir(lowres_segmentations), "if lowres_segmentations is not None then it must point to a directory" lowres_segmentations = [join(lowres_segmentations, i + ".nii.gz") for i in case_ids] assert all([isfile(i) for i in lowres_segmentations]), "not all lowres_segmentations files are present. 
" \ "(I was searching for case_id.nii.gz in that folder)" lowres_segmentations = lowres_segmentations[part_id::num_parts] else: lowres_segmentations = None if mode == "normal": if overwrite_all_in_gpu is None: all_in_gpu = False else: all_in_gpu = overwrite_all_in_gpu return predict_cases(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds, save_npz, num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations, tta, mixed_precision=mixed_precision, overwrite_existing=overwrite_existing, all_in_gpu=all_in_gpu, step_size=step_size, checkpoint_name=checkpoint_name, segmentation_export_kwargs=segmentation_export_kwargs) elif mode == "fast": if overwrite_all_in_gpu is None: all_in_gpu = True else: all_in_gpu = overwrite_all_in_gpu assert save_npz is False return predict_cases_fast(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds, num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations, tta, mixed_precision=mixed_precision, overwrite_existing=overwrite_existing, all_in_gpu=all_in_gpu, step_size=step_size, checkpoint_name=checkpoint_name, segmentation_export_kwargs=segmentation_export_kwargs) elif mode == "fastest": if overwrite_all_in_gpu is None: all_in_gpu = True else: all_in_gpu = overwrite_all_in_gpu assert save_npz is False return predict_cases_fastest(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds, num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations, tta, mixed_precision=mixed_precision, overwrite_existing=overwrite_existing, all_in_gpu=all_in_gpu, step_size=step_size, checkpoint_name=checkpoint_name) else: raise ValueError("unrecognized mode. Must be normal, fast or fastest") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-i", '--input_folder', help="Must contain all modalities for each patient in the correct" " order (same as training). 
Files must be named " "CASENAME_XXXX.nii.gz where XXXX is the modality " "identifier (0000, 0001, etc)", required=True) parser.add_argument('-o', "--output_folder", required=True, help="folder for saving predictions") parser.add_argument('-m', '--model_output_folder', help='model output folder. Will automatically discover the folds ' 'that were ' 'run and use those as an ensemble', required=True) parser.add_argument('-f', '--folds', nargs='+', default='None', help="folds to use for prediction. Default is None " "which means that folds will be detected " "automatically in the model output folder") parser.add_argument('-z', '--save_npz', required=False, action='store_true', help="use this if you want to ensemble" " these predictions with those of" " other models. Softmax " "probabilities will be saved as " "compresed numpy arrays in " "output_folder and can be merged " "between output_folders with " "merge_predictions.py") parser.add_argument('-l', '--lowres_segmentations', required=False, default='None', help="if model is the highres " "stage of the cascade then you need to use -l to specify where the segmentations of the " "corresponding lowres unet are. Here they are required to do a prediction") parser.add_argument("--part_id", type=int, required=False, default=0, help="Used to parallelize the prediction of " "the folder over several GPUs. If you " "want to use n GPUs to predict this " "folder you need to run this command " "n times with --part_id=0, ... n-1 and " "--num_parts=n (each with a different " "GPU (for example via " "CUDA_VISIBLE_DEVICES=X)") parser.add_argument("--num_parts", type=int, required=False, default=1, help="Used to parallelize the prediction of " "the folder over several GPUs. If you " "want to use n GPUs to predict this " "folder you need to run this command " "n times with --part_id=0, ... 
n-1 and " "--num_parts=n (each with a different " "GPU (via " "CUDA_VISIBLE_DEVICES=X)") parser.add_argument("--num_threads_preprocessing", required=False, default=6, type=int, help= "Determines many background processes will be used for data preprocessing. Reduce this if you " "run into out of memory (RAM) problems. Default: 6") parser.add_argument("--num_threads_nifti_save", required=False, default=2, type=int, help= "Determines many background processes will be used for segmentation export. Reduce this if you " "run into out of memory (RAM) problems. Default: 2") parser.add_argument("--tta", required=False, type=int, default=1, help="Set to 0 to disable test time data " "augmentation (speedup of factor " "4(2D)/8(3D)), " "lower quality segmentations") parser.add_argument("--overwrite_existing", required=False, type=int, default=1, help="Set this to 0 if you need " "to resume a previous " "prediction. Default: 1 " "(=existing segmentations " "in output_folder will be " "overwritten)") parser.add_argument("--mode", type=str, default="normal", required=False) parser.add_argument("--all_in_gpu", type=str, default="None", required=False, help="can be None, False or True") parser.add_argument("--step_size", type=float, default=0.5, required=False, help="don't touch") # parser.add_argument("--interp_order", required=False, default=3, type=int, # help="order of interpolation for segmentations, has no effect if mode=fastest") # parser.add_argument("--interp_order_z", required=False, default=0, type=int, # help="order of interpolation along z is z is done differently") # parser.add_argument("--force_separate_z", required=False, default="None", type=str, # help="force_separate_z resampling. Can be None, True or False, has no effect if mode=fastest") parser.add_argument('--disable_mixed_precision', default=False, action='store_true', required=False, help='Predictions are done with mixed precision by default. This improves speed and reduces ' 'the required vram. 
If you want to disable mixed precision you can set this flag. Note ' 'that yhis is not recommended (mixed precision is ~2x faster!)') args = parser.parse_args() input_folder = args.input_folder output_folder = args.output_folder part_id = args.part_id num_parts = args.num_parts model = args.model_output_folder folds = args.folds save_npz = args.save_npz lowres_segmentations = args.lowres_segmentations num_threads_preprocessing = args.num_threads_preprocessing num_threads_nifti_save = args.num_threads_nifti_save tta = args.tta step_size = args.step_size # interp_order = args.interp_order # interp_order_z = args.interp_order_z # force_separate_z = args.force_separate_z # if force_separate_z == "None": # force_separate_z = None # elif force_separate_z == "False": # force_separate_z = False # elif force_separate_z == "True": # force_separate_z = True # else: # raise ValueError("force_separate_z must be None, True or False. Given: %s" % force_separate_z) overwrite = args.overwrite_existing mode = args.mode all_in_gpu = args.all_in_gpu if lowres_segmentations == "None": lowres_segmentations = None if isinstance(folds, list): if folds[0] == 'all' and len(folds) == 1: pass else: folds = [int(i) for i in folds] elif folds == "None": folds = None else: raise ValueError("Unexpected value for argument folds") if tta == 0: tta = False elif tta == 1: tta = True else: raise ValueError("Unexpected value for tta, Use 1 or 0") if overwrite == 0: overwrite = False elif overwrite == 1: overwrite = True else: raise ValueError("Unexpected value for overwrite, Use 1 or 0") assert all_in_gpu in ['None', 'False', 'True'] if all_in_gpu == "None": all_in_gpu = None elif all_in_gpu == "True": all_in_gpu = True elif all_in_gpu == "False": all_in_gpu = False predict_from_folder(model, input_folder, output_folder, folds, save_npz, num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations, part_id, num_parts, tta, mixed_precision=not args.disable_mixed_precision, 
overwrite_existing=overwrite, mode=mode, overwrite_all_in_gpu=all_in_gpu, step_size=step_size)
42,543
51.98132
182
py
CoTr
CoTr-main/CoTr_package/CoTr/training/model_restore.py
import CoTr import torch from batchgenerators.utilities.file_and_folder_operations import * import importlib import pkgutil from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer def recursive_find_python_class(folder, trainer_name, current_module): tr = None for importer, modname, ispkg in pkgutil.iter_modules(folder): # print(modname, ispkg) if not ispkg: m = importlib.import_module(current_module + "." + modname) # m = importlib.import_module(modname) if hasattr(m, trainer_name): tr = getattr(m, trainer_name) break if tr is None: for importer, modname, ispkg in pkgutil.iter_modules(folder): if ispkg: next_current_module = current_module + "." + modname tr = recursive_find_python_class([join(folder[0], modname)], trainer_name, current_module=next_current_module) if tr is not None: break return tr def restore_model(pkl_file, checkpoint=None, train=False, fp16=None): info = load_pickle(pkl_file) init = info['init'] name = info['name'] search_in = join(CoTr.__path__[0], "training", "network_training") tr = recursive_find_python_class([search_in], name, current_module="CoTr.training.network_training") if tr is None: try: import meddec # search_in = join(meddec.__path__[0], "model_training") search_in = (os.path.dirname(os.path.abspath(__file__)), 'network_training') tr = recursive_find_python_class([search_in], name, current_module="training.network_training") except ImportError: pass if tr is None: raise RuntimeError("Could not find the model trainer specified in checkpoint in nnunet.trainig.network_training. If it " "is not located there, please move it or change the code of restore_model. Your model " "trainer can be located in any directory within nnunet.trainig.network_training (search is recursive)." "\nDebug info: \ncheckpoint file: %s\nName of trainer: %s " % (checkpoint, name)) assert issubclass(tr, nnUNetTrainer), "The network trainer was found but is not a subclass of nnUNetTrainer. " \ "Please make it so!" 
trainer = tr(*init) if fp16 is not None: trainer.fp16 = fp16 trainer.process_plans(info['plans']) if checkpoint is not None: trainer.load_checkpoint(checkpoint, train) return trainer def load_best_model_for_inference(folder): checkpoint = join(folder, "model_best.model") pkl_file = checkpoint + ".pkl" return restore_model(pkl_file, checkpoint, False) def load_model_and_checkpoint_files(folder, folds=None, mixed_precision=None, checkpoint_name="model_best"): """ used for if you need to ensemble the five models of a cross-validation. This will restore the model from the checkpoint in fold 0, load all parameters of the five folds in ram and return both. This will allow for fast switching between parameters (as opposed to loading them form disk each time). This is best used for inference and test prediction :param folder: :param folds: :param mixed_precision: if None then we take no action. If True/False we overwrite what the model has in its init :return: """ if isinstance(folds, str): folds = [join(folder, "all")] assert isdir(folds[0]), "no output folder for fold %s found" % folds elif isinstance(folds, (list, tuple)): if len(folds) == 1 and folds[0] == "all": folds = [join(folder, "all")] else: folds = [join(folder, "fold_%d" % i) for i in folds] assert all([isdir(i) for i in folds]), "list of folds specified but not all output folders are present" elif isinstance(folds, int): folds = [join(folder, "fold_%d" % folds)] assert all([isdir(i) for i in folds]), "output folder missing for fold %d" % folds elif folds is None: print("folds is None so we will automatically look for output folders (not using \'all\'!)") folds = subfolders(folder, prefix="fold") print("found the following folds: ", folds) else: raise ValueError("Unknown value for folds. Type: %s. 
Expected: list of int, int, str or None", str(type(folds))) trainer = restore_model(join(folds[0], "%s.model.pkl" % checkpoint_name), fp16=mixed_precision) trainer.output_folder = folder trainer.output_folder_base = folder trainer.update_fold(0) trainer.initialize(False) all_best_model_files = [join(i, "%s.model" % checkpoint_name) for i in folds] print("using the following model files: ", all_best_model_files) all_params = [torch.load(i, map_location=torch.device('cpu')) for i in all_best_model_files] return trainer, all_params
4,979
43.070796
130
py
CoTr
CoTr-main/CoTr_package/CoTr/training/network_training/nnUNetTrainerV2_ResTrans.py
from collections import OrderedDict from typing import Tuple import numpy as np import torch import shutil from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2 from nnunet.utilities.to_torch import maybe_to_torch, to_cuda from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from CoTr.network_architecture.neural_network import SegmentationNetwork from nnunet.training.data_augmentation.default_data_augmentation import default_2D_augmentation_params, \ get_patch_size, default_3D_augmentation_params from nnunet.training.dataloading.dataset_loading import unpack_dataset from CoTr.training.network_training.nnUNetTrainer import nnUNetTrainer from nnunet.utilities.nd_softmax import softmax_helper from sklearn.model_selection import KFold from torch import nn from torch.cuda.amp import autocast from nnunet.training.learning_rate.poly_lr import poly_lr from batchgenerators.utilities.file_and_folder_operations import * from nnunet.network_architecture.generic_modular_UNet import get_default_network_config from CoTr.network_architecture.ResTranUnet import ResTranUnet class nnUNetTrainerV2_ResTrans(nnUNetTrainer): def __init__(self, plans_file, fold, norm_cfg, activation_cfg, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) self.max_num_epochs = 1000 self.initial_lr = 1e-2 self.deep_supervision_scales = None self.ds_loss_weights = None self.norm_cfg = norm_cfg self.activation_cfg = activation_cfg self.pin_memory = True self.save_best_checkpoint = True def initialize(self, training=True, force_load_plans=False): """ - replaced get_default_augmentation with get_moreDA_augmentation - 
enforce to only run this code once - loss function wrapper for deep supervision :param training: :param force_load_plans: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() print("Patch size is %s" % self.plans['plans_per_stage'][1]['patch_size']) if self.norm_cfg=='BN': self.plans['plans_per_stage'][1]['batch_size'] = 8 # self.plans['plans_per_stage'][1]['batch_size'] = 1 #Debug print("Batch size is %s" % self.plans['plans_per_stage'][1]['batch_size']) self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights # now wrap the loss self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights) ################# END ################### self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) print("done") else: print( "INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_moreDA_augmentation( self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params, deep_supervision_scales=self.deep_supervision_scales, pin_memory=self.pin_memory ) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel)) else: self.print_to_log_file('self.was_initialized is True, not running self.initialize again') self.was_initialized = True def initialize_network(self): """ - momentum 0.99 - SGD instead of Adam - self.lr_scheduler = None because we do poly_lr - deep supervision = True - i am sure I forgot something here Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though :return: """ self.network = ResTranUnet(norm_cfg=self.norm_cfg, activation_cfg=self.activation_cfg, img_size=self.plans['plans_per_stage'][1]['patch_size'], num_classes=self.num_classes, weight_std=False, deep_supervision=True).cuda() total = sum([param.nelement() for param in self.network.parameters()]) print(' + Number of Network Params: %.2f(e6)' % (total / 1e6)) if torch.cuda.is_available(): self.network.cuda() self.network.inference_apply_nonlin = softmax_helper def initialize_optimizer_and_scheduler(self): assert self.network is not None, "self.initialize_network must be called first" self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, momentum=0.99, nesterov=True) self.lr_scheduler = None def run_online_evaluation(self, output, target): """ due to deep supervision the return value and the reference are now lists of tensors. 
We only need the full resolution output because this is what we are interested in in the end. The others are ignored :param output: :param target: :return: """ target = target[0] output = output[0] return super().run_online_evaluation(output, target) def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, segmentation_export_kwargs: dict = None): """ We need to wrap this because we need to enforce self.network.do_ds = False for prediction """ ds = self.network.do_ds self.network.do_ds = False ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size, save_softmax=save_softmax, use_gaussian=use_gaussian, overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug, all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs) self.network.do_ds = ds return ret def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True, mirror_axes: Tuple[int] = None, use_sliding_window: bool = True, step_size: float = 0.5, use_gaussian: bool = True, pad_border_mode: str = 'constant', pad_kwargs: dict = None, all_in_gpu: bool = False, verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]: """ We need to wrap this because we need to enforce self.network.do_ds = False for prediction """ ds = self.network.do_ds self.network.do_ds = False ret = super().predict_preprocessed_data_return_seg_and_softmax(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, use_gaussian=use_gaussian, pad_border_mode=pad_border_mode, pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose, mixed_precision=mixed_precision) self.network.do_ds = ds return ret def 
run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False): """ gradient clipping improves training stability :param data_generator: :param do_backprop: :param run_online_evaluation: :return: """ data_dict = next(data_generator) data = data_dict['data'] target = data_dict['target'] data = maybe_to_torch(data) target = maybe_to_torch(target) if torch.cuda.is_available(): data = to_cuda(data) target = to_cuda(target) self.optimizer.zero_grad() if self.fp16: with autocast(): output = self.network(data) del data l = self.loss(output, target) if do_backprop: self.amp_grad_scaler.scale(l).backward() self.amp_grad_scaler.unscale_(self.optimizer) torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) self.amp_grad_scaler.step(self.optimizer) self.amp_grad_scaler.update() else: output = self.network(data) del data l = self.loss(output, target) if do_backprop: l.backward() torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) self.optimizer.step() if run_online_evaluation: self.run_online_evaluation(output, target) del target return l.detach().cpu().numpy() def do_split(self): """ Create a split and save it as splits_final.pkl file in the preprocessed data directory. 
""" # if the split file does not exist we need to create it if not isfile(join(self.dataset_directory, "splits_final.pkl")): shutil.copy('../../../data/splits_final.pkl', self.dataset_directory) splits_file = join(self.dataset_directory, "splits_final.pkl") splits = load_pickle(splits_file) tr_keys = splits[0]['train'] val_keys = splits[0]['val'] tr_keys.sort() val_keys.sort() print("Current train-val split is ...") print('Training set is %s' % tr_keys) print('Validation set is %s \n' % val_keys) self.dataset_tr = OrderedDict() for i in tr_keys: self.dataset_tr[i] = self.dataset[i] self.dataset_val = OrderedDict() for i in val_keys: self.dataset_val[i] = self.dataset[i] def setup_DA_params(self): """ - we increase roation angle from [-15, 15] to [-30, 30] - scale range is now (0.7, 1.4), was (0.85, 1.25) - we don't do elastic deformation anymore :return: """ self.downsampe_scales = [[1, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]] self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod( np.vstack(self.downsampe_scales), axis=0))[:-1] if self.threeD: self.data_aug_params = default_3D_augmentation_params self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi) if self.do_dummy_2D_aug: self.data_aug_params["dummy_2D"] = True self.print_to_log_file("Using dummy2d data augmentation") self.data_aug_params["elastic_deform_alpha"] = \ default_2D_augmentation_params["elastic_deform_alpha"] self.data_aug_params["elastic_deform_sigma"] = \ default_2D_augmentation_params["elastic_deform_sigma"] self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"] else: self.do_dummy_2D_aug = False if max(self.patch_size) / min(self.patch_size) > 1.5: default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. 
* np.pi) self.data_aug_params = default_2D_augmentation_params self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size)) patch_size_for_spatialtransform = self.patch_size[1:] else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) patch_size_for_spatialtransform = self.patch_size self.data_aug_params["scale_range"] = (0.7, 1.4) self.data_aug_params["do_elastic"] = False self.data_aug_params['selected_seg_channels'] = [0] self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform self.data_aug_params["num_cached_per_thread"] = 2 def maybe_update_lr(self, epoch=None): """ if epoch is not None we overwrite epoch. Else we use epoch = self.epoch + 1 (maybe_update_lr is called in on_epoch_end which is called before epoch is incremented. herefore we need to do +1 here) :param epoch: :return: """ if epoch is None: ep = self.epoch + 1 else: ep = epoch self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9) self.print_to_log_file("lr:", np.round(self.optimizer.param_groups[0]['lr'], decimals=6)) def on_epoch_end(self): """ overwrite patient-based early stopping. Always run to 1000 epochs :return: """ super().on_epoch_end() continue_training = self.epoch < self.max_num_epochs # it can rarely happen that the momentum of nnUNetTrainerV2 is too high for some dataset. 
If at epoch 100 the # estimated validation Dice is still 0 then we reduce the momentum from 0.99 to 0.95 if self.epoch == 100: if self.all_val_eval_metrics[-1] == 0: self.optimizer.param_groups[0]["momentum"] = 0.95 self.network.apply(InitWeights_He(1e-2)) self.print_to_log_file("At epoch 100, the mean foreground Dice was 0. This can be caused by a too " "high momentum. High momentum (0.99) is good for datasets where it works, but " "sometimes causes issues such as this one. Momentum has now been reduced to " "0.95 and network weights have been reinitialized") return continue_training def run_training(self): """ if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first continued epoch with self.initial_lr we also need to make sure deep supervision in the network is enabled for training, thus the wrapper :return: """ self.maybe_update_lr(self.epoch) # if we dont overwrite epoch then self.epoch+1 is used which is not what we # want at the start of the training ds = self.network.do_ds self.network.do_ds = True ret = super().run_training() self.network.do_ds = ds return ret
18,610
46.843188
151
py
CoTr
CoTr-main/CoTr_package/CoTr/training/network_training/network_trainer.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from _warnings import warn from typing import Tuple import matplotlib from batchgenerators.utilities.file_and_folder_operations import * from CoTr.network_architecture.neural_network import SegmentationNetwork from sklearn.model_selection import KFold from torch import nn from torch.cuda.amp import GradScaler, autocast from torch.optim.lr_scheduler import _LRScheduler matplotlib.use("agg") from time import time, sleep import torch import numpy as np from torch.optim import lr_scheduler import matplotlib.pyplot as plt import sys from collections import OrderedDict import torch.backends.cudnn as cudnn from abc import abstractmethod from datetime import datetime from tqdm import trange from nnunet.utilities.to_torch import maybe_to_torch, to_cuda class NetworkTrainer(object): def __init__(self, deterministic=True, fp16=False): """ A generic class that can train almost any neural network (RNNs excluded). It provides basic functionality such as the training loop, tracking of training and validation losses (and the target metric if you implement it) Training can be terminated early if the validation loss (or the target metric if implemented) do not improve anymore. This is based on a moving average (MA) of the loss/metric instead of the raw values to get more smooth results. 
What you need to override: - __init__ - initialize - run_online_evaluation (optional) - finish_online_evaluation (optional) - validate - predict_test_case """ self.fp16 = fp16 self.amp_grad_scaler = None if deterministic: np.random.seed(12345) torch.manual_seed(12345) if torch.cuda.is_available(): torch.cuda.manual_seed_all(12345) cudnn.deterministic = True torch.backends.cudnn.benchmark = False else: cudnn.deterministic = False torch.backends.cudnn.benchmark = True ################# SET THESE IN self.initialize() ################################### self.network: Tuple[SegmentationNetwork, nn.DataParallel] = None self.optimizer = None self.lr_scheduler = None self.tr_gen = self.val_gen = None self.was_initialized = False ################# SET THESE IN INIT ################################################ self.output_folder = None self.fold = None self.loss = None self.dataset_directory = None ################# SET THESE IN LOAD_DATASET OR DO_SPLIT ############################ self.dataset = None # these can be None for inference mode self.dataset_tr = self.dataset_val = None # do not need to be used, they just appear if you are using the suggested load_dataset_and_do_split ################# THESE DO NOT NECESSARILY NEED TO BE MODIFIED ##################### self.patience = 50 self.val_eval_criterion_alpha = 0.9 # alpha * old + (1-alpha) * new # if this is too low then the moving average will be too noisy and the training may terminate early. 
If it is # too high the training will take forever self.train_loss_MA_alpha = 0.93 # alpha * old + (1-alpha) * new self.train_loss_MA_eps = 5e-4 # new MA must be at least this much better (smaller) self.max_num_epochs = 1000 self.num_batches_per_epoch = 250 self.num_val_batches_per_epoch = 50 self.also_val_in_tr_mode = False self.lr_threshold = 1e-6 # the network will not terminate training if the lr is still above this threshold ################# LEAVE THESE ALONE ################################################ self.val_eval_criterion_MA = None self.train_loss_MA = None self.best_val_eval_criterion_MA = None self.best_MA_tr_loss_for_patience = None self.best_epoch_based_on_MA_tr_loss = None self.all_tr_losses = [] self.all_val_losses = [] self.all_val_losses_tr_mode = [] self.all_val_eval_metrics = [] # does not have to be used self.epoch = 0 self.log_file = None self.deterministic = deterministic self.use_progress_bar = True if 'nnunet_use_progress_bar' in os.environ.keys(): self.use_progress_bar = bool(int(os.environ['nnunet_use_progress_bar'])) ################# Settings for saving checkpoints ################################## self.save_every = 50 self.save_latest_only = True # if false it will not store/overwrite _latest but separate files each # time an intermediate checkpoint is created self.save_intermediate_checkpoints = True # whether or not to save checkpoint_latest self.save_best_checkpoint = True # whether or not to save the best checkpoint according to self.best_val_eval_criterion_MA self.save_final_checkpoint = True # whether or not to save the final checkpoint @abstractmethod def initialize(self, training=True): """ create self.output_folder modify self.output_folder if you are doing cross-validation (one folder per fold) set self.tr_gen and self.val_gen call self.initialize_network and self.initialize_optimizer_and_scheduler (important!) 
finally set self.was_initialized to True :param training: :return: """ @abstractmethod def load_dataset(self): pass def do_split(self): """ This is a suggestion for if your dataset is a dictionary (my personal standard) :return: """ splits_file = join(self.dataset_directory, "splits_final.pkl") if not isfile(splits_file): self.print_to_log_file("Creating new split...") splits = [] all_keys_sorted = np.sort(list(self.dataset.keys())) kfold = KFold(n_splits=5, shuffle=True, random_state=12345) for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)): train_keys = np.array(all_keys_sorted)[train_idx] test_keys = np.array(all_keys_sorted)[test_idx] splits.append(OrderedDict()) splits[-1]['train'] = train_keys splits[-1]['val'] = test_keys save_pickle(splits, splits_file) splits = load_pickle(splits_file) if self.fold == "all": tr_keys = val_keys = list(self.dataset.keys()) else: tr_keys = splits[self.fold]['train'] val_keys = splits[self.fold]['val'] tr_keys.sort() val_keys.sort() self.dataset_tr = OrderedDict() for i in tr_keys: self.dataset_tr[i] = self.dataset[i] self.dataset_val = OrderedDict() for i in val_keys: self.dataset_val[i] = self.dataset[i] def plot_progress(self): """ Should probably by improved :return: """ try: font = {'weight': 'normal', 'size': 18} matplotlib.rc('font', **font) fig = plt.figure(figsize=(30, 24)) ax = fig.add_subplot(111) ax2 = ax.twinx() x_values = list(range(self.epoch + 1)) ax.plot(x_values, self.all_tr_losses, color='b', ls='-', label="loss_tr") ax.plot(x_values, self.all_val_losses, color='r', ls='-', label="loss_val, train=False") if len(self.all_val_losses_tr_mode) > 0: ax.plot(x_values, self.all_val_losses_tr_mode, color='g', ls='-', label="loss_val, train=True") if len(self.all_val_eval_metrics) == len(x_values): ax2.plot(x_values, self.all_val_eval_metrics, color='g', ls='--', label="evaluation metric") ax.set_xlabel("epoch") ax.set_ylabel("loss") ax2.set_ylabel("evaluation metric") ax.legend() 
ax2.legend(loc=9) fig.savefig(join(self.output_folder, "progress.png")) plt.close() except IOError: self.print_to_log_file("failed to plot: ", sys.exc_info()) def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True): timestamp = time() dt_object = datetime.fromtimestamp(timestamp) if add_timestamp: args = ("%s:" % dt_object, *args) if self.log_file is None: maybe_mkdir_p(self.output_folder) timestamp = datetime.now() self.log_file = join(self.output_folder, "training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt" % (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute, timestamp.second)) with open(self.log_file, 'w') as f: f.write("Starting... \n") successful = False max_attempts = 5 ctr = 0 while not successful and ctr < max_attempts: try: with open(self.log_file, 'a+') as f: for a in args: f.write(str(a)) f.write(" ") f.write("\n") successful = True except IOError: print("%s: failed to log: " % datetime.fromtimestamp(timestamp), sys.exc_info()) sleep(0.5) ctr += 1 if also_print_to_console: print(*args) def save_checkpoint(self, fname, save_optimizer=True): start_time = time() state_dict = self.network.state_dict() for key in state_dict.keys(): state_dict[key] = state_dict[key].cpu() lr_sched_state_dct = None if self.lr_scheduler is not None and hasattr(self.lr_scheduler, 'state_dict'): # not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau): lr_sched_state_dct = self.lr_scheduler.state_dict() # WTF is this!? 
# for key in lr_sched_state_dct.keys(): # lr_sched_state_dct[key] = lr_sched_state_dct[key] if save_optimizer: optimizer_state_dict = self.optimizer.state_dict() else: optimizer_state_dict = None self.print_to_log_file("saving checkpoint...") save_this = { 'epoch': self.epoch + 1, 'state_dict': state_dict, 'optimizer_state_dict': optimizer_state_dict, 'lr_scheduler_state_dict': lr_sched_state_dct, 'plot_stuff': (self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics), 'best_stuff' : (self.best_epoch_based_on_MA_tr_loss, self.best_MA_tr_loss_for_patience, self.best_val_eval_criterion_MA)} if self.amp_grad_scaler is not None: save_this['amp_grad_scaler'] = self.amp_grad_scaler.state_dict() torch.save(save_this, fname) self.print_to_log_file("done, saving took %.2f seconds" % (time() - start_time)) def load_best_checkpoint(self, train=True): if self.fold is None: raise RuntimeError("Cannot load best checkpoint if self.fold is None") if isfile(join(self.output_folder, "model_best.model")): self.load_checkpoint(join(self.output_folder, "model_best.model"), train=train) else: self.print_to_log_file("WARNING! model_best.model does not exist! Cannot load best checkpoint. 
Falling " "back to load_latest_checkpoint") self.load_latest_checkpoint(train) def load_latest_checkpoint(self, train=True): if isfile(join(self.output_folder, "model_final_checkpoint.model")): return self.load_checkpoint(join(self.output_folder, "model_final_checkpoint.model"), train=train) if isfile(join(self.output_folder, "model_latest.model")): return self.load_checkpoint(join(self.output_folder, "model_latest.model"), train=train) if isfile(join(self.output_folder, "model_best.model")): return self.load_best_checkpoint(train) raise RuntimeError("No checkpoint found") def load_checkpoint(self, fname, train=True): self.print_to_log_file("loading checkpoint", fname, "train=", train) if not self.was_initialized: self.initialize(train) # saved_model = torch.load(fname, map_location=torch.device('cuda', torch.cuda.current_device())) saved_model = torch.load(fname, map_location=torch.device('cpu')) self.load_checkpoint_ram(saved_model, train) @abstractmethod def initialize_network(self): """ initialize self.network here :return: """ pass @abstractmethod def initialize_optimizer_and_scheduler(self): """ initialize self.optimizer and self.lr_scheduler (if applicable) here :return: """ pass def load_checkpoint_ram(self, checkpoint, train=True): """ used for if the checkpoint is already in ram :param checkpoint: :param train: :return: """ if not self.was_initialized: self.initialize(train) new_state_dict = OrderedDict() curr_state_dict_keys = list(self.network.state_dict().keys()) # if state dict comes form nn.DataParallel but we use non-parallel model here then the state dict keys do not # match. 
Use heuristic to make it match for k, value in checkpoint['state_dict'].items(): key = k if key not in curr_state_dict_keys and key.startswith('module.'): key = key[7:] new_state_dict[key] = value if self.fp16: self._maybe_init_amp() if 'amp_grad_scaler' in checkpoint.keys(): self.amp_grad_scaler.load_state_dict(checkpoint['amp_grad_scaler']) self.network.load_state_dict(new_state_dict) self.epoch = checkpoint['epoch'] if train: optimizer_state_dict = checkpoint['optimizer_state_dict'] if optimizer_state_dict is not None: self.optimizer.load_state_dict(optimizer_state_dict) if self.lr_scheduler is not None and hasattr(self.lr_scheduler, 'load_state_dict') and checkpoint[ 'lr_scheduler_state_dict'] is not None: self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict']) if issubclass(self.lr_scheduler.__class__, _LRScheduler): self.lr_scheduler.step(self.epoch) self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics = checkpoint[ 'plot_stuff'] # load best loss (if present) if 'best_stuff' in checkpoint.keys(): self.best_epoch_based_on_MA_tr_loss, self.best_MA_tr_loss_for_patience, self.best_val_eval_criterion_MA = checkpoint[ 'best_stuff'] # after the training is done, the epoch is incremented one more time in my old code. This results in # self.epoch = 1001 for old trained models when the epoch is actually 1000. This causes issues because # len(self.all_tr_losses) = 1000 and the plot function will fail. We can easily detect and correct that here if self.epoch != len(self.all_tr_losses): self.print_to_log_file("WARNING in loading checkpoint: self.epoch != len(self.all_tr_losses). This is " "due to an old bug and should only appear when you are loading old models. New " "models should have this fixed! 
self.epoch is now set to len(self.all_tr_losses)") self.epoch = len(self.all_tr_losses) self.all_tr_losses = self.all_tr_losses[:self.epoch] self.all_val_losses = self.all_val_losses[:self.epoch] self.all_val_losses_tr_mode = self.all_val_losses_tr_mode[:self.epoch] self.all_val_eval_metrics = self.all_val_eval_metrics[:self.epoch] self._maybe_init_amp() def _maybe_init_amp(self): if self.fp16 and self.amp_grad_scaler is None and torch.cuda.is_available(): self.amp_grad_scaler = GradScaler() def plot_network_architecture(self): """ can be implemented (see nnUNetTrainer) but does not have to. Not implemented here because it imposes stronger assumptions on the presence of class variables :return: """ pass def run_training(self): _ = self.tr_gen.next() _ = self.val_gen.next() if torch.cuda.is_available(): torch.cuda.empty_cache() self._maybe_init_amp() maybe_mkdir_p(self.output_folder) self.plot_network_architecture() if cudnn.benchmark and cudnn.deterministic: warn("torch.backends.cudnn.deterministic is True indicating a deterministic training is desired. " "But torch.backends.cudnn.benchmark is True as well and this will prevent deterministic training! 
" "If you want deterministic then set benchmark=False") if not self.was_initialized: self.initialize(True) while self.epoch < self.max_num_epochs: self.print_to_log_file("\nepoch: ", self.epoch) epoch_start_time = time() train_losses_epoch = [] # train one epoch self.network.train() if self.use_progress_bar: with trange(self.num_batches_per_epoch) as tbar: for b in tbar: tbar.set_description("Epoch {}/{}".format(self.epoch+1, self.max_num_epochs)) l = self.run_iteration(self.tr_gen, True) tbar.set_postfix(loss=l) train_losses_epoch.append(l) else: for _ in range(self.num_batches_per_epoch): l = self.run_iteration(self.tr_gen, True) train_losses_epoch.append(l) self.all_tr_losses.append(np.mean(train_losses_epoch)) self.print_to_log_file("train loss : %.4f" % self.all_tr_losses[-1]) with torch.no_grad(): # validation with train=False self.network.eval() val_losses = [] for b in range(self.num_val_batches_per_epoch): l = self.run_iteration(self.val_gen, False, True) val_losses.append(l) self.all_val_losses.append(np.mean(val_losses)) self.print_to_log_file("validation loss: %.4f" % self.all_val_losses[-1]) if self.also_val_in_tr_mode: self.network.train() # validation with train=True val_losses = [] for b in range(self.num_val_batches_per_epoch): l = self.run_iteration(self.val_gen, False) val_losses.append(l) self.all_val_losses_tr_mode.append(np.mean(val_losses)) self.print_to_log_file("validation loss (train=True): %.4f" % self.all_val_losses_tr_mode[-1]) self.update_train_loss_MA() # needed for lr scheduler and stopping of training continue_training = self.on_epoch_end() epoch_end_time = time() if not continue_training: # allows for early stopping break self.epoch += 1 self.print_to_log_file("This epoch took %f s\n" % (epoch_end_time - epoch_start_time)) self.epoch -= 1 # if we don't do this we can get a problem with loading model_final_checkpoint. 
if self.save_final_checkpoint: self.save_checkpoint(join(self.output_folder, "model_final_checkpoint.model")) # now we can delete latest as it will be identical with final if isfile(join(self.output_folder, "model_latest.model")): os.remove(join(self.output_folder, "model_latest.model")) if isfile(join(self.output_folder, "model_latest.model.pkl")): os.remove(join(self.output_folder, "model_latest.model.pkl")) def maybe_update_lr(self): # maybe update learning rate if self.lr_scheduler is not None: assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler)) if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau): # lr scheduler is updated with moving average val loss. should be more robust self.lr_scheduler.step(self.train_loss_MA) else: self.lr_scheduler.step(self.epoch + 1) self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr'])) def maybe_save_checkpoint(self): """ Saves a checkpoint every save_ever epochs. 
:return: """ if self.save_intermediate_checkpoints and (self.epoch % self.save_every == (self.save_every - 1)): self.print_to_log_file("saving scheduled checkpoint file...") if not self.save_latest_only: self.save_checkpoint(join(self.output_folder, "model_ep_%03.0d.model" % (self.epoch + 1))) self.save_checkpoint(join(self.output_folder, "model_latest.model")) self.print_to_log_file("done") def update_eval_criterion_MA(self): """ If self.all_val_eval_metrics is unused (len=0) then we fall back to using -self.all_val_losses for the MA to determine early stopping (not a minimization, but a maximization of a metric and therefore the - in the latter case) :return: """ if self.val_eval_criterion_MA is None: if len(self.all_val_eval_metrics) == 0: self.val_eval_criterion_MA = - self.all_val_losses[-1] else: self.val_eval_criterion_MA = self.all_val_eval_metrics[-1] else: if len(self.all_val_eval_metrics) == 0: """ We here use alpha * old - (1 - alpha) * new because new in this case is the vlaidation loss and lower is better, so we need to negate it. 
""" self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA - ( 1 - self.val_eval_criterion_alpha) * \ self.all_val_losses[-1] else: self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA + ( 1 - self.val_eval_criterion_alpha) * \ self.all_val_eval_metrics[-1] def manage_patience(self): # update patience continue_training = True if self.patience is not None: # if best_MA_tr_loss_for_patience and best_epoch_based_on_MA_tr_loss were not yet initialized, # initialize them if self.best_MA_tr_loss_for_patience is None: self.best_MA_tr_loss_for_patience = self.train_loss_MA if self.best_epoch_based_on_MA_tr_loss is None: self.best_epoch_based_on_MA_tr_loss = self.epoch if self.best_val_eval_criterion_MA is None: self.best_val_eval_criterion_MA = self.val_eval_criterion_MA # check if the current epoch is the best one according to moving average of validation criterion. If so # then save 'best' model # Do not use this for validation. This is intended for test set prediction only. #self.print_to_log_file("current best_val_eval_criterion_MA is %.4f0" % self.best_val_eval_criterion_MA) #self.print_to_log_file("current val_eval_criterion_MA is %.4f" % self.val_eval_criterion_MA) if self.val_eval_criterion_MA > self.best_val_eval_criterion_MA: self.best_val_eval_criterion_MA = self.val_eval_criterion_MA #self.print_to_log_file("saving best epoch checkpoint...") if self.save_best_checkpoint: self.save_checkpoint(join(self.output_folder, "model_best.model")) # Now see if the moving average of the train loss has improved. 
If yes then reset patience, else # increase patience if self.train_loss_MA + self.train_loss_MA_eps < self.best_MA_tr_loss_for_patience: self.best_MA_tr_loss_for_patience = self.train_loss_MA self.best_epoch_based_on_MA_tr_loss = self.epoch #self.print_to_log_file("New best epoch (train loss MA): %03.4f" % self.best_MA_tr_loss_for_patience) else: pass #self.print_to_log_file("No improvement: current train MA %03.4f, best: %03.4f, eps is %03.4f" % # (self.train_loss_MA, self.best_MA_tr_loss_for_patience, self.train_loss_MA_eps)) # if patience has reached its maximum then finish training (provided lr is low enough) if self.epoch - self.best_epoch_based_on_MA_tr_loss > self.patience: if self.optimizer.param_groups[0]['lr'] > self.lr_threshold: #self.print_to_log_file("My patience ended, but I believe I need more time (lr > 1e-6)") self.best_epoch_based_on_MA_tr_loss = self.epoch - self.patience // 2 else: #self.print_to_log_file("My patience ended") continue_training = False else: pass #self.print_to_log_file( # "Patience: %d/%d" % (self.epoch - self.best_epoch_based_on_MA_tr_loss, self.patience)) return continue_training def on_epoch_end(self): self.finish_online_evaluation() # does not have to do anything, but can be used to update self.all_val_eval_ # metrics self.plot_progress() self.maybe_update_lr() self.maybe_save_checkpoint() self.update_eval_criterion_MA() continue_training = self.manage_patience() return continue_training def update_train_loss_MA(self): if self.train_loss_MA is None: self.train_loss_MA = self.all_tr_losses[-1] else: self.train_loss_MA = self.train_loss_MA_alpha * self.train_loss_MA + (1 - self.train_loss_MA_alpha) * \ self.all_tr_losses[-1] def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False): data_dict = next(data_generator) data = data_dict['data'] target = data_dict['target'] data = maybe_to_torch(data) target = maybe_to_torch(target) if torch.cuda.is_available(): data = to_cuda(data) target = 
to_cuda(target) self.optimizer.zero_grad() if self.fp16: with autocast(): output = self.network(data) del data l = self.loss(output, target) if do_backprop: self.amp_grad_scaler.scale(l).backward() self.amp_grad_scaler.step(self.optimizer) self.amp_grad_scaler.update() else: output = self.network(data) del data l = self.loss(output, target) if do_backprop: l.backward() self.optimizer.step() if run_online_evaluation: self.run_online_evaluation(output, target) del target return l.detach().cpu().numpy() def run_online_evaluation(self, *args, **kwargs): """ Can be implemented, does not have to :param output_torch: :param target_npy: :return: """ pass def finish_online_evaluation(self): """ Can be implemented, does not have to :return: """ pass @abstractmethod def validate(self, *args, **kwargs): pass def find_lr(self, num_iters=1000, init_value=1e-6, final_value=10., beta=0.98): """ stolen and adapted from here: https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html :param num_iters: :param init_value: :param final_value: :param beta: :return: """ import math self._maybe_init_amp() mult = (final_value / init_value) ** (1 / num_iters) lr = init_value self.optimizer.param_groups[0]['lr'] = lr avg_loss = 0. best_loss = 0. losses = [] log_lrs = [] for batch_num in range(1, num_iters + 1): # +1 because this one here is not designed to have negative loss... 
loss = self.run_iteration(self.tr_gen, do_backprop=True, run_online_evaluation=False).data.item() + 1 # Compute the smoothed loss avg_loss = beta * avg_loss + (1 - beta) * loss smoothed_loss = avg_loss / (1 - beta ** batch_num) # Stop if the loss is exploding if batch_num > 1 and smoothed_loss > 4 * best_loss: break # Record the best loss if smoothed_loss < best_loss or batch_num == 1: best_loss = smoothed_loss # Store the values losses.append(smoothed_loss) log_lrs.append(math.log10(lr)) # Update the lr for the next step lr *= mult self.optimizer.param_groups[0]['lr'] = lr import matplotlib.pyplot as plt lrs = [10 ** i for i in log_lrs] fig = plt.figure() plt.xscale('log') plt.plot(lrs[10:-5], losses[10:-5]) plt.savefig(join(self.output_folder, "lr_finder.png")) plt.close() return log_lrs, losses
30,846
41.372253
150
py
CoTr
CoTr-main/CoTr_package/CoTr/training/network_training/nnUNetTrainer.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil from collections import OrderedDict from multiprocessing import Pool from time import sleep from typing import Tuple, List import matplotlib import nnunet import numpy as np import torch from batchgenerators.utilities.file_and_folder_operations import * from nnunet.configuration import default_num_threads from nnunet.evaluation.evaluator import aggregate_scores from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax from nnunet.network_architecture.generic_UNet import Generic_UNet from nnunet.network_architecture.initialization import InitWeights_He from CoTr.network_architecture.neural_network import SegmentationNetwork from nnunet.postprocessing.connected_components import determine_postprocessing from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \ default_2D_augmentation_params, get_default_augmentation, get_patch_size from nnunet.training.dataloading.dataset_loading import load_dataset, DataLoader3D, DataLoader2D, unpack_dataset from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss from CoTr.training.network_training.network_trainer import NetworkTrainer from nnunet.utilities.nd_softmax import softmax_helper from nnunet.utilities.tensor_utilities import sum_tensor from torch import nn from torch.optim import 
lr_scheduler matplotlib.use("agg") class nnUNetTrainer(NetworkTrainer): def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False): """ :param deterministic: :param fold: can be either [0 ... 5) for cross-validation, 'all' to train on all available training data or None if you wish to load some checkpoint and do inference only :param plans_file: the pkl file generated by preprocessing. This file will determine all design choices :param subfolder_with_preprocessed_data: must be a subfolder of dataset_directory (just the name of the folder, not the entire path). This is where the preprocessed data lies that will be used for network training. We made this explicitly available so that differently preprocessed data can coexist and the user can choose what to use. Can be None if you are doing inference only. :param output_folder: where to store parameters, plot progress and to the validation :param dataset_directory: the parent directory in which the preprocessed Task data is stored. This is required because the split information is stored in this directory. For running prediction only this input is not required and may be set to None :param batch_dice: compute dice loss for each sample and average over all samples in the batch or pretend the batch is a pseudo volume? :param stage: The plans file may contain several stages (used for lowres / highres / pyramid). Stage must be specified for training: if stage 1 exists then stage 1 is the high resolution stage, otherwise it's 0 :param unpack_data: if False, npz preprocessed data will not be unpacked to npy. This consumes less space but is considerably slower! Running unpack_data=False with 2d should never be done! IMPORTANT: If you inherit from nnUNetTrainer and the init args change then you need to redefine self.init_args in your init accordingly. Otherwise checkpoints won't load properly! 
""" super(nnUNetTrainer, self).__init__(deterministic, fp16) self.unpack_data = unpack_data self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) # set through arguments from init self.stage = stage self.experiment_name = self.__class__.__name__ self.plans_file = plans_file self.output_folder = output_folder self.dataset_directory = dataset_directory self.output_folder_base = self.output_folder self.fold = fold self.plans = None # if we are running inference only then the self.dataset_directory is set (due to checkpoint loading) but it # irrelevant if self.dataset_directory is not None and isdir(self.dataset_directory): self.gt_niftis_folder = join(self.dataset_directory, "gt_segmentations") else: self.gt_niftis_folder = None self.folder_with_preprocessed_data = None # set in self.initialize() self.dl_tr = self.dl_val = None self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \ self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = \ self.net_num_pool_op_kernel_sizes = self.net_conv_kernel_sizes = None # loaded automatically from plans_file self.basic_generator_patch_size = self.data_aug_params = self.transpose_forward = self.transpose_backward = None self.batch_dice = batch_dice self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}) self.online_eval_foreground_dc = [] self.online_eval_tp = [] self.online_eval_fp = [] self.online_eval_fn = [] self.classes = self.do_dummy_2D_aug = self.use_mask_for_norm = self.only_keep_largest_connected_component = \ self.min_region_size_per_class = self.min_size_per_class = None self.inference_pad_border_mode = "constant" self.inference_pad_kwargs = {'constant_values': 0} self.update_fold(fold) self.pad_all_sides = None self.lr_scheduler_eps = 1e-3 self.lr_scheduler_patience = 30 self.initial_lr = 3e-4 self.weight_decay = 3e-5 
self.oversample_foreground_percent = 0.33 self.conv_per_stage = None self.regions_class_order = None def update_fold(self, fold): """ used to swap between folds for inference (ensemble of models from cross-validation) DO NOT USE DURING TRAINING AS THIS WILL NOT UPDATE THE DATASET SPLIT AND THE DATA AUGMENTATION GENERATORS :param fold: :return: """ if fold is not None: if isinstance(fold, str): assert fold == "all" or "xf", "if self.fold is a string then it must be \'all\'" if self.output_folder.endswith("%s" % str(self.fold)): self.output_folder = self.output_folder_base self.output_folder = join(self.output_folder, "%s" % str(fold)) else: if self.output_folder.endswith("fold_%s" % str(self.fold)): self.output_folder = self.output_folder_base self.output_folder = join(self.output_folder, "fold_%s" % str(fold)) self.fold = fold def setup_DA_params(self): if self.threeD: self.data_aug_params = default_3D_augmentation_params if self.do_dummy_2D_aug: self.data_aug_params["dummy_2D"] = True self.print_to_log_file("Using dummy2d data augmentation") self.data_aug_params["elastic_deform_alpha"] = \ default_2D_augmentation_params["elastic_deform_alpha"] self.data_aug_params["elastic_deform_sigma"] = \ default_2D_augmentation_params["elastic_deform_sigma"] self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"] else: self.do_dummy_2D_aug = False if max(self.patch_size) / min(self.patch_size) > 1.5: default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. 
* np.pi) self.data_aug_params = default_2D_augmentation_params self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm if self.do_dummy_2D_aug: self.basic_generator_patch_size = get_patch_size(self.patch_size[1:], self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size)) patch_size_for_spatialtransform = self.patch_size[1:] else: self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'], self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'], self.data_aug_params['scale_range']) patch_size_for_spatialtransform = self.patch_size self.data_aug_params['selected_seg_channels'] = [0] self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform def initialize(self, training=True, force_load_plans=False): """ For prediction of test cases just set training=False, this will prevent loading of training data and training batchgenerator initialization :param training: :return: """ maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() if training: self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: self.print_to_log_file("unpacking dataset") unpack_dataset(self.folder_with_preprocessed_data) self.print_to_log_file("done") else: self.print_to_log_file( "INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you " "will wait all winter for your model to finish!") self.tr_gen, self.val_gen = get_default_augmentation(self.dl_tr, self.dl_val, self.data_aug_params[ 'patch_size_for_spatialtransform'], self.data_aug_params) self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())), also_print_to_console=False) self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())), also_print_to_console=False) else: pass self.initialize_network() self.initialize_optimizer_and_scheduler() self.was_initialized = True def initialize_network(self): """ This is specific to the U-Net and must be adapted for other network architectures :return: """ # self.print_to_log_file(self.net_num_pool_op_kernel_sizes) # self.print_to_log_file(self.net_conv_kernel_sizes) net_numpool = len(self.net_num_pool_op_kernel_sizes) if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d norm_op_kwargs = {'eps': 1e-5, 'affine': True} dropout_op_kwargs = {'p': 0, 'inplace': True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, net_numpool, self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(1e-2), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True) self.network.inference_apply_nonlin = softmax_helper if torch.cuda.is_available(): self.network.cuda() def initialize_optimizer_and_scheduler(self): assert self.network is not None, "self.initialize_network must be called first" self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, amsgrad=True) self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, 
mode='min', factor=0.2, patience=self.lr_scheduler_patience, verbose=True, threshold=self.lr_scheduler_eps, threshold_mode="abs") def plot_network_architecture(self): try: from batchgenerators.utilities.file_and_folder_operations import join import hiddenlayer as hl if torch.cuda.is_available(): g = hl.build_graph(self.network, torch.rand((2, self.num_input_channels, *self.patch_size)).cuda(), transforms=None) else: g = hl.build_graph(self.network, torch.rand((1, self.num_input_channels, *self.patch_size)), transforms=None) g.save(join(self.output_folder, "network_architecture.pdf")) del g except Exception as e: self.print_to_log_file("Unable to plot network architecture:") self.print_to_log_file(e) self.print_to_log_file("\nprinting the network instead:\n") self.print_to_log_file(self.network) self.print_to_log_file("\n") finally: if torch.cuda.is_available(): torch.cuda.empty_cache() def run_training(self): dct = OrderedDict() for k in self.__dir__(): if not k.startswith("__"): if not callable(getattr(self, k)): dct[k] = str(getattr(self, k)) del dct['plans'] del dct['intensity_properties'] del dct['dataset'] del dct['dataset_tr'] del dct['dataset_val'] save_json(dct, join(self.output_folder, "debug.json")) import shutil shutil.copy(self.plans_file, join(self.output_folder_base, "plans.pkl")) super(nnUNetTrainer, self).run_training() def load_plans_file(self): """ This is what actually configures the entire experiment. The plans file is generated by experiment planning :return: """ self.plans = load_pickle(self.plans_file) def process_plans(self, plans): if self.stage is None: assert len(list(plans['plans_per_stage'].keys())) == 1, \ "If self.stage is None then there can be only one stage in the plans file. That seems to not be the " \ "case. 
Please specify which stage of the cascade must be trained" self.stage = list(plans['plans_per_stage'].keys())[0] self.plans = plans stage_plans = self.plans['plans_per_stage'][self.stage] self.batch_size = stage_plans['batch_size'] self.net_pool_per_axis = stage_plans['num_pool_per_axis'] self.patch_size = np.array(stage_plans['patch_size']).astype(int) self.do_dummy_2D_aug = stage_plans['do_dummy_2D_data_aug'] if 'pool_op_kernel_sizes' not in stage_plans.keys(): assert 'num_pool_per_axis' in stage_plans.keys() self.print_to_log_file("WARNING! old plans file with missing pool_op_kernel_sizes. Attempting to fix it...") self.net_num_pool_op_kernel_sizes = [] for i in range(max(self.net_pool_per_axis)): curr = [] for j in self.net_pool_per_axis: if (max(self.net_pool_per_axis) - j) <= i: curr.append(2) else: curr.append(1) self.net_num_pool_op_kernel_sizes.append(curr) else: self.net_num_pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes'] if 'conv_kernel_sizes' not in stage_plans.keys(): self.print_to_log_file("WARNING! old plans file with missing conv_kernel_sizes. Attempting to fix it...") self.net_conv_kernel_sizes = [[3] * len(self.net_pool_per_axis)] * (max(self.net_pool_per_axis) + 1) else: self.net_conv_kernel_sizes = stage_plans['conv_kernel_sizes'] self.pad_all_sides = None # self.patch_size self.intensity_properties = plans['dataset_properties']['intensityproperties'] self.normalization_schemes = plans['normalization_schemes'] self.base_num_features = plans['base_num_features'] self.num_input_channels = plans['num_modalities'] self.num_classes = plans['num_classes'] + 1 # background is no longer in num_classes self.classes = plans['all_classes'] self.use_mask_for_norm = plans['use_mask_for_norm'] self.only_keep_largest_connected_component = plans['keep_only_largest_region'] self.min_region_size_per_class = plans['min_region_size_per_class'] self.min_size_per_class = None # DONT USE THIS. 
plans['min_size_per_class'] if plans.get('transpose_forward') is None or plans.get('transpose_backward') is None: print("WARNING! You seem to have data that was preprocessed with a previous version of nnU-Net. " "You should rerun preprocessing. We will proceed and assume that both transpose_foward " "and transpose_backward are [0, 1, 2]. If that is not correct then weird things will happen!") plans['transpose_forward'] = [0, 1, 2] plans['transpose_backward'] = [0, 1, 2] self.transpose_forward = plans['transpose_forward'] self.transpose_backward = plans['transpose_backward'] if len(self.patch_size) == 2: self.threeD = False elif len(self.patch_size) == 3: self.threeD = True else: raise RuntimeError("invalid patch size in plans file: %s" % str(self.patch_size)) if "conv_per_stage" in plans.keys(): # this ha sbeen added to the plans only recently self.conv_per_stage = plans['conv_per_stage'] else: self.conv_per_stage = 2 def load_dataset(self): self.dataset = load_dataset(self.folder_with_preprocessed_data) def get_basic_generators(self): self.load_dataset() self.do_split() if self.threeD: dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size, False, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r') dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r') else: dl_tr = DataLoader2D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size, oversample_foreground_percent=self.oversample_foreground_percent, pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r') dl_val = DataLoader2D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, oversample_foreground_percent=self.oversample_foreground_percent, 
pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r') return dl_tr, dl_val def preprocess_patient(self, input_files): """ Used to predict new unseen data. Not used for the preprocessing of the training/test data :param input_files: :return: """ from nnunet.training.model_restore import recursive_find_python_class preprocessor_name = self.plans.get('preprocessor_name') if preprocessor_name is None: if self.threeD: preprocessor_name = "GenericPreprocessor" else: preprocessor_name = "PreprocessorFor2D" print("using preprocessor", preprocessor_name) preprocessor_class = recursive_find_python_class([join(nnunet.__path__[0], "preprocessing")], preprocessor_name, current_module="nnunet.preprocessing") assert preprocessor_class is not None, "Could not find preprocessor %s in nnunet.preprocessing" % \ preprocessor_name preprocessor = preprocessor_class(self.normalization_schemes, self.use_mask_for_norm, self.transpose_forward, self.intensity_properties) d, s, properties = preprocessor.preprocess_test_case(input_files, self.plans['plans_per_stage'][self.stage][ 'current_spacing']) return d, s, properties def preprocess_predict_nifti(self, input_files: List[str], output_file: str = None, softmax_ouput_file: str = None, mixed_precision: bool = True) -> None: """ Use this to predict new data :param input_files: :param output_file: :param softmax_ouput_file: :param mixed_precision: :return: """ print("preprocessing...") d, s, properties = self.preprocess_patient(input_files) print("predicting...") pred = self.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=self.data_aug_params["do_mirror"], mirror_axes=self.data_aug_params['mirror_axes'], use_sliding_window=True, step_size=0.5, use_gaussian=True, pad_border_mode='constant', pad_kwargs={'constant_values': 0}, verbose=True, all_in_gpu=False, mixed_precision=mixed_precision)[1] pred = pred.transpose([0] + [i + 1 for i in self.transpose_backward]) if 'segmentation_export_params' in self.plans.keys(): 
force_separate_z = self.plans['segmentation_export_params']['force_separate_z'] interpolation_order = self.plans['segmentation_export_params']['interpolation_order'] interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z'] else: force_separate_z = None interpolation_order = 1 interpolation_order_z = 0 print("resampling to original spacing and nifti export...") save_segmentation_nifti_from_softmax(pred, output_file, properties, interpolation_order, self.regions_class_order, None, None, softmax_ouput_file, None, force_separate_z=force_separate_z, interpolation_order_z=interpolation_order_z) print("done") def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True, mirror_axes: Tuple[int] = None, use_sliding_window: bool = True, step_size: float = 0.5, use_gaussian: bool = True, pad_border_mode: str = 'constant', pad_kwargs: dict = None, all_in_gpu: bool = False, verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]: """ :param data: :param do_mirroring: :param mirror_axes: :param use_sliding_window: :param step_size: :param use_gaussian: :param pad_border_mode: :param pad_kwargs: :param all_in_gpu: :param verbose: :return: """ if pad_border_mode == 'constant' and pad_kwargs is None: pad_kwargs = {'constant_values': 0} if do_mirroring and mirror_axes is None: mirror_axes = self.data_aug_params['mirror_axes'] if do_mirroring: assert self.data_aug_params["do_mirror"], "Cannot do mirroring as test time augmentation when training " \ "was done without mirroring" valid = list((SegmentationNetwork, nn.DataParallel)) assert isinstance(self.network, tuple(valid)) current_mode = self.network.training self.network.eval() ret = self.network.predict_3D(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, patch_size=self.patch_size, regions_class_order=self.regions_class_order, use_gaussian=use_gaussian, 
pad_border_mode=pad_border_mode, pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose, mixed_precision=mixed_precision) self.network.train(current_mode) return ret def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True, validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False, segmentation_export_kwargs: dict = None): """ if debug=True then the temporary files generated for postprocessing determination will be kept """ current_mode = self.network.training self.network.eval() assert self.was_initialized, "must initialize, ideally with checkpoint (or train first)" if self.dataset_val is None: self.load_dataset() self.do_split() if segmentation_export_kwargs is None: if 'segmentation_export_params' in self.plans.keys(): force_separate_z = self.plans['segmentation_export_params']['force_separate_z'] interpolation_order = self.plans['segmentation_export_params']['interpolation_order'] interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z'] else: force_separate_z = None interpolation_order = 1 interpolation_order_z = 0 else: force_separate_z = segmentation_export_kwargs['force_separate_z'] interpolation_order = segmentation_export_kwargs['interpolation_order'] interpolation_order_z = segmentation_export_kwargs['interpolation_order_z'] # predictions as they come from the network go here output_folder = join(self.output_folder, validation_folder_name) maybe_mkdir_p(output_folder) # this is for debug purposes my_input_args = {'do_mirroring': do_mirroring, 'use_sliding_window': use_sliding_window, 'step_size': step_size, 'save_softmax': save_softmax, 'use_gaussian': use_gaussian, 'overwrite': overwrite, 'validation_folder_name': validation_folder_name, 'debug': debug, 'all_in_gpu': all_in_gpu, 'segmentation_export_kwargs': segmentation_export_kwargs, } 
save_json(my_input_args, join(output_folder, "validation_args.json")) if do_mirroring: if not self.data_aug_params['do_mirror']: raise RuntimeError("We did not train with mirroring so you cannot do inference with mirroring enabled") mirror_axes = self.data_aug_params['mirror_axes'] else: mirror_axes = () pred_gt_tuples = [] export_pool = Pool(default_num_threads) results = [] for k in self.dataset_val.keys(): properties = load_pickle(self.dataset[k]['properties_file']) fname = properties['list_of_data_files'][0].split("/")[-1][:-12] if overwrite or (not isfile(join(output_folder, fname + ".nii.gz"))) or \ (save_softmax and not isfile(join(output_folder, fname + ".npz"))): data = np.load(self.dataset[k]['data_file'])['data'] print(k, data.shape) data[-1][data[-1] == -1] = 0 softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data[:-1], do_mirroring=do_mirroring, mirror_axes=mirror_axes, use_sliding_window=use_sliding_window, step_size=step_size, use_gaussian=use_gaussian, all_in_gpu=all_in_gpu, mixed_precision=self.fp16)[1] softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in self.transpose_backward]) if save_softmax: softmax_fname = join(output_folder, fname + ".npz") else: softmax_fname = None """There is a problem with python process communication that prevents us from communicating obejcts larger than 2 GB between processes (basically when the length of the pickle string that will be sent is communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will then be read (and finally deleted) by the Process. 
save_segmentation_nifti_from_softmax can take either filename or np.ndarray and will handle this automatically""" if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85): # *0.85 just to be save np.save(join(output_folder, fname + ".npy"), softmax_pred) softmax_pred = join(output_folder, fname + ".npy") results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax, ((softmax_pred, join(output_folder, fname + ".nii.gz"), properties, interpolation_order, self.regions_class_order, None, None, softmax_fname, None, force_separate_z, interpolation_order_z), ) ) ) pred_gt_tuples.append([join(output_folder, fname + ".nii.gz"), join(self.gt_niftis_folder, fname + ".nii.gz")]) _ = [i.get() for i in results] self.print_to_log_file("finished prediction") # evaluate raw predictions self.print_to_log_file("evaluation of raw predictions") task = self.dataset_directory.split("/")[-1] job_name = self.experiment_name _ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)), json_output_file=join(output_folder, "summary.json"), json_name=job_name + " val tiled %s" % (str(use_sliding_window)), json_author="Fabian", json_task=task, num_threads=default_num_threads) # in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything # except the largest connected component for each class. To see if this improves results, we do this for all # classes and then rerun the evaluation. Those classes for which this resulted in an improved dice score will # have this applied during inference as well self.print_to_log_file("determining postprocessing") determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name, final_subf_name=validation_folder_name + "_postprocessed", debug=debug) # after this the final predictions for the vlaidation set can be found in validation_folder_name_base + "_postprocessed" # They are always in that folder, even if no postprocessing as applied! 
# detemining postprocesing on a per-fold basis may be OK for this fold but what if another fold finds another # postprocesing to be better? In this case we need to consolidate. At the time the consolidation is going to be # done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to # be used later gt_nifti_folder = join(self.output_folder_base, "gt_niftis") maybe_mkdir_p(gt_nifti_folder) for f in subfiles(self.gt_niftis_folder, suffix=".nii.gz"): success = False attempts = 0 e = None while not success and attempts < 10: try: shutil.copy(f, gt_nifti_folder) success = True except OSError as e: attempts += 1 sleep(1) if not success: print("Could not copy gt nifti file %s into folder %s" % (f, gt_nifti_folder)) if e is not None: raise e self.network.train(current_mode) def run_online_evaluation(self, output, target): with torch.no_grad(): num_classes = output.shape[1] output_softmax = softmax_helper(output) output_seg = output_softmax.argmax(1) target = target[:, 0] axes = tuple(range(1, len(target.shape))) tp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index) fp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index) fn_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index) for c in range(1, num_classes): tp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target == c).float(), axes=axes) fp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target != c).float(), axes=axes) fn_hard[:, c - 1] = sum_tensor((output_seg != c).float() * (target == c).float(), axes=axes) tp_hard = tp_hard.sum(0, keepdim=False).detach().cpu().numpy() fp_hard = fp_hard.sum(0, keepdim=False).detach().cpu().numpy() fn_hard = fn_hard.sum(0, keepdim=False).detach().cpu().numpy() self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8))) self.online_eval_tp.append(list(tp_hard)) 
self.online_eval_fp.append(list(fp_hard)) self.online_eval_fn.append(list(fn_hard)) def finish_online_evaluation(self): self.online_eval_tp = np.sum(self.online_eval_tp, 0) self.online_eval_fp = np.sum(self.online_eval_fp, 0) self.online_eval_fn = np.sum(self.online_eval_fn, 0) global_dc_per_class = [i for i in [2 * i / (2 * i + j + k) for i, j, k in zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)] if not np.isnan(i)] self.all_val_eval_metrics.append(np.mean(global_dc_per_class)) self.print_to_log_file("Average global foreground Dice:", str(global_dc_per_class)) self.print_to_log_file("(interpret this as an estimate for the Dice of the different classes. This is not " "exact.)") self.online_eval_foreground_dc = [] self.online_eval_tp = [] self.online_eval_fp = [] self.online_eval_fn = [] def save_checkpoint(self, fname, save_optimizer=True): super(nnUNetTrainer, self).save_checkpoint(fname, save_optimizer) info = OrderedDict() info['init'] = self.init_args info['name'] = self.__class__.__name__ info['class'] = str(self.__class__) info['plans'] = self.plans write_pickle(info, fname + ".pkl")
39,572
53.061475
142
py
CoTr
CoTr-main/CoTr_package/CoTr/network_architecture/neural_network.py
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from batchgenerators.augmentations.utils import pad_nd_image from nnunet.utilities.random_stuff import no_op from nnunet.utilities.to_torch import to_cuda, maybe_to_torch from torch import nn import torch from scipy.ndimage.filters import gaussian_filter from typing import Union, Tuple, List from torch.cuda.amp import autocast import CoTr class NeuralNetwork(nn.Module): def __init__(self): super(NeuralNetwork, self).__init__() def get_device(self): if next(self.parameters()).device == "cpu": return "cpu" else: return next(self.parameters()).device.index def set_device(self, device): if device == "cpu": self.cpu() else: self.cuda(device) def forward(self, x): raise NotImplementedError class SegmentationNetwork(NeuralNetwork): def __init__(self): super(NeuralNetwork, self).__init__() # if we have 5 pooling then our patch size must be divisible by 2**5 self.input_shape_must_be_divisible_by = None # for example in a 2d network that does 5 pool in x and 6 pool # in y this would be (32, 64) # we need to know this because we need to know if we are a 2d or a 3d netowrk self.conv_op = None # nn.Conv2d or nn.Conv3d # this tells us how many channely we have in the output. 
Important for preallocation in inference self.num_classes = None # number of channels in the output # depending on the loss, we do not hard code a nonlinearity into the architecture. To aggregate predictions # during inference, we need to apply the nonlinearity, however. So it is important to let the newtork know what # to apply in inference. For the most part this will be softmax self.inference_apply_nonlin = lambda x: x # softmax_helper # This is for saving a gaussian importance map for inference. It weights voxels higher that are closer to the # center. Prediction at the borders are often less accurate and are thus downweighted. Creating these Gaussians # can be expensive, so it makes sense to save and reuse them. self._gaussian_3d = self._patch_size_for_gaussian_3d = None self._gaussian_2d = self._patch_size_for_gaussian_2d = None def predict_3D(self, x: np.ndarray, do_mirroring: bool, mirror_axes: Tuple[int, ...] = (0, 1, 2), use_sliding_window: bool = False, step_size: float = 0.5, patch_size: Tuple[int, ...] = None, regions_class_order: Tuple[int, ...] = None, use_gaussian: bool = False, pad_border_mode: str = "constant", pad_kwargs: dict = None, all_in_gpu: bool = False, verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]: """ Use this function to predict a 3D image. It does not matter whether the network is a 2D or 3D U-Net, it will detect that automatically and run the appropriate code. When running predictions, you need to specify whether you want to run fully convolutional of sliding window based inference. We very strongly recommend you use sliding window with the default settings. It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If the network is not in eval mode it will print a warning. :param x: Your input data. Must be a nd.ndarray of shape (c, x, y, z). 
:param do_mirroring: If True, use test time data augmentation in the form of mirroring :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three axes :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default :param step_size: When running sliding window prediction, the step size determines the distance between adjacent predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between predictions. step_size cannot be larger than 1! :param patch_size: The patch size that was used for training the network. Do not use different patch sizes here, this will either crash or give potentially less accurate segmentations :param regions_class_order: Fabian only :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting to weigh predictions closer to the center of the current patch higher than those at the borders. The reason behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True :param pad_border_mode: leave this alone :param pad_kwargs: leave this alone :param all_in_gpu: experimental. You probably want to leave this as is it :param verbose: Do you want a wall of text? If yes then set this to True :param mixed_precision: if True, will run inference in mixed precision with autocast() :return: """ torch.cuda.empty_cache() assert step_size <= 1, 'step_size must be smaller than 1. Otherwise there will be a gap between consecutive ' \ 'predictions' if verbose: print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes) assert self.get_device() != "cpu", "CPU not implemented" if pad_kwargs is None: pad_kwargs = {'constant_values': 0} # A very long time ago the mirror axes were (2, 3, 4) for a 3d network. 
This is just to intercept any old # code that uses this convention if len(mirror_axes): if self.conv_op == nn.Conv2d: if max(mirror_axes) > 1: raise ValueError("mirror axes. duh") if self.conv_op == nn.Conv3d or self.conv_op == CoTr.network_architecture.ResTranUnet.Conv3d_wd: if max(mirror_axes) > 2: raise ValueError("mirror axes. duh") if self.training: print('WARNING! Network is in train mode during inference. This may be intended, or not...') assert len(x.shape) == 4, "data must have shape (c,x,y,z)" if mixed_precision: context = autocast else: context = no_op with context(): with torch.no_grad(): if self.conv_op == nn.Conv3d or self.conv_op == CoTr.network_architecture.ResTranUnet.Conv3d_wd: if use_sliding_window: res = self._internal_predict_3D_3Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian, pad_border_mode, pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose) else: res = self._internal_predict_3D_3Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs, verbose=verbose) elif self.conv_op == nn.Conv2d: if use_sliding_window: res = self._internal_predict_3D_2Dconv_tiled(x, patch_size, do_mirroring, mirror_axes, step_size, regions_class_order, use_gaussian, pad_border_mode, pad_kwargs, all_in_gpu, False) else: res = self._internal_predict_3D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, all_in_gpu, False) else: raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is") return res def predict_2D(self, x, do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), use_sliding_window: bool = False, step_size: float = 0.5, patch_size: tuple = None, regions_class_order: tuple = None, use_gaussian: bool = False, pad_border_mode: str = "constant", pad_kwargs: dict = None, all_in_gpu: bool = False, verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, 
np.ndarray]: """ Use this function to predict a 2D image. If this is a 3D U-Net it will crash because you cannot predict a 2D image with that (you dummy). When running predictions, you need to specify whether you want to run fully convolutional of sliding window based inference. We very strongly recommend you use sliding window with the default settings. It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If the network is not in eval mode it will print a warning. :param x: Your input data. Must be a nd.ndarray of shape (c, x, y). :param do_mirroring: If True, use test time data augmentation in the form of mirroring :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three axes :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default :param step_size: When running sliding window prediction, the step size determines the distance between adjacent predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between predictions. step_size cannot be larger than 1! :param patch_size: The patch size that was used for training the network. Do not use different patch sizes here, this will either crash or give potentially less accurate segmentations :param regions_class_order: Fabian only :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting to weigh predictions closer to the center of the current patch higher than those at the borders. The reason behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True :param pad_border_mode: leave this alone :param pad_kwargs: leave this alone :param all_in_gpu: experimental. 
You probably want to leave this as is it :param verbose: Do you want a wall of text? If yes then set this to True :return: """ torch.cuda.empty_cache() assert step_size <= 1, 'step_size must be smaler than 1. Otherwise there will be a gap between consecutive ' \ 'predictions' if self.conv_op == nn.Conv3d: raise RuntimeError("Cannot predict 2d if the network is 3d. Dummy.") if verbose: print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes) assert self.get_device() != "cpu", "CPU not implemented" if pad_kwargs is None: pad_kwargs = {'constant_values': 0} # A very long time ago the mirror axes were (2, 3) for a 2d network. This is just to intercept any old # code that uses this convention if len(mirror_axes): if max(mirror_axes) > 1: raise ValueError("mirror axes. duh") if self.training: print('WARNING! Network is in train mode during inference. This may be intended, or not...') assert len(x.shape) == 3, "data must have shape (c,x,y)" if mixed_precision: context = autocast else: context = no_op with context(): with torch.no_grad(): if self.conv_op == nn.Conv2d: if use_sliding_window: res = self._internal_predict_2D_2Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian, pad_border_mode, pad_kwargs, all_in_gpu, verbose) else: res = self._internal_predict_2D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose) else: raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is") return res @staticmethod def _get_gaussian(patch_size, sigma_scale=1. 
/ 8) -> np.ndarray: tmp = np.zeros(patch_size) center_coords = [i // 2 for i in patch_size] sigmas = [i * sigma_scale for i in patch_size] tmp[tuple(center_coords)] = 1 gaussian_importance_map = gaussian_filter(tmp, sigmas, 0, mode='constant', cval=0) gaussian_importance_map = gaussian_importance_map / np.max(gaussian_importance_map) * 1 gaussian_importance_map = gaussian_importance_map.astype(np.float32) # gaussian_importance_map cannot be 0, otherwise we may end up with nans! gaussian_importance_map[gaussian_importance_map == 0] = np.min( gaussian_importance_map[gaussian_importance_map != 0]) return gaussian_importance_map @staticmethod def _compute_steps_for_sliding_window(patch_size: Tuple[int, ...], image_size: Tuple[int, ...], step_size: float) -> List[List[int]]: assert [i >= j for i, j in zip(image_size, patch_size)], "image size must be as large or larger than patch_size" assert 0 < step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1' # our step width is patch_size*step_size at most, but can be narrower. 
For example if we have image size of # 110, patch size of 32 and step_size of 0.5, then we want to make 4 steps starting at coordinate 0, 27, 55, 78 target_step_sizes_in_voxels = [i * step_size for i in patch_size] num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels, patch_size)] steps = [] for dim in range(len(patch_size)): # the highest step value for this dimension is max_step_value = image_size[dim] - patch_size[dim] if num_steps[dim] > 1: actual_step_size = max_step_value / (num_steps[dim] - 1) else: actual_step_size = 99999999999 # does not matter because there is only one step at 0 steps_here = [int(np.round(actual_step_size * i)) for i in range(num_steps[dim])] steps.append(steps_here) return steps def _internal_predict_3D_3Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple, patch_size: tuple, regions_class_order: tuple, use_gaussian: bool, pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool, verbose: bool) -> Tuple[np.ndarray, np.ndarray]: # better safe than sorry assert len(x.shape) == 4, "x must be (c, x, y, z)" assert self.get_device() != "cpu" if verbose: print("step_size:", step_size) if verbose: print("do mirror:", do_mirroring) assert patch_size is not None, "patch_size cannot be None for tiled prediction" # for sliding window inference the image must at least be as large as the patch size. 
It does not matter # whether the shape is divisible by 2**num_pool as long as the patch size is data, slicer = pad_nd_image(x, patch_size, pad_border_mode, pad_kwargs, True, None) data_shape = data.shape # still c, x, y, z # compute the steps for sliding window steps = self._compute_steps_for_sliding_window(patch_size, data_shape[1:], step_size) num_tiles = len(steps[0]) * len(steps[1]) * len(steps[2]) if verbose: print("data shape:", data_shape) print("patch size:", patch_size) print("steps (x, y, and z):", steps) print("number of tiles:", num_tiles) # we only need to compute that once. It can take a while to compute this due to the large sigma in # gaussian_filter if use_gaussian and num_tiles > 1: if self._gaussian_3d is None or not all( [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_3d)]): if verbose: print('computing Gaussian') gaussian_importance_map = self._get_gaussian(patch_size, sigma_scale=1. / 8) self._gaussian_3d = gaussian_importance_map self._patch_size_for_gaussian_3d = patch_size else: if verbose: print("using precomputed Gaussian") gaussian_importance_map = self._gaussian_3d gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(), non_blocking=True) else: gaussian_importance_map = None if all_in_gpu: # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU if use_gaussian and num_tiles > 1: # half precision for the outputs should be good enough. 
If the outputs here are half, the # gaussian_importance_map should be as well gaussian_importance_map = gaussian_importance_map.half() # make sure we did not round anything to 0 gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[ gaussian_importance_map != 0].min() add_for_nb_of_preds = gaussian_importance_map else: add_for_nb_of_preds = torch.ones(data.shape[1:], device=self.get_device()) if verbose: print("initializing result array (on GPU)") aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half, device=self.get_device()) if verbose: print("moving data to GPU") data = torch.from_numpy(data).cuda(self.get_device(), non_blocking=True) if verbose: print("initializing result_numsamples (on GPU)") aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half, device=self.get_device()) else: if use_gaussian and num_tiles > 1: add_for_nb_of_preds = self._gaussian_3d else: # add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32) add_for_nb_of_preds = np.ones(patch_size, dtype=np.float32) aggregated_results = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32) aggregated_nb_of_predictions = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32) for x in steps[0]: lb_x = x ub_x = x + patch_size[0] for y in steps[1]: lb_y = y ub_y = y + patch_size[1] for z in steps[2]: lb_z = z ub_z = z + patch_size[2] predicted_patch = self._internal_maybe_mirror_and_pred_3D( data[None, :, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z], mirror_axes, do_mirroring, gaussian_importance_map)[0] if all_in_gpu: predicted_patch = predicted_patch.half() else: predicted_patch = predicted_patch.cpu().numpy() aggregated_results[:, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z] += predicted_patch aggregated_nb_of_predictions[:, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z] += add_for_nb_of_preds # we reverse the padding here (remeber that we padded the input to be at least as large as the 
patch size slicer = tuple( [slice(0, aggregated_results.shape[i]) for i in range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:]) aggregated_results = aggregated_results[slicer] aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer] # computing the class_probabilities by dividing the aggregated result with result_numsamples class_probabilities = aggregated_results / aggregated_nb_of_predictions if regions_class_order is None: predicted_segmentation = class_probabilities.argmax(0) else: if all_in_gpu: class_probabilities_here = class_probabilities.detach().cpu().numpy() else: class_probabilities_here = class_probabilities predicted_segmentation = np.zeros(class_probabilities_here.shape[1:], dtype=np.float32) for i, c in enumerate(regions_class_order): predicted_segmentation[class_probabilities_here[i] > 0.5] = c if all_in_gpu: if verbose: print("copying results to CPU") if regions_class_order is None: predicted_segmentation = predicted_segmentation.detach().cpu().numpy() class_probabilities = class_probabilities.detach().cpu().numpy() if verbose: print("prediction done") return predicted_segmentation, class_probabilities def _internal_predict_2D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None, pad_border_mode: str = "constant", pad_kwargs: dict = None, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]: """ This one does fully convolutional inference. 
No sliding window """ assert len(x.shape) == 3, "x must be (c, x, y)" assert self.get_device() != "cpu" assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \ 'run _internal_predict_2D_2Dconv' if verbose: print("do mirror:", do_mirroring) data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True, self.input_shape_must_be_divisible_by) predicted_probabilities = self._internal_maybe_mirror_and_pred_2D(data[None], mirror_axes, do_mirroring, None)[0] slicer = tuple( [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) - (len(slicer) - 1))] + slicer[1:]) predicted_probabilities = predicted_probabilities[slicer] if regions_class_order is None: predicted_segmentation = predicted_probabilities.argmax(0) predicted_segmentation = predicted_segmentation.detach().cpu().numpy() predicted_probabilities = predicted_probabilities.detach().cpu().numpy() else: predicted_probabilities = predicted_probabilities.detach().cpu().numpy() predicted_segmentation = np.zeros(predicted_probabilities.shape[1:], dtype=np.float32) for i, c in enumerate(regions_class_order): predicted_segmentation[predicted_probabilities[i] > 0.5] = c return predicted_segmentation, predicted_probabilities def _internal_predict_3D_3Dconv(self, x: np.ndarray, min_size: Tuple[int, ...], do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None, pad_border_mode: str = "constant", pad_kwargs: dict = None, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]: """ This one does fully convolutional inference. 
No sliding window """ assert len(x.shape) == 4, "x must be (c, x, y, z)" assert self.get_device() != "cpu" assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \ 'run _internal_predict_3D_3Dconv' if verbose: print("do mirror:", do_mirroring) data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True, self.input_shape_must_be_divisible_by) predicted_probabilities = self._internal_maybe_mirror_and_pred_3D(data[None], mirror_axes, do_mirroring, None)[0] slicer = tuple( [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) - (len(slicer) - 1))] + slicer[1:]) predicted_probabilities = predicted_probabilities[slicer] if regions_class_order is None: predicted_segmentation = predicted_probabilities.argmax(0) predicted_segmentation = predicted_segmentation.detach().cpu().numpy() predicted_probabilities = predicted_probabilities.detach().cpu().numpy() else: predicted_probabilities = predicted_probabilities.detach().cpu().numpy() predicted_segmentation = np.zeros(predicted_probabilities.shape[1:], dtype=np.float32) for i, c in enumerate(regions_class_order): predicted_segmentation[predicted_probabilities[i] > 0.5] = c return predicted_segmentation, predicted_probabilities def _internal_maybe_mirror_and_pred_3D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple, do_mirroring: bool = True, mult: np.ndarray or torch.tensor = None) -> torch.tensor: assert len(x.shape) == 5, 'x must be (b, c, x, y, z)' # everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here # we now return a cuda tensor! Not numpy array! 
x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device()) result_torch = torch.zeros([1, self.num_classes] + list(x.shape[2:]), dtype=torch.float).cuda(self.get_device(), non_blocking=True) if mult is not None: mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device()) if do_mirroring: mirror_idx = 8 num_results = 2 ** len(mirror_axes) else: mirror_idx = 1 num_results = 1 for m in range(mirror_idx): if m == 0: pred = self.inference_apply_nonlin(self(x)) result_torch += 1 / num_results * pred if m == 1 and (2 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (4, )))) result_torch += 1 / num_results * torch.flip(pred, (4,)) if m == 2 and (1 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (3, )))) result_torch += 1 / num_results * torch.flip(pred, (3,)) if m == 3 and (2 in mirror_axes) and (1 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 3)))) result_torch += 1 / num_results * torch.flip(pred, (4, 3)) if m == 4 and (0 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (2, )))) result_torch += 1 / num_results * torch.flip(pred, (2,)) if m == 5 and (0 in mirror_axes) and (2 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 2)))) result_torch += 1 / num_results * torch.flip(pred, (4, 2)) if m == 6 and (0 in mirror_axes) and (1 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2)))) result_torch += 1 / num_results * torch.flip(pred, (3, 2)) if m == 7 and (0 in mirror_axes) and (1 in mirror_axes) and (2 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 3, 2)))) result_torch += 1 / num_results * torch.flip(pred, (4, 3, 2)) if mult is not None: result_torch[:, :] *= mult return result_torch def _internal_maybe_mirror_and_pred_2D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple, do_mirroring: bool = True, mult: np.ndarray or torch.tensor = None) -> torch.tensor: # everything in here takes place 
on the GPU. If x and mult are not yet on GPU this will be taken care of here # we now return a cuda tensor! Not numpy array! assert len(x.shape) == 4, 'x must be (b, c, x, y)' x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device()) result_torch = torch.zeros([x.shape[0], self.num_classes] + list(x.shape[2:]), dtype=torch.float).cuda(self.get_device(), non_blocking=True) if mult is not None: mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device()) if do_mirroring: mirror_idx = 4 num_results = 2 ** len(mirror_axes) else: mirror_idx = 1 num_results = 1 for m in range(mirror_idx): if m == 0: pred = self.inference_apply_nonlin(self(x)) result_torch += 1 / num_results * pred if m == 1 and (1 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (3, )))) result_torch += 1 / num_results * torch.flip(pred, (3, )) if m == 2 and (0 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (2, )))) result_torch += 1 / num_results * torch.flip(pred, (2, )) if m == 3 and (0 in mirror_axes) and (1 in mirror_axes): pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2)))) result_torch += 1 / num_results * torch.flip(pred, (3, 2)) if mult is not None: result_torch[:, :] *= mult return result_torch def _internal_predict_2D_2Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple, patch_size: tuple, regions_class_order: tuple, use_gaussian: bool, pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool, verbose: bool) -> Tuple[np.ndarray, np.ndarray]: # better safe than sorry assert len(x.shape) == 3, "x must be (c, x, y)" assert self.get_device() != "cpu" if verbose: print("step_size:", step_size) if verbose: print("do mirror:", do_mirroring) assert patch_size is not None, "patch_size cannot be None for tiled prediction" # for sliding window inference the image must at least be as large as the patch size. 
It does not matter # whether the shape is divisible by 2**num_pool as long as the patch size is data, slicer = pad_nd_image(x, patch_size, pad_border_mode, pad_kwargs, True, None) data_shape = data.shape # still c, x, y # compute the steps for sliding window steps = self._compute_steps_for_sliding_window(patch_size, data_shape[1:], step_size) num_tiles = len(steps[0]) * len(steps[1]) if verbose: print("data shape:", data_shape) print("patch size:", patch_size) print("steps (x, y, and z):", steps) print("number of tiles:", num_tiles) # we only need to compute that once. It can take a while to compute this due to the large sigma in # gaussian_filter if use_gaussian and num_tiles > 1: if self._gaussian_2d is None or not all( [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_2d)]): if verbose: print('computing Gaussian') gaussian_importance_map = self._get_gaussian(patch_size, sigma_scale=1. / 8) self._gaussian_2d = gaussian_importance_map self._patch_size_for_gaussian_2d = patch_size else: if verbose: print("using precomputed Gaussian") gaussian_importance_map = self._gaussian_2d gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(), non_blocking=True) else: gaussian_importance_map = None if all_in_gpu: # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU if use_gaussian and num_tiles > 1: # half precision for the outputs should be good enough. 
If the outputs here are half, the # gaussian_importance_map should be as well gaussian_importance_map = gaussian_importance_map.half() # make sure we did not round anything to 0 gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[ gaussian_importance_map != 0].min() add_for_nb_of_preds = gaussian_importance_map else: add_for_nb_of_preds = torch.ones(data.shape[1:], device=self.get_device()) if verbose: print("initializing result array (on GPU)") aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half, device=self.get_device()) if verbose: print("moving data to GPU") data = torch.from_numpy(data).cuda(self.get_device(), non_blocking=True) if verbose: print("initializing result_numsamples (on GPU)") aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half, device=self.get_device()) else: if use_gaussian and num_tiles > 1: add_for_nb_of_preds = self._gaussian_2d else: add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32) aggregated_results = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32) aggregated_nb_of_predictions = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32) for x in steps[0]: lb_x = x ub_x = x + patch_size[0] for y in steps[1]: lb_y = y ub_y = y + patch_size[1] predicted_patch = self._internal_maybe_mirror_and_pred_2D( data[None, :, lb_x:ub_x, lb_y:ub_y], mirror_axes, do_mirroring, gaussian_importance_map)[0] if all_in_gpu: predicted_patch = predicted_patch.half() else: predicted_patch = predicted_patch.cpu().numpy() aggregated_results[:, lb_x:ub_x, lb_y:ub_y] += predicted_patch aggregated_nb_of_predictions[:, lb_x:ub_x, lb_y:ub_y] += add_for_nb_of_preds # we reverse the padding here (remeber that we padded the input to be at least as large as the patch size slicer = tuple( [slice(0, aggregated_results.shape[i]) for i in range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:]) 
aggregated_results = aggregated_results[slicer] aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer] # computing the class_probabilities by dividing the aggregated result with result_numsamples class_probabilities = aggregated_results / aggregated_nb_of_predictions if regions_class_order is None: predicted_segmentation = class_probabilities.argmax(0) else: if all_in_gpu: class_probabilities_here = class_probabilities.detach().cpu().numpy() else: class_probabilities_here = class_probabilities predicted_segmentation = np.zeros(class_probabilities_here.shape[1:], dtype=np.float32) for i, c in enumerate(regions_class_order): predicted_segmentation[class_probabilities_here[i] > 0.5] = c if all_in_gpu: if verbose: print("copying results to CPU") if regions_class_order is None: predicted_segmentation = predicted_segmentation.detach().cpu().numpy() class_probabilities = class_probabilities.detach().cpu().numpy() if verbose: print("prediction done") return predicted_segmentation, class_probabilities def _internal_predict_3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool, mirror_axes: tuple = (0, 1), regions_class_order: tuple = None, pad_border_mode: str = "constant", pad_kwargs: dict = None, all_in_gpu: bool = False, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]: if all_in_gpu: raise NotImplementedError assert len(x.shape) == 4, "data must be c, x, y, z" predicted_segmentation = [] softmax_pred = [] for s in range(x.shape[1]): pred_seg, softmax_pres = self._internal_predict_2D_2Dconv( x[:, s], min_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose) predicted_segmentation.append(pred_seg[None]) softmax_pred.append(softmax_pres[None]) predicted_segmentation = np.vstack(predicted_segmentation) softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3)) return predicted_segmentation, softmax_pred def predict_3D_pseudo3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], 
do_mirroring: bool, mirror_axes: tuple = (0, 1), regions_class_order: tuple = None, pseudo3D_slices: int = 5, all_in_gpu: bool = False, pad_border_mode: str = "constant", pad_kwargs: dict = None, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]: if all_in_gpu: raise NotImplementedError assert len(x.shape) == 4, "data must be c, x, y, z" assert pseudo3D_slices % 2 == 1, "pseudo3D_slices must be odd" extra_slices = (pseudo3D_slices - 1) // 2 shp_for_pad = np.array(x.shape) shp_for_pad[1] = extra_slices pad = np.zeros(shp_for_pad, dtype=np.float32) data = np.concatenate((pad, x, pad), 1) predicted_segmentation = [] softmax_pred = [] for s in range(extra_slices, data.shape[1] - extra_slices): d = data[:, (s - extra_slices):(s + extra_slices + 1)] d = d.reshape((-1, d.shape[-2], d.shape[-1])) pred_seg, softmax_pres = \ self._internal_predict_2D_2Dconv(d, min_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose) predicted_segmentation.append(pred_seg[None]) softmax_pred.append(softmax_pres[None]) predicted_segmentation = np.vstack(predicted_segmentation) softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3)) return predicted_segmentation, softmax_pred def _internal_predict_3D_2Dconv_tiled(self, x: np.ndarray, patch_size: Tuple[int, int], do_mirroring: bool, mirror_axes: tuple = (0, 1), step_size: float = 0.5, regions_class_order: tuple = None, use_gaussian: bool = False, pad_border_mode: str = "edge", pad_kwargs: dict =None, all_in_gpu: bool = False, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]: if all_in_gpu: raise NotImplementedError assert len(x.shape) == 4, "data must be c, x, y, z" predicted_segmentation = [] softmax_pred = [] for s in range(x.shape[1]): pred_seg, softmax_pres = self._internal_predict_2D_2Dconv_tiled( x[:, s], step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian, pad_border_mode, pad_kwargs, all_in_gpu, verbose) predicted_segmentation.append(pred_seg[None]) 
softmax_pred.append(softmax_pres[None]) predicted_segmentation = np.vstack(predicted_segmentation) softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3)) return predicted_segmentation, softmax_pred if __name__ == '__main__': print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (162, 529, 529), 0.5)) print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (162, 529, 529), 1)) print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (162, 529, 529), 0.1)) print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (60, 448, 224), 1)) print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (60, 448, 224), 0.5)) print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (30, 224, 224), 1)) print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (30, 224, 224), 0.125)) print(SegmentationNetwork._compute_steps_for_sliding_window((123, 54, 123), (246, 162, 369), 0.25))
44,025
52.107358
137
py
CoTr
CoTr-main/CoTr_package/CoTr/network_architecture/CNNBackbone.py
# ------------------------------------------------------------------------
# CNN encoder
# ------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial


class Conv3d_wd(nn.Conv3d):
    """3D convolution with weight standardization.

    The kernel is demeaned and divided by its per-output-channel standard
    deviation on every forward pass before the convolution is applied.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=(1, 1, 1), padding=(0, 0, 0),
                 dilation=(1, 1, 1), groups=1, bias=False):
        super(Conv3d_wd, self).__init__(in_channels, out_channels, kernel_size, stride, padding,
                                        dilation, groups, bias)

    def forward(self, x):
        weight = self.weight
        # Mean over the input-channel and all three kernel dimensions,
        # computed separately for each output channel.
        weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(
            dim=3, keepdim=True).mean(dim=4, keepdim=True)
        weight = weight - weight_mean
        # 1e-12 guards against division by zero for (near-)constant kernels.
        std = torch.sqrt(torch.var(weight.view(weight.size(0), -1), dim=1) + 1e-12).view(-1, 1, 1, 1, 1)
        weight = weight / std.expand_as(weight)
        return F.conv3d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)


def conv3x3x3(in_planes, out_planes, kernel_size, stride=(1, 1, 1), padding=(0, 0, 0),
              dilation=(1, 1, 1), bias=False, weight_std=False):
    """3x3x3 convolution with padding; weight-standardized variant when ``weight_std`` is True."""
    if weight_std:
        return Conv3d_wd(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                         padding=padding, dilation=dilation, bias=bias)
    return nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                     padding=padding, dilation=dilation, bias=bias)


def Norm_layer(norm_cfg, inplanes):
    """Build a normalization layer for ``inplanes`` channels.

    Supported configs: 'BN', 'SyncBN', 'GN' (16 groups), 'IN' (affine).

    Raises:
        ValueError: for an unknown config (the original fell through and
            crashed with an ``UnboundLocalError`` instead).
    """
    if norm_cfg == 'BN':
        return nn.BatchNorm3d(inplanes)
    if norm_cfg == 'SyncBN':
        return nn.SyncBatchNorm(inplanes)
    if norm_cfg == 'GN':
        return nn.GroupNorm(16, inplanes)
    if norm_cfg == 'IN':
        return nn.InstanceNorm3d(inplanes, affine=True)
    raise ValueError('Unsupported norm_cfg: {}'.format(norm_cfg))


def Activation_layer(activation_cfg, inplace=True):
    """Build an activation layer: 'ReLU' or 'LeakyReLU' (negative slope 1e-2).

    Raises:
        ValueError: for an unknown config (previously an ``UnboundLocalError``).
    """
    if activation_cfg == 'ReLU':
        return nn.ReLU(inplace=inplace)
    if activation_cfg == 'LeakyReLU':
        return nn.LeakyReLU(negative_slope=1e-2, inplace=inplace)
    raise ValueError('Unsupported activation_cfg: {}'.format(activation_cfg))


class ResBlock(nn.Module):
    """Single-convolution residual block with an optional downsample on the skip path."""

    expansion = 1

    def __init__(self, inplanes, planes, norm_cfg, activation_cfg, stride=(1, 1, 1),
                 downsample=None, weight_std=False):
        super(ResBlock, self).__init__()
        self.conv1 = conv3x3x3(inplanes, planes, kernel_size=3, stride=stride, padding=1,
                               bias=False, weight_std=weight_std)
        self.norm1 = Norm_layer(norm_cfg, planes)
        self.nonlin = Activation_layer(activation_cfg, inplace=True)
        self.downsample = downsample

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.norm1(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.nonlin(out)
        return out


class Backbone(nn.Module):
    """CNN encoder producing the multi-scale feature pyramid consumed by CoTr.

    ``forward`` returns four feature maps with 64 / 192 / 384 / 384 channels
    at progressively halved spatial resolutions.
    """

    arch_settings = {
        9: (ResBlock, (3, 3, 2))
    }

    def __init__(self, depth, in_channels=1, norm_cfg='BN', activation_cfg='ReLU', weight_std=False):
        super(Backbone, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError('invalid depth {} for resnet'.format(depth))
        self.depth = depth
        block, layers = self.arch_settings[depth]
        self.inplanes = 64
        # Stem: keeps depth resolution, halves the in-plane resolution.
        self.conv1 = conv3x3x3(in_channels, 64, kernel_size=7, stride=(1, 2, 2), padding=3,
                               bias=False, weight_std=weight_std)
        self.norm1 = Norm_layer(norm_cfg, 64)
        self.nonlin = Activation_layer(activation_cfg, inplace=True)
        self.layer1 = self._make_layer(block, 192, layers[0], stride=(2, 2, 2), norm_cfg=norm_cfg,
                                       activation_cfg=activation_cfg, weight_std=weight_std)
        self.layer2 = self._make_layer(block, 384, layers[1], stride=(2, 2, 2), norm_cfg=norm_cfg,
                                       activation_cfg=activation_cfg, weight_std=weight_std)
        self.layer3 = self._make_layer(block, 384, layers[2], stride=(2, 2, 2), norm_cfg=norm_cfg,
                                       activation_cfg=activation_cfg, weight_std=weight_std)
        self.layers = []

        for m in self.modules():
            if isinstance(m, (nn.Conv3d, Conv3d_wd)):
                # Bug fix: the original called the deprecated (now removed)
                # nn.init.kaiming_normal, which raises AttributeError on
                # current PyTorch. Use the in-place variant instead.
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
            elif isinstance(m, (nn.BatchNorm3d, nn.GroupNorm, nn.InstanceNorm3d, nn.SyncBatchNorm)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=(1, 1, 1), norm_cfg='BN',
                    activation_cfg='ReLU', weight_std=False):
        """Stack ``blocks`` residual blocks, the first one optionally strided.

        NOTE: a tuple stride such as (1, 1, 1) compares unequal to the int 1,
        so a 1x1x1 downsample conv is also built in that case; preserved as-is
        because every caller passes stride=(2, 2, 2).
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv3x3x3(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride,
                          bias=False, weight_std=weight_std),
                Norm_layer(norm_cfg, planes * block.expansion))

        layers = []
        layers.append(block(self.inplanes, planes, norm_cfg, activation_cfg, stride=stride,
                            downsample=downsample, weight_std=weight_std))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_cfg, activation_cfg, weight_std=weight_std))

        return nn.Sequential(*layers)

    def init_weights(self):
        """Re-initialize conv kernels (Kaiming) and norm affine parameters (1 / 0)."""
        for m in self.modules():
            if isinstance(m, (nn.Conv3d, Conv3d_wd)):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
            elif isinstance(m, (nn.BatchNorm3d, nn.GroupNorm, nn.InstanceNorm3d, nn.SyncBatchNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        # Collect the feature map after the stem and after each residual stage.
        out = []
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.nonlin(x)
        out.append(x)
        x = self.layer1(x)
        out.append(x)
        x = self.layer2(x)
        out.append(x)
        x = self.layer3(x)
        out.append(x)
        return out
6,314
37.272727
152
py
CoTr
CoTr-main/CoTr_package/CoTr/network_architecture/ResTranUnet.py
# ------------------------------------------------------------------------
# CoTr
# ------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

from CoTr.network_architecture import CNNBackbone
from CoTr.network_architecture.neural_network import SegmentationNetwork
from CoTr.network_architecture.DeTrans.DeformableTrans import DeformableTransformer
from CoTr.network_architecture.DeTrans.position_encoding import build_position_encoding


class Conv3d_wd(nn.Conv3d):
    """3D convolution with weight standardization (kernel demeaned and divided by its std)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=(1, 1, 1), padding=(0, 0, 0),
                 dilation=(1, 1, 1), groups=1, bias=False):
        super(Conv3d_wd, self).__init__(in_channels, out_channels, kernel_size, stride, padding,
                                        dilation, groups, bias)

    def forward(self, x):
        weight = self.weight
        weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(
            dim=3, keepdim=True).mean(dim=4, keepdim=True)
        weight = weight - weight_mean
        # 1e-12 guards against division by zero for (near-)constant kernels.
        std = torch.sqrt(torch.var(weight.view(weight.size(0), -1), dim=1) + 1e-12).view(-1, 1, 1, 1, 1)
        weight = weight / std.expand_as(weight)
        return F.conv3d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)


def conv3x3x3(in_planes, out_planes, kernel_size, stride=(1, 1, 1), padding=(0, 0, 0),
              dilation=(1, 1, 1), groups=1, bias=False, weight_std=False):
    """3x3x3 convolution with padding; weight-standardized variant when ``weight_std`` is True."""
    if weight_std:
        return Conv3d_wd(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                         padding=padding, dilation=dilation, groups=groups, bias=bias)
    return nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                     padding=padding, dilation=dilation, groups=groups, bias=bias)


def Norm_layer(norm_cfg, inplanes):
    """Build a normalization layer ('BN', 'SyncBN', 'GN' with 16 groups, affine 'IN').

    Raises ValueError for an unknown config (previously an UnboundLocalError).
    """
    if norm_cfg == 'BN':
        return nn.BatchNorm3d(inplanes)
    if norm_cfg == 'SyncBN':
        return nn.SyncBatchNorm(inplanes)
    if norm_cfg == 'GN':
        return nn.GroupNorm(16, inplanes)
    if norm_cfg == 'IN':
        return nn.InstanceNorm3d(inplanes, affine=True)
    raise ValueError('Unsupported norm_cfg: {}'.format(norm_cfg))


def Activation_layer(activation_cfg, inplace=True):
    """Build an activation layer: 'ReLU' or 'LeakyReLU' (negative slope 1e-2)."""
    if activation_cfg == 'ReLU':
        return nn.ReLU(inplace=inplace)
    if activation_cfg == 'LeakyReLU':
        return nn.LeakyReLU(negative_slope=1e-2, inplace=inplace)
    raise ValueError('Unsupported activation_cfg: {}'.format(activation_cfg))


class Conv3dBlock(nn.Module):
    """conv -> norm -> activation."""

    def __init__(self, in_channels, out_channels, norm_cfg, activation_cfg, kernel_size,
                 stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), bias=False, weight_std=False):
        super(Conv3dBlock, self).__init__()
        self.conv = conv3x3x3(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, bias=bias, weight_std=weight_std)
        self.norm = Norm_layer(norm_cfg, out_channels)
        self.nonlin = Activation_layer(activation_cfg, inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.norm(x)
        x = self.nonlin(x)
        return x


class ResBlock(nn.Module):
    """Two-convolution residual block with an identity skip connection."""

    def __init__(self, inplanes, planes, norm_cfg, activation_cfg, weight_std=False):
        super(ResBlock, self).__init__()
        self.resconv1 = Conv3dBlock(inplanes, planes, norm_cfg, activation_cfg, kernel_size=3,
                                    stride=1, padding=1, bias=False, weight_std=weight_std)
        self.resconv2 = Conv3dBlock(planes, planes, norm_cfg, activation_cfg, kernel_size=3,
                                    stride=1, padding=1, bias=False, weight_std=weight_std)

    def forward(self, x):
        residual = x
        out = self.resconv1(x)
        out = self.resconv2(out)
        out = out + residual
        return out


class U_ResTran3D(nn.Module):
    """U-shaped decoder on top of the CNN backbone and deformable transformer encoder."""

    def __init__(self, norm_cfg='BN', activation_cfg='ReLU', img_size=None, num_classes=None, weight_std=False):
        super(U_ResTran3D, self).__init__()
        self.MODEL_NUM_CLASSES = num_classes

        self.upsamplex2 = nn.Upsample(scale_factor=(1, 2, 2), mode='trilinear')
        self.transposeconv_stage2 = nn.ConvTranspose3d(384, 384, kernel_size=(2, 2, 2), stride=(2, 2, 2), bias=False)
        self.transposeconv_stage1 = nn.ConvTranspose3d(384, 192, kernel_size=(2, 2, 2), stride=(2, 2, 2), bias=False)
        self.transposeconv_stage0 = nn.ConvTranspose3d(192, 64, kernel_size=(2, 2, 2), stride=(2, 2, 2), bias=False)

        self.stage2_de = ResBlock(384, 384, norm_cfg, activation_cfg, weight_std=weight_std)
        self.stage1_de = ResBlock(192, 192, norm_cfg, activation_cfg, weight_std=weight_std)
        self.stage0_de = ResBlock(64, 64, norm_cfg, activation_cfg, weight_std=weight_std)

        # Deep-supervision heads (one per decoder stage) plus the final classifier.
        self.ds2_cls_conv = nn.Conv3d(384, self.MODEL_NUM_CLASSES, kernel_size=1)
        self.ds1_cls_conv = nn.Conv3d(192, self.MODEL_NUM_CLASSES, kernel_size=1)
        self.ds0_cls_conv = nn.Conv3d(64, self.MODEL_NUM_CLASSES, kernel_size=1)
        self.cls_conv = nn.Conv3d(64, self.MODEL_NUM_CLASSES, kernel_size=1)

        # Initialize only the decoder modules built so far; the backbone and
        # transformer below perform their own initialization.
        for m in self.modules():
            if isinstance(m, (nn.Conv3d, Conv3d_wd, nn.ConvTranspose3d)):
                # In-place Kaiming init (the original redundantly re-assigned
                # the returned tensor to m.weight).
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
            elif isinstance(m, (nn.BatchNorm3d, nn.SyncBatchNorm, nn.InstanceNorm3d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

        self.backbone = CNNBackbone.Backbone(depth=9, norm_cfg=norm_cfg, activation_cfg=activation_cfg,
                                             weight_std=weight_std)
        total = sum([param.nelement() for param in self.backbone.parameters()])
        print(' + Number of Backbone Params: %.2f(e6)' % (total / 1e6))

        self.position_embed = build_position_encoding(mode='v2', hidden_dim=384)
        self.encoder_Detrans = DeformableTransformer(d_model=384, dim_feedforward=1536, dropout=0.1,
                                                     activation='gelu', num_feature_levels=2, nhead=6,
                                                     num_encoder_layers=6, enc_n_points=4)
        total = sum([param.nelement() for param in self.encoder_Detrans.parameters()])
        print(' + Number of Transformer Params: %.2f(e6)' % (total / 1e6))

    def posi_mask(self, x):
        """Select the two deepest backbone levels and build positional embeddings
        plus all-False (no padding) masks for the transformer."""
        x_fea = []
        x_posemb = []
        masks = []
        for lvl, fea in enumerate(x):
            if lvl > 1:  # only the two deepest feature levels feed the transformer
                x_fea.append(fea)
                x_posemb.append(self.position_embed(fea))
                # Bug fix: the mask was created with a hard-coded .cuda() call,
                # which broke CPU execution. Allocate on the feature's device.
                masks.append(torch.zeros((fea.shape[0], fea.shape[2], fea.shape[3], fea.shape[4]),
                                         dtype=torch.bool, device=fea.device))
        return x_fea, masks, x_posemb

    def forward(self, inputs):
        # CNN feature pyramid -> deformable transformer encoder.
        x_convs = self.backbone(inputs)
        x_fea, masks, x_posemb = self.posi_mask(x_convs)
        x_trans = self.encoder_Detrans(x_fea, masks, x_posemb)

        # Multi-scale: x_trans concatenates level -2 tokens first, then level -1
        # tokens (e.g. 12*24*24 + 6*12*12 = 7776); split it back into maps.
        num_lvl0 = x_fea[0].shape[-3] * x_fea[0].shape[-2] * x_fea[0].shape[-1]
        x = self.transposeconv_stage2(x_trans[:, num_lvl0:].transpose(-1, -2).view(x_convs[-1].shape))
        skip2 = x_trans[:, 0:num_lvl0].transpose(-1, -2).view(x_convs[-2].shape)
        x = x + skip2
        x = self.stage2_de(x)
        ds2 = self.ds2_cls_conv(x)

        x = self.transposeconv_stage1(x)
        skip1 = x_convs[-3]
        x = x + skip1
        x = self.stage1_de(x)
        ds1 = self.ds1_cls_conv(x)

        x = self.transposeconv_stage0(x)
        skip0 = x_convs[-4]
        x = x + skip0
        x = self.stage0_de(x)
        ds0 = self.ds0_cls_conv(x)

        result = self.upsamplex2(x)
        result = self.cls_conv(result)
        return [result, ds0, ds1, ds2]


class ResTranUnet(SegmentationNetwork):
    """
    ResTran-3D Unet
    """

    def __init__(self, norm_cfg='BN', activation_cfg='ReLU', img_size=None, num_classes=None,
                 weight_std=False, deep_supervision=False):
        super().__init__()
        self.do_ds = False
        self.U_ResTran3D = U_ResTran3D(norm_cfg, activation_cfg, img_size, num_classes, weight_std)
        # Metadata consumed by the nnU-Net SegmentationNetwork machinery.
        self.conv_op = Conv3d_wd if weight_std else nn.Conv3d
        if norm_cfg == 'BN':
            self.norm_op = nn.BatchNorm3d
        if norm_cfg == 'SyncBN':
            self.norm_op = nn.SyncBatchNorm
        if norm_cfg == 'GN':
            self.norm_op = nn.GroupNorm
        if norm_cfg == 'IN':
            self.norm_op = nn.InstanceNorm3d
        self.dropout_op = nn.Dropout3d
        self.num_classes = num_classes
        self._deep_supervision = deep_supervision
        self.do_ds = deep_supervision

    def forward(self, x):
        seg_output = self.U_ResTran3D(x)
        if self._deep_supervision and self.do_ds:
            # All four outputs (final + three deep-supervision heads).
            return seg_output
        return seg_output[0]
9,009
41.701422
191
py
CoTr
CoTr-main/CoTr_package/CoTr/network_architecture/DeTrans/position_encoding.py
"""
Positional encodings for the transformer.
"""
import math
import torch
from torch import nn

from typing import Optional
from torch import Tensor


class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on (3D) images.

    ``num_pos_feats`` gives the number of channels devoted to each of the
    x / y / d axes; their sum is the embedding dimension.
    """

    def __init__(self, num_pos_feats=[64, 64, 64], temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, x):
        bs, c, d, h, w = x.shape
        # Bug fix: the mask was created with a hard-coded .cuda() call, which
        # broke CPU inference. Allocate it on the input's device instead.
        # The mask is all-False (no padding), so the encoding depends only on
        # the spatial shape of x, not its values.
        mask = torch.zeros(bs, d, h, w, dtype=torch.bool, device=x.device)
        assert mask is not None
        not_mask = ~mask
        # Cumulative (1-based) positions along depth / height / width.
        d_embed = not_mask.cumsum(1, dtype=torch.float32)
        y_embed = not_mask.cumsum(2, dtype=torch.float32)
        x_embed = not_mask.cumsum(3, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            d_embed = (d_embed - 0.5) / (d_embed[:, -1:, :, :] + eps) * self.scale
            y_embed = (y_embed - 0.5) / (y_embed[:, :, -1:, :] + eps) * self.scale
            x_embed = (x_embed - 0.5) / (x_embed[:, :, :, -1:] + eps) * self.scale

        # One geometric frequency ladder per axis.
        dim_tx = torch.arange(self.num_pos_feats[0], dtype=torch.float32, device=x.device)
        dim_tx = self.temperature ** (3 * (dim_tx // 3) / self.num_pos_feats[0])
        dim_ty = torch.arange(self.num_pos_feats[1], dtype=torch.float32, device=x.device)
        dim_ty = self.temperature ** (3 * (dim_ty // 3) / self.num_pos_feats[1])
        dim_td = torch.arange(self.num_pos_feats[2], dtype=torch.float32, device=x.device)
        dim_td = self.temperature ** (3 * (dim_td // 3) / self.num_pos_feats[2])

        pos_x = x_embed[:, :, :, :, None] / dim_tx
        pos_y = y_embed[:, :, :, :, None] / dim_ty
        pos_d = d_embed[:, :, :, :, None] / dim_td
        # Interleave sin/cos over the feature dimension.
        pos_x = torch.stack((pos_x[:, :, :, :, 0::2].sin(), pos_x[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
        pos_y = torch.stack((pos_y[:, :, :, :, 0::2].sin(), pos_y[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
        pos_d = torch.stack((pos_d[:, :, :, :, 0::2].sin(), pos_d[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
        pos = torch.cat((pos_d, pos_y, pos_x), dim=4).permute(0, 4, 1, 2, 3)
        return pos


def build_position_encoding(mode, hidden_dim):
    """Build a 3D sine position encoder whose axis channel counts sum to ``hidden_dim``.

    Raises:
        ValueError: if ``mode`` is not 'v2' or 'sine'.
    """
    N_steps = hidden_dim // 3
    if (hidden_dim % 3) != 0:
        # Give the remainder channels to the depth axis.
        N_steps = [N_steps, N_steps, N_steps + hidden_dim % 3]
    else:
        N_steps = [N_steps, N_steps, N_steps]

    if mode in ('v2', 'sine'):
        position_embedding = PositionEmbeddingSine(num_pos_feats=N_steps, normalize=True)
    else:
        raise ValueError(f"not supported {mode}")

    return position_embedding
3,032
39.986486
109
py
CoTr
CoTr-main/CoTr_package/CoTr/network_architecture/DeTrans/DeformableTrans.py
# ------------------------------------------------------------------------
# 3D Deformable Transformer
# ------------------------------------------------------------------------
# Modified from Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]

import copy
from typing import Optional, List
import math

import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.init import xavier_uniform_, constant_, normal_

from .ops.modules import MSDeformAttn
from .position_encoding import build_position_encoding


class DeformableTransformer(nn.Module):
    """Encoder-only deformable transformer over multi-level 3D feature maps."""

    def __init__(self, d_model=256, nhead=8, num_encoder_layers=6, dim_feedforward=1024,
                 dropout=0.1, activation="relu", num_feature_levels=4, enc_n_points=4):
        super().__init__()
        self.d_model = d_model
        self.nhead = nhead

        encoder_layer = DeformableTransformerEncoderLayer(d_model, dim_feedforward, dropout,
                                                          activation, num_feature_levels,
                                                          nhead, enc_n_points)
        self.encoder = DeformableTransformerEncoder(encoder_layer, num_encoder_layers)

        # Learned embedding added per feature level to disambiguate levels.
        self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))

        self._reset_parameters()

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
        for m in self.modules():
            if isinstance(m, MSDeformAttn):
                m._reset_parameters()
        normal_(self.level_embed)

    def get_valid_ratio(self, mask):
        """Fraction of non-padded positions per axis.

        NOTE(review): the stack order is (d, w, h), which matches the
        (d, x, y) reference-point ordering used in the encoder below.
        """
        _, D, H, W = mask.shape
        valid_D = torch.sum(~mask[:, :, 0, 0], 1)
        valid_H = torch.sum(~mask[:, 0, :, 0], 1)
        valid_W = torch.sum(~mask[:, 0, 0, :], 1)
        valid_ratio_d = valid_D.float() / D
        valid_ratio_h = valid_H.float() / H
        valid_ratio_w = valid_W.float() / W
        valid_ratio = torch.stack([valid_ratio_d, valid_ratio_w, valid_ratio_h], -1)
        return valid_ratio

    def forward(self, srcs, masks, pos_embeds):
        # Flatten every level to (bs, d*h*w, c) and concatenate along the token axis.
        src_flatten = []
        mask_flatten = []
        lvl_pos_embed_flatten = []
        spatial_shapes = []
        for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
            bs, c, d, h, w = src.shape
            spatial_shape = (d, h, w)
            spatial_shapes.append(spatial_shape)
            src = src.flatten(2).transpose(1, 2)
            mask = mask.flatten(1)
            pos_embed = pos_embed.flatten(2).transpose(1, 2)
            lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
            lvl_pos_embed_flatten.append(lvl_pos_embed)
            src_flatten.append(src)
            mask_flatten.append(mask)
        src_flatten = torch.cat(src_flatten, 1)
        mask_flatten = torch.cat(mask_flatten, 1)
        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
        # Start offset of each level inside the flattened token sequence.
        level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
        valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)

        # encoder
        memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios,
                              lvl_pos_embed_flatten, mask_flatten)

        return memory


class DeformableTransformerEncoderLayer(nn.Module):
    """One encoder layer: multi-scale deformable self-attention + FFN, both with residuals."""

    def __init__(self, d_model=256, d_ffn=1024, dropout=0.1, activation="relu",
                 n_levels=4, n_heads=8, n_points=4):
        super().__init__()

        # self attention
        self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)

        # ffn
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.activation = _get_activation_fn(activation)
        self.dropout2 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout3 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(d_model)

    @staticmethod
    def with_pos_embed(tensor, pos):
        return tensor if pos is None else tensor + pos

    def forward_ffn(self, src):
        src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
        src = src + self.dropout3(src2)
        src = self.norm2(src)
        return src

    def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None):
        # self attention (queries carry positional embeddings, values do not)
        src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src,
                              spatial_shapes, level_start_index, padding_mask)
        src = src + self.dropout1(src2)
        src = self.norm1(src)

        # ffn
        src = self.forward_ffn(src)

        return src


class DeformableTransformerEncoder(nn.Module):
    """Stack of deformable encoder layers sharing one set of reference points."""

    def __init__(self, encoder_layer, num_layers):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers

    @staticmethod
    def get_reference_points(spatial_shapes, valid_ratios, device):
        """Normalized voxel-center coordinates for every level, scaled by valid ratios."""
        reference_points_list = []
        for lvl, (D_, H_, W_) in enumerate(spatial_shapes):
            # Bug fix: pass indexing='ij' explicitly. The original relied on
            # torch.meshgrid's deprecated default, which emits warnings and is
            # slated to change; 'ij' preserves the original behavior.
            ref_d, ref_y, ref_x = torch.meshgrid(
                torch.linspace(0.5, D_ - 0.5, D_, dtype=torch.float32, device=device),
                torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
                torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device),
                indexing='ij')
            ref_d = ref_d.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * D_)
            ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 2] * H_)
            ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * W_)
            ref = torch.stack((ref_d, ref_x, ref_y), -1)  # D W H
            reference_points_list.append(ref)
        reference_points = torch.cat(reference_points_list, 1)
        reference_points = reference_points[:, :, None] * valid_ratios[:, None]
        return reference_points

    def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):
        output = src
        reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
        for _, layer in enumerate(self.layers):
            output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)

        return output


def _get_clones(module, N):
    """Return N independent deep copies of ``module`` as a ModuleList."""
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
7,149
38.502762
132
py
CoTr
CoTr-main/CoTr_package/CoTr/network_architecture/DeTrans/ops/functions/ms_deform_attn_func.py
# ------------------------------------------------------------------------ # 3D Deformable Self-attention # ------------------------------------------------------------------------ # Modified from Deformable DETR # Copyright (c) 2020 SenseTime. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ from __future__ import absolute_import from __future__ import print_function from __future__ import division import torch import torch.nn.functional as F from torch.autograd import Function from torch.autograd.function import once_differentiable def ms_deform_attn_core_pytorch_3D(value, value_spatial_shapes, sampling_locations, attention_weights): N_, S_, M_, D_ = value.shape _, Lq_, M_, L_, P_, _ = sampling_locations.shape value_list = value.split([T_ * H_ * W_ for T_, H_, W_ in value_spatial_shapes], dim=1) sampling_grids = 2 * sampling_locations - 1 # sampling_grids = 3 * sampling_locations - 1 sampling_value_list = [] for lid_, (T_, H_, W_) in enumerate(value_spatial_shapes): value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, T_, H_, W_) sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)[:,None,:,:,:] sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_.to(dtype=value_l_.dtype), mode='bilinear', padding_mode='zeros', align_corners=False)[:,:,0] sampling_value_list.append(sampling_value_l_) attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_) output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_) return output.transpose(1, 2).contiguous()
1,798
55.21875
161
py
CoTr
CoTr-main/CoTr_package/CoTr/network_architecture/DeTrans/ops/modules/ms_deform_attn.py
# ------------------------------------------------------------------------
# 3D Deformable Self-attention
# ------------------------------------------------------------------------
# Modified from Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import warnings
import math

import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_

from ..functions.ms_deform_attn_func import ms_deform_attn_core_pytorch_3D


class MSDeformAttn(nn.Module):
    def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
        """
        Multi-Scale Deformable Attention Module
        :param d_model      hidden dimension
        :param n_levels     number of feature levels
        :param n_heads      number of attention heads
        :param n_points     number of sampling points per attention head per feature level
        """
        super().__init__()
        if d_model % n_heads != 0:
            raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
        _d_per_head = d_model // n_heads

        self.im2col_step = 64

        self.d_model = d_model
        self.n_levels = n_levels
        self.n_heads = n_heads
        self.n_points = n_points

        # Per query: 3D offsets and a scalar weight for every
        # (head, level, point) triple, plus value/output projections.
        self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 3)
        self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
        self.value_proj = nn.Linear(d_model, d_model)
        self.output_proj = nn.Linear(d_model, d_model)

        self._reset_parameters()

    def _reset_parameters(self):
        # Zero the offset weights so initial offsets come only from the bias,
        # which is set below to spread heads over distinct 3D directions.
        constant_(self.sampling_offsets.weight.data, 0.)
        # One direction per head, derived from angles around the circle.
        thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()*thetas.cos(), thetas.sin()*thetas.sin()], -1)
        grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 3).repeat(1, self.n_levels, self.n_points, 1)
        # Scale successive points so they step progressively farther out.
        for i in range(self.n_points):
            grid_init[:, :, i, :] *= i + 1
        with torch.no_grad():
            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
        # Attention weights start at zero -> softmax gives uniform weights.
        constant_(self.attention_weights.weight.data, 0.)
        constant_(self.attention_weights.bias.data, 0.)
        xavier_uniform_(self.value_proj.weight.data)
        constant_(self.value_proj.bias.data, 0.)
        xavier_uniform_(self.output_proj.weight.data)
        constant_(self.output_proj.bias.data, 0.)

    def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
        """
        :param query                       (N, Length_{query}, C)
        :param reference_points            (N, Length_{query}, n_levels, 3)
        :param input_flatten               (N, \sum_{l=0}^{L-1} D_l \cdot H_l \cdot W_l, C)
        :param input_spatial_shapes        (n_levels, 3), [(D_0, H_0, W_0), (D_1, H_1, W_1), ..., (D_{L-1}, H_{L-1}, W_{L-1})]
        :param input_level_start_index     (n_levels, ), [0, D_0*H_0*W_0, D_0*H_0*W_0+D_1*H_1*W_1, D_0*H_0*W_0+D_1*H_1*W_1+D_2*H_2*W_2, ..., D_0*H_0*W_0+D_1*H_1*W_1+...+D_{L-1}*H_{L-1}*W_{L-1}]
        :param input_padding_mask          (N, \sum_{l=0}^{L-1} D_l \cdot H_l \cdot W_l), True for padding elements, False for non-padding elements
        :return output                     (N, Length_{query}, C)
        """
        N, Len_q, _ = query.shape
        N, Len_in, _ = input_flatten.shape
        # The flattened input length must equal the sum of D*H*W per level.
        assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1] * input_spatial_shapes[:, 2]).sum() == Len_in

        value = self.value_proj(input_flatten)
        if input_padding_mask is not None:
            # Zero out padded positions so they contribute nothing.
            value = value.masked_fill(input_padding_mask[..., None], float(0))
        # Split channels across heads: (N, Len_in, n_heads, d_model//n_heads).
        value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
        sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 3)
        attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
        # Softmax jointly over all (level, point) samples of each head.
        attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
        # NOTE(review): there is no else branch — if reference_points has a
        # last dimension other than 3, `sampling_locations` is never assigned
        # and the call below raises NameError. Confirm callers always pass 3D
        # reference points (the encoder in this package does).
        if reference_points.shape[-1] == 3:
            # Normalise offsets by spatial extent; axis order (D, W, H)
            # matches the grid layout used by ms_deform_attn_core_pytorch_3D.
            offset_normalizer = torch.stack([input_spatial_shapes[..., 0], input_spatial_shapes[..., 2], input_spatial_shapes[..., 1]], -1)
            sampling_locations = reference_points[:, :, None, :, None, :] \
                                 + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
        output = ms_deform_attn_core_pytorch_3D(value, input_spatial_shapes, sampling_locations, attention_weights)
        output = self.output_proj(output)
        return output
5,082
51.402062
193
py
Pytorch-implementation-of-SRNet
Pytorch-implementation-of-SRNet-master/test.py
"""This module is used to test the Srnet model."""
from glob import glob

import torch
import numpy as np
import imageio as io

from model import Srnet

TEST_BATCH_SIZE = 40  # must be even: batches interleave stego/cover pairs
# NOTE(review): glob() needs a pattern (e.g. ".../*.pgm"); a bare directory
# path matches at most the directory itself — confirm the real paths.
COVER_PATH = "/path/to/cover/images/"
STEGO_PATH = "/path/to/stego/images/"
CHKPT = "./checkpoints/Srnet_model_weights.pt"

cover_image_names = glob(COVER_PATH)
stego_image_names = glob(STEGO_PATH)

cover_labels = np.zeros((len(cover_image_names)))
stego_labels = np.ones((len(stego_image_names)))

model = Srnet().cuda()
ckpt = torch.load(CHKPT)
model.load_state_dict(ckpt["model_state_dict"])

# Reusable input buffer: one grayscale 256x256 image per batch slot.
# pylint: disable=E1101
images = torch.empty((TEST_BATCH_SIZE, 1, 256, 256), dtype=torch.float)
# pylint: enable=E1101

test_accuracy = []
for idx in range(0, len(cover_image_names), TEST_BATCH_SIZE // 2):
    cover_batch = cover_image_names[idx : idx + TEST_BATCH_SIZE // 2]
    stego_batch = stego_image_names[idx : idx + TEST_BATCH_SIZE // 2]

    # Interleave stego (label 1) and cover (label 0) images.
    batch = []
    batch_labels = []
    xi = 0
    yi = 0
    for i in range(2 * len(cover_batch)):
        if i % 2 == 0:
            batch.append(stego_batch[xi])
            batch_labels.append(1)
            xi += 1
        else:
            batch.append(cover_batch[yi])
            batch_labels.append(0)
            yi += 1

    # BUG FIX: iterate over the actual batch length instead of a fixed
    # TEST_BATCH_SIZE — the original raised IndexError on a short final
    # batch when the dataset size is not a multiple of TEST_BATCH_SIZE // 2.
    # pylint: disable=E1101
    for i in range(len(batch)):
        images[i, 0, :, :] = torch.tensor(io.imread(batch[i])).cuda()
    image_tensor = images[: len(batch)].cuda()
    batch_labels = torch.tensor(batch_labels, dtype=torch.long).cuda()
    # pylint: enable=E1101

    outputs = model(image_tensor)
    prediction = outputs.data.max(1)[1]  # argmax over class scores
    accuracy = (
        prediction.eq(batch_labels.data).sum() * 100.0 / (batch_labels.size()[0])
    )
    test_accuracy.append(accuracy.item())

# BUG FIX: the original spec "{...:%.2f}" is an invalid format specifier and
# raises ValueError at runtime; the intended spec is ".2f".
print(f"test_accuracy = {sum(test_accuracy)/len(test_accuracy):.2f}")
1,824
27.968254
71
py
Pytorch-implementation-of-SRNet
Pytorch-implementation-of-SRNet-master/train.py
"""This module is used to train the Srnet model."""
import logging
import os
import sys
import time

import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms

from dataset import dataset
from opts.options import arguments
from model.model import Srnet
from utils.utils import (
    latest_checkpoint,
    adjust_learning_rate,
    weights_init,
    saver,
)

# Command-line / config options shared by the whole script.
opt = arguments()

logging.basicConfig(
    filename="training.log",
    format="%(asctime)s %(message)s",
    level=logging.DEBUG,
)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == "__main__":
    # Training data: random 90-degree rotations as augmentation.
    train_data = dataset.DatasetLoad(
        opt.cover_path,
        opt.stego_path,
        opt.train_size,
        transform=transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.RandomRotation(degrees=90),
                transforms.ToTensor(),
            ]
        ),
    )
    # Validation data: no augmentation.
    val_data = dataset.DatasetLoad(
        opt.valid_cover_path,
        opt.valid_stego_path,
        opt.val_size,
        transform=transforms.ToTensor(),
    )

    # Creating training and validation loader.
    train_loader = DataLoader(
        train_data, batch_size=opt.batch_size, shuffle=True
    )
    valid_loader = DataLoader(
        val_data, batch_size=opt.batch_size, shuffle=False
    )

    # model creation and initialization.
    model = Srnet()
    model.to(device)
    model = model.apply(weights_init)

    # Loss function and Optimizer
    # NOTE(review): NLLLoss expects log-probabilities — confirm Srnet's
    # final layer emits log-softmax outputs.
    loss_fn = nn.NLLLoss()
    optimizer = torch.optim.Adamax(
        model.parameters(),
        lr=opt.lr,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
    )

    # Resume from the latest checkpoint when one exists.
    check_point = latest_checkpoint()
    if not check_point:
        START_EPOCH = 1
        if not os.path.exists(opt.checkpoints_dir):
            os.makedirs(opt.checkpoints_dir)
        print("No checkpoints found!!, Retraining started... ")
    else:
        pth = opt.checkpoints_dir + "net_" + str(check_point) + ".pt"
        ckpt = torch.load(pth)
        START_EPOCH = ckpt["epoch"] + 1
        model.load_state_dict(ckpt["model_state_dict"])
        optimizer.load_state_dict(ckpt["optimizer_state_dict"])
        print("Model Loaded from epoch " + str(START_EPOCH) + "..")

    for epoch in range(START_EPOCH, opt.num_epochs + 1):
        training_loss = []
        training_accuracy = []
        validation_loss = []
        validation_accuracy = []
        test_accuracy = []  # NOTE(review): declared but never filled or used.

        # Training
        model.train()
        st_time = time.time()
        adjust_learning_rate(optimizer, epoch)
        for i, train_batch in enumerate(train_loader):
            # Each sample yields a (cover, stego) pair; concatenate so one
            # batch holds both classes with matching labels.
            images = torch.cat((train_batch["cover"], train_batch["stego"]), 0)
            labels = torch.cat(
                (train_batch["label"][0], train_batch["label"][1]), 0
            )
            images = images.to(device, dtype=torch.float)
            labels = labels.to(device, dtype=torch.long)

            optimizer.zero_grad()
            outputs = model(images)
            loss = loss_fn(outputs, labels)
            loss.backward()
            optimizer.step()

            training_loss.append(loss.item())
            prediction = outputs.data.max(1)[1]  # predicted class index
            accuracy = (
                prediction.eq(labels.data).sum() * 100.0 / (labels.size()[0])
            )
            training_accuracy.append(accuracy.item())
            # \r keeps the progress line updating in place.
            sys.stdout.write(
                f"\r Epoch:{epoch}/{opt.num_epochs}"
                f" Batch:{i+1}/{len(train_loader)}"
                f" Loss:{training_loss[-1]:.4f}"
                f" Acc:{training_accuracy[-1]:.2f}"
                f" LR:{optimizer.param_groups[0]['lr']:.4f}"
            )
        end_time = time.time()

        # Validation
        model.eval()
        with torch.no_grad():
            for i, val_batch in enumerate(valid_loader):
                images = torch.cat((val_batch["cover"], val_batch["stego"]), 0)
                labels = torch.cat(
                    (val_batch["label"][0], val_batch["label"][1]), 0
                )
                images = images.to(device, dtype=torch.float)
                labels = labels.to(device, dtype=torch.long)
                outputs = model(images)
                loss = loss_fn(outputs, labels)
                validation_loss.append(loss.item())
                prediction = outputs.data.max(1)[1]
                accuracy = (
                    prediction.eq(labels.data).sum() * 100.0 / (labels.size()[0])
                )
                validation_accuracy.append(accuracy.item())

        # NOTE(review): these averages are computed but unused — the message
        # and checkpoint below recompute the same expressions inline.
        avg_train_loss = sum(training_loss) / len(training_loss)
        avg_valid_loss = sum(validation_loss) / len(validation_loss)

        message = (
            f"Epoch: {epoch}. "
            f"Train Loss:{(sum(training_loss) / len(training_loss)):.5f}. "
            f"Valid Loss:{(sum(validation_loss) / len(validation_loss)):.5f}. "
            "Train"
            f" Acc:{(sum(training_accuracy) / len(training_accuracy)):.2f} "
            "Valid"
            f" Acc:{(sum(validation_accuracy) / len(validation_accuracy)):.2f} "
        )
        print("\n", message)
        logging.info(message)

        # Persist a full resume checkpoint for this epoch.
        state = {
            "epoch": epoch,
            "opt": opt,
            "train_loss": sum(training_loss) / len(training_loss),
            "valid_loss": sum(validation_loss) / len(validation_loss),
            "train_accuracy": sum(training_accuracy) / len(training_accuracy),
            "valid_accuracy": sum(validation_accuracy) / len(validation_accuracy),
            "model_state_dict": model.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
            "lr": optimizer.param_groups[0]["lr"],
        }
        saver(state, opt.checkpoints_dir, epoch)
5,885
29.816754
80
py
Pytorch-implementation-of-SRNet
Pytorch-implementation-of-SRNet-master/dataset/dataset.py
"""This module provides the data samples for training."""
import os
from typing import Tuple

import torch
from torch import Tensor
from torch.utils.data import Dataset
import imageio as io

from opts.options import arguments

opt = arguments()

# pylint: disable=E1101
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# pylint: enable=E1101


class DatasetLoad(Dataset):
    """This class returns the data samples."""

    def __init__(
        self,
        cover_path: str,
        stego_path: str,
        size: int,
        transform=None,
    ) -> None:
        """Constructor.

        Args:
            cover_path (str): path to cover images.
            stego_path (str): path to stego images.
            size (int): no. of images in any of (cover / stego)
                directory for training.
            transform (callable, optional): torchvision-style transform
                applied identically to both images. Defaults to None.
        """
        self.cover = cover_path
        self.stego = stego_path
        self.transforms = transform
        self.data_size = size

    def __len__(self) -> int:
        """returns the length of the dataset."""
        return self.data_size

    def __getitem__(self, index: int) -> dict:
        """Returns the (cover, stego) pair for training.

        Args:
            index (int): a random int value in range (0, len(dataset)).

        Returns:
            dict: {"cover": Tensor, "stego": Tensor,
            "label": [cover_label, stego_label]} — note the original
            annotation claimed Tuple[Tensor, Tensor]; a dict is returned.
        """
        # Image files are named 1.pgm, 2.pgm, ... so shift the 0-based
        # dataset index to the 1-based filename.
        index += 1
        img_name = str(index) + ".pgm"
        cover_img = io.imread(os.path.join(self.cover, img_name))
        stego_img = io.imread(os.path.join(self.stego, img_name))
        # pylint: disable=E1101
        label1 = torch.tensor(0, dtype=torch.long).to(device)  # cover class
        label2 = torch.tensor(1, dtype=torch.long).to(device)  # stego class
        # pylint: enable=E1101
        if self.transforms:
            cover_img = self.transforms(cover_img)
            stego_img = self.transforms(stego_img)
        sample = {"cover": cover_img, "stego": stego_img}
        sample["label"] = [label1, label2]
        return sample
2,091
29.318841
77
py