File size: 5,575 Bytes
b4d7ac8 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 |
import numpy as np
from light_training.dataloading.dataset import get_train_val_test_loader_from_train
import torch
import torch.nn as nn
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.utils.files_helper import save_new_model_and_delete_last
from monai.losses.dice import DiceLoss
# Fix all random seeds (via MONAI) so training runs are reproducible.
set_determinism(123)
import os
# Preprocessed full-resolution training data produced by the data pipeline.
data_dir = "./data/fullres/train"
logdir = f"./logs/segmamba"
# Checkpoints are written under <logdir>/model (see validation_end).
model_save_path = os.path.join(logdir, "model")
# augmentation = "nomirror"
augmentation = True
env = "pytorch"
max_epoch = 1000
batch_size = 2
# Run validation every N epochs.
val_every = 2
num_gpus = 1
device = "cuda:0"
# Patch size used for sliding-window inference (and as self.patch_size).
roi_size = [128, 128, 128]
def func(m, epochs):
    """Exponential ramp weight: exp(-10) at m=0, rising to 1.0 at m=epochs."""
    progress = m / epochs
    return np.exp(-10.0 * (1.0 - progress) ** 2)
class BraTSTrainer(Trainer):
    """Trainer for BraTS brain-tumor segmentation using the SegMamba model."""

    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)
        # Sliding-window inference over full volumes, one window at a time,
        # with 50% overlap between adjacent windows.
        self.window_infer = SlidingWindowInferer(roi_size=roi_size,
                                                 sw_batch_size=1,
                                                 overlap=0.5)
        self.augmentation = augmentation
        # Imported lazily so importing this module does not require the model package.
        from model_segmamba.segmamba import SegMamba
        # 4 input channels -> 4 output classes (background + labels 1/2/3;
        # see convert_labels for how labels map to TC/WT/ET).
        self.model = SegMamba(in_chans=4,
                              out_chans=4,
                              depths=[2,2,2,2],
                              feat_size=[48, 96, 192, 384])
        self.patch_size = roi_size
        # Best validation mean dice seen so far; used to gate "best_model" checkpoints.
        self.best_mean_dice = 0.0
        self.ce = nn.CrossEntropyLoss()
        # NOTE(review): self.mse and self.train_process are not referenced in this
        # file — presumably consumed by the base Trainer or kept from an earlier
        # version; confirm before removing.
        self.mse = nn.MSELoss()
        self.train_process = 18
        # nnU-Net-style SGD: high Nesterov momentum with polynomial LR decay.
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5,
                                         momentum=0.99, nesterov=True)
        self.scheduler_type = "poly"
        # Same criterion as self.ce; training_step uses self.cross.
        self.cross = nn.CrossEntropyLoss()
def training_step(self, batch):
    """Run one training step: forward pass and cross-entropy loss on a batch."""
    image, label = self.get_input(batch)
    logits = self.model(image)
    loss = self.cross(logits, label)
    self.log("training_loss", loss, step=self.global_step)
    return loss
def convert_labels(self, labels):
    """Map raw label ids to three stacked binary channels: TC, WT, ET.

    Tumor core (TC) = labels 1 or 3; whole tumor (WT) = TC plus label 2;
    enhancing tumor (ET) = label 3. Channels are concatenated along dim 1.
    """
    tumor_core = (labels == 1) | (labels == 3)
    whole_tumor = tumor_core | (labels == 2)
    enhancing = labels == 3
    return torch.cat([tumor_core, whole_tumor, enhancing], dim=1).float()
def get_input(self, batch):
    """Pull (image, label) out of a loader batch; label channel 0 cast to int64."""
    image = batch["data"]
    seg = batch["seg"]
    return image, seg[:, 0].long()
def cal_metric(self, gt, pred, voxel_spacing=(1.0, 1.0, 1.0)):
    """Return np.array([dice, 50]) for one binary ground-truth/prediction pair.

    The second element is a fixed placeholder (presumably standing in for a
    surface metric such as HD95 — confirm) so the result keeps shape (2,).
    Edge cases: both masks empty -> dice 1.0; exactly one empty -> dice 0.0.

    Note: ``voxel_spacing`` is currently unused by the body. Its default was
    changed from a mutable list to a tuple to avoid the shared-mutable-default
    pitfall; callers passing a list are unaffected.
    """
    if pred.sum() > 0 and gt.sum() > 0:
        d = dice(pred, gt)
        return np.array([d, 50])
    if gt.sum() == 0 and pred.sum() == 0:
        return np.array([1.0, 50])
    return np.array([0.0, 50])
def validation_step(self, batch):
    """Compute per-channel (TC, WT, ET) dice scores for one validation batch."""
    image, label = self.get_input(batch)
    # Hard class prediction, re-expanded to (B, 1, ...) for convert_labels.
    seg_pred = self.model(image).argmax(dim=1)[:, None]
    pred_channels = self.convert_labels(seg_pred).cpu().numpy()
    target_channels = self.convert_labels(label[:, None]).cpu().numpy()
    num_channels = 3
    # cal_metric returns [dice, placeholder]; keep only the dice value.
    return [self.cal_metric(target_channels[:, c], pred_channels[:, c])[0]
            for c in range(num_channels)]
def validation_end(self, val_outputs):
    """Aggregate validation dices, log them, and write checkpoints.

    ``val_outputs`` collects the per-batch results of ``validation_step``;
    indexing ``dices[0]/[1]/[2]`` here presumably yields all TC/WT/ET values
    respectively (i.e. the base Trainer stacks channel-major) — confirm
    against the light_training Trainer implementation.
    """
    dices = val_outputs
    tc, wt, et = dices[0].mean(), dices[1].mean(), dices[2].mean()
    print(f"dices is {tc, wt, et}")
    mean_dice = (tc + wt + et) / 3
    self.log("tc", tc, step=self.epoch)
    self.log("wt", wt, step=self.epoch)
    self.log("et", et, step=self.epoch)
    self.log("mean_dice", mean_dice, step=self.epoch)
    # Keep only the best-scoring checkpoint; older "best_model" files are deleted.
    if mean_dice > self.best_mean_dice:
        self.best_mean_dice = mean_dice
        save_new_model_and_delete_last(self.model,
                                       os.path.join(model_save_path,
                                                    f"best_model_{mean_dice:.4f}.pt"),
                                       delete_symbol="best_model")
    # Always refresh the "final" (most recent) checkpoint.
    save_new_model_and_delete_last(self.model,
                                   os.path.join(model_save_path,
                                                f"final_model_{mean_dice:.4f}.pt"),
                                   delete_symbol="final_model")
    # Periodic raw state_dict snapshot every 100 epochs; these are never deleted.
    if (self.epoch + 1) % 100 == 0:
        torch.save(self.model.state_dict(), os.path.join(model_save_path, f"tmp_model_ep{self.epoch}_{mean_dice:.4f}.pt"))
    print(f"mean_dice is {mean_dice}")
if __name__ == "__main__":
trainer = BraTSTrainer(env_type=env,
max_epochs=max_epoch,
batch_size=batch_size,
device=device,
logdir=logdir,
val_every=val_every,
num_gpus=num_gpus,
master_port=17759,
training_script=__file__)
train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(data_dir)
trainer.train(train_dataset=train_ds, val_dataset=val_ds)
|