# Source: pvst / Task02_Heart / data_proc_for_3d.py
# (upload header: mytcl, "Upload 687 files", commit 1c40643 verified)
import sys
import cv2
import matplotlib.pyplot as plt
import nibabel as nib
import os
import glob
# from scipy.ndimage import zoom
import numpy as np
import skimage.transform
import torch.optim
from skimage import transform
from scipy.ndimage import binary_fill_holes, zoom
from scipy.ndimage import map_coordinates
from vnet import VNet
from half_vnet import HalfVNet
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from torch.optim import AdamW
from torch.cuda.amp import GradScaler
from torch.cuda.amp import autocast
from tqdm import tqdm
def handle_image_and_label():
    """Compute normalized foreground bounding boxes for every label volume.

    Scans each ``labelsTr/*.nii.gz`` volume, finds the normalized extent of
    the foreground along each axis, and saves all boxes to
    ``./imagesTr/pos_labels.npy`` with shape (N, 6):
    ``[z_min, z_max, x_min, x_max, y_min, y_max]``, each bound in [0, 1]
    padded outward by eps (see get_min_and_max_by_axis).

    Side effects: shows a 3-panel matplotlib preview per volume and writes
    the .npy file. Paths are hard-coded for the original author's machine.
    """
    pos_label = []
    image_paths = glob.glob(r'C:\Users\zhang\PycharmProjects\mmsegmentation\data\Task02_Heart\labelsTr\*.nii.gz')
    for path in image_paths:
        # Derive the matching image filename (only used for logging).
        filename = path.split('\\')[-1].split('.')[0].replace('label', 'image')
        print(filename)
        # Load the label volume and move the slice axis to the front:
        # (H, W, D) -> (D, H, W).
        image = nib.load(path).dataobj
        image = np.array(image, dtype=np.int8)  # labels are small ints; int8 is safe
        image = np.swapaxes(image, 1, 2)
        image = np.swapaxes(image, 0, 1)
        D, H, W = image.shape
        plt.subplot(1, 3, 1)
        plt.imshow(image[60, :, :])
        # BUG FIX: the default resize (bilinear + anti-aliasing + range
        # rescaling) turns a discrete label map into arbitrary floats, which
        # breaks the unique-value slice test in get_min_and_max_by_axis.
        # Nearest-neighbour interpolation keeps the labels discrete.
        image = transform.resize(image, (128, 320, 320), order=0,
                                 preserve_range=True, anti_aliasing=False)
        plt.subplot(1, 3, 2)
        plt.imshow(image[60, :, :])
        # Normalized foreground extent along each axis of the resized volume
        # (normalized coords are resolution-independent).
        z_min, z_max = get_min_and_max_by_axis(image, 0)
        x_min, x_max = get_min_and_max_by_axis(image, 1)
        y_min, y_max = get_min_and_max_by_axis(image, 2)
        label = [z_min, z_max, x_min, x_max, y_min, y_max]
        print(image.shape, label)
        pos_label.append(label)
        # Restore the original depth only for the visual sanity check below.
        image = transform.resize(image, (D, 320, 320), order=0,
                                 preserve_range=True, anti_aliasing=False)
        plt.subplot(1, 3, 3)
        plt.imshow(image[60, :, :])
        plt.show()
    pos_label = np.array(pos_label)
    print(pos_label.shape)
    np.save('./imagesTr/pos_labels.npy', pos_label)
def get_min_and_max_by_axis(image, axis, eps=1e-2):
    """Return the normalized (min, max) extent of informative slices along *axis*.

    A slice is "informative" when it contains more than one distinct value,
    i.e. some foreground on an otherwise uniform background. Bounds are
    slice_index / axis_length, padded outward by *eps*.

    Args:
        image: 3-D ndarray (a label volume).
        axis: axis to scan (0, 1 or 2).
        eps: padding added below the min and above the max bound.

    Returns:
        (norm_min, norm_max) as floats.

    Raises:
        ValueError: if no slice along *axis* has more than one distinct value
            (previously this crashed ``min()`` with an opaque error).
    """
    length = image.shape[axis]
    # Vectorized equivalent of the original per-slice len(np.unique(s)) != 1
    # test: a slice has >1 distinct value iff its min differs from its max.
    flat = np.moveaxis(np.asarray(image), axis, 0).reshape(length, -1)
    informative = np.flatnonzero(flat.min(axis=1) != flat.max(axis=1))
    if informative.size == 0:
        raise ValueError(f'no informative slices found along axis {axis}')
    lo, hi = int(informative[0]), int(informative[-1])
    norm_min, norm_max = lo / length - eps, hi / length + eps
    print(lo, int(norm_min * length), hi, int(norm_max * length))
    return norm_min, norm_max
class NIIDataset(Dataset):
    """Dataset of 3-D NIfTI volumes paired with precomputed bounding boxes.

    Expects ``pos_labels.npy`` (saved by handle_image_and_label) in the same
    directory as the image glob. Each item is ``(image, label)`` where
    ``image`` is a float32 ndarray of shape ``(1, *resize_shape)`` and
    ``label`` is a row of 6 normalized bounds
    ``[z_min, z_max, x_min, x_max, y_min, y_max]``.

    NOTE(review): image files are matched to label rows purely by glob order
    vs. the order the labels were saved in — verify both orders agree on the
    target filesystem.
    """

    def __init__(self, path, resize_shape):
        super().__init__()
        self.image_paths = glob.glob(path)
        # BUG FIX: derive the label path from the glob's directory instead of
        # slicing a fixed number of characters off *path*, which silently
        # broke for any pattern not ending in exactly '*.nii.gz'.
        label_path = os.path.join(os.path.dirname(path), 'pos_labels.npy')
        self.labels = np.load(label_path)
        self.resize_shape = resize_shape

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, index):
        image = np.array(nib.load(self.image_paths[index]).dataobj)
        # Resize, add a leading channel axis, and cast to float32:
        # skimage's resize returns float64, which would mismatch the model's
        # float32 parameters when batched by the DataLoader.
        image = transform.resize(image, output_shape=self.resize_shape)
        image = image[np.newaxis, :].astype(np.float32)
        label = self.labels[index]
        return image, label
if __name__ == '__main__':
    # handle_image_and_label()
    # Bounding-box regression: train HalfVNet to predict the 6 normalized
    # bounds saved in pos_labels.npy from the resized MRI volume.
    dataset = NIIDataset(path='./imagesTr/*.nii.gz', resize_shape=(128, 320, 320))
    dataloader = DataLoader(dataset, batch_size=2, shuffle=True)
    test_dataloader = DataLoader(dataset, batch_size=1)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(device)
    model = HalfVNet().to(device)
    # Sum (not mean) of absolute errors over the 6 coordinates per batch.
    criterion = torch.nn.L1Loss(reduction='sum').to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=0.001)
    scaler = GradScaler()
    EPOCHS = 100
    # weights = torch.load('./weights/vnet_100.pth')
    # model.load_state_dict(weights)
    TRAIN = True
    TEST = True
    if TRAIN:
        for epoch in range(1, EPOCHS + 1):
            model.train()
            losses = []
            train_bar = tqdm(dataloader, file=sys.stdout)
            for step, (images, labels) in enumerate(train_bar):
                images = images.to(device)
                labels = labels.to(torch.float32).to(device)
                optimizer.zero_grad()
                # FIX: autocast wraps only the forward pass; backward and
                # optimizer step run outside the autocast region as the AMP
                # docs recommend.
                with autocast():
                    output = model(images)
                    loss = criterion(output, labels)
                scaler.scale(loss).backward()
                scaler.step(optimizer)
                scaler.update()
                losses.append(loss.item())
                train_bar.set_postfix(epoch=epoch, step=step, step_loss=loss.item(),
                                      mean_loss=sum(losses) / len(losses))
            if epoch % 10 == 0:
                torch.save(model.state_dict(), f'./weights/vnet_{epoch}.pth')
    if TEST:
        weights = torch.load('./weights/vnet_100.pth')
        model.load_state_dict(weights)
        model.eval()
        # BUG FIX: evaluate on the dedicated test_dataloader (batch_size=1,
        # unshuffled) — previously this looped over the shuffled training
        # loader and test_dataloader was never used. Also disable gradient
        # tracking during inference.
        with torch.no_grad():
            for step, (images, labels) in enumerate(test_dataloader):
                images = images.to(device)
                labels = labels.to(torch.float32).to(device)
                with autocast():
                    output = model(images)
                print("labels:", labels)
                print("predicts:", output)