{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:14.498727Z", "iopub.status.busy": "2023-07-29T17:57:14.498330Z", "iopub.status.idle": "2023-07-29T17:57:23.885819Z", "shell.execute_reply": "2023-07-29T17:57:23.884761Z", "shell.execute_reply.started": "2023-07-29T17:57:14.498682Z" } }, "outputs": [], "source": [ "##### OLD model for finding survival in days ( regression) based ############\n", "\n", "# -*- coding: utf-8 -*-\n", "\"\"\"\n", "Created on Wed Jan 19 10:30:33 2022\n", "\n", "@author: MIDL\n", "\"\"\"\n", "################## keras data generator ###########################\n", "\n", "from sklearn.model_selection import train_test_split\n", "import os\n", "import tensorflow as tf\n", "import numpy as np\n", "\n", "# lists of directories with studies\n", "# train_and_val_directories = [f.path for f in os.scandir('C:/Users/marya/Downloads/Brats 2020 adjusted') if f.is_dir()]\n", "case_path1 = '../input/combine-all-2109'\n", "case_path2 = '../input/adjustedmask2019' \n", "case_path3 = '../input/adjusted-survival-2019'\n", "case_path4 = '../input/adjustedlabels2019'\n", "\n", "\n", "# case_main = 'C:/Users/MIDL/Downloads/3d_model_december/Brats 2020 adjusted'\n", "\n", "train_directory1 = [f.path for f in os.scandir(case_path1) ]\n", "# train_directory2 = [f.path for f in os.scandir(case_path2) ]\n", "\n", "def pathListIntoIds(dirList):\n", " x = []\n", " for i in range(0,len(dirList)):\n", "# print(dirList[i][dirList[i].rfind('/')+1:])\n", "# x.append(dirList[i][dirList[i].rfind('\\\\')+1:]) #for local system\n", " x.append(dirList[i][dirList[i].rfind('/')+1:]) #for KAGGLE\n", " return x\n", "\n", "train_and_test_ids1 = pathListIntoIds(train_directory1); \n", "\n", " \n", "train_test_ids, val_ids = train_test_split(train_and_test_ids1,test_size=0.1) \n", "train_ids, test_ids = train_test_split(train_test_ids,test_size=0.22) \n", "\n", "\n", "# train_and_test_ids2 = 
pathListIntoIds(train_directory2);\n", "\n", "# masks_test_ids, masks_val_ids = train_test_split(train_and_test_ids,test_size=0.3) \n", "#train_ids, test_ids = train_test_split(train_test_ids,test_size=0.5) \n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:23.887826Z", "iopub.status.busy": "2023-07-29T17:57:23.887094Z", "iopub.status.idle": "2023-07-29T17:57:23.893067Z", "shell.execute_reply": "2023-07-29T17:57:23.892340Z", "shell.execute_reply.started": "2023-07-29T17:57:23.887792Z" } }, "outputs": [], "source": [ "print(\"train_ids\",len(train_ids))\n", "print(\"val_ids\",len(val_ids))\n", "print(\"test_ids\",len(test_ids))\n", "# 70% train\n", "# 20% test\n", "# 10% validation" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:23.895095Z", "iopub.status.busy": "2023-07-29T17:57:23.894543Z", "iopub.status.idle": "2023-07-29T17:57:23.916970Z", "shell.execute_reply": "2023-07-29T17:57:23.915854Z", "shell.execute_reply.started": "2023-07-29T17:57:23.895053Z" } }, "outputs": [], "source": [ "\n", "################### Override Keras sequence DataGenerator class #########################\n", "\n", "class DataGenerator(tf.keras.utils.Sequence):\n", " 'Generates data for Keras'\n", " def __init__(self, list_IDs, dim=(128,128,128), batch_size = 1, n_channels = 4, shuffle=True, augment=False):\n", " 'Initialization'\n", " self.dim = dim\n", " self.batch_size = batch_size\n", " self.list_IDs = list_IDs\n", " self.n_channels = n_channels\n", " self.shuffle = shuffle\n", " self.on_epoch_end()\n", " self.augment = augment\n", "\n", " def __len__(self):\n", " 'Denotes the number of batches per epoch'\n", " return int(np.floor(len(self.list_IDs) / self.batch_size))\n", "\n", " def __getitem__(self, index):\n", " 'Generate one batch of data'\n", " \n", " # Generate indexes of the batch\n", " indexes = 
self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n", "\n", " # Find list of IDs\n", " Batch_ids = [self.list_IDs[k] for k in indexes]\n", "\n", " # Generate data\n", " X, y, z, v, Batch_ids = self.__data_generation(Batch_ids)\n", "\n", " return X, [y, z, v]\n", "\n", " def on_epoch_end(self):\n", " 'Updates indexes after each epoch'\n", " \n", " self.indexes = np.arange(len(self.list_IDs))\n", " if self.shuffle == True:\n", " np.random.shuffle(self.indexes)\n", "\n", " def __data_generation(self, Batch_ids):\n", " 'Generates data containing batch_size samples' \n", " \n", " X = []\n", " y = []\n", " z = []\n", " v = []\n", " # Generate data\n", " for c, i in enumerate(Batch_ids):\n", " data_path1 = os.path.join(case_path1, i);\n", " flair = np.load(data_path1)\n", " flair = np.asarray(flair,dtype=np.float32)\n", " \n", "# print(flair.shape)\n", " \n", " masks = i[:6]+'mask_2019_'+i[12:]\n", " data_path2 = os.path.join(case_path2, masks);\n", " seg = np.load(data_path2)\n", " seg = np.asarray(seg,dtype=np.float32)\n", " \n", " survive = i[:6]+'survival'+i[12:]\n", " data_path3 = os.path.join(case_path3, survive);\n", " survival = np.load(data_path3, allow_pickle= True)\n", " \n", " label = i[:6]+'label'+i[12:]\n", " data_path4 = os.path.join(case_path4, label);\n", " label = np.load(data_path4, allow_pickle= True)\n", " \n", " \n", " if self.augment == True:\n", "# print(\"augmenting\")\n", "# print(\"flair.dtype\",flair.dtype)\n", " #flair,seg,angle = rotate(flair,seg)\n", "# print(\"angle\",angle)\n", " augmented = augmentor.apply_augmentation_to_batch(flair, seg)\n", " flair = augmented[0]\n", " seg = augmented[1]\n", " \n", " X.append(flair)\n", " y.append(label)\n", " z.append(seg)\n", " v.append(survival)\n", " \n", " X = np.asarray(X,dtype=np.float32)\n", " y = np.asarray(y,dtype=np.float32)\n", " z = np.asarray(z,dtype=np.float32)\n", " v = np.asarray(v,dtype=np.float32)\n", " \n", " return X/np.max(X), y, z, v, Batch_ids\n", " \n" ] }, { 
"cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:23.920524Z", "iopub.status.busy": "2023-07-29T17:57:23.920131Z", "iopub.status.idle": "2023-07-29T17:57:23.935844Z", "shell.execute_reply": "2023-07-29T17:57:23.934624Z", "shell.execute_reply.started": "2023-07-29T17:57:23.920493Z" } }, "outputs": [], "source": [ "batch_size = 1\n", "training_generator = DataGenerator(train_ids,batch_size=batch_size,augment=False)\n", "valid_generator = DataGenerator(val_ids,batch_size=batch_size, augment=False)\n", "test_generator = DataGenerator(test_ids,batch_size=batch_size,shuffle=False,augment=False)\n", "\n", "# x,y = training_generator.__getitem__(6)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:23.938132Z", "iopub.status.busy": "2023-07-29T17:57:23.937351Z", "iopub.status.idle": "2023-07-29T17:57:24.365038Z", "shell.execute_reply": "2023-07-29T17:57:24.363841Z", "shell.execute_reply.started": "2023-07-29T17:57:23.938099Z" } }, "outputs": [], "source": [ "# x,y = training_generator.__getitem__(6)\n", "ind = np.random.randint(len(training_generator))\n", "x,y = training_generator.__getitem__(ind)\n", "import matplotlib.pyplot as plt" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T18:23:40.495581Z", "iopub.status.busy": "2023-07-29T18:23:40.495141Z", "iopub.status.idle": "2023-07-29T18:23:40.502219Z", "shell.execute_reply": "2023-07-29T18:23:40.501456Z", "shell.execute_reply.started": "2023-07-29T18:23:40.495532Z" } }, "outputs": [], "source": [ "x.shape" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:24.367000Z", "iopub.status.busy": "2023-07-29T17:57:24.366668Z", "iopub.status.idle": "2023-07-29T17:57:24.796102Z", "shell.execute_reply": "2023-07-29T17:57:24.794917Z", 
"shell.execute_reply.started": "2023-07-29T17:57:24.366971Z" } }, "outputs": [], "source": [ "\n", "#x=np.fliplr(x)\n", "#y[1]=np.fliplr(y[1])\n", "slice_index = 60\n", "plt.subplot(1,2,1)\n", "plt.imshow(x[0,:,:,slice_index,0])\n", "plt.subplot(1,2,2)\n", "plt.imshow(y[1][0,:,:,slice_index,:])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:24.797965Z", "iopub.status.busy": "2023-07-29T17:57:24.797562Z", "iopub.status.idle": "2023-07-29T17:57:25.417872Z", "shell.execute_reply": "2023-07-29T17:57:25.416718Z", "shell.execute_reply.started": "2023-07-29T17:57:24.797927Z" } }, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "slice_index = 60\n", "plt.subplot(1,2,1)\n", "plt.imshow(x[0,:,:,slice_index,1])\n", "plt.subplot(1,2,2)\n", "plt.imshow(y[1][0,:,:,slice_index,:])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:25.420481Z", "iopub.status.busy": "2023-07-29T17:57:25.419620Z", "iopub.status.idle": "2023-07-29T17:57:25.807199Z", "shell.execute_reply": "2023-07-29T17:57:25.806016Z", "shell.execute_reply.started": "2023-07-29T17:57:25.420438Z" } }, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "slice_index = 60\n", "plt.subplot(1,2,1)\n", "plt.imshow(x[0,:,:,slice_index,2])\n", "plt.subplot(1,2,2)\n", "plt.imshow(y[1][0,:,:,slice_index,:])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:25.809169Z", "iopub.status.busy": "2023-07-29T17:57:25.808609Z", "iopub.status.idle": "2023-07-29T17:57:26.196822Z", "shell.execute_reply": "2023-07-29T17:57:26.195682Z", "shell.execute_reply.started": "2023-07-29T17:57:25.809136Z" } }, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "slice_index = 60\n", "plt.subplot(1,2,1)\n", "plt.imshow(x[0,:,:,slice_index,3])\n", "plt.subplot(1,2,2)\n", 
"plt.imshow(y[1][0,:,:,slice_index,:])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:26.198757Z", "iopub.status.busy": "2023-07-29T17:57:26.198398Z", "iopub.status.idle": "2023-07-29T17:57:28.220537Z", "shell.execute_reply": "2023-07-29T17:57:28.219573Z", "shell.execute_reply.started": "2023-07-29T17:57:26.198727Z" } }, "outputs": [], "source": [ "import os\n", "import numpy as np\n", "from tensorflow.keras.models import *\n", "from tensorflow.keras.layers import *\n", "from tensorflow.keras.optimizers import *\n", "from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler\n", "from tensorflow.keras import backend as Keras\n", "from keras.layers import LeakyReLU\n", "\n", "from tensorflow.keras import backend as K\n", "\n", "\n", "def dice_coef(y_true, y_pred, smooth=1):\n", " y_true_f = K.flatten(K.cast(y_true, 'float32'))\n", " y_pred_f = K.flatten(K.cast(y_pred, 'float32'))\n", " intersection = K.sum(y_true_f * y_pred_f)\n", " return (2. 
* intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n", "\n", "def dice_coef_loss(y_true, y_pred): \n", " return 1-dice_coef(y_true, y_pred)\n", "\n", "def dice_loss(y_true, y_pred, numLabels=3):\n", " dice=0\n", " dice_list = []\n", " for index in range(numLabels):\n", " dice= dice_coef_loss(y_true[:,:,:,index], y_pred[:,:,:,index])\n", " dice_list.append(dice)\n", " return dice_list\n", "\n", "def rmse(y_true, y_pred):\n", " mse= tf.sqrt(tf.metrics.mean_squared_error(y_true, y_pred))\n", " return (mse)\n", "\n", "\n", "# Computing Sensitivity \n", "def sensitivity(y_true, y_pred):\n", " true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n", " possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n", " return true_positives / (possible_positives + K.epsilon())\n", "\n", "\n", "# Computing Specificity\n", "def specificity(y_true, y_pred):\n", " true_negatives = K.sum(K.round(K.clip((1-y_true) * (1-y_pred), 0, 1)))\n", " possible_negatives = K.sum(K.round(K.clip(1-y_true, 0, 1)))\n", " return true_negatives / (possible_negatives + K.epsilon())\n", "\n", "padding='valid'\n", "pretrained_weights = None\n", "input_size = (128,128,128, 4)\n", "inputTensor = Input(input_size,name='input')\n", "\n", "# conv01 = Conv3D(16, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(inputTensor)\n", "# conv01 = Conv3D(16, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(conv01)\n", "# bn01=BatchNormalization()(conv01) ######### batch normalization\n", "# pool01 = MaxPooling3D(pool_size=(2, 2, 2))(bn01)\n", "\n", "# conv00 = Conv3D(32, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(pool01)\n", "# conv00 = Conv3D(32, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(conv00)\n", "# bn00=BatchNormalization()(conv00) 
######### batch normalization\n", "# pool00 = MaxPooling3D(pool_size=(2, 2, 2))(bn00)\n", "\n", "conv1 = Conv3D(64, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(inputTensor)\n", "conv1 = Conv3D(64, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(conv1)\n", "bn=BatchNormalization()(conv1) ######### batch normalization\n", "pool1 = MaxPooling3D(pool_size=(2, 2, 2))(bn)\n", "\n", "conv2 = Conv3D(128, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(pool1)\n", "conv2 = Conv3D(128, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(conv2)\n", "bn1=BatchNormalization()(conv2) ######### batch normalization\n", "pool2 = MaxPooling3D(pool_size=(2, 2, 2))(bn1)\n", "\n", "conv3 = Conv3D(256, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(pool2)\n", "conv3 = Conv3D(256, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(conv3)\n", "bn2=BatchNormalization()(conv3) ######### batch normalization\n", "pool3 = MaxPooling3D(pool_size=(2, 2, 2))(bn2)\n", "\n", "conv4 = Conv3D(512, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(pool3)\n", "conv4 = Conv3D(512, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(conv4)\n", "bn3=BatchNormalization()(conv4) ######### batch normalization\n", "drop4 = Dropout(0.5)(bn3)\n", "pool4 = MaxPooling3D(pool_size=(2, 2, 2))(drop4)\n", "\n", "########################## bottle neck #################################################################\n", "\n", "conv5 = Conv3D(512, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)\n", "conv5 = Conv3D(512, 
(3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\n", "bn4=BatchNormalization()(conv5) ######### batch normalization\n", "drop5 = Dropout(0.2)(bn4)\n", "\n", "\n", "#############################3 Decoder path ##############################################################\n", "\n", "up6 = Conv3D(256, (2,2,2), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(UpSampling3D(size = (2,2,2))(drop5))\n", "merge6 = concatenate([conv4,up6], axis = 4)\n", "drop6 = Dropout(0.5)(merge6)\n", "conv6 = Conv3D(256, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(drop6)\n", "conv6 = Conv3D(256, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(conv6)\n", "bn5=BatchNormalization()(conv6) ######### batch normalization\n", "\n", "up7 = Conv3D(128, (2,2,2), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(UpSampling3D(size = (2,2,2))(bn5))\n", "merge7 = concatenate([conv3,up7], axis = 4)\n", "drop7 = Dropout(0.5)(merge7)\n", "conv7 = Conv3D(128, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(drop7)\n", "conv7 = Conv3D(128, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(conv7)\n", "bn6=BatchNormalization()(conv7) ######### batch normalization\n", "\n", "up8 = Conv3D(64, (2,2,2), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(UpSampling3D(size = (2,2,2))(bn6))\n", "merge8 = concatenate([conv2,up8], axis = 4)\n", "drop8 = Dropout(0.5)(merge8)\n", "conv8 = Conv3D(64, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(drop8)\n", "conv8 = Conv3D(64, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 
'he_normal', kernel_regularizer='l2')(conv8)\n", "bn7=BatchNormalization()(conv8) ######### batch normalization\n", "\n", "up9 = Conv3D(32, (2,2,2), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(UpSampling3D(size = (2,2,2))(bn7))\n", "merge9 = concatenate([conv1,up9], axis = 4)\n", "drop9 = Dropout(0.5)(merge9)\n", "conv9 = Conv3D(32, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(drop9)\n", "conv9 = Conv3D(32, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(conv9)\n", "bn8=BatchNormalization()(conv9)\n", "\n", "# up10 = Conv3D(16, (2,2,2), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(UpSampling3D(size = (2,2,2))(bn8))\n", "# merge10 = concatenate([conv00,up10], axis = 4)\n", "# drop10 = Dropout(0.5)(merge10)\n", "# conv10 = Conv3D(16, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(drop10)\n", "# conv10 = Conv3D(16, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(conv10)\n", "# bn9=BatchNormalization()(conv10)\n", "# print(bn9.shape)\n", "\n", "# up11 = Conv3D(8, (2,2,2), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(UpSampling3D(size = (2,2,2))(bn9))\n", "# merge11 = concatenate([conv01,up11], axis = 4)\n", "# drop11 = Dropout(0.5)(merge11)\n", "# conv11 = Conv3D(8, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(drop11)\n", "# conv11 = Conv3D(8, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(conv11)\n", "# bn10=BatchNormalization()(conv11)\n", "# print(bn10.shape)\n", "\n", "######################## Classification output ##############\n", "\n", "pool15 = 
MaxPooling3D(pool_size=(2, 2, 2))(bn3)\n", "pool16 = MaxPooling3D(pool_size=(2, 2, 2))(pool15)\n", "classify =Flatten()(pool16)\n", "classify = Dense(128, activation = 'relu')(classify)\n", "classify = Dense(64, activation = 'relu')(classify )\n", "classify = Dense(32, activation = 'relu')(classify )\n", "classify = Dense(2, activation='softmax', kernel_regularizer='l2',name='classify')(classify)\n", "\n", "\n", "########################### attention block ##########\n", "\n", "# print(bn5.shape)\n", "# seg300 = MaxPooling3D(pool_size=(2, 2, 2))(bn6)\n", "# print(seg300.shape)\n", "# seg400 = MaxPooling3D(pool_size=(4, 4, 4))(bn7)\n", "# print(seg400.shape)\n", "# seg500 = MaxPooling3D(pool_size=(8, 8, 8))(bn8)\n", "# print(seg500.shape)\n", "# seg600 = MaxPooling3D(pool_size=(16, 16, 16))(bn9)\n", "# print(seg600.shape)\n", "# # seg700 = MaxPooling3D(pool_size=(32, 32, 32))(bn10)\n", "# # print(seg700.shape)\n", "\n", "\n", "# mergeall = concatenate([bn5,seg300,seg400,seg500, seg600], axis = 4)\n", "# print(mergeall.shape)\n", "\n", "# conv900 = Conv3D(32, (3,3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(mergeall)\n", "\n", "# up900 = Conv3D(32, (2,2,2), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2')(UpSampling3D(size = (32,32,32))(conv900))\n", "# print(up900.shape)\n", "\n", "# mergelast = concatenate([bn10, up900], axis = 4)\n", "# print(mergelast.shape)\n", "\n", "\n", "######################## segmentation output ##############\n", "\n", "# segmentation = Conv3D(3, (1,1,1), activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2',name='segmentation')(mergelast)\n", "segmentation = Conv3D(3, (1,1,1), activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal', kernel_regularizer='l2',name='segmentation')(bn8)\n", "\n", "\n", "####################### survival prediction as regression 
#############\n", "\n", "\n", "\n", "pool17 = MaxPooling3D(pool_size=(2, 2, 2))(bn1)\n", "pool18 = MaxPooling3D(pool_size=(2, 2, 2))(pool17)\n", "# pool18 = MaxPooling3D(pool_size=(2, 2, 2))(pool17)\n", "\n", "# bn11=BatchNormalization()(pool17)\n", "survival=Flatten()(pool18)\n", "\n", "survival = Dropout(0.2)(survival)\n", "survival = Dense(128,activation=\"relu\")(survival)\n", "survival = Dense(64,activation=\"relu\")(survival)\n", "survival = Dense(32,activation=\"relu\")(survival)\n", "survival = Dense(32,activation=\"relu\")(survival)\n", "# survival = Dense(16,activation=\"relu\")(survival)\n", "# survival = Dense(8,activation=\"relu\")(survival)\n", "survival= Dense(1, activation='linear', kernel_regularizer='l2',name='survival')(survival)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:28.221997Z", "iopub.status.busy": "2023-07-29T17:57:28.221714Z", "iopub.status.idle": "2023-07-29T17:57:28.453386Z", "shell.execute_reply": "2023-07-29T17:57:28.452307Z", "shell.execute_reply.started": "2023-07-29T17:57:28.221971Z" } }, "outputs": [], "source": [ "model = Model(inputs = inputTensor,\n", " outputs = [classify, segmentation, survival])\n", " #outputs = [segmentation])\n", "model.summary()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:28.455336Z", "iopub.status.busy": "2023-07-29T17:57:28.454980Z", "iopub.status.idle": "2023-07-29T17:57:28.465232Z", "shell.execute_reply": "2023-07-29T17:57:28.464149Z", "shell.execute_reply.started": "2023-07-29T17:57:28.455296Z" } }, "outputs": [], "source": [ "import numpy as np\n", "\n", "# Load the .npy file into a NumPy array\n", "data = np.load('/kaggle/input/adjustedlabels2019/FLAIR_label0.npy')\n", "\n", "# Now you can check the unique labels and their counts\n", "unique_labels, label_counts = np.unique(data, return_counts=True)\n", "\n", "# Print the unique labels 
and their counts\n", "for label, count in zip(unique_labels, label_counts):\n", " print(f\"Label: {label}, Count: {count}\")\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:28.472609Z", "iopub.status.busy": "2023-07-29T17:57:28.472222Z", "iopub.status.idle": "2023-07-29T17:57:32.921057Z", "shell.execute_reply": "2023-07-29T17:57:32.919842Z", "shell.execute_reply.started": "2023-07-29T17:57:28.472580Z" } }, "outputs": [], "source": [ "import os\n", "import numpy as np\n", "import torch\n", "import torch.nn as nn\n", "import torch.optim as optim\n", "from torch.utils.data import DataLoader, Dataset\n", "\n", "# Function to load loss weights from .npy files in a directory\n", "def load_loss_weights_from_directory(directory_path):\n", " weight_files = [filename for filename in os.listdir(directory_path) if filename.endswith(\".npy\")]\n", " weights = [np.load(os.path.join(directory_path, filename)) for filename in weight_files]\n", " return np.concatenate(weights)\n", "\n", "# Directory path for classification loss weights\n", "classification_weights_directory = '/kaggle/input/adjustedlabels2019'\n", "\n", "# Load initial classification loss weights from the directory\n", "classification_weight = load_loss_weights_from_directory(classification_weights_directory)\n", "\n", "# Define your classification model\n", "class ClassificationModel(nn.Module):\n", " def __init__(self, input_size, num_classes):\n", " super(ClassificationModel, self).__init__()\n", " self.fc = nn.Linear(input_size, num_classes)\n", "\n", " def forward(self, x):\n", " x = self.fc(x)\n", " return x\n", "\n", "# Custom dataset class for the classification task\n", "class ClassificationDataset(Dataset):\n", " def __init__(self, data, targets):\n", " self.data = data\n", " self.targets = targets\n", "\n", " def __len__(self):\n", " return len(self.data)\n", "\n", " def __getitem__(self, idx):\n", " x = self.data[idx]\n", " y = 
self.targets[idx]\n", "\n", " return x, y\n", "\n", "# Example: Creating dummy data for the classification task\n", "# Replace the following lines with your actual data and targets for the classification task\n", "num_samples = 1000\n", "input_size = 784 # Replace with the actual input size of your model\n", "num_classes = 10 # Replace with the actual number of classes for classification\n", "\n", "train_data_classification = torch.rand(num_samples, input_size)\n", "train_targets_classification = torch.randint(0, num_classes, (num_samples,))\n", "\n", "# Assuming you have your data for the classification task as 'train_data_classification', 'train_targets_classification'\n", "train_dataset_classification = ClassificationDataset(data=train_data_classification, targets=train_targets_classification)\n", "train_loader_classification = DataLoader(train_dataset_classification, batch_size=64, shuffle=True)\n", "\n", "# Initialize the classification model\n", "model = ClassificationModel(input_size, num_classes)\n", "\n", "# Define your optimizer and loss function\n", "optimizer = optim.Adam(model.parameters(), lr=0.001)\n", "criterion_classification = nn.CrossEntropyLoss()\n", "\n", "# Set the device to 'cuda' if available, else set it to 'cpu'\n", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "model.to(device)\n", "\n", "# Training loop\n", "num_epochs_update = 5 # Number of epochs to update the weights\n", "for epoch in range(num_epochs_update):\n", " for data, targets_classification in train_loader_classification:\n", " # Transfer data and labels to the device (e.g., GPU)\n", " data = data.to(device)\n", " targets_classification = targets_classification.to(device)\n", "\n", " # Zero the gradients\n", " optimizer.zero_grad()\n", "\n", " # Forward pass\n", " outputs_classification = model(data)\n", "\n", " # Calculate classification loss\n", " loss_classification = criterion_classification(outputs_classification, targets_classification)\n", 
"\n", " # Backpropagation and update model parameters\n", " loss_classification.backward()\n", " optimizer.step()\n", "\n", " # Calculate mean and standard deviation for the classification loss\n", " mean_loss_classification = torch.mean(loss_classification).item()\n", " std_loss_classification = torch.std(loss_classification).item()\n", "\n", " # Define reference value (you can use mean or maximum normalized loss, or any other desired reference value)\n", " reference_value_classification = 1.0\n", "\n", " # Calculate loss-weight update factor for the classification task\n", " update_factor_classification = np.exp((mean_loss_classification - reference_value_classification) / std_loss_classification)\n", "\n", " # Normalize the update factor to ensure it is between 0 and 1\n", " update_factor_classification = max(0.0, min(1.0, update_factor_classification))\n", "\n", " # Update the classification weight based on the calculated update factor\n", " classification_weight *= update_factor_classification\n", "\n", " # After updating the classification_weight, save it back to a writable directory (e.g., for printing purposes)\n", " np.save('/kaggle/working/classification_weights_updated.npy', classification_weight)\n", "\n", " # Print the updated classification weight after each epoch\n", " print(f\"Classification Weight (Epoch {epoch + 1}): {classification_weight}\")\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "execution": { "iopub.execute_input": "2023-07-29T17:57:32.925654Z", "iopub.status.busy": "2023-07-29T17:57:32.924992Z", "iopub.status.idle": "2023-07-29T17:57:34.856983Z", "shell.execute_reply": "2023-07-29T17:57:34.850526Z", "shell.execute_reply.started": "2023-07-29T17:57:32.925618Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Regression Weight (Epoch 5): \n", "tensor([ 150., 120., 1155., 448., 401., 77., 427., 409., 112., 412.,\n", " 334., 74., 684., 515., 722., 1444., 613., 142., 538., 416.,\n", 
" 240., 524., 319., 730., 421., 510., 1020., 436., 336., 342.,\n", " 503., 688., 1410., 374., 117., 495., 692., 558., 322., 630.,\n", " 370., 86., 430., 110., 209., 448., 698., 199., 291., 600.,\n", " 804., 634., 1561., 268., 822., 355., 1489., 359., 82., 187.,\n", " 394., 812., 737., 362., 67., 71., 262., 424., 169., 329.,\n", " 104., 1731., 127., 616., 111., 828., 317., 405., 621., 368.,\n", " 175., 357., 747., 946., 103., 468., 82., 467., 425., 22.,\n", " 439., 382., 1282., 327., 635., 448., 296., 626., 372., 121.,\n", " 476., 486., 333., 30., 232., 434., 99., 5., 213., 244.,\n", " 1458., 77., 289., 296., 508., 82., 346., 519., 254., 147.,\n", " 153., 300., 630., 330., 287., 728., 540., 2600., 2600., 2600.,\n", " 2600., 2600., 2600., 2600., 2600., 278., 2600., 2600., 2600., 2600.,\n", " 2600., 2600., 2600., 2600., 2600., 2600., 55., 2600., 2600., 2600.,\n", " 2600., 2600., 2600., 2600., 2600., 2600., 2600., 576., 2600., 2600.,\n", " 2600., 2600., 2600., 2600., 2600., 2600., 2600., 2600., 350., 2600.,\n", " 2600., 2600., 2600., 2600., 2600., 2600., 2600., 2600., 2600., 332.,\n", " 2600., 2600., 2600., 2600., 2600., 2600., 2600., 2600., 2600., 2600.,\n", " 146., 2600., 2600., 2600., 2600., 2600., 2600., 2600., 2600., 331.,\n", " 200., 616., 23., 203., 336., 106., 32., 466., 170., 488.,\n", " 473., 1283., 464., 33., 89., 172., 30., 84., 229., 286.,\n", " 726., 495., 871., 788., 210., 318., 1527., 208., 660., 522.,\n", " 407., 265., 1178., 85., 465., 597., 239., 610., 208., 355.,\n", " 152., 353., 385., 453., 1592., 269., 56., 376., 1337., 12.,\n", " 21., 1227., 182., 277., 579., 614., 503., 155., 387., 180.,\n", " 139., 375., 1148., 78., 1767., 168., 438., 1278., 345., 394.,\n", " 114., 191., 50., 580., 1145., 387., 58., 734.])\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ ":49: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than 
import os
import numpy as np
import torch

# Directory holding the initial regression loss weights (referenced again by
# the later Keras training-setup cell, so it stays module-level).
regression_weights_directory = 'data/adjusted_survival_2019'


def load_loss_weights_from_directory(directory_path):
    """Load every ``.npy`` file in ``directory_path`` and concatenate them.

    Some of the saved files hold 0-d (scalar) arrays and ``np.concatenate``
    requires at least 1-D inputs, so each array is promoted with
    ``np.atleast_1d`` first.

    Parameters
    ----------
    directory_path : str
        Directory to scan for ``*.npy`` weight files.

    Returns
    -------
    np.ndarray
        1-D array of all weights, in ``os.listdir`` order.
    """
    weight_files = [name for name in os.listdir(directory_path)
                    if name.endswith(".npy")]
    weights = [np.atleast_1d(np.load(os.path.join(directory_path, name)))
               for name in weight_files]
    return np.concatenate(weights)


def save_weights_to_directory(directory_path, weights):
    """Save ``weights`` as ``updated_regression_weights.npy`` in ``directory_path``.

    The directory is created if it does not already exist.  Repeated calls
    overwrite the same file.
    """
    os.makedirs(directory_path, exist_ok=True)
    np.save(os.path.join(directory_path, "updated_regression_weights.npy"), weights)


def run_regression_weight_update(weights_directory=regression_weights_directory,
                                 output_directory='data/updated_regression_weights',
                                 num_epochs=5):
    """Rescale the regression loss weights for ``num_epochs`` epochs and save them.

    NOTE(review): the mean/std of the regression loss are placeholders
    (0.5 / 0.1) -- substitute the real per-epoch loss statistics.

    Returns
    -------
    torch.Tensor
        The final rescaled weights.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Convert to a tensor ONCE, before the loop.  The original re-wrapped the
    # (already-tensor) weights with torch.tensor() on every iteration, which
    # raises a UserWarning and makes a pointless copy each epoch.
    weights_np = load_loss_weights_from_directory(weights_directory)
    regression_weight = torch.tensor(weights_np, dtype=torch.float32, device=device)

    os.makedirs(output_directory, exist_ok=True)

    for epoch in range(num_epochs):
        # Placeholder loss statistics -- replace with the actual loss calculation.
        mean_loss_regression = 0.5
        std_loss_regression = 0.1
        reference_value_regression = 0.5

        # Loss-weight update factor, clamped into [0, 1].
        update_factor = float(np.exp(
            (mean_loss_regression - reference_value_regression) / std_loss_regression))
        update_factor = max(0.0, min(1.0, update_factor))

        # If the factor collapsed to ~0 while the weights are all zero, nudge
        # it so some update still occurs.
        if update_factor < 1e-6 and torch.all(regression_weight == 0):
            update_factor = 1e-3

        regression_weight *= update_factor

        # Overwrites the same file each epoch; only the final epoch persists.
        save_weights_to_directory(output_directory, regression_weight.cpu().numpy())

        # BUG FIX: the original print sat OUTSIDE the loop (with its comment
        # duplicated), so despite saying "after each epoch" only the last
        # epoch was ever shown.  Print inside the loop instead.
        with np.printoptions(threshold=np.inf):
            print(f"Regression Weight (Epoch {epoch + 1}): \n{regression_weight}")

    return regression_weight


if __name__ == "__main__":
    # __name__ is "__main__" inside a notebook cell, so this still executes on
    # cell run; the guard keeps the module importable without touching disk.
    regression_weight = run_regression_weight_update()
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torch.cuda.amp import autocast, GradScaler

# Directory holding the initial segmentation loss weights (Kaggle input,
# read-only -- updated weights are written to /kaggle/working below).
segmentation_weights_directory = '/kaggle/input/adjustedmask2019'


def load_loss_weights_from_directory(directory_path):
    """Load and concatenate every ``.npy`` weight array in ``directory_path``.

    Arrays are promoted to at least 1-D first (consistent with the regression
    cell's fixed loader): ``np.concatenate`` rejects 0-d scalar arrays.
    """
    weight_files = [name for name in os.listdir(directory_path)
                    if name.endswith(".npy")]
    return np.concatenate(
        [np.atleast_1d(np.load(os.path.join(directory_path, name)))
         for name in weight_files])


class SegmentationModel(nn.Module):
    """Small, memory-efficient two-conv head producing per-pixel class logits.

    Input  : (N, input_channels, H, W)
    Output : (N, num_classes,    H, W) -- spatial size preserved
             (3x3 conv uses padding=1, the 1x1 conv changes channels only).
    """

    def __init__(self, input_channels, num_classes):
        super(SegmentationModel, self).__init__()
        self.conv1 = nn.Conv2d(input_channels, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, num_classes, kernel_size=1)

    def forward(self, x):
        x = nn.functional.relu(self.conv1(x))
        return self.conv2(x)


class SegmentationDataset(Dataset):
    """Minimal (sample, target) pair dataset for the segmentation task."""

    def __init__(self, data, targets):
        self.data = data
        self.targets = targets

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx], self.targets[idx]


def run_segmentation_weight_update(segmentation_weight, train_loader, model,
                                   num_epochs=5,
                                   output_path='/kaggle/working/segmentation_weights_updated.npy'):
    """Train ``model`` with mixed precision and rescale the loss weights per epoch.

    BUG FIX: the original took ``torch.std()`` of the LAST scalar batch loss.
    The std of a 0-d tensor is NaN, which poisoned the update factor.  We now
    collect every batch loss over the epoch and use the mean/std of that list.

    Returns
    -------
    np.ndarray
        The final rescaled ``segmentation_weight``.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=0.001)
    criterion_segmentation = nn.CrossEntropyLoss()
    grad_scaler = GradScaler()  # mixed-precision loss scaling

    for epoch in range(num_epochs):
        batch_losses = []
        for data, targets_segmentation in train_loader:
            # Transfer data and labels to the device (e.g. GPU).
            data = data.to(device)
            targets_segmentation = targets_segmentation.to(device)

            optimizer.zero_grad()

            # Forward pass under autocast for mixed precision.
            with autocast():
                outputs_segmentation = model(data)
                loss_segmentation = criterion_segmentation(
                    outputs_segmentation, targets_segmentation)

            # Backprop / step through the GradScaler.
            grad_scaler.scale(loss_segmentation).backward()
            grad_scaler.step(optimizer)
            grad_scaler.update()

            batch_losses.append(loss_segmentation.item())

        mean_loss_segmentation = float(np.mean(batch_losses))
        std_loss_segmentation = float(np.std(batch_losses))

        # Reference value (mean or maximum normalized loss, or any other
        # desired reference).
        reference_value_segmentation = 1.0

        # Update factor, clamped into [0, 1]; guard the degenerate zero-spread
        # case rather than dividing by zero.
        if std_loss_segmentation > 0:
            update_factor = float(np.exp(
                (mean_loss_segmentation - reference_value_segmentation)
                / std_loss_segmentation))
        else:
            update_factor = 1.0
        update_factor = max(0.0, min(1.0, update_factor))

        segmentation_weight = segmentation_weight * update_factor

        # Save to a writable directory (the Kaggle input dir is read-only).
        np.save(output_path, segmentation_weight)

        print(f"Segmentation Weight (Epoch {epoch + 1}): {segmentation_weight}")

    return segmentation_weight


if __name__ == "__main__":
    # Dummy stand-in data -- replace with the real volumes and masks.
    num_samples = 278
    input_channels = 4   # actual number of input channels for the model
    num_classes = 3      # actual number of segmentation classes

    train_data_segmentation = torch.rand(num_samples, input_channels, 128, 128)
    train_targets_segmentation = torch.randint(
        0, num_classes, (num_samples, 128, 128))

    train_loader_segmentation = DataLoader(
        SegmentationDataset(train_data_segmentation, train_targets_segmentation),
        batch_size=16, shuffle=True)

    segmentation_weight = load_loss_weights_from_directory(
        segmentation_weights_directory)
    segmentation_weight = run_segmentation_weight_update(
        segmentation_weight,
        train_loader_segmentation,
        SegmentationModel(input_channels, num_classes),
        num_epochs=5)
"# (Rest of your code for other tasks such as classification and survival)\n" ] }, { "cell_type": "markdown", "metadata": { "id": "ID-EAnETs_gg" }, "source": [ "## Keras Functional API\n", "- This is the main branch\n", "- These layers are common to both the tasks\n" ] }, { "cell_type": "markdown", "metadata": { "id": "g0c2VGQUs_gh" }, "source": [ "- This is where the network branches for multiple outputs/tasks\n", "- gender is n x 2 output where as age is n x 1 output" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.861315Z", "iopub.status.idle": "2023-07-29T17:57:34.862223Z", "shell.execute_reply": "2023-07-29T17:57:34.861970Z", "shell.execute_reply.started": "2023-07-29T17:57:34.861943Z" }, "id": "DLIYbd5bs_gh", "scrolled": true }, "outputs": [], "source": [ "model = Model(inputs = inputTensor,\n", " outputs = [classify, segmentation, survival])\n", " #outputs = [segmentation])\n", "model.summary()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.864470Z", "iopub.status.idle": "2023-07-29T17:57:34.865724Z", "shell.execute_reply": "2023-07-29T17:57:34.865532Z", "shell.execute_reply.started": "2023-07-29T17:57:34.865509Z" } }, "outputs": [], "source": [ "model.output" ] }, { "cell_type": "markdown", "metadata": { "id": "j_nPXez0s_gi" }, "source": [ "### Multi loss optimizations\n", "\n", "- Dice loss for segmnetation and crossentropy loss for survival task\n", "- we can weight these individual losses\n", "- Loss = weight1 * loss1 + weight2 * loss2" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.866670Z", "iopub.status.idle": "2023-07-29T17:57:34.867637Z", "shell.execute_reply": "2023-07-29T17:57:34.867449Z", "shell.execute_reply.started": "2023-07-29T17:57:34.867428Z" } }, "outputs": [], "source": [ "# os.mkdir('output')" ] }, { 
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint

# Checkpoint: keep only the best weights, judged by validation dice coefficient.
filepath = "./output/weights-best.hdf5"
checkpoint_cp = ModelCheckpoint(filepath, monitor='val_segmentation_dice_coef',
                                verbose=1, save_best_only=True, mode='max')

# Exponentially decaying learning-rate schedule.
initial_learning_rate = 0.0001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate,
    decay_steps=100000,
    decay_rate=0.96,
    staircase=True)

early_stop = tf.keras.callbacks.EarlyStopping(
    monitor="val_segmentation_dice_coef",
    min_delta=0,
    patience=20,
    verbose=1,
    mode="max",
    baseline=None,
    restore_best_weights=False,
)

# BUG FIX: the optimizer was instantiated twice (once via tensorflow.keras,
# then again via tf.keras); one definition is enough.
opt = tf.keras.optimizers.RMSprop(lr_schedule)

# Per-task loss weights (the *_weights_directory names are defined in the
# weight-update cells above; classification's is presumed to come from an
# earlier cell -- TODO confirm).
classification_weight = load_loss_weights_from_directory(classification_weights_directory)
segmentation_weight = load_loss_weights_from_directory(segmentation_weights_directory)
regression_weight = load_loss_weights_from_directory(regression_weights_directory)

# Batch size per task head.
batch_size_classification = 64
batch_size_segmentation = 16
batch_size_regression = 32

# BUG FIX: batch_size must not be passed to fit() when the input is a
# generator/Sequence -- the generator already fixes the batch size and TF2
# rejects the argument.  validation_split=None is the default and is dropped.
history = model.fit(
    training_generator,
    epochs=100,
    verbose=1,
    validation_data=valid_generator,
    callbacks=[checkpoint_cp],  # append early_stop here to enable early stopping
)

# Persist the training history for later plotting/analysis.
np.save('./output/2019-4m-regression-19feb.npy', history.history)

# To reload the best checkpoint, the custom metrics/losses must be supplied:
# from tensorflow.keras.models import load_model
# model = load_model('./output/weights-best.hdf5',
#                    custom_objects={'dice_coef_loss': dice_coef_loss,
#                                    'dice_coef': dice_coef,
#                                    'sensitivity': sensitivity,
#                                    'specificity': specificity,
#                                    'rmse': rmse})
# Evaluate the multi-task model on the held-out test generator.
evaluation_results = model.evaluate(test_generator, verbose=1)

# Collect per-head predictions for every test case (one volume per batch).
test_results = model.predict(test_generator, batch_size=1, verbose=1)
"2023-07-29T17:57:34.896901Z", "shell.execute_reply.started": "2023-07-29T17:57:34.896884Z" } }, "outputs": [], "source": [ "# two_new = np.array(test_results[1])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.898501Z", "iopub.status.idle": "2023-07-29T17:57:34.898882Z", "shell.execute_reply": "2023-07-29T17:57:34.898714Z", "shell.execute_reply.started": "2023-07-29T17:57:34.898696Z" } }, "outputs": [], "source": [ "# two_new.shape" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.900358Z", "iopub.status.idle": "2023-07-29T17:57:34.900743Z", "shell.execute_reply": "2023-07-29T17:57:34.900567Z", "shell.execute_reply.started": "2023-07-29T17:57:34.900549Z" } }, "outputs": [], "source": [ "# plt.imshow(two_new[2,:,:,40,:]*256)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.901984Z", "iopub.status.idle": "2023-07-29T17:57:34.902552Z", "shell.execute_reply": "2023-07-29T17:57:34.902382Z", "shell.execute_reply.started": "2023-07-29T17:57:34.902362Z" } }, "outputs": [], "source": [ "# predicted = np.array(test_results[1])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.903540Z", "iopub.status.idle": "2023-07-29T17:57:34.904099Z", "shell.execute_reply": "2023-07-29T17:57:34.903929Z", "shell.execute_reply.started": "2023-07-29T17:57:34.903910Z" } }, "outputs": [], "source": [ "# plt.imshow(predicted[2,:,:,40,:]*256)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.905250Z", "iopub.status.idle": "2023-07-29T17:57:34.905681Z", "shell.execute_reply": "2023-07-29T17:57:34.905508Z", "shell.execute_reply.started": "2023-07-29T17:57:34.905488Z" } }, "outputs": [], "source": [ "# predicted = 
predicted.astype(np.uint8)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.907476Z", "iopub.status.idle": "2023-07-29T17:57:34.908340Z", "shell.execute_reply": "2023-07-29T17:57:34.908102Z", "shell.execute_reply.started": "2023-07-29T17:57:34.908081Z" } }, "outputs": [], "source": [ "# from tensorflow.keras import backend as Keras\n", "# new_2=Keras.max(predicted,axis=-1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.909953Z", "iopub.status.idle": "2023-07-29T17:57:34.910418Z", "shell.execute_reply": "2023-07-29T17:57:34.910180Z", "shell.execute_reply.started": "2023-07-29T17:57:34.910161Z" } }, "outputs": [], "source": [ "# new_2 = np.array(new_2)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.912639Z", "iopub.status.idle": "2023-07-29T17:57:34.913365Z", "shell.execute_reply": "2023-07-29T17:57:34.913153Z", "shell.execute_reply.started": "2023-07-29T17:57:34.913132Z" } }, "outputs": [], "source": [ "# new_2.dtype" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.914458Z", "iopub.status.idle": "2023-07-29T17:57:34.915348Z", "shell.execute_reply": "2023-07-29T17:57:34.915132Z", "shell.execute_reply.started": "2023-07-29T17:57:34.915112Z" } }, "outputs": [], "source": [ "# import matplotlib.pyplot as plt\n", "# plt.imshow(new_2[2,:,:,40])\n", "\n", "# plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.916742Z", "iopub.status.idle": "2023-07-29T17:57:34.917140Z", "shell.execute_reply": "2023-07-29T17:57:34.916968Z", "shell.execute_reply.started": "2023-07-29T17:57:34.916949Z" } }, "outputs": [], "source": [ "# prediction= np.load('prediction.npy')" ] }, { "cell_type": "code", 
"execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.918583Z", "iopub.status.idle": "2023-07-29T17:57:34.918972Z", "shell.execute_reply": "2023-07-29T17:57:34.918800Z", "shell.execute_reply.started": "2023-07-29T17:57:34.918781Z" } }, "outputs": [], "source": [ "# np.save('prediction.npy', predicted)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.921083Z", "iopub.status.idle": "2023-07-29T17:57:34.921520Z", "shell.execute_reply": "2023-07-29T17:57:34.921341Z", "shell.execute_reply.started": "2023-07-29T17:57:34.921321Z" } }, "outputs": [], "source": [ "#new_squeeze= predicted.squeeze(-1)\n", "# images = predicted.squeeze()\n", "# images = np.delete(images, obj=0, axis=3)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.923110Z", "iopub.status.idle": "2023-07-29T17:57:34.923531Z", "shell.execute_reply": "2023-07-29T17:57:34.923354Z", "shell.execute_reply.started": "2023-07-29T17:57:34.923334Z" } }, "outputs": [], "source": [ "# images.shape" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.925150Z", "iopub.status.idle": "2023-07-29T17:57:34.925626Z", "shell.execute_reply": "2023-07-29T17:57:34.925447Z", "shell.execute_reply.started": "2023-07-29T17:57:34.925427Z" } }, "outputs": [], "source": [ "# import os\n", "# from PIL import Image\n", "# import cv2\n", "# from tqdm import tqdm\n", "# import numpy as np\n", "\n", "# output_shape = (240,240,155)\n", "# resized_array=[]\n", "# for i in range(125):\n", "# image = new_2[i]\n", "# image = resize(image, shape=output_shape, mode='constant')\n", "# #image = cv2.resize(image, dsize=(240, 240), interpolation=cv2.INTER_NEAREST)\n", "# resized_array.append(image)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { 
"iopub.status.busy": "2023-07-29T17:57:34.927113Z", "iopub.status.idle": "2023-07-29T17:57:34.927523Z", "shell.execute_reply": "2023-07-29T17:57:34.927348Z", "shell.execute_reply.started": "2023-07-29T17:57:34.927329Z" } }, "outputs": [], "source": [ "# #resized_array.shape\n", "# resized_array = np.array (resized_array)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.928846Z", "iopub.status.idle": "2023-07-29T17:57:34.929209Z", "shell.execute_reply": "2023-07-29T17:57:34.929045Z", "shell.execute_reply.started": "2023-07-29T17:57:34.929028Z" } }, "outputs": [], "source": [ "# import matplotlib.pyplot as plt\n", "\n", "\n", "# plt.imshow(resized_array[70][:,:,73])\n", "\n", "# plt.show()\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.930724Z", "iopub.status.idle": "2023-07-29T17:57:34.932358Z", "shell.execute_reply": "2023-07-29T17:57:34.932066Z", "shell.execute_reply.started": "2023-07-29T17:57:34.932039Z" } }, "outputs": [], "source": [ "# new = scipy.ndimage.zoom(predicted[0], 2, order=3)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.933592Z", "iopub.status.idle": "2023-07-29T17:57:34.934357Z", "shell.execute_reply": "2023-07-29T17:57:34.934058Z", "shell.execute_reply.started": "2023-07-29T17:57:34.934028Z" } }, "outputs": [], "source": [ "# import numpy as np\n", "# from scipy.ndimage.interpolation import zoom\n", "\n", "# def resize(img, shape, mode='constant', orig_shape=(128,128,128)):\n", "# \"\"\"\n", "# Wrapper for scipy.ndimage.zoom suited for MRI images.\n", "# \"\"\"\n", "# assert len(shape) == 3, \"Can not have more than 3 dimensions\"\n", "# factors = (\n", "# shape[0]/orig_shape[0],\n", "# shape[1]/orig_shape[1], \n", "# 
import matplotlib.pyplot as plt

# Unpack the three model heads: classification, segmentation, survival.
pred_classify = test_results[0]
pred_seg = test_results[1]
pred_surv = test_results[2]

# Pick one random test volume and compare predictions with ground truth.
vol_index = np.random.randint(50)
gt_x, gt_y = test_generator.__getitem__(vol_index)

print("actual classification: ", gt_y[0])
print("predicted classification: ", pred_classify[vol_index])

print("predicted survival: ", pred_surv[vol_index])
print("actual survival: ", gt_y[2])

# Side by side: predicted segmentation vs. ground-truth mask at one axial slice.
slice_index = 60
plt.subplot(1, 2, 1)
plt.imshow(pred_seg[vol_index, :, :, slice_index, :])
plt.subplot(1, 2, 2)
plt.imshow(gt_y[1][0, :, :, slice_index, :])
plt.show()
"2023-07-29T17:57:34.950211Z" } }, "outputs": [], "source": [ "# plt.imshow(entropy[0,:,:,65])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.951463Z", "iopub.status.idle": "2023-07-29T17:57:34.952157Z", "shell.execute_reply": "2023-07-29T17:57:34.951974Z", "shell.execute_reply.started": "2023-07-29T17:57:34.951954Z" } }, "outputs": [], "source": [ "# plt.savefig('uncertinity1.jpg')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.953399Z", "iopub.status.idle": "2023-07-29T17:57:34.954136Z", "shell.execute_reply": "2023-07-29T17:57:34.953925Z", "shell.execute_reply.started": "2023-07-29T17:57:34.953899Z" } }, "outputs": [], "source": [ "# test_results1 = model(x, training=True) \n", "# test_results2 = model(x, training=True) " ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.955411Z", "iopub.status.idle": "2023-07-29T17:57:34.956115Z", "shell.execute_reply": "2023-07-29T17:57:34.955935Z", "shell.execute_reply.started": "2023-07-29T17:57:34.955916Z" } }, "outputs": [], "source": [ "# difference_img = test_results1[0]-test_results2[0]\n", "# np.sum(difference_img)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.957411Z", "iopub.status.idle": "2023-07-29T17:57:34.958100Z", "shell.execute_reply": "2023-07-29T17:57:34.957929Z", "shell.execute_reply.started": "2023-07-29T17:57:34.957909Z" } }, "outputs": [], "source": [ "# test_results2[0].shape\n", "# difference_img.shape" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.959303Z", "iopub.status.idle": "2023-07-29T17:57:34.960096Z", "shell.execute_reply": "2023-07-29T17:57:34.959921Z", "shell.execute_reply.started": "2023-07-29T17:57:34.959901Z" } 
}, "outputs": [], "source": [ "# slice_index = 70\n", "# plt.subplot(1,3,1)\n", "# plt.imshow(test_results1[0][0,:,:,slice_index,:])\n", "# plt.subplot(1,3,2)\n", "# plt.imshow(test_results2[0][0,:,:,slice_index,:])\n", "# plt.show()\n", "# plt.subplot(1,3,3)\n", "# plt.imshow(difference_img[0,:,:,slice_index,:])\n", "# plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.961136Z", "iopub.status.idle": "2023-07-29T17:57:34.961869Z", "shell.execute_reply": "2023-07-29T17:57:34.961692Z", "shell.execute_reply.started": "2023-07-29T17:57:34.961672Z" } }, "outputs": [], "source": [ "# new = np.array(test_results)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.963046Z", "iopub.status.idle": "2023-07-29T17:57:34.963604Z", "shell.execute_reply": "2023-07-29T17:57:34.963421Z", "shell.execute_reply.started": "2023-07-29T17:57:34.963399Z" } }, "outputs": [], "source": [ "# new= model.predict(x)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.965260Z", "iopub.status.idle": "2023-07-29T17:57:34.965864Z", "shell.execute_reply": "2023-07-29T17:57:34.965556Z", "shell.execute_reply.started": "2023-07-29T17:57:34.965531Z" } }, "outputs": [], "source": [ "# #new1= new[1].dtype(int8)\n", "# new1=new[1].astype(np.int32)\n", "# #new1= new[0].squeeze()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.967425Z", "iopub.status.idle": "2023-07-29T17:57:34.968089Z", "shell.execute_reply": "2023-07-29T17:57:34.967699Z", "shell.execute_reply.started": "2023-07-29T17:57:34.967674Z" } }, "outputs": [], "source": [ "# new[1]" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.970001Z", "iopub.status.idle": 
"2023-07-29T17:57:34.970548Z", "shell.execute_reply": "2023-07-29T17:57:34.970301Z", "shell.execute_reply.started": "2023-07-29T17:57:34.970257Z" } }, "outputs": [], "source": [ "# plt.imshow(new1[:,:,80,1]*255)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.972015Z", "iopub.status.idle": "2023-07-29T17:57:34.972550Z", "shell.execute_reply": "2023-07-29T17:57:34.972305Z", "shell.execute_reply.started": "2023-07-29T17:57:34.972262Z" } }, "outputs": [], "source": [ "# test_data = test_generator[0] \n", "# #gt = labels[20]\n", "# #p=test_data.reshape(1,128,128,1)\n", "# prediction=model.predict(test_data)\n", "# print(prediction[1].shape)\n", "# #print(gt)\n", "# seg_predicted=prediction[1]\n", "# seg_predicted.dtype\n", "# print(np.unique(seg_predicted[0,:,:,:]))\n", "# plt.imshow(seg_predicted[0,:,:,:])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### dice loss" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.974007Z", "iopub.status.idle": "2023-07-29T17:57:34.974542Z", "shell.execute_reply": "2023-07-29T17:57:34.974296Z", "shell.execute_reply.started": "2023-07-29T17:57:34.974253Z" } }, "outputs": [], "source": [ "# test_generator[2]" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.976714Z", "iopub.status.idle": "2023-07-29T17:57:34.977528Z", "shell.execute_reply": "2023-07-29T17:57:34.977235Z", "shell.execute_reply.started": "2023-07-29T17:57:34.977207Z" } }, "outputs": [], "source": [ "\n", "# Collect the per-batch dice loss over the test set.\n", "# NOTE(review): 55 is a hard-coded batch count -- a commented probe of\n", "# test_generator[59] in a later cell suggests the generator may have more\n", "# batches than this; confirm whether range(len(test_generator)) was intended.\n", "# Assumes test_generator[i][1][1][0] is the ground-truth segmentation for batch i\n", "# and test_results[1][i] is the corresponding predicted segmentation -- TODO confirm.\n", "dice_loss_list= []\n", "for i in range(55):\n", " test_dice_loss = dice_loss(test_generator[i][1][1][0], test_results[1][i].astype('float32'))\n", " dice_loss_list.append(test_dice_loss)\n", "\n", "# # print('dsc',dice_coef(gt.reshape([128,128,3]).astype('float32'),seg_predicted.reshape([128,128,3]).astype('float32')))\n" ] }, { 
"cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.979656Z", "iopub.status.idle": "2023-07-29T17:57:34.980429Z", "shell.execute_reply": "2023-07-29T17:57:34.980139Z", "shell.execute_reply.started": "2023-07-29T17:57:34.980108Z" } }, "outputs": [], "source": [ "# test_generator[59][2]" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.981964Z", "iopub.status.idle": "2023-07-29T17:57:34.982558Z", "shell.execute_reply": "2023-07-29T17:57:34.982301Z", "shell.execute_reply.started": "2023-07-29T17:57:34.982244Z" } }, "outputs": [], "source": [ "dice_loss_list = np.asarray(dice_loss_list)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.984225Z", "iopub.status.idle": "2023-07-29T17:57:34.984786Z", "shell.execute_reply": "2023-07-29T17:57:34.984547Z", "shell.execute_reply.started": "2023-07-29T17:57:34.984521Z" } }, "outputs": [], "source": [ "print(\"dice coef class 0: \\t\", np.mean(1-dice_loss_list[:,0]))\n", "print(\"dice coef class 1: \\t\", np.mean(1-dice_loss_list[:,1]))\n", "print(\"dice coef class 2: \\t\", np.mean(1-dice_loss_list[:,2]))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.986514Z", "iopub.status.idle": "2023-07-29T17:57:34.987027Z", "shell.execute_reply": "2023-07-29T17:57:34.986783Z", "shell.execute_reply.started": "2023-07-29T17:57:34.986757Z" }, "id": "kysWz9jYc0I7" }, "outputs": [], "source": [ "# import matplotlib.pyplot as plt\n", "\n", "# inst=images_test.shape[0]\n", "# dice=[]\n", "# pred_labels=np.zeros([inst,128,128,3])\n", "# actual_labels=np.zeros([inst,128,128,3])\n", "\n", "# list_0 =[]\n", "# list_1 = []\n", "# list_2 = []\n", "\n", "# for img in range(inst):\n", "# print('------------------Image:',img)\n", " \n", "# 
data=images_test[img,:,:,:]\n", "# print(data.shape)\n", "# gt=masks_test[img,:,:,:]\n", "# print('gt',gt.shape)\n", "# actual_labels[img,:,:,:]=gt\n", "# plt.imshow(data[:,:,:])\n", "# plt.title(\"Image\")\n", "# plt.show()\n", " \n", "# plt.imshow(gt[:,:,:])\n", "# plt.title(\"Ground Truth\")\n", "# plt.show()\n", " \n", "# reshaped_img=data.reshape(1,128,128,1)\n", "# prediction=model.predict(reshaped_img)\n", "# seg_predicted=prediction[1]\n", " \n", "# converted = seg_predicted.astype(np.float64)\n", "# converted = converted[0,:,:,:]*255\n", "# print('converted',converted.shape)\n", " \n", "# pred_labels[img,:,:,:]=seg_predicted.reshape(128,128,3)\n", "# plt.imshow(converted.astype(np.uint8))\n", "# plt.title(\"Predicted\")\n", "# plt.show()\n", "# dsc_list = dice_loss(gt,converted)\n", "# print(len(dsc_list))\n", "# print('dice coef for class 0: ',1-dsc_list[0])\n", "# print('dice coef for class 1: ',1-dsc_list[1])\n", "# print('dice coef for class 2: ',1-dsc_list[2])\n", " \n", "# list_0.append(dsc_list[0])\n", "# list_1.append(dsc_list[1])\n", "# list_2.append(dsc_list[2])\n", "\n", "\n", " \n", "# print('dsc',dice_coef(gt.reshape([128,128,3]).astype('float32'),seg_predicted.reshape([128,128,3]).astype('float32')))\n", "# dice.append(dice_coef(gt.reshape([128,128,3]).astype('float32'),seg_predicted.reshape([128,128,3]).astype('float32')))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.988537Z", "iopub.status.idle": "2023-07-29T17:57:34.989922Z", "shell.execute_reply": "2023-07-29T17:57:34.989742Z", "shell.execute_reply.started": "2023-07-29T17:57:34.989721Z" }, "id": "Y0tCpC5kc0I7" }, "outputs": [], "source": [ "# dice_coef" ] }, { "cell_type": "markdown", "metadata": { "id": "jadLBSaWc0I8" }, "source": [ "# Single Imge Prediction" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.991001Z", 
"iopub.status.idle": "2023-07-29T17:57:34.991827Z", "shell.execute_reply": "2023-07-29T17:57:34.991644Z", "shell.execute_reply.started": "2023-07-29T17:57:34.991624Z" }, "id": "S1C-3PDYc0I8", "scrolled": true }, "outputs": [], "source": [ "# #!pip install tensorflow-estimator=2.1.0\n", "# test_data = images_test[10000] \n", "# gt = labels[10000]\n", "# p=test_data.reshape(1,128,128,1)\n", "# prediction=model.predict(p)\n", "# print(prediction[1].shape)\n", "# print(gt)\n", "# seg_predicted=prediction[1]\n", "# seg_predicted.dtype\n", "# print(np.unique(seg_predicted[0,:,:,:]))\n", "# plt.imshow(seg_predicted[0,:,:,:])\n", "\n", "# #plt.imshow(seg_predicted[0,:,:,0]*255,'gray')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.992876Z", "iopub.status.idle": "2023-07-29T17:57:34.993686Z", "shell.execute_reply": "2023-07-29T17:57:34.993508Z", "shell.execute_reply.started": "2023-07-29T17:57:34.993488Z" }, "id": "S_-EBsI3c0I9", "scrolled": true }, "outputs": [], "source": [ "# converted = seg_predicted.astype(np.uint8)\n", "# converted = converted[0,:,:,0]*255\n", "# plt.imshow(converted)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.994879Z", "iopub.status.idle": "2023-07-29T17:57:34.995569Z", "shell.execute_reply": "2023-07-29T17:57:34.995385Z", "shell.execute_reply.started": "2023-07-29T17:57:34.995365Z" }, "id": "aALTmH2-5nXK" }, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "# list all data in history\n", "print(history.history.keys())\n", "# summarize history for accuracy\n", "# plt.plot(history.history['classification_accuracy'])\n", "# plt.plot(history.history['val_classification_accuracy'])\n", "# plt.title('Classification accuracy')\n", "# plt.ylabel('accuracy')\n", "# plt.xlabel('epoch')\n", "# plt.legend(['train', 'test'], loc='upper left')\n", "# plt.show()\n", "# summarize history for 
loss\n", "# Save BEFORE plt.show(): with the inline backend, show() flushes and clears\n", "# the active figure, so a savefig() placed after it writes a blank image.\n", "plt.plot(history.history['loss'])\n", "plt.plot(history.history['val_loss'])\n", "plt.title('training loss')\n", "plt.ylabel('loss')\n", "plt.xlabel('epoch')\n", "plt.legend(['train', 'test'], loc='upper left')  # NOTE: 'test' curve is actually the validation series (val_loss)\n", "plt.savefig('overall loss.jpg')\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.996738Z", "iopub.status.idle": "2023-07-29T17:57:34.997416Z", "shell.execute_reply": "2023-07-29T17:57:34.997209Z", "shell.execute_reply.started": "2023-07-29T17:57:34.997189Z" } }, "outputs": [], "source": [ "# plt.savefig('overall loss.jpg')  # disabled: running after plt.show() would overwrite the saved plot with a blank figure" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:34.998612Z", "iopub.status.idle": "2023-07-29T17:57:34.999247Z", "shell.execute_reply": "2023-07-29T17:57:34.999068Z", "shell.execute_reply.started": "2023-07-29T17:57:34.999049Z" } }, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "# list all data in history\n", "print(history.history.keys())\n", "# summarize history for accuracy\n", "plt.plot(history.history['classify_accuracy'])\n", "plt.plot(history.history['val_classify_accuracy'])\n", "plt.title('Classification Accuracy')\n", "plt.ylabel('accuracy')\n", "plt.xlabel('epoch')\n", "plt.legend(['train', 'test'], loc='upper left')\n", "plt.savefig('classify.jpg')  # save before show() so the written image is not blank\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:35.000430Z", "iopub.status.idle": "2023-07-29T17:57:35.001060Z", "shell.execute_reply": "2023-07-29T17:57:35.000883Z", "shell.execute_reply.started": "2023-07-29T17:57:35.000862Z" } }, "outputs": [], "source": [ "# plt.savefig('classify.jpg')  # disabled: running after plt.show() would overwrite the saved plot with a blank figure" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:35.002255Z", "iopub.status.idle": "2023-07-29T17:57:35.002968Z", "shell.execute_reply": 
"2023-07-29T17:57:35.002775Z", "shell.execute_reply.started": "2023-07-29T17:57:35.002754Z" }, "id": "apt9HXzec0I-" }, "outputs": [], "source": [ "\n", "# summarize history for Dice_loss\n", "plt.plot(history.history['segmentation_loss'])\n", "plt.plot(history.history['val_segmentation_loss'])\n", "plt.title('Dice Loss')\n", "plt.ylabel('Loss')\n", "plt.xlabel('Epoch')\n", "plt.legend(['train', 'test'], loc='upper left')\n", "# save BEFORE plt.show(): show() clears the active figure under the inline\n", "# backend, so a savefig() placed after it writes a blank image\n", "plt.savefig('seg.jpg')\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:35.004115Z", "iopub.status.idle": "2023-07-29T17:57:35.004792Z", "shell.execute_reply": "2023-07-29T17:57:35.004607Z", "shell.execute_reply.started": "2023-07-29T17:57:35.004587Z" } }, "outputs": [], "source": [ "# plt.savefig('seg.jpg')  # disabled: running after plt.show() would overwrite the saved plot with a blank figure" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:35.005958Z", "iopub.status.idle": "2023-07-29T17:57:35.006650Z", "shell.execute_reply": "2023-07-29T17:57:35.006459Z", "shell.execute_reply.started": "2023-07-29T17:57:35.006433Z" } }, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "# list all data in history\n", "print(history.history.keys())\n", "# summarize history for accuracy\n", "plt.plot(history.history['survival_mean_squared_error'])\n", "plt.plot(history.history['val_survival_mean_squared_error'])\n", "plt.title('Survival MSE')\n", "plt.ylabel('Error')\n", "plt.xlabel('Epoch')\n", "plt.legend(['train', 'test'], loc='upper left')\n", "# save before show() so the written image is not blank.\n", "# NOTE(review): capital-S 'Survival.jpg' here vs lowercase 'survival.jpg' in a\n", "# later cell -- these are different files on case-sensitive filesystems; confirm intent.\n", "plt.savefig('Survival.jpg')\n", "plt.show()\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:35.007911Z", "iopub.status.idle": "2023-07-29T17:57:35.008336Z", "shell.execute_reply": "2023-07-29T17:57:35.008133Z", "shell.execute_reply.started": "2023-07-29T17:57:35.008115Z" } }, "outputs": [], "source": [ "# plt.savefig('survival.jpg')  # disabled: running after plt.show() would write a blank figure" ] }, { "cell_type": 
"code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:35.010153Z", "iopub.status.idle": "2023-07-29T17:57:35.010672Z", "shell.execute_reply": "2023-07-29T17:57:35.010495Z", "shell.execute_reply.started": "2023-07-29T17:57:35.010475Z" }, "id": "j3FPY_0gJM7j", "scrolled": true }, "outputs": [], "source": [ "# print(history.history['val_segmentation_loss'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:35.012075Z", "iopub.status.idle": "2023-07-29T17:57:35.012495Z", "shell.execute_reply": "2023-07-29T17:57:35.012315Z", "shell.execute_reply.started": "2023-07-29T17:57:35.012289Z" }, "id": "vAFB3WWls_gk", "scrolled": true }, "outputs": [], "source": [ "# masks_images.shape" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:35.013858Z", "iopub.status.idle": "2023-07-29T17:57:35.014763Z", "shell.execute_reply": "2023-07-29T17:57:35.014579Z", "shell.execute_reply.started": "2023-07-29T17:57:35.014558Z" }, "id": "BsKpxJ-4Jsib", "scrolled": true }, "outputs": [], "source": [ "# np.save('./output/2019-32-04-Dec-attention.npy', history.history)\n", "\n", "\n", "#history=np.load('./output/my_history.npy',allow_pickle='TRUE').item()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "execution": { "iopub.status.busy": "2023-07-29T17:57:35.016233Z", "iopub.status.idle": "2023-07-29T17:57:35.017074Z", "shell.execute_reply": "2023-07-29T17:57:35.016890Z", "shell.execute_reply.started": "2023-07-29T17:57:35.016869Z" }, "id": "wtk3DOYKc0I_" }, "outputs": [], "source": [ "# model.save('./new_full_2_may.h5')" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", 
"nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 4 }