# CNN_segmented_tumour_deepfeatures.py
# Extracts deep features from segmented tumour NIfTI volumes (UCSF-PDGM
# dataset) by passing resized 2-D slices through an AlexNet-style Keras CNN
# and saving the final dense-layer activations.
import os

import nibabel as nib
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import (
    Activation,
    BatchNormalization,
    Conv2D,
    Dense,
    Flatten,
    MaxPooling2D,
)
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import image


def _build_feature_extractor(input_shape=(224, 224, 1)):
    """Build an AlexNet-style CNN used as a fixed deep-feature extractor.

    The activations of the final ``Dense(4096)`` layer are the "deep
    features".  There is no classification head (and no softmax) because the
    network is never trained in this script -- it is only used through
    ``model.predict``.  For the same reason the model is NOT compiled:
    ``compile`` (and a loss function) is only needed for training, and the
    original ``categorical_crossentropy`` setting was dead configuration.

    NOTE(review): the weights are randomly initialised, so the extracted
    features are random projections of the input.  If the intent (per the
    MDPI paper linked below) was transfer learning, pretrained weights
    should be loaded instead -- confirm with the author.

    Parameters
    ----------
    input_shape : tuple
        (height, width, channels) of each input slice; defaults to the
        224x224 single-channel slices produced later in this script.

    Returns
    -------
    tensorflow.keras.Sequential
        The untrained feature-extraction network.
    """
    net = Sequential()

    # Conv block 1: large 9x9 kernels with aggressive stride, AlexNet-style.
    net.add(Conv2D(96, (9, 9), strides=(4, 4), padding='valid',
                   input_shape=input_shape))
    net.add(Activation('relu'))
    net.add(BatchNormalization())
    net.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    # Conv block 2.
    net.add(Conv2D(256, (7, 7), strides=(1, 1), padding='same'))
    net.add(Activation('relu'))
    net.add(BatchNormalization())
    net.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    # Conv blocks 3 and 4: two identical 3x3 layers with no pooling between.
    net.add(Conv2D(384, (3, 3), strides=(1, 1), padding='same'))
    net.add(Activation('relu'))
    net.add(Conv2D(384, (3, 3), strides=(1, 1), padding='same'))
    net.add(Activation('relu'))

    # Conv block 5, followed by the final spatial pooling.
    net.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same'))
    net.add(Activation('relu'))
    net.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    # Flatten and project to the 4096-dim feature vector.
    net.add(Flatten())
    net.add(Dense(4096))
    net.add(Activation('relu'))

    return net


# Module-level model instance used by the extraction loop below.
model = _build_feature_extractor()
# Directory containing one sub-folder per patient (named "<patient_id>_nifti").
base_dir = r"C:\Users\pg22\OneDrive - King's College London\Documents\PhD Data\UCSF-PDGM-v3\UCSF_PDGM"  # Replace with the actual path

# Extract deep features for every patient folder found under base_dir.
# ``features`` holds the result for the most recently processed patient.
for folder_name in os.listdir(base_dir):
    patient_dir = os.path.join(base_dir, folder_name)

    # Skip stray files at the top level; only patient directories count.
    if not os.path.isdir(patient_dir):
        continue

    # Folder names look like "<patient_id>_nifti"; strip the suffix.
    patient_id = folder_name.replace('_nifti', '')

    # Load the segmented tumour volume.  Skip patients without one instead
    # of letting nib.load raise and abort the whole run (original behaviour).
    tumor_file = os.path.join(patient_dir, f'{patient_id}_tumor_segmentation.nii.gz')
    if not os.path.exists(tumor_file):
        print(f'Skipping {patient_id}: tumour segmentation file not found')
        continue
    tumor_data = nib.load(tumor_file).get_fdata()

    # Resize each 2-D slice to the model's 224x224 input size via PIL.
    # NOTE(review): iterating the volume slices along axis 0 -- confirm this
    # is the intended anatomical orientation for this dataset.
    tumor_data_resized = np.array(
        [image.array_to_img(slice_2d[..., None], scale=False).resize((224, 224))
         for slice_2d in tumor_data]
    )

    # Normalise pixel values to [0, 1].  A segmentation volume can be all
    # zeros; the original unconditional division would then produce NaNs
    # (and a divide-by-zero warning), so guard against a zero peak.
    peak = np.max(tumor_data_resized)
    if peak > 0:
        tumor_data_normalized = tumor_data_resized / peak
    else:
        tumor_data_normalized = tumor_data_resized

    # Add the trailing channel axis the model expects:
    # (n_slices, 224, 224) -> (n_slices, 224, 224, 1).
    tumor_data_expanded = np.expand_dims(tumor_data_normalized, axis=-1)

    # One 4096-dimensional feature vector per slice.
    features = model.predict(tumor_data_expanded)
+
|
| 74 |
+
import matplotlib.pyplot as plt
|
| 75 |
+
|
| 76 |
+
# Reshape the features into a 2D array for visualization
|
| 77 |
+
# Calculate the correct dimensions
|
| 78 |
+
dim1 = 64 # This can be any number that divides 983040 evenly
|
| 79 |
+
dim2 = features.size // dim1
|
| 80 |
+
|
| 81 |
+
# Reshape the features into a 2D array for visualization
|
| 82 |
+
features_reshaped = features.reshape((dim1, dim2))
|
| 83 |
+
|
| 84 |
+
# Use Matplotlib to display the image
|
| 85 |
+
plt.imshow(features_reshaped, cmap='viridis')
|
| 86 |
+
plt.colorbar()
|
| 87 |
+
plt.show()
|
| 88 |
+
# The number of features may vary depending on your model architecture
|
| 89 |
+
# Here we assume the features are of shape (1, 4096)
|
| 90 |
+
|
| 91 |
+
# https://www.mdpi.com/2075-4418/12/4/1018
|
| 92 |
+
|
| 93 |
+
#save the feature file# Define the file path where you want to save the features
|
| 94 |
+
features_file_path = r"C:\Users\pg22\OneDrive - King's College London\Documents\PhD Data\features.npy"
|
| 95 |
+
|
| 96 |
+
# Save the features to the file
|
| 97 |
+
np.save(features_file_path, features)
|
| 98 |
+
|