Prathush21 commited on
Commit
bd3e8b6
·
verified ·
1 Parent(s): c3ddc16

Upload 8 files

Browse files
app.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, jsonify, request, send_file, render_template
2
+ from flask_cors import CORS
3
+ from lrp_pipeline import lrp_main
4
+ from utils import create_folders, delete_folders, create_zip_file
5
+ from cam_pipeline import cam_main
6
+
7
# Flask application instance; CORS is enabled so a separately-hosted
# frontend can call these API endpoints cross-origin.
app = Flask(__name__)
CORS(app)
9
+
10
+
11
@app.route("/api/upload", methods=["GET"])
def get_data():
    """Health-check style endpoint: return a static greeting as JSON."""
    return jsonify({"message": "Hello from Flask backend!"})
15
+
16
+
17
@app.route("/api/upload", methods=["POST"])
def submit_data():
    """Reset the working folders and persist the uploaded file.

    Clears artifacts from any previous run (uploads, heatmaps,
    segmentations, tables, cell descriptors), then saves the first file
    from the multipart request into ``uploads/``.

    Returns:
        JSON status message; 400 when the request carries no file.
    """
    folder_names = [
        "uploads",
        "heatmaps",
        "segmentations",
        "tables",
        "cell_descriptors",
    ]
    delete_folders(folder_names)
    create_folders(folder_names)

    files = list(request.files.values())
    if not files:
        # Previously this raised IndexError on a file-less request;
        # fail cleanly with a 400 instead.
        return jsonify({"message": "No file provided!"}), 400

    file = files[0]
    print(file)
    # NOTE(review): file.filename is client-controlled; consider
    # werkzeug.utils.secure_filename to prevent path traversal.
    file.save(f"uploads/{file.filename}")

    return jsonify({"message": "Data received successfully!"})
39
+
40
+
41
@app.route("/api/inputform", methods=["POST"])
def submit_form():
    """Dispatch to the selected XAI pipeline.

    Expected JSON body (example): {"model": "VGGNet", "xaiMethod": "LRP",
    "magval": ...}. "magval" is forwarded as the pixel-conversion factor.

    Returns:
        The selected pipeline's result dict; 400 for an unknown method.
    """
    data = dict(request.json)
    print(data)
    if "LRP" in data["xaiMethod"]:
        return lrp_main(float(data["magval"]))
    elif "GradCAM++" in data["xaiMethod"]:
        return cam_main(float(data["magval"]))
    # Previously fell through returning None (a 500 in Flask); report clearly.
    return jsonify({"message": "Unknown XAI method!"}), 400
51
+
52
+
53
@app.route("/api/zip", methods=["GET"])
def get_csv():
    """Bundle the generated outputs into a zip archive and stream it back."""
    create_zip_file()
    return send_file("outputs.zip", as_attachment=True)
57
+
58
+
59
if __name__ == "__main__":
    # Development server only; use a production WSGI server for deployment.
    app.run(debug=True)
cam_pipeline.py ADDED
@@ -0,0 +1,431 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import numpy as np
3
+ from PIL import Image
4
+ import os
5
+ import tensorflow as tf
6
+ import cv2
7
+ from sklearn.mixture import GaussianMixture
8
+ import base64
9
+ import csv
10
+ from tensorflow.keras.applications.efficientnet import preprocess_input
11
+ from tensorflow.keras.preprocessing import image
12
+ from tensorflow.keras.models import Model
13
+ from tensorflow.keras.layers import Lambda
14
+ from tensorflow.keras.models import load_model
15
+ from utils import (
16
+ select_sample_images,
17
+ create_cell_descriptors_table,
18
+ calculate_cell_descriptors,
19
+ )
20
+
21
# Working directories shared with the Flask app (created/cleared per upload).
preprocessed_folder = 'uploads/'
segmentation_folder = 'segmentations/'
intermediate_folder = 'heatmaps/'
tables_folder = "tables/"
cell_descriptors_path = "cell_descriptors/cell_descriptors.csv"
# Xception-based classifier, loaded once at import time.
saved_model_path = 'xception_model_81.h5'  # Replace with the path to your saved model
model = load_model(saved_model_path)
28
+
29
+
30
def preprocess_image(img_path):
    """Load an image at 224x224 and preprocess it into a 1-image batch."""
    loaded = image.load_img(img_path, target_size=(224, 224))
    batch = np.expand_dims(image.img_to_array(loaded), axis=0)
    return preprocess_input(batch)
36
+
37
def generate_grad_cam_plus_plus(img_path, model, last_conv_layer_name, classifier_layer_names):
    """Compute a Grad-CAM++ heatmap for the image at img_path.

    Args:
        img_path: path to the input image (loaded at 224x224).
        model: Keras model (Xception preprocessing is applied) to explain.
        last_conv_layer_name: name of the last conv layer to inspect.
        classifier_layer_names: unused here; kept for interface compatibility.

    Returns:
        2D numpy heatmap normalised by its maximum.
    """
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    img = tf.keras.applications.xception.preprocess_input(x)
    # Model exposing both the last conv feature maps and the final prediction.
    grad_model = Model(
        inputs=[model.inputs],
        outputs=[model.get_layer(last_conv_layer_name).output, model.output]
    )

    with tf.GradientTape() as tape1, tf.GradientTape() as tape2:
        last_conv_output, preds = grad_model(img)
        class_idx = np.argmax(preds[0])
        # NOTE(review): the loss is taken from output unit 0 rather than
        # class_idx, so the map always explains the first class — confirm.
        loss = preds[:, 0]
        grads = tape1.gradient(loss, last_conv_output)

        # Grad-CAM++ first-order term, scaled by exp(loss).
        first_derivative = tf.exp(loss) * grads

        # Second-order term via the nested tape.
        second_derivative = tape2.gradient(grads, last_conv_output)
        second_derivative = tf.exp(loss) * second_derivative

    # Alpha weights per Grad-CAM++ (small epsilon guards the division).
    global_sum = tf.reduce_sum(first_derivative, axis=(0, 1, 2), keepdims=True)
    alpha_num = second_derivative
    alpha_denom = second_derivative * 2.0 + first_derivative * global_sum
    alphas = alpha_num / (alpha_denom + 1e-7)

    weights = tf.maximum(0, global_sum)
    alpha_normalization_constant = tf.reduce_sum(alphas, axis=(0, 1), keepdims=True)
    alphas /= (alpha_normalization_constant + 1e-7)

    deep_linearization_weights = tf.reduce_sum(weights * alphas, axis=(0, 3))

    # Reshape weights to broadcast against the (1, 7, 7, C) conv output.
    deep_linearization_weights = tf.reshape(deep_linearization_weights, (1, 7, 7, -1))

    # Weighted sum of the conv feature maps -> class activation map.
    cam = tf.reduce_sum(deep_linearization_weights * last_conv_output, axis=3)

    # ReLU then max-normalise into [0, 1].
    cam = tf.maximum(cam, 0)
    cam /= tf.reduce_max(cam)

    heatmap = tf.reduce_mean(cam, axis=0)  # collapse the batch axis

    heatmap = heatmap.numpy()

    return heatmap
108
+
109
+
110
def GMM_abnormal_method(heatmap):
    """Cluster a heatmap into intensity tiers for an abnormal-class image.

    Fits a 4-component Gaussian mixture on the flattened 224x224 heatmap,
    reorders components by mean intensity, and colours (BGR): tier 0 blue
    [0,0,255], tier 1 dark blue [128,0,0], tiers 2-3 red-channel [255,0,0].

    Returns:
        (labels_mapped, colored_labels): intensity-ordered label matrix and
        its coloured version.
    """
    resized = cv2.resize(heatmap, (224, 224))
    samples = resized.reshape(-1, 1)

    gmm = GaussianMixture(n_components=4, random_state=0)
    gmm.fit(samples)
    raw_labels = gmm.predict(samples).reshape(resized.shape[:2])

    # Remap component ids so 0 = dimmest ... 3 = brightest.
    order = np.argsort(gmm.means_.flatten())
    remap = {component: rank for rank, component in enumerate(order)}
    labels_mapped = np.vectorize(remap.get)(raw_labels)

    palette = np.array([[0, 0, 255], [128, 0, 0], [255, 0, 0], [255, 0, 0]])  # BGR
    return labels_mapped, palette[labels_mapped]
134
+
135
def GMM_normal_method(heatmap):
    """Cluster a heatmap into intensity tiers for a normal-class image.

    Same clustering as GMM_abnormal_method but with a different palette:
    only the brightest tier becomes [255,0,0]; the two middle tiers are
    dark blue [128,0,0] and the dimmest is [0,0,255] (BGR).

    Returns:
        (labels_mapped, colored_labels): intensity-ordered label matrix and
        its coloured version.
    """
    resized = cv2.resize(heatmap, (224, 224))
    samples = resized.reshape(-1, 1)

    gmm = GaussianMixture(n_components=4, random_state=0)
    gmm.fit(samples)
    raw_labels = gmm.predict(samples).reshape(resized.shape[:2])

    # Remap component ids so 0 = dimmest ... 3 = brightest.
    order = np.argsort(gmm.means_.flatten())
    remap = {component: rank for rank, component in enumerate(order)}
    labels_mapped = np.vectorize(remap.get)(raw_labels)

    palette = np.array([[0, 0, 255], [128, 0, 0], [128, 0, 0], [255, 0, 0]])  # BGR
    return labels_mapped, palette[labels_mapped]
159
+
160
def create_nucelus(img, colored_segmentation_mask):
    """Refine the GMM nucleus region with GrabCut on the original image.

    Mask pixels coloured [255, 0, 0] seed GrabCut's probable foreground
    (nucleus) and all other pixels the probable background. Returns a
    224x224 BGR image with the nucleus as [255, 0, 0], background
    [0, 0, 255]; pixels GrabCut marks definite (labels 0/1) stay black.
    (Function name typo "nucelus" kept — callers use it.)
    """
    mask = colored_segmentation_mask
    # Seed colours used by the GMM stage.
    color_to_extract = [255, 0, 0]
    background_color = [0, 0, 255]

    # Boolean masks: nucleus pixels vs everything else.
    component_mask = np.all(mask == color_to_extract, axis=-1)
    background_mask = ~component_mask

    # Two-colour seed image: nucleus colour on a background colour.
    result = np.zeros_like(mask)

    result[component_mask] = color_to_extract
    result[background_mask] = background_color

    img = cv2.resize(img, (224, 224))

    # GrabCut model buffers (required by the API; contents are opaque).
    fgModel = np.zeros((1, 65), dtype="float")
    bgModel = np.zeros((1, 65), dtype="float")

    # Translate the seed colours into GrabCut's probable-FG/BG labels.
    mask = np.zeros(result.shape[:2], np.uint8)
    mask[(result == [255, 0, 0]).all(axis=2)] = cv2.GC_PR_FGD  # probable foreground
    mask[(result == [0, 0, 255]).all(axis=2)] = cv2.GC_PR_BGD  # probable background

    rect = (0, 0, img.shape[1], img.shape[0])  # whole image; unused in mask mode

    (mask, bgModel, fgModel) = cv2.grabCut(img, mask, rect, bgModel,
                                           fgModel, iterCount=10, mode=cv2.GC_INIT_WITH_MASK)

    output_image_1 = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)

    # GC_PR_BGD (2) -> red background, GC_PR_FGD (3) -> blue nucleus (BGR).
    output_image_1[mask == 2] = [0, 0, 255]
    output_image_1[mask == 3] = [255, 0, 0]

    return output_image_1
201
+
202
def create_colored_segmentation_mask(labels):
    """Map integer region labels to BGR colours.

    Label 0 (dimmest GMM tier) becomes the blue background [0, 0, 255];
    labels 1-3 all become the cell colour [255, 0, 0].
    """
    palette = np.array(
        [[0, 0, 255], [255, 0, 0], [255, 0, 0], [255, 0, 0]]
    )  # BGR
    return palette[labels]
216
+
217
def create_background(img, heatmap, labels):
    """Segment the whole cell body from the background with GrabCut.

    The GMM labels are recoloured (label 0 -> background, 1-3 -> cell) and
    used to seed GrabCut on the original image. `heatmap` is currently
    unused. Returns a BGR image: cell body [128, 0, 0], background
    [0, 0, 255]; pixels GrabCut marks definite (labels 0/1) stay black.
    """
    colored_labels = create_colored_segmentation_mask(labels)
    mask = colored_labels
    # Seed colours produced by create_colored_segmentation_mask.
    color_to_extract = [255, 0, 0]
    background_color = [0, 0, 255]

    # Boolean masks: cell pixels vs everything else.
    component_mask = np.all(mask == color_to_extract, axis=-1)
    background_mask = ~component_mask

    # Two-colour seed image for GrabCut.
    result = np.zeros_like(mask)
    result[component_mask] = color_to_extract
    result[background_mask] = background_color

    # GrabCut model buffers (required by the API; contents are opaque).
    fgModel = np.zeros((1, 65), dtype="float")
    bgModel = np.zeros((1, 65), dtype="float")

    # Translate the seed colours into GrabCut's probable-FG/BG labels.
    mask1 = np.zeros(result.shape[:2], np.uint8)
    mask1[(result == [255, 0, 0]).all(axis=2)] = cv2.GC_PR_FGD  # probable foreground
    mask1[(result == [0, 0, 255]).all(axis=2)] = cv2.GC_PR_BGD  # probable background

    rect = (1, 1, img.shape[1], img.shape[0])  # unused in mask mode

    (mask1, bgModel, fgModel) = cv2.grabCut(img, mask1, rect, bgModel,
                                            fgModel, iterCount=10, mode=cv2.GC_INIT_WITH_MASK)

    output_image = np.zeros((mask1.shape[0], mask1.shape[1], 3), dtype=np.uint8)

    # GC_PR_BGD (2) -> red background, GC_PR_FGD (3) -> dark-blue cell body (BGR).
    output_image[mask1 == 2] = [0, 0, 255]
    output_image[mask1 == 3] = [128, 0, 0]

    return output_image
257
+
258
+ # def remove_nucleus(image, blue_mask):
259
+ # #expand the nucleus mask
260
+ # image1 = cv2.resize(image, (224,224))
261
+ # blue_mask1 = cv2.resize(blue_mask, (224,224))
262
+ # kernel = np.ones((5, 5), np.uint8) # Adjust the kernel size as needed
263
+ # expandedmask = cv2.dilate(blue_mask1, kernel, iterations=1)
264
+ # simple_lama = SimpleLama()
265
+ # image_pil = Image.fromarray(cv2.cvtColor(image1, cv2.COLOR_BGR2RGB))
266
+ # mask_pil = Image.fromarray(expandedmask)
267
+ # result = simple_lama(image_pil, mask_pil)
268
+ # result_cv2 = np.array(result)
269
+ # result_cv2 = cv2.cvtColor(result_cv2, cv2.COLOR_RGB2BGR)
270
+ # # result_cv2 = cv2.resize(result_cv2, (x,y))
271
+ # return expandedmask, result_cv2
272
+
273
+ # def get_nucleus_mask(nucleus): #image_path, x, y
274
+ # # nucleus = cv2.imread(nucleus)
275
+ # # Convert image to HSV color space
276
+ # hsv_image = cv2.cvtColor(nucleus, cv2.COLOR_BGR2HSV)
277
+ # # Define lower and upper bounds for blue color in HSV
278
+ # lower_blue = np.array([100, 50, 50])
279
+ # upper_blue = np.array([130, 255, 255])
280
+ # # Create a mask for blue color
281
+ # blue_mask = cv2.inRange(hsv_image, lower_blue, upper_blue)
282
+ # return blue_mask #, image
283
+
284
def save_heatmap(heatmap, img_path, heatmap_path):
    """Overlay a [0, 1] heatmap on the original image and write it to disk.

    The heatmap is resized to the image, mapped through the JET colormap and
    alpha-blended (0.4 heatmap / 0.6 image) before saving.
    """
    base = cv2.imread(img_path)
    overlay = cv2.resize(heatmap, (base.shape[1], base.shape[0]))
    overlay = cv2.applyColorMap(np.uint8(255 * overlay), cv2.COLORMAP_JET)

    blended = np.uint8(cv2.addWeighted(overlay, 0.4, base, 0.6, 0))
    # NOTE(review): BGR->RGB conversion before cv2.imwrite (which expects
    # BGR) swaps channels in the saved file; kept to match existing outputs.
    blended = cv2.cvtColor(blended, cv2.COLOR_BGR2RGB)

    cv2.imwrite(heatmap_path, blended)
298
+
299
+
300
def cam_main(pixel_conversion):
    """Run the Grad-CAM++ explanation + segmentation pipeline over uploads/.

    For each uploaded image: compute the Grad-CAM++ heatmap, GMM-cluster it
    (colouring depends on the predicted class), refine nucleus and cell-body
    masks with GrabCut, measure cell descriptors, and render a per-image
    table. A sample of images (chosen by select_sample_images) is returned
    base64-encoded for the frontend.

    Args:
        pixel_conversion: pixel-to-physical-unit factor forwarded to
            calculate_cell_descriptors.

    Returns:
        Dict of base64 strings keyed image{i}/inter{i}/mask{i}/table{i}.
    """
    count = 0  # index of the current image, compared against selected_indices

    return_dict_count = 1
    return_dict = {}
    selected_indices = select_sample_images()
    print('selected_indices')
    print(selected_indices)
    resized_shape = (224, 224)
    # CSV header row + one row appended per processed image.
    cell_descriptors = [
        ["Image Name", "Nucleus Area", "Cytoplasm Area", "Nucleus to Cytoplasm Ratio"]
    ]

    image_files = [f for f in os.listdir(preprocessed_folder) if not f.startswith('.DS_Store')]

    for imagefile in image_files:
        # Skip macOS archive metadata and hidden/underscore-prefixed files.
        if (
            "MACOSX".lower() in imagefile.lower()
            or "." == imagefile[0]
            or "_" == imagefile[0]
        ):
            print(imagefile)
            continue
        image_path = (
            preprocessed_folder + imagefile
        )
        intermediate_path = (
            intermediate_folder
            + os.path.splitext(imagefile)[0].lower()
            + "_heatmap.png"
        )
        save_path = (
            segmentation_folder + os.path.splitext(imagefile)[0].lower() + "_mask.png"
        )
        table_path = (
            tables_folder + os.path.splitext(imagefile)[0].lower() + "_table.png"
        )

        # Grad-CAM++ heatmap from the last separable conv block of Xception.
        heatmap = generate_grad_cam_plus_plus(image_path, model, 'block14_sepconv2_act', ['dense_1'])

        save_heatmap(heatmap, image_path, intermediate_path)

        # Classify the image to pick the GMM colouring scheme below.
        pred_class = model.predict(preprocess_image(image_path))
        pred_class = pred_class.argmax(axis=1)[0]

        if pred_class == 0:
            labels, colored_segmentation_mask = GMM_abnormal_method(heatmap)
        else:
            labels, colored_segmentation_mask = GMM_normal_method(heatmap)

        image = cv2.imread(image_path)
        original_shape = image.shape
        image = cv2.resize(image, (224, 224))

        # GrabCut-refined nucleus mask.
        nucleus = create_nucelus(image, colored_segmentation_mask)

        # GrabCut-refined whole-cell (cell body vs background) mask.
        background = create_background(image, heatmap, labels)

        # Intersect the two masks via bitwise AND on the BGR values.
        combined_mask = background & nucleus

        # Recolour the AND result: [128,0,0] (nucleus within cell body)
        # becomes nucleus [255,0,0]; black pixels become cytoplasm [128,0,0].
        for i in range(combined_mask.shape[0]):
            for j in range(combined_mask.shape[1]):
                original_color = tuple(combined_mask[i, j])
                if original_color == (128, 0, 0):
                    combined_mask[i, j] = np.array((255, 0, 0))
                elif original_color == (0, 0, 0):
                    combined_mask[i, j] = np.array((128, 0, 0))

        cv2.imwrite(save_path, combined_mask)

        nucleus_area, cytoplasm_area, ratio = calculate_cell_descriptors(
            original_shape, resized_shape, pixel_conversion, combined_mask
        )
        cell_descriptors.append(
            [
                os.path.splitext(imagefile)[0].lower(),
                nucleus_area,
                cytoplasm_area,
                ratio,
            ]
        )

        create_cell_descriptors_table(table_path, nucleus_area, cytoplasm_area, ratio)

        # Only a sampled subset of images is shipped back to the UI.
        if count in selected_indices:
            return_dict[f"image{return_dict_count}"] = str(
                base64.b64encode(open(image_path, "rb").read()).decode("utf-8")
            )
            return_dict[f"inter{return_dict_count}"] = str(
                base64.b64encode(open(intermediate_path, "rb").read()).decode("utf-8")
            )
            return_dict[f"mask{return_dict_count}"] = str(
                base64.b64encode(open(save_path, "rb").read()).decode("utf-8")
            )
            return_dict[f"table{return_dict_count}"] = str(
                base64.b64encode(open(table_path, "rb").read()).decode("utf-8")
            )
            return_dict_count += 1

        count += 1

        print(count)

    # Persist all measured descriptors for the zip download endpoint.
    with open(cell_descriptors_path, "w", newline="") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerows(cell_descriptors)

    print(list(return_dict.keys()))

    return return_dict
428
+
429
+
430
+
431
+ # cam_main(0.2)
herlev_best_adam_vgg16_modified12_final.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c2570970458b76afa0a6741077fedc8f9e60692c970594b4e69b846c1dc8543
3
+ size 260263734
lrp_pipeline.py ADDED
@@ -0,0 +1,415 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import torch
3
+ import torch.nn as nn
4
+ import numpy as np
5
+ import torchvision
6
+ import os
7
+ import copy
8
+ from sklearn.mixture import GaussianMixture as GMM
9
+ from sklearn.cluster import KMeans
10
+ from simple_lama_inpainting import SimpleLama
11
+ from PIL import Image
12
+ from matplotlib.colors import ListedColormap
13
+ import matplotlib.pyplot as plt
14
+ import matplotlib
15
+ import csv
16
+
17
+ matplotlib.use("Agg")
18
+
19
+ import base64
20
+
21
+ from utils import (
22
+ select_sample_images,
23
+ create_cell_descriptors_table,
24
+ calculate_cell_descriptors,
25
+ )
26
+
27
# Working directories shared with the Flask app (created/cleared per upload).
preprocessed_folder = "uploads/"
intermediate_folder = "heatmaps/"
segmentation_folder = "segmentations/"
tables_folder = "tables/"
cell_descriptors_path = "cell_descriptors/cell_descriptors.csv"
# Class index -> human-readable label for the binary classifier.
imgclasses = {0: "abnormal", 1: "normal"}
33
+
34
+
35
def toconv(layers):
    """Convert the Linear layers of a VGG-style classifier into convolutions.

    The first Linear (which consumes the flattened 512x4x4 feature map)
    becomes a 4x4 convolution; every later Linear becomes an equivalent
    1x1 convolution. Non-Linear layers pass through untouched. Weights and
    biases are reshaped from the originals, so the converted stack computes
    the same function on conv-shaped input.
    """
    converted = []
    for idx, layer in enumerate(layers):
        if not isinstance(layer, nn.Linear):
            converted.append(layer)
            continue
        out_features = layer.weight.shape[0]
        if idx == 0:
            in_channels = 512
            conv = nn.Conv2d(in_channels, out_features, 4)
            conv.weight = nn.Parameter(
                layer.weight.reshape(out_features, in_channels, 4, 4)
            )
        else:
            in_channels = layer.weight.shape[1]
            conv = nn.Conv2d(in_channels, out_features, 1)
            conv.weight = nn.Parameter(
                layer.weight.reshape(out_features, in_channels, 1, 1)
            )
        conv.bias = nn.Parameter(layer.bias)
        converted.append(conv)
    return converted
53
+
54
+
55
def newlayer(layer, g):
    """Return a deep copy of *layer* with *g* applied to its weight and bias.

    Layers without a usable weight or bias (e.g. pooling) are copied
    unchanged — this is how the LRP rules build rho-transformed layers
    without touching the originals.
    """
    transformed = copy.deepcopy(layer)
    for attr in ("weight", "bias"):
        try:
            setattr(transformed, attr, nn.Parameter(g(getattr(transformed, attr))))
        except AttributeError:
            pass
    return transformed
66
+
67
+
68
def heatmap(R, sx, sy, intermediate_path):
    """Render an LRP relevance map with a symmetric seismic colormap and save it.

    The colour range is clipped at +/- b, where b is 10x the cubic mean of
    |R| — a robust scale so a few extreme pixels don't wash out the map.
    """
    bound = 10 * ((np.abs(R) ** 3.0).mean() ** (1.0 / 3))

    # Slightly darkened copy of the seismic colormap for better contrast.
    cmap_colors = plt.cm.seismic(np.arange(plt.cm.seismic.N))
    cmap_colors[:, 0:3] *= 0.85
    dimmed_cmap = ListedColormap(cmap_colors)

    plt.figure(figsize=(sx, sy))
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
    plt.axis("off")
    plt.imshow(R, cmap=dimmed_cmap, vmin=-bound, vmax=bound, interpolation="nearest")
    plt.savefig(intermediate_path, bbox_inches="tight", pad_inches=0)
    plt.close()
80
+
81
+
82
def get_LRP_heatmap(image, L, layers, imgclasses, intermediate_path):
    """Run layer-wise relevance propagation through a VGG-style layer list.

    Args:
        image: BGR uint8 image (cv2 convention) already resized for the net.
        L: number of layers in `layers`.
        layers: sequential feature layers plus the classifier converted to
            convolutions by `toconv`.
        imgclasses: index -> class-name mapping, used only for logging.
        intermediate_path: where the rendered relevance heatmap is saved.

    Returns:
        Per-channel input relevance tensor, R[0][0], on CPU.
    """
    # ImageNet normalisation; the ::-1 flip reverses the channel order.
    img = np.array(image)[..., ::-1] / 255.0
    mean = torch.FloatTensor([0.485, 0.456, 0.406]).reshape(1, -1, 1, 1)  # torch.cuda
    std = torch.FloatTensor([0.229, 0.224, 0.225]).reshape(1, -1, 1, 1)  # torch.cuda
    X = (torch.FloatTensor(img[np.newaxis].transpose([0, 3, 1, 2]) * 1) - mean) / std

    # Forward pass, keeping every intermediate activation A[l].
    A = [X] + [None] * L
    for l in range(L):
        A[l + 1] = layers[l].forward(A[l])

    # Log the two highest-scoring classes.
    scores = np.array(A[-1].cpu().data.view(-1))
    ind = np.argsort(-scores)
    for i in ind[:2]:
        print("%20s (%3d): %6.3f" % (imgclasses[i], i, scores[i]))

    # One-hot selector: relevance starts from the top class's logit.
    T = torch.FloatTensor(
        (1.0 * (np.arange(2) == ind[0]).reshape([1, 2, 1, 1]))
    )  # SET FOR THE HIGHEST SCORE CLASS
    R = [None] * L + [(A[-1] * T).data]
    # Backward relevance pass, deepest layer first.
    for l in range(1, L)[::-1]:
        A[l] = (A[l].data).requires_grad_(True)
        if isinstance(layers[l], torch.nn.MaxPool2d):
            # Max pooling is treated as average pooling for LRP purposes.
            layers[l] = torch.nn.AvgPool2d(2)
        if isinstance(layers[l], torch.nn.Conv2d) or isinstance(
            layers[l], torch.nn.AvgPool2d
        ):
            # Gamma rule: favour positive weights by rho(p) = p + 0.25*p+.
            rho = lambda p: p + 0.25 * p.clamp(min=0)
            incr = lambda z: z + 1e-9  # USE ONLY THE GAMMA RULE FOR ALL LAYERS

            z = incr(newlayer(layers[l], rho).forward(A[l]))  # step 1
            # Guard against division by exactly zero.
            epsilon = 1e-9
            z_nonzero = torch.where(z == 0, torch.tensor(epsilon, device=z.device), z)
            s = (R[l + 1] / z_nonzero).data
            # s = (R[l+1]/z).data # step 2
            (z * s).sum().backward()
            c = A[l].grad  # step 3
            R[l] = (A[l] * c).data  # step 4
        else:
            # Activation/flatten-like layers pass relevance through unchanged.
            R[l] = R[l + 1]

    # Input layer: z^B rule using pixel-value bounds lb/hb.
    A[0] = (A[0].data).requires_grad_(True)
    lb = (A[0].data * 0 + (0 - mean) / std).requires_grad_(True)
    hb = (A[0].data * 0 + (1 - mean) / std).requires_grad_(True)

    z = layers[0].forward(A[0]) + 1e-9  # step 1 (a)
    z -= newlayer(layers[0], lambda p: p.clamp(min=0)).forward(lb)  # step 1 (b)
    z -= newlayer(layers[0], lambda p: p.clamp(max=0)).forward(hb)  # step 1 (c)

    # Guard against division by exactly zero.
    epsilon = 1e-9
    z_nonzero = torch.where(z == 0, torch.tensor(epsilon, device=z.device), z)
    s = (R[1] / z_nonzero).data  # step 2

    (z * s).sum().backward()
    c, cp, cm = A[0].grad, lb.grad, hb.grad  # step 3
    R[0] = (A[0] * c + lb * cp + hb * cm).data  # step 4
    # Render and save the pixel-level relevance map (summed over channels).
    heatmap(
        np.array(R[0][0].cpu()).sum(axis=0), 2, 2, intermediate_path
    )  # HEATMAPPING TO SEE LRP MAPS WITH NEW RULE
    return R[0][0].cpu()
143
+
144
+
145
def get_nucleus_mask_for_graphcut(R):
    """Binarise an LRP relevance map into a coarse object/background colouring.

    The per-pixel relevance (summed over channels) is clustered into two
    groups with K-Means; cluster 1 is painted blue [255, 0, 0] (candidate
    object) and cluster 0 red [0, 0, 255] (candidate background), BGR, on a
    128x128 canvas.

    Returns:
        uint8 BGR image of shape (128, 128, 3).
    """
    relevance = np.array(R).sum(axis=0)
    # Reshape the data to one sample per pixel.
    data_1d = relevance.flatten().reshape(-1, 1)
    n_clusters = 2
    kmeans = KMeans(n_clusters=n_clusters, random_state=0)
    kmeans.fit(data_1d)
    # Reshape cluster assignments into a 2D binary matrix.
    binary_matrix = kmeans.labels_.reshape(128, 128)

    rel_grouping = np.zeros((128, 128, 3), dtype=np.uint8)
    rel_grouping[binary_matrix == 1] = [255, 0, 0]  # candidate object (blue)
    # Fix: the original also painted label 2 dark blue, but with
    # n_clusters=2 the labels are only ever 0 or 1, so that branch was dead.
    rel_grouping[binary_matrix == 0] = [0, 0, 255]  # candidate background (red)
    return rel_grouping
163
+
164
+
165
def segment_nucleus(image, rel_grouping):  # clustered = rel_grouping
    """Extract a clean nucleus mask from the K-Means relevance grouping.

    The minority colour in rel_grouping is taken as the nucleus; its larger
    contours give a bounding box which seeds GrabCut. When that box spans
    the whole 128x128 image, GrabCut is seeded from the colour mask instead.

    Returns:
        (nucleus_segment, clean_binary_mask): a BGR visualisation with the
        nucleus in [255, 0, 0] on a [0, 0, 255] background, and the binary
        0/1 nucleus mask.
    """
    # Pick the minority colour — the nucleus occupies fewer pixels.
    blue_pixels = np.sum(np.all(rel_grouping == [255, 0, 0], axis=-1))
    red_pixels = np.sum(np.all(rel_grouping == [0, 0, 255], axis=-1))
    if red_pixels > blue_pixels:
        color = np.array([255, 0, 0])
    else:
        color = np.array([0, 0, 255])
    mask = cv2.inRange(rel_grouping, color, color)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contour_areas = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        contour_areas.append(cv2.contourArea(contour))
    contour_areas.sort()
    contour_areas = np.array(contour_areas)
    # Drop small speckle contours below the median area.
    quartile_50 = np.percentile(contour_areas, 50)
    selected_contours = [
        contour for contour in contours if cv2.contourArea(contour) >= quartile_50
    ]
    x, y, w, h = cv2.boundingRect(np.concatenate(selected_contours))

    # APPLY GRABCUT
    fgModel = np.zeros((1, 65), dtype="float")
    bgModel = np.zeros((1, 65), dtype="float")
    mask = np.zeros(image.shape[:2], np.uint8)
    # NOTE(review): cv2.grabCut documents rect as (x, y, w, h); this passes
    # (x, y, x + w, y + h) — confirm whether the enlarged box is intentional.
    rect = (x, y, x + w, y + h)

    # IF BOUNDING BOX IS THE WHOLE IMAGE, THEN BOUNDING BOX METHOD WONT'T WORK -> SO USE INIT WITH MASK METHOD ITSELF
    if (x, y, x + w, y + h) == (0, 0, 128, 128):

        if (
            red_pixels > blue_pixels
        ):  # red is the dominant color and thus the background
            mask[(rel_grouping == [255, 0, 0]).all(axis=2)] = (
                cv2.GC_PR_FGD
            )  # Probable Foreground
            mask[(rel_grouping == [0, 0, 255]).all(axis=2)] = (
                cv2.GC_PR_BGD
            )  # Probable Background
        else:  # blue is the dominant color and thus the background
            mask[(rel_grouping == [0, 0, 255]).all(axis=2)] = (
                cv2.GC_PR_FGD
            )  # Probable Foreground
            mask[(rel_grouping == [255, 0, 0]).all(axis=2)] = (
                cv2.GC_PR_BGD
            )  # Probable Background

        (mask, bgModel, fgModel) = cv2.grabCut(
            image,
            mask,
            rect,
            bgModel,
            fgModel,
            iterCount=10,
            mode=cv2.GC_INIT_WITH_MASK,
        )

    # ELSE PASS THE BOUNDING BOX FOR GRABCUT
    else:
        (mask, bgModel, fgModel) = cv2.grabCut(
            image,
            mask,
            rect,
            bgModel,
            fgModel,
            iterCount=10,
            mode=cv2.GC_INIT_WITH_RECT,
        )

    # FORM THE COLORED SEGMENTATION MASK
    clean_binary_mask = np.where(
        (mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD), 1, 0
    ).astype("uint8")
    nucleus_segment = np.zeros((128, 128, 3), dtype=np.uint8)
    nucleus_segment[clean_binary_mask == 1] = [255, 0, 0]  # Main object (Blue)
    nucleus_segment[clean_binary_mask == 0] = [0, 0, 255]  # Background (Red)
    return nucleus_segment, clean_binary_mask
244
+
245
+
246
def remove_nucleus(image1, blue_mask1):  # image, blue_mask, x, y
    """Inpaint the nucleus region out of the image using LaMa.

    The nucleus mask is dilated so the inpainting also covers border pixels,
    then SimpleLama fills the masked area.

    Returns:
        (expandedmask, result_cv2): the dilated mask and the nucleus-free
        BGR image.
    """
    kernel = np.ones((5, 5), np.uint8)  # Adjust the kernel size as needed
    expandedmask = cv2.dilate(blue_mask1, kernel, iterations=1)

    inpainter = SimpleLama()
    rgb_image = Image.fromarray(cv2.cvtColor(image1, cv2.COLOR_BGR2RGB))
    mask_image = Image.fromarray(expandedmask)
    inpainted = np.array(inpainter(rgb_image, mask_image))

    return expandedmask, cv2.cvtColor(inpainted, cv2.COLOR_RGB2BGR)
260
+
261
+
262
def get_final_mask(nucleus_removed_img, blue_mask, expanded_mask):
    """Build the final three-colour segmentation (nucleus/cytoplasm/background).

    Runs GrabCut on the nucleus-free image, seeded by a near-full-image
    rectangle, to separate the cell body from the background, then paints
    the nucleus back in from blue_mask. BGR colours: [255, 0, 0] nucleus,
    [128, 0, 0] cytoplasm, [0, 0, 255] background.
    """
    # GrabCut model buffers (required by the API; contents are opaque).
    fgModel = np.zeros((1, 65), dtype="float")
    bgModel = np.zeros((1, 65), dtype="float")

    # Rectangle covering (almost) the whole image.
    rect = (1, 1, nucleus_removed_img.shape[1], nucleus_removed_img.shape[0])

    (mask, bgModel, fgModel) = cv2.grabCut(
        nucleus_removed_img,
        expanded_mask,
        rect,
        bgModel,
        fgModel,
        iterCount=20,
        mode=cv2.GC_INIT_WITH_RECT,
    )

    # Foreground (cell body) = definite or probable foreground labels.
    clean_binary_mask = np.where(
        (mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD), 1, 0
    ).astype("uint8")
    colored_segmentation_mask = np.zeros((128, 128, 3), dtype=np.uint8)
    colored_segmentation_mask[clean_binary_mask == 1] = [
        128,
        0,
        0,
    ]  # Main object (Blue)
    colored_segmentation_mask[clean_binary_mask == 0] = [0, 0, 255]  # Background (Red)
    # Restore the nucleus on top of the cell body.
    colored_segmentation_mask[blue_mask > 0] = [255, 0, 0]
    return colored_segmentation_mask
291
+
292
+
293
def lrp_main(pixel_conversion):
    """Run the full LRP explanation + segmentation pipeline over uploads/.

    For each uploaded image: compute the LRP relevance map, derive nucleus
    and cytoplasm masks (K-Means + GrabCut + LaMa inpainting), measure cell
    descriptors, and render a per-image table. A sample of images (chosen by
    select_sample_images) is returned base64-encoded for the frontend.

    Args:
        pixel_conversion: pixel-to-physical-unit factor forwarded to
            calculate_cell_descriptors.

    Returns:
        Dict of base64 strings keyed image{i}/inter{i}/mask{i}/table{i}.
    """
    i = 0  # index of the current image, compared against selected_indices
    return_dict_count = 1
    return_dict = {}
    selected_indices = select_sample_images()
    resized_shape = (128, 128)
    # CSV header row + one row appended per processed image.
    cell_descriptors = [
        ["Image Name", "Nucleus Area", "Cytoplasm Area", "Nucleus to Cytoplasm Ratio"]
    ]

    for imagefile in os.listdir(preprocessed_folder):
        # Skip macOS archive metadata and hidden/underscore-prefixed files.
        if (
            "MACOSX".lower() in imagefile.lower()
            or "." == imagefile[0]
            or "_" == imagefile[0]
        ):
            print(imagefile)
            continue
        # NOTE(review): assumes every upload is (or maps to) a .png file.
        image_path = (
            preprocessed_folder + os.path.splitext(imagefile)[0].lower() + ".png"
        )
        intermediate_path = (
            intermediate_folder
            + os.path.splitext(imagefile)[0].lower()
            + "_heatmap.png"
        )
        save_path = (
            segmentation_folder + os.path.splitext(imagefile)[0].lower() + "_mask.png"
        )
        table_path = (
            tables_folder + os.path.splitext(imagefile)[0].lower() + "_table.png"
        )

        image = cv2.imread(image_path)
        original_shape = image.shape

        image = cv2.resize(image, (128, 128))

        # MODEL SECTION STARTS FOR NEW MODEL
        # TODO(review): the model is rebuilt and the checkpoint reloaded for
        # every image; hoisting this out of the loop would speed things up.
        vgg16 = torchvision.models.vgg16(pretrained=True)
        new_avgpool = nn.AdaptiveAvgPool2d(output_size=(4, 4))
        vgg16.avgpool = new_avgpool
        classifier_list = [
            nn.Linear(8192, vgg16.classifier[0].out_features)
        ]  # vgg16.classifier[0].out_features = 4096
        classifier_list += list(vgg16.classifier.children())[
            1:-1
        ]  # Remove the first and last layers
        classifier_list += [
            nn.Linear(vgg16.classifier[6].in_features, 2)
        ]  # vgg16.classifier[6].in_features = 4096
        vgg16.classifier = nn.Sequential(
            *classifier_list
        )  # Replace the model classifier

        PATH = "herlev_best_adam_vgg16_modified12_final.pth"
        checkpoint = torch.load(PATH, map_location=torch.device("cpu"))
        vgg16.load_state_dict(checkpoint)
        # vgg16.to(torch.device('cuda'))
        vgg16.eval()

        # Flatten features + converted classifier into one LRP-friendly list.
        layers = list(vgg16._modules["features"]) + toconv(
            list(vgg16._modules["classifier"])
        )
        L = len(layers)
        # MODEL SECTION ENDS

        R = get_LRP_heatmap(image, L, layers, imgclasses, intermediate_path)

        rel_grouping = get_nucleus_mask_for_graphcut(R)

        nucleus_segment, clean_binary_mask = segment_nucleus(image, rel_grouping)

        expanded_mask, nucleus_removed_image = remove_nucleus(image, clean_binary_mask)

        colored_segmentation_mask = get_final_mask(
            nucleus_removed_image, clean_binary_mask, expanded_mask
        )

        cv2.imwrite(save_path, colored_segmentation_mask)

        nucleus_area, cytoplasm_area, ratio = calculate_cell_descriptors(
            original_shape, resized_shape, pixel_conversion, colored_segmentation_mask
        )
        cell_descriptors.append(
            [
                os.path.splitext(imagefile)[0].lower(),
                nucleus_area,
                cytoplasm_area,
                ratio,
            ]
        )

        create_cell_descriptors_table(table_path, nucleus_area, cytoplasm_area, ratio)

        # Only a sampled subset of images is shipped back to the UI.
        if i in selected_indices:
            return_dict[f"image{return_dict_count}"] = str(
                base64.b64encode(open(image_path, "rb").read()).decode("utf-8")
            )
            return_dict[f"inter{return_dict_count}"] = str(
                base64.b64encode(open(intermediate_path, "rb").read()).decode("utf-8")
            )
            return_dict[f"mask{return_dict_count}"] = str(
                base64.b64encode(open(save_path, "rb").read()).decode("utf-8")
            )
            return_dict[f"table{return_dict_count}"] = str(
                base64.b64encode(open(table_path, "rb").read()).decode("utf-8")
            )
            return_dict_count += 1

        i += 1

    # Persist all measured descriptors for the zip download endpoint.
    with open(cell_descriptors_path, "w", newline="") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerows(cell_descriptors)

    return return_dict
preprocessing_pipeline.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import os
4
+
5
# Spatial size (pixels) every input image is resized to in read_image.
# 224x224 — presumably the CNN's expected input size; confirm against the model.
image_height=224
image_width=224
7
+
8
+
9
def read_image(image_path,image_height,image_width):
    """Load an image from disk, resize it, and swap its channel order.

    Parameters
    ----------
    image_path : str
        Path of the image file to load.
    image_height, image_width : int
        Target size of the returned image.

    Returns
    -------
    numpy.ndarray
        Resized image with first/third channels swapped. cv2.imread
        yields BGR, so the result is RGB-ordered.

    Raises
    ------
    FileNotFoundError
        If the file is missing or cannot be decoded.
    """
    image=cv2.imread(image_path)
    if image is None:
        # cv2.imread never raises; it silently returns None on failure,
        # which previously crashed later with an opaque cv2.resize error.
        raise FileNotFoundError(f"Could not read image: {image_path}")
    # cv2.resize expects dsize as (width, height); the original passed
    # (height, width), which only worked because the target is square.
    image=cv2.resize(image, (image_width,image_height))
    # COLOR_BGR2RGB states the actual intent (imread output is BGR);
    # it is numerically identical to the COLOR_RGB2BGR flag used before.
    image=cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    return image
15
+
16
def min_max_normalization (image):
    """Linearly scale pixel values into the [0, 1] range.

    Parameters
    ----------
    image : numpy.ndarray
        Input image of any numeric dtype.

    Returns
    -------
    numpy.ndarray
        float32 array where the input minimum maps to 0.0 and the input
        maximum maps to 1.0. A constant (flat) image returns all zeros
        instead of the NaNs the previous 0/0 division produced.
    """
    float_image = image.astype(np.float32)

    # Calculate the minimum and maximum pixel values
    min_value = np.min(float_image)
    max_value = np.max(float_image)

    value_range = max_value - min_value
    if value_range == 0:
        # Guard against division by zero for flat images.
        return np.zeros_like(float_image)

    # Perform Min-Max normalization
    normalized_image = (float_image - min_value) / value_range

    return normalized_image
27
+
28
+
29
def apply_histogram_normalization(image):
    """Stretch each color channel independently to the full [0, 255] range.

    Every channel is min-max normalized on its own via cv2.normalize and
    the results are merged back into a single 3-channel image.
    """
    b_channel, g_channel, r_channel = cv2.split(image)

    stretched = tuple(
        cv2.normalize(channel, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
        for channel in (b_channel, g_channel, r_channel)
    )

    return cv2.merge(stretched)
41
+
42
+
43
def remove_noise(image):
    """Suppress impulse (salt-and-pepper) noise with a 5x5 median filter."""
    return cv2.medianBlur(image, 5)
48
+
49
def adaptive_gamma_correction(image, gamma=1.5):
    """Apply per-channel gamma correction, then rescale the image to [0, 1].

    NOTE(review): despite the name, nothing here is adaptive — one fixed
    exponent is applied to every channel. The exponent is now exposed as
    a parameter (default 1.5, matching the previously hard-coded value)
    so callers can tune it without a code change.

    Parameters
    ----------
    image : numpy.ndarray
        3-channel image with values in [0, 255].
    gamma : float, optional
        Gamma exponent; values > 1 brighten the image.

    Returns
    -------
    numpy.ndarray
        float32 image min-max normalized into [0, 1].
    """
    def apply_adaptive_gamma_correction(channel, gamma):
        # Power-law transform on the [0, 1]-scaled channel, then stretch
        # the result back across [0, 255] before the final normalization.
        corrected_channel = np.power((channel / 255.0), 1.0 / gamma)
        return cv2.normalize(corrected_channel, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)

    b_channel, g_channel, r_channel = cv2.split(image)

    gamma_corrected_b = apply_adaptive_gamma_correction(b_channel, gamma)
    gamma_corrected_g = apply_adaptive_gamma_correction(g_channel, gamma)
    gamma_corrected_r = apply_adaptive_gamma_correction(r_channel, gamma)

    gamma_corrected_image = cv2.merge((gamma_corrected_b, gamma_corrected_g, gamma_corrected_r))

    # Final pass brings the merged image into the [0, 1] range.
    gamma_corrected_image = min_max_normalization(gamma_corrected_image)

    return gamma_corrected_image
70
+
71
+
72
def preprocess_image(img_path):
    """Run the full preprocessing chain on one image file.

    Order: read/resize -> per-channel histogram stretch -> median denoise
    -> gamma correction. The gamma step returns values in [0, 1], so the
    result is scaled back up to the [0, 255] range before returning.
    """
    raw = read_image(img_path, image_height, image_width)
    equalized = apply_histogram_normalization(raw)
    denoised = remove_noise(equalized)
    corrected = adaptive_gamma_correction(denoised)
    return corrected * 255
requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Flask==3.0.3
2
+ Flask_Cors==4.0.0
3
+ matplotlib==3.8.1
4
+ # matplotlib==3.8.4
5
+ numpy==1.26.4
6
+ opencv_python==4.9.0.80
7
+ pandas==2.2.2
8
+ Pillow==9.5.0
9
+ scikit_learn==1.3.2
10
+ simple_lama_inpainting==0.1.2
11
+ tensorflow==2.15.0
12
+ torch==2.2.2
13
+ torchvision==0.17.2
utils.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import zipfile
3
+ import random
4
+ import pandas as pd
5
+ import matplotlib.pyplot as plt
6
+ import shutil
7
+ import numpy as np
8
+ import cv2
9
+
10
# Working paths shared across the pipeline. The Flask app deletes and
# recreates these folders on every upload (see app.py submit_data).
preprocessed_folder = "uploads/"  # uploaded images; zip uploads are extracted here
intermediate_folder = "heatmaps/"  # XAI heatmap images
segmentation_folder = "segmentations/"  # colored segmentation masks
tables_folder = "tables/"  # rendered per-image metric tables
cell_descriptors_path = "cell_descriptors/cell_descriptors.csv"  # aggregated metrics CSV
zip_file_path = "outputs.zip"  # archive served by the /api/zip endpoint
16
+
17
+
18
def select_sample_images():
    """Flatten the uploads folder and pick up to five image indices.

    Steps:
      1. If a .zip was uploaded, extract it in place and delete the
         archive (only the first zip found is handled).
      2. Move files out of any extracted subfolder into the uploads
         folder (macOS metadata folders are skipped), then remove the
         emptied subfolders.
      3. Count the image files and return a sorted sample of at most
         five indices into that collection.
    """
    # -- 1. Extract the first uploaded zip archive, if any -------------
    for entry in os.listdir(preprocessed_folder):
        if entry.endswith(".zip"):
            archive_path = os.path.join(preprocessed_folder, entry)
            with zipfile.ZipFile(archive_path, "r") as archive:
                archive.extractall(path=preprocessed_folder)
            os.remove(archive_path)
            break

    # -- 2. Flatten extracted subfolders into the uploads folder -------
    subfolders = [
        entry
        for entry in os.listdir(preprocessed_folder)
        if os.path.isdir(os.path.join(preprocessed_folder, entry))
    ]
    for subfolder in subfolders:
        if "MACOSX" in subfolder:
            continue  # skip macOS resource-fork folders bundled in zips
        subfolder_path = os.path.join(preprocessed_folder, subfolder)
        for file_name in os.listdir(subfolder_path):
            shutil.move(
                os.path.join(subfolder_path, file_name),
                os.path.join(preprocessed_folder, file_name),
            )
    # Remove the now-empty subfolders (rmdir fails on non-empty ones).
    for subfolder in subfolders:
        if "MACOSX" in subfolder:
            continue
        subfolder_path = os.path.join(preprocessed_folder, subfolder)
        try:
            os.rmdir(subfolder_path)
        except OSError as e:
            print(f"Error deleting folder '{subfolder}': {e}")

    # -- 3. Count images and decide which indices to show --------------
    image_extensions = [
        ".jpg",
        ".jpeg",
        ".png",
        ".gif",
        ".bmp",
    ]  # Add more extensions if needed
    image_count = sum(
        1
        for file_name in os.listdir(preprocessed_folder)
        if any(file_name.lower().endswith(ext) for ext in image_extensions)
    )

    # More than five images: show a random (sorted) sample of five;
    # otherwise show everything.
    if image_count > 5:
        indices = random.sample(range(image_count), 5)
        indices.sort()
        return indices
    return list(range(image_count))
77
+
78
+
79
def create_cell_descriptors_table(table_path, nucleus_area, cytoplasm_area, ratio):
    """Render the three cell metrics as a small table image.

    Parameters
    ----------
    table_path : str
        Destination file for the rendered figure.
    nucleus_area, cytoplasm_area, ratio : float
        Values displayed (rounded to five decimals) beside their
        metric names.
    """
    metric_names = ["Nucleus Area", "Cytoplasm Area", "N:C Ratio"]
    metric_values = [nucleus_area, cytoplasm_area, ratio]
    table_df = pd.DataFrame(
        {
            "Metric": metric_names,
            "Value": [str(round(value, 5)) for value in metric_values],
        }
    )

    # One background color per value cell; grey for the metric-name column.
    value_colors = ["lightblue", "lightgreen", "lightyellow"]
    cell_colors = [["lightgrey", color] for color in value_colors]

    fig = plt.figure(figsize=(2, 2))
    rendered_table = plt.table(
        cellText=table_df.values,
        colLabels=table_df.columns,
        loc="center",
        cellLoc="center",
        cellColours=cell_colors,
    )

    # Fixed small font and taller rows so the text fits the tiny figure.
    rendered_table.auto_set_font_size(False)
    rendered_table.set_fontsize(6)
    rendered_table.scale(1, 2)

    plt.axis("off")
    fig.tight_layout()
    fig.savefig(table_path)
    plt.close()
123
+
124
+
125
def delete_folders(folder_names):
    """Best-effort recursive removal of each named folder tree.

    Failures are reported to stdout but never raised, so request
    handling can always proceed with a clean slate.
    """
    def _remove(folder_name):
        # Missing folders are expected on the very first request.
        try:
            shutil.rmtree(folder_name)
        except FileNotFoundError:
            print(f"Folder does not exist: {folder_name}")
        except Exception as e:
            print(f"Error deleting folder {folder_name}: {e}")

    for target in folder_names:
        _remove(target)
134
+
135
+
136
def create_folders(folder_names):
    """Create each named folder (with parents), reporting any failures.

    An already-existing folder is only reported, never treated as a
    fatal error.
    """
    def _make(folder_name):
        try:
            os.makedirs(folder_name)
        except FileExistsError:
            print(f"Folder already exists: {folder_name}")
        except Exception as e:
            print(f"Error creating folder {folder_name}: {e}")

    for target in folder_names:
        _make(target)
145
+
146
+
147
def calculate_cell_descriptors(
    original_shape, resized_shape, pixel_conversion, segmentation_mask
):
    """Compute nucleus area, cytoplasm area, and the N:C ratio from a mask.

    Parameters
    ----------
    original_shape : tuple
        (height, width) of the image before resizing.
    resized_shape : tuple
        (height, width) of the resized image the mask was produced at.
    pixel_conversion : float
        Physical length of one original-image pixel; areas are returned
        in units of pixel_conversion**2.
    segmentation_mask : numpy.ndarray
        Color mask where pixels equal to [255, 0, 0] mark the nucleus
        and [128, 0, 0] mark the cytoplasm (channel order depends on the
        mask producer — confirm with get_final_mask).

    Returns
    -------
    tuple of (float, float, float)
        (nucleus_area, cytoplasm_area, ratio); ratio is NaN when no
        cytoplasm was segmented.
    """
    # Physical area represented by one pixel of the resized image.
    area_of_pixel = (
        original_shape[0]
        * original_shape[1]
        * (pixel_conversion**2)
        / (resized_shape[0] * resized_shape[1])
    )

    # Binary masks for each class, picked out by exact color match.
    binary_nucleus = np.zeros(resized_shape, dtype=np.uint8)
    binary_cytoplasm = np.zeros(resized_shape, dtype=np.uint8)
    binary_nucleus[(segmentation_mask == [255, 0, 0]).all(axis=2)] = 1
    binary_cytoplasm[(segmentation_mask == [128, 0, 0]).all(axis=2)] = 1

    # Find contours in the binary masks
    nucleus_contours, _ = cv2.findContours(
        binary_nucleus, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )  # nucleus
    cytoplasm_contours, _ = cv2.findContours(
        binary_cytoplasm, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )  # cytoplasm

    # Calculate area for nucleus and cytoplasm
    nucleus_area = sum(cv2.contourArea(contour) for contour in nucleus_contours)
    cytoplasm_area = sum(cv2.contourArea(contour) for contour in cytoplasm_contours)
    if cytoplasm_area == 0:
        # np.nan replaces np.NaN — the upper-case alias was removed in
        # NumPy 2.0 and both name the same IEEE-754 value.
        ratio = np.nan
    else:
        ratio = nucleus_area / cytoplasm_area

    return nucleus_area * area_of_pixel, cytoplasm_area * area_of_pixel, ratio
179
+
180
+
181
def create_zip_file():
    """Bundle heatmaps, segmentations, and the descriptors CSV into outputs.zip.

    Files from each folder keep their folder name as an archive prefix;
    the CSV is stored flat at the archive root.
    """
    with zipfile.ZipFile(zip_file_path, "w") as archive:
        # Walk each output folder and add every file it contains.
        for folder in (intermediate_folder, segmentation_folder):
            for root, _dirs, files in os.walk(folder):
                for file_name in files:
                    file_path = os.path.join(root, file_name)
                    # Path relative to the folder's parent keeps the
                    # folder name itself inside the archive.
                    arcname = os.path.relpath(file_path, os.path.join(folder, ".."))
                    archive.write(file_path, arcname=arcname)

        # Add the CSV file at the top level of the archive.
        archive.write(
            cell_descriptors_path,
            arcname=os.path.basename(cell_descriptors_path),
        )
xception_model_81.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc8dc9524a83d4e90bd0b854639e7ec87e8ba6c09b4f6d11010baf6fefde88ac
3
+ size 90259112