Reboot2004 committed on
Commit
7bff15c
·
verified ·
1 Parent(s): 0aa24e5

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +55 -8
  2. cam_pipeline.py +150 -30
  3. lrp_pipeline_2.py +9 -4
app.py CHANGED
@@ -2,8 +2,9 @@ from flask import Flask, jsonify, request, send_file, render_template
2
  from flask_cors import CORS
3
  from lrp_pipeline_2 import lrp_main
4
  from utils import create_folders, delete_folders, create_zip_file
5
- from cam_pipeline import cam_main
6
  import os
 
7
 
8
  app = Flask(__name__)
9
  CORS(app)
@@ -36,22 +37,68 @@ def submit_data():
36
  # then upload the submitted file(s)
37
  file = list(dict(request.files).values())[0]
38
  print(file)
39
- file.save(os.path.join(uploads_dir, file.filename)) # Save to 'uploads' directory
 
40
 
41
  # Process data here
42
- return jsonify({"message": "Data received successfully!"})
 
 
 
43
 
44
 
45
  @app.route("/api/inputform", methods=["POST"])
46
  def submit_form():
47
  data = dict(request.json) # format of data: {'model': 'VGGNet', 'xaiMethod': 'LRP'}
48
  print(data)
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  if "LRP" in data["xaiMethod"]:
50
- # pixel_ratio = data['pixelRatio']
51
- return lrp_main(float(data["magval"])) # pixel_ratio
 
 
 
 
 
 
 
52
  elif "GradCAM++" in data["xaiMethod"]:
53
- # pixel_ratio = data['pixelRatio']
54
- return cam_main(float(data["magval"])) # pixel_ratio
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
 
56
 
57
  @app.route("/api/zip", methods=["GET"])
@@ -61,4 +108,4 @@ def get_csv():
61
 
62
 
63
  if __name__ == "__main__":
64
- app.run(debug=True)
 
2
  from flask_cors import CORS
3
  from lrp_pipeline_2 import lrp_main
4
  from utils import create_folders, delete_folders, create_zip_file
5
+ from cam_pipeline import cam_main, cam_process_single_image
6
  import os
7
+ import base64
8
 
9
  app = Flask(__name__)
10
  CORS(app)
 
37
  # then upload the submitted file(s)
38
  file = list(dict(request.files).values())[0]
39
  print(file)
40
+ file_path = os.path.join(uploads_dir, file.filename)
41
+ file.save(file_path) # Save to 'uploads' directory
42
 
43
  # Process data here
44
+ return jsonify({
45
+ "message": "Data received successfully!",
46
+ "file_path": file_path
47
+ })
48
 
49
 
50
@app.route("/api/inputform", methods=["POST"])
def submit_form():
    """Run the selected XAI pipeline on the most recently uploaded image.

    Expects JSON like ``{'model': 'VGGNet', 'xaiMethod': 'LRP', 'magval': '0.2'}``.
    Returns a JSON payload of base64-encoded result images, or an explicit
    400 error for missing images, a bad magnification value, or an unknown method.
    """
    data = dict(request.json)  # format of data: {'model': 'VGGNet', 'xaiMethod': 'LRP'}
    print(data)

    # Bug fix: validate magval up front instead of letting float() raise a 500.
    try:
        magval = float(data["magval"])
    except (KeyError, TypeError, ValueError):
        return jsonify({"error": "Missing or invalid 'magval' value"}), 400

    # Check if we have images in the uploads directory
    uploads_dir = "uploads"
    image_files = [f for f in os.listdir(uploads_dir)
                   if f.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp'))
                   and not f.startswith('.')]

    if not image_files:
        return jsonify({"error": "No images found in uploads directory"}), 400

    # Process the first image (or all images based on your requirements)
    image_path = os.path.join(uploads_dir, image_files[0])

    def _b64(path):
        # Read + base64-encode with the file handle properly closed
        # (the previous open(...).read() calls leaked file handles).
        with open(path, "rb") as fh:
            return base64.b64encode(fh.read()).decode("utf-8")

    xai_method = data.get("xaiMethod", "")

    if "LRP" in xai_method:
        result_dict = lrp_main(magval)
        # Extract relevant results to show in the frontend
        return jsonify({
            "success": True,
            "summary": f"LRP analysis completed with magnification {data['magval']}",
            "details": "Nucleus and cytoplasm segmented successfully",
            "results": result_dict
        })

    elif "GradCAM++" in xai_method:
        # Process single image with GradCAM++
        result_dict, output_paths = cam_process_single_image(image_path, magval)

        # Read and encode the output files for display
        original_image = _b64(image_path)
        heatmap_image = _b64(output_paths["heatmap"])
        mask_image = _b64(output_paths["mask"])
        table_image = _b64(output_paths["table"])

        # include predicted class from the pipeline result
        predicted_class = result_dict.get("class1")

        return jsonify({
            "success": True,
            "summary": f"GradCAM++ analysis completed with magnification {data['magval']}",
            "details": "Nucleus and cytoplasm segmented successfully",
            "classification": predicted_class,
            "results": {
                "originalImage": original_image,
                "heatmapImage": heatmap_image,
                "maskImage": mask_image,
                "tableImage": table_image
            }
        })

    # Bug fix: previously an unrecognized method fell off the end and returned
    # None, which Flask turns into a 500. Report it explicitly instead.
    return jsonify({"error": f"Unknown XAI method: {xai_method!r}"}), 400
102
 
103
 
104
  @app.route("/api/zip", methods=["GET"])
 
108
 
109
 
110
  if __name__ == "__main__":
111
+ app.run(host="0.0.0.0",debug=True)
cam_pipeline.py CHANGED
@@ -7,7 +7,8 @@ import cv2
7
  from sklearn.mixture import GaussianMixture
8
  import base64
9
  import csv
10
- from tensorflow.keras.applications.efficientnet import preprocess_input
 
11
  from tensorflow.keras.preprocessing import image
12
  from tensorflow.keras.models import Model
13
  from tensorflow.keras.layers import Lambda
@@ -16,6 +17,7 @@ from utils import (
16
  select_sample_images,
17
  create_cell_descriptors_table,
18
  calculate_cell_descriptors,
 
19
  )
20
 
21
  preprocessed_folder = 'uploads/'
@@ -24,14 +26,19 @@ intermediate_folder = 'heatmaps/'
24
  tables_folder = "tables/"
25
  cell_descriptors_path = "cell_descriptors/cell_descriptors.csv"
26
  saved_model_path = 'xception_model_81.h5' # Replace with the path to your saved model
 
27
  model = load_model(saved_model_path)
28
 
 
 
 
29
 
30
  def preprocess_image(img_path):
31
  img = image.load_img(img_path, target_size=(224, 224))
32
  x = image.img_to_array(img)
33
  x = np.expand_dims(x, axis=0)
34
- x = preprocess_input(x)
 
35
  return x
36
 
37
  def generate_grad_cam_plus_plus(img_path, model, last_conv_layer_name, classifier_layer_names):
@@ -255,31 +262,31 @@ def create_background(img,heatmap,labels):
255
 
256
  return output_image
257
 
258
- # def remove_nucleus(image, blue_mask):
259
- # #expand the nucleus mask
260
- # image1 = cv2.resize(image, (224,224))
261
- # blue_mask1 = cv2.resize(blue_mask, (224,224))
262
- # kernel = np.ones((5, 5), np.uint8) # Adjust the kernel size as needed
263
- # expandedmask = cv2.dilate(blue_mask1, kernel, iterations=1)
264
- # simple_lama = SimpleLama()
265
- # image_pil = Image.fromarray(cv2.cvtColor(image1, cv2.COLOR_BGR2RGB))
266
- # mask_pil = Image.fromarray(expandedmask)
267
- # result = simple_lama(image_pil, mask_pil)
268
- # result_cv2 = np.array(result)
269
- # result_cv2 = cv2.cvtColor(result_cv2, cv2.COLOR_RGB2BGR)
270
- # # result_cv2 = cv2.resize(result_cv2, (x,y))
271
- # return expandedmask, result_cv2
272
-
273
- # def get_nucleus_mask(nucleus): #image_path, x, y
274
- # # nucleus = cv2.imread(nucleus)
275
- # # Convert image to HSV color space
276
- # hsv_image = cv2.cvtColor(nucleus, cv2.COLOR_BGR2HSV)
277
- # # Define lower and upper bounds for blue color in HSV
278
- # lower_blue = np.array([100, 50, 50])
279
- # upper_blue = np.array([130, 255, 255])
280
- # # Create a mask for blue color
281
- # blue_mask = cv2.inRange(hsv_image, lower_blue, upper_blue)
282
- # return blue_mask #, image
283
 
284
  def save_heatmap(heatmap,img_path,heatmap_path):
285
  img = cv2.imread(img_path)
@@ -349,6 +356,7 @@ def cam_main(pixel_conversion):
349
 
350
  pred_class = model.predict(preprocess_image(image_path))
351
  pred_class = pred_class.argmax(axis=1)[0]
 
352
 
353
  # print(pred_class)
354
 
@@ -363,9 +371,9 @@ def cam_main(pixel_conversion):
363
 
364
  nucleus= create_nucelus(image,colored_segmentation_mask)
365
 
366
- # blue_mask = get_nucleus_mask(nucleus)
367
 
368
- # expandedmask, result_cv2 = remove_nucleus(image, blue_mask)
369
 
370
  background=create_background(image,heatmap,labels)
371
 
@@ -380,7 +388,7 @@ def cam_main(pixel_conversion):
380
  elif original_color == (0,0,0):
381
  combined_mask[i, j] = np.array((128,0,0))
382
 
383
- # combined_mask = cv2.resize(combined_mask, (224,224))
384
 
385
  cv2.imwrite(save_path,combined_mask)
386
 
@@ -412,6 +420,9 @@ def cam_main(pixel_conversion):
412
  return_dict[f"table{return_dict_count}"] = str(
413
  base64.b64encode(open(table_path, "rb").read()).decode("utf-8")
414
  )
 
 
 
415
  return_dict_count += 1
416
 
417
  count+=1
@@ -429,3 +440,112 @@ def cam_main(pixel_conversion):
429
 
430
 
431
  # cam_main(0.2)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  from sklearn.mixture import GaussianMixture
8
  import base64
9
  import csv
10
+ from simple_lama_inpainting import SimpleLama
11
+ from tensorflow.keras.applications.xception import preprocess_input as xception_preprocess
12
  from tensorflow.keras.preprocessing import image
13
  from tensorflow.keras.models import Model
14
  from tensorflow.keras.layers import Lambda
 
17
  select_sample_images,
18
  create_cell_descriptors_table,
19
  calculate_cell_descriptors,
20
+ create_folders,
21
  )
22
 
23
  preprocessed_folder = 'uploads/'
 
26
  tables_folder = "tables/"
27
  cell_descriptors_path = "cell_descriptors/cell_descriptors.csv"
28
  saved_model_path = 'xception_model_81.h5' # Replace with the path to your saved model
29
+
30
  model = load_model(saved_model_path)
31
 
32
+ # add mapping from model output index to human-readable class
33
+ imgclasses = {0: "abnormal", 1: "normal"}
34
+
35
 
36
def preprocess_image(img_path):
    """Load an image from disk and prepare it for the Xception model.

    Resizes to 224x224, converts to a float array, adds a batch axis, and
    applies Xception's own input scaling so it matches the saved model.

    Returns an array of shape (1, 224, 224, 3).
    """
    pil_img = image.load_img(img_path, target_size=(224, 224))
    batch = np.expand_dims(image.img_to_array(pil_img), axis=0)
    # Use Xception preprocessing to match the saved Xception model
    return xception_preprocess(batch)
43
 
44
  def generate_grad_cam_plus_plus(img_path, model, last_conv_layer_name, classifier_layer_names):
 
262
 
263
  return output_image
264
 
265
def remove_nucleus(image, blue_mask):
    """Inpaint (erase) the nucleus region of *image* with LaMa.

    The nucleus mask is dilated slightly so the inpainting fully covers the
    stained region. Both inputs are resized to 224x224 first.

    Returns (expanded_mask, inpainted_bgr_image).
    """
    resized_img = cv2.resize(image, (224, 224))
    resized_mask = cv2.resize(blue_mask, (224, 224))
    # Expand the nucleus mask; adjust the kernel size as needed
    dilate_kernel = np.ones((5, 5), np.uint8)
    expanded_mask = cv2.dilate(resized_mask, dilate_kernel, iterations=1)

    inpainter = SimpleLama()
    rgb_pil = Image.fromarray(cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB))
    restored = inpainter(rgb_pil, Image.fromarray(expanded_mask))

    restored_bgr = cv2.cvtColor(np.array(restored), cv2.COLOR_RGB2BGR)
    # restored_bgr = cv2.resize(restored_bgr, (x, y))
    return expanded_mask, restored_bgr
279
+
280
def get_nucleus_mask(nucleus):  # image_path, x, y
    """Return a binary mask of the blue (nucleus) pixels in a BGR image."""
    # nucleus = cv2.imread(nucleus)
    hsv = cv2.cvtColor(nucleus, cv2.COLOR_BGR2HSV)
    # HSV bounds covering the blue stain
    blue_lo = np.array([100, 50, 50])
    blue_hi = np.array([130, 255, 255])
    return cv2.inRange(hsv, blue_lo, blue_hi)  # , image
290
 
291
  def save_heatmap(heatmap,img_path,heatmap_path):
292
  img = cv2.imread(img_path)
 
356
 
357
  pred_class = model.predict(preprocess_image(image_path))
358
  pred_class = pred_class.argmax(axis=1)[0]
359
+ class_name = imgclasses.get(pred_class, str(pred_class))
360
 
361
  # print(pred_class)
362
 
 
371
 
372
  nucleus= create_nucelus(image,colored_segmentation_mask)
373
 
374
+ blue_mask = get_nucleus_mask(nucleus)
375
 
376
+ expandedmask, result_cv2 = remove_nucleus(image, blue_mask)
377
 
378
  background=create_background(image,heatmap,labels)
379
 
 
388
  elif original_color == (0,0,0):
389
  combined_mask[i, j] = np.array((128,0,0))
390
 
391
+ combined_mask = cv2.resize(combined_mask, (224,224))
392
 
393
  cv2.imwrite(save_path,combined_mask)
394
 
 
420
  return_dict[f"table{return_dict_count}"] = str(
421
  base64.b64encode(open(table_path, "rb").read()).decode("utf-8")
422
  )
423
+ # add predicted class so frontend can show it
424
+ return_dict[f"class{return_dict_count}"] = class_name
425
+
426
  return_dict_count += 1
427
 
428
  count+=1
 
440
 
441
 
442
  # cam_main(0.2)
443
+
444
+
445
# ================= Single Image Entry Point =================
def cam_process_single_image(image_path: str, pixel_conversion: float):
    """
    Run the CAM pipeline on a single image file path.

    Inputs:
    - image_path: str, path to an image file (jpg/png/bmp...)
    - pixel_conversion: float, microns-per-pixel (or similar) conversion used in area calculation

    Returns:
    - return_dict: dict with base64-encoded 'image1', 'inter1', 'mask1', 'table1'
      plus the predicted 'class1' label
    - output_paths: dict with file paths for 'heatmap', 'mask', 'table'

    Raises:
    - ValueError if the image file cannot be read by OpenCV.
    """
    # Ensure output folders exist
    folder_names = [
        "uploads",
        "heatmaps",
        "segmentations",
        "tables",
        "cell_descriptors",
    ]
    create_folders(folder_names)

    # Derive output file paths
    base_name = os.path.splitext(os.path.basename(image_path))[0].lower()
    intermediate_path = os.path.join(intermediate_folder, f"{base_name}_heatmap.png")
    save_path = os.path.join(segmentation_folder, f"{base_name}_mask.png")
    table_path = os.path.join(tables_folder, f"{base_name}_table.png")

    # Generate heatmap
    heatmap = generate_grad_cam_plus_plus(image_path, model, 'block14_sepconv2_act', ['dense_1'])
    save_heatmap(heatmap, image_path, intermediate_path)

    # Predict class to choose segmentation strategy
    pred_class = model.predict(preprocess_image(image_path)).argmax(axis=1)[0]
    class_name = imgclasses.get(pred_class, str(pred_class))
    if pred_class == 0:
        labels, colored_segmentation_mask = GMM_abnormal_method(heatmap)
    else:
        labels, colored_segmentation_mask = GMM_normal_method(heatmap)

    # Build combined mask
    image_cv = cv2.imread(image_path)
    if image_cv is None:
        raise ValueError(f"Can't read image: {image_path}")
    original_shape = image_cv.shape
    image_resized = cv2.resize(image_cv, (224, 224))

    nucleus = create_nucelus(image_resized, colored_segmentation_mask)
    background = create_background(image_resized, heatmap, labels)
    combined_mask = background & nucleus

    # Normalize colors to expected values. Vectorized (was an O(H*W) Python
    # pixel loop): pixels equal to (128,0,0) become (255,0,0), and pure-black
    # pixels become (128,0,0). Both masks are computed before either write so
    # the two rewrites cannot interact.
    was_dark = np.all(combined_mask == (128, 0, 0), axis=-1)
    was_black = np.all(combined_mask == (0, 0, 0), axis=-1)
    combined_mask[was_dark] = (255, 0, 0)
    combined_mask[was_black] = (128, 0, 0)

    cv2.imwrite(save_path, combined_mask)

    # Compute descriptors and save table and CSV
    resized_shape = (224, 224)
    nucleus_area, cytoplasm_area, ratio = calculate_cell_descriptors(
        original_shape, resized_shape, pixel_conversion, combined_mask
    )

    # Save table image
    create_cell_descriptors_table(table_path, nucleus_area, cytoplasm_area, ratio)

    # Save CSV (header + single row)
    cell_descriptors = [
        ["Image Name", "Nucleus Area", "Cytoplasm Area", "Nucleus to Cytoplasm Ratio"],
        [base_name, nucleus_area, cytoplasm_area, ratio],
    ]
    with open(cell_descriptors_path, "w", newline="") as csv_file:
        csv.writer(csv_file).writerows(cell_descriptors)

    def _b64(path):
        # Encode with the file handle closed (open(...).read() leaked handles).
        with open(path, "rb") as fh:
            return base64.b64encode(fh.read()).decode("utf-8")

    # Build return dict with base64-encoded artifacts and class label
    return_dict = {
        "image1": _b64(image_path),
        "inter1": _b64(intermediate_path),
        "mask1": _b64(save_path),
        "table1": _b64(table_path),
        "class1": class_name,
    }

    output_paths = {"heatmap": intermediate_path, "mask": save_path, "table": table_path}
    return return_dict, output_paths
537
+
538
+
539
if __name__ == "__main__":
    import argparse

    cli = argparse.ArgumentParser(description="Run Grad-CAM++ pipeline on a single image.")
    cli.add_argument("--image", "-i", required=True, help="Path to the input image file")
    cli.add_argument("--magval", "-m", required=True, type=float, help="Pixel conversion value (e.g., 0.2)")
    opts = cli.parse_args()

    # Run the single-image pipeline and report where the artifacts were written.
    result, paths = cam_process_single_image(opts.image, opts.magval)
    print("Processing complete. Outputs:")
    print(f" Heatmap: {paths['heatmap']}")
    print(f" Segmentation: {paths['mask']}")
    print(f" Table: {paths['table']}")
lrp_pipeline_2.py CHANGED
@@ -94,8 +94,10 @@ def get_LRP_heatmap(image, L, layers, imgclasses, intermediate_path):
94
  for i in ind[:2]:
95
  print("%20s (%3d): %6.3f" % (imgclasses[i], i, scores[i]))
96
 
 
 
97
  T = torch.FloatTensor(
98
- (1.0 * (np.arange(2) == ind[0]).reshape([1, 2, 1, 1]))
99
  ) # SET FOR THE HIGHEST SCORE CLASS
100
  R = [None] * L + [(A[-1] * T).data]
101
  for l in range(1, L)[::-1]:
@@ -139,7 +141,7 @@ def get_LRP_heatmap(image, L, layers, imgclasses, intermediate_path):
139
  heatmap(
140
  np.array(R[0][0].cpu()).sum(axis=0), 2, 2, intermediate_path
141
  ) # HEATMAPPING TO SEE LRP MAPS WITH NEW RULE
142
- return R[0][0].cpu()
143
 
144
 
145
  def get_nucleus_mask_for_graphcut(R):
@@ -339,7 +341,7 @@ def lrp_main(pixel_conversion):
339
  print(imagefile)
340
  continue
341
  image_path = (
342
- preprocessed_folder + os.path.splitext(imagefile)[0].lower() + ".png"
343
  )
344
  intermediate_path = (
345
  intermediate_folder
@@ -360,7 +362,7 @@ def lrp_main(pixel_conversion):
360
  image = cv2.resize(image, (128, 128))
361
 
362
  layers_copy = copy.deepcopy(layers)
363
- R = get_LRP_heatmap(image, L, layers_copy, imgclasses, intermediate_path)
364
 
365
  rel_grouping = get_nucleus_mask_for_graphcut(R)
366
 
@@ -401,6 +403,9 @@ def lrp_main(pixel_conversion):
401
  return_dict[f"table{return_dict_count}"] = str(
402
  base64.b64encode(open(table_path, "rb").read()).decode("utf-8")
403
  )
 
 
 
404
  return_dict_count += 1
405
 
406
  i += 1
 
94
  for i in ind[:2]:
95
  print("%20s (%3d): %6.3f" % (imgclasses[i], i, scores[i]))
96
 
97
+ pred_idx = int(ind[0]) # predicted class index
98
+
99
  T = torch.FloatTensor(
100
+ (1.0 * (np.arange(2) == pred_idx).reshape([1, 2, 1, 1]))
101
  ) # SET FOR THE HIGHEST SCORE CLASS
102
  R = [None] * L + [(A[-1] * T).data]
103
  for l in range(1, L)[::-1]:
 
141
  heatmap(
142
  np.array(R[0][0].cpu()).sum(axis=0), 2, 2, intermediate_path
143
  ) # HEATMAPPING TO SEE LRP MAPS WITH NEW RULE
144
+ return R[0][0].cpu(), pred_idx
145
 
146
 
147
  def get_nucleus_mask_for_graphcut(R):
 
341
  print(imagefile)
342
  continue
343
  image_path = (
344
+ preprocessed_folder + os.path.splitext(imagefile)[0].lower() + ".jpg"
345
  )
346
  intermediate_path = (
347
  intermediate_folder
 
362
  image = cv2.resize(image, (128, 128))
363
 
364
  layers_copy = copy.deepcopy(layers)
365
+ R, pred_idx = get_LRP_heatmap(image, L, layers_copy, imgclasses, intermediate_path)
366
 
367
  rel_grouping = get_nucleus_mask_for_graphcut(R)
368
 
 
403
  return_dict[f"table{return_dict_count}"] = str(
404
  base64.b64encode(open(table_path, "rb").read()).decode("utf-8")
405
  )
406
+ # include class label for frontend
407
+ return_dict[f"class{return_dict_count}"] = imgclasses.get(pred_idx, str(pred_idx))
408
+
409
  return_dict_count += 1
410
 
411
  i += 1