marc-rod committed on
Commit
634f585
·
verified ·
1 Parent(s): 113ac5d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +664 -14
app.py CHANGED
@@ -3,11 +3,661 @@ import cv2
3
  import numpy as np
4
  import zipfile
5
  import io
6
- import pandas as pd
7
  import matplotlib.pyplot as plt
8
  import plotly.express as px
9
 
10
- import toolbox_utilities as sandwich
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  # ==========================================
13
  # 1. HELPER FUNCTIONS
@@ -388,7 +1038,7 @@ with gr.Blocks(title="MRS Demo") as app:
388
  if op == "Resize":
389
  try:
390
  scale = float(param)
391
- new_raw = sandwich.resize_image(raw, scale)
392
  updated_history.append(f"Resize (scale={scale})")
393
  return new_raw, new_raw, to_interactive_plot(to_display(new_raw, 800), height=350), updated_history
394
  except:
@@ -401,21 +1051,21 @@ with gr.Blocks(title="MRS Demo") as app:
401
  img_to_mod = current_proc.copy()
402
 
403
  if op == "Gray":
404
- if img_to_mod.ndim == 3: res = sandwich.rgb2gray(img_to_mod).astype(np.uint8)
405
  else: res = img_to_mod
406
  updated_history.append("Grayscale")
407
  elif op == "Invert":
408
  res = cv2.bitwise_not(img_to_mod)
409
  updated_history.append("Invert")
410
  elif op == "Norm":
411
- norm = sandwich.Normalize(img_to_mod)
412
  res = (norm * 255).astype(np.uint8)
413
  updated_history.append("Normalize")
414
  elif op == "CLAHE":
415
- res = sandwich.apply_clahe(img_to_mod)
416
  updated_history.append("CLAHE")
417
  elif op == "Binary Mask":
418
- res = sandwich.binary_mask(img_to_mod)
419
  updated_history.append("Binary Mask")
420
  else:
421
  res = img_to_mod
@@ -446,7 +1096,7 @@ with gr.Blocks(title="MRS Demo") as app:
446
  def gen_overlay(fixed_raw, moving_raw, dx, dy, sym, opacity):
447
  """Generates Interactive Plotly Overlay."""
448
  if fixed_raw is None or moving_raw is None: return None
449
- moved = sandwich.get_symmetry(moving_raw, sym)
450
  h, w = fixed_raw.shape[:2]
451
  if fixed_raw.ndim==3: canvas = np.zeros((h, w, 3), dtype=np.uint8)
452
  else: canvas = np.zeros((h, w), dtype=np.uint8)
@@ -465,12 +1115,12 @@ with gr.Blocks(title="MRS Demo") as app:
465
 
466
  def on_auto(fp, mp, fr, mr, algo):
467
  if fp is None: return 0,0,"R0", "No Data", None, None
468
- fp_g = fp if fp.ndim==2 else sandwich.rgb2gray(fp)
469
- mp_g = mp if mp.ndim==2 else sandwich.rgb2gray(mp)
470
  try:
471
- if "Pixel" in algo: res, _ = sandwich.find_best_match_pixel(fp_g, mp_g, "x")
472
- elif "Feature" in algo: res, _ = sandwich.find_best_match_features(fp_g, mp_g, "x")
473
- elif "FFT" in algo: res, _ = sandwich.find_best_match_fft(fp_g, mp_g, "x")
474
  if res:
475
  loc = res.get('location', (0,0))
476
  if 'shift_xy' in res: loc = res['shift_xy']
@@ -508,7 +1158,7 @@ with gr.Blocks(title="MRS Demo") as app:
508
  # --- CROP & CONFIRM (Pass History) ---
509
  def apply_crop_wrapper(f_raw, m_raw, params, f_hist, m_hist):
510
  dx, dy, sym = int(params["dx"]), int(params["dy"]), params["sym"]
511
- m_moved = sandwich.get_symmetry(m_raw, sym)
512
  h_f, w_f = f_raw.shape[:2]
513
  h_m, w_m = m_moved.shape[:2]
514
  x1, y1 = max(0, dx), max(0, dy)
 
3
  import numpy as np
4
  import zipfile
5
  import io
6
+ import pandas as pd
7
  import matplotlib.pyplot as plt
8
  import plotly.express as px
9
 
10
+ import numpy as np
11
+ import matplotlib.pyplot as plt
12
+ from PIL import Image
13
+ from sklearn.preprocessing import MinMaxScaler
14
+ import os
15
+ import cv2
16
+ from mpl_toolkits.axes_grid1 import make_axes_locatable
17
+ from pathlib import Path
18
+ import SimpleITK as sitk
19
+ import matplotlib.patches as mpatches
20
+ import napari
21
+
22
+
23
+ #%% === Main Functionalities ===
24
+
25
def rgb2gray(rgb):
    """Convert an RGB image to grayscale using ITU-R 601 luma weights.

    Args:
        rgb: Image array. A 2-D array is assumed to already be grayscale
            and is returned unchanged; otherwise the first three channels
            of the last axis are combined (extra channels are ignored).

    Returns:
        A float grayscale array, or the input itself when it is 2-D.
    """
    if rgb.ndim == 2:
        return rgb
    luma_weights = [0.2989, 0.5870, 0.1140]
    return np.dot(rgb[..., :3], luma_weights)
33
+
34
def Normalize(image):
    """Linearly rescale an image to the [0, 1] range (min-max normalization).

    A constant image (max == min) would divide by zero, so it maps to an
    all-zero array of the same shape and dtype instead.
    """
    lo = np.min(image)
    hi = np.max(image)
    if hi == lo:
        return np.zeros_like(image)
    return (image - lo) / (hi - lo)
42
+
43
def Normalize_percentiles(image, low_perc, upp_perc):
    """Contrast-stretch an image between two percentile bounds.

    Intensities at or below the ``low_perc`` percentile map to 0, those at
    or above ``upp_perc`` map to 1, and the result is clipped to [0, 1].
    A degenerate case (both percentiles equal) returns all zeros.
    """
    p_min = np.percentile(image, low_perc)
    p_max = np.percentile(image, upp_perc)
    if p_max == p_min:
        return np.zeros_like(image)
    stretched = (image - p_min) / (p_max - p_min)
    return np.clip(stretched, 0, 1)
53
+
54
+ #%% === Pre-processment Functionalities ===
55
+
56
def prepare_base_image(path):
    """Load an image from disk and build an 8-bit, min-max-normalized
    grayscale version of it.

    Args:
        path: Filesystem path readable by ``cv2.imread``.

    Returns:
        Tuple ``(final_img, img)`` where ``final_img`` is the uint8
        normalized grayscale image and ``img`` is the raw BGR image.

    Raises:
        FileNotFoundError: If the file cannot be read. ``cv2.imread``
            returns None on failure, which previously surfaced later as an
            opaque AttributeError inside ``rgb2gray``.
    """
    img = cv2.imread(path)
    if img is None:
        raise FileNotFoundError(f"Could not read image: {path}")
    gray = rgb2gray(img)
    norm = Normalize(gray)
    final_img = (norm * 255).astype(np.uint8)

    return final_img, img
63
+
64
def resize_image(image, scale_factor):
    """Rescale an image by a uniform factor using Lanczos resampling.

    Args:
        image: Input image (any channel count).
        scale_factor: Positive multiplier applied to both dimensions.

    Returns:
        The resized image.

    Raises:
        ValueError: If ``scale_factor`` is not positive (previously this
            surfaced as a cryptic OpenCV assertion).
    """
    if scale_factor <= 0:
        raise ValueError(f"scale_factor must be positive, got {scale_factor}")
    h, w = image.shape[:2]
    # Guard against rounding a very small factor down to a zero-sized axis.
    new_size = (max(1, int(w * scale_factor)), max(1, int(h * scale_factor)))
    resized = cv2.resize(image, new_size, interpolation=cv2.INTER_LANCZOS4)
    return resized
69
+
70
def apply_clahe(image, use_clahe=True):
    """Boost the local contrast of a grayscale image.

    Args:
        image: Single-channel uint8 image.
        use_clahe: When True (default), apply CLAHE (clip limit 6.0 on an
            8x8 tile grid); otherwise fall back to plain global histogram
            equalization.

    Returns:
        The contrast-enhanced image.
    """
    if not use_clahe:
        return cv2.equalizeHist(image)
    # clahe_obj = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
    clahe_obj = cv2.createCLAHE(clipLimit=6.0, tileGridSize=(8, 8))
    return clahe_obj.apply(image)
79
+
80
def align_centers(fixed_img, moving_img):
    """Translate ``moving_img`` so its intensity centroid (center of mass)
    coincides with that of ``fixed_img``.

    Args:
        fixed_img: Reference grayscale image; its size defines the output
            canvas.
        moving_img: Image to be shifted.

    Returns:
        Tuple ``(centered_moving, (shift_x, shift_y))``. If either image has
        zero total intensity the centroid is undefined, so the moving image
        is returned untouched with a (0, 0) shift (a warning is printed).
    """
    h_fixed, w_fixed = fixed_img.shape[:2]

    def _centroid(img, label):
        # Intensity moments; m00 == 0 means an all-black image.
        m = cv2.moments(img)
        if m["m00"] == 0:
            print(f"Warning: {label} image is empty/black. Skipping CoM alignment.")
            return None
        return int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"])

    center_fixed = _centroid(fixed_img, "Fixed")
    if center_fixed is None:
        return moving_img, (0, 0)

    center_moving = _centroid(moving_img, "Moving")
    if center_moving is None:
        return moving_img, (0, 0)

    # Displacement that moves the moving centroid onto the fixed one.
    shift_x = center_fixed[0] - center_moving[0]
    shift_y = center_fixed[1] - center_moving[1]

    # Pure-translation affine matrix.
    T = np.float32([[1, 0, shift_x], [0, 1, shift_y]])

    centered_moving = cv2.warpAffine(
        moving_img,
        T,
        (w_fixed, h_fixed),
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=0,
    )

    return centered_moving, (shift_x, shift_y)
119
+
120
def Gaussian_blur(image, kernel_size=5, sigma=0):
    """Apply a Gaussian blur with a square kernel.

    Args:
        image: Input image.
        kernel_size: Side length of the kernel. OpenCV requires an odd
            size, so even values are bumped up by one (with a notice).
        sigma: Gaussian standard deviation; 0 lets OpenCV derive it from
            the kernel size.

    Returns:
        The blurred image.
    """
    if kernel_size % 2 == 0:
        kernel_size += 1
        # Bug fix: the message lacked the f-prefix, so "{kernel_size}" was
        # printed literally instead of the adjusted value.
        print(f"Kernel adjust:{kernel_size}")
    blurred = cv2.GaussianBlur(image, (kernel_size, kernel_size), sigma)

    return blurred
127
+
128
def find_edges(image, sigma=0.33):
    """Run Canny edge detection with thresholds derived automatically from
    the median intensity.

    Args:
        image: Grayscale or BGR image (BGR is converted first).
        sigma: Fractional half-width of the threshold band around the
            median; lower/upper bounds are clamped to [0, 255].

    Returns:
        Binary edge map from ``cv2.Canny``.
    """
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    median_val = np.median(image)
    lower = int(max(0, (1.0 - sigma) * median_val))
    upper = int(min(255, (1.0 + sigma) * median_val))

    return cv2.Canny(image, lower, upper)
140
+
141
def find_edges_binary(image):
    """Binarize an image with Otsu's threshold, then extract the contour of
    the resulting mask with Canny.

    Args:
        image: Single-channel uint8 image.

    Returns:
        Binary edge map outlining the Otsu mask.
    """
    _, otsu_mask = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return cv2.Canny(otsu_mask, 30, 100)
148
+
149
def binary_mask(image, kernel_size=5):
    """Reduce an image to a solid binary silhouette of the main shape.

    Expects the subject to be bright on a dark background. Pipeline:
    grayscale -> Gaussian blur -> Otsu threshold -> morphological CLOSE
    (fill interior holes) -> morphological OPEN (remove exterior specks).

    Args:
        image: Input image (grayscale or BGR).
        kernel_size: Side of the elliptical structuring element. Larger
            values remove bigger noise spots but smooth shape details.

    Returns:
        A binary uint8 image (0 and 255) containing only the main shape.
    """
    # Work on a single channel.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if image.ndim == 3 else image.copy()

    # Suppress high-frequency noise so Otsu finds a stable threshold.
    blurred = cv2.GaussianBlur(gray, (7, 7), 0)

    # Otsu picks the split automatically; bright pixels become 255.
    _, binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # Elliptical kernel keeps the mask edges smooth.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))

    # CLOSE (dilate then erode, 2 iterations) fills holes inside the shape.
    solid_mask = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel, iterations=2)

    # OPEN (erode then dilate) drops small dust outside the shape.
    return cv2.morphologyEx(solid_mask, cv2.MORPH_OPEN, kernel, iterations=1)
196
+
197
+ #%% === Simple Matcher Functionalities ===
198
+
199
def get_symmetry(image, label):
    """Apply one of the eight dihedral-group transforms (D8 for squares, or
    the rectangle-compatible subset with 90-degree rotations).

    Labels: ``R0``-``R3`` are rotations by 0/90/180/270 degrees, ``M1``/``M2``
    are horizontal/vertical mirrors, ``D1``/``D2`` are the two diagonal
    flips. An unrecognized label returns the image unchanged.
    """
    transforms = {
        "R0": lambda img: img,
        "R1": lambda img: cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE),         # 90 deg
        "R2": lambda img: cv2.rotate(img, cv2.ROTATE_180),                  # 180 deg
        "R3": lambda img: cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE),  # 270 deg
        "M1": lambda img: cv2.flip(img, 1),                                 # mirror about vertical axis
        "M2": lambda img: cv2.flip(img, 0),                                 # mirror about horizontal axis
        "D1": lambda img: cv2.transpose(img),                               # main diagonal
        "D2": lambda img: cv2.flip(cv2.transpose(img), 0),                  # anti-diagonal
    }
    op = transforms.get(label)
    return op(image) if op is not None else image
224
+
225
def find_best_match_pixel(img_1, img_2, image_name):
    """Template-match the smaller image against the larger one across all
    8 dihedral symmetries using normalized cross-correlation.

    Args:
        img_1, img_2: Grayscale images; whichever has the larger area is
            used as the search canvas, the other as the template.
        image_name: Identifier stored in each result dict.

    Returns:
        Tuple ``(best_result, results)``; each result dict holds the
        symmetry label, the TM_CCOEFF_NORMED score, the match ``location``
        expressed in the UN-padded canvas coordinates, and the transformed
        template under ``raman_img``. ``best_result`` has the highest score.
    """
    results = []

    h1, w1 = img_1.shape[:2]
    h2, w2 = img_2.shape[:2]

    area1 = h1 * w1
    area2 = h2 * w2

    # Larger image becomes the search canvas; smaller one the template.
    if area1 > area2:
        image_big = img_1
        image_small = img_2
    else:
        image_big = img_2
        image_small = img_1

    h_big, w_big = image_big.shape[:2]
    h_small, w_small = image_small.shape[:2]

    max_dim_small = max(h_small, w_small)

    # After a 90-degree rotation the template's width/height swap, so the
    # canvas must accommodate max_dim_small in both dimensions.
    missing_h = max(0, max_dim_small - h_big)
    missing_w = max(0, max_dim_small - w_big)

    pad_top = 0
    pad_left = 0

    if missing_h > 0 or missing_w > 0:
        # Extra 10% margin so matches near the border remain reachable.
        margin = int(max_dim_small * 0.1)

        if missing_h > 0:
            pad_top = (missing_h // 2) + margin
            pad_bottom = (missing_h // 2) + margin
        else:
            pad_top = 0
            pad_bottom = 0

        if missing_w > 0:
            pad_left = (missing_w // 2) + margin
            pad_right = (missing_w // 2) + margin
        else:
            pad_left = 0
            pad_right = 0

        image_big = cv2.copyMakeBorder(
            image_big,
            pad_top, pad_bottom, pad_left, pad_right,
            cv2.BORDER_CONSTANT, value=0
        )

    symmetries = ["R0", "R1", "R2", "R3", "M1", "M2", "D1", "D2"]

    for sym in symmetries:
        current_small = get_symmetry(image_small, sym)

        res = cv2.matchTemplate(image_big, current_small, cv2.TM_CCOEFF_NORMED)

        _, max_val, _, max_loc = cv2.minMaxLoc(res)

        # Translate back into the original (un-padded) canvas coordinates.
        real_x = max_loc[0] - pad_left
        real_y = max_loc[1] - pad_top

        results.append({
            "name": image_name,
            "symmetry": sym,
            "score": max_val,
            "location": (real_x, real_y),
            "raman_img": current_small
        })

    best_result = max(results, key=lambda x: x['score'])

    return best_result, results
298
+
299
def find_best_match_features(img_1, img_2, image_name):
    """
    Tests all 8 symmetries of img_raman (img_1) against img_bf (img_2)
    using SIFT Features + KNN Matching + Lowe's Ratio Test + RANSAC.

    Returns a (best_result, results) pair; the best result is the one with
    the highest RANSAC inlier count. Returns (None, []) when no symmetry
    yields enough keypoints.
    """
    # 1. Feature detector (ORB or SIFT)
    #sift = cv2.ORB_create()
    sift = cv2.SIFT_create()

    # 2. Compute features of Img 2 once; the loop only recomputes Img 1's.
    kp2, des2 = sift.detectAndCompute(img_2, None)

    results = []

    # 3. Symmetry loop for Img 1
    symmetries = ["R0", "R1", "R2", "R3", "M1", "M2", "D1", "D2"]

    for sym in symmetries:
        current_raman = get_symmetry(img_1, sym)

        kp1, des1 = sift.detectAndCompute(current_raman, None)

        # Skip orientations with too few keypoints to estimate a homography.
        if des1 is None or des2 is None or len(kp1) < 5 or len(kp2) < 5:
            continue

        # L2 norm matches SIFT's float descriptors; k=2 for the ratio test.
        bf = cv2.BFMatcher(cv2.NORM_L2)
        matches = bf.knnMatch(des1, des2, k=2)

        # Lowe's ratio test: keep a match only when it is clearly better
        # than the runner-up candidate.
        good_matches = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good_matches.append(m)

        score = 0
        H = None
        matches_mask = []

        # findHomography needs at least 4 correspondences.
        if len(good_matches) >= 4:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

            H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            if mask is not None:
                matches_mask = mask.ravel().tolist()
                # Score is the RANSAC inlier count.
                score = np.sum(matches_mask)

        results.append({
            "name": image_name,
            "symmetry": sym,
            "score": score,
            "homography": H,
            "raman_img": current_raman,
            "keypoints_1": kp1,
            "good_matches": good_matches,
            "matches_mask": matches_mask
        })

    if not results:
        return None, []

    best_result = max(results, key=lambda x: x['score'])

    return best_result, results
365
+
366
+
367
def find_best_match_fft(img_1, img_2, image_name):
    """
    Tests all 8 symmetries using FFT Phase Correlation.

    Returns (best_result, results); each result stores the symmetry label,
    the phase-correlation response under 'score' (0..1, higher is better),
    the sub-pixel (dx, dy) shift, and the transformed un-padded template.
    Returns (None, []) if every symmetry fails.
    """

    # 1. PREPARATION
    # FFT requires float32 or float64.
    # The larger of the two images defines the common working size.
    h_max = max(img_1.shape[0], img_2.shape[0])
    w_max = max(img_1.shape[1], img_2.shape[1])

    # Helper to pad an image to the target size (center alignment).
    def pad_to_size(img, th, tw):
        h, w = img.shape
        if h == th and w == tw: return img.astype(np.float32)

        padded = np.zeros((th, tw), dtype=np.float32)
        # Place in the center (plays well with the Hanning window).
        y_off = (th - h) // 2
        x_off = (tw - w) // 2
        padded[y_off:y_off+h, x_off:x_off+w] = img
        return padded

    # Prepare the target (img_2).
    img_2_float = pad_to_size(img_2, h_max, w_max)

    # Hanning window reduces edge effects (spectral leakage), which greatly
    # improves accuracy for non-periodic images like tissue scans.
    window = cv2.createHanningWindow((w_max, h_max), cv2.CV_32F)

    results = []

    # 2. SYMMETRY LOOP
    symmetries = ["R0", "R1", "R2", "R3", "M1", "M2", "D1", "D2"]

    for sym in symmetries:
        # a) Transform the template.
        current_raman = get_symmetry(img_1, sym)

        # b) Pad to the common size.
        current_raman_float = pad_to_size(current_raman, h_max, w_max)

        # c) PHASE CORRELATION
        # Returns the (dx, dy) shift and a 'response' confidence in [0, 1].
        try:
            # The Hanning window is applied to both images by OpenCV.
            shift, response = cv2.phaseCorrelate(current_raman_float, img_2_float, window=window)

            # Unpack shift
            dx, dy = shift

            results.append({
                "name": image_name,
                "symmetry": sym,
                "score": response,  # Higher is better (0 to 1)
                "shift_xy": (dx, dy),
                "raman_img": current_raman  # Store original unpadded for visualization
            })

        except Exception as e:
            # FFT can fail if images are tiny or completely zero.
            print(f"FFT Error on {sym}: {e}")
            continue

    # 3. SELECT WINNER
    if not results:
        return None, []

    # Best match = highest phase-correlation response (peak sharpness).
    best_result = max(results, key=lambda x: x['score'])

    return best_result, results
439
+
440
+ #%% === Fine Tuning Functionalities ===
441
+
442
def fine_tune_registration(fixed_img_cv, moving_img_cv, transform_type):
    """
    Complete registration using Mutual Information.
    Args:
        fixed_img_cv: BF crop (grayscale).
        moving_img_cv: Preoriented Raman (grayscale).
        transform_type: "Rigid", "Similarity", "Affine", "BSpline"
    Returns:
        registered_img (numpy): transformed Raman image
        final_transform (sitk.Transform): mathematical computed matrix
        On optimizer failure, returns the unmodified moving image and None.
    Raises:
        ValueError: For an unknown transform_type.
    """

    # SimpleITK operates on its own image type; convert from OpenCV arrays.
    fixed = sitk.GetImageFromArray(fixed_img_cv.astype(np.float32))
    moving = sitk.GetImageFromArray(moving_img_cv.astype(np.float32))

    if transform_type == "Rigid":
        # DOF: 3 (rotation + translation)
        initial_transform = sitk.CenteredTransformInitializer(
            fixed, moving,
            sitk.Euler2DTransform(),
            sitk.CenteredTransformInitializerFilter.GEOMETRY
        )

    elif transform_type == "Similarity":
        # DOF: 4 (rotation + translation + uniform scale)
        # Ideal when there is a real zoom difference between microscopes.
        initial_transform = sitk.CenteredTransformInitializer(
            fixed, moving,
            sitk.Similarity2DTransform(),
            sitk.CenteredTransformInitializerFilter.GEOMETRY
        )

    elif transform_type == "Affine":
        # DOF: 6 (rotation + translation + scale + shear)
        initial_transform = sitk.CenteredTransformInitializer(
            fixed, moving,
            sitk.AffineTransform(2),
            sitk.CenteredTransformInitializerFilter.GEOMETRY
        )

    elif transform_type == "BSpline":
        # DOF: many (elastic / non-rigid deformation)
        # BSpline usually needs a prior initialization (generally Affine).
        # NOTE(review): init_rigid and grid_physical_spacing are computed but
        # never used below — the BSpline grid is initialized directly from
        # mesh_size. Confirm whether a rigid pre-alignment was intended.
        init_rigid = sitk.CenteredTransformInitializer(
            fixed, moving, sitk.Euler2DTransform(), sitk.CenteredTransformInitializerFilter.GEOMETRY
        )
        # Deformation (3x3 grid)
        grid_physical_spacing = [50.0, 50.0]  # Adjustable according to pixel size
        mesh_size = [3, 3]

        initial_transform = sitk.BSplineTransformInitializer(fixed, mesh_size)

    else:
        raise ValueError("Use 'Rigid', 'Similarity', 'Affine' or 'BSpline'.")

    # 3. Configure the registration method.
    R = sitk.ImageRegistrationMethod()

    # Metric: Mattes Mutual Information, robust across imaging modalities.
    R.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
    R.SetMetricSamplingStrategy(R.RANDOM)
    R.SetMetricSamplingPercentage(0.3)

    # Optimizer
    if transform_type == "BSpline":
        # LBFGSB handles the high-dimensional BSpline parameter space better.
        R.SetOptimizerAsLBFGSB(gradientConvergenceTolerance=1e-5, numberOfIterations=100, maximumNumberOfCorrections=5)
    else:
        # Gradient Descent for the linear transformations.
        R.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100, convergenceMinimumValue=1e-6, convergenceWindowSize=10)
        R.SetOptimizerScalesFromPhysicalShift()

    # Final configuration
    R.SetInitialTransform(initial_transform, inPlace=False)
    R.SetInterpolator(sitk.sitkLinear)

    # 4. Execute the registration.
    try:
        final_transform = R.Execute(fixed, moving)

        print(f"Register {transform_type} complete. Metric value: {R.GetMetricValue():.4f}")

    except Exception as e:
        # Best effort: fall back to the unregistered image on failure.
        print(f"Register {transform_type} fails: {e}")
        return moving_img_cv, None

    # 5. Apply the transformation (resample moving onto fixed's grid).
    resampler = sitk.ResampleImageFilter()
    resampler.SetReferenceImage(fixed)
    resampler.SetInterpolator(sitk.sitkBSpline)  # BSpline interpolation for smooth output
    resampler.SetDefaultPixelValue(0)
    resampler.SetTransform(final_transform)

    out_sitk = resampler.Execute(moving)

    return sitk.GetArrayFromImage(out_sitk), final_transform
539
+
540
+ #%% === Visualization ===
541
+
542
def visual_debugger(img_1, img_2):
    """Display a red/green/yellow overlap visualization of two registered
    images via matplotlib.

    Both inputs are Otsu-binarized (img_2 is first floored at 30 and then
    dilated/closed to make its mask solid). Red = img_1 only, green = img_2
    only, yellow = overlap. The figure title shows an overlap score defined
    as intersection / min(area_1, area_2). Calls plt.show(); returns None.
    """

    _, bin_r = cv2.threshold(img_1.astype(np.uint8), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # Floor low intensities before Otsu so background noise does not skew it.
    ret, bf_thresh = cv2.threshold(img_2.astype(np.uint8), 30, 255, cv2.THRESH_TOZERO)
    _, bin_b = cv2.threshold(bf_thresh, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # Dilate + close to turn the bright-field mask into a solid region.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
    bin_b_processed = cv2.dilate(bin_b, kernel, iterations=1)
    bin_b_processed = cv2.morphologyEx(bin_b_processed, cv2.MORPH_CLOSE, kernel)

    h, w = bin_r.shape
    viz = np.zeros((h, w, 3), dtype=np.uint8)

    # Channel Red: Img 1
    viz[:,:,0] = bin_r

    # Channel Green: Bright Field (red + green render as yellow where both overlap)
    viz[:,:,1] = bin_b_processed

    # Overlap score: shared pixels normalized by the smaller mask's area.
    mask_r = bin_r > 0
    mask_b = bin_b_processed > 0
    intersection = np.count_nonzero(np.logical_and(mask_r, mask_b))
    area_r = np.count_nonzero(mask_r)
    area_b = np.count_nonzero(mask_b)
    score = intersection / min(area_r, area_b) if min(area_r, area_b) > 0 else 0

    plt.figure(figsize=(12, 12))
    plt.imshow(viz)
    plt.title(f"Visual Debugger | Overlap Score: {score:.4f}", fontsize=14, fontweight='bold')
    plt.axis('off')

    patch_red = mpatches.Patch(color='red', label='Img 1')
    patch_green = mpatches.Patch(color='green', label='Img 2')
    patch_yellow = mpatches.Patch(color='yellow', label='MATCH')

    plt.legend(handles=[patch_red, patch_green, patch_yellow],
               loc='upper right', framealpha=0.9, fontsize=12, facecolor='black', labelcolor='white')

    plt.tight_layout()
    plt.show()
583
+
584
def show_in_napari (img_1, img_2):
    """Open a napari viewer with both images overlaid plus hidden debug
    mask layers.

    img_1 is shown in gray, img_2 additively in 'inferno'; Otsu masks of
    each (img_2 floored at 30 first) are added as initially-hidden green/
    red layers. Blocks on napari.run(); returns None.
    """
    _, bin_1 = cv2.threshold(img_1.astype(np.uint8), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # Floor low intensities before Otsu thresholding img_2.
    ret, bf_thresh = cv2.threshold(img_2.astype(np.uint8), 30, 255, cv2.THRESH_TOZERO)
    _, bin_2 = cv2.threshold(bf_thresh, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
    # bin_1_dilated = cv2.dilate(bin_1, kernel, iterations=1)
    # bin_1_final = cv2.morphologyEx(bin_1_dilated, cv2.MORPH_CLOSE, kernel)

    viewer = napari.Viewer(title="IMG 1 vs IMG 2 Registration Debugger")

    # LAYER IMG 1
    viewer.add_image(
        img_1,
        name='IMG 1',
        colormap='gray',
        opacity=1.0
    )

    # LAYER IMG 2 (additive blending so both are visible at once)
    viewer.add_image(
        img_2,
        name='IMG 2',
        colormap='inferno',
        blending='additive',
        opacity=0.8
    )

    # LAYER MASK IMG 1 (hidden by default; toggle in the viewer to debug)
    viewer.add_image(
        bin_1,
        name='Debug: IMG 1 Mask',
        colormap='green',
        blending='additive',
        opacity=0.5,
        visible=False
    )

    # LAYER MASK IMG 2 (hidden by default)
    viewer.add_image(
        bin_2,
        name='Debug: IMG 2 Mask',
        colormap='red',
        blending='additive',
        opacity=0.5,
        visible=False
    )

    napari.run()
634
+
635
def RGBA_visualization(img_1, img_2):
    """Build a BGRA overlay: JET-colormapped img_1 with img_2 as the alpha
    channel.

    Both inputs are normalized to uint8 if needed, and img_2 is resized to
    img_1's dimensions when they differ.

    Returns:
        (rgba_img, b, g, r, alpha) — the merged 4-channel image plus the
        individual channels for callers that need them separately.
    """
    # 1. Ensure both are uint8 (required by applyColorMap / merge).
    if img_1.dtype != np.uint8:
        img_1 = cv2.normalize(img_1, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
    if img_2.dtype != np.uint8:
        img_2 = cv2.normalize(img_2, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)

    # 2. Force EXACT dimension match (height and width).
    # Note: cv2.resize takes (width, height).
    if img_1.shape[:2] != img_2.shape[:2]:
        img_2 = cv2.resize(img_2, (img_1.shape[1], img_1.shape[0]))

    # 3. Apply the color map (yields a 3-channel uint8 image).
    img_1_color = cv2.applyColorMap(img_1, cv2.COLORMAP_JET)

    # 4. Split channels.
    b, g, r = cv2.split(img_1_color)

    # 5. The alpha channel must match the other channels' size and dtype.
    alpha = img_2

    # 6. Merge channels into BGRA.
    rgba_img = cv2.merge([b, g, r, alpha])

    return rgba_img, b, g, r, alpha
660
+
661
 
662
  # ==========================================
663
  # 1. HELPER FUNCTIONS
 
1038
  if op == "Resize":
1039
  try:
1040
  scale = float(param)
1041
+ new_raw = resize_image(raw, scale)
1042
  updated_history.append(f"Resize (scale={scale})")
1043
  return new_raw, new_raw, to_interactive_plot(to_display(new_raw, 800), height=350), updated_history
1044
  except:
 
1051
  img_to_mod = current_proc.copy()
1052
 
1053
  if op == "Gray":
1054
+ if img_to_mod.ndim == 3: res = rgb2gray(img_to_mod).astype(np.uint8)
1055
  else: res = img_to_mod
1056
  updated_history.append("Grayscale")
1057
  elif op == "Invert":
1058
  res = cv2.bitwise_not(img_to_mod)
1059
  updated_history.append("Invert")
1060
  elif op == "Norm":
1061
+ norm = Normalize(img_to_mod)
1062
  res = (norm * 255).astype(np.uint8)
1063
  updated_history.append("Normalize")
1064
  elif op == "CLAHE":
1065
+ res = apply_clahe(img_to_mod)
1066
  updated_history.append("CLAHE")
1067
  elif op == "Binary Mask":
1068
+ res = binary_mask(img_to_mod)
1069
  updated_history.append("Binary Mask")
1070
  else:
1071
  res = img_to_mod
 
1096
  def gen_overlay(fixed_raw, moving_raw, dx, dy, sym, opacity):
1097
  """Generates Interactive Plotly Overlay."""
1098
  if fixed_raw is None or moving_raw is None: return None
1099
+ moved = get_symmetry(moving_raw, sym)
1100
  h, w = fixed_raw.shape[:2]
1101
  if fixed_raw.ndim==3: canvas = np.zeros((h, w, 3), dtype=np.uint8)
1102
  else: canvas = np.zeros((h, w), dtype=np.uint8)
 
1115
 
1116
  def on_auto(fp, mp, fr, mr, algo):
1117
  if fp is None: return 0,0,"R0", "No Data", None, None
1118
+ fp_g = fp if fp.ndim==2 else rgb2gray(fp)
1119
+ mp_g = mp if mp.ndim==2 else rgb2gray(mp)
1120
  try:
1121
+ if "Pixel" in algo: res, _ = find_best_match_pixel(fp_g, mp_g, "x")
1122
+ elif "Feature" in algo: res, _ = find_best_match_features(fp_g, mp_g, "x")
1123
+ elif "FFT" in algo: res, _ = find_best_match_fft(fp_g, mp_g, "x")
1124
  if res:
1125
  loc = res.get('location', (0,0))
1126
  if 'shift_xy' in res: loc = res['shift_xy']
 
1158
  # --- CROP & CONFIRM (Pass History) ---
1159
  def apply_crop_wrapper(f_raw, m_raw, params, f_hist, m_hist):
1160
  dx, dy, sym = int(params["dx"]), int(params["dy"]), params["sym"]
1161
+ m_moved = get_symmetry(m_raw, sym)
1162
  h_f, w_f = f_raw.shape[:2]
1163
  h_m, w_m = m_moved.shape[:2]
1164
  x1, y1 = max(0, dx), max(0, dy)