Ayesha352 committed on
Commit
b4ecde4
·
verified ·
1 Parent(s): 03915c5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +102 -86
app.py CHANGED
@@ -1,128 +1,144 @@
1
  import gradio as gr
2
  import cv2
3
  import numpy as np
 
4
  import json
5
  import math
6
- import matplotlib.pyplot as plt
7
 
8
- # === Helper Functions ===
9
  def get_rotated_rect_corners(x, y, w, h, rotation_deg):
10
  rot_rad = np.deg2rad(rotation_deg)
11
  cos_r = np.cos(rot_rad)
12
  sin_r = np.sin(rot_rad)
 
 
 
 
 
 
 
 
 
 
 
13
  R = np.array([[cos_r, -sin_r],
14
  [sin_r, cos_r]])
15
- cx, cy = x + w/2, y + h/2
16
- local_corners = np.array([[-w/2,-h/2],[w/2,-h/2],[w/2,h/2],[-w/2,h/2]])
17
- rotated_corners = np.dot(local_corners, R.T) + np.array([cx, cy])
18
- return rotated_corners.astype(np.float32)
19
 
 
 
 
 
 
20
  def preprocess_gray_clahe(img):
21
  gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
22
- clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
23
  return clahe.apply(gray)
24
 
25
- def detect_and_match(img1_gray, img2_gray, detector_type, ratio_thresh=0.78):
26
- if detector_type == "SIFT":
27
- detector = cv2.SIFT_create(nfeatures=5000)
28
- matcher = cv2.BFMatcher(cv2.NORM_L2)
29
- elif detector_type == "BRISK":
30
- detector = cv2.BRISK_create()
31
- matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
32
- elif detector_type == "ORB":
33
- detector = cv2.ORB_create(5000)
34
- matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
35
- elif detector_type == "AKAZE":
36
- detector = cv2.AKAZE_create()
37
- matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
38
- elif detector_type == "KAZE":
39
- detector = cv2.KAZE_create()
40
- matcher = cv2.BFMatcher(cv2.NORM_L2)
41
  else:
42
- return None, None, []
43
 
 
 
44
  kp1, des1 = detector.detectAndCompute(img1_gray, None)
45
  kp2, des2 = detector.detectAndCompute(img2_gray, None)
46
- if des1 is None or des2 is None:
47
- return kp1, kp2, []
48
 
49
- raw_matches = matcher.knnMatch(des1, des2, k=2)
50
- good = [m for m,n in raw_matches if m.distance < ratio_thresh * n.distance]
51
- return kp1, kp2, good
 
52
 
53
- def get_roi_points_from_json(json_file):
54
- data = json.load(json_file)
55
- area = data["printAreas"][0]
56
- x = area["position"]["x"]
57
- y = area["position"]["y"]
58
- w = area["width"]
59
- h = area["height"]
60
- rot = area["rotation"]
61
- return x, y, w, h, rot
62
 
 
63
  def process_images(flat_img, persp_img, json_file):
64
- # Preprocess
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  flat_gray = preprocess_gray_clahe(flat_img)
66
  persp_gray = preprocess_gray_clahe(persp_img)
67
- x, y, w, h, rot = get_roi_points_from_json(json_file)
68
 
69
- detectors = ["SIFT","BRISK","ORB","AKAZE","KAZE"]
70
- gallery_images = []
71
 
72
  for det in detectors:
73
- kp1, kp2, matches = detect_and_match(flat_gray, persp_gray, det)
74
- if len(matches) < 4:
75
- # Skip if too few matches
 
76
  continue
77
 
78
- src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1,1,2)
79
- dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1,1,2)
80
- H, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
81
-
82
- # ROI in flat
83
- roi_flat = get_rotated_rect_corners(x,y,w,h,rot)
84
- flat_copy = flat_img.copy()
85
- cv2.polylines(flat_copy, [roi_flat.astype(int)], True, (0,0,255),2)
86
-
87
- # Project ROI to perspective
88
- roi_persp = cv2.perspectiveTransform(roi_flat.reshape(-1,1,2), H).reshape(-1,2)
89
- persp_copy = persp_img.copy()
90
- cv2.polylines(persp_copy, [roi_persp.astype(int)], True, (0,255,0),2)
91
- for px, py in roi_persp:
92
- cv2.circle(persp_copy, (int(px),int(py)), 5, (255,0,0), -1)
93
-
94
- # Side-by-side for this detector
95
- fig, ax = plt.subplots(1,2,figsize=(12,6))
96
- ax[0].imshow(flat_copy)
97
- ax[0].set_title(f"Flat ROI - {det}")
98
- ax[0].axis("off")
99
- ax[1].imshow(persp_copy)
100
- ax[1].set_title(f"Perspective ROI - {det}")
101
- ax[1].axis("off")
102
- plt.tight_layout()
103
- filename = f"{det}_result.png"
104
- plt.savefig(filename)
105
- plt.close(fig)
106
- gallery_images.append(filename)
107
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  return gallery_images
109
 
110
  iface = gr.Interface(
111
- fn=process_images,
112
  inputs=[
113
  gr.Image(type="numpy", label="Flat Image"),
114
  gr.Image(type="numpy", label="Perspective Image"),
115
  gr.File(type="filepath", label="JSON File")
116
- ], # <-- ye closing bracket should be ]
117
- outputs=[ # <-- starts a new list
118
- gr.Gallery(label="Results"),
119
- gr.File(label="Download SIFT Result"),
120
- gr.File(label="Download ORB Result"),
121
- gr.File(label="Download BRISK Result"),
122
- gr.File(label="Download AKAZE Result"),
123
- gr.File(label="Download KAZE Result")
124
- ], # <-- should be ] not )
125
- title="Homography & ROI Projection",
126
- description="..."
127
  )
128
 
 
 
1
  import gradio as gr
2
  import cv2
3
  import numpy as np
4
+ import matplotlib.pyplot as plt
5
  import json
6
  import math
 
7
 
8
+ # === Helper: Rotated rectangle corners ===
9
def get_rotated_rect_corners(x, y, w, h, rotation_deg):
    """Return the four corners of a w x h rectangle rotated about its centre.

    The rectangle's top-left is at (x, y) before rotation; rotation_deg is
    applied about the centre (x + w/2, y + h/2).

    Returns a float32 array of shape (4, 2) in the order:
    top-left, top-right, bottom-right, bottom-left (pre-rotation frame).
    """
    theta = math.radians(rotation_deg)
    c, s = math.cos(theta), math.sin(theta)

    # 2x2 rotation matrix; applied to row vectors via corners @ R.T below.
    rot = np.array([[c, -s],
                    [s,  c]])

    centre = np.array([x + w / 2.0, y + h / 2.0])

    # Corner offsets relative to the rectangle centre.
    half_extents = np.array([
        [-w / 2.0, -h / 2.0],
        [ w / 2.0, -h / 2.0],
        [ w / 2.0,  h / 2.0],
        [-w / 2.0,  h / 2.0],
    ])

    return (half_extents @ rot.T + centre).astype(np.float32)
30
+
31
+ # === Preprocessing ===
32
def preprocess_gray_clahe(img):
    """Convert an RGB image to grayscale and boost local contrast with CLAHE."""
    equalizer = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    return equalizer.apply(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY))
36
 
37
+ # === Feature detectors ===
38
def get_detector(detector_name):
    """Instantiate the OpenCV feature detector named by detector_name.

    Supported names: "SIFT", "ORB", "BRISK", "AKAZE", "KAZE".
    Returns None for any unrecognised name.
    """
    factories = {
        "SIFT": lambda: cv2.SIFT_create(nfeatures=5000),
        "ORB": lambda: cv2.ORB_create(5000),
        "BRISK": cv2.BRISK_create,
        "AKAZE": cv2.AKAZE_create,
        "KAZE": cv2.KAZE_create,
    }
    factory = factories.get(detector_name)
    return factory() if factory is not None else None
51
 
52
def detect_and_match(img1_gray, img2_gray, detector_name, ratio_thresh=0.78):
    """Detect keypoints in both images and return ratio-test-filtered matches.

    Parameters
    ----------
    img1_gray, img2_gray : grayscale images (e.g. from preprocess_gray_clahe).
    detector_name : one of "SIFT", "ORB", "BRISK", "AKAZE", "KAZE".
    ratio_thresh : Lowe's ratio-test threshold for accepting a match.

    Returns
    -------
    (kp1, kp2, good) where good is the list of cv2.DMatch passing the ratio
    test. Returns (None, None, []) for an unknown detector name and
    (kp1, kp2, []) when either image yields no descriptors.
    """
    detector = get_detector(detector_name)
    if detector is None:
        # Unknown detector name: fail soft instead of AttributeError below.
        return None, None, []

    kp1, des1 = detector.detectAndCompute(img1_gray, None)
    kp2, des2 = detector.detectAndCompute(img2_gray, None)

    # detectAndCompute returns None descriptors on feature-poor images;
    # knnMatch raises on None input, so bail out with no matches.
    if des1 is None or des2 is None:
        return kp1, kp2, []

    # SIFT/KAZE produce float descriptors (L2 norm); ORB/BRISK/AKAZE are
    # binary descriptors (Hamming norm).
    if detector_name in ("SIFT", "KAZE"):
        matcher = cv2.BFMatcher(cv2.NORM_L2)
    else:
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING)

    matches = matcher.knnMatch(des1, des2, k=2)
    good = []
    for pair in matches:
        # knnMatch can return fewer than 2 candidates per query descriptor;
        # the ratio test needs both, so skip short pairs.
        if len(pair) == 2 and pair[0].distance < ratio_thresh * pair[1].distance:
            good.append(pair[0])
    return kp1, kp2, good
 
 
 
68
 
69
+ # === Main processing ===
70
def process_images(flat_img, persp_img, json_file):
    """Project a JSON-defined ROI from the flat image into the perspective
    image via feature-based homography, once per detector.

    Parameters
    ----------
    flat_img, persp_img : RGB numpy images supplied by Gradio.
    json_file : path string (gr.File type="filepath") or a file wrapper
        exposing .name; the JSON must contain printAreas[0] with
        position/width/height/rotation.

    Returns
    -------
    A list with one entry per detector in ["SIFT","ORB","BRISK","AKAZE","KAZE"]:
    [flat_annotated, persp_annotated] on success, or None when that detector
    produced too few matches / no homography. Returns [None]*6 when inputs
    are missing or the JSON cannot be read.
    """
    if flat_img is None or persp_img is None or json_file is None:
        return [None] * 6

    # gr.File(type="filepath") passes a plain path string; older Gradio
    # versions pass a tempfile wrapper exposing .name. Accept both —
    # calling .name on a str raised AttributeError and made every run
    # silently return [None]*6.
    json_path = json_file if isinstance(json_file, str) else json_file.name
    try:
        with open(json_path) as fh:  # context manager: no leaked handle
            data = json.load(fh)
    except Exception as e:
        print("JSON read error:", e)
        return [None] * 6

    roi = data["printAreas"][0]
    roi_x = roi["position"]["x"]
    roi_y = roi["position"]["y"]
    roi_w = roi["width"]
    roi_h = roi["height"]
    roi_rot_deg = roi["rotation"]

    # Contrast-normalised grayscale improves keypoint repeatability.
    flat_gray = preprocess_gray_clahe(flat_img)
    persp_gray = preprocess_gray_clahe(persp_img)

    detectors = ["SIFT", "ORB", "BRISK", "AKAZE", "KAZE"]
    results = []

    for det in detectors:
        kp1, kp2, good_matches = detect_and_match(flat_gray, persp_gray, det)
        # findHomography needs at least 4 point correspondences.
        if len(good_matches) < 4:
            print(f"Not enough matches for {det}")
            results.append(None)
            continue

        src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        H, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        if H is None:
            # RANSAC can fail to fit a model even with >= 4 matches;
            # perspectiveTransform would crash on a None matrix.
            print(f"Homography estimation failed for {det}")
            results.append(None)
            continue

        # ROI corners in the flat image, then projected into the perspective image.
        roi_corners_flat = get_rotated_rect_corners(roi_x, roi_y, roi_w, roi_h, roi_rot_deg)
        roi_corners_persp = cv2.perspectiveTransform(roi_corners_flat.reshape(-1, 1, 2), H).reshape(-1, 2)

        # Draw ROI outlines on copies so the inputs stay untouched.
        flat_out = flat_img.copy()
        persp_out = persp_img.copy()
        cv2.polylines(flat_out, [roi_corners_flat.astype(int)], True, (255, 0, 0), 3)
        cv2.polylines(persp_out, [roi_corners_persp.astype(int)], True, (0, 255, 0), 3)

        results.append([flat_out, persp_out])

    return results  # one [flat_out, persp_out] (or None) per detector
119
+
120
+ # === Gradio Interface ===
121
def wrap_gradio(flat_img, persp_img, json_file):
    """Adapter between process_images and the Gradio Gallery output.

    Flattens the per-detector [flat, perspective] pairs into a single image
    list, skipping detectors that produced no result (None entries).
    """
    pairs = process_images(flat_img, persp_img, json_file)
    gallery_images = []
    for pair in pairs:
        if pair is None:
            continue
        gallery_images.append(pair[0])
        gallery_images.append(pair[1])
    return gallery_images
129
 
130
# === Gradio UI wiring ===
input_widgets = [
    gr.Image(type="numpy", label="Flat Image"),
    gr.Image(type="numpy", label="Perspective Image"),
    gr.File(type="filepath", label="JSON File"),
]
output_widgets = [
    gr.Gallery(label="Results (Flat + Perspective per Detector)"),
]

iface = gr.Interface(
    fn=wrap_gradio,
    inputs=input_widgets,
    outputs=output_widgets,
    title="Feature Detection with ROI Projection",
    description="Shows SIFT, ORB, BRISK, AKAZE, KAZE feature-based ROI projections. Each detector outputs Flat and Perspective images.",
)

iface.launch()