Matiullah00999 committed on
Commit
6a34e45
·
verified ·
1 Parent(s): b7d8234

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +147 -98
app.py CHANGED
@@ -1,68 +1,120 @@
1
  import os
2
  import cv2
3
- import numpy as np
4
  import torch
 
 
5
  import matplotlib.pyplot as plt
 
 
 
 
6
  from scipy.spatial.distance import cdist
7
  from scipy.spatial import Delaunay
8
- from skimage.measure import label, regionprops
9
- import gradio as gr
10
- import io
11
- from PIL import Image
12
-
13
- # Constants
14
- DIA_MM = 152.4
15
-
16
- # Main processing function
17
- def analyze_aggregate(image_pil):
18
- results = {}
19
- edge_lengths = []
20
-
21
- # Convert to OpenCV image
22
- img = np.array(image_pil)
23
- img_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
24
- gray_img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
25
-
26
- # Simulated label (as if predicted by a model)
27
- label_img = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR) # Dummy label for placeholder
28
- _, label_gray = cv2.threshold(gray_img, 127, 255, cv2.THRESH_BINARY)
29
- binary_mask = (label_gray > 0).astype(np.uint8)
30
- color_mask = cv2.cvtColor(label_gray, cv2.COLOR_GRAY2BGR)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
- # Pixel calibration
33
  _, bw = cv2.threshold(gray_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
34
  contours, _ = cv2.findContours(bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
35
  contours = sorted(contours, key=cv2.contourArea, reverse=True)
36
  if not contours:
37
- return "No contours found.", None, None, None, None
38
 
39
  boundary = contours[0].squeeze()
40
  dist_matrix = cdist(boundary, boundary)
41
  i, j = np.unravel_index(np.argmax(dist_matrix), dist_matrix.shape)
42
  line_pts = np.array([boundary[i], boundary[j]])
43
  pixel_diameter = np.linalg.norm(boundary[i] - boundary[j])
44
- pixels_per_mm = pixel_diameter / DIA_MM
45
  pixel_length_mm = 1 / pixels_per_mm
46
  line_length_mm = pixel_diameter * pixel_length_mm
47
 
48
- # Plot 1: Boundary and line
49
- fig1, ax1 = plt.subplots(figsize=(6, 6))
50
- ax1.imshow(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB))
51
- ax1.plot(boundary[:, 0], boundary[:, 1], 'g', linewidth=2)
52
- ax1.plot(line_pts[:, 0], line_pts[:, 1], 'r', linewidth=2)
53
- ax1.set_title(f'Line Length: {line_length_mm:.2f} mm')
54
- ax1.axis('off')
55
 
56
- # Aggregate area
57
- num_white_pixels = np.sum(binary_mask == 1)
58
- num_nonblack_pixels = np.count_nonzero(gray_img)
59
- aggregate_area_mm2 = num_white_pixels * (pixel_length_mm ** 2)
60
- total_area_mm2 = num_nonblack_pixels * (pixel_length_mm ** 2)
61
- aggregate_ratio = aggregate_area_mm2 / total_area_mm2 if total_area_mm2 > 0 else 0
62
 
63
- # Feret Rectangles
64
- feret_lengths, feret_widths = [], []
65
- rectangles = []
66
  contours_mask, _ = cv2.findContours(binary_mask * 255, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
67
  for cnt in contours_mask:
68
  if len(cnt) >= 5:
@@ -74,9 +126,8 @@ def analyze_aggregate(image_pil):
74
  feret_widths.append(min(width, height))
75
  rectangles.append((box, feret_length))
76
 
77
- thresholds = np.percentile(feret_lengths, [20, 40, 60, 80]) if feret_lengths else [0, 0, 0, 0]
78
- colors = [(0, 0, 255), (0, 128, 255), (0, 255, 255), (0, 255, 0), (255, 0, 0)]
79
-
80
  for box, length in rectangles:
81
  if length <= thresholds[0]: color = colors[0]
82
  elif length <= thresholds[1]: color = colors[1]
@@ -85,80 +136,78 @@ def analyze_aggregate(image_pil):
85
  else: color = colors[4]
86
  cv2.drawContours(color_mask, [box], 0, color, 3)
87
 
88
- # Plot 2: Feret rectangles
89
- fig2, ax2 = plt.subplots(figsize=(6, 6))
90
- ax2.imshow(cv2.cvtColor(color_mask, cv2.COLOR_BGR2RGB))
91
- ax2.set_title("Feret Rectangles by Size")
92
- ax2.axis('off')
93
-
94
- # Feret Stats
95
- if feret_lengths:
96
- avg_feret_length_mm = np.mean(feret_lengths) * pixel_length_mm
97
- avg_feret_width_mm = np.mean(feret_widths) * pixel_length_mm
98
- max_feret_length_mm = np.max(feret_lengths) * pixel_length_mm
99
- roundness_aggregate = avg_feret_length_mm / avg_feret_width_mm
100
- else:
101
- avg_feret_length_mm = avg_feret_width_mm = max_feret_length_mm = roundness_aggregate = 0
102
 
103
- # Delaunay triangulation
104
  labeled_img = label(binary_mask)
105
  props = regionprops(labeled_img)
106
  centroids = np.array([p.centroid for p in props])
107
 
 
 
 
108
  if len(centroids) >= 3:
109
  tri = Delaunay(centroids)
110
- fig3, ax3 = plt.subplots(figsize=(6, 6))
111
- ax3.imshow(label_gray, cmap='gray')
112
- ax3.triplot(centroids[:, 1], centroids[:, 0], tri.simplices.copy(), color='red')
113
-
114
  for simplex in tri.simplices:
115
  for i in range(3):
116
  pt1 = centroids[simplex[i]]
117
- pt2 = centroids[(i + 1) % 3]
118
  dist_px = np.linalg.norm(pt1 - pt2)
119
  dist_mm = dist_px * pixel_length_mm
120
  edge_lengths.append(dist_mm)
121
  midpoint = (pt1 + pt2) / 2
122
- ax3.text(midpoint[1], midpoint[0], f"{dist_mm:.1f}", color='blue', fontsize=6, ha='center')
 
 
 
 
 
123
 
124
- ax3.set_title("Delaunay Triangulation")
125
- ax3.axis('off')
 
 
 
 
 
 
 
 
 
126
  else:
127
- fig3 = plt.figure()
128
- plt.text(0.5, 0.5, 'Not enough centroids for triangulation.', ha='center')
129
- plt.axis('off')
130
-
131
- # Summary text
132
- summary = f"""
133
- Pixel Size: {pixel_length_mm:.4f} mm/pixel
134
- Aggregate Area: {aggregate_area_mm2:.2f} mm²
135
- Aggregate Ratio: {aggregate_ratio:.4f}
136
- Avg Aggregate Length: {avg_feret_length_mm:.2f} mm
137
- → Avg Aggregate Width: {avg_feret_width_mm:.2f} mm
138
- → Max Aggregate Length: {max_feret_length_mm:.2f} mm
139
- → Avg Aggregate Roundness: {roundness_aggregate:.2f}
140
  """
141
  if edge_lengths:
142
- summary += f"""
143
- Avg inter-Aggregate Distance: {np.mean(edge_lengths):.2f} mm
144
- → Max inter-Aggregate Distance: {np.max(edge_lengths):.2f} mm
145
- """
146
 
147
- return summary.strip(), fig1, fig2, fig3
148
 
149
- # Gradio UI
150
  demo = gr.Interface(
151
- fn=analyze_aggregate,
152
- inputs=[gr.Image(label="Upload Image")],
153
  outputs=[
154
- gr.Textbox(label="Summary Measurements"),
155
- gr.Plot(label="Boundary and Calibration Line"),
156
- gr.Plot(label="Feret Rectangles by Size"),
157
- gr.Plot(label="Delaunay Triangulation")
158
  ],
159
- title="Aggregate Analysis from Uploaded Image",
160
- description="Upload an image with circular calibration. The app will calculate size, aspect ratio, and spacing of aggregates.",
161
- allow_flagging='never'
162
  )
163
 
164
- demo.launch()
 
 
1
  import os
2
  import cv2
 
3
  import torch
4
+ import numpy as np
5
+ import gradio as gr
6
  import matplotlib.pyplot as plt
7
+ import pandas as pd
8
+ from glob import glob
9
+ from PIL import Image
10
+ from skimage.measure import regionprops, label
11
  from scipy.spatial.distance import cdist
12
  from scipy.spatial import Delaunay
13
+ from io import BytesIO
14
+ from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
15
+ import segmentation_models_pytorch as smp
16
+
17
# Configuration
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"  # run inference on GPU when one is present
DIAMETER_MM = 152.4  # real-world diameter of the circular calibration object, in mm (152.4 mm = 6 in) — confirm against the physical target
MIN_SIZE = 256  # minimum side length (pixels) fed to the segmentation model; smaller inputs are upscaled
21
+
22
class PetModel(torch.nn.Module):
    """Wrapper around a segmentation_models_pytorch network that folds the
    encoder's input normalization (mean/std) into the forward pass, so callers
    can feed raw [0, 1] images."""

    def __init__(self, arch, encoder_name, in_channels, out_classes, **kwargs):
        super().__init__()
        self.model = smp.create_model(
            arch, encoder_name, in_channels=in_channels, classes=out_classes, **kwargs
        )
        # Encoder-specific normalization constants are stored as buffers so
        # they follow the module across devices and appear in the state_dict.
        preprocessing = smp.encoders.get_preprocessing_params(encoder_name)
        self.register_buffer("std", torch.tensor(preprocessing["std"]).view(1, 3, 1, 1))
        self.register_buffer("mean", torch.tensor(preprocessing["mean"]).view(1, 3, 1, 1))

    def forward(self, image):
        """Normalize the NCHW batch, then run the segmentation network."""
        normalized = (image - self.mean) / self.std
        return self.model(normalized)
35
+
36
def preprocess_image(image, min_size=MIN_SIZE):
    """Convert an input image (PIL image or ndarray) into a model-ready tensor.

    Returns a tuple of:
      - float tensor of shape (1, 3, H, W) with values scaled to [0, 1]
      - the original (height, width) before any resizing, for later restoration
    """
    arr = np.array(image)

    # Normalize every incoming format (grayscale, RGBA, single-channel) to 3-channel RGB.
    if arr.ndim == 2:
        arr = cv2.cvtColor(arr, cv2.COLOR_GRAY2RGB)
    elif arr.shape[2] == 4:
        arr = cv2.cvtColor(arr, cv2.COLOR_RGBA2RGB)
    elif arr.shape[2] == 1:
        arr = cv2.cvtColor(arr, cv2.COLOR_GRAY2RGB)

    original_size = arr.shape[:2]
    height, width = arr.shape[:2]

    # Upscale (never downscale) images whose sides fall below the model minimum.
    if height < min_size or width < min_size:
        target = (max(width, min_size), max(height, min_size))  # cv2.resize takes (w, h)
        arr = cv2.resize(arr, target, interpolation=cv2.INTER_LINEAR)

    scaled = arr.astype(np.float32) / 255.0
    tensor = torch.tensor(scaled).permute(2, 0, 1).unsqueeze(0)  # HWC -> NCHW with batch dim
    return tensor, original_size
54
+
55
def postprocess_output(output, original_size):
    """Turn raw model logits into a hard 0/1 float mask at the original resolution.

    output: logits tensor from the network.
    original_size: (height, width) tuple to restore after any preprocessing resize.
    """
    # Sigmoid probabilities thresholded at 0.5 give a binary mask.
    hard_mask = (output.sigmoid() > 0.5).float()
    mask = hard_mask.squeeze().cpu().numpy()
    # Undo the preprocessing resize if one happened; nearest-neighbour keeps
    # the mask strictly binary (no interpolated fractional values).
    if mask.shape != original_size:
        mask = cv2.resize(mask, (original_size[1], original_size[0]), interpolation=cv2.INTER_NEAREST)
    return mask
62
+
63
def load_model(model_path):
    """Load trained U-Net (EfficientNet-B5 encoder) weights from `model_path`
    and return the model on DEVICE in eval mode.

    NOTE(review): torch.load deserializes via pickle — only load checkpoint
    files from a trusted source; consider passing weights_only=True on
    PyTorch versions that support it.
    """
    model = PetModel("unet", "efficientnet-b5", in_channels=3, out_classes=1)
    model.load_state_dict(torch.load(model_path, map_location=DEVICE))
    model = model.to(DEVICE)
    model.eval()
    return model
69
+
70
# Load the segmentation model once at import time so every request reuses it.
model = load_model("segmentation_model_final.pth")
71
+
72
def fig_to_image(fig):
    """Render a Matplotlib figure to an in-memory PNG and return it as a PIL image.

    The figure is closed after rendering: pyplot keeps a global reference to
    every figure it creates, so without plt.close() each request in this
    long-running Gradio app leaks a live figure and memory grows unbounded.
    """
    buf = BytesIO()
    canvas = FigureCanvas(fig)
    canvas.print_png(buf)
    plt.close(fig)  # release pyplot's global reference to the figure
    buf.seek(0)
    return Image.open(buf)
78
+
79
+ def analyze(image):
80
+ input_tensor, original_size = preprocess_image(image)
81
+ input_tensor = input_tensor.to(DEVICE)
82
+
83
+ with torch.no_grad():
84
+ output = model(input_tensor)
85
+
86
+ prediction_mask = postprocess_output(output, original_size)
87
+ image_np = np.array(image)
88
+ gray_img = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
89
+ label_img = (prediction_mask * 255).astype(np.uint8)
90
 
 
91
  _, bw = cv2.threshold(gray_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
92
  contours, _ = cv2.findContours(bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
93
  contours = sorted(contours, key=cv2.contourArea, reverse=True)
94
  if not contours:
95
+ return None, None, None, "No contour found."
96
 
97
  boundary = contours[0].squeeze()
98
  dist_matrix = cdist(boundary, boundary)
99
  i, j = np.unravel_index(np.argmax(dist_matrix), dist_matrix.shape)
100
  line_pts = np.array([boundary[i], boundary[j]])
101
  pixel_diameter = np.linalg.norm(boundary[i] - boundary[j])
102
+ pixels_per_mm = pixel_diameter / DIAMETER_MM
103
  pixel_length_mm = 1 / pixels_per_mm
104
  line_length_mm = pixel_diameter * pixel_length_mm
105
 
106
+ fig1 = plt.figure(figsize=(6, 6))
107
+ plt.imshow(image_np)
108
+ plt.plot(boundary[:, 0], boundary[:, 1], 'g', linewidth=2)
109
+ plt.plot(line_pts[:, 0], line_pts[:, 1], 'r', linewidth=2)
110
+ plt.title(f"Calibration Line: {line_length_mm:.2f} mm")
111
+ plt.axis("off")
112
+ img1 = fig_to_image(fig1)
113
 
114
+ binary_mask = (label_img > 127).astype(np.uint8)
115
+ color_mask = cv2.cvtColor(label_img, cv2.COLOR_GRAY2BGR)
 
 
 
 
116
 
117
+ feret_lengths, feret_widths, rectangles = [], [], []
 
 
118
  contours_mask, _ = cv2.findContours(binary_mask * 255, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
119
  for cnt in contours_mask:
120
  if len(cnt) >= 5:
 
126
  feret_widths.append(min(width, height))
127
  rectangles.append((box, feret_length))
128
 
129
+ thresholds = np.percentile(feret_lengths, [20, 40, 60, 80]) if feret_lengths else [0]*4
130
+ colors = [(0,0,255),(0,128,255),(0,255,255),(0,255,0),(255,0,0)]
 
131
  for box, length in rectangles:
132
  if length <= thresholds[0]: color = colors[0]
133
  elif length <= thresholds[1]: color = colors[1]
 
136
  else: color = colors[4]
137
  cv2.drawContours(color_mask, [box], 0, color, 3)
138
 
139
+ fig2 = plt.figure(figsize=(6, 6))
140
+ plt.imshow(cv2.cvtColor(color_mask, cv2.COLOR_BGR2RGB))
141
+ plt.title("Feret Rectangles (Colored by Size)")
142
+ plt.axis("off")
143
+ img2 = fig_to_image(fig2)
 
 
 
 
 
 
 
 
 
144
 
 
145
  labeled_img = label(binary_mask)
146
  props = regionprops(labeled_img)
147
  centroids = np.array([p.centroid for p in props])
148
 
149
+ edge_lengths = []
150
+ fig3 = plt.figure(figsize=(6, 6))
151
+ plt.imshow(label_img, cmap="gray")
152
  if len(centroids) >= 3:
153
  tri = Delaunay(centroids)
154
+ plt.triplot(centroids[:, 1], centroids[:, 0], tri.simplices.copy(), color="red")
 
 
 
155
  for simplex in tri.simplices:
156
  for i in range(3):
157
  pt1 = centroids[simplex[i]]
158
+ pt2 = centroids[simplex[(i + 1) % 3]]
159
  dist_px = np.linalg.norm(pt1 - pt2)
160
  dist_mm = dist_px * pixel_length_mm
161
  edge_lengths.append(dist_mm)
162
  midpoint = (pt1 + pt2) / 2
163
+ plt.text(midpoint[1], midpoint[0], f"{dist_mm:.1f}", color="blue", fontsize=6, ha="center")
164
+ plt.title("Delaunay Triangulation")
165
+ else:
166
+ plt.title("Not Enough Aggregates for Triangulation")
167
+ plt.axis("off")
168
+ img3 = fig_to_image(fig3)
169
 
170
+ num_white_pixels = np.sum(binary_mask == 1)
171
+ num_nonblack_pixels = np.count_nonzero(gray_img)
172
+ aggregate_area_mm2 = num_white_pixels * (pixel_length_mm ** 2)
173
+ total_area_mm2 = num_nonblack_pixels * (pixel_length_mm ** 2)
174
+ aggregate_ratio = aggregate_area_mm2 / total_area_mm2 if total_area_mm2 > 0 else 0
175
+
176
+ if feret_lengths:
177
+ avg_feret_length_mm = np.mean(feret_lengths) * pixel_length_mm
178
+ avg_feret_width_mm = np.mean(feret_widths) * pixel_length_mm
179
+ max_feret_length_mm = np.max(feret_lengths) * pixel_length_mm
180
+ roundness_aggregate = avg_feret_length_mm / avg_feret_width_mm
181
  else:
182
+ avg_feret_length_mm = avg_feret_width_mm = max_feret_length_mm = roundness_aggregate = 0
183
+
184
+ summary = f"""📏 **Measurements Summary**:
185
+ - Pixel Size: `{pixel_length_mm:.4f}` mm/pixel
186
+ - Aggregate Area: `{aggregate_area_mm2:.2f}` mm²
187
+ - Aggregate Ratio: `{aggregate_ratio:.4f}`
188
+ - Avg Aggregate Length: `{avg_feret_length_mm:.2f}` mm
189
+ - Avg Aggregate Width: `{avg_feret_width_mm:.2f}` mm
190
+ - Max Aggregate Length: `{max_feret_length_mm:.2f}` mm
191
+ - Aggregate Roundness: `{roundness_aggregate:.2f}`
 
 
 
192
  """
193
  if edge_lengths:
194
+ summary += f"- Avg Inter-Aggregate Distance: `{np.mean(edge_lengths):.2f}` mm\n"
195
+ summary += f"- Max Inter-Aggregate Distance: `{np.max(edge_lengths):.2f}` mm\n"
 
 
196
 
197
+ return img1, img2, img3, summary
198
 
 
199
# Gradio UI: one uploaded image in; three rendered plots plus a Markdown summary out.
demo = gr.Interface(
    fn=analyze,
    inputs=gr.Image(type="pil", label="Upload Concrete Image"),
    outputs=[
        gr.Image(label="Boundary & Calibration Line"),
        gr.Image(label="Feret Rectangles"),
        gr.Image(label="Delaunay Triangulation"),
        gr.Markdown(label="Summary Measurements"),
    ],
    title="Concrete Aggregate Analysis App",
    description="Upload a concrete image. The model will segment aggregates and analyze their distribution and shape.",
)
211
 
212
# Start the web server only when run as a script (not when imported as a module).
if __name__ == "__main__":
    demo.launch()