rmlaylo commited on
Commit
9101dd7
·
verified ·
1 Parent(s): 5209afc

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -1181
app.py DELETED
@@ -1,1181 +0,0 @@
1
- # --- Standard Library ---
2
- import os
3
- import re
4
- import tempfile
5
- from io import BytesIO
6
- from pathlib import Path
7
- import base64
8
-
9
- # --- Third-party Libraries ---
10
- import numpy as np
11
- import pandas as pd
12
- import matplotlib.pyplot as plt
13
- import matplotlib.patches as mpatches
14
- from matplotlib import font_manager
15
- from PIL import Image
16
- import gradio as gr
17
- import seaborn as sns
18
- from roboflow import Roboflow
19
-
20
- # --- Machine Learning ---
21
- from sklearn.ensemble import GradientBoostingRegressor
22
- from sklearn.metrics import mean_absolute_error
23
- from sklearn.model_selection import LeaveOneOut
24
-
25
-
26
# Global Styling Setup (Ruda + seaborn white)

ruda_font = None  # FontProperties for Ruda, or None if the font file is missing
try:
    # Register the bundled Ruda font file with Matplotlib's font manager.
    font_path = "Ruda-Regular.ttf"
    font_manager.fontManager.addfont(font_path)
    ruda_font = font_manager.FontProperties(fname=font_path)
    plt.rcParams['font.family'] = ruda_font.get_name()
    print(f"Successfully loaded font: {ruda_font.get_name()}")
except Exception:
    print("--- FONT WARNING ---")
    print("Ruda font not found. Plots will use Matplotlib's default font.")
    plt.rcParams['font.family'] = 'sans-serif'

# BUG FIX: the original called ruda_font.get_name() unconditionally here,
# which raised AttributeError whenever the font failed to load (ruda_font
# is None in that case), defeating the fallback branch above.
if ruda_font is not None:
    sns.set_theme(style="white", font=ruda_font.get_name())
else:
    sns.set_theme(style="white")

# Accent color taken from the predicted progress gradient (first color)
ACCENT_COLOR = "#111827"

# NOTE: sns.set_theme() resets rcParams, so these overrides must stay below it.
plt.rcParams.update({
    "axes.spines.top": False,
    "axes.spines.right": False,
    "axes.titlesize": 10,
    "axes.labelsize": 9,
    "xtick.labelsize": 8,
    "ytick.labelsize": 8,
    "legend.fontsize": 8,
})
55
-
56
-
57
def _style_axes(ax):
    """
    Apply the app's shared axes look:
    - white background
    - top/right/left spines hidden
    - thick accent-colored bottom border
    """
    ax.set_facecolor("white")
    for name in ("top", "right", "left"):
        spine = ax.spines.get(name)
        if spine is not None:
            spine.set_visible(False)
    bottom = ax.spines.get("bottom")
    if bottom is not None:
        bottom.set_visible(True)
        bottom.set_linewidth(2)
        bottom.set_color(ACCENT_COLOR)
72
-
73
-
74
############################################################
# 1. Config: colors, indices, paths, Roboflow model
############################################################

# RGBA lookup table indexed by segmentation class id (0..9).
colors = np.array([
    [0, 0, 0, 80],        # 0 background (black, semi-transparent)

    [255, 0, 0, 128],     # 1 beam-concrete
    [255, 128, 0, 128],   # 2 beam-formwork
    [255, 255, 0, 128],   # 3 beam-rebar

    [0, 255, 0, 128],     # 4 columns-concrete
    [0, 255, 255, 128],   # 5 columns-formwork
    [0, 128, 255, 128],   # 6 columns-rebar

    [0, 0, 255, 128],     # 7 wall-concrete
    [128, 0, 255, 128],   # 8 wall-formwork
    [255, 0, 255, 128],   # 9 wall-rebar
], dtype=np.uint8)

NUM_CLASSES = len(colors)  # 10 (indices 0..9)

# Indices by stage type
CONCRETE_IDX = [1, 4, 7]
FORMWORK_IDX = [2, 5, 8]
REBAR_IDX = [3, 6, 9]

# Indices by structural group
BEAM_IDX = [1, 2, 3]
COLUMNS_IDX = [4, 5, 6]
WALL_IDX = [7, 8, 9]

# Base folder and demo folder. FIX: the original hard-coded an absolute
# Windows path, breaking every other machine; allow an environment
# override while keeping the original value as the fallback default.
BASE_DIR = Path(os.environ.get(
    "STRIDE_DATA_DIR",
    r"C:\Users\Thinkpad P16\OneDrive - Department of Education\Desktop\ECAIR\DUNONG\stride\miscellaneous\Progress Reports and Photos",
))
DEMO_DIR = BASE_DIR / "demo"

# Roboflow model.
# SECURITY: the API key was committed in source; prefer the
# ROBOFLOW_API_KEY environment variable (fallback kept for compatibility —
# the committed key should be rotated).
rf = Roboflow(api_key=os.environ.get("ROBOFLOW_API_KEY", "9voC8YnnNJ4DQRry6gfd"))
project = rf.workspace().project("eagle.ai-str-components-v2-vhblf")
model = project.version(8).model
116
-
117
-
118
- ############################################################
119
- # 2. Utility functions: image prep, mask decoding, legend
120
- ############################################################
121
-
122
def _prepare_image_for_roboflow(path: str) -> str:
    """
    Flatten any transparency onto a white background and write the result
    as a temporary JPEG; return the JPEG's path for the Roboflow client.
    """
    src = Path(path)
    img = Image.open(src)

    has_alpha = img.mode in ("RGBA", "LA") or (
        img.mode == "P" and "transparency" in img.info
    )
    if has_alpha:
        rgba = img if img.mode == "RGBA" else img.convert("RGBA")
        flattened = Image.new("RGB", rgba.size, (255, 255, 255))
        # The last band of an RGBA image is the alpha channel.
        flattened.paste(rgba, mask=rgba.split()[-1])
        img = flattened
    else:
        img = img.convert("RGB")

    out_path = Path(tempfile.gettempdir()) / f"{src.stem}_rf.jpg"
    img.save(out_path, format="JPEG", quality=90)
    return str(out_path)
142
-
143
-
144
- def _roboflow_ready_path(original_path: str) -> str:
145
- """
146
- Return a path Roboflow can ingest (JPEG).
147
- PNGs with alpha get flattened; JPGs pass through.
148
- """
149
- p = Path(original_path)
150
- ext = p.suffix.lower()
151
- if ext in (".jpg", ".jpeg"):
152
- return str(p) # already JPEG-compatible
153
- return _prepare_image_for_roboflow(str(p))
154
-
155
-
156
- def _decode_mask_to_array(result_json) -> np.ndarray:
157
- """
158
- Decode base64 segmentation mask to a numpy array of class indices.
159
- """
160
- preds = result_json.get("predictions", [])
161
- if not preds:
162
- raise ValueError("No predictions returned by the model.")
163
- mask_base64 = preds[0]["segmentation_mask"]
164
- mask_bytes = base64.b64decode(mask_base64)
165
- mask_img = Image.open(BytesIO(mask_bytes))
166
- return np.array(mask_img)
167
-
168
-
169
def _make_legend(class_map, colors_lut: np.ndarray):
    """
    Build grouped legend handles (Beams / Columns / Walls) with invisible
    spacer entries before and between the groups.
    """

    def material_name(label: str) -> str:
        # "beam-concrete" -> "Concrete"
        return label.split("-", 1)[1].capitalize()

    def color_patch(idx: int, label: str) -> mpatches.Patch:
        rgb = colors_lut[idx][:3]
        return mpatches.Patch(color=np.array(rgb) / 255.0, label=label)

    def spacer() -> mpatches.Patch:
        # Fully transparent patch used purely for vertical spacing.
        return mpatches.Patch(color=(0, 0, 0, 0), label=" ")

    grouped = {"beam": [], "column": [], "wall": []}
    for key, label in class_map.items():
        lowered = label.lower()
        for token in grouped:
            if token in lowered:
                grouped[token].append((int(key), label))
                break

    handles = [spacer()]

    group_titles = [("beam", "Beams"), ("column", "Columns"), ("wall", "Walls")]
    for position, (token, title) in enumerate(group_titles):
        entries = grouped[token]
        if not entries:
            continue
        handles.append(mpatches.Patch(color='none', label=title))
        for idx, label in sorted(entries, key=lambda item: item[0]):
            handles.append(color_patch(idx, "    " + material_name(label)))
        if position < len(group_titles) - 1:
            handles.append(spacer())

    return handles
227
-
228
-
229
- ############################################################
230
- # 3. Segmentation & overlay helpers
231
- ############################################################
232
-
233
def get_mask_from_image(img_path: str):
    """
    Run Roboflow segmentation on an image.

    Returns (mask_array, result_json): the decoded per-pixel class-index
    array and the raw model response.
    """
    inference_path = _roboflow_ready_path(img_path)
    result_json = model.predict(inference_path).json()
    return _decode_mask_to_array(result_json), result_json
241
-
242
-
243
def make_overlay_image(
    img_path: str,
    mask_array: np.ndarray,
    result_json,
    alpha_blend: bool = True
) -> Image.Image:
    """
    Compose the original photo with its colorized segmentation mask and a
    grouped class legend; return the result as an RGB PIL image that
    Gradio can display.

    Raises IndexError when the mask contains a class index outside the
    `colors` lookup table.
    """
    base = Image.open(img_path).convert("RGBA")

    max_idx = mask_array.max()
    if max_idx >= len(colors):
        raise IndexError(
            f"Mask contains class index {max_idx} but colors size is {len(colors)}."
        )

    color_mask = colors[mask_array]

    if color_mask.shape[-1] == 3:
        # LUT produced RGB only; synthesize the alpha band.
        fill = 128 if alpha_blend else 255
        alpha_band = np.full(color_mask.shape[:2] + (1,), fill, dtype=np.uint8)
        color_mask = np.concatenate([color_mask, alpha_band], axis=-1)
    elif alpha_blend and np.all(color_mask[..., 3] == 255):
        # Fully opaque mask: knock it down to half transparency.
        color_mask[..., 3] = 128

    tinted = Image.fromarray(color_mask, mode="RGBA").resize(
        base.size, Image.NEAREST
    )
    composited = Image.alpha_composite(base, tinted)

    legend_handles = _make_legend(
        result_json["predictions"][0]["class_map"], colors
    )

    fig, ax = plt.subplots(figsize=(8, 6))
    ax.imshow(composited)
    ax.axis("off")
    ax.legend(
        handles=legend_handles,
        loc="center left",
        bbox_to_anchor=(1.01, 0.5),  # keep the legend close to the image
        borderaxespad=0.2,
        frameon=False,
        title="Classes",
        title_fontsize=7,
        prop={"size": 7},
        labelspacing=0.2,
        handlelength=0.8,
        handleheight=0.8,
        handletextpad=0.4,
    )

    plt.tight_layout()

    png_buf = BytesIO()
    fig.savefig(png_buf, format="png", bbox_inches="tight", dpi=150)
    plt.close(fig)
    png_buf.seek(0)
    return Image.open(png_buf).convert("RGB")
310
-
311
-
312
- ############################################################
313
- # 4. Feature extraction from mask
314
- ############################################################
315
-
316
def extract_class_features(mask_array: np.ndarray, num_classes: int = NUM_CLASSES):
    """
    Count pixels per class in a segmentation mask.

    Returns (counts, ratios): absolute pixel counts per class index and
    their fraction of the whole mask (all zeros for an empty mask).
    """
    counts = np.bincount(mask_array.flatten(), minlength=num_classes)
    n_pixels = mask_array.size
    if n_pixels == 0:
        ratios = np.zeros_like(counts, dtype=float)
    else:
        ratios = counts / n_pixels
    return counts, ratios
325
-
326
-
327
def aggregate_stage_features(ratios: np.ndarray):
    """
    Aggregate per-class pixel ratios into stage totals, structural-group
    totals, finished/in-progress totals, and the pairwise stage ratios
    C/F, F/R and R/C.
    """
    concrete = ratios[CONCRETE_IDX].sum()
    formwork = ratios[FORMWORK_IDX].sum()
    rebar = ratios[REBAR_IDX].sum()

    eps = 1e-6  # guards against division by zero for absent stages
    return {
        "ratio_concrete": float(concrete),
        "ratio_formwork": float(formwork),
        "ratio_rebar": float(rebar),

        "ratio_beams": float(ratios[BEAM_IDX].sum()),
        "ratio_columns": float(ratios[COLUMNS_IDX].sum()),
        "ratio_walls": float(ratios[WALL_IDX].sum()),

        # Concrete counts as finished work; formwork + rebar as in progress.
        "ratio_finished": float(concrete),
        "ratio_in_progress": float(formwork + rebar),

        "ratio_cf": float(concrete / (formwork + eps)),  # C/F
        "ratio_fr": float(formwork / (rebar + eps)),     # F/R
        "ratio_rc": float(rebar / (concrete + eps)),     # R/C
    }
364
-
365
-
366
- ############################################################
367
- # 5. Parse progress from filename (for training only)
368
- ############################################################
369
-
370
def parse_progress_from_filename(fname: str):
    """
    Extract the progress label (first run of digits, an integer percent)
    embedded in a training filename; return None when no digits exist.
    """
    match = re.search(r"\d+", fname)
    return int(match.group(0)) if match else None
375
-
376
-
377
- ############################################################
378
- # 6. Build dataset from demo folder
379
- ############################################################
380
-
381
def build_progress_dataset(demo_dir: Path = DEMO_DIR) -> pd.DataFrame:
    """
    Build the training table: one row per labeled demo photo, combining
    the filename-derived progress label with the aggregate and per-class
    segmentation features.

    Raises FileNotFoundError when the folder is missing and RuntimeError
    when no labeled images are found.
    """
    if not demo_dir.exists():
        raise FileNotFoundError(f"Demo directory not found: {demo_dir}")

    rows = []
    for fname in os.listdir(demo_dir):
        if not fname.lower().endswith((".jpg", ".jpeg", ".png")):
            continue

        progress = parse_progress_from_filename(fname)
        if progress is None:
            continue  # unlabeled photo: skip

        mask, _ = get_mask_from_image(str(demo_dir / fname))
        _, ratios = extract_class_features(mask, num_classes=NUM_CLASSES)

        row = {"filename": fname, "progress": progress}
        row.update(aggregate_stage_features(ratios))
        row.update(
            {f"ratio_class_{i}": float(ratios[i]) for i in range(1, NUM_CLASSES)}
        )
        rows.append(row)

    df = pd.DataFrame(rows)
    if df.empty:
        raise RuntimeError("No valid labeled images found in demo_dir.")
    return df
418
-
419
-
420
- ############################################################
421
- # 7. Train Gradient Boosting regressor + LOO validation
422
- ############################################################
423
-
424
def train_progress_regressor(df: pd.DataFrame):
    """
    Train a GradientBoostingRegressor on the aggregate stage features,
    reporting leave-one-out MAE, then refit on every sample.

    Returns (fitted_model, feature_cols).
    """
    feature_cols = [
        "ratio_concrete", "ratio_formwork", "ratio_rebar",
        "ratio_beams", "ratio_columns", "ratio_walls",
        "ratio_finished", "ratio_in_progress",
        "ratio_cf", "ratio_fr", "ratio_rc",  # C/F, F/R, R/C
    ]

    print("Using feature columns:")
    for col in feature_cols:
        print("  ", col)

    X = df[feature_cols].values
    y = df["progress"].values

    regressor = GradientBoostingRegressor(
        n_estimators=100,
        learning_rate=0.1,
        max_depth=2,
        random_state=42,
    )

    print("\n=== Gradient Boosting: Leave-One-Out Cross-Validation ===")
    preds, trues = [], []
    for train_idx, test_idx in LeaveOneOut().split(X):
        regressor.fit(X[train_idx], y[train_idx])
        preds.append(regressor.predict(X[test_idx])[0])
        trues.append(y[test_idx][0])

    mae = mean_absolute_error(trues, preds)
    print(f"  MAE: {mae:.3f} percentage points")
    print("  Predictions vs True:")
    for fn, yt, yp in zip(df["filename"], trues, preds):
        print(f"    {fn:15s} true={yt:3d} pred={yp:6.2f}")

    # Final model: refit on the full dataset.
    regressor.fit(X, y)
    print("\nFitted GradientBoostingRegressor on all samples.")

    return regressor, feature_cols
471
-
472
-
473
- ############################################################
474
- # 8. Aggregate features over any number of images
475
- ############################################################
476
-
477
def aggregate_features_over_images(image_paths, feature_cols):
    """
    Segment every image, average the aggregate and per-class features
    over all of them, and collect the annotated overlay images.

    Returns (feature_vector, avg_aggregates, avg_per_class, summed_counts,
    overlays, first_class_map, n_images).

    Raises ValueError when image_paths is empty.
    """
    n_used = len(image_paths)
    if n_used == 0:
        raise ValueError("No image paths provided for aggregation.")

    agg_sums = None
    ratio_sums = np.zeros(NUM_CLASSES, dtype=float)
    count_sums = np.zeros(NUM_CLASSES, dtype=float)
    overlays = []
    class_map_first = None

    for img_path in image_paths:
        mask, result_json = get_mask_from_image(img_path)
        counts, ratios = extract_class_features(mask, num_classes=NUM_CLASSES)

        overlays.append(make_overlay_image(img_path, mask, result_json))

        # Remember the class map of the first prediction for legend reuse.
        if class_map_first is None:
            class_map_first = result_json["predictions"][0]["class_map"]

        agg = aggregate_stage_features(ratios)
        if agg_sums is None:
            agg_sums = {key: float(value) for key, value in agg.items()}
        else:
            for key, value in agg.items():
                agg_sums[key] += float(value)

        ratio_sums += ratios
        count_sums += counts

    agg_avg = {key: total / n_used for key, total in agg_sums.items()}
    per_class_avg = {
        f"ratio_class_{i}": float(ratio_sums[i] / n_used)
        for i in range(1, NUM_CLASSES)
    }

    merged = {**agg_avg, **per_class_avg}
    feat_vector = np.array([[merged[col] for col in feature_cols]])

    return (
        feat_vector,
        agg_avg,
        per_class_avg,
        count_sums,
        overlays,
        class_map_first,
        n_used,
    )
528
-
529
-
530
############################################################
# 9. Train model once at startup
############################################################

# Importing this module triggers a full training pass: segment every demo
# photo, build the feature table, then fit the progress regressor.
print("Building dataset from demo images...")
df = build_progress_dataset()
print("\nDataset:")
print(df)

# best_model / feat_cols are read later by analyze_image and analyze_images.
best_model, feat_cols = train_progress_regressor(df)
540
-
541
-
542
- ############################################################
543
- # 10. Single-image prediction (Tab 1)
544
- ############################################################
545
-
546
def _stage_coverage_fig(conc_pct, form_pct, reb_pct):
    """Pie chart of Concrete/Formwork/Rebar shares of detected pixels."""
    palette = {
        "Concrete": "#9e9e9e",  # light concrete gray
        "Formwork": "#d97706",  # vivid wood/plywood orange-brown
        "Rebar": "#b7410e",
    }
    labels = ["Concrete", "Formwork", "Rebar"]
    values = [conc_pct, form_pct, reb_pct]

    fig, ax = plt.subplots(figsize=(3.0, 3.0))
    if sum(values) > 0:
        _, _, autotexts = ax.pie(
            values,
            labels=labels,
            colors=[palette[l] for l in labels],
            autopct="%1.1f%%",
            pctdistance=0.78,
            labeldistance=1.1,
            startangle=90,
            textprops={"fontsize": 8},
        )
        for autotext in autotexts:
            autotext.set_fontsize(7)
        ax.axis("equal")
    else:
        ax.text(
            0.5, 0.5,
            "No detected objects",
            ha="center", va="center",
            fontsize=8,
        )
        ax.axis("off")

    _style_axes(ax)
    fig.tight_layout()
    return fig


def _stage_ratios_fig(agg):
    """Bar chart of the pairwise stage ratios C/F, F/R, R/C."""
    df_ratios = pd.DataFrame({
        "Ratio": ["C/F", "F/R", "R/C"],
        "Value": [agg["ratio_cf"], agg["ratio_fr"], agg["ratio_rc"]],
    })
    # Palette for ratios – concrete, wood, metal themed
    palette = {
        "C/F": "#9e9e9e",
        "F/R": "#d97706",
        "R/C": "#b7410e",
    }

    fig, ax = plt.subplots(figsize=(3.0, 3.0))
    sns.barplot(
        data=df_ratios,
        x="Ratio",
        y="Value",
        ax=ax,
        palette=[palette[r] for r in df_ratios["Ratio"]],
    )
    ax.set_ylabel("Ratio", fontsize=8)
    ax.set_xlabel("", fontsize=8)
    ax.tick_params(axis='both', labelsize=8)
    ax.legend(
        handles=[
            mpatches.Patch(color='none', label="C = Concrete"),
            mpatches.Patch(color='none', label="F = Formwork"),
            mpatches.Patch(color='none', label="R = Rebar"),
        ],
        loc="upper right",
        frameon=False,
        fontsize=7,
    )
    _style_axes(ax)
    fig.tight_layout()
    return fig


def _objects_heatmap_fig(class_counts):
    """3x3 heatmap (structural group x stage), each cell tinted with its class color scaled by its share of detected pixels."""
    object_total = int(sum(class_counts[1:]))
    heat_counts = np.zeros((3, 3), dtype=float)

    if object_total > 0:
        for idx in range(1, NUM_CLASSES):
            c_val = class_counts[idx]
            if c_val <= 0:
                continue
            # Row: structural group.
            if idx in BEAM_IDX:
                row = 0
            elif idx in COLUMNS_IDX:
                row = 1
            elif idx in WALL_IDX:
                row = 2
            else:
                continue
            # Column: construction stage.
            if idx in CONCRETE_IDX:
                col = 0
            elif idx in FORMWORK_IDX:
                col = 1
            elif idx in REBAR_IDX:
                col = 2
            else:
                continue
            heat_counts[row, col] += c_val
        heat_pct = (heat_counts / object_total) * 100.0
    else:
        heat_pct = np.zeros((3, 3), dtype=float)

    # Cell color: white (0%) blended toward the class's overlay color (100%).
    idx_grid = np.array([[1, 2, 3],
                         [4, 5, 6],
                         [7, 8, 9]])
    rgb_img = np.zeros((3, 3, 3), dtype=float)
    for r in range(3):
        for c in range(3):
            base_rgb = colors[idx_grid[r, c]][:3] / 255.0
            alpha = np.clip(heat_pct[r, c] / 100.0, 0.0, 1.0)
            rgb_img[r, c, :] = (1 - alpha) * np.array([1.0, 1.0, 1.0]) + alpha * base_rgb

    fig, ax = plt.subplots(figsize=(3.0, 3.0))
    # extent aligns cell centers to integer tick positions.
    ax.imshow(rgb_img, aspect="equal", extent=(-0.5, 2.5, 2.5, -0.5))
    ax.set_xlim(-0.5, 2.5)
    ax.set_ylim(2.5, -0.5)

    # Light gray borders between cells (including the outer border).
    for pos in np.arange(-0.5, 3.0, 1.0):
        ax.axvline(pos, color="#d1d5db", linewidth=0.8, zorder=3, clip_on=False)
        ax.axhline(pos, color="#d1d5db", linewidth=0.8, zorder=3, clip_on=False)

    ax.set_xticks(np.arange(3))
    ax.set_yticks(np.arange(3))
    ax.set_xticklabels(["Concrete", "Formwork", "Rebar"], fontsize=8)
    ax.set_yticklabels(["Beams", "Columns", "Walls"], fontsize=8)
    ax.tick_params(which="both", length=0)

    # Annotate every cell with its percentage (black text on light tint).
    for r in range(3):
        for c in range(3):
            ax.text(
                c, r,
                f"{heat_pct[r, c]:.1f}%",
                ha="center",
                va="center",
                fontsize=7,
                color="black",
                zorder=4,
            )

    ax.set_xlabel("Stage", fontsize=8)
    ax.set_ylabel("Structural group", fontsize=8)
    _style_axes(ax)
    fig.tight_layout()
    return fig


def analyze_image(image_path):
    """
    Tab 1 handler: segment a single photo, predict its progress and build
    the annotated overlay plus the three summary figures.

    REFACTOR: the original was a single ~260-line function mixing
    prediction, HTML and three chart builders (and left `wedges, texts`
    unused); the chart code now lives in the private helpers above.

    Returns (overlay_image, summary_html, stage_pie_fig, stage_ratio_fig,
    objects_heatmap_fig); the figure slots are None when no image is given.
    """
    if image_path is None:
        return (
            None,
            "<div>Please upload an image.</div>",
            None,
            None,
            None,
        )

    mask, result_json = get_mask_from_image(image_path)
    overlay_img = make_overlay_image(image_path, mask, result_json)

    counts, ratios = extract_class_features(mask, num_classes=NUM_CLASSES)
    agg = aggregate_stage_features(ratios)
    feat_dict = {
        **agg,
        **{f"ratio_class_{i}": float(ratios[i]) for i in range(1, NUM_CLASSES)},
    }
    x = np.array([[feat_dict[c] for c in feat_cols]])
    pred = float(best_model.predict(x)[0])

    # ------- Predicted progress card (only score here) -------
    summary_html = f"""
    <div>
      <div style="
          border:1px solid #d1d5db;
          border-radius:16px;
          overflow:hidden;
          background:#f9fafb;
          box-shadow:0 1px 2px rgba(0,0,0,0.03);
      ">
        <div style="
            height:6px;
            background:linear-gradient(90deg,#1d4ed8,#9333ea,#dc2626);
        "></div>
        <div style="padding:12px 16px;">
          <div style="text-align:center;">
            <div style="font-size:13px;color:#6b7280;">Predicted progress</div>
            <div style="font-size:30px;font-weight:700;color:#1d4ed8;">
              {pred:.2f}%
            </div>
          </div>
        </div>
      </div>

      <div style="margin-top:8px;font-size:11px;color:#4b5563;line-height:1.4;">
        <div style="margin-bottom:6px;">
          <strong>Stage coverage</strong> – pie chart showing the share of detected pixels
          belonging to <em>Concrete</em>, <em>Formwork</em>, and <em>Rebar</em> inside the detected objects.
        </div>

        <div style="margin-bottom:6px;">
          <strong>Stage ratios (C/F, F/R, R/C)</strong> – bar chart summarizing how advanced the
          structure is based on the ratios of Concrete to Formwork, Formwork to Rebar, and Rebar to Concrete.
        </div>

        <div>
          <strong>Objects heatmap</strong> – 3×3 matrix indicating where detected components are
          concentrated across <em>Beams</em>, <em>Columns</em>, and <em>Walls</em> for each construction stage.
        </div>
      </div>
    </div>
    """

    # Normalize the three stage totals to percentages of detected pixels.
    conc = agg["ratio_concrete"]
    form = agg["ratio_formwork"]
    reb = agg["ratio_rebar"]
    det_sum = conc + form + reb
    if det_sum > 0:
        conc_pct = conc / det_sum * 100.0
        form_pct = form / det_sum * 100.0
        reb_pct = reb / det_sum * 100.0
    else:
        conc_pct = form_pct = reb_pct = 0.0

    return (
        overlay_img,
        summary_html,
        _stage_coverage_fig(conc_pct, form_pct, reb_pct),
        _stage_ratios_fig(agg),
        _objects_heatmap_fig(counts),
    )
806
-
807
-
808
- ############################################################
809
- # 11. Multi-image aggregated prediction (Tab 2)
810
- ############################################################
811
-
812
def _aggregate_summary_figs(agg_avg, class_counts_sum, class_map_first):
    """
    Build the three multi-image summary figures: stage-coverage pie,
    stage-ratio bars, and the structural-group x stage heatmap.

    Returns (fig_stage_cov, fig_stage_ratios, fig_objects_agg).
    """
    # Normalize the averaged stage totals to percentages of detected pixels.
    conc = agg_avg["ratio_concrete"]
    form = agg_avg["ratio_formwork"]
    reb = agg_avg["ratio_rebar"]
    det_sum = conc + form + reb
    if det_sum > 0:
        shares = [conc / det_sum * 100.0,
                  form / det_sum * 100.0,
                  reb / det_sum * 100.0]
    else:
        shares = [0.0, 0.0, 0.0]

    # Material-themed palette shared by pie and bars.
    stage_palette = {
        "Concrete": "#9e9e9e",  # light concrete gray
        "Formwork": "#d97706",  # vivid wood/plywood orange-brown
        "Rebar": "#b7410e",
    }

    # --- Stage coverage pie (avg) ---
    fig_stage_cov, ax1 = plt.subplots(figsize=(3.0, 3.0))
    labels = ["Concrete", "Formwork", "Rebar"]
    if sum(shares) > 0:
        _, _, autotexts = ax1.pie(
            shares,
            labels=labels,
            colors=[stage_palette[l] for l in labels],
            autopct="%1.1f%%",
            pctdistance=0.78,
            labeldistance=1.1,
            startangle=90,
            textprops={"fontsize": 8},
        )
        for autotext in autotexts:
            autotext.set_fontsize(7)
        ax1.axis("equal")
    else:
        ax1.text(
            0.5, 0.5,
            "No detected objects",
            ha="center", va="center",
            fontsize=8,
        )
        ax1.axis("off")
    _style_axes(ax1)
    fig_stage_cov.tight_layout()

    # --- Stage ratios bar (avg) C/F, F/R, R/C ---
    fig_stage_ratios, ax3 = plt.subplots(figsize=(3.0, 3.0))
    df_ratios = pd.DataFrame({
        "Ratio": ["C/F", "F/R", "R/C"],
        "Value": [agg_avg["ratio_cf"], agg_avg["ratio_fr"], agg_avg["ratio_rc"]],
    })
    ratio_palette = {"C/F": "#9e9e9e", "F/R": "#d97706", "R/C": "#b7410e"}
    sns.barplot(
        data=df_ratios,
        x="Ratio",
        y="Value",
        ax=ax3,
        palette=[ratio_palette[r] for r in df_ratios["Ratio"]],
    )
    ax3.set_ylabel("Ratio", fontsize=8)
    ax3.set_xlabel("", fontsize=8)
    ax3.tick_params(axis='both', labelsize=8)
    ax3.legend(
        handles=[
            mpatches.Patch(color='none', label="C = Concrete"),
            mpatches.Patch(color='none', label="F = Formwork"),
            mpatches.Patch(color='none', label="R = Rebar"),
        ],
        loc="upper right",
        frameon=False,
        fontsize=7,
    )
    _style_axes(ax3)
    fig_stage_ratios.tight_layout()

    # --- Aggregated objects heatmap with class colors ---
    object_total = int(sum(class_counts_sum[1:]))
    heat_counts = np.zeros((3, 3), dtype=float)
    if object_total > 0 and class_map_first is not None:
        for idx in range(1, NUM_CLASSES):
            c_val = class_counts_sum[idx]
            if c_val <= 0:
                continue
            if idx in BEAM_IDX:
                row = 0
            elif idx in COLUMNS_IDX:
                row = 1
            elif idx in WALL_IDX:
                row = 2
            else:
                continue
            if idx in CONCRETE_IDX:
                col = 0
            elif idx in FORMWORK_IDX:
                col = 1
            elif idx in REBAR_IDX:
                col = 2
            else:
                continue
            heat_counts[row, col] += c_val
        heat_pct = (heat_counts / object_total) * 100.0
    else:
        heat_pct = np.zeros((3, 3), dtype=float)

    # Heatmap colors: white (low) -> class color (high).
    idx_grid = np.array([[1, 2, 3],
                         [4, 5, 6],
                         [7, 8, 9]])
    rgb_img = np.zeros((3, 3, 3), dtype=float)
    for r in range(3):
        for c in range(3):
            base_rgb = colors[idx_grid[r, c]][:3] / 255.0
            alpha = np.clip(heat_pct[r, c] / 100.0, 0.0, 1.0)
            rgb_img[r, c, :] = (1 - alpha) * np.array([1.0, 1.0, 1.0]) + alpha * base_rgb

    fig_objects_agg, ax4 = plt.subplots(figsize=(3.0, 3.0))
    ax4.imshow(rgb_img, aspect="equal", extent=(-0.5, 2.5, 2.5, -0.5))
    ax4.set_xlim(-0.5, 2.5)
    ax4.set_ylim(2.5, -0.5)
    # Light gray borders between cells (including the outer border).
    for pos in np.arange(-0.5, 3.0, 1.0):
        ax4.axvline(pos, color="#d1d5db", linewidth=0.8, zorder=3, clip_on=False)
        ax4.axhline(pos, color="#d1d5db", linewidth=0.8, zorder=3, clip_on=False)
    ax4.set_xticks(np.arange(3))
    ax4.set_yticks(np.arange(3))
    ax4.set_xticklabels(["Concrete", "Formwork", "Rebar"], fontsize=8)
    ax4.set_yticklabels(["Beams", "Columns", "Walls"], fontsize=8)
    ax4.tick_params(which="both", length=0)
    for r in range(3):
        for c in range(3):
            ax4.text(
                c, r,
                f"{heat_pct[r, c]:.1f}%",
                ha="center",
                va="center",
                fontsize=7,
                color="black",
                zorder=4,
            )
    ax4.set_xlabel("Stage", fontsize=8)
    ax4.set_ylabel("Structural group", fontsize=8)
    _style_axes(ax4)
    fig_objects_agg.tight_layout()

    return fig_stage_cov, fig_stage_ratios, fig_objects_agg


def analyze_images(image_paths):
    """
    Tab 2 handler: segment any number of photos, average their features,
    predict overall progress and return the overlays plus summary charts.

    REFACTOR: the original duplicated ~200 lines of chart code from the
    single-image path inside one monolithic function; the figure building
    now lives in _aggregate_summary_figs above.

    Returns (overlays, summary_html, pie_update, ratio_update,
    heatmap_update) where the last three are gr.update payloads.
    """
    if not image_paths:
        return (
            [],
            "<div>Please upload at least one image.</div>",
            gr.update(value=None, visible=False),
            gr.update(value=None, visible=False),
            gr.update(value=None, visible=False),
        )

    # Older Gradio versions deliver dicts with a "name" key; newer ones
    # deliver plain file paths — accept both.
    if isinstance(image_paths[0], dict) and "name" in image_paths[0]:
        img_paths = [f["name"] for f in image_paths]
    else:
        img_paths = image_paths

    (
        feat_vector,
        agg_avg,
        _,
        class_counts_sum,
        overlays,
        class_map_first,
        n_used,
    ) = aggregate_features_over_images(img_paths, feat_cols)

    pred = float(best_model.predict(feat_vector)[0])

    # ------- Multi-image card: progress only, text below -------
    summary_html = f"""
    <div>
      <div style="
          border:1px solid #d1d5db;
          border-radius:16px;
          overflow:hidden;
          background:#f9fafb;
          box-shadow:0 1px 2px rgba(0,0,0,0.03);
      ">
        <div style="
            height:6px;
            background:linear-gradient(90deg,#1d4ed8,#9333ea,#dc2626);
        "></div>
        <div style="padding:12px 16px;">
          <div style="text-align:center;">
            <div style="font-size:13px;color:#6b7280;">
              Predicted progress averaged over {n_used} photo(s)
            </div>
            <div style="font-size:30px;font-weight:700;color:#1d4ed8;">
              {pred:.2f}%
            </div>
          </div>
        </div>
      </div>

      <div style="margin-top:8px;font-size:11px;color:#4b5563;line-height:1.4;">
        <div style="margin-bottom:6px;">
          <strong>Stage coverage</strong> – pie chart showing the share of detected pixels
          belonging to <em>Concrete</em>, <em>Formwork</em>, and <em>Rebar</em> inside the detected objects.
        </div>

        <div style="margin-bottom:6px;">
          <strong>Stage ratios (C/F, F/R, R/C)</strong> – bar chart summarizing how advanced the
          structure is based on the ratios of Concrete to Formwork, Formwork to Rebar, and Rebar to Concrete.
        </div>

        <div>
          <strong>Objects heatmap</strong> – 3×3 matrix indicating where detected components are
          concentrated across <em>Beams</em>, <em>Columns</em>, and <em>Walls</em> for each construction stage.
        </div>
      </div>
    </div>
    """

    fig_pie, fig_bars, fig_heat = _aggregate_summary_figs(
        agg_avg, class_counts_sum, class_map_first
    )

    return (
        overlays,
        summary_html,
        gr.update(value=fig_pie, visible=True),
        gr.update(value=fig_bars, visible=True),
        gr.update(value=fig_heat, visible=True),
    )
1077
-
1078
-
1079
- ############################################################
1080
- # 12. Gradio UI with two tabs
1081
- ############################################################
1082
-
1083
- with gr.Blocks(
1084
- css="""
1085
- button.primary {
1086
- background: linear-gradient(
1087
- 90deg,
1088
- #9333ea 0%,
1089
- #dc2626 100%
1090
- ) !important;
1091
- border: none !important;
1092
- color: white !important;
1093
- font-weight: 600;
1094
- transition: all 0.2s ease;
1095
- }
1096
-
1097
- button.primary:hover {
1098
- filter: brightness(1.05);
1099
- }
1100
-
1101
- button.primary:active {
1102
- filter: brightness(0.95);
1103
- }
1104
- """
1105
- ) as demo:
1106
-
1107
- banner = gr.Image(value=r"strive_banner.png", show_label=False, type="filepath")
1108
-
1109
- # ---------------- Tab 1: Single image -----------------
1110
- with gr.Tab("Single image"):
1111
- with gr.Row():
1112
- with gr.Column(scale=1):
1113
- img_in_single = gr.Image(
1114
- type="filepath",
1115
- label="Upload construction photo"
1116
- )
1117
- run_btn_single = gr.Button("Analyze", variant="primary")
1118
- summary_box_single = gr.HTML(label="Predicted progress")
1119
-
1120
- with gr.Column(scale=2):
1121
- img_out_single = gr.Image(label="Overlayed segmentation + legend")
1122
-
1123
- # 3 plots in one row
1124
- with gr.Row():
1125
- stage_cov_plot_single = gr.Plot(label="Stage coverage")
1126
- stage_ratio_plot_single = gr.Plot(label="Stage ratios")
1127
- objects_plot_single = gr.Plot(label="Objects heatmap")
1128
-
1129
- run_btn_single.click(
1130
- fn=analyze_image,
1131
- inputs=[img_in_single],
1132
- outputs=[
1133
- img_out_single,
1134
- summary_box_single,
1135
- stage_cov_plot_single,
1136
- stage_ratio_plot_single,
1137
- objects_plot_single,
1138
- ],
1139
- )
1140
-
1141
- # ---------------- Tab 2: Multiple images -----------------
1142
- with gr.Tab("Multiple images"):
1143
- with gr.Row():
1144
- with gr.Column(scale=1):
1145
- img_in_multi = gr.Files(
1146
- label="Upload multiple construction photos",
1147
- file_types=["image"],
1148
- )
1149
- run_btn_multi = gr.Button("Analyze all", variant="primary")
1150
- summary_box_multi = gr.HTML(label="Predicted progress (averaged)")
1151
-
1152
- with gr.Column(scale=2):
1153
- overlays_gallery = gr.Gallery(
1154
- label="Overlays",
1155
- show_label=True,
1156
- columns=3,
1157
- height="auto",
1158
- )
1159
-
1160
- with gr.Row():
1161
- stage_cov_plot_multi = gr.Plot(label="Stage coverage (avg)")
1162
- stage_ratio_plot_multi = gr.Plot(label="Stage ratios (avg)")
1163
- objects_plot_multi = gr.Plot(label="Objects heatmap (avg)")
1164
-
1165
- run_btn_multi.click(
1166
- fn=analyze_images,
1167
- inputs=[img_in_multi],
1168
- outputs=[
1169
- overlays_gallery,
1170
- summary_box_multi,
1171
- stage_cov_plot_multi,
1172
- stage_ratio_plot_multi,
1173
- objects_plot_multi,
1174
- ],
1175
- )
1176
-
1177
-
1178
- if __name__ == "__main__":
1179
- demo.launch(
1180
- inbrowser=True
1181
- )