Pream912 commited on
Commit
1f52811
Β·
verified Β·
1 Parent(s): bc97b83

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +972 -1087
app.py CHANGED
@@ -1,1113 +1,998 @@
1
  """
2
- Blueprint Room Extractor β€” Flask Server
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  """
 
4
  from __future__ import annotations
5
 
6
- import base64
7
- import io
8
- import json
9
- import os
10
- import threading
11
- import time
12
- import traceback
13
- import uuid
14
- from typing import Any, Dict, List, Optional
15
 
16
  import cv2
17
  import numpy as np
18
- from flask import Flask, Response, jsonify, render_template_string, request
19
- from PIL import Image
20
-
21
- from wall_pipeline import WallPipeline, _CUPY, _TORCH_CUDA, _CV_CUDA
22
-
23
- # Unified "any GPU active" flag for the UI badge
24
- _GPU = _CUPY or _TORCH_CUDA or _CV_CUDA
25
-
26
- app = Flask(__name__)
27
-
28
- # ── In-memory session store ──────────────────────────────────────────────────
29
- _sessions: Dict[str, Dict] = {}
30
- _sessions_lock = threading.Lock()
31
-
32
-
33
- def _new_session() -> str:
34
- sid = str(uuid.uuid4())
35
- with _sessions_lock:
36
- _sessions[sid] = {
37
- "original_bgr" : None, # np.ndarray
38
- "wall_mask" : None,
39
- "room_mask" : None,
40
- "rooms" : [], # list of room dicts
41
- "stage_images" : {}, # key -> np.ndarray
42
- "calibration" : {},
43
- "door_lines" : [], # manual door seal lines [(x1,y1,x2,y2),...]
44
- "log" : [],
45
- "progress" : 0,
46
- "status" : "idle",
47
- }
48
- return sid
49
-
50
-
51
- def _get_session(sid: str) -> Optional[Dict]:
52
- with _sessions_lock:
53
- return _sessions.get(sid)
54
-
55
-
56
- # ── Image helpers ─────────────────────────────────────────────────────────────
57
- def _bgr_to_b64(img: np.ndarray, max_dim: int = 2048) -> str:
58
- h, w = img.shape[:2]
59
- if max(h, w) > max_dim:
60
- scale = max_dim / max(h, w)
61
- img = cv2.resize(img, (int(w*scale), int(h*scale)), interpolation=cv2.INTER_AREA)
62
- _, buf = cv2.imencode(".jpg", img, [cv2.IMWRITE_JPEG_QUALITY, 88])
63
- return base64.b64encode(buf.tobytes()).decode()
64
-
65
-
66
- def _mask_to_b64(mask: np.ndarray, max_dim: int = 2048) -> str:
67
- vis = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
68
- return _bgr_to_b64(vis, max_dim)
69
-
70
-
71
- def _composite_overlay(orig: np.ndarray, rooms: List[Dict],
72
- wall_mask: np.ndarray) -> np.ndarray:
73
- """Render original image + wall overlay + room polygons."""
74
- vis = orig.copy()
75
- # Overlay walls (semi-transparent blue)
76
- if wall_mask is not None:
77
- wm = wall_mask
78
- if wm.shape[:2] != vis.shape[:2]:
79
- wm = cv2.resize(wm, (vis.shape[1], vis.shape[0]),
80
- interpolation=cv2.INTER_NEAREST)
81
- wall_vis = np.zeros_like(vis)
82
- wall_vis[wm > 0] = (0, 80, 220)
83
- vis = cv2.addWeighted(vis, 0.85, wall_vis, 0.4, 0)
84
- # Draw room fills
85
- rng = np.random.default_rng(7)
86
- for room in rooms:
87
- color = rng.integers(80, 200, 3).tolist()
88
- segs = room.get("segmentation", [])
89
- for seg in segs:
90
- pts = np.array(seg, dtype=np.int32).reshape(-1, 2)
91
- if len(pts) >= 3:
92
- overlay = vis.copy()
93
- cv2.fillPoly(overlay, [pts], color)
94
- vis = cv2.addWeighted(vis, 0.55, overlay, 0.45, 0)
95
- cv2.polylines(vis, [pts], True, color, 2)
96
- # Label
97
- cx, cy = room.get("centroid", [0, 0])
98
- label = room.get("label", f"#{room['id']}")
99
- cv2.putText(vis, label, (cx-20, cy),
100
- cv2.FONT_HERSHEY_SIMPLEX, 0.55, (255,255,255), 2, cv2.LINE_AA)
101
- cv2.putText(vis, str(room["id"]), (cx-6, cy+18),
102
- cv2.FONT_HERSHEY_SIMPLEX, 0.45, (220,220,0), 1, cv2.LINE_AA)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  return vis
104
 
105
 
106
- # ── Routes ────────────────────────────────────────────────────────────────────
107
- @app.route("/")
108
- def index():
109
- return render_template_string(HTML_PAGE)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
110
 
 
 
 
111
 
112
- @app.route("/api/session", methods=["POST"])
113
- def create_session():
114
- sid = _new_session()
115
- return jsonify({"session_id": sid})
116
 
 
 
117
 
118
- @app.route("/api/upload", methods=["POST"])
119
- def upload():
120
- sid = request.form.get("session_id", "")
121
- sess = _get_session(sid)
122
- if sess is None:
123
- return jsonify({"error": "Invalid session"}), 400
124
 
125
- file = request.files.get("image")
126
- if file is None:
127
- return jsonify({"error": "No image"}), 400
 
 
 
128
 
129
- buf = np.frombuffer(file.read(), np.uint8)
130
- img = cv2.imdecode(buf, cv2.IMREAD_COLOR)
131
- if img is None:
132
- return jsonify({"error": "Could not decode image"}), 400
133
-
134
- # Downscale very large images to keep processing fast
135
- max_px = 2400
136
- h, w = img.shape[:2]
137
- if max(h, w) > max_px:
138
- scale = max_px / max(h, w)
139
- img = cv2.resize(img, (int(w*scale), int(h*scale)), interpolation=cv2.INTER_AREA)
140
-
141
- with _sessions_lock:
142
- sess["original_bgr"] = img
143
- sess["wall_mask"] = None
144
- sess["room_mask"] = None
145
- sess["rooms"] = []
146
- sess["stage_images"] = {}
147
- sess["door_lines"] = []
148
- sess["log"] = []
149
- sess["progress"] = 0
150
- sess["status"] = "uploaded"
151
-
152
- preview = _bgr_to_b64(img)
153
- return jsonify({"preview": preview,
154
- "width" : img.shape[1],
155
- "height": img.shape[0]})
156
-
157
-
158
- @app.route("/api/run", methods=["POST"])
159
- def run_pipeline():
160
- data = request.get_json(force=True)
161
- sid = data.get("session_id", "")
162
- sess = _get_session(sid)
163
- if sess is None or sess["original_bgr"] is None:
164
- return jsonify({"error": "No image uploaded"}), 400
165
-
166
- def _worker():
167
- logs = []
168
- def progress(msg, pct):
169
- logs.append(msg)
170
- with _sessions_lock:
171
- sess["log"] = logs[:]
172
- sess["progress"] = pct
173
-
174
- try:
175
- with _sessions_lock:
176
- sess["status"] = "running"
177
-
178
- pipe = WallPipeline(progress_cb=progress)
179
- walls, rooms_mask, cal = pipe.run(
180
- sess["original_bgr"],
181
- extra_door_lines=sess.get("door_lines", [])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
182
  )
183
 
184
- # Auto-detect rooms from filtered mask
185
- contours, _ = cv2.findContours(
186
- rooms_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
187
- rooms = []
188
- for idx, cnt in enumerate(contours, 1):
189
- area = cv2.contourArea(cnt)
190
- bx, by, bw, bh = cv2.boundingRect(cnt)
191
- M = cv2.moments(cnt)
192
- cx = int(M["m10"]/M["m00"]) if M["m00"] else bx+bw//2
193
- cy = int(M["m01"]/M["m00"]) if M["m00"] else by+bh//2
194
- seg = cnt[:,0,:].tolist()
195
- seg = [v for pt in seg for v in pt]
196
- rooms.append({
197
- "id" : idx,
198
- "label" : f"Room {idx}",
199
- "segmentation": [seg],
200
- "area" : float(area),
201
- "bbox" : [bx, by, bw, bh],
202
- "centroid" : [cx, cy],
203
- "confidence" : 0.95,
204
- "isAi" : True,
205
- })
206
-
207
- with _sessions_lock:
208
- sess["wall_mask"] = walls
209
- sess["room_mask"] = rooms_mask
210
- sess["rooms"] = rooms
211
- sess["stage_images"] = pipe.stage_images
212
- sess["calibration"] = cal.as_dict() if cal else {}
213
- sess["status"] = "done"
214
- sess["progress"] = 100
215
-
216
- except Exception as exc:
217
- tb = traceback.format_exc()
218
- with _sessions_lock:
219
- sess["status"] = "error"
220
- sess["log"] = logs + [f"ERROR: {exc}", tb]
221
-
222
- t = threading.Thread(target=_worker, daemon=True)
223
- t.start()
224
- return jsonify({"started": True})
225
-
226
-
227
- @app.route("/api/progress", methods=["GET"])
228
- def progress():
229
- sid = request.args.get("session_id", "")
230
- sess = _get_session(sid)
231
- if sess is None:
232
- return jsonify({"error": "Invalid session"}), 400
233
- with _sessions_lock:
234
- return jsonify({
235
- "status" : sess["status"],
236
- "progress": sess["progress"],
237
- "log" : sess["log"][-6:] if sess["log"] else [],
238
- })
239
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
240
 
241
- @app.route("/api/result", methods=["GET"])
242
- def result():
243
- sid = request.args.get("session_id", "")
244
- sess = _get_session(sid)
245
- if sess is None:
246
- return jsonify({"error": "Invalid session"}), 400
247
-
248
- orig = sess.get("original_bgr")
249
- walls = sess.get("wall_mask")
250
- rooms = sess.get("rooms", [])
251
-
252
- if orig is None:
253
- return jsonify({"error": "No image"}), 400
254
-
255
- composite = _composite_overlay(orig, rooms, walls)
256
- return jsonify({
257
- "composite" : _bgr_to_b64(composite),
258
- "wall_mask" : _mask_to_b64(walls) if walls is not None else None,
259
- "rooms" : rooms,
260
- "calibration": sess.get("calibration", {}),
261
- "gpu" : _GPU,
262
- "gpu_detail" : {
263
- "cupy" : _CUPY,
264
- "torch_cuda" : _TORCH_CUDA,
265
- "opencv_cuda" : _CV_CUDA,
266
- },
267
- })
268
-
269
-
270
- @app.route("/api/stages", methods=["GET"])
271
- def stages():
272
- sid = request.args.get("session_id", "")
273
- sess = _get_session(sid)
274
- if sess is None:
275
- return jsonify({"error": "Invalid session"}), 400
276
-
277
- stage_imgs = sess.get("stage_images", {})
278
- result = {}
279
- for key, img in stage_imgs.items():
280
- if img is not None and isinstance(img, np.ndarray):
281
- if len(img.shape) == 2:
282
- disp = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
283
- else:
284
- disp = img
285
- result[key] = _bgr_to_b64(disp, max_dim=800)
286
- return jsonify(result)
287
-
288
-
289
- @app.route("/api/wand", methods=["POST"])
290
- def wand():
291
- """Click-to-segment: flood-fill from clicked pixel."""
292
- data = request.get_json(force=True)
293
- sid = data.get("session_id", "")
294
- sess = _get_session(sid)
295
- if sess is None or sess["wall_mask"] is None:
296
- return jsonify({"error": "Run pipeline first"}), 400
297
-
298
- click_x = int(data.get("x", 0))
299
- click_y = int(data.get("y", 0))
300
-
301
- pipe = WallPipeline()
302
- new_room = pipe.wand_segment(
303
- sess["wall_mask"], click_x, click_y, sess["rooms"]
304
  )
305
- if new_room is None:
306
- return jsonify({"error": "No room found at that location"}), 400
307
-
308
- with _sessions_lock:
309
- sess["rooms"].append(new_room)
310
-
311
- orig = sess["original_bgr"]
312
- composite = _composite_overlay(orig, sess["rooms"], sess["wall_mask"])
313
- return jsonify({
314
- "room" : new_room,
315
- "composite": _bgr_to_b64(composite),
316
- "rooms" : sess["rooms"],
317
- })
318
-
319
-
320
- @app.route("/api/remove_room", methods=["POST"])
321
- def remove_room():
322
- data = request.get_json(force=True)
323
- sid = data.get("session_id", "")
324
- room_id = int(data.get("room_id", -1))
325
- sess = _get_session(sid)
326
- if sess is None:
327
- return jsonify({"error": "Invalid session"}), 400
328
-
329
- with _sessions_lock:
330
- before = len(sess["rooms"])
331
- sess["rooms"] = [r for r in sess["rooms"] if r["id"] != room_id]
332
- removed = before - len(sess["rooms"])
333
-
334
- if removed == 0:
335
- return jsonify({"error": f"Room {room_id} not found"}), 404
336
-
337
- orig = sess["original_bgr"]
338
- composite = _composite_overlay(orig, sess["rooms"], sess["wall_mask"])
339
- return jsonify({
340
- "composite": _bgr_to_b64(composite),
341
- "rooms" : sess["rooms"],
342
- "removed" : room_id,
343
- })
344
-
345
-
346
- @app.route("/api/add_door_line", methods=["POST"])
347
- def add_door_line():
348
- data = request.get_json(force=True)
349
- sid = data.get("session_id", "")
350
- sess = _get_session(sid)
351
- if sess is None:
352
- return jsonify({"error": "Invalid session"}), 400
353
-
354
- x1 = int(data.get("x1", 0))
355
- y1 = int(data.get("y1", 0))
356
- x2 = int(data.get("x2", 0))
357
- y2 = int(data.get("y2", 0))
358
-
359
- with _sessions_lock:
360
- sess["door_lines"].append((x1, y1, x2, y2))
361
-
362
- # If wall mask exists, paint immediately
363
- orig = sess["original_bgr"]
364
- walls = sess["wall_mask"]
365
- if walls is not None:
366
- stroke = sess.get("calibration", {}).get("stroke_width", 3)
367
- lw = max(3, stroke)
368
- cv2.line(walls, (x1, y1), (x2, y2), 255, lw)
369
- # Re-segment rooms
370
- pipe = WallPipeline()
371
- rooms_mask = pipe._segment_rooms(walls)
372
- valid_mask, contours = pipe._filter_rooms(rooms_mask, orig.shape)
373
- rooms = []
374
- for idx, cnt in enumerate(contours, 1):
375
- area = cv2.contourArea(cnt)
376
- bx_, by_, bw_, bh_ = cv2.boundingRect(cnt)
377
- M = cv2.moments(cnt)
378
- cx = int(M["m10"]/M["m00"]) if M["m00"] else bx_+bw_//2
379
- cy = int(M["m01"]/M["m00"]) if M["m00"] else by_+bh_//2
380
- seg = cnt[:,0,:].tolist()
381
- seg = [v for pt in seg for v in pt]
382
- rooms.append({
383
- "id": idx, "label": f"Room {idx}",
384
- "segmentation": [seg],
385
- "area": float(area),
386
- "bbox": [bx_, by_, bw_, bh_],
387
- "centroid": [cx, cy],
388
- "confidence": 0.95,
389
- })
390
- with _sessions_lock:
391
- sess["wall_mask"] = walls
392
- sess["room_mask"] = valid_mask
393
- sess["rooms"] = rooms
394
-
395
- composite = _composite_overlay(orig, sess["rooms"], sess["wall_mask"])
396
- return jsonify({
397
- "composite" : _bgr_to_b64(composite),
398
- "rooms" : sess["rooms"],
399
- "door_lines" : sess["door_lines"],
400
- })
401
-
402
-
403
- @app.route("/api/clear_door_lines", methods=["POST"])
404
- def clear_door_lines():
405
- data = request.get_json(force=True)
406
- sid = data.get("session_id", "")
407
- sess = _get_session(sid)
408
- if sess is None:
409
- return jsonify({"error": "Invalid session"}), 400
410
- with _sessions_lock:
411
- sess["door_lines"] = []
412
- return jsonify({"cleared": True})
413
-
414
-
415
- @app.route("/api/export", methods=["GET"])
416
- def export_json():
417
- sid = request.args.get("session_id", "")
418
- sess = _get_session(sid)
419
- if sess is None:
420
- return jsonify({"error": "Invalid session"}), 400
421
- rooms = sess.get("rooms", [])
422
- safe = []
423
- for r in rooms:
424
- safe.append({k: v for k, v in r.items()
425
- if k in ("id","label","area","bbox","centroid","confidence")})
426
- return Response(
427
- json.dumps({"rooms": safe, "count": len(safe)}, indent=2),
428
- mimetype="application/json",
429
- headers={"Content-Disposition": "attachment; filename=rooms.json"}
430
  )
431
 
 
 
 
 
 
432
 
433
- # ─────────────────────────────────────────────────────────────────────────────
434
- # HTML / CSS / JS (single-page app)
435
- # ─────────────────────────────────────────────────────────────────────────────
436
- HTML_PAGE = r"""
437
- <!DOCTYPE html>
438
- <html lang="en">
439
- <head>
440
- <meta charset="UTF-8"/>
441
- <meta name="viewport" content="width=device-width,initial-scale=1"/>
442
- <title>Blueprint Room Extractor</title>
443
- <style>
444
- /* ── Reset & base ── */
445
- *{box-sizing:border-box;margin:0;padding:0}
446
- :root{
447
- --bg:#0b0c10;--panel:#13151c;--panel2:#1a1d27;--border:#252836;
448
- --accent:#00d4aa;--accent2:#6c63ff;--warn:#f59e0b;--danger:#ef4444;
449
- --text:#e8eaf0;--muted:#8b90a0;--font:'JetBrains Mono',monospace;
450
- }
451
- @import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@300;400;500;700&family=Syne:wght@400;600;800&display=swap');
452
- html,body{height:100%;background:var(--bg);color:var(--text);font-family:var(--font);font-size:13px;overflow:hidden}
453
-
454
- /* ── Layout ── */
455
- #app{display:grid;grid-template-columns:320px 1fr 280px;grid-template-rows:56px 1fr 38px;height:100vh;gap:0}
456
- header{grid-column:1/-1;background:var(--panel);border-bottom:1px solid var(--border);
457
- display:flex;align-items:center;gap:16px;padding:0 20px}
458
- header h1{font-family:'Syne',sans-serif;font-weight:800;font-size:18px;
459
- background:linear-gradient(135deg,var(--accent),var(--accent2));
460
- -webkit-background-clip:text;-webkit-text-fill-color:transparent;letter-spacing:-.5px}
461
- .gpu-badge{padding:3px 10px;border-radius:20px;font-size:10px;font-weight:700;letter-spacing:.8px;
462
- background:rgba(108,99,255,.18);color:var(--accent2);border:1px solid var(--accent2)}
463
- .gpu-badge.on{background:rgba(0,212,170,.18);color:var(--accent);border-color:var(--accent)}
464
-
465
- /* ── Left panel ── */
466
- #left{background:var(--panel);border-right:1px solid var(--border);overflow-y:auto;padding:16px;display:flex;flex-direction:column;gap:14px}
467
- .section-title{font-family:'Syne',sans-serif;font-size:11px;font-weight:600;color:var(--muted);
468
- text-transform:uppercase;letter-spacing:1.2px;padding-bottom:6px;border-bottom:1px solid var(--border)}
469
-
470
- /* ── Upload zone ── */
471
- #drop-zone{border:2px dashed var(--border);border-radius:10px;padding:24px 16px;text-align:center;
472
- cursor:pointer;transition:all .2s;color:var(--muted);position:relative}
473
- #drop-zone:hover,#drop-zone.over{border-color:var(--accent);color:var(--accent);background:rgba(0,212,170,.04)}
474
- #drop-zone input{position:absolute;inset:0;opacity:0;cursor:pointer}
475
- #drop-zone .icon{font-size:28px;margin-bottom:6px}
476
- #drop-zone p{font-size:11px}
477
-
478
- /* ── Buttons ── */
479
- .btn{display:flex;align-items:center;gap:6px;padding:9px 14px;border-radius:8px;border:none;cursor:pointer;
480
- font-family:var(--font);font-size:12px;font-weight:500;transition:all .15s;width:100%;justify-content:center}
481
- .btn-primary{background:linear-gradient(135deg,var(--accent),#00b890);color:#001a14}
482
- .btn-primary:hover{opacity:.9;transform:translateY(-1px)}
483
- .btn-secondary{background:var(--panel2);color:var(--text);border:1px solid var(--border)}
484
- .btn-secondary:hover{border-color:var(--accent);color:var(--accent)}
485
- .btn-danger{background:rgba(239,68,68,.15);color:var(--danger);border:1px solid rgba(239,68,68,.3)}
486
- .btn-danger:hover{background:rgba(239,68,68,.25)}
487
- .btn-warn{background:rgba(245,158,11,.13);color:var(--warn);border:1px solid rgba(245,158,11,.3)}
488
- .btn-warn:hover{background:rgba(245,158,11,.22)}
489
- .btn:disabled{opacity:.4;cursor:not-allowed;transform:none!important}
490
-
491
- /* ── Tool panel ── */
492
- .tool-group{display:flex;flex-direction:column;gap:8px}
493
- .tool-row{display:flex;gap:7px}
494
- .tool-btn{flex:1;padding:8px 6px;border-radius:7px;border:1px solid var(--border);background:var(--panel2);
495
- color:var(--muted);cursor:pointer;font-size:18px;transition:all .15s;font-family:var(--font)}
496
- .tool-btn:hover{border-color:var(--accent);color:var(--accent)}
497
- .tool-btn.active{border-color:var(--accent);background:rgba(0,212,170,.12);color:var(--accent)}
498
- .tool-label{font-size:10px;color:var(--muted);text-align:center;margin-top:2px}
499
-
500
- /* ── Inputs ── */
501
- .field{display:flex;flex-direction:column;gap:5px}
502
- .field label{font-size:10px;color:var(--muted);text-transform:uppercase;letter-spacing:.8px}
503
- .field input{background:var(--panel2);border:1px solid var(--border);border-radius:6px;
504
- color:var(--text);font-family:var(--font);font-size:12px;padding:7px 10px;width:100%}
505
- .field input:focus{outline:none;border-color:var(--accent)}
506
- .coord-row{display:grid;grid-template-columns:1fr 1fr 1fr 1fr;gap:5px}
507
- .coord-row input{text-align:center}
508
-
509
- /* ── Progress ── */
510
- #progress-bar-wrap{height:4px;background:var(--panel2);border-radius:2px;overflow:hidden;margin-top:4px}
511
- #progress-bar{height:100%;width:0%;background:linear-gradient(90deg,var(--accent),var(--accent2));transition:width .4s}
512
- #status-text{font-size:11px;color:var(--muted)}
513
-
514
- /* ── Center canvas area ── */
515
- #center{background:var(--bg);overflow:hidden;position:relative;display:flex;flex-direction:column}
516
- #viewer-toolbar{background:var(--panel);border-bottom:1px solid var(--border);
517
- padding:8px 14px;display:flex;align-items:center;gap:10px;flex-shrink:0}
518
- .zoom-ctrl{display:flex;align-items:center;gap:6px}
519
- .zoom-ctrl label{font-size:11px;color:var(--muted);min-width:38px}
520
- .zoom-ctrl input[type=range]{width:90px;accent-color:var(--accent)}
521
- .zoom-val{font-size:11px;color:var(--accent);min-width:38px}
522
- #canvas-wrap{flex:1;overflow:auto;display:flex;align-items:center;justify-content:center;position:relative}
523
- #img-layer{position:relative;transform-origin:top left;display:inline-block;line-height:0}
524
- #img-layer img{display:block;max-width:none;cursor:crosshair;user-select:none}
525
- #img-layer canvas{position:absolute;top:0;left:0;pointer-events:none}
526
- .crosshair-cursor{cursor:crosshair!important}
527
- .default-cursor{cursor:default!important}
528
-
529
- /* ── Tab view (stages) ── */
530
- #tabs{display:flex;gap:1px;background:var(--bg);padding:0 14px}
531
- .tab{padding:8px 14px;font-size:11px;cursor:pointer;color:var(--muted);border-bottom:2px solid transparent;transition:all .15s}
532
- .tab:hover{color:var(--text)}
533
- .tab.active{color:var(--accent);border-bottom-color:var(--accent)}
534
-
535
- /* ── Right panel ── */
536
- #right{background:var(--panel);border-left:1px solid var(--border);overflow-y:auto;padding:14px;display:flex;flex-direction:column;gap:12px}
537
- #rooms-list{display:flex;flex-direction:column;gap:6px}
538
- .room-card{background:var(--panel2);border:1px solid var(--border);border-radius:8px;padding:10px 12px;
539
- display:flex;align-items:center;gap:8px;transition:border-color .15s}
540
- .room-card:hover{border-color:var(--accent)}
541
- .room-dot{width:10px;height:10px;border-radius:50%;flex-shrink:0}
542
- .room-info{flex:1;min-width:0}
543
- .room-name{font-size:12px;font-weight:500;white-space:nowrap;overflow:hidden;text-overflow:ellipsis}
544
- .room-meta{font-size:10px;color:var(--muted)}
545
- .room-del{background:none;border:none;color:var(--muted);cursor:pointer;font-size:14px;padding:2px 5px;
546
- border-radius:4px;transition:all .15s}
547
- .room-del:hover{color:var(--danger);background:rgba(239,68,68,.1)}
548
-
549
- /* ── Calibration display ── */
550
- .cal-grid{display:grid;grid-template-columns:1fr 1fr;gap:5px}
551
- .cal-item{background:var(--panel2);border-radius:6px;padding:7px 10px}
552
- .cal-key{font-size:9px;color:var(--muted);text-transform:uppercase;letter-spacing:.7px}
553
- .cal-val{font-size:13px;font-weight:500;color:var(--accent);margin-top:2px}
554
-
555
- /* ── Log ── */
556
- #log-box{background:var(--panel2);border-radius:8px;padding:10px;font-size:10px;color:var(--muted);
557
- max-height:130px;overflow-y:auto;line-height:1.6}
558
-
559
- /* ── Status bar ── */
560
- footer{grid-column:1/-1;background:var(--panel);border-top:1px solid var(--border);
561
- padding:0 16px;display:flex;align-items:center;gap:16px;font-size:10px;color:var(--muted)}
562
- footer span{display:flex;align-items:center;gap:5px}
563
- .dot{width:7px;height:7px;border-radius:50%;background:var(--muted)}
564
- .dot.green{background:var(--accent)}
565
- .dot.orange{background:var(--warn);animation:pulse .8s infinite}
566
- .dot.red{background:var(--danger)}
567
- @keyframes pulse{0%,100%{opacity:1}50%{opacity:.3}}
568
-
569
- /* ── Stage grid ── */
570
- #stages-grid{display:grid;grid-template-columns:1fr 1fr;gap:8px;padding:10px}
571
- .stage-card{background:var(--panel2);border:1px solid var(--border);border-radius:8px;overflow:hidden}
572
- .stage-card img{width:100%;display:block}
573
- .stage-card-label{padding:4px 8px;font-size:10px;color:var(--muted)}
574
-
575
- /* ── Divider ── */
576
- .divider{height:1px;background:var(--border);margin:2px 0}
577
-
578
- /* ── Toast ── */
579
- #toast{position:fixed;bottom:50px;left:50%;transform:translateX(-50%) translateY(20px);
580
- background:var(--panel);border:1px solid var(--border);border-radius:10px;
581
- padding:10px 20px;font-size:12px;opacity:0;transition:all .3s;pointer-events:none;z-index:999}
582
- #toast.show{opacity:1;transform:translateX(-50%) translateY(0)}
583
- #toast.err{border-color:var(--danger);color:var(--danger)}
584
- #toast.ok{border-color:var(--accent);color:var(--accent)}
585
- </style>
586
- </head>
587
- <body>
588
- <div id="app">
589
-
590
- <!-- HEADER -->
591
- <header>
592
- <div>πŸ—οΈ</div>
593
- <h1>Blueprint Room Extractor</h1>
594
- <div class="gpu-badge" id="gpu-badge">CPU</div>
595
- <div style="flex:1"></div>
596
- <button class="btn btn-secondary" style="width:auto" onclick="exportJSON()">⬇ Export JSON</button>
597
- </header>
598
-
599
- <!-- LEFT PANEL -->
600
- <div id="left">
601
-
602
- <div class="section-title">Image</div>
603
- <div id="drop-zone">
604
- <input type="file" id="file-input" accept="image/*" onchange="handleFile(this.files[0])"/>
605
- <div class="icon">πŸ–ΌοΈ</div>
606
- <p>Drop blueprint or click to upload</p>
607
- </div>
608
-
609
- <button class="btn btn-primary" id="run-btn" disabled onclick="runPipeline()">
610
- ⚑ Run Wall Extraction
611
- </button>
612
-
613
- <div id="progress-bar-wrap"><div id="progress-bar"></div></div>
614
- <div id="status-text">Idle</div>
615
-
616
- <div class="divider"></div>
617
- <div class="section-title">Tools</div>
618
-
619
- <div class="tool-group">
620
- <div class="tool-row">
621
- <div style="flex:1;text-align:center">
622
- <button class="tool-btn" id="tool-pan" onclick="setTool('pan')" title="Pan / Zoom">πŸ”</button>
623
- <div class="tool-label">Pan/Zoom</div>
624
- </div>
625
- <div style="flex:1;text-align:center">
626
- <button class="tool-btn active" id="tool-wand" onclick="setTool('wand')" title="Magic Wand">πŸͺ„</button>
627
- <div class="tool-label">Wand</div>
628
- </div>
629
- <div style="flex:1;text-align:center">
630
- <button class="tool-btn" id="tool-door" onclick="setTool('door')" title="Door Seal">πŸšͺ</button>
631
- <div class="tool-label">Door Line</div>
632
- </div>
633
- </div>
634
- <div id="tool-hint" style="font-size:10px;color:var(--muted);text-align:center;padding:4px 0">
635
- Wand: click image to detect room
636
- </div>
637
- </div>
638
-
639
- <div class="divider"></div>
640
-
641
- <!-- DOOR LINE manual entry -->
642
- <div class="section-title">πŸšͺ Door Seal Line</div>
643
- <div style="font-size:10px;color:var(--muted);margin-bottom:4px">Enter pixel coords or click on image with Door tool</div>
644
- <div class="coord-row">
645
- <div class="field"><label>X1</label><input id="dl-x1" type="number" value="0" min="0"/></div>
646
- <div class="field"><label>Y1</label><input id="dl-y1" type="number" value="0" min="0"/></div>
647
- <div class="field"><label>X2</label><input id="dl-x2" type="number" value="100" min="0"/></div>
648
- <div class="field"><label>Y2</label><input id="dl-y2" type="number" value="0" min="0"/></div>
649
- </div>
650
- <button class="btn btn-warn" onclick="addDoorLine()">πŸšͺ Add Door Seal Line</button>
651
- <button class="btn btn-secondary" onclick="clearDoorLines()" style="font-size:11px">Clear All Door Lines</button>
652
- <div id="door-lines-list" style="font-size:10px;color:var(--muted)"></div>
653
-
654
- <div class="divider"></div>
655
-
656
- <!-- REMOVE ROOM -->
657
- <div class="section-title">πŸ—‘οΈ Remove Room</div>
658
- <div style="display:flex;gap:6px">
659
- <div class="field" style="flex:1"><label>Room ID</label><input id="remove-id" type="number" min="1" value="1"/></div>
660
- <button class="btn btn-danger" style="width:auto;margin-top:16px;padding:8px 12px" onclick="removeRoom()">Del</button>
661
- </div>
662
-
663
- </div>
664
-
665
- <!-- CENTER: canvas + tab strip -->
666
- <div id="center">
667
- <div id="viewer-toolbar">
668
- <div class="zoom-ctrl">
669
- <label>Zoom</label>
670
- <input type="range" id="zoom-slider" min="20" max="500" value="100" oninput="applyZoom(this.value)"/>
671
- <span class="zoom-val" id="zoom-val">100%</span>
672
- </div>
673
- <div class="zoom-ctrl">
674
- <label>Pan X</label>
675
- <input type="range" id="pan-x" min="-2000" max="2000" value="0" oninput="applyPan()"/>
676
- </div>
677
- <div class="zoom-ctrl">
678
- <label>Pan Y</label>
679
- <input type="range" id="pan-y" min="-2000" max="2000" value="0" oninput="applyPan()"/>
680
- </div>
681
- <button class="btn btn-secondary" style="width:auto;padding:5px 12px;font-size:11px" onclick="resetView()">βŒ‚ Reset</button>
682
- <div id="tabs" style="flex:1;display:flex;justify-content:flex-end">
683
- <div class="tab active" onclick="switchTab('result')">Result</div>
684
- <div class="tab" onclick="switchTab('walls')">Walls</div>
685
- <div class="tab" onclick="switchTab('stages')">Stages</div>
686
- </div>
687
- </div>
688
- <div id="canvas-wrap">
689
- <!-- Result tab -->
690
- <div id="tab-result" style="position:relative;width:100%;height:100%;display:flex;align-items:center;justify-content:center">
691
- <div id="img-layer">
692
- <img id="main-img" src="" alt="" style="display:none" onclick="onCanvasClick(event)"/>
693
- <canvas id="overlay-canvas"></canvas>
694
- </div>
695
- </div>
696
- <!-- Walls tab -->
697
- <div id="tab-walls" style="display:none;width:100%;height:100%;align-items:center;justify-content:center">
698
- <img id="walls-img" src="" style="max-width:100%;max-height:100%;border-radius:6px"/>
699
- </div>
700
- <!-- Stages tab -->
701
- <div id="tab-stages" style="display:none;overflow-y:auto;width:100%;height:100%">
702
- <div id="stages-grid"></div>
703
- </div>
704
- </div>
705
- </div>
706
-
707
- <!-- RIGHT PANEL -->
708
- <div id="right">
709
- <div class="section-title">Rooms (<span id="room-count">0</span>)</div>
710
- <div id="rooms-list"><div style="color:var(--muted);font-size:11px;text-align:center;padding:20px 0">Run pipeline to detect rooms</div></div>
711
-
712
- <div class="divider"></div>
713
- <div class="section-title">Calibration</div>
714
- <div class="cal-grid" id="cal-grid">
715
- <div style="color:var(--muted);font-size:11px;grid-column:1/-1">β€”</div>
716
- </div>
717
-
718
- <div class="divider"></div>
719
- <div class="section-title">Log</div>
720
- <div id="log-box">Ready.</div>
721
- </div>
722
-
723
- <!-- FOOTER -->
724
- <footer>
725
- <span><div class="dot" id="status-dot"></div><span id="footer-status">Idle</span></span>
726
- <span id="footer-coords" style="font-family:monospace">x:β€” y:β€”</span>
727
- <span style="margin-left:auto" id="footer-rooms">0 rooms</span>
728
- </footer>
729
-
730
- </div><!-- #app -->
731
-
732
- <div id="toast"></div>
733
-
734
- <script>
735
- // ── State ──────────────────────────────────────────────────────────────────
736
- let SID = null;
737
- let activeTool = 'wand';
738
- let zoomLevel = 100;
739
- let imgW = 0, imgH = 0;
740
- let doorStart = null;
741
- let pollingTimer = null;
742
- let doorLines = [];
743
-
744
- const ROOM_COLORS = ['#00d4aa','#6c63ff','#f59e0b','#ef4444','#10b981',
745
- '#3b82f6','#ec4899','#8b5cf6','#14b8a6','#f97316'];
746
-
747
- // ── Init ────────────────────────────────────────────────────────────────────
748
- async function init(){
749
- const r = await fetch('/api/session',{method:'POST'});
750
- const d = await r.json();
751
- SID = d.session_id;
752
- }
753
- init();
754
-
755
- // ── File handling ─────────────────────────────────────────────────────────
756
- const dz = document.getElementById('drop-zone');
757
- dz.addEventListener('dragover', e=>{e.preventDefault();dz.classList.add('over')});
758
- dz.addEventListener('dragleave', ()=>dz.classList.remove('over'));
759
- dz.addEventListener('drop', e=>{e.preventDefault();dz.classList.remove('over');
760
- if(e.dataTransfer.files[0]) handleFile(e.dataTransfer.files[0])});
761
-
762
- async function handleFile(file){
763
- if(!file||!SID) return;
764
- const fd = new FormData();
765
- fd.append('session_id', SID);
766
- fd.append('image', file);
767
- setStatus('Uploading...','orange');
768
- const r = await fetch('/api/upload',{method:'POST',body:fd});
769
- const d = await r.json();
770
- if(d.error){toast(d.error,true);return;}
771
- imgW = d.width; imgH = d.height;
772
- showMainImg('data:image/jpeg;base64,'+d.preview);
773
- document.getElementById('run-btn').disabled = false;
774
- setStatus('Image loaded','green');
775
- toast('Image loaded βœ“');
776
- }
777
-
778
- // ── Pipeline ──────────────────────────────────────────────────────────────
779
- async function runPipeline(){
780
- if(!SID) return;
781
- document.getElementById('run-btn').disabled = true;
782
- setStatus('Running...','orange');
783
- setLog(['Starting wall extraction pipeline...']);
784
- setProgress(0);
785
-
786
- const r = await fetch('/api/run',{method:'POST',
787
- headers:{'Content-Type':'application/json'},
788
- body:JSON.stringify({session_id:SID})});
789
- const d = await r.json();
790
- if(d.error){toast(d.error,true);document.getElementById('run-btn').disabled=false;return;}
791
-
792
- pollingTimer = setInterval(pollProgress, 700);
793
- }
794
-
795
- async function pollProgress(){
796
- const r = await fetch('/api/progress?session_id='+SID);
797
- const d = await r.json();
798
- setProgress(d.progress||0);
799
- if(d.log) setLog(d.log);
800
- setStatus(d.status==='running'?'Processing…':d.status, d.status==='error'?'red':d.status==='done'?'green':'orange');
801
- if(d.status==='done'||d.status==='error'){
802
- clearInterval(pollingTimer);
803
- if(d.status==='done') loadResult();
804
- document.getElementById('run-btn').disabled = false;
805
- }
806
- }
807
-
808
- async function loadResult(){
809
- const r = await fetch('/api/result?session_id='+SID);
810
- const d = await r.json();
811
- if(d.error){toast(d.error,true);return;}
812
-
813
- // GPU badge
814
- const badge = document.getElementById('gpu-badge');
815
- const gd = d.gpu_detail || {};
816
- const layers = [];
817
- if (gd.cupy) layers.push('CuPy');
818
- if (gd.torch_cuda) layers.push('Torch');
819
- if (gd.opencv_cuda) layers.push('cv2');
820
- if (layers.length) {
821
- badge.textContent = '⚑ GPU: ' + layers.join('+');
822
- badge.className = 'gpu-badge on';
823
- } else {
824
- badge.textContent = 'CPU only';
825
- badge.className = 'gpu-badge';
826
- }
827
-
828
- showMainImg('data:image/jpeg;base64,'+d.composite);
829
- if(d.wall_mask) document.getElementById('walls-img').src='data:image/jpeg;base64,'+d.wall_mask;
830
- updateRooms(d.rooms||[]);
831
- updateCalibration(d.calibration||{});
832
- loadStages();
833
- toast(`βœ“ Detected ${(d.rooms||[]).length} rooms`,'ok');
834
- }
835
-
836
- async function loadStages(){
837
- const r = await fetch('/api/stages?session_id='+SID);
838
- const d = await r.json();
839
- const grid = document.getElementById('stages-grid');
840
- grid.innerHTML='';
841
- const labels = {
842
- '01_title_removed':'1. Title Block Removed',
843
- '02_colors_removed':'2. Colors Removed',
844
- '03_door_arcs':'3. Door Arcs Closed',
845
- '04_walls_raw':'4. Walls Extracted',
846
- '05b_no_fixtures':'5b. Fixtures Removed',
847
- '05c_thin_removed':'5c. Thin Lines Removed',
848
- '05d_bridged':'5d. Endpoints Bridged',
849
- '05e_doors_closed':'5e. Door Openings Closed',
850
- '05f_dangling_removed':'5f. Dangling Lines Removed',
851
- '05g_large_gaps':'5g. Large Gaps Sealed',
852
- '07_rooms':'7. Room Segmentation',
853
- '08_rooms_filtered':'8. Filtered Rooms',
854
- };
855
- for(const [key, b64] of Object.entries(d)){
856
- const card = document.createElement('div');
857
- card.className='stage-card';
858
- card.innerHTML=`<img src="data:image/jpeg;base64,${b64}"/>
859
- <div class="stage-card-label">${labels[key]||key}</div>`;
860
- grid.appendChild(card);
861
- }
862
- }
863
-
864
- // ── Wand tool ─────────────────────────────────────────────────────────────
865
- async function onCanvasClick(e){
866
- const img = document.getElementById('main-img');
867
- if(!img.src||img.src==='') return;
868
-
869
- const rect = img.getBoundingClientRect();
870
- const scaleX = imgW / (rect.width * (zoomLevel/100));
871
- const scaleY = imgH / (rect.height * (zoomLevel/100));
872
- const rawX = (e.clientX - rect.left) * scaleX;
873
- const rawY = (e.clientY - rect.top) * scaleY;
874
- const px = Math.round(rawX / (zoomLevel/100));
875
- const py = Math.round(rawY / (zoomLevel/100));
876
-
877
- if(activeTool==='wand'){
878
- await doWand(Math.round(rawX), Math.round(rawY));
879
- } else if(activeTool==='door'){
880
- await doDoorClick(Math.round(rawX), Math.round(rawY));
881
- }
882
- }
883
-
884
- function getImgCoords(e){
885
- const img = document.getElementById('main-img');
886
- const rect = img.getBoundingClientRect();
887
- // account for CSS zoom transform
888
- const zoom = zoomLevel/100;
889
- const x = Math.round((e.clientX - rect.left) / zoom);
890
- const y = Math.round((e.clientY - rect.top) / zoom);
891
- return {x,y};
892
- }
893
-
894
- function onImgMouseMove(e){
895
- const {x,y} = getImgCoords(e);
896
- document.getElementById('footer-coords').textContent=`x:${x} y:${y}`;
897
- }
898
-
899
- async function doWand(x,y){
900
- if(!SID){toast('Run pipeline first',true);return;}
901
- toast('πŸͺ„ Detecting room...');
902
- const r = await fetch('/api/wand',{method:'POST',
903
- headers:{'Content-Type':'application/json'},
904
- body:JSON.stringify({session_id:SID,x,y})});
905
- const d = await r.json();
906
- if(d.error){toast(d.error,true);return;}
907
- showMainImg('data:image/jpeg;base64,'+d.composite);
908
- updateRooms(d.rooms||[]);
909
- toast(`βœ“ Added Room ${d.room.id}`);
910
- }
911
-
912
- // ── Door line tool ─────────────────────────────────────────────────────────
913
- async function doDoorClick(x,y){
914
- if(doorStart===null){
915
- doorStart = {x,y};
916
- document.getElementById('dl-x1').value=x;
917
- document.getElementById('dl-y1').value=y;
918
- toast(`Door start: (${x},${y}) β€” click end point`);
919
- } else {
920
- document.getElementById('dl-x2').value=x;
921
- document.getElementById('dl-y2').value=y;
922
- doorStart = null;
923
- await addDoorLine();
924
- }
925
- }
926
-
927
- async function addDoorLine(){
928
- if(!SID) return;
929
- const x1=+document.getElementById('dl-x1').value;
930
- const y1=+document.getElementById('dl-y1').value;
931
- const x2=+document.getElementById('dl-x2').value;
932
- const y2=+document.getElementById('dl-y2').value;
933
- const r = await fetch('/api/add_door_line',{method:'POST',
934
- headers:{'Content-Type':'application/json'},
935
- body:JSON.stringify({session_id:SID,x1,y1,x2,y2})});
936
- const d = await r.json();
937
- if(d.error){toast(d.error,true);return;}
938
- doorLines = d.door_lines||[];
939
- renderDoorLinesList();
940
- showMainImg('data:image/jpeg;base64,'+d.composite);
941
- updateRooms(d.rooms||[]);
942
- toast(`βœ“ Door seal line added`);
943
- }
944
-
945
- async function clearDoorLines(){
946
- if(!SID) return;
947
- await fetch('/api/clear_door_lines',{method:'POST',
948
- headers:{'Content-Type':'application/json'},
949
- body:JSON.stringify({session_id:SID})});
950
- doorLines=[];
951
- renderDoorLinesList();
952
- toast('Door lines cleared');
953
- }
954
-
955
- function renderDoorLinesList(){
956
- const el = document.getElementById('door-lines-list');
957
- if(!doorLines.length){el.textContent='No door lines';return;}
958
- el.innerHTML=doorLines.map((l,i)=>
959
- `<div style="padding:2px 0;border-bottom:1px solid var(--border)">#${i+1}: (${l[0]},${l[1]})β†’(${l[2]},${l[3]})</div>`
960
- ).join('');
961
- }
962
-
963
- // ── Remove room ───────────────────────────────────────────────────────────
964
- async function removeRoom(){
965
- const id = +document.getElementById('remove-id').value;
966
- if(!SID||!id) return;
967
- const r = await fetch('/api/remove_room',{method:'POST',
968
- headers:{'Content-Type':'application/json'},
969
- body:JSON.stringify({session_id:SID,room_id:id})});
970
- const d = await r.json();
971
- if(d.error){toast(d.error,true);return;}
972
- showMainImg('data:image/jpeg;base64,'+d.composite);
973
- updateRooms(d.rooms||[]);
974
- toast(`βœ“ Room ${id} removed`);
975
- }
976
-
977
- // ── View helpers ──────────────────────────────────────────────────────────
978
- function showMainImg(src){
979
- const img = document.getElementById('main-img');
980
- img.src=src;
981
- img.style.display='block';
982
- img.onmousemove = onImgMouseMove;
983
- img.onclick = onCanvasClick;
984
- }
985
-
986
- function applyZoom(v){
987
- zoomLevel = +v;
988
- document.getElementById('zoom-val').textContent=v+'%';
989
- const layer = document.getElementById('img-layer');
990
- const panX = document.getElementById('pan-x').value;
991
- const panY = document.getElementById('pan-y').value;
992
- layer.style.transform=`scale(${v/100}) translate(${panX}px,${panY}px)`;
993
- }
994
-
995
- function applyPan(){
996
- applyZoom(zoomLevel);
997
- }
998
-
999
- function resetView(){
1000
- zoomLevel=100;
1001
- document.getElementById('zoom-slider').value=100;
1002
- document.getElementById('pan-x').value=0;
1003
- document.getElementById('pan-y').value=0;
1004
- applyZoom(100);
1005
- }
1006
-
1007
- function setTool(t){
1008
- activeTool=t;
1009
- document.querySelectorAll('.tool-btn').forEach(b=>b.classList.remove('active'));
1010
- document.getElementById('tool-'+t).classList.add('active');
1011
- const hints = {
1012
- pan:'Pan/Zoom: use sliders above the canvas',
1013
- wand:'Wand: click image to detect & add a room',
1014
- door:'Door: click two points to draw a seal line'
1015
- };
1016
- document.getElementById('tool-hint').textContent=hints[t];
1017
- doorStart=null;
1018
- }
1019
-
1020
- function switchTab(name){
1021
- document.querySelectorAll('.tab').forEach(t=>t.classList.remove('active'));
1022
- event.target.classList.add('active');
1023
- document.getElementById('tab-result').style.display=name==='result'?'flex':'none';
1024
- document.getElementById('tab-walls').style.display =name==='walls' ?'flex':'none';
1025
- document.getElementById('tab-stages').style.display=name==='stages'?'block':'none';
1026
- }
1027
-
1028
- // ── Room list ─────────────────────────────────────────────────────────────
1029
- function updateRooms(rooms){
1030
- const list = document.getElementById('rooms-list');
1031
- document.getElementById('room-count').textContent=rooms.length;
1032
- document.getElementById('footer-rooms').textContent=rooms.length+' rooms';
1033
- if(!rooms.length){
1034
- list.innerHTML='<div style="color:var(--muted);font-size:11px;text-align:center;padding:12px">No rooms detected</div>';
1035
- return;
1036
- }
1037
- list.innerHTML=rooms.map((r,i)=>{
1038
- const col=ROOM_COLORS[i%ROOM_COLORS.length];
1039
- const areaPx=Math.round(r.area||0);
1040
- return `<div class="room-card">
1041
- <div class="room-dot" style="background:${col}"></div>
1042
- <div class="room-info">
1043
- <div class="room-name">#${r.id} ${r.label||''}</div>
1044
- <div class="room-meta">${areaPx.toLocaleString()} pxΒ² Β· [${(r.bbox||[]).join(',')}]</div>
1045
- </div>
1046
- <button class="room-del" title="Delete room" onclick="quickDelete(${r.id})">βœ•</button>
1047
- </div>`;
1048
- }).join('');
1049
- }
1050
-
1051
- async function quickDelete(id){
1052
- document.getElementById('remove-id').value=id;
1053
- await removeRoom();
1054
- }
1055
-
1056
- // ── Calibration ───────────────────────────────────────────────────────────
1057
- function updateCalibration(cal){
1058
- const grid = document.getElementById('cal-grid');
1059
- const entries=[
1060
- ['Stroke','stroke_width','px'],
1061
- ['Bridge gap','bridge_max_gap','px'],
1062
- ['Door gap','door_gap','px'],
1063
- ['Min dim','min_component_dim','px'],
1064
- ];
1065
- grid.innerHTML=entries.map(([label,key,unit])=>`
1066
- <div class="cal-item">
1067
- <div class="cal-key">${label}</div>
1068
- <div class="cal-val">${cal[key]??'β€”'}${cal[key]!==undefined?unit:''}</div>
1069
- </div>`).join('');
1070
- }
1071
-
1072
- // ── Misc ─────────────────────────────────────────────────────────────────
1073
- function setProgress(pct){
1074
- document.getElementById('progress-bar').style.width=pct+'%';
1075
- document.getElementById('status-text').textContent=pct+'%';
1076
- }
1077
-
1078
- function setLog(lines){
1079
- const el=document.getElementById('log-box');
1080
- el.innerHTML=lines.map(l=>`<div>${l}</div>`).join('');
1081
- el.scrollTop=el.scrollHeight;
1082
- }
1083
-
1084
- function setStatus(msg,color=''){
1085
- document.getElementById('footer-status').textContent=msg;
1086
- const dot=document.getElementById('status-dot');
1087
- dot.className='dot'+(color?' '+color:'');
1088
- }
1089
-
1090
- function toast(msg,err=false){
1091
- const t=document.getElementById('toast');
1092
- t.textContent=msg;
1093
- t.className='show'+(err?' err':' ok');
1094
- clearTimeout(t._tid);
1095
- t._tid=setTimeout(()=>{t.className=''},2800);
1096
- }
1097
-
1098
- async function exportJSON(){
1099
- if(!SID) return;
1100
- window.location='/api/export?session_id='+SID;
1101
- }
1102
- </script>
1103
- </body>
1104
- </html>
1105
- """
1106
 
1107
  if __name__ == "__main__":
1108
- print("=" * 60)
1109
- print(" Blueprint Room Extractor")
1110
- print(f" GPU: CuPy={_CUPY} PyTorch-CUDA={_TORCH_CUDA} OpenCV-CUDA={_CV_CUDA}")
1111
- print(" Open: http://localhost:7860")
1112
- print("=" * 60)
1113
- app.run(host="0.0.0.0", port=7860, debug=False, threaded=True)
 
1
  """
2
+ FloorPlan Analyser β€” Gradio Application
3
+ ========================================
4
+ Pipeline (mirrors GeometryAgent v5):
5
+ 1. Load image
6
+ 2. Crop title block
7
+ 3. Remove colors (chroma filter)
8
+ 4. Extract walls adaptive
9
+ 5. User draws door-closing lines (optional, before SAM)
10
+ 6. Segment rooms with SAM (HuggingFace hosted)
11
+ 7. OCR β†’ validate room labels
12
+ 8. Annotate + measure (area in mΒ²)
13
+ 9. Export to Excel
14
+ Optional:
15
+ β€’ Click to select / deselect room
16
+ β€’ Remove wrong annotation
17
+ β€’ Pan / Zoom (Gradio native)
18
+ β€’ Draw lines to close doors on the wall mask
19
  """
20
+
21
  from __future__ import annotations
22
 
23
+ import io, json, os, tempfile, time, requests
24
+ from pathlib import Path
25
+ from typing import Any, Dict, List, Optional, Tuple
 
 
 
 
 
 
26
 
27
  import cv2
28
  import numpy as np
29
+ import gradio as gr
30
+ import openpyxl
31
+ from openpyxl.styles import Font, PatternFill, Alignment
32
+
33
+ # ─── SAM HuggingFace endpoint ───────────────────────────────────────────────
34
+ HF_REPO = "Pream912/sam"
35
+ HF_API = f"https://huggingface.co/{HF_REPO}/resolve/main"
36
+ # We'll download the checkpoint locally on first use
37
+ SAM_CKPT = Path(tempfile.gettempdir()) / "sam_vit_h_4b8939.pth"
38
+ SAM_URL = f"{HF_API}/sam_vit_h_4b8939.pth"
39
+
40
+ DPI = 300
41
+ SCALE_FACTOR = 100 # 1 px = 1/300 inch Γ— 100 cm scale
42
+
43
+ # ─── constants (ported from GeometryAgent) ──────────────────────────────────
44
+ MIN_ROOM_AREA_FRAC = 0.000004
45
+ MAX_ROOM_AREA_FRAC = 0.08
46
+ MIN_ROOM_DIM_FRAC = 0.01
47
+ BORDER_MARGIN_FRAC = 0.01
48
+ MAX_ASPECT_RATIO = 8.0
49
+ MIN_SOLIDITY = 0.25
50
+ MIN_EXTENT = 0.08
51
+ OCR_CONF_THR = 0.3
52
+ SAM_MIN_SCORE = 0.70
53
+ SAM_CLOSET_THR = 300
54
+ SAM_WALL_NEG = 20
55
+ SAM_WALL_PCT = 75
56
+ WALL_MIN_HALF_PX = 3
57
+
58
+ ROOM_COLORS = [
59
+ (255, 99, 71), (100, 149, 237), (60, 179, 113),
60
+ (255, 165, 0), (147, 112, 219), (0, 206, 209),
61
+ (255, 182, 193), (127, 255, 0), (255, 215, 0),
62
+ (176, 224, 230),
63
+ ]
64
+
65
+ # ════════════════════════════════════════════════════════════════════════════
66
+ # PIPELINE HELPERS
67
+ # ════════════════════════════════════════════════════════════════════════════
68
+
69
def download_sam_if_needed() -> Optional[str]:
    """Ensure the SAM ViT-H checkpoint exists locally, downloading it once.

    Streams the checkpoint to a temporary ``.part`` file and renames it into
    place only on success, so an interrupted download can never leave a
    truncated file at ``SAM_CKPT`` that later calls would mistake for a
    valid checkpoint (the original wrote directly to the final path).

    Returns:
        Path to the checkpoint as a string, or None if the download failed.
    """
    if SAM_CKPT.exists():
        return str(SAM_CKPT)
    print("[SAM] Downloading checkpoint from HuggingFace …")
    tmp_path = SAM_CKPT.with_suffix(".part")
    try:
        r = requests.get(SAM_URL, stream=True, timeout=300)
        r.raise_for_status()
        with open(tmp_path, "wb") as f:
            for chunk in r.iter_content(1 << 20):  # 1 MiB chunks
                f.write(chunk)
        tmp_path.replace(SAM_CKPT)  # atomic on the same filesystem
        print(f"[SAM] Saved to {SAM_CKPT}")
        return str(SAM_CKPT)
    except Exception as e:
        # Best-effort: the caller falls back to flood-fill when this is None.
        print(f"[SAM] Download failed: {e}")
        try:
            tmp_path.unlink()
        except OSError:
            pass
        return None
84
+
85
+
86
def remove_title_block(img: np.ndarray) -> np.ndarray:
    """Crop away the title block / border strip of a blueprint sheet.

    Looks for a long vertical separator line in the right 30% of the page
    and a long horizontal one in the bottom 30% (typical title-block
    layouts) and crops 10 px inside them.  If neither is found, falls back
    to a density heuristic: when the right/bottom 20% strip is 1.5x denser
    in dark pixels than the page average, assume it holds a title block and
    crop at the 80% mark.

    Args:
        img: BGR blueprint image.

    Returns:
        Cropped copy (may be the full image when nothing is detected).
    """
    h, w = img.shape[:2]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150)

    # Directional openings keep only edges that form long horizontal /
    # vertical strokes (kernel spans 5% of the page dimension).
    h_kern = cv2.getStructuringElement(cv2.MORPH_RECT, (w // 20, 1))
    v_kern = cv2.getStructuringElement(cv2.MORPH_RECT, (1, h // 20))
    h_lines = cv2.morphologyEx(edges, cv2.MORPH_OPEN, h_kern)
    v_lines = cv2.morphologyEx(edges, cv2.MORPH_OPEN, v_kern)

    crop_r, crop_b = w, h  # defaults: no crop

    # Vertical separator search in the right 30%.  NOTE(review): the edge
    # image holds 0/255 values, so "column sum > h*0.3" is a very low bar
    # (a handful of edge pixels suffices) — presumably intentional, confirm.
    right_region = v_lines[:, int(w * 0.7):]
    if np.any(right_region):
        v_pos = np.where(np.sum(right_region, axis=0) > h * 0.3)[0]
        if len(v_pos):
            crop_r = int(w * 0.7) + v_pos[0] - 10

    # Same idea for a horizontal separator in the bottom 30%.
    bot_region = h_lines[int(h * 0.7):, :]
    if np.any(bot_region):
        h_pos = np.where(np.sum(bot_region, axis=1) > w * 0.3)[0]
        if len(h_pos):
            crop_b = int(h * 0.7) + h_pos[0] - 10

    # Fallback: compare dark-pixel density of the edge strips vs the page.
    if crop_r == w and crop_b == h:
        main_d = np.sum(gray < 200) / gray.size
        if np.sum(gray[:, int(w*0.8):] < 200) / (gray[:, int(w*0.8):].size) > main_d*1.5:
            crop_r = int(w * 0.8)
        if np.sum(gray[int(h*0.8):, :] < 200) / (gray[int(h*0.8):, :].size) > main_d*1.5:
            crop_b = int(h * 0.8)

    return img[:crop_b, :crop_r].copy()
118
+
119
+
120
def remove_colors(img: np.ndarray) -> np.ndarray:
    """Whiten saturated (colored) pixels while keeping dark linework.

    A pixel is erased when its chroma — max minus min of its BGR channels —
    exceeds 15 and it is not already near-white (gray < 240), so colored
    fills and annotations vanish while gray/black wall strokes survive.

    Args:
        img: BGR image.

    Returns:
        Copy of the image with colored pixels set to white.
    """
    channels = img.astype(np.int32)
    chroma = channels.max(axis=2) - channels.min(axis=2)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype(np.int32)
    out = img.copy()
    out[(chroma > 15) & (gray < 240)] = (255, 255, 255)
    return out
130
+
131
+
132
def estimate_wall_thickness(binary: np.ndarray, fallback: int = 12) -> int:
    """Estimate the dominant wall stroke thickness in pixels.

    Samples up to 200 evenly spaced columns of the binarised image and
    measures the vertical run lengths of foreground pixels in each.  Runs
    shorter than 2 px or taller than 15% of the image height are discarded
    as noise / non-wall blobs; the median of the rest (floored at 6) is the
    thickness estimate.

    Args:
        binary: single-channel mask where walls are non-zero.
        fallback: value returned when no usable run is found.

    Returns:
        Estimated thickness in pixels.
    """
    height, width = binary.shape
    sample_cols = np.linspace(0, width - 1, min(200, width), dtype=int)
    max_run = height * 0.15

    run_lengths = []
    for col_i in sample_cols:
        column = (binary[:, col_i] > 0).astype(np.int8)
        # Pad with zeros so runs touching the image edges still close.
        padded = np.concatenate([[0], column, [0]])
        delta = np.diff(padded.astype(np.int16))
        starts = np.where(delta == 1)[0]
        ends = np.where(delta == -1)[0]
        count = min(len(starts), len(ends))
        lengths = (ends[:count] - starts[:count]).astype(int)
        good = lengths[(lengths >= 2) & (lengths <= max_run)]
        run_lengths.extend(good.tolist())

    if not run_lengths:
        return fallback
    return max(6, int(np.median(run_lengths)))
149
+
150
+
151
def extract_walls_adaptive(img_clean: np.ndarray) -> Tuple[np.ndarray, int]:
    """Extract a solid wall mask with adaptively estimated stroke thickness.

    Steps:
      1. Otsu-binarise (inverted: dark ink -> 255).
      2. Keep only long horizontal/vertical strokes via directional opening.
      3. Dilate each direction perpendicular to the stroke so thin
         double-line walls fuse into solid bodies.
      4. Where the two dilations collide (junctions), keep only pixels
         backed by original ink so corners don't balloon.
      5. Clip the result to within half a wall thickness of original ink
         (distance transform), then drop tiny connected components.

    Args:
        img_clean: BGR image with colors already removed.

    Returns:
        (binary wall mask, estimated wall body thickness in px).
    """
    h, w = img_clean.shape[:2]
    gray = cv2.cvtColor(img_clean, cv2.COLOR_BGR2GRAY)
    # Otsu chooses the threshold; re-thresholding with the same value is
    # equivalent but keeps wall_threshold available as an explicit int.
    otsu_t, binary = cv2.threshold(
        gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
    )
    wall_threshold = int(otsu_t)
    _, binary = cv2.threshold(gray, wall_threshold, 255, cv2.THRESH_BINARY_INV)

    # Strokes shorter than ~1.2% of the page width are treated as noise.
    min_line_len = max(8, int(0.012 * w))
    body_thickness = estimate_wall_thickness(binary)
    body_thickness = int(np.clip(body_thickness, 9, 30))

    # Directional openings isolate long horizontal / vertical strokes.
    k_h = cv2.getStructuringElement(cv2.MORPH_RECT, (min_line_len, 1))
    k_v = cv2.getStructuringElement(cv2.MORPH_RECT, (1, min_line_len))
    long_h = cv2.morphologyEx(binary, cv2.MORPH_OPEN, k_h)
    long_v = cv2.morphologyEx(binary, cv2.MORPH_OPEN, k_v)
    orig_walls = cv2.bitwise_or(long_h, long_v)

    # Thicken perpendicular to each stroke direction to fill wall bodies.
    k_bh = cv2.getStructuringElement(cv2.MORPH_RECT, (1, body_thickness))
    k_bv = cv2.getStructuringElement(cv2.MORPH_RECT, (body_thickness, 1))
    dil_h = cv2.dilate(long_h, k_bh)
    dil_v = cv2.dilate(long_v, k_bv)
    walls = cv2.bitwise_or(dil_h, dil_v)

    # At H/V crossings keep only pixels that were real ink ("safe zone").
    collision = cv2.bitwise_and(dil_h, dil_v)
    safe_zone = cv2.bitwise_and(collision, orig_walls)
    walls = cv2.bitwise_or(
        cv2.bitwise_and(walls, cv2.bitwise_not(collision)), safe_zone
    )

    # Restrict the mask to within half a body thickness of original strokes
    # so the dilation cannot bleed into room interiors.
    dist = cv2.distanceTransform(cv2.bitwise_not(orig_walls), cv2.DIST_L2, 5)
    keep_mask = (dist <= body_thickness / 2).astype(np.uint8) * 255
    walls = cv2.bitwise_and(walls, keep_mask)

    # Speckle removal: drop connected components below an adaptive minimum
    # area via a label lookup table (vectorised, no per-component loop).
    n_lbl, labels, stats, _ = cv2.connectedComponentsWithStats(walls, connectivity=8)
    if n_lbl > 1:
        areas = stats[1:, cv2.CC_STAT_AREA]
        min_n = max(20, int(np.median(areas) * 0.0001))
        keep_lut = np.zeros(n_lbl, dtype=np.uint8)
        keep_lut[1:] = (areas >= min_n).astype(np.uint8)
        walls = (keep_lut[labels] * 255).astype(np.uint8)

    return walls, body_thickness
196
+
197
+
198
def apply_user_lines_to_walls(
    walls: np.ndarray,
    lines: List[Tuple[int,int,int,int]],
    thickness: int,
) -> np.ndarray:
    """Burn user-drawn door-closing segments into a copy of the wall mask.

    Args:
        walls: binary wall mask (255 = wall).
        lines: (x1, y1, x2, y2) endpoints of each seal line.
        thickness: stroke width in px, clamped to a minimum of 3.

    Returns:
        New mask with the lines painted at value 255.
    """
    sealed = walls.copy()
    stroke = max(thickness, 3)  # invariant: hoisted out of the loop
    for x1, y1, x2, y2 in lines:
        cv2.line(sealed, (x1, y1), (x2, y2), 255, stroke)
    return sealed
208
+
209
+
210
def segment_rooms_flood(walls: np.ndarray) -> np.ndarray:
    """Segment room interiors by flood-filling the outside of the wall mask.

    Seals a 5-px frame around the mask, flood-fills the exterior from the
    corners and edge midpoints, then inverts: everything not reachable from
    the outside (and not wall) is room interior.  A small opening removes
    1-px speckle.

    Bug fix: the original wrote the 5-px border frame directly into the
    caller's array — a hidden side effect.  We now work on an internal
    copy; the returned mask is unchanged.

    Args:
        walls: binary wall mask (255 = wall).  Not modified.

    Returns:
        Binary mask of room interiors.
    """
    walls = walls.copy()
    h, w = walls.shape
    walls[:5, :] = 255; walls[-5:, :] = 255
    walls[:, :5] = 255; walls[:, -5:] = 255

    filled = walls.copy()
    mask = np.zeros((h+2, w+2), np.uint8)  # floodFill needs a 2-px-larger mask
    # Seed from corners and edge midpoints so every exterior region that
    # touches the border gets filled even if the frame is broken up.
    for sx, sy in [(0,0),(w-1,0),(0,h-1),(w-1,h-1),
                   (w//2,0),(w//2,h-1),(0,h//2),(w-1,h//2)]:
        if filled[sy, sx] == 0:
            cv2.floodFill(filled, mask, (sx, sy), 255)

    rooms = cv2.bitwise_not(filled)
    rooms = cv2.bitwise_and(rooms, cv2.bitwise_not(walls))
    rooms = cv2.morphologyEx(rooms, cv2.MORPH_OPEN, np.ones((2,2), np.uint8))
    return rooms
226
+
227
+
228
def _morphological_skeleton(binary: np.ndarray) -> np.ndarray:
    """Fallback skeletonisation via iterative morphological erosion.

    Used when ``cv2.ximgproc.thinning`` is unavailable.  Each round erodes
    the mask with a 3x3 cross, keeps the ridge pixels lost by an
    open-then-subtract, and accumulates them into the skeleton.  Capped at
    300 rounds, which is ample for wall strokes a few dozen pixels thick.

    Args:
        binary: binary mask (255 = foreground).

    Returns:
        Binary skeleton mask of the same shape.
    """
    cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
    skeleton = np.zeros_like(binary)
    work = binary.copy()
    for _ in range(300):
        shrunk = cv2.erode(work, cross)
        ridge = cv2.subtract(work, cv2.dilate(shrunk, cross))
        skeleton = cv2.bitwise_or(skeleton, ridge)
        work = shrunk
        if cv2.countNonZero(work) == 0:
            break
    return skeleton
240
+
241
+
242
def _find_thick_wall_neg_prompts(
    walls_mask: np.ndarray, n: int = SAM_WALL_NEG
) -> List[Tuple[int,int]]:
    """Pick up to *n* negative SAM prompt points on thick wall centrelines.

    Skeletonises the wall mask, uses a distance transform to measure wall
    half-thickness along the skeleton, keeps skeleton pixels at or above
    the SAM_WALL_PCT-th percentile of half-thickness (floored at
    WALL_MIN_HALF_PX), then spreads the picks over a coarse grid so the
    negative prompts don't cluster in one wall.

    Args:
        walls_mask: binary wall mask (255 = wall).
        n: maximum number of points to return.

    Returns:
        List of (x, y) points, possibly empty.
    """
    h, w = walls_mask.shape
    # dist[p] = distance from wall pixel p to the nearest background pixel,
    # i.e. the local wall half-thickness along the centreline.
    dist = cv2.distanceTransform(walls_mask, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
    try:
        skel = cv2.ximgproc.thinning(
            walls_mask, thinningType=cv2.ximgproc.THINNING_ZHANGSUEN
        )
    except AttributeError:
        # opencv-contrib not installed: fall back to the slower pure
        # morphological skeleton.
        skel = _morphological_skeleton(walls_mask)

    skel_vals = dist[skel > 0]
    if len(skel_vals) == 0:
        return []
    thr = max(float(np.percentile(skel_vals, SAM_WALL_PCT)), WALL_MIN_HALF_PX)
    ys, xs = np.where((skel > 0) & (dist >= thr))
    if len(ys) == 0:
        return []

    # De-cluster: bucket candidates into a coarse grid and take the first
    # candidate per cell, up to n points total.
    grid_cells = max(1, int(np.ceil(np.sqrt(n * 4))))
    cell_h = max(1, h // grid_cells)
    cell_w = max(1, w // grid_cells)
    cell_ids = (ys // cell_h) * grid_cells + (xs // cell_w)
    _, first = np.unique(cell_ids, return_index=True)
    sel = first[:n]
    return [(int(xs[i]), int(ys[i])) for i in sel]
269
+
270
+
271
def generate_prompts(
    walls_mask: np.ndarray, rooms_flood: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
    """Build SAM point prompts from the connected components between walls.

    Positive prompts (label 1): the centroid of each non-wall component
    that is large enough to be a room candidate (>= SAM_CLOSET_THR px) and
    does not span the whole page (that one is the exterior).  If a
    centroid lands on a wall pixel (concave / L-shaped rooms), it is
    nudged to the nearest free pixel within +-10 px (stride 2); components
    with no nearby free pixel are skipped.
    Negative prompts (label 0): thick-wall centreline points from
    _find_thick_wall_neg_prompts.

    NOTE(review): *rooms_flood* is accepted but currently unused here —
    presumably kept to align the signature with the segmentation step.

    Args:
        walls_mask: binary wall mask (255 = wall).
        rooms_flood: flood-fill room mask (unused).

    Returns:
        (points as float32 [N, 2], labels as int32 [N]); both empty arrays
        when no candidate component is found.
    """
    h, w = walls_mask.shape
    inv = cv2.bitwise_not(walls_mask)
    n, labels, stats, centroids = cv2.connectedComponentsWithStats(inv, connectivity=8)

    pts, lbls = [], []
    for i in range(1, n):
        area = int(stats[i, cv2.CC_STAT_AREA])
        if area < SAM_CLOSET_THR:
            continue  # too small: closet / noise
        bx = int(stats[i, cv2.CC_STAT_LEFT]); by = int(stats[i, cv2.CC_STAT_TOP])
        bw = int(stats[i, cv2.CC_STAT_WIDTH]); bh = int(stats[i, cv2.CC_STAT_HEIGHT])
        # A component hugging all four page borders is the exterior.
        if bx <= 5 and by <= 5 and bx+bw >= w-5 and by+bh >= h-5:
            continue
        cx = int(np.clip(centroids[i][0], 0, w-1))
        cy = int(np.clip(centroids[i][1], 0, h-1))
        if walls_mask[cy, cx] > 0:
            # Centroid fell on a wall: search nearby for a free pixel.
            found = False
            for dy in range(-10, 11, 2):
                for dx in range(-10, 11, 2):
                    ny2, nx2 = cy+dy, cx+dx
                    if 0<=ny2<h and 0<=nx2<w and walls_mask[ny2,nx2]==0:
                        cx, cy = nx2, ny2; found = True; break
                if found: break
            if not found: continue
        pts.append([cx, cy]); lbls.append(1)

    for pt in _find_thick_wall_neg_prompts(walls_mask):
        pts.append(list(pt)); lbls.append(0)

    return np.array(pts, dtype=np.float32), np.array(lbls, dtype=np.int32)
304
+
305
+
306
def mask_to_rle(mask: np.ndarray) -> Dict:
    """Encode a binary mask as column-major (Fortran-order) run lengths.

    Follows the COCO uncompressed-RLE convention: ``counts`` always begins
    with the length of the leading run of background pixels (0 when the
    very first pixel is foreground).

    Bug fix: the original loop already appended a leading 0 when the first
    pixel was set, and then ``counts.insert(0, 0)`` added a *second* one,
    producing an invalid ``[0, 0, ...]`` encoding.  The encoding is also
    vectorised (the original iterated every pixel in Python).

    Args:
        mask: 2-D array; any non-zero pixel counts as foreground.

    Returns:
        ``{"counts": [...], "size": [h, w]}``.
    """
    h, w = mask.shape
    flat = mask.flatten(order='F').astype(bool)
    if flat.size == 0:
        return {"counts": [], "size": [h, w]}
    # Indices where the value changes delimit the runs.
    change = np.flatnonzero(flat[1:] != flat[:-1]) + 1
    bounds = np.concatenate(([0], change, [flat.size]))
    counts = np.diff(bounds).tolist()
    if flat[0]:
        counts.insert(0, 0)  # exactly one leading zero-run marker
    return {"counts": counts, "size": [h, w]}
316
+
317
+
318
def segment_with_sam(
    img_rgb: np.ndarray,
    walls: np.ndarray,
    sam_ckpt: str,
) -> List[Dict]:
    """Segment rooms with SAM, falling back to flood-fill on any failure.

    For each positive prompt point, predicts with all negative (thick-wall)
    points attached, keeps the best-scoring mask above SAM_MIN_SCORE,
    clips it to the flood-fill room area, and denoises with a small
    elliptical opening.

    Fallback to plain flood-fill contours when: SAM/torch fail to load,
    no prompts can be generated, or no mask survives the filters.

    Args:
        img_rgb: RGB image (SAM expects RGB, not BGR).
        walls: binary wall mask; copied before flood-fill, so not modified.
        sam_ckpt: path to the ViT-H checkpoint.

    Returns:
        List of room dicts with keys: "mask" (uint8 0/255), "score"
        (float), "prompt" ((x, y) of the positive point).
    """
    rooms_flood = segment_rooms_flood(walls.copy())

    try:
        # Local import: torch + SAM are heavy and optional.
        import torch
        from segment_anything import sam_model_registry, SamPredictor
        device = "cuda" if torch.cuda.is_available() else "cpu"
        sam = sam_model_registry["vit_h"](checkpoint=sam_ckpt)
        sam.to(device); sam.eval()
        predictor = SamPredictor(sam)
    except Exception as e:
        print(f"[SAM] Load failed: {e} — fallback to flood-fill")
        return _flood_fill_rooms(rooms_flood)

    pts, lbls = generate_prompts(walls, rooms_flood)
    if len(pts) == 0:
        return _flood_fill_rooms(rooms_flood)

    predictor.set_image(img_rgb)
    pos_pts = [(tuple(p), int(l)) for p, l in zip(pts, lbls) if l == 1]
    neg_pts = [tuple(p) for p, l in zip(pts, lbls) if l == 0]

    # Negative points are shared by every per-room prediction below.
    neg_coords = np.array(neg_pts, dtype=np.float32) if neg_pts else None
    neg_lbls = np.zeros(len(neg_pts), dtype=np.int32) if neg_pts else None
    denoise_k = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))

    results = []
    for (px, py), lbl in pos_pts:
        # One positive point per room, plus all shared negatives.
        if neg_coords is not None:
            pt_c = np.vstack([[[px, py]], neg_coords])
            pt_l = np.concatenate([[lbl], neg_lbls])
        else:
            pt_c = np.array([[px, py]], dtype=np.float32)
            pt_l = np.array([lbl], dtype=np.int32)

        masks, scores, _ = predictor.predict(
            point_coords=pt_c, point_labels=pt_l, multimask_output=True
        )
        best = int(np.argmax(scores))
        if float(scores[best]) < SAM_MIN_SCORE: continue

        # Clip SAM's mask to flood-fill room area so it cannot leak through
        # walls, then remove speckle.
        m = (masks[best] > 0).astype(np.uint8) * 255
        m = cv2.bitwise_and(m, rooms_flood)
        m = cv2.morphologyEx(m, cv2.MORPH_OPEN, denoise_k)
        if not np.any(m): continue

        results.append({"mask": m, "score": float(scores[best]), "prompt": (px, py)})

    if not results:
        return _flood_fill_rooms(rooms_flood)
    return results
374
+
375
+
376
def _flood_fill_rooms(rooms_flood: np.ndarray) -> List[Dict]:
    """Build room entries directly from flood-fill components (SAM fallback).

    Each external contour of the flood-filled room mask becomes one room
    dict with a filled mask, a dummy score of 1.0, and the contour centroid
    as the "prompt" point ((0, 0) for degenerate contours).

    Args:
        rooms_flood: binary room-interior mask.

    Returns:
        List of dicts with keys "mask", "score", "prompt".
    """
    contours, _ = cv2.findContours(
        rooms_flood, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )
    rooms: List[Dict] = []
    for contour in contours:
        filled = np.zeros_like(rooms_flood)
        cv2.drawContours(filled, [contour], -1, 255, -1)
        moments = cv2.moments(contour)
        if moments["m00"]:
            cx = int(moments["m10"] / moments["m00"])
            cy = int(moments["m01"] / moments["m00"])
        else:
            cx, cy = 0, 0
        rooms.append({"mask": filled, "score": 1.0, "prompt": (cx, cy)})
    return rooms
389
+
390
+
391
def filter_room_masks(
    room_masks: List[Dict], img_shape: Tuple
) -> List[Dict]:
    """Drop SAM / flood-fill masks that cannot plausibly be rooms.

    Geometric gates, all relative to the image size: contour area within
    [MIN_ROOM_AREA_FRAC, MAX_ROOM_AREA_FRAC] of the page; bounding box
    away from the page border by BORDER_MARGIN_FRAC; at least one bbox
    side above MIN_ROOM_DIM_FRAC of the width; aspect ratio at most
    MAX_ASPECT_RATIO; extent (area / bbox area) at least MIN_EXTENT; and
    solidity (area / convex-hull area) at least MIN_SOLIDITY.

    Args:
        room_masks: entries with at least a "mask" key (uint8 0/255).
        img_shape: (h, w, ...) of the source image.

    Returns:
        Shallow-copied surviving entries with "contour" (largest external
        contour) and "area_px" added; input entries are not mutated.
    """
    h, w = img_shape[:2]
    img_area = float(h * w)
    min_area = img_area * MIN_ROOM_AREA_FRAC
    max_area = img_area * MAX_ROOM_AREA_FRAC
    min_dim = w * MIN_ROOM_DIM_FRAC
    margin = max(5.0, w * BORDER_MARGIN_FRAC)

    valid = []
    for entry in room_masks:
        m = entry["mask"]
        cnts, _ = cv2.findContours(m, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not cnts: continue
        cnt = max(cnts, key=cv2.contourArea)  # judge only the largest blob
        area = cv2.contourArea(cnt)
        if not (min_area <= area <= max_area): continue
        bx, by, bw, bh = cv2.boundingRect(cnt)
        # Touching the page border: exterior leak or title-block artefact.
        if bx < margin or by < margin or bx+bw > w-margin or by+bh > h-margin:
            continue
        if bw < min_dim and bh < min_dim: continue
        asp = max(bw, bh) / (min(bw, bh) + 1e-6)
        if asp > MAX_ASPECT_RATIO: continue  # long slivers (wall gaps)
        if (area / (bw*bh+1e-6)) < MIN_EXTENT: continue
        hull = cv2.convexHull(cnt)
        ha = cv2.contourArea(hull)
        if ha > 0 and (area / ha) < MIN_SOLIDITY: continue

        entry = dict(entry)  # copy so caller's entries stay untouched
        entry["contour"] = cnt
        entry["area_px"] = area
        valid.append(entry)

    return valid
426
+
427
+
428
def pixel_area_to_m2(area_px: float) -> float:
    """Convert a pixel area to square metres using DPI and drawing scale.

    One pixel measures 2.54/DPI cm on paper; SCALE_FACTOR maps paper cm to
    real-world cm, and dividing by 10 000 converts cm^2 to m^2.

    Args:
        area_px: area in pixels.

    Returns:
        Real-world area in square metres.
    """
    cm_per_px_sq = (2.54 / DPI) ** 2
    real_cm2 = area_px * cm_per_px_sq * (SCALE_FACTOR ** 2)
    return real_cm2 / 10000
430
+
431
+
432
def run_ocr_on_room(img_bgr: np.ndarray, contour: np.ndarray) -> Optional[str]:
    """OCR the most confident text label inside a room's bounding box.

    Crops the room bbox padded by 20 px, enhances contrast with CLAHE,
    Otsu-binarises and median-blurs, then runs EasyOCR.  Returns the
    highest-confidence candidate (upper-cased) that meets OCR_CONF_THR,
    has at least two characters, and contains a letter — or None.

    OCR is best-effort: a missing easyocr install or any OCR exception
    simply yields None so the caller falls back to a generic label.

    Args:
        img_bgr: full BGR floor-plan image.
        contour: room contour in image coordinates.

    Returns:
        Upper-cased label string, or None.
    """
    try:
        import easyocr
        # Lazily create the reader once and cache it as a function
        # attribute — model loading is expensive.
        if not hasattr(run_ocr_on_room, "_reader"):
            run_ocr_on_room._reader = easyocr.Reader(["en"], gpu=False)
        reader = run_ocr_on_room._reader
    except ImportError:
        return None

    x, y, rw, rh = cv2.boundingRect(contour)
    pad = 20
    roi = img_bgr[max(0,y-pad):min(img_bgr.shape[0],y+rh+pad),
                  max(0,x-pad):min(img_bgr.shape[1],x+rw+pad)]
    if roi.size == 0:
        return None

    # Pre-process for OCR; convert back to 3 channels since EasyOCR
    # expects an RGB image.
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(2.0, (8,8))
    proc = clahe.apply(gray)
    _, bin_img = cv2.threshold(proc, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    rgb = cv2.cvtColor(cv2.medianBlur(bin_img, 3), cv2.COLOR_GRAY2RGB)

    try:
        results = reader.readtext(rgb, detail=1, paragraph=False)
        cands = [
            (t.strip().upper(), c)
            for _, t, c in results
            if c >= OCR_CONF_THR and len(t.strip()) >= 2 and any(ch.isalpha() for ch in t)
        ]
        return max(cands, key=lambda x: x[1])[0] if cands else None
    except Exception:
        return None
464
+
465
+
466
def validate_label(label: str) -> bool:
    """Heuristic sanity check for an OCR'd room label.

    Accepts labels that start with a letter and contain either exactly one
    letter (e.g. "A") or at least three (e.g. "KITCHEN"); two-letter OCR
    fragments are usually noise.

    Bug fix: a whitespace-only label is truthy, stripped to "", and the
    original then raised IndexError on ``label[0]``; it now returns False.

    Args:
        label: raw OCR text (may be None, empty, or whitespace).

    Returns:
        True when the label looks like a plausible room name.
    """
    if not label:
        return False
    label = label.strip()
    if not label or not label[0].isalpha():
        return False
    letter_count = sum(1 for c in label if c.isalpha())
    return letter_count == 1 or letter_count >= 3
472
+
473
+
474
def build_annotated_image(
    img_bgr: np.ndarray,
    rooms: List[Dict],
    selected_ids: Optional[List[int]] = None,
) -> np.ndarray:
    """Render the floor plan with colored room fills, borders and labels.

    For each room: blends a 35%-alpha color fill into the image, draws the
    contour border (yellow and thicker when the room id is in
    *selected_ids*), then places a two-line label (name + area in m^2) on a
    semi-transparent white plate at the contour centroid.

    Args:
        img_bgr: source BGR image (not modified).
        rooms: room dicts; each needs "id" and "contour", optionally
            "label" and "area_m2".
        selected_ids: room ids to highlight, or None.

    Returns:
        Annotated BGR copy of the image.
    """
    vis = img_bgr.copy()
    overlay = vis.copy()

    for i, room in enumerate(rooms):
        color = ROOM_COLORS[i % len(ROOM_COLORS)]
        bgr = (color[2], color[1], color[0])  # palette is RGB; cv2 wants BGR
        cnt = room.get("contour")
        if cnt is None: continue

        # Alpha-blend this room's fill, then resync the overlay so the next
        # room blends on top of the already-composited result.
        cv2.drawContours(overlay, [cnt], -1, bgr, -1)
        alpha = 0.35
        vis = cv2.addWeighted(overlay, alpha, vis, 1-alpha, 0)
        overlay = vis.copy()

        is_sel = selected_ids and room["id"] in selected_ids
        border_t = 4 if is_sel else 2
        border_c = (0, 255, 255) if is_sel else bgr  # selected = yellow
        cv2.drawContours(vis, [cnt], -1, border_c, border_t)

        # Label anchor: contour centroid (falls back to (0, 0) when the
        # contour is degenerate).
        M = cv2.moments(cnt)
        cx = int(M["m10"]/M["m00"]) if M["m00"] else 0
        cy = int(M["m01"]/M["m00"]) if M["m00"] else 0

        label = room.get("label", f"Room {room['id']}")
        area = room.get("area_m2", 0.0)
        text1 = label
        text2 = f"{area:.1f} mΒ²"

        fs = 0.55  # font scale; area line slightly smaller
        th = 1
        (tw1, th1), _ = cv2.getTextSize(text1, cv2.FONT_HERSHEY_SIMPLEX, fs, th)
        (tw2, th2), _ = cv2.getTextSize(text2, cv2.FONT_HERSHEY_SIMPLEX, fs-0.1, th)

        # Background plate sized to the wider of the two text lines.
        bx = cx - max(tw1, tw2)//2 - 4
        by = cy - th1 - th2 - 12
        bw2 = max(tw1, tw2) + 8
        bh2 = th1 + th2 + 16

        # 70%-white plate behind the text for readability.
        sub = vis[max(0,by):max(0,by)+bh2, max(0,bx):max(0,bx)+bw2]
        if sub.size > 0:
            white = np.ones_like(sub) * 255
            vis[max(0,by):max(0,by)+bh2, max(0,bx):max(0,bx)+bw2] = \
                cv2.addWeighted(sub, 0.3, white, 0.7, 0)

        cv2.putText(vis, text1,
                    (cx - tw1//2, cy - th2 - 6),
                    cv2.FONT_HERSHEY_SIMPLEX, fs, (20,20,20), th+1, cv2.LINE_AA)
        cv2.putText(vis, text2,
                    (cx - tw2//2, cy + th2 + 2),
                    cv2.FONT_HERSHEY_SIMPLEX, fs-0.1, (20,20,20), th, cv2.LINE_AA)

    return vis
531
 
532
 
533
def export_to_excel(rooms: List[Dict]) -> str:
    """Write the room list to a styled .xlsx workbook in the temp dir.

    Produces a "Room Analysis" sheet with a dark-blue header row,
    alternating light-blue row stripes, centred cells and auto-sized
    columns (capped at width 25).

    Args:
        rooms: Room dicts with geometry and score fields.

    Returns:
        Absolute path of the saved workbook as a string.
    """
    wb = openpyxl.Workbook()
    sheet = wb.active
    sheet.title = "Room Analysis"

    column_titles = ["ID", "Label", "Area (px)", "Area (mΒ²)", "Centroid X", "Centroid Y",
                     "Bbox X", "Bbox Y", "Bbox W", "Bbox H", "SAM Score", "Confidence"]
    head_fill = PatternFill("solid", fgColor="1F4E79")
    head_font = Font(bold=True, color="FFFFFF", size=11)

    # Header row with dark fill and white bold text.
    for col_idx, title in enumerate(column_titles, 1):
        hdr = sheet.cell(row=1, column=col_idx, value=title)
        hdr.fill = head_fill
        hdr.font = head_font
        hdr.alignment = Alignment(horizontal="center")

    stripe_fill = PatternFill("solid", fgColor="D6E4F0")
    for row_idx, room in enumerate(rooms, 2):
        cnt = room.get("contour")
        # Centroid from moments; degenerate contours fall back to (0,0).
        moments = cv2.moments(cnt) if cnt is not None else {}
        cx = int(moments["m10"] / moments["m00"]) if moments.get("m00") else 0
        cy = int(moments["m01"] / moments["m00"]) if moments.get("m00") else 0
        bbox = cv2.boundingRect(cnt) if cnt is not None else (0, 0, 0, 0)

        values = [
            room.get("id"), room.get("label", "?"),
            round(room.get("area_px", 0), 1),
            round(room.get("area_m2", 0.0), 2),
            cx, cy,
            bbox[0], bbox[1], bbox[2], bbox[3],
            round(room.get("score", 1.0), 4),
            round(room.get("confidence", 0.95), 2),
        ]
        # Stripe every even sheet row for readability.
        row_fill = stripe_fill if row_idx % 2 == 0 else None
        for col_idx, value in enumerate(values, 1):
            cell = sheet.cell(row=row_idx, column=col_idx, value=value)
            cell.alignment = Alignment(horizontal="center")
            if row_fill: cell.fill = row_fill

    # Auto-size columns to their longest content, capped at 25.
    for column_cells in sheet.columns:
        widest = max(len(str(c.value or "")) for c in column_cells) + 4
        sheet.column_dimensions[column_cells[0].column_letter].width = min(widest, 25)

    out = Path(tempfile.gettempdir()) / f"floorplan_rooms_{int(time.time())}.xlsx"
    wb.save(str(out))
    return str(out)
579
+
580
+
581
+ # ═════════════��══════════════════════════════════════════════════════════════
582
+ # STATE (Gradio state object, passed between callbacks)
583
+ # ════════════════════════════════════════════════════════════════════════════
584
+
585
def init_state() -> Dict:
    """Return a fresh per-session state dict for the Gradio callbacks."""
    return dict(
        img_orig=None,        # original uploaded image (BGR)
        img_cropped=None,     # after title-block removal
        img_clean=None,       # after color removal
        walls=None,           # binary wall mask
        user_lines=[],        # manual door-closing lines [(x1,y1,x2,y2), …]
        draw_start=None,      # pending line start pixel
        walls_thickness=8,    # estimated wall thickness in pixels
        rooms=[],             # detected room dicts
        selected_ids=[],      # currently selected room ids
        annotated=None,       # last annotated BGR render
        status="Idle",
    )
599
+
600
+
601
+ # ════════════════════════════════════════════════════════════════════════════
602
+ # GRADIO CALLBACKS
603
+ # ════════════════════════════════════════════════════════════════════════════
604
+
605
def cb_load_image(upload, state):
    """Decode the uploaded file into a BGR image and reset session state.

    Returns (preview RGB image, new state, status message).
    """
    if upload is None:
        return None, state, "Upload a floor-plan image to begin."

    raw = np.frombuffer(upload, dtype=np.uint8)
    img_bgr = cv2.imdecode(raw, cv2.IMREAD_COLOR)
    if img_bgr is None:
        return None, state, "❌ Could not decode image."

    # A new upload discards all prior work: start from a clean state.
    state = init_state()
    state["img_orig"] = img_bgr
    state["status"] = "Image loaded."

    h, w = img_bgr.shape[:2]
    preview = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
    return preview, state, f"βœ… Loaded {w}Γ—{h} px"
618
+
619
+
620
def cb_preprocess(state):
    """Crop the title block, strip colors, and extract the wall mask.

    Returns (clean RGB image, walls RGB image, state, status message).
    """
    img = state.get("img_orig")
    if img is None:
        return None, None, state, "Load an image first."

    cropped = remove_title_block(img)
    cleaned = remove_colors(cropped)
    state["img_cropped"] = cropped
    state["img_clean"] = cleaned

    # Extract walls and remember the estimated thickness for later
    # door-line drawing.
    walls, thickness = extract_walls_adaptive(cleaned)
    state["walls"] = walls.copy()
    state["walls_thickness"] = thickness

    return (
        cv2.cvtColor(cleaned, cv2.COLOR_BGR2RGB),
        cv2.cvtColor(walls, cv2.COLOR_GRAY2RGB),
        state,
        f"βœ… Walls extracted (thicknessβ‰ˆ{thickness}px)",
    )
638
+
639
+
640
def cb_add_door_line(evt: gr.SelectData, state):
    """
    Two-click line drawing on the wall image.
    First click β†’ sets start, second click β†’ draws line and resets.

    Returns (wall visualisation RGB, state, status message).
    """
    walls = state.get("walls")
    if walls is None:
        return None, state, "Run preprocessing first."

    # Clicked pixel in image coordinates.
    x, y = int(evt.index[0]), int(evt.index[1])

    if state["draw_start"] is None:
        # First click of the pair: just remember the start point.
        state["draw_start"] = (x, y)
        msg = f"πŸ–Š Start point set ({x},{y}). Click end point."
    else:
        # Second click: commit the line, burn all lines into the mask,
        # and return immediately with the updated visualisation.
        x1, y1 = state["draw_start"]
        state["user_lines"].append((x1, y1, x, y))
        state["draw_start"] = None

        # apply all lines to walls
        # NOTE(review): this re-applies every line to the already-updated
        # mask; presumably apply_user_lines_to_walls is idempotent β€” confirm.
        walls_upd = apply_user_lines_to_walls(
            state["walls"], state["user_lines"], state["walls_thickness"]
        )
        state["walls"] = walls_upd

        vis = cv2.cvtColor(walls_upd, cv2.COLOR_GRAY2RGB)
        for lx1, ly1, lx2, ly2 in state["user_lines"]:
            cv2.line(vis, (lx1,ly1), (lx2,ly2), (255,80,80), 3)
        return vis, state, f"βœ… Door line drawn ({x1},{y1})β†’({x},{y}) Total: {len(state['user_lines'])}"

    # First-click path only: redraw existing lines plus a marker dot at
    # the pending start point.
    vis = cv2.cvtColor(walls, cv2.COLOR_GRAY2RGB)
    for lx1, ly1, lx2, ly2 in state["user_lines"]:
        cv2.line(vis, (lx1,ly1), (lx2,ly2), (255,80,80), 3)
    if state["draw_start"]:
        cv2.circle(vis, state["draw_start"], 6, (0,200,255), -1)
    return vis, state, msg
676
+
677
+
678
def cb_undo_door_line(state):
    """Remove the most recent door-closing line and rebuild the wall mask.

    The base mask is recomputed from the clean image and the remaining
    user lines are re-applied, so the undone line leaves no residue.

    Returns (wall visualisation RGB, state, status message).
    """
    if not state["user_lines"]:
        return None, state, "No lines to undo."
    state["user_lines"].pop()
    state["draw_start"] = None

    img = state.get("img_clean")
    # BUGFIX: the original tested state["walls"] for None but then
    # dereferenced state["img_clean"], which could still be None and crash.
    if img is None:
        return None, state, "Re-run preprocessing."

    # Recompute the mask from scratch so the undone line disappears.
    walls_base, thick = extract_walls_adaptive(img)
    walls_upd = apply_user_lines_to_walls(
        walls_base, state["user_lines"], thick
    )
    state["walls"] = walls_upd
    # Keep the thickness in sync with the freshly recomputed base mask
    # (the original left a stale value behind).
    state["walls_thickness"] = thick

    vis = cv2.cvtColor(walls_upd, cv2.COLOR_GRAY2RGB)
    for lx1, ly1, lx2, ly2 in state["user_lines"]:
        cv2.line(vis, (lx1,ly1), (lx2,ly2), (255,80,80), 3)
    return vis, state, f"↩ Last line removed. Remaining: {len(state['user_lines'])}"
700
+
701
+
702
def cb_run_sam(state, progress=gr.Progress()):
    """Full segmentation pipeline: SAM masks β†’ filtering β†’ OCR labels.

    Returns (annotated RGB image, table rows, state, status message).
    """
    walls = state.get("walls")
    img = state.get("img_cropped")
    if walls is None or img is None:
        return None, None, state, "Run preprocessing first."

    progress(0.1, desc="Downloading SAM checkpoint…")
    ckpt = download_sam_if_needed()
    if ckpt is None:
        return None, None, state, "❌ SAM checkpoint download failed."

    progress(0.3, desc="Segmenting rooms…")
    # SAM expects RGB input; walls.copy() keeps the session mask intact.
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    raw = segment_with_sam(img_rgb, walls.copy(), ckpt)

    progress(0.6, desc="Filtering rooms…")
    filtered = filter_room_masks(raw, img.shape)

    progress(0.75, desc="Running OCR…")
    rooms = []
    for idx, entry in enumerate(filtered, 1):
        cnt = entry["contour"]
        # OCR label with a numbered fallback when unreadable/implausible.
        label = run_ocr_on_room(img, cnt)
        if not label or not validate_label(label):
            label = f"ROOM {idx}"

        # Centroid from moments; (0,0) fallback for degenerate contours.
        M = cv2.moments(cnt)
        cx = int(M["m10"]/M["m00"]) if M["m00"] else 0
        cy = int(M["m01"]/M["m00"]) if M["m00"] else 0
        area_px = entry["area_px"]
        area_m2 = pixel_area_to_m2(area_px)
        bx,by,bw,bh = cv2.boundingRect(cnt)

        rooms.append({
            "id": idx,
            "label": label,
            "contour": cnt,
            "mask": entry["mask"],
            "score": entry["score"],
            "area_px": round(area_px, 1),
            "area_m2": round(area_m2, 2),
            "bbox": [bx, by, bw, bh],
            "centroid": [cx, cy],
            # NOTE(review): fixed placeholder confidence β€” not computed.
            "confidence": 0.95,
        })

    state["rooms"] = rooms
    state["selected_ids"] = []

    progress(0.9, desc="Rendering…")
    annotated = build_annotated_image(img, rooms)
    state["annotated"] = annotated

    # Rows for the results Dataframe: id, label, area, SAM score.
    table = [[r["id"], r["label"], f"{r['area_m2']} mΒ²", f"{r['score']:.2f}"]
             for r in rooms]

    ann_rgb = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
    return ann_rgb, table, state, f"βœ… {len(rooms)} rooms detected."
760
 
 
 
 
 
 
 
761
 
762
def cb_click_room(evt: gr.SelectData, state):
    """Toggle selection of the room under the clicked pixel.

    Clicking inside a room selects/deselects it; clicking outside all
    rooms clears the selection. Returns (annotated RGB, state, message).
    """
    annotated = state.get("annotated")
    rooms = state.get("rooms", [])
    img = state.get("img_cropped")
    if annotated is None or not rooms:
        return None, state, "Run SAM first."

    px, py = int(evt.index[0]), int(evt.index[1])

    # First room whose contour contains the click point (>= 0 includes
    # points exactly on the boundary).
    hit_id = None
    for room in rooms:
        contour = room.get("contour")
        if contour is None:
            continue
        if cv2.pointPolygonTest(contour, (float(px), float(py)), False) >= 0:
            hit_id = room["id"]
            break

    if hit_id is None:
        state["selected_ids"] = []
        msg = "Clicked outside all rooms β€” selection cleared."
    else:
        selection = state["selected_ids"]
        if hit_id in selection:
            selection.remove(hit_id)
            msg = f"Room {hit_id} deselected."
        else:
            selection.append(hit_id)
            msg = f"Room {hit_id} selected."
        state["selected_ids"] = selection

    refreshed = build_annotated_image(img, rooms, state["selected_ids"])
    state["annotated"] = refreshed
    return cv2.cvtColor(refreshed, cv2.COLOR_BGR2RGB), state, msg
794
+
795
+
796
def cb_remove_selected(state):
    """Delete all selected rooms and renumber the survivors from 1.

    Returns (annotated RGB, table rows, state, status message).
    """
    sel = state.get("selected_ids", [])
    rooms = state.get("rooms", [])
    img = state.get("img_cropped")
    if not sel:
        return None, None, state, "No rooms selected."

    removed = [room["label"] for room in rooms if room["id"] in sel]
    survivors = [room for room in rooms if room["id"] not in sel]
    # Compact ids back to a contiguous 1..N sequence.
    for new_id, room in enumerate(survivors, 1):
        room["id"] = new_id
    state["rooms"] = survivors
    state["selected_ids"] = []

    ann = build_annotated_image(img, survivors)
    state["annotated"] = ann

    table = [[room["id"], room["label"], f"{room['area_m2']} mΒ²", f"{room['score']:.2f}"]
             for room in survivors]
    return (cv2.cvtColor(ann, cv2.COLOR_BGR2RGB), table, state,
            f"πŸ—‘ Removed: {', '.join(removed)}")
817
+
818
+
819
def cb_rename_selected(new_label, state):
    """Apply an upper-cased label to every currently selected room.

    Returns (annotated RGB, table rows, state, status message).
    """
    sel = state.get("selected_ids", [])
    rooms = state.get("rooms", [])
    img = state.get("img_cropped")
    if not sel:
        return None, None, state, "Select a room first."
    if not new_label.strip():
        return None, None, state, "Enter a non-empty label."

    cleaned = new_label.strip().upper()
    for room in rooms:
        if room["id"] in sel:
            room["label"] = cleaned
    state["rooms"] = rooms

    # Re-render keeping the current selection highlighted.
    ann = build_annotated_image(img, rooms, sel)
    state["annotated"] = ann
    table = [[room["id"], room["label"], f"{room['area_m2']} mΒ²", f"{room['score']:.2f}"]
             for room in rooms]
    return (cv2.cvtColor(ann, cv2.COLOR_BGR2RGB), table, state,
            f"✏ Renamed to '{cleaned}'")
839
+
840
+
841
def cb_export_excel(state):
    """Export the current room list to .xlsx; returns (path, message)."""
    rooms = state.get("rooms", [])
    if not rooms:
        return None, "No rooms to export."
    xlsx_path = export_to_excel(rooms)
    fname = Path(xlsx_path).name
    return xlsx_path, f"βœ… Exported {len(rooms)} rooms β†’ {fname}"
847
+
848
+
849
+ # ════════════════════════════════════════════════════════════════════════════
850
+ # GRADIO UI
851
+ # ════════════════════════════════════════════════════════════════════════════
852
+
853
# Custom CSS: heading styling, per-step card accents, and a green
# bold status line.
CSS = """
#title { text-align: center; font-size: 1.8em; font-weight: 700; color: #1F4E79; }
#subtitle { text-align: center; color: #555; margin-top: -8px; margin-bottom: 16px; }
.step-card { border-left: 4px solid #1F4E79 !important; padding-left: 10px !important; }
#status-box textarea { font-size: 0.95em !important; color: #1a6b2e !important; font-weight: 600 !important; }
"""

# Top-level Gradio UI: five numbered steps (upload β†’ preprocess β†’ door
# lines β†’ SAM+OCR β†’ table/export) wired to the cb_* callbacks above.
with gr.Blocks(css=CSS, title="FloorPlan Analyser") as app:
    # Per-session state dict (see init_state) threaded through every callback.
    state = gr.State(init_state())

    gr.Markdown("# 🏒 Floor Plan Room Analyser", elem_id="title")
    gr.Markdown(
        "Upload a floor-plan β†’ auto-extract walls β†’ close doors β†’ SAM segmentation β†’ OCR labels β†’ export Excel",
        elem_id="subtitle"
    )

    # Single shared status line updated by every callback.
    status_box = gr.Textbox(label="Status", interactive=False,
                            value="Idle β€” upload a floor plan to begin.",
                            elem_id="status-box")

    # ── Row 1: Upload + Preprocessing ───────────────────────────────────────
    with gr.Row():
        with gr.Column(scale=1, elem_classes="step-card"):
            gr.Markdown("### 1️⃣ Upload Floor Plan")
            upload_btn = gr.UploadButton(
                "πŸ“‚ Upload Image", file_types=["image"], size="sm"
            )
            raw_preview = gr.Image(label="Loaded Image", height=320)

        with gr.Column(scale=1, elem_classes="step-card"):
            gr.Markdown("### 2️⃣ Pre-process (Crop β†’ De-color β†’ Walls)")
            preprocess_btn = gr.Button("βš™ Run Preprocessing", variant="primary")
            with gr.Tabs():
                with gr.Tab("Clean Image"):
                    clean_img = gr.Image(label="After color removal", height=300)
                with gr.Tab("Walls"):
                    walls_img = gr.Image(label="Extracted walls", height=300)

    # ── Row 2: Door Line Drawing ─────────────────────────────────────────────
    with gr.Row():
        with gr.Column(elem_classes="step-card"):
            gr.Markdown("### 3️⃣ Draw Door-Closing Lines *(click start β†’ click end)*")
            gr.Markdown(
                "Click on the **Walls** image below to define start/end of a door-closing line. "
                "Drawn lines are applied to the wall mask before SAM runs, preventing segment leakage."
            )
            with gr.Row():
                undo_line_btn = gr.Button("↩ Undo Last Line", size="sm")
            # interactive=False: clicks are handled via .select, not editing.
            wall_draw_img = gr.Image(
                label="Wall mask β€” click to draw door lines",
                height=380, interactive=False
            )

    # ── Row 3: SAM + Annotation ──────────────────────────────────────────────
    with gr.Row():
        with gr.Column(scale=2, elem_classes="step-card"):
            gr.Markdown("### 4️⃣ SAM Segmentation + OCR")
            sam_btn = gr.Button("πŸ€– Run SAM + OCR", variant="primary")
            ann_img = gr.Image(
                label="Annotated rooms β€” click to select/deselect",
                height=480, interactive=False
            )

        with gr.Column(scale=1, elem_classes="step-card"):
            gr.Markdown("### 5️⃣ Room Table & Actions")
            room_table = gr.Dataframe(
                headers=["ID", "Label", "Area", "SAM Score"],
                datatype=["number", "str", "str", "str"],
                interactive=False, label="Detected Rooms"
            )
            with gr.Group():
                gr.Markdown("**Edit selected room(s)**")
                rename_txt = gr.Textbox(
                    placeholder="New label…", label="Rename Label"
                )
                with gr.Row():
                    rename_btn = gr.Button("✏ Rename", size="sm")
                    remove_btn = gr.Button("πŸ—‘ Remove Selected", size="sm", variant="stop")

            gr.Markdown("---")
            export_btn = gr.Button("πŸ“Š Export to Excel", variant="secondary")
            excel_file = gr.File(label="Download Excel", visible=True)

    # ── Wiring ───────────────────────────────────────────────────────────────

    upload_btn.upload(
        cb_load_image,
        inputs=[upload_btn, state],
        outputs=[raw_preview, state, status_box]
    )

    # After preprocessing, mirror the fresh wall mask into the drawing
    # canvas so door lines are drawn on the current mask.
    preprocess_btn.click(
        cb_preprocess,
        inputs=[state],
        outputs=[clean_img, walls_img, state, status_box]
    ).then(
        lambda s: cv2.cvtColor(s["walls"], cv2.COLOR_GRAY2RGB)
        if s.get("walls") is not None else None,
        inputs=[state],
        outputs=[wall_draw_img]
    )

    # Two-click door-line drawing (see cb_add_door_line).
    wall_draw_img.select(
        cb_add_door_line,
        inputs=[state],
        outputs=[wall_draw_img, state, status_box]
    )

    undo_line_btn.click(
        cb_undo_door_line,
        inputs=[state],
        outputs=[wall_draw_img, state, status_box]
    )

    sam_btn.click(
        cb_run_sam,
        inputs=[state],
        outputs=[ann_img, room_table, state, status_box]
    )

    # Click-to-select on the annotated result image.
    ann_img.select(
        cb_click_room,
        inputs=[state],
        outputs=[ann_img, state, status_box]
    )

    remove_btn.click(
        cb_remove_selected,
        inputs=[state],
        outputs=[ann_img, room_table, state, status_box]
    )

    rename_btn.click(
        cb_rename_selected,
        inputs=[rename_txt, state],
        outputs=[ann_img, room_table, state, status_box]
    )

    export_btn.click(
        cb_export_excel,
        inputs=[state],
        outputs=[excel_file, status_box]
    )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
996
 
997
if __name__ == "__main__":
    # NOTE(review): debug=True is useful during development but noisy in
    # production β€” consider disabling for deployed instances.
    app.launch(share=False, debug=True)