WSLINMSAI committed on
Commit
db1f63e
·
verified ·
1 Parent(s): 30cc382

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -16
app.py CHANGED
@@ -1,18 +1,20 @@
1
- # app.py — Gradio app for panoramic radiograph segmentation (Detectron2)
 
 
 
2
 
3
- import os, json, time, cv2, numpy as np, torch, gradio as gr
4
  from detectron2.config import get_cfg
5
  from detectron2.engine import DefaultPredictor
6
  from detectron2.data import MetadataCatalog
7
  from detectron2.utils.visualizer import Visualizer, ColorMode
8
 
9
- # --- Local artifacts (uploaded to this Space) ---
10
- LOAD_DIR = "./artifacts"
11
- WEIGHTS = f"{LOAD_DIR}/model_final.pth"
12
- CFG_PATH = f"{LOAD_DIR}/config.yaml"
13
- CLASSES_PATH = f"{LOAD_DIR}/classes.json"
14
 
15
- # --- Build cfg & load model ---
16
  cfg = get_cfg()
17
  cfg.merge_from_file(CFG_PATH)
18
  cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
@@ -29,7 +31,7 @@ predictor = DefaultPredictor(cfg)
29
  meta = MetadataCatalog.get("inference_only")
30
  meta.thing_classes = classes if classes else [f"class_{i}" for i in range(cfg.MODEL.ROI_HEADS.NUM_CLASSES)]
31
 
32
- # --- Inference (auto-downscale very wide panoramics) ---
33
  MAX_SIDE = 1600
34
 
35
  def segment(rgb: np.ndarray):
@@ -43,8 +45,7 @@ def segment(rgb: np.ndarray):
43
  else:
44
  rgb_small = rgb
45
 
46
- # Detectron2 expects BGR; our array is RGB
47
- outputs = predictor(rgb_small[:, :, ::-1])
48
  inst = outputs["instances"].to("cpu")
49
 
50
  vis = Visualizer(rgb_small, metadata=meta, scale=1.0, instance_mode=ColorMode.IMAGE_BW)
@@ -52,18 +53,18 @@ def segment(rgb: np.ndarray):
52
 
53
  dets = []
54
  boxes_small = inst.pred_boxes.tensor.numpy().tolist() if inst.has("pred_boxes") else []
55
- scores = inst.scores.numpy().tolist() if inst.has("scores") else []
56
  classes_idx = inst.pred_classes.numpy().tolist() if inst.has("pred_classes") else []
57
  inv = (1.0/scale) if scale != 1.0 else 1.0
58
  for b, s, c in zip(boxes_small, scores, classes_idx):
59
- b = [float(x*inv) for x in b] # map back to original size
60
  label = meta.thing_classes[c] if 0 <= c < len(meta.thing_classes) else str(c)
61
  dets.append({"box": b, "class": label, "score": float(s)})
62
 
63
  return overlay_rgb, {
64
  "instances": dets,
65
  "original_size": [int(h0), int(w0)],
66
- "latency_ms": int((time.time() - t0)*1000),
67
  }
68
 
69
  with gr.Blocks(title="Panoramic Radiograph Segmentation") as demo:
@@ -71,8 +72,7 @@ with gr.Blocks(title="Panoramic Radiograph Segmentation") as demo:
71
  img_in = gr.Image(type="numpy", label="Panoramic Radiograph")
72
  with gr.Row():
73
  img_out = gr.Image(label="Overlay")
74
- json_out = gr.JSON(label="Detections (boxes in original image coords)")
75
- # Keep a stable API route for future programmatic calls
76
  gr.Button("Run").click(segment, inputs=img_in, outputs=[img_out, json_out], api_name="/predict")
77
 
78
  if __name__ == "__main__":
 
1
+ # app.py — Gradio app for panoramic radiograph segmentation (Detectron2, CPU/GPU)
2
+
3
+ import os, json, time
4
+ import numpy as np, cv2, torch, gradio as gr
5
 
 
6
  from detectron2.config import get_cfg
7
  from detectron2.engine import DefaultPredictor
8
  from detectron2.data import MetadataCatalog
9
  from detectron2.utils.visualizer import Visualizer, ColorMode
10
 
11
+ # --------- Artifacts already in the repo ---------
12
+ LOAD_DIR = "./artifacts"
13
+ WEIGHTS = os.path.join(LOAD_DIR, "model_final.pth")
14
+ CFG_PATH = os.path.join(LOAD_DIR, "config.yaml")
15
+ CLASSES_PATH = os.path.join(LOAD_DIR, "classes.json")
16
 
17
+ # --------- Build cfg & load model ---------
18
  cfg = get_cfg()
19
  cfg.merge_from_file(CFG_PATH)
20
  cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
31
  meta = MetadataCatalog.get("inference_only")
32
  meta.thing_classes = classes if classes else [f"class_{i}" for i in range(cfg.MODEL.ROI_HEADS.NUM_CLASSES)]
33
 
34
+ # --------- Inference (auto-downscale very wide panoramics) ---------
35
  MAX_SIDE = 1600
36
 
37
  def segment(rgb: np.ndarray):
 
45
  else:
46
  rgb_small = rgb
47
 
48
+ outputs = predictor(rgb_small[:, :, ::-1]) # predictor expects BGR
 
49
  inst = outputs["instances"].to("cpu")
50
 
51
  vis = Visualizer(rgb_small, metadata=meta, scale=1.0, instance_mode=ColorMode.IMAGE_BW)
 
53
 
54
  dets = []
55
  boxes_small = inst.pred_boxes.tensor.numpy().tolist() if inst.has("pred_boxes") else []
56
+ scores = inst.scores.numpy().tolist() if inst.has("scores") else []
57
  classes_idx = inst.pred_classes.numpy().tolist() if inst.has("pred_classes") else []
58
  inv = (1.0/scale) if scale != 1.0 else 1.0
59
  for b, s, c in zip(boxes_small, scores, classes_idx):
60
+ b = [float(x*inv) for x in b] # back to original image coords
61
  label = meta.thing_classes[c] if 0 <= c < len(meta.thing_classes) else str(c)
62
  dets.append({"box": b, "class": label, "score": float(s)})
63
 
64
  return overlay_rgb, {
65
  "instances": dets,
66
  "original_size": [int(h0), int(w0)],
67
+ "latency_ms": int((time.time()-t0)*1000),
68
  }
69
 
70
  with gr.Blocks(title="Panoramic Radiograph Segmentation") as demo:
 
72
  img_in = gr.Image(type="numpy", label="Panoramic Radiograph")
73
  with gr.Row():
74
  img_out = gr.Image(label="Overlay")
75
+ json_out = gr.JSON(label="Detections (boxes in original coords)")
 
76
  gr.Button("Run").click(segment, inputs=img_in, outputs=[img_out, json_out], api_name="/predict")
77
 
78
  if __name__ == "__main__":