WSLINMSAI commited on
Commit
13e00dd
·
verified ·
1 Parent(s): 08fffc6

Upload 6 files

Browse files
Files changed (6) hide show
  1. Dockerfile +33 -0
  2. README.md +26 -0
  3. app.py +120 -0
  4. gitattributes +40 -0
  5. requirements.txt +4 -0
  6. runtime.txt +1 -0
Dockerfile ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.10

# 1. Install system dependencies
#    git                   -> required to pip-install detectron2 from GitHub below
#    libgl1 / libglib2.0-0 -> native runtime libraries needed by opencv-python
RUN apt-get update && apt-get install -y \
    git \
    libgl1 \
    libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

# 2. Create user
#    Non-root uid 1000 is the Hugging Face Spaces convention; user-level pip
#    installs land in ~/.local/bin, hence the PATH extension.
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

# 3. Install Torch dependencies FIRST
#    CPU-only wheels (+cpu tag, pytorch.org index) keep the image small; torch
#    must exist before detectron2 builds in the next step.
RUN pip install --no-cache-dir pip --upgrade && \
    pip install --no-cache-dir \
    torch==2.0.1+cpu \
    torchvision==0.15.2+cpu \
    --extra-index-url https://download.pytorch.org/whl/cpu

# 4. Install Detectron2 with --no-build-isolation
# This flag fixes the "No module named torch" error: detectron2's setup.py
# imports torch at build time, so the build must see the torch installed above
# instead of an isolated build environment.
RUN pip install --no-cache-dir --no-build-isolation git+https://github.com/facebookresearch/detectron2.git

# 5. Install the requirements.txt
#    Copied on its own first so this layer is cached unless requirements change.
WORKDIR /app
COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# 6. Copy app files and run
COPY --chown=user . /app
CMD ["python", "app.py"]
README.md ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Capstone
3
+ emoji: 🌖
4
+ colorFrom: blue
5
+ colorTo: purple
6
+ sdk: docker
7
+ app_port: 7860
8
+ ---
9
+
10
+ # Dental X-Ray Segmentation Capstone
11
+
12
+ This Space hosts a deep learning model for the automatic segmentation of dental panoramic X-rays.
13
+
14
+ ## How to use
15
+ 1. Upload a panoramic dental X-ray (JPEG/PNG).
16
+ 2. Click "Run Segmentation".
17
+ 3. View the overlay and detection data.
18
+
19
+ ## Citations and References
20
+
21
+ This project utilizes the following research and datasets. Please cite them if you use this work:
22
+
23
+ > Brahmi, W., & Jdey, I. (2024). Automatic tooth instance segmentation and identification from panoramic X-Ray images using deep CNN. *Multimedia Tools and Applications, 83*(18), 55565–55585.
24
+ > Brahmi, W., Jdey, I., & Drira, F. (2024). Exploring the role of Convolutional Neural Networks (CNN) in dental radiography segmentation: A comprehensive Systematic Literature Review. *Engineering Applications of Artificial Intelligence, 133*, 108510.
25
+ > Abderrahim, H. (2020). *Panoramic Dental X-rays* [Data set]. Mendeley Data, V3.
26
+ > Available at: [https://data.mendeley.com/datasets/73n3kz2k4k/3](https://data.mendeley.com/datasets/73n3kz2k4k/3)
app.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os, json, time
import numpy as np, cv2, torch, gradio as gr
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import Visualizer, ColorMode

# --- 1. CONFIGURATION & MODEL LOADING ---
# Training artifacts expected alongside the app: final weights, the exported
# Detectron2 config, and an optional list of class names.
LOAD_DIR = "./artifacts"
WEIGHTS = os.path.join(LOAD_DIR, "model_final.pth")
CFG_PATH = os.path.join(LOAD_DIR, "config.yaml")
CLASSES_PATH = os.path.join(LOAD_DIR, "classes.json")

cfg = get_cfg()
cfg.merge_from_file(CFG_PATH)
cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"  # CPU fallback (the Docker image ships CPU-only torch)
cfg.MODEL.WEIGHTS = WEIGHTS
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # discard detections below 50% confidence

# classes.json is optional; when present it both names the classes and fixes
# the ROI head size. When absent, NUM_CLASSES must come from config.yaml.
classes = None
if os.path.exists(CLASSES_PATH):
    with open(CLASSES_PATH) as f:
        classes = json.load(f)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(classes)

predictor = DefaultPredictor(cfg)
# Dedicated metadata entry (no dataset registered) so the Visualizer can
# label instances; fall back to synthetic "class_i" names if none were loaded.
meta = MetadataCatalog.get("inference_only")
meta.thing_classes = classes if classes else [f"class_{i}" for i in range(cfg.MODEL.ROI_HEADS.NUM_CLASSES)]

# Longest image side is downscaled to this bound before inference to cap
# latency and memory on the CPU-only Space.
MAX_SIDE = 1600
32
# --- 2. INFERENCE FUNCTION ---
def segment(rgb: np.ndarray):
    """Run instance segmentation on one panoramic radiograph.

    Parameters
    ----------
    rgb : np.ndarray | None
        HxWx3 RGB image from the Gradio image component, or ``None`` when
        the user clicks Run without uploading an image.

    Returns
    -------
    tuple
        ``(overlay, info)`` where ``overlay`` is the annotated RGB image
        (at the possibly-downscaled inference resolution) and ``info`` is a
        JSON-serializable dict with detections (boxes mapped back to the
        original image's coordinates), the original size, and latency.
    """
    t0 = time.time()
    # Gradio passes None when no image has been uploaded.
    if rgb is None:
        return None, {"error": "No image uploaded"}

    # Downscale so the longest side is at most MAX_SIDE; box coordinates are
    # mapped back to the original resolution further down.
    h0, w0 = rgb.shape[:2]
    scale = 1.0
    if max(h0, w0) > MAX_SIDE:
        scale = MAX_SIDE / max(h0, w0)
        rgb_small = cv2.resize(rgb, (int(w0 * scale), int(h0 * scale)), interpolation=cv2.INTER_AREA)
    else:
        rgb_small = rgb

    outputs = predictor(rgb_small[:, :, ::-1])  # predictor expects BGR
    inst = outputs["instances"].to("cpu")

    # Visualizer draws on the RGB image at inference resolution.
    vis = Visualizer(rgb_small, metadata=meta, scale=1.0, instance_mode=ColorMode.IMAGE_BW)
    overlay_rgb = vis.draw_instance_predictions(inst).get_image()

    dets = []
    if inst.has("pred_boxes"):
        boxes = inst.pred_boxes.tensor.numpy().tolist()
        scores = inst.scores.numpy().tolist() if inst.has("scores") else [None] * len(boxes)
        classes_idx = inst.pred_classes.numpy().tolist() if inst.has("pred_classes") else [0] * len(boxes)
        inv = (1.0 / scale) if scale != 1.0 else 1.0
        for b, s, c in zip(boxes, scores, classes_idx):
            # Map the box back into the original image's coordinate system.
            b = [float(x * inv) for x in b]
            label = meta.thing_classes[c] if 0 <= c < len(meta.thing_classes) else str(c)
            # BUG FIX: float(None) raised TypeError when the model emitted no
            # scores field; keep None (serialized as JSON null) in that case.
            dets.append({"box": b, "class": label, "score": float(s) if s is not None else None})

    return overlay_rgb, {
        "instances": dets,
        "original_size": [int(h0), int(w0)],
        "latency_ms": int((time.time() - t0) * 1000),
    }
69
+
70
# --- 3. GRADIO INTERFACE ---

# Example images shipped with the Space (tracked via Git LFS; see .gitattributes).
example_files = [
    ["examples/1.jpg"],
    ["examples/2.jpg"],
    ["examples/3.jpg"],
    ["examples/4.jpg"],
    ["examples/5.jpg"]
]

with gr.Blocks(title="Panoramic Radiograph Segmentation") as demo:
    gr.Markdown("## Dental X-Ray Segmentation App")
    gr.Markdown("Upload a panoramic radiograph (or click an example below) to detect teeth.")

    with gr.Row():
        # --- Left Column: Input ---
        with gr.Column():
            img_in = gr.Image(type="numpy", label="Input Radiograph")

            # This adds the clickable thumbnails row that loads an example into img_in.
            gr.Examples(
                examples=example_files,
                inputs=img_in,
                label="Click an example to load it:"
            )

            submit_btn = gr.Button("Run Segmentation", variant="primary")

        # --- Right Column: Output ---
        with gr.Column():
            img_out = gr.Image(label="Overlay Result")
            json_out = gr.JSON(label="Detections Data")

    # Link the button to the inference function; api_name exposes a /predict endpoint.
    submit_btn.click(fn=segment, inputs=img_in, outputs=[img_out, json_out], api_name="/predict")

    # --- CITATIONS SECTION ---
    # BUG FIX: the original Markdown had a duplicated "Credits & Citations:"
    # line and three unclosed ** bold spans, which rendered as broken markup.
    gr.Markdown("---")
    gr.Markdown(
        """
        ### Credits & Citations
        * **Brahmi, W., & Jdey, I. (2024).** Automatic tooth instance segmentation and identification from panoramic X-Ray images using deep CNN. *Multimedia Tools and Applications, 83*(18), 55565–55585.
        * **Brahmi, W., Jdey, I., & Drira, F. (2024).** Exploring the role of Convolutional Neural Networks (CNN) in dental radiography segmentation: A comprehensive Systematic Literature Review. *Engineering Applications of Artificial Intelligence, 133*, 108510.
        * **[Panoramic Dental X-rays (Mendeley Data)](https://data.mendeley.com/datasets/73n3kz2k4k/3)**
        """
    )

if __name__ == "__main__":
    # Bind to all interfaces on the port declared in the Space README (app_port: 7860).
    demo.launch(server_name="0.0.0.0", server_port=7860)
gitattributes ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ examples/1.jpg filter=lfs diff=lfs merge=lfs -text
37
+ examples/2.jpg filter=lfs diff=lfs merge=lfs -text
38
+ examples/3.jpg filter=lfs diff=lfs merge=lfs -text
39
+ examples/4.jpg filter=lfs diff=lfs merge=lfs -text
40
+ examples/5.jpg filter=lfs diff=lfs merge=lfs -text
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio
2
+ opencv-python
3
+ numpy<2
4
+ Pillow
runtime.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ python-3.10