"""Gradio Space — Paint Defect Detector."""
| from __future__ import annotations |
|
|
| import os |
| import sys |
| import tempfile |
| from pathlib import Path |
|
|
| import cv2 |
| import gradio as gr |
| import numpy as np |
| import torch |
|
|
| |
# Make the repository root importable so the `src.infer` import below resolves
# when this script is executed directly (e.g. on a hosted Space whose working
# directory is arbitrary).
ROOT = Path(__file__).resolve().parent
if str(ROOT) not in sys.path:
    sys.path.insert(0, str(ROOT))
|
|
| |
# Default patch-probability cutoff for flagging a patch as defective; can be
# overridden via the PDD_DEFECT_THRESHOLD env var (float() accepts both the
# 0.55 float fallback and an env-provided string).
DEFECT_THRESHOLD = float(os.getenv("PDD_DEFECT_THRESHOLD", 0.55))
# NOTE(review): the four constants below are not referenced anywhere in this
# file — presumably consumed by src.infer or kept for documentation; confirm
# before removing.
PANEL_DEFECT_RATIO = 0.005  # fraction of defective area that flags a panel
IMG_SIZE = 384              # model input resolution per patch
PATCH_SIZE = 512            # sliding-window patch size in pixels
PATCH_STRIDE = 256          # sliding-window stride (50% overlap at 512)
BACKBONE = os.getenv("PDD_BACKBONE", "tf_efficientnetv2_s.in21k_ft_in1k")
|
|
| from src.infer import load_model, predict_image, render_visualization |
|
|
| |
# Prefer GPU when available; falls back to CPU (typical free Space tier).
_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Lazily-populated model singleton; see _get_model().
_model = None

# Expected location of the trained checkpoint relative to the repo root.
CHECKPOINT = ROOT / "checkpoints" / "best.pt"
|
|
|
|
def _get_model():
    """Return the cached inference model, loading it on first use.

    The loaded model is memoized in the module-level ``_model`` so the
    checkpoint is read from disk at most once per process.

    Raises:
        gr.Error: if the expected checkpoint file does not exist.
    """
    global _model
    if _model is not None:
        return _model
    if not CHECKPOINT.exists():
        raise gr.Error(
            "No trained checkpoint found at checkpoints/best.pt. "
            "Please train the model first and upload the checkpoint."
        )
    _model = load_model(CHECKPOINT, device=_device)
    return _model
|
|
|
|
| |
|
|
def run_inference(image: np.ndarray, vin: str, threshold: float) -> tuple:
    """Detect paint defects in an uploaded panel photo.

    Args:
        image: RGB image array from the Gradio input, or ``None`` when the
            user pressed Analyze without uploading anything.
        vin: Optional vehicle identification number, echoed into the summary.
        threshold: Patch probability above which a patch counts as defective.

    Returns:
        Tuple of (visualization RGB image or ``None``, markdown summary,
        markdown table of defect regions — empty string when none found).
    """
    if image is None:
        # FIX: restore mojibake-garbled warning emoji.
        return None, "⚠️ Please upload an image.", ""

    # Gradio delivers RGB; the OpenCV-based pipeline expects BGR.
    bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    try:
        model = _get_model()
    except gr.Error as e:
        # Surface the missing-checkpoint error as text instead of crashing.
        return None, str(e), ""

    result = predict_image(bgr, model, _device, threshold=threshold)
    vis_bgr = render_visualization(result)
    vis_rgb = cv2.cvtColor(vis_bgr, cv2.COLOR_BGR2RGB)

    # FIX: the verdict/summary strings were mojibake (UTF-8 emoji and
    # dashes decoded as ISO-8859-7); restored 🔴/🟢/—/×.
    verdict = "🔴 DEFECT DETECTED" if result["is_defect"] else "🟢 NO DEFECT — OK"
    vin_line = f"**VIN:** {vin.strip()}\n\n" if vin.strip() else ""
    summary = (
        f"{vin_line}"
        f"**Verdict:** {verdict}\n\n"
        f"**Defect ratio:** {result['defect_ratio']*100:.2f}%\n\n"
        f"**Max patch probability:** {result['max_prob']:.3f}\n\n"
        f"**Defect regions found:** {len(result['boxes'])}\n\n"
        f"**Panel size:** {result['panel_size']['w']} × {result['panel_size']['h']} px"
    )

    if result["boxes"]:
        rows = "\n".join(
            f"| {i+1} | {b['x']},{b['y']} | {b['w']}×{b['h']} | {b['confidence']:.3f} |"
            for i, b in enumerate(result["boxes"])
        )
        table = (
            "### Defect Regions\n"
            "| # | Position (x,y) | Size (w×h) | Confidence |\n"
            "|---|----------------|------------|------------|\n"
            + rows
        )
    else:
        table = ""

    return vis_rgb, summary, table
|
|
|
|
| |
|
|
# App header shown at the top of the UI. FIX: the heading emoji was mojibake
# (a 4-byte UTF-8 emoji decoded as ISO-8859-7, rendering as "π"); restored 🚗.
DESCRIPTION = """
# 🚗 Paint Defect Detector

Upload a photo of a car body panel to detect paint defects using a sliding-window
**EfficientNetV2-S** classifier. The model returns a heatmap overlay with bounding
boxes around defective regions.

> **Note:** A trained checkpoint (`checkpoints/best.pt`) must be present.
"""
|
|
# Build the Gradio UI: inputs (image, VIN, threshold) on the left, the
# visualization and markdown results on the right.
with gr.Blocks(title="Paint Defect Detector", theme=gr.themes.Soft()) as demo:
    gr.Markdown(DESCRIPTION)

    with gr.Row():
        with gr.Column(scale=1):
            img_input = gr.Image(label="Car Body Panel Photo", type="numpy")
            vin_input = gr.Textbox(label="VIN (optional)", placeholder="e.g. XTA210930Y2837465")
            threshold = gr.Slider(
                minimum=0.1, maximum=0.9, value=DEFECT_THRESHOLD, step=0.05,
                label="Defect Threshold",
                info="Patch probability above this value is marked as defective."
            )
            # FIX: button label emoji was mojibake ("π"); restored 🔍.
            run_btn = gr.Button("🔍 Analyze", variant="primary")

        with gr.Column(scale=1):
            img_output = gr.Image(label="Visualization (Heatmap + Bounding Boxes)", type="numpy")
            verdict_md = gr.Markdown(label="Result")
            table_md = gr.Markdown(label="Defect Regions")

    run_btn.click(
        fn=run_inference,
        inputs=[img_input, vin_input, threshold],
        outputs=[img_output, verdict_md, table_md],
    )

    # NOTE(review): examples list is empty — some Gradio versions reject an
    # empty gr.Examples; consider dropping this or adding sample images.
    gr.Examples(
        examples=[],
        inputs=[img_input],
        label="Examples"
    )
|
|
# Entry point for direct execution (`python app.py`).
if __name__ == "__main__":
    demo.launch()
|
|