Spaces:
Sleeping
Sleeping
| import os | |
| import io | |
| import cv2 | |
| import time | |
| import json | |
| import math | |
| import torch | |
| import numpy as np | |
| import pandas as pd | |
| from PIL import Image | |
| import gradio as gr | |
# ----------------------------
# Config
# ----------------------------
# Checkpoint location; override via the MODEL_PATH environment variable.
DEFAULT_MODEL_PATH = os.getenv("MODEL_PATH", "weights/best.pt")
# Prefer GPU when available, otherwise fall back to CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# ----------------------------
# Model loading (edit this)
# ----------------------------
# Module-level cache so the model is loaded only once per process.
_model = None
def load_model(model_path: str = DEFAULT_MODEL_PATH):
    """Load the trained model once and cache it for the process lifetime.

    Replace the placeholder section with real loading code. Subsequent
    calls return the cached instance regardless of *model_path*.
    """
    global _model
    if _model is None:
        # >>> YOUR MODEL HERE <<<
        # Example (PyTorch scripted/ckpt):
        #   ckpt = torch.load(model_path, map_location=DEVICE)
        #   model = MyNet(...)
        #   model.load_state_dict(ckpt["state_dict"] if "state_dict" in ckpt else ckpt)
        #   model.to(DEVICE).eval()
        #
        # For YOLO-like:
        #   from ultralytics import YOLO
        #   model = YOLO(model_path)

        # Placeholder "no-model" object that keeps the UI running:
        class DummyModel:
            def __init__(self):
                pass

        _model = DummyModel()
    return _model
# ----------------------------
# Inference wrapper (edit this)
# ----------------------------
def infer(image_bgr: np.ndarray, conf: float = 0.25):
    """Run defect inference on a BGR image.

    Returns one of two dict shapes:
      {"type": "boxes", "boxes": [[x1, y1, x2, y2, score, label], ...]}
      {"type": "mask",  "mask": (H, W) binary array, 1 = defect}
    Edit this to call your model.
    """
    model = load_model()
    # >>> YOUR MODEL HERE <<<
    # Option A (detection):
    #   results = model(image_bgr[..., ::-1])  # example if model expects RGB
    #   boxes = [[x1, y1, x2, y2, score, "defect"], ...]
    #   return {"type": "boxes", "boxes": boxes}
    # Option B (segmentation):
    #   mask = your_segmentation(image_bgr)  # 0/1 uint8
    #   return {"type": "mask", "mask": mask}

    # --------- PLACEHOLDER (edge blobs as fake defects) ---------
    gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150)
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    h, w = gray.shape[:2]
    # Minimum bounding-box area: 0.05% of the image, but never below 150 px.
    area_floor = max(0.0005 * w * h, 150)
    detections = []
    for contour in contours:
        x, y, bw, bh = cv2.boundingRect(contour)
        if bw * bh >= area_floor:
            detections.append([x, y, x + bw, y + bh, 0.5, "defect"])
            if len(detections) >= 20:  # cap the number of fake detections
                break
    return {"type": "boxes", "boxes": detections}
# ----------------------------
# Utilities
# ----------------------------
def draw_boxes_with_x(image_bgr: np.ndarray, boxes, thickness: int = 3):
    """Return a copy of *image_bgr* with each box outlined in red, crossed
    with an X, and captioned with its "label:score" text."""
    RED = (0, 0, 255)  # BGR
    annotated = image_bgr.copy()
    for x1, y1, x2, y2, score, label in boxes:
        x1, y1, x2, y2 = (int(v) for v in (x1, y1, x2, y2))
        # Box outline.
        cv2.rectangle(annotated, (x1, y1), (x2, y2), RED, thickness)
        # Diagonal cross through the box.
        cv2.line(annotated, (x1, y1), (x2, y2), RED, thickness)
        cv2.line(annotated, (x1, y2), (x2, y1), RED, thickness)
        # Caption just above the top-left corner, clamped to the image edge.
        cv2.putText(annotated, f"{label}:{score:.2f}", (x1, max(y1 - 6, 0)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, RED, 2, cv2.LINE_AA)
    return annotated
def boxes_from_mask(mask: np.ndarray, min_area: int = 50):
    """Convert a binary defect mask into [x1, y1, x2, y2, score, label] rows,
    dropping components whose bounding box is smaller than *min_area* pixels."""
    binary = (mask > 0).astype(np.uint8)
    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        if w * h < min_area:
            continue  # too small to be a credible defect
        boxes.append([x, y, x + w, y + h, 1.0, "defect"])
    return boxes
def to_csv_file(rows, path="/tmp/defect_report.csv"):
    """Write the defect rows to *path* as CSV and return (path, DataFrame)."""
    columns = ["x1", "y1", "x2", "y2", "score", "label"]
    report = pd.DataFrame(rows, columns=columns)
    report.to_csv(path, index=False)
    return path, report
# ----------------------------
# Gradio handlers
# ----------------------------
def process(image: Image.Image, conf: float, draw_x: bool, min_area: int):
    """Gradio handler: run inference on an uploaded image and return the
    annotated picture, a preview DataFrame, and the CSV file path."""
    if image is None:
        # Nothing uploaded yet — blank outputs.
        return None, pd.DataFrame(), None
    # PIL (RGB) -> OpenCV (BGR).
    bgr = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2BGR)
    result = infer(bgr, conf=conf)
    if result["type"] == "mask":
        boxes = boxes_from_mask(result["mask"], min_area=min_area)
    else:
        boxes = [box for box in result["boxes"] if box[4] >= conf]
    # Annotate, or leave the frame untouched when drawing is disabled.
    annotated = draw_boxes_with_x(bgr, boxes) if draw_x else bgr.copy()
    csv_path, table = to_csv_file(boxes)
    rgb = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
    return Image.fromarray(rgb), table, csv_path
# ----------------------------
# UI
# ----------------------------
with gr.Blocks(title="AI-Driven EL Defect Recognition") as demo:
    gr.Markdown(
        "## AI-Driven Defect Recognition in EL Images\n"
        "Upload an electroluminescence (EL) image. The app detects defective cells, "
        "draws a red square with an X, and provides a CSV report."
    )
    with gr.Row():
        # Left column: input image and inference controls.
        with gr.Column():
            inp = gr.Image(type="pil", label="Upload EL image")
            conf = gr.Slider(0.0, 1.0, value=0.25, step=0.01, label="Confidence threshold")
            draw_x = gr.Checkbox(True, label="Draw red box + X")
            # Only used when infer() returns a segmentation mask.
            min_area = gr.Slider(10, 5000, value=120, step=10, label="Min defect area (pixels, for masks)")
            run_btn = gr.Button("Run inference", variant="primary")
        # Right column: annotated image, tabular preview, CSV download.
        with gr.Column():
            out_img = gr.Image(type="pil", label="Annotated output")
            out_table = gr.Dataframe(headers=["x1","y1","x2","y2","score","label"], label="Defect report (preview)")
            out_csv = gr.File(label="Download CSV")
    # Wire the button to the handler: (image, threshold, toggle, area) -> (image, table, file).
    run_btn.click(process, inputs=[inp, conf, draw_x, min_area],
                  outputs=[out_img, out_table, out_csv])
if __name__ == "__main__":
    load_model()  # warmup: populate the model cache before the first request
    demo.launch()