# NOTE: Hugging Face Spaces page header ("Spaces: Sleeping") removed — extraction residue, not code.
| import gradio as gr | |
| import numpy as np | |
| from PIL import Image | |
| import cv2 | |
| import json | |
| from ultralytics import YOLO | |
| from huggingface_hub import hf_hub_download | |
# Download model from HuggingFace Hub
# Fetches the Phase-3 segmentation checkpoint from the private/gated repo;
# hf_hub_download caches the file locally and returns its path.
model_path = hf_hub_download(
    repo_id="Askhedi/Inspection_V0",
    filename="Phase3.pt",
    token=True  # Uses HF_TOKEN environment variable
)
# Load model
# Loaded once at import time so every Gradio request reuses the same weights.
model = YOLO(model_path)
# Class colors (BGR format).
# BUGFIX: the previous values were written in RGB order (e.g. orange as
# (255, 165, 0)) while the drawing code operates on a BGR image
# (cv2.cvtColor(..., COLOR_RGB2BGR) before cv2.rectangle/putText), so the
# rendered colors were channel-swapped and contradicted the UI legend
# (dent 🟠, scratch 🟡, crack 🔴, glass_shatter 🟣, lamp_broken 🔵, tire_flat 🟢).
# These triples are the same intended colors expressed in true BGR.
CLASS_COLORS = {
    'dent': (0, 165, 255),           # orange
    'scratch': (0, 255, 255),        # yellow
    'crack': (0, 0, 255),            # red
    'glass_shatter': (255, 0, 255),  # magenta (identical in RGB/BGR)
    'lamp_broken': (255, 165, 0),    # azure blue
    'tire_flat': (0, 255, 0),        # green (identical in RGB/BGR)
}
def predict_damage(image, conf_threshold, iou_threshold, show_boxes, show_masks):
    """
    Perform vehicle damage detection and segmentation.

    Args:
        image: Input image (PIL Image or numpy array)
        conf_threshold: Confidence threshold (0-1)
        iou_threshold: IoU threshold for NMS (0-1)
        show_boxes: Whether to show bounding boxes
        show_masks: Whether to show segmentation masks

    Returns:
        annotated_image: RGB numpy image with detections drawn
        detection_info: Markdown text summary of detections
        bbox_data: Dictionary with bbox coordinates and metadata
    """
    # BUGFIX: Gradio may hand us an RGBA (transparent PNG) or grayscale PIL
    # image; np.array(...) would then have 4 or 1 channels and
    # cv2.COLOR_RGB2BGR would fail or miscolor. Force a 3-channel RGB image.
    if isinstance(image, Image.Image):
        image = image.convert("RGB")

    # Run inference
    results = model.predict(
        source=image,
        conf=conf_threshold,
        iou=iou_threshold,
        verbose=False
    )
    # Ultralytics returns one Results object per input image.
    result = results[0]

    # Prepare output image in BGR, since all cv2 drawing below assumes BGR.
    img = np.array(image)
    img_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

    detections = []
    bbox_list = []

    if result.boxes is not None and len(result.boxes) > 0:
        boxes = result.boxes.cpu().numpy()
        for i, box in enumerate(boxes):
            cls_id = int(box.cls[0])
            conf = float(box.conf[0])
            x1, y1, x2, y2 = map(int, box.xyxy[0])
            class_name = model.names[cls_id]
            # Fall back to white for any class not in the palette.
            color = CLASS_COLORS.get(class_name, (255, 255, 255))

            # Collect bbox data for the JSON output panel.
            bbox_list.append({
                "class_id": cls_id,
                "class_name": class_name,
                "confidence": round(conf, 4),
                "bbox": {
                    "x1": x1,
                    "y1": y1,
                    "x2": x2,
                    "y2": y2
                }
            })

            # Draw mask if available and enabled. Masks come at model
            # resolution, so resize to the image before thresholding.
            if show_masks and result.masks is not None:
                mask = result.masks.data[i].cpu().numpy()
                mask_resized = cv2.resize(mask, (img_bgr.shape[1], img_bgr.shape[0]))
                mask_bool = mask_resized > 0.5
                # Apply colored overlay (70% image, 30% class color).
                overlay = img_bgr.copy()
                overlay[mask_bool] = color
                img_bgr = cv2.addWeighted(img_bgr, 0.7, overlay, 0.3, 0)

            # Draw bounding box if enabled
            if show_boxes:
                cv2.rectangle(img_bgr, (x1, y1), (x2, y2), color, 2)
                # Draw label on a filled background strip.
                label = f"{class_name} {conf:.2f}"
                (label_w, label_h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
                # BUGFIX: for boxes touching the top edge, y1 - label_h - 10
                # is negative and the label is drawn off-image. Clamp the
                # label baseline so the strip always stays visible.
                label_y = max(y1, label_h + 10)
                cv2.rectangle(img_bgr, (x1, label_y - label_h - 10), (x1 + label_w, label_y), color, -1)
                cv2.putText(img_bgr, label, (x1, label_y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

            detections.append(f"• {class_name}: {conf:.2%} confidence")

    # Convert back to RGB for Gradio display.
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)

    # Prepare detection summary
    if detections:
        detection_info = f"**Found {len(detections)} damage(s):**\n\n" + "\n".join(detections)
    else:
        detection_info = "No damage detected."

    # Prepare bbox JSON output
    bbox_data = {
        "total_detections": len(bbox_list),
        "detections": bbox_list
    }

    return img_rgb, detection_info, bbox_data
# Create Gradio interface.
# Layout: left column = input image + advanced threshold settings + run button;
# right column = annotated image, markdown summary, raw bbox JSON.
with gr.Blocks(title="Vehicle Damage Inspection - Phase 3") as demo:
    gr.Markdown("# 🚗 Vehicle Damage Detection & Segmentation")
    gr.Markdown("Upload a vehicle image to detect and segment 6 types of damage: dent, scratch, crack, glass shatter, broken lamp, and flat tire.")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil", label="Upload Vehicle Image")
            with gr.Accordion("Advanced Settings", open=False):
                # Defaults mirror common YOLO inference settings (conf 0.25, IoU 0.7).
                conf_slider = gr.Slider(0.1, 0.9, value=0.25, step=0.05, label="Confidence Threshold")
                iou_slider = gr.Slider(0.1, 0.9, value=0.7, step=0.05, label="IoU Threshold")
                show_boxes = gr.Checkbox(value=True, label="Show Bounding Boxes")
                show_masks = gr.Checkbox(value=True, label="Show Segmentation Masks")
            predict_btn = gr.Button("🔍 Detect Damage", variant="primary")
        with gr.Column():
            output_image = gr.Image(type="numpy", label="Detection Results")
            detection_text = gr.Markdown(label="Detections")
            bbox_json = gr.JSON(label="Bounding Box Coordinates (JSON)")
    # Example images (add your own examples)
    # NOTE(review): examples list is currently empty — the gallery renders blank.
    gr.Examples(
        examples=[],
        inputs=input_image,
        label="Example Images"
    )
    # Wire the button to the inference function; outputs map 1:1 to
    # predict_damage's (image, markdown, dict) return tuple.
    predict_btn.click(
        fn=predict_damage,
        inputs=[input_image, conf_slider, iou_slider, show_boxes, show_masks],
        outputs=[output_image, detection_text, bbox_json]
    )
    gr.Markdown("""
    ### 📋 Model Information
    - **Classes**: dent 🟠, scratch 🟡, crack 🔴, glass_shatter 🟣, lamp_broken 🔵, tire_flat 🟢
    """)

# Standard script entry point: launch the Gradio server only when run directly.
if __name__ == "__main__":
    demo.launch()