import os from pathlib import Path from typing import List, Union from PIL import Image import ezdxf.units import numpy as np import torch from torchvision import transforms from ultralytics import YOLOWorld, YOLO from ultralytics.engine.results import Results from ultralytics.utils.plotting import save_one_box from transformers import AutoModelForImageSegmentation import cv2 import ezdxf import gradio as gr import gc from scalingtestupdated import calculate_scaling_factor from scipy.interpolate import splprep, splev from scipy.ndimage import gaussian_filter1d # --- DOCUMENTATION STRINGS (Drawer Detection App) --- GUIDELINE_SETUP = """ ## 1. Quick Start Guide: Setup and DXF Generation This application analyzes an image of items inside a drawer, calculates scaling, and outputs a manufacturing-ready DXF file with offsets applied. 1. **Upload Image:** Upload a clear image of the drawer area, ensuring the items and the scaling reference box are visible. 2. **Set Offset:** Enter the desired offset value in **inches**. This determines the clearance around the contour (e.g., 0.075 inches is the default). 3. **Run:** Click the **"Submit"** button (or run using an example). 4. **Review & Download:** Review the resulting images (Contoured Output, Outlines, Mask) and download the generated **DXF file**. """ GUIDELINE_INPUT = """ ## 2. Expected Inputs and Preprocessing | Input Field | Purpose | Requirement | | :--- | :--- | :--- | | **Input Image** | A high-resolution image of the drawer containing the objects to be contoured. | Must show the items and the reference scaling box clearly. | | **Offset value (inches)** | The physical distance (clearance) to be added around the detected contours for manufacturing tolerance. | Input must be a positive number (float). Default is 0.075 inches. | """ GUIDELINE_OUTPUT = """ ## 3. Expected Outputs (Manufacturing Results) The application provides five key outputs: 1. **Ouput Image:** The original cropped drawer image overlaid with the final, offset contours (blue lines). 2. **Outlines of Objects:** A grayscale image showing only the final, smoothed contour lines. 3. **DXF file (Downloadable):** The primary output. This file contains scaled 2D spline geometry (in inches) based on the calculated contours, ready for CAD or CNC machines. 4. **Mask:** The raw, dilated binary mask used to generate the contours. 5. **Scaling Factor (Textbox):** The calculated ratio (in pixels per inch) used to accurately convert pixel dimensions into real-world units for the DXF file. """ # ---------------------------------------------------- # END GUIDELINE DEFINITIONS # ---------------------------------------------------- birefnet = AutoModelForImageSegmentation.from_pretrained( "zhengpeng7/BiRefNet", trust_remote_code=True ) device = "cpu" torch.set_float32_matmul_precision(["high", "highest"][0]) birefnet.to(device) birefnet.eval() transform_image = transforms.Compose( [ transforms.Resize((1024, 1024)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), ] ) def yolo_detect( image: Union[str, Path, int, Image.Image, list, tuple, np.ndarray, torch.Tensor], classes: List[str], ) -> np.ndarray: drawer_detector = YOLOWorld("yolov8x-worldv2.pt") drawer_detector.set_classes(classes) results: List[Results] = drawer_detector.predict(image) boxes = [] for result in results: boxes.append( save_one_box(result.cpu().boxes.xyxy, im=result.orig_img, save=False) ) del drawer_detector gc.collect() # Ensure memory is cleared return boxes[0] def remove_bg(image: np.ndarray) -> np.ndarray: image = Image.fromarray(image) input_images = transform_image(image).unsqueeze(0).to("cpu") # Prediction with torch.no_grad(): preds = birefnet(input_images)[-1].sigmoid().cpu() pred = preds[0].squeeze() # Show Results pred_pil: Image = transforms.ToPILImage()(pred) # Scale proportionally with max length to 1024 for faster showing scale_ratio = 1024 / max(image.size) scaled_size = (int(image.size[0] * scale_ratio), int(image.size[1] * scale_ratio)) return np.array(pred_pil.resize(scaled_size)) def make_square(img: np.ndarray): # Get dimensions height, width = img.shape[:2] # Find the larger dimension max_dim = max(height, width) # Calculate padding pad_height = (max_dim - height) // 2 pad_width = (max_dim - width) // 2 # Handle odd dimensions pad_height_extra = max_dim - height - 2 * pad_height pad_width_extra = max_dim - width - 2 * pad_width # Create padding with edge colors if len(img.shape) == 3: # Color image # Pad the image padded = np.pad( img, ( (pad_height, pad_height + pad_height_extra), (pad_width, pad_width + pad_width_extra), (0, 0), ), mode="edge", ) else: # Grayscale image padded = np.pad( img, ( (pad_height, pad_height + pad_height_extra), (pad_width, pad_width + pad_width_extra), ), mode="edge", ) return padded def exclude_scaling_box( image: np.ndarray, bbox: np.ndarray, orig_size: tuple, processed_size: tuple, expansion_factor: float = 1.2, ) -> np.ndarray: # Unpack the bounding box x_min, y_min, x_max, y_max = map(int, bbox) # Calculate scaling factors scale_x = processed_size[1] / orig_size[1] # Width scale scale_y = processed_size[0] / orig_size[0] # Height scale # Adjust bounding box coordinates x_min = int(x_min * scale_x) x_max = int(x_max * scale_x) y_min = int(y_min * scale_y) y_max = int(y_max * scale_y) # Calculate expanded box coordinates box_width = x_max - x_min box_height = y_max - y_min expanded_x_min = max(0, int(x_min - (expansion_factor - 1) * box_width / 2)) expanded_x_max = min( image.shape[1], int(x_max + (expansion_factor - 1) * box_width / 2) ) expanded_y_min = max(0, int(y_min - (expansion_factor - 1) * box_height / 2)) expanded_y_max = min( image.shape[0], int(y_max + (expansion_factor - 1) * box_height / 2) ) # Black out the expanded region image[expanded_y_min:expanded_y_max, expanded_x_min:expanded_x_max] = 0 return image def resample_contour(contour): # Get all the parameters at the start: num_points = 1000 smoothing_factor = 5 spline_degree = 3 # Typically k=3 for cubic spline smoothed_x_sigma = 1 smoothed_y_sigma = 1 # Ensure contour has enough points if len(contour) < spline_degree + 1: raise ValueError(f"Contour must have at least {spline_degree + 1} points, but has {len(contour)} points.") contour = contour[:, 0, :] tck, _ = splprep([contour[:, 0], contour[:, 1]], s=smoothing_factor) u = np.linspace(0, 1, num_points) resampled_points = splev(u, tck) smoothed_x = gaussian_filter1d(resampled_points[0], sigma=smoothed_x_sigma) smoothed_y = gaussian_filter1d(resampled_points[1], sigma=smoothed_y_sigma) return np.array([smoothed_x, smoothed_y]).T def save_dxf_spline(inflated_contours, scaling_factor, height): degree = 3 closed = True doc = ezdxf.new(units=0) doc.units = ezdxf.units.IN doc.header["$INSUNITS"] = ezdxf.units.IN msp = doc.modelspace() for contour in inflated_contours: try: resampled_contour = resample_contour(contour) points = [ (x * scaling_factor, (height - y) * scaling_factor) for x, y in resampled_contour ] if len(points) >= 3: if np.linalg.norm(np.array(points[0]) - np.array(points[-1])) > 1e-2: points.append(points[0]) spline = msp.add_spline(points, degree=degree) spline.closed = closed except ValueError as e: print(f"Skipping contour: {e}") dxf_filepath = os.path.join("./outputs", "out.dxf") doc.saveas(dxf_filepath) return dxf_filepath def extract_outlines(binary_image: np.ndarray) -> np.ndarray: """ Extracts and draws the outlines of masks from a binary image. Args: binary_image: Grayscale binary image where white represents masks and black is the background. Returns: Image with outlines drawn. """ # Detect contours from the binary image contours, _ = cv2.findContours( binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE ) # Create a blank image to draw contours outline_image = np.zeros_like(binary_image) # Draw the contours on the blank image cv2.drawContours( outline_image, contours, -1, (255), thickness=1 ) # White color for outlines return cv2.bitwise_not(outline_image), contours def shrink_bbox(image: np.ndarray, shrink_factor: float): """ Crops the central portion of the image. """ height, width = image.shape[:2] center_x, center_y = width // 2, height // 2 # Calculate dimensions new_width = int(width * shrink_factor) new_height = int(height * shrink_factor) # Determine the top-left and bottom-right points for cropping x1 = max(center_x - new_width // 2, 0) y1 = max(center_y - new_height // 2, 0) x2 = min(center_x + new_width // 2, width) y2 = min(center_y + new_height // 2, height) # Crop the image cropped_image = image[y1:y2, x1:x2] return cropped_image def to_dxf(contours): doc = ezdxf.new() msp = doc.modelspace() for contour in contours: points = [(point[0][0], point[0][1]) for point in contour] msp.add_lwpolyline(points, close=True) # Add a polyline for each contour doc.saveas("./outputs/out.dxf") return "./outputs/out.dxf" def smooth_contours(contour): epsilon = 0.01 * cv2.arcLength(contour, True) # Adjust factor (e.g., 0.01) return cv2.approxPolyDP(contour, epsilon, True) def scale_image(image: np.ndarray, scale_factor: float) -> np.ndarray: """ Resize image by scaling both width and height by the same factor. """ if scale_factor <= 0: raise ValueError("Scale factor must be positive") current_height, current_width = image.shape[:2] # Calculate new dimensions new_width = int(current_width * scale_factor) new_height = int(current_height * scale_factor) # Choose interpolation method based on whether we're scaling up or down interpolation = cv2.INTER_AREA if scale_factor < 1 else cv2.INTER_CUBIC # Resize image resized_image = cv2.resize( image, (new_width, new_height), interpolation=interpolation ) return resized_image def detect_reference_square(img) -> np.ndarray: box_detector = YOLO("./last.pt") res = box_detector.predict(img, conf=0.05) del box_detector gc.collect() return save_one_box(res[0].cpu().boxes.xyxy, res[0].orig_img, save=False), res[ 0 ].cpu().boxes.xyxy[0] def resize_img(img: np.ndarray, resize_dim): return np.array(Image.fromarray(img).resize(resize_dim)) def predict(image, offset_inches): try: drawer_img = yolo_detect(image, ["box"]) shrunked_img = make_square(shrink_bbox(drawer_img, 0.90)) except: raise gr.Error("Unable to DETECT DRAWER, please take another picture with different magnification level!") # Detect the scaling reference square try: reference_obj_img, scaling_box_coords = detect_reference_square(shrunked_img) except: raise gr.Error("Unable to DETECT REFERENCE BOX, please take another picture with different magnification level!") # make the image sqaure so it does not effect the size of objects reference_obj_img = make_square(reference_obj_img) reference_square_mask = remove_bg(reference_obj_img) # make the mask same size as org image reference_square_mask = resize_img( reference_square_mask, (reference_obj_img.shape[1], reference_obj_img.shape[0]) ) scaling_factor = 1.0 try: scaling_factor = calculate_scaling_factor( reference_image_path="./Reference_ScalingBox.jpg", target_image=reference_square_mask, feature_detector="ORB", ) except ZeroDivisionError: print("Error calculating scaling factor: Division by zero") except Exception as e: print(f"Error calculating scaling factor: {e}") # Default to a scaling factor of 1.0 if calculation fails or is 0 if scaling_factor is None or scaling_factor <= 0: scaling_factor = 1.0 print("Using default scaling factor of 1.0 due to calculation error") # Save original size before `remove_bg` processing orig_size = shrunked_img.shape[:2] # Generate foreground mask and save its size objects_mask = remove_bg(shrunked_img) processed_size = objects_mask.shape[:2] # Exclude scaling box region from objects mask objects_mask = exclude_scaling_box( objects_mask, scaling_box_coords, orig_size, processed_size, expansion_factor=1.2, ) objects_mask = resize_img( objects_mask, (shrunked_img.shape[1], shrunked_img.shape[0]) ) # Ensure offset_inches is valid # Calculate pixel dilation amount: (offset_inches / scaling_factor) * 2 + 1 # We use 1.0 / scaling_factor because scaling_factor is px/inch. if scaling_factor > 0: # Convert inches to pixels offset_pixels = int(offset_inches / scaling_factor * 2) + 1 else: offset_pixels = 1 # Dilate mask for offset dilated_mask = cv2.dilate( objects_mask, np.ones((offset_pixels, offset_pixels), np.uint8) ) Image.fromarray(dilated_mask).save("./outputs/scaled_mask_new.jpg") outlines, contours = extract_outlines(dilated_mask) shrunked_img_contours = cv2.drawContours( shrunked_img, contours, -1, (0, 0, 255), thickness=2 ) dxf = save_dxf_spline(contours, scaling_factor, processed_size[0]) return ( cv2.cvtColor(shrunked_img_contours, cv2.COLOR_BGR2RGB), outlines, dxf, dilated_mask, scaling_factor, ) if __name__ == "__main__": os.makedirs("./outputs", exist_ok=True) # Use gr.Blocks to allow for the structured guideline accordion with gr.Blocks(title="Drawer Contouring and DXF Generator") as demo: gr.Markdown("

Drawer Contouring and DXF Generator (YOLO + BiRefNet)

") gr.Markdown("Tool for generating scaled manufacturing contours from an input image.") # 1. Guidelines Section with gr.Accordion(" Tips & User Guidelines", open=False): gr.Markdown(GUIDELINE_SETUP) gr.Markdown("---") gr.Markdown(GUIDELINE_INPUT) gr.Markdown("---") gr.Markdown(GUIDELINE_OUTPUT) # 2. Main Interface with gr.Row(): with gr.Column(scale=1): gr.Markdown("## Step 1: Upload an Image") input_image = gr.Image(label="1. Input Image", type="numpy") gr.Markdown("## Step 2: Set the offset value (Optional) ") offset_input = gr.Number(label="2. Offset value for Mask (inches)", value=0.075) gr.Markdown("## Step 3: Click the button ") submit_button = gr.Button(" Process and Generate DXF", variant="primary") with gr.Column(scale=2): gr.Markdown("## Results") scaling_output = gr.Textbox( label="Scaling Factor (pixels/inch)", placeholder="Calculated conversion rate", ) output_image = gr.Image(label="Output Image (Contours Drawn)") with gr.Row(): output_outlines = gr.Image(label="Outlines of Objects") output_mask = gr.Image(label="Final Dilated Mask") dxf_file = gr.File(label="DXF file (Download)") # 3. Examples Section gr.Markdown("## Examples ") gr.Examples( examples=[ ["./examples/Test20.jpg", 0.075], ["./examples/Test21.jpg", 0.075], ["./examples/Test22.jpg", 0.075], ["./examples/Test23.jpg", 0.075], ], inputs=[input_image, offset_input], outputs=[output_image, output_outlines, dxf_file, output_mask, scaling_output], fn=predict, cache_examples=False, label="Example Images (Click to load and run)", ) # Event Handler submit_button.click( fn=predict, inputs=[input_image, offset_input], outputs=[output_image, output_outlines, dxf_file, output_mask, scaling_output], ) demo.queue() demo.launch( server_name="0.0.0.0", server_port=7860, share=True)