# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
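# Requires the `ultralytics` package and `onnxruntime` (or `onnxruntime-gpu` for CUDA execution) to be installed.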
from __future__ import annotations

import argparse

import cv2
import numpy as np
import onnxruntime as ort
import torch

from ultralytics.engine.results import Results
from ultralytics.utils import ASSETS, YAML, nms, ops
from ultralytics.utils.checks import check_yaml


class YOLOv8Seg:
"""YOLOv8 segmentation model for performing instance segmentation using ONNX Runtime.
This class implements a YOLOv8 instance segmentation model using ONNX Runtime for inference. It handles
preprocessing of input images, running inference with the ONNX model, and postprocessing the results to generate
bounding boxes and segmentation masks.
Attributes:
session (ort.InferenceSession): ONNX Runtime inference session for model execution.
imgsz (tuple[int, int]): Input image size as (height, width) for the model.
classes (dict): Dictionary mapping class indices to class names from the dataset.
conf (float): Confidence threshold for filtering detections.
iou (float): IoU threshold used by non-maximum suppression.
Methods:
letterbox: Resize and pad image while maintaining aspect ratio.
preprocess: Preprocess the input image before feeding it into the model.
postprocess: Post-process model predictions to extract meaningful results.
process_mask: Process prototype masks with predicted mask coefficients to generate instance segmentation masks.
Examples:
>>> model = YOLOv8Seg("yolov8n-seg.onnx", conf=0.25, iou=0.7)
>>> img = cv2.imread("image.jpg")
>>> results = model(img)
>>> cv2.imshow("Segmentation", results[0].plot())
"""
    def __init__(self, onnx_model: str, conf: float = 0.25, iou: float = 0.7, imgsz: int | tuple[int, int] = 640):
        """Initialize the instance segmentation model using an ONNX model.

        Args:
            onnx_model (str): Path to the ONNX model file.
            conf (float, optional): Confidence threshold for filtering detections.
            iou (float, optional): IoU threshold for non-maximum suppression.
            imgsz (int | tuple[int, int], optional): Input image size of the model. Can be an integer for square input
                or a tuple for rectangular input.
        """
        available = ort.get_available_providers()
        providers = [p for p in ("CUDAExecutionProvider", "CPUExecutionProvider") if p in available]
        self.session = ort.InferenceSession(onnx_model, providers=providers or available)

        self.imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz
        self.classes = YAML.load(check_yaml("coco8.yaml"))["names"]
        self.conf = conf
        self.iou = iou
    def __call__(self, img: np.ndarray) -> list[Results]:
        """Run inference on the input image using the ONNX model.

        Args:
            img (np.ndarray): The original input image in BGR format.

        Returns:
            (list[Results]): Processed detection results after post-processing, containing bounding boxes and
                segmentation masks.
        """
        prep_img = self.preprocess(img, self.imgsz)
        outs = self.session.run(None, {self.session.get_inputs()[0].name: prep_img})
        return self.postprocess(img, prep_img, outs)
    def letterbox(self, img: np.ndarray, new_shape: tuple[int, int] = (640, 640)) -> np.ndarray:
        """Resize and pad image while maintaining aspect ratio.

        Args:
            img (np.ndarray): Input image in BGR format.
            new_shape (tuple[int, int], optional): Target shape as (height, width).

        Returns:
            (np.ndarray): Resized and padded image.
        """
        shape = img.shape[:2]  # current shape [height, width]

        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])

        # Compute padding
        new_unpad = round(shape[1] * r), round(shape[0] * r)
        dw, dh = (new_shape[1] - new_unpad[0]) / 2, (new_shape[0] - new_unpad[1]) / 2  # wh padding

        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = round(dh - 0.1), round(dh + 0.1)  # -/+0.1 splits an odd padding pixel between opposite sides
        left, right = round(dw - 0.1), round(dw + 0.1)
        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))

        return img
    def preprocess(self, img: np.ndarray, new_shape: tuple[int, int]) -> np.ndarray:
        """Preprocess the input image before feeding it into the model.

        Args:
            img (np.ndarray): The input image in BGR format.
            new_shape (tuple[int, int]): The target shape for resizing as (height, width).

        Returns:
            (np.ndarray): Preprocessed image ready for model inference, with shape (1, 3, height, width) and
                normalized to [0, 1].
        """
        img = self.letterbox(img, new_shape)
        img = img[..., ::-1].transpose([2, 0, 1])[None]  # BGR to RGB, HWC to BCHW (adds batch dimension)
        img = np.ascontiguousarray(img)
        img = img.astype(np.float32) / 255  # Normalize to [0, 1]
        return img
    def postprocess(self, img: np.ndarray, prep_img: np.ndarray, outs: list) -> list[Results]:
        """Post-process model predictions to extract meaningful results.

        Args:
            img (np.ndarray): The original input image.
            prep_img (np.ndarray): The preprocessed image used for inference.
            outs (list): Model outputs containing predictions and prototype masks.

        Returns:
            (list[Results]): Processed detection results containing bounding boxes and segmentation masks.
        """
        preds, protos = (torch.from_numpy(p) for p in outs)
        preds = nms.non_max_suppression(preds, self.conf, self.iou, nc=len(self.classes))

        results = []
        for i, pred in enumerate(preds):
            # Each row of pred is (x1, y1, x2, y2, conf, cls, mask coefficients...) after NMS
            pred[:, :4] = ops.scale_boxes(prep_img.shape[2:], pred[:, :4], img.shape)
            masks = self.process_mask(protos[i], pred[:, 6:], pred[:, :4], img.shape[:2])
            results.append(Results(img, path="", names=self.classes, boxes=pred[:, :6], masks=masks))
        return results
    def process_mask(
        self, protos: torch.Tensor, masks_in: torch.Tensor, bboxes: torch.Tensor, shape: tuple[int, int]
    ) -> torch.Tensor:
        """Process prototype masks with predicted mask coefficients to generate instance segmentation masks.

        Args:
            protos (torch.Tensor): Prototype masks with shape (mask_dim, mask_h, mask_w).
            masks_in (torch.Tensor): Predicted mask coefficients with shape (N, mask_dim), where N is the number of
                detections.
            bboxes (torch.Tensor): Bounding boxes with shape (N, 4), where N is the number of detections.
            shape (tuple[int, int]): The size of the input image as (height, width).

        Returns:
            (torch.Tensor): Binary segmentation masks with shape (N, height, width).
        """
        c, mh, mw = protos.shape  # CHW
        masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw)  # Matrix multiplication
        masks = ops.scale_masks(masks[None], shape)[0]  # Scale masks to original image size
        masks = ops.crop_mask(masks, bboxes)  # Crop masks to bounding boxes
        return masks.gt_(0.0)  # Convert to binary masks
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True, help="Path to ONNX model")
parser.add_argument("--source", type=str, default=str(ASSETS / "bus.jpg"), help="Path to input image")
parser.add_argument("--conf", type=float, default=0.25, help="Confidence threshold")
parser.add_argument("--iou", type=float, default=0.7, help="NMS IoU threshold")
args = parser.parse_args()
model = YOLOv8Seg(args.model, args.conf, args.iou)
img = cv2.imread(args.source)
results = model(img)
cv2.imshow("Segmented Image", results[0].plot())
cv2.waitKey(0)
cv2.destroyAllWindows()
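
# Example invocation (assumes a segmentation checkpoint exported to ONNX, e.g. with
# `yolo export model=yolov8n-seg.pt format=onnx`; substitute this file's name for the script name):
#   python yolov8_seg_onnx.py --model yolov8n-seg.onnx --source path/to/image.jpg --conf 0.25 --iou 0.7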