yolofast upload
Browse files- yolofastapi/__init__.py +0 -0
- yolofastapi/detectors/__init__.py +0 -0
- yolofastapi/detectors/yolov8.py +123 -0
- yolofastapi/main.py +23 -0
- yolofastapi/routers/__init__.py +0 -0
- yolofastapi/routers/yolo.py +89 -0
- yolofastapi/schemas/yolo.py +7 -0
yolofastapi/__init__.py
ADDED
|
File without changes
|
yolofastapi/detectors/__init__.py
ADDED
|
File without changes
|
yolofastapi/detectors/yolov8.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# For machine learning
|
| 2 |
+
import torch
|
| 3 |
+
# For array computations
|
| 4 |
+
import numpy as np
|
| 5 |
+
# For image decoding / editing
|
| 6 |
+
import cv2
|
| 7 |
+
# For environment variables
|
| 8 |
+
import os
|
| 9 |
+
# For detecting which ML Devices we can use
|
| 10 |
+
import platform
|
| 11 |
+
# For actually using the YOLO models
|
| 12 |
+
from ultralytics import YOLO
|
| 13 |
+
|
| 14 |
+
class YoloV8ImageObjectDetection:
    """Detects objects in a single binary image using a YOLOv8 model.

    The weights path and confidence threshold are configurable via the
    YOLO_WEIGHTS_PATH and YOLO_CONF_THRESHOLD environment variables.
    """

    PATH = os.environ.get("YOLO_WEIGHTS_PATH", "yolov8n.pt")  # Path to a model. yolov8n.pt means download from PyTorch Hub
    CONF_THRESH = float(os.environ.get("YOLO_CONF_THRESHOLD", "0.70"))  # Confidence threshold

    def __init__(self, chunked: bytes = None):
        """Initializes a yolov8 detector with a binary image

        Arguments:
            chunked (bytes): A binary image representation
        """
        self._bytes = chunked
        self.model = self._load_model()
        self.device = self._get_device()
        self.classes = self.model.names

    def _get_device(self):
        """Gets best device for your system

        Returns:
            device (str): The device to use for YOLO for your system
        """
        # BUG FIX: "mps" was previously returned unconditionally on macOS,
        # which fails on Macs without a usable Metal backend (or on torch
        # builds without MPS support). Verify availability first.
        if platform.system().lower() == "darwin":
            mps = getattr(torch.backends, "mps", None)
            if mps is not None and mps.is_available():
                return "mps"
        if torch.cuda.is_available():
            return "cuda"
        return "cpu"

    def _load_model(self):
        """Loads Yolo8 model from pytorch hub or a path on disk

        Returns:
            model (Model): Trained Pytorch model
        """
        model = YOLO(YoloV8ImageObjectDetection.PATH)
        return model

    async def __call__(self):
        """This function is called when class is executed.
        It analyzes a single image passed to its constructor
        and returns the annotated image and its labels

        Returns:
            frame (numpy.ndarray): Frame with bounding boxes and labels plotted on it.
            labels (set(str)): The unique labels that were found
        """
        frame = self._get_image_from_chunked()
        results = self.score_frame(frame)
        frame, labels = self.plot_boxes(results, frame)
        return frame, set(labels)

    def _get_image_from_chunked(self):
        """Loads an openCV image from the raw image bytes passed by
        the API.

        Returns:
            img (numpy.ndarray): opencv2 image object from the raw binary
        """
        arr = np.asarray(bytearray(self._bytes), dtype=np.uint8)
        img = cv2.imdecode(arr, -1)  # 'Load it as it is'
        return img

    def score_frame(self, frame):
        """Scores a single image with a YoloV8 model

        Arguments:
            frame (numpy.ndarray): input frame in numpy/list/tuple format.

        Returns:
            results (list(ultralytics.engine.results.Results)): Labels and Coordinates of objects detected by model in the frame.
        """
        self.model.to(self.device)
        frame = [frame]
        results = self.model(
            frame,
            conf=YoloV8ImageObjectDetection.CONF_THRESH,
            save_conf=True
        )
        return results

    def class_to_label(self, x):
        """For a given label value, return corresponding string label.

        Arguments:
            x (int): numeric label

        Returns:
            class (str): corresponding string label
        """
        return self.classes[int(x)]

    def plot_boxes(self, results, frame):
        """Takes a frame and its results as input,
        and plots the bounding boxes and label on to the frame.

        Arguments:
            results (list(ultralytics.engine.results.Results)): contains labels and coordinates predicted by model on the given frame.
            frame (numpy.ndarray): Frame which has been scored.

        Returns:
            frame (numpy.ndarray): Frame with bounding boxes and labels plotted on it.
            labels (list(str)): The corresponding labels that were found
        """
        # BUG FIX: `labels` was previously re-initialized inside the results
        # loop (dropping labels from all but the last result) and was left
        # undefined (NameError) when `results` was empty.
        labels = []
        for result in results:
            for box in result.boxes:
                labels.append(self.model.names[int(box.cls)])
        # Only the first result is plotted; score_frame submits a single
        # frame, so there is exactly one result in practice.
        if results:
            frame = results[0].plot()
        return frame, labels
|
yolofastapi/main.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ASGI server components
from uvicorn import Server, Config
# Web framework
from fastapi import FastAPI
# Cross-origin resource sharing middleware
from starlette.middleware.cors import CORSMiddleware
# For reading the listen port from the environment
import os

# Application routers
from yolofastapi.routers import yolo

# Top-level FastAPI application object.
app = FastAPI()

# Allow any origin to call this API; credentials are disabled.
app.add_middleware(
    CORSMiddleware,
    allow_credentials=False,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# Mount the /yolo endpoints onto the application.
app.include_router(yolo.router)

if __name__ == "__main__":
    # Serve on all interfaces; port comes from $PORT (default 80).
    listen_port = int(os.environ.get("PORT", 80))
    server_config = Config(app, host="0.0.0.0", port=listen_port, lifespan="on")
    Server(server_config).run()
|
yolofastapi/routers/__init__.py
ADDED
|
File without changes
|
yolofastapi/routers/yolo.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# For API operations and standards
|
| 2 |
+
from fastapi import APIRouter, UploadFile, Response, status, HTTPException
|
| 3 |
+
# Our detector objects
|
| 4 |
+
from yolofastapi.detectors import yolov8
|
| 5 |
+
# For encoding images
|
| 6 |
+
import cv2
|
| 7 |
+
# For response schemas
|
| 8 |
+
from yolofastapi.schemas.yolo import ImageAnalysisResponse
|
| 9 |
+
|
| 10 |
+
# Router for all image-analysis endpoints; every route declared
# below is mounted under the /yolo prefix.
router = APIRouter(prefix="/yolo", tags=["Image Upload and analysis"])

# In-memory store of encoded, annotated images. In a real deployment
# this would live in persistent storage (e.g. a database plus object
# storage such as postgres + S3) rather than process memory.
images = []
|
| 19 |
+
|
| 20 |
+
@router.post("/",
    status_code=status.HTTP_201_CREATED,
    responses={
        201: {"description": "Successfully Analyzed Image."}
    },
    response_model=ImageAnalysisResponse,
)
async def yolo_image_upload(file: UploadFile) -> ImageAnalysisResponse:
    """Takes a multi-part upload image and runs yolov8 on it to detect objects

    Arguments:
        file (UploadFile): The multi-part upload file

    Returns:
        response (ImageAnalysisResponse): The image ID and labels in
                                          the pydantic object

    Raises:
        HTTPException(422): If the annotated frame could not be PNG-encoded.

    Example cURL:
        curl -X 'POST' \
        'http://localhost/yolo/' \
        -H 'accept: application/json' \
        -H 'Content-Type: multipart/form-data' \
        -F 'file=@image.jpg;type=image/jpeg'

    Example Return:
        {
        "id": 1,
        "labels": [
            "vase"
        ]
        }
    """
    contents = await file.read()
    dt = yolov8.YoloV8ImageObjectDetection(chunked=contents)
    frame, labels = await dt()
    # BUG FIX: the success flag returned by cv2.imencode was previously
    # ignored, so a failed encode would cache garbage and still report 201.
    success, encoded_image = cv2.imencode(".png", frame)
    if not success:
        raise HTTPException(status_code=422, detail="Could not encode annotated image")
    images.append(encoded_image)
    return ImageAnalysisResponse(id=len(images), labels=labels)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@router.get(
    "/{image_id}",
    status_code=status.HTTP_200_OK,
    responses={
        200: {"content": {"image/png": {}}},
        404: {"description": "Image ID Not Found."}
    },
    response_class=Response,
)
async def yolo_image_download(image_id: int) -> Response:
    """Takes an image id as a path param and returns that encoded
    image from the images array

    Arguments:
        image_id (int): The image ID to download (1-based)

    Returns:
        response (Response): The encoded image in PNG format

    Raises:
        HTTPException(404): If the image ID does not exist.

    Example cURL:
        curl -X 'GET' \
        'http://localhost/yolo/1' \
        -H 'accept: image/png'

    Example Return: A Binary Image
    """
    # BUG FIX: ids <= 0 previously wrapped around via Python's negative
    # list indexing (e.g. GET /yolo/0 returned the most recent image
    # instead of 404). Reject them so only valid 1-based ids resolve.
    if image_id < 1:
        raise HTTPException(status_code=404, detail="Image not found")
    try:
        return Response(content=images[image_id - 1].tobytes(), media_type="image/png")
    except IndexError:
        raise HTTPException(status_code=404, detail="Image not found")
|
yolofastapi/schemas/yolo.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel
from typing import Set


class ImageAnalysisResponse(BaseModel):
    """Response schema returned after an image has been analyzed."""
    # 1-based identifier of the stored, annotated image (usable with GET /yolo/{id}).
    id: int
    # Unique object labels detected in the uploaded image.
    labels: Set[str]