tarasevicius committed on
Commit
68c0f15
·
1 Parent(s): 2eeb8ec

feat: initial implementation

Browse files
Files changed (2) hide show
  1. app.py +100 -0
  2. requirements.txt +11 -0
app.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from PIL import Image
4
+ import numpy as np
5
+ import cv2
6
+
7
+ # Import YOLOv5
8
+ from yolov5.models.common import DetectMultiBackend
9
+ from yolov5.utils.torch_utils import select_device
10
+ from yolov5.utils.general import non_max_suppression, scale_coords
11
+
12
# Load model once at module import so every request reuses the same weights.
device = select_device('cpu') # use 'cuda:0' for GPU
# NOTE(review): DetectMultiBackend normally expects a local weights file path
# (e.g. 'best.pt'); confirm this Hub model id actually resolves to a weights
# file in the deployment environment.
model = DetectMultiBackend('ethandavey/yoloV5-coursework-model', device=device)
model.warmup(imgsz=(1, 3, 640, 640)) # warmup: one dummy 640x640 forward pass
16
+
17
def detect(image):
    """Run YOLOv5 inference on a PIL image and return an annotated PIL image.

    Args:
        image: input ``PIL.Image`` (RGB).

    Returns:
        ``PIL.Image``: a copy of the input with bounding boxes and
        ``"<class> <confidence>"`` labels drawn on it.
    """
    img = np.array(image)  # PIL yields an RGB HxWx3 uint8 array
    img0 = img.copy()      # keep the full-resolution original for drawing

    # Letterbox to the model's 640x640 input while preserving aspect ratio.
    img = letterbox(img, 640, stride=32, auto=False)[0]

    # HWC -> CHW. BUGFIX: the input already is RGB (it came from PIL), so the
    # upstream YOLOv5 `[::-1]` BGR->RGB channel flip (written for cv2-loaded
    # images) would actually convert it to BGR and degrade accuracy — it is
    # deliberately omitted here.
    img = img.transpose((2, 0, 1))
    img = np.ascontiguousarray(img)

    img = torch.from_numpy(img).to(device)
    img = img.float()
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)  # add batch dimension

    # Inference
    pred = model(img)

    # NMS (confidence 0.25, IoU 0.45)
    pred = non_max_suppression(pred, 0.25, 0.45, classes=None, agnostic=False)

    # BUGFIX: make the drawing canvas unconditionally so the return below
    # cannot hit an unbound name if there are no detection tensors.
    im0 = img0.copy()

    # Process detections (batch size is 1, so at most one tensor here).
    for det in pred:
        if len(det):
            # Rescale boxes from the letterboxed size back to im0 size.
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

            for *xyxy, conf, cls in reversed(det):
                label = f'{model.names[int(cls)]} {conf:.2f}'
                plot_one_box(xyxy, im0, label=label, color=(255, 0, 0), line_thickness=2)

    return Image.fromarray(im0)
53
+
54
def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), stride=32, auto=True):
    """Resize ``im`` to fit inside ``new_shape`` and pad the remainder.

    Args:
        im: HxWxC image array.
        new_shape: target (height, width), or a single int for a square target.
        color: BGR/RGB fill value for the padded border.
        stride: when ``auto`` is true, pad only up to a multiple of this stride.
        auto: minimum-rectangle mode (pad to stride multiple, not full shape).

    Returns:
        ``(padded_image, scale_ratio, (pad_w, pad_h))`` where the pads are the
        per-side padding amounts (half of the total padding).
    """
    src_h, src_w = im.shape[:2]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    target_h, target_w = new_shape

    # Downscale-only ratio: never enlarge past the original resolution.
    ratio = min(target_h / src_h, target_w / src_w, 1.0)

    scaled_w = int(round(src_w * ratio))
    scaled_h = int(round(src_h * ratio))
    pad_w = target_w - scaled_w
    pad_h = target_h - scaled_h
    if auto:
        # Minimum rectangle: only pad up to the next stride multiple.
        pad_w, pad_h = np.mod(pad_w, stride), np.mod(pad_h, stride)

    # Split the padding evenly between the two opposite sides.
    pad_w /= 2
    pad_h /= 2

    if (src_w, src_h) != (scaled_w, scaled_h):  # resize only when needed
        im = cv2.resize(im, (scaled_w, scaled_h), interpolation=cv2.INTER_LINEAR)
    # The +/-0.1 rounding assigns the odd pixel of an odd total pad to one side.
    top, bottom = int(round(pad_h - 0.1)), int(round(pad_h + 0.1))
    left, right = int(round(pad_w - 0.1)), int(round(pad_w + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return im, ratio, (pad_w, pad_h)
79
+
80
def plot_one_box(xyxy, im, color=(128, 128, 128), label=None, line_thickness=3):
    """Draw one bounding box (and an optional label) on ``im`` in place.

    Args:
        xyxy: box corners as (x1, y1, x2, y2), any numeric sequence.
        im: HxWxC image array, modified in place.
        color: box and label-background colour.
        label: optional text drawn above the top-left corner.
        line_thickness: box line thickness; falsy -> derived from image size.
    """
    thickness = line_thickness or round(0.002 * max(im.shape[0:2])) + 1
    top_left = (int(xyxy[0]), int(xyxy[1]))
    bottom_right = (int(xyxy[2]), int(xyxy[3]))
    cv2.rectangle(im, top_left, bottom_right, color, thickness=thickness, lineType=cv2.LINE_AA)
    if not label:
        return
    font_thickness = max(thickness - 1, 1)
    text_w, text_h = cv2.getTextSize(label, 0, fontScale=thickness / 3, thickness=font_thickness)[0]
    # Filled background rectangle sized to the rendered text, above the box.
    label_corner = (top_left[0] + text_w, top_left[1] - text_h - 3)
    cv2.rectangle(im, top_left, label_corner, color, -1, cv2.LINE_AA)
    cv2.putText(im, label, (top_left[0], top_left[1] - 2), 0, thickness / 3,
                (255, 255, 255), thickness=font_thickness, lineType=cv2.LINE_AA)
91
+
92
# Gradio UI: a single image-in / image-out demo wired to detect().
iface = gr.Interface(
    fn=detect,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title="YOLOv5 Object Detection",
    description="Upload an image to detect objects using the YOLOv5 model."
)

# Guard the server launch behind the standard script-entry check so importing
# this module (e.g. in tests) does not start a web server as a side effect.
# Hugging Face Spaces run app.py as __main__, so deployment behavior is kept.
if __name__ == "__main__":
    iface.launch()
requirements.txt ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ torch>=1.7.0
2
+ torchvision>=0.8.1
3
+ pillow
4
+ numpy
5
+ matplotlib
6
+ opencv-python
7
+ PyYAML>=5.3.1
8
+ seaborn
9
+ tqdm
10
+ pandas
11
+ git+https://github.com/ultralytics/yolov5.git