HilmiZr committed
Commit 0a21b08 · Parent(s): 8707990

initial commit

This view is limited to 50 files because the commit contains too many changes.

Files changed (50):
  1. .gitignore +7 -0
  2. app.py +112 -0
  3. detect.py +285 -0
  4. models/__init__.py +0 -0
  5. models/common.py +883 -0
  6. models/experimental.py +111 -0
  7. models/hub/anchors.yaml +59 -0
  8. models/hub/yolov3-spp.yaml +51 -0
  9. models/hub/yolov3-tiny.yaml +41 -0
  10. models/hub/yolov3.yaml +51 -0
  11. models/hub/yolov5-bifpn.yaml +48 -0
  12. models/hub/yolov5-fpn.yaml +42 -0
  13. models/hub/yolov5-p2.yaml +54 -0
  14. models/hub/yolov5-p34.yaml +41 -0
  15. models/hub/yolov5-p6.yaml +56 -0
  16. models/hub/yolov5-p7.yaml +67 -0
  17. models/hub/yolov5-panet.yaml +48 -0
  18. models/hub/yolov5l6.yaml +60 -0
  19. models/hub/yolov5m6.yaml +60 -0
  20. models/hub/yolov5n6.yaml +60 -0
  21. models/hub/yolov5s-LeakyReLU.yaml +49 -0
  22. models/hub/yolov5s-ghost.yaml +48 -0
  23. models/hub/yolov5s-transformer.yaml +48 -0
  24. models/hub/yolov5s6.yaml +60 -0
  25. models/hub/yolov5x6.yaml +60 -0
  26. models/segment/yolov5l-seg.yaml +48 -0
  27. models/segment/yolov5m-seg.yaml +48 -0
  28. models/segment/yolov5n-seg.yaml +48 -0
  29. models/segment/yolov5s-seg.yaml +48 -0
  30. models/segment/yolov5x-seg.yaml +48 -0
  31. models/tf.py +608 -0
  32. models/yolo.py +391 -0
  33. models/yolov5l.yaml +48 -0
  34. models/yolov5m.yaml +48 -0
  35. models/yolov5n.yaml +48 -0
  36. models/yolov5s.yaml +48 -0
  37. models/yolov5x.yaml +48 -0
  38. requirements.txt +51 -0
  39. utils/__init__.py +86 -0
  40. utils/activations.py +103 -0
  41. utils/augmentations.py +397 -0
  42. utils/autoanchor.py +169 -0
  43. utils/autobatch.py +72 -0
  44. utils/aws/__init__.py +0 -0
  45. utils/aws/mime.sh +26 -0
  46. utils/aws/resume.py +40 -0
  47. utils/aws/userdata.sh +27 -0
  48. utils/callbacks.py +76 -0
  49. utils/dataloaders.py +1222 -0
  50. utils/docker/Dockerfile +73 -0
.gitignore ADDED
@@ -0,0 +1,7 @@
+ flagged/
+ *.pt
+ *.png
+ *.jpg
+ *.mp4
+ *.mkv
+ gradio_cached_examples/
app.py ADDED
@@ -0,0 +1,112 @@
+ import gradio as gr
+ import cv2
+ import requests
+ import os
+ import torch
+ from models.yolo import Model
+
+
+ file_urls = [
+     'https://www.dropbox.com/s/b5g97xo901zb3ds/pothole_example.jpg?dl=1',
+     'https://www.dropbox.com/s/86uxlxxlm1iaexa/pothole_screenshot.png?dl=1',
+     'https://www.dropbox.com/s/7sjfwncffg8xej2/video_7.mp4?dl=1'
+ ]
+
+
+ def download_file(url, save_name):
+     url = url
+     if not os.path.exists(save_name):
+         file = requests.get(url)
+         open(save_name, 'wb').write(file.content)
+
+
+ for i, url in enumerate(file_urls):
+     if 'mp4' in file_urls[i]:
+         download_file(
+             file_urls[i],
+             f"video.mp4"
+         )
+     else:
+         download_file(
+             file_urls[i],
+             f"image_{i}.jpg"
+         )
+
+ model = Model('models/yolov5s.yaml')
+ model.load_state_dict(torch.load('ppe-yolov5.pt'))
+ model.eval()
+ path = [['image_0.jpg'], ['image_1.jpg']]
+ video_path = [['video.mp4']]
+
+
+ def show_preds_image(image_path):
+     image = cv2.imread(image_path)
+     results = model(image)
+     for i, det in enumerate(results.boxes.xyxy):
+         cv2.rectangle(
+             image,
+             (int(det[0]), int(det[1])),
+             (int(det[2]), int(det[3])),
+             color=(0, 0, 255),
+             thickness=2,
+             lineType=cv2.LINE_AA
+         )
+     return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+
+ inputs_image = [
+     gr.components.Image(type="filepath", label="Input Image"),
+ ]
+ outputs_image = [
+     gr.components.Image(type="numpy", label="Output Image"),
+ ]
+ interface_image = gr.Interface(
+     fn=show_preds_image,
+     inputs=inputs_image,
+     outputs=outputs_image,
+     title="PPE Detector",
+     examples=path,
+     cache_examples=False,
+ )
+
+
+ def show_preds_video(video_path):
+     cap = cv2.VideoCapture(video_path)
+     while (cap.isOpened()):
+         ret, frame = cap.read()
+         if ret:
+             frame_copy = frame.copy()
+             outputs = model.predict(source=frame)
+             results = outputs[0].cpu().numpy()
+             for i, det in enumerate(results.boxes.xyxy):
+                 cv2.rectangle(
+                     frame_copy,
+                     (int(det[0]), int(det[1])),
+                     (int(det[2]), int(det[3])),
+                     color=(0, 0, 255),
+                     thickness=2,
+                     lineType=cv2.LINE_AA
+                 )
+             yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
+
+
+ inputs_video = [
+     gr.components.Video(type="filepath", label="Input Video"),
+
+ ]
+ outputs_video = [
+     gr.components.Image(type="numpy", label="Output Image"),
+ ]
+ interface_video = gr.Interface(
+     fn=show_preds_video,
+     inputs=inputs_video,
+     outputs=outputs_video,
+     title="PPE Detector",
+     examples=video_path,
+     cache_examples=False,
+ )
+
+ gr.TabbedInterface(
+     [interface_image, interface_video],
+     tab_names=['Image inference', 'Video inference']
+ ).queue().launch()
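Editor's note on app.py: `Model('models/yolov5s.yaml')` builds a bare network whose forward pass returns raw prediction tensors, while `results.boxes.xyxy` and `model.predict(source=...)` belong to the Ultralytics YOLOv8 results API, so the loops above may not run as committed. A minimal sketch (not part of this commit) of one way to get per-image `(x1, y1, x2, y2, conf, cls)` rows from the same checkpoint, assuming 'ppe-yolov5.pt' is a standard YOLOv5 training checkpoint:

# Sketch only: torch.hub wraps the checkpoint in AutoShape (defined in
# models/common.py below), whose Detections results expose .xyxy per image.
import cv2
import torch

hub_model = torch.hub.load('ultralytics/yolov5', 'custom', path='ppe-yolov5.pt')
hub_model.conf = 0.25  # NMS confidence threshold

img = cv2.imread('image_0.jpg')[:, :, ::-1]  # BGR -> RGB, as AutoShape expects
results = hub_model(img)
for *xyxy, conf, cls in results.xyxy[0].tolist():  # rows of (x1, y1, x2, y2, conf, cls)
    print(int(cls), round(conf, 2), [int(v) for v in xyxy])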
detect.py ADDED
@@ -0,0 +1,285 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+ """
+ Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+ Usage - sources:
+     $ python detect.py --weights yolov5s.pt --source 0                               # webcam
+                                                      img.jpg                         # image
+                                                      vid.mp4                         # video
+                                                      screen                          # screenshot
+                                                      path/                           # directory
+                                                      list.txt                        # list of images
+                                                      list.streams                    # list of streams
+                                                      'path/*.jpg'                    # glob
+                                                      'https://youtu.be/LNwODJXcvt4'  # YouTube
+                                                      'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+ Usage - formats:
+     $ python detect.py --weights yolov5s.pt                 # PyTorch
+                                  yolov5s.torchscript        # TorchScript
+                                  yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                  yolov5s_openvino_model     # OpenVINO
+                                  yolov5s.engine             # TensorRT
+                                  yolov5s.mlmodel            # CoreML (macOS-only)
+                                  yolov5s_saved_model        # TensorFlow SavedModel
+                                  yolov5s.pb                 # TensorFlow GraphDef
+                                  yolov5s.tflite             # TensorFlow Lite
+                                  yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+                                  yolov5s_paddle_model       # PaddlePaddle
+ """
+
+ import argparse
+ import csv
+ import os
+ import platform
+ import sys
+ from pathlib import Path
+
+ import torch
+
+ FILE = Path(__file__).resolve()
+ ROOT = FILE.parents[0]  # YOLOv5 root directory
+ if str(ROOT) not in sys.path:
+     sys.path.append(str(ROOT))  # add ROOT to PATH
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+ from ultralytics.utils.plotting import Annotator, colors, save_one_box
+
+ from models.common import DetectMultiBackend
+ from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
+ from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                            increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
+ from utils.torch_utils import select_device, smart_inference_mode
+
+
+ @smart_inference_mode()
+ def run(
+         weights=ROOT / 'yolov5s.pt',  # model path or triton URL
+         source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
+         data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
+         imgsz=(640, 640),  # inference size (height, width)
+         conf_thres=0.25,  # confidence threshold
+         iou_thres=0.45,  # NMS IOU threshold
+         max_det=1000,  # maximum detections per image
+         device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+         view_img=False,  # show results
+         save_txt=False,  # save results to *.txt
+         save_csv=False,  # save results in CSV format
+         save_conf=False,  # save confidences in --save-txt labels
+         save_crop=False,  # save cropped prediction boxes
+         nosave=False,  # do not save images/videos
+         classes=None,  # filter by class: --class 0, or --class 0 2 3
+         agnostic_nms=False,  # class-agnostic NMS
+         augment=False,  # augmented inference
+         visualize=False,  # visualize features
+         update=False,  # update all models
+         project=ROOT / 'runs/detect',  # save results to project/name
+         name='exp',  # save results to project/name
+         exist_ok=False,  # existing project/name ok, do not increment
+         line_thickness=3,  # bounding box thickness (pixels)
+         hide_labels=False,  # hide labels
+         hide_conf=False,  # hide confidences
+         half=False,  # use FP16 half-precision inference
+         dnn=False,  # use OpenCV DNN for ONNX inference
+         vid_stride=1,  # video frame-rate stride
+ ):
+     source = str(source)
+     save_img = not nosave and not source.endswith('.txt')  # save inference images
+     is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+     is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+     webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
+     screenshot = source.lower().startswith('screen')
+     if is_url and is_file:
+         source = check_file(source)  # download
+
+     # Directories
+     save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+     (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+     # Load model
+     device = select_device(device)
+     model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+     stride, names, pt = model.stride, model.names, model.pt
+     imgsz = check_img_size(imgsz, s=stride)  # check image size
+
+     # Dataloader
+     bs = 1  # batch_size
+     if webcam:
+         view_img = check_imshow(warn=True)
+         dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+         bs = len(dataset)
+     elif screenshot:
+         dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
+     else:
+         dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+     vid_path, vid_writer = [None] * bs, [None] * bs
+
+     # Run inference
+     model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))  # warmup
+     seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+     for path, im, im0s, vid_cap, s in dataset:
+         with dt[0]:
+             im = torch.from_numpy(im).to(model.device)
+             im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+             im /= 255  # 0 - 255 to 0.0 - 1.0
+             if len(im.shape) == 3:
+                 im = im[None]  # expand for batch dim
+
+         # Inference
+         with dt[1]:
+             visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
+             pred = model(im, augment=augment, visualize=visualize)
+
+         # NMS
+         with dt[2]:
+             pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
+
+         # Second-stage classifier (optional)
+         # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+
+         # Define the path for the CSV file
+         csv_path = save_dir / 'predictions.csv'
+
+         # Create or append to the CSV file
+         def write_to_csv(image_name, prediction, confidence):
+             data = {'Image Name': image_name, 'Prediction': prediction, 'Confidence': confidence}
+             with open(csv_path, mode='a', newline='') as f:
+                 writer = csv.DictWriter(f, fieldnames=data.keys())
+                 if f.tell() == 0:  # empty file: write header first (append-mode open creates the file)
+                     writer.writeheader()
+                 writer.writerow(data)
+
+         # Process predictions
+         for i, det in enumerate(pred):  # per image
+             seen += 1
+             if webcam:  # batch_size >= 1
+                 p, im0, frame = path[i], im0s[i].copy(), dataset.count
+                 s += f'{i}: '
+             else:
+                 p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+             p = Path(p)  # to Path
+             save_path = str(save_dir / p.name)  # im.jpg
+             txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
+             s += '%gx%g ' % im.shape[2:]  # print string
+             gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+             imc = im0.copy() if save_crop else im0  # for save_crop
+             annotator = Annotator(im0, line_width=line_thickness, example=str(names))
+             if len(det):
+                 # Rescale boxes from img_size to im0 size
+                 det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
+
+                 # Print results
+                 for c in det[:, 5].unique():
+                     n = (det[:, 5] == c).sum()  # detections per class
+                     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                 # Write results
+                 for *xyxy, conf, cls in reversed(det):
+                     c = int(cls)  # integer class
+                     label = names[c] if hide_conf else f'{names[c]}'
+                     confidence = float(conf)
+                     confidence_str = f'{confidence:.2f}'
+
+                     if save_csv:
+                         write_to_csv(p.name, label, confidence_str)
+
+                     if save_txt:  # Write to file
+                         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+                         line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
+                         with open(f'{txt_path}.txt', 'a') as f:
+                             f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+                     if save_img or save_crop or view_img:  # Add bbox to image
+                         c = int(cls)  # integer class
+                         label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
+                         annotator.box_label(xyxy, label, color=colors(c, True))
+                     if save_crop:
+                         save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
+
+             # Stream results
+             im0 = annotator.result()
+             if view_img:
+                 if platform.system() == 'Linux' and p not in windows:
+                     windows.append(p)
+                     cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+                     cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
+                 cv2.imshow(str(p), im0)
+                 cv2.waitKey(1)  # 1 millisecond
+
+             # Save results (image with detections)
+             if save_img:
+                 if dataset.mode == 'image':
+                     cv2.imwrite(save_path, im0)
+                 else:  # 'video' or 'stream'
+                     if vid_path[i] != save_path:  # new video
+                         vid_path[i] = save_path
+                         if isinstance(vid_writer[i], cv2.VideoWriter):
+                             vid_writer[i].release()  # release previous video writer
+                         if vid_cap:  # video
+                             fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                             w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                             h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                         else:  # stream
+                             fps, w, h = 30, im0.shape[1], im0.shape[0]
+                         save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
+                         vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                     vid_writer[i].write(im0)
+
+         # Print time (inference-only)
+         LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
+
+     # Print results
+     t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
+     LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
+     if save_txt or save_img:
+         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+         LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+     if update:
+         strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)
+
+
+ def parse_opt():
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
+     parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
+     parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
+     parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
+     parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
+     parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
+     parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
+     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+     parser.add_argument('--view-img', action='store_true', help='show results')
+     parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+     parser.add_argument('--save-csv', action='store_true', help='save results in CSV format')
+     parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+     parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
+     parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+     parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
+     parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+     parser.add_argument('--augment', action='store_true', help='augmented inference')
+     parser.add_argument('--visualize', action='store_true', help='visualize features')
+     parser.add_argument('--update', action='store_true', help='update all models')
+     parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
+     parser.add_argument('--name', default='exp', help='save results to project/name')
+     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+     parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
+     parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
+     parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
+     parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+     parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+     parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
+     opt = parser.parse_args()
+     opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+     print_args(vars(opt))
+     return opt
+
+
+ def main(opt):
+     check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+     run(**vars(opt))
+
+
+ if __name__ == '__main__':
+     opt = parse_opt()
+     main(opt)
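Besides the CLI shown in the docstring, detect.py can be driven programmatically. A short sketch (not part of this commit) using the run() signature defined above; the weights filename is an assumption carried over from app.py, and any YOLOv5 .pt file works:

# Sketch only: run() accepts the same keyword arguments parse_opt() exposes as flags.
from detect import run

run(
    weights='ppe-yolov5.pt',  # assumed checkpoint name from app.py
    source='image_0.jpg',
    conf_thres=0.4,           # raise the confidence threshold from the 0.25 default
    save_csv=True,            # writes predictions.csv via write_to_csv() above
)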
models/__init__.py ADDED
File without changes
models/common.py ADDED
@@ -0,0 +1,883 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2
+ """
3
+ Common modules
4
+ """
5
+
6
+ import ast
7
+ import contextlib
8
+ import json
9
+ import math
10
+ import platform
11
+ import warnings
12
+ import zipfile
13
+ from collections import OrderedDict, namedtuple
14
+ from copy import copy
15
+ from pathlib import Path
16
+ from urllib.parse import urlparse
17
+
18
+ import cv2
19
+ import numpy as np
20
+ import pandas as pd
21
+ import requests
22
+ import torch
23
+ import torch.nn as nn
24
+ from PIL import Image
25
+ from torch.cuda import amp
26
+
27
+ # Import 'ultralytics' package or install if if missing
28
+ try:
29
+ import ultralytics
30
+
31
+ assert hasattr(ultralytics, '__version__') # verify package is not directory
32
+ except (ImportError, AssertionError):
33
+ import os
34
+
35
+ os.system('pip install -U ultralytics')
36
+ import ultralytics
37
+
38
+ from ultralytics.utils.plotting import Annotator, colors, save_one_box
39
+
40
+ from utils import TryExcept
41
+ from utils.dataloaders import exif_transpose, letterbox
42
+ from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
43
+ increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
44
+ xyxy2xywh, yaml_load)
45
+ from utils.torch_utils import copy_attr, smart_inference_mode
46
+
47
+
48
+ def autopad(k, p=None, d=1): # kernel, padding, dilation
49
+ # Pad to 'same' shape outputs
50
+ if d > 1:
51
+ k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
52
+ if p is None:
53
+ p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
54
+ return p
55
+
56
+
57
+ class Conv(nn.Module):
58
+ # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)
59
+ default_act = nn.SiLU() # default activation
60
+
61
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
62
+ super().__init__()
63
+ self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
64
+ self.bn = nn.BatchNorm2d(c2)
65
+ self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
66
+
67
+ def forward(self, x):
68
+ return self.act(self.bn(self.conv(x)))
69
+
70
+ def forward_fuse(self, x):
71
+ return self.act(self.conv(x))
72
+
73
+
74
+ class DWConv(Conv):
75
+ # Depth-wise convolution
76
+ def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation
77
+ super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
78
+
79
+
80
+ class DWConvTranspose2d(nn.ConvTranspose2d):
81
+ # Depth-wise transpose convolution
82
+ def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out
83
+ super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
84
+
85
+
86
+ class TransformerLayer(nn.Module):
87
+ # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
88
+ def __init__(self, c, num_heads):
89
+ super().__init__()
90
+ self.q = nn.Linear(c, c, bias=False)
91
+ self.k = nn.Linear(c, c, bias=False)
92
+ self.v = nn.Linear(c, c, bias=False)
93
+ self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
94
+ self.fc1 = nn.Linear(c, c, bias=False)
95
+ self.fc2 = nn.Linear(c, c, bias=False)
96
+
97
+ def forward(self, x):
98
+ x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
99
+ x = self.fc2(self.fc1(x)) + x
100
+ return x
101
+
102
+
103
+ class TransformerBlock(nn.Module):
104
+ # Vision Transformer https://arxiv.org/abs/2010.11929
105
+ def __init__(self, c1, c2, num_heads, num_layers):
106
+ super().__init__()
107
+ self.conv = None
108
+ if c1 != c2:
109
+ self.conv = Conv(c1, c2)
110
+ self.linear = nn.Linear(c2, c2) # learnable position embedding
111
+ self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
112
+ self.c2 = c2
113
+
114
+ def forward(self, x):
115
+ if self.conv is not None:
116
+ x = self.conv(x)
117
+ b, _, w, h = x.shape
118
+ p = x.flatten(2).permute(2, 0, 1)
119
+ return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)
120
+
121
+
122
+ class Bottleneck(nn.Module):
123
+ # Standard bottleneck
124
+ def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
125
+ super().__init__()
126
+ c_ = int(c2 * e) # hidden channels
127
+ self.cv1 = Conv(c1, c_, 1, 1)
128
+ self.cv2 = Conv(c_, c2, 3, 1, g=g)
129
+ self.add = shortcut and c1 == c2
130
+
131
+ def forward(self, x):
132
+ return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
133
+
134
+
135
+ class BottleneckCSP(nn.Module):
136
+ # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
137
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
138
+ super().__init__()
139
+ c_ = int(c2 * e) # hidden channels
140
+ self.cv1 = Conv(c1, c_, 1, 1)
141
+ self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
142
+ self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
143
+ self.cv4 = Conv(2 * c_, c2, 1, 1)
144
+ self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
145
+ self.act = nn.SiLU()
146
+ self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
147
+
148
+ def forward(self, x):
149
+ y1 = self.cv3(self.m(self.cv1(x)))
150
+ y2 = self.cv2(x)
151
+ return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))
152
+
153
+
154
+ class CrossConv(nn.Module):
155
+ # Cross Convolution Downsample
156
+ def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
157
+ # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
158
+ super().__init__()
159
+ c_ = int(c2 * e) # hidden channels
160
+ self.cv1 = Conv(c1, c_, (1, k), (1, s))
161
+ self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
162
+ self.add = shortcut and c1 == c2
163
+
164
+ def forward(self, x):
165
+ return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
166
+
167
+
168
+ class C3(nn.Module):
169
+ # CSP Bottleneck with 3 convolutions
170
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
171
+ super().__init__()
172
+ c_ = int(c2 * e) # hidden channels
173
+ self.cv1 = Conv(c1, c_, 1, 1)
174
+ self.cv2 = Conv(c1, c_, 1, 1)
175
+ self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2)
176
+ self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
177
+
178
+ def forward(self, x):
179
+ return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))
180
+
181
+
182
+ class C3x(C3):
183
+ # C3 module with cross-convolutions
184
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
185
+ super().__init__(c1, c2, n, shortcut, g, e)
186
+ c_ = int(c2 * e)
187
+ self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))
188
+
189
+
190
+ class C3TR(C3):
191
+ # C3 module with TransformerBlock()
192
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
193
+ super().__init__(c1, c2, n, shortcut, g, e)
194
+ c_ = int(c2 * e)
195
+ self.m = TransformerBlock(c_, c_, 4, n)
196
+
197
+
198
+ class C3SPP(C3):
199
+ # C3 module with SPP()
200
+ def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
201
+ super().__init__(c1, c2, n, shortcut, g, e)
202
+ c_ = int(c2 * e)
203
+ self.m = SPP(c_, c_, k)
204
+
205
+
206
+ class C3Ghost(C3):
207
+ # C3 module with GhostBottleneck()
208
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
209
+ super().__init__(c1, c2, n, shortcut, g, e)
210
+ c_ = int(c2 * e) # hidden channels
211
+ self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
212
+
213
+
214
+ class SPP(nn.Module):
215
+ # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
216
+ def __init__(self, c1, c2, k=(5, 9, 13)):
217
+ super().__init__()
218
+ c_ = c1 // 2 # hidden channels
219
+ self.cv1 = Conv(c1, c_, 1, 1)
220
+ self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
221
+ self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
222
+
223
+ def forward(self, x):
224
+ x = self.cv1(x)
225
+ with warnings.catch_warnings():
226
+ warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
227
+ return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
228
+
229
+
230
+ class SPPF(nn.Module):
231
+ # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
232
+ def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
233
+ super().__init__()
234
+ c_ = c1 // 2 # hidden channels
235
+ self.cv1 = Conv(c1, c_, 1, 1)
236
+ self.cv2 = Conv(c_ * 4, c2, 1, 1)
237
+ self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
238
+
239
+ def forward(self, x):
240
+ x = self.cv1(x)
241
+ with warnings.catch_warnings():
242
+ warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
243
+ y1 = self.m(x)
244
+ y2 = self.m(y1)
245
+ return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
246
+
247
+
248
+ class Focus(nn.Module):
249
+ # Focus wh information into c-space
250
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
251
+ super().__init__()
252
+ self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
253
+ # self.contract = Contract(gain=2)
254
+
255
+ def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
256
+ return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
257
+ # return self.conv(self.contract(x))
258
+
259
+
260
+ class GhostConv(nn.Module):
261
+ # Ghost Convolution https://github.com/huawei-noah/ghostnet
262
+ def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
263
+ super().__init__()
264
+ c_ = c2 // 2 # hidden channels
265
+ self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
266
+ self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)
267
+
268
+ def forward(self, x):
269
+ y = self.cv1(x)
270
+ return torch.cat((y, self.cv2(y)), 1)
271
+
272
+
273
+ class GhostBottleneck(nn.Module):
274
+ # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
275
+ def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
276
+ super().__init__()
277
+ c_ = c2 // 2
278
+ self.conv = nn.Sequential(
279
+ GhostConv(c1, c_, 1, 1), # pw
280
+ DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
281
+ GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
282
+ self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1,
283
+ act=False)) if s == 2 else nn.Identity()
284
+
285
+ def forward(self, x):
286
+ return self.conv(x) + self.shortcut(x)
287
+
288
+
289
+ class Contract(nn.Module):
290
+ # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
291
+ def __init__(self, gain=2):
292
+ super().__init__()
293
+ self.gain = gain
294
+
295
+ def forward(self, x):
296
+ b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'
297
+ s = self.gain
298
+ x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)
299
+ x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
300
+ return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)
301
+
302
+
303
+ class Expand(nn.Module):
304
+ # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
305
+ def __init__(self, gain=2):
306
+ super().__init__()
307
+ self.gain = gain
308
+
309
+ def forward(self, x):
310
+ b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'
311
+ s = self.gain
312
+ x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80)
313
+ x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
314
+ return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160)
315
+
316
+
317
+ class Concat(nn.Module):
318
+ # Concatenate a list of tensors along dimension
319
+ def __init__(self, dimension=1):
320
+ super().__init__()
321
+ self.d = dimension
322
+
323
+ def forward(self, x):
324
+ return torch.cat(x, self.d)
325
+
326
+
327
+ class DetectMultiBackend(nn.Module):
328
+ # YOLOv5 MultiBackend class for python inference on various backends
329
+ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):
330
+ # Usage:
331
+ # PyTorch: weights = *.pt
332
+ # TorchScript: *.torchscript
333
+ # ONNX Runtime: *.onnx
334
+ # ONNX OpenCV DNN: *.onnx --dnn
335
+ # OpenVINO: *_openvino_model
336
+ # CoreML: *.mlmodel
337
+ # TensorRT: *.engine
338
+ # TensorFlow SavedModel: *_saved_model
339
+ # TensorFlow GraphDef: *.pb
340
+ # TensorFlow Lite: *.tflite
341
+ # TensorFlow Edge TPU: *_edgetpu.tflite
342
+ # PaddlePaddle: *_paddle_model
343
+ from models.experimental import attempt_download, attempt_load # scoped to avoid circular import
344
+
345
+ super().__init__()
346
+ w = str(weights[0] if isinstance(weights, list) else weights)
347
+ pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
348
+ fp16 &= pt or jit or onnx or engine or triton # FP16
349
+ nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)
350
+ stride = 32 # default stride
351
+ cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA
352
+ if not (pt or triton):
353
+ w = attempt_download(w) # download if not local
354
+
355
+ if pt: # PyTorch
356
+ model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)
357
+ stride = max(int(model.stride.max()), 32) # model stride
358
+ names = model.module.names if hasattr(model, 'module') else model.names # get class names
359
+ model.half() if fp16 else model.float()
360
+ self.model = model # explicitly assign for to(), cpu(), cuda(), half()
361
+ elif jit: # TorchScript
362
+ LOGGER.info(f'Loading {w} for TorchScript inference...')
363
+ extra_files = {'config.txt': ''} # model metadata
364
+ model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
365
+ model.half() if fp16 else model.float()
366
+ if extra_files['config.txt']: # load metadata dict
367
+ d = json.loads(extra_files['config.txt'],
368
+ object_hook=lambda d: {
369
+ int(k) if k.isdigit() else k: v
370
+ for k, v in d.items()})
371
+ stride, names = int(d['stride']), d['names']
372
+ elif dnn: # ONNX OpenCV DNN
373
+ LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
374
+ check_requirements('opencv-python>=4.5.4')
375
+ net = cv2.dnn.readNetFromONNX(w)
376
+ elif onnx: # ONNX Runtime
377
+ LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
378
+ check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
379
+ import onnxruntime
380
+ providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
381
+ session = onnxruntime.InferenceSession(w, providers=providers)
382
+ output_names = [x.name for x in session.get_outputs()]
383
+ meta = session.get_modelmeta().custom_metadata_map # metadata
384
+ if 'stride' in meta:
385
+ stride, names = int(meta['stride']), eval(meta['names'])
386
+ elif xml: # OpenVINO
387
+ LOGGER.info(f'Loading {w} for OpenVINO inference...')
388
+ check_requirements('openvino>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/
389
+ from openvino.runtime import Core, Layout, get_batch
390
+ core = Core()
391
+ if not Path(w).is_file(): # if not *.xml
392
+ w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir
393
+ ov_model = core.read_model(model=w, weights=Path(w).with_suffix('.bin'))
394
+ if ov_model.get_parameters()[0].get_layout().empty:
395
+ ov_model.get_parameters()[0].set_layout(Layout('NCHW'))
396
+ batch_dim = get_batch(ov_model)
397
+ if batch_dim.is_static:
398
+ batch_size = batch_dim.get_length()
399
+ ov_compiled_model = core.compile_model(ov_model, device_name='AUTO') # AUTO selects best available device
400
+ stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata
401
+ elif engine: # TensorRT
402
+ LOGGER.info(f'Loading {w} for TensorRT inference...')
403
+ import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download
404
+ check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0
405
+ if device.type == 'cpu':
406
+ device = torch.device('cuda:0')
407
+ Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
408
+ logger = trt.Logger(trt.Logger.INFO)
409
+ with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
410
+ model = runtime.deserialize_cuda_engine(f.read())
411
+ context = model.create_execution_context()
412
+ bindings = OrderedDict()
413
+ output_names = []
414
+ fp16 = False # default updated below
415
+ dynamic = False
416
+ for i in range(model.num_bindings):
417
+ name = model.get_binding_name(i)
418
+ dtype = trt.nptype(model.get_binding_dtype(i))
419
+ if model.binding_is_input(i):
420
+ if -1 in tuple(model.get_binding_shape(i)): # dynamic
421
+ dynamic = True
422
+ context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))
423
+ if dtype == np.float16:
424
+ fp16 = True
425
+ else: # output
426
+ output_names.append(name)
427
+ shape = tuple(context.get_binding_shape(i))
428
+ im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
429
+ bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
430
+ binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
431
+ batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size
432
+ elif coreml: # CoreML
433
+ LOGGER.info(f'Loading {w} for CoreML inference...')
434
+ import coremltools as ct
435
+ model = ct.models.MLModel(w)
436
+ elif saved_model: # TF SavedModel
437
+ LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
438
+ import tensorflow as tf
439
+ keras = False # assume TF1 saved_model
440
+ model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
441
+ elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
442
+ LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
443
+ import tensorflow as tf
444
+
445
+ def wrap_frozen_graph(gd, inputs, outputs):
446
+ x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped
447
+ ge = x.graph.as_graph_element
448
+ return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
449
+
450
+ def gd_outputs(gd):
451
+ name_list, input_list = [], []
452
+ for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef
453
+ name_list.append(node.name)
454
+ input_list.extend(node.input)
455
+ return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))
456
+
457
+ gd = tf.Graph().as_graph_def() # TF GraphDef
458
+ with open(w, 'rb') as f:
459
+ gd.ParseFromString(f.read())
460
+ frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))
461
+ elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
462
+ try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
463
+ from tflite_runtime.interpreter import Interpreter, load_delegate
464
+ except ImportError:
465
+ import tensorflow as tf
466
+ Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,
467
+ if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
468
+ LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
469
+ delegate = {
470
+ 'Linux': 'libedgetpu.so.1',
471
+ 'Darwin': 'libedgetpu.1.dylib',
472
+ 'Windows': 'edgetpu.dll'}[platform.system()]
473
+ interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
474
+ else: # TFLite
475
+ LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
476
+ interpreter = Interpreter(model_path=w) # load TFLite model
477
+ interpreter.allocate_tensors() # allocate
478
+ input_details = interpreter.get_input_details() # inputs
479
+ output_details = interpreter.get_output_details() # outputs
480
+ # load metadata
481
+ with contextlib.suppress(zipfile.BadZipFile):
482
+ with zipfile.ZipFile(w, 'r') as model:
483
+ meta_file = model.namelist()[0]
484
+ meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))
485
+ stride, names = int(meta['stride']), meta['names']
486
+ elif tfjs: # TF.js
487
+ raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')
488
+ elif paddle: # PaddlePaddle
489
+ LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
490
+ check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
491
+ import paddle.inference as pdi
492
+ if not Path(w).is_file(): # if not *.pdmodel
493
+ w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir
494
+ weights = Path(w).with_suffix('.pdiparams')
495
+ config = pdi.Config(str(w), str(weights))
496
+ if cuda:
497
+ config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
498
+ predictor = pdi.create_predictor(config)
499
+ input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
500
+ output_names = predictor.get_output_names()
501
+ elif triton: # NVIDIA Triton Inference Server
502
+ LOGGER.info(f'Using {w} as Triton Inference Server...')
503
+ check_requirements('tritonclient[all]')
504
+ from utils.triton import TritonRemoteModel
505
+ model = TritonRemoteModel(url=w)
506
+ nhwc = model.runtime.startswith('tensorflow')
507
+ else:
508
+ raise NotImplementedError(f'ERROR: {w} is not a supported format')
509
+
510
+ # class names
511
+ if 'names' not in locals():
512
+ names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}
513
+ if names[0] == 'n01440764' and len(names) == 1000: # ImageNet
514
+ names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names
515
+
516
+ self.__dict__.update(locals()) # assign all variables to self
517
+
518
+ def forward(self, im, augment=False, visualize=False):
519
+ # YOLOv5 MultiBackend inference
520
+ b, ch, h, w = im.shape # batch, channel, height, width
521
+ if self.fp16 and im.dtype != torch.float16:
522
+ im = im.half() # to FP16
523
+ if self.nhwc:
524
+ im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)
525
+
526
+ if self.pt: # PyTorch
527
+ y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
528
+ elif self.jit: # TorchScript
529
+ y = self.model(im)
530
+ elif self.dnn: # ONNX OpenCV DNN
531
+ im = im.cpu().numpy() # torch to numpy
532
+ self.net.setInput(im)
533
+ y = self.net.forward()
534
+ elif self.onnx: # ONNX Runtime
535
+ im = im.cpu().numpy() # torch to numpy
536
+ y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
537
+ elif self.xml: # OpenVINO
538
+ im = im.cpu().numpy() # FP32
539
+ y = list(self.ov_compiled_model(im).values())
540
+ elif self.engine: # TensorRT
541
+ if self.dynamic and im.shape != self.bindings['images'].shape:
542
+ i = self.model.get_binding_index('images')
543
+ self.context.set_binding_shape(i, im.shape) # reshape if dynamic
544
+ self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)
545
+ for name in self.output_names:
546
+ i = self.model.get_binding_index(name)
547
+ self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
548
+ s = self.bindings['images'].shape
549
+ assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
550
+ self.binding_addrs['images'] = int(im.data_ptr())
551
+ self.context.execute_v2(list(self.binding_addrs.values()))
552
+ y = [self.bindings[x].data for x in sorted(self.output_names)]
553
+ elif self.coreml: # CoreML
554
+ im = im.cpu().numpy()
555
+ im = Image.fromarray((im[0] * 255).astype('uint8'))
556
+ # im = im.resize((192, 320), Image.BILINEAR)
557
+ y = self.model.predict({'image': im}) # coordinates are xywh normalized
558
+ if 'confidence' in y:
559
+ box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels
560
+ conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)
561
+ y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
562
+ else:
563
+ y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)
564
+ elif self.paddle: # PaddlePaddle
565
+ im = im.cpu().numpy().astype(np.float32)
566
+ self.input_handle.copy_from_cpu(im)
567
+ self.predictor.run()
568
+ y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
569
+ elif self.triton: # NVIDIA Triton Inference Server
570
+ y = self.model(im)
571
+ else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
572
+ im = im.cpu().numpy()
573
+ if self.saved_model: # SavedModel
574
+ y = self.model(im, training=False) if self.keras else self.model(im)
575
+ elif self.pb: # GraphDef
576
+ y = self.frozen_func(x=self.tf.constant(im))
577
+ else: # Lite or Edge TPU
578
+ input = self.input_details[0]
579
+ int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model
580
+ if int8:
581
+ scale, zero_point = input['quantization']
582
+ im = (im / scale + zero_point).astype(np.uint8) # de-scale
583
+ self.interpreter.set_tensor(input['index'], im)
584
+ self.interpreter.invoke()
585
+ y = []
586
+ for output in self.output_details:
587
+ x = self.interpreter.get_tensor(output['index'])
588
+ if int8:
589
+ scale, zero_point = output['quantization']
590
+ x = (x.astype(np.float32) - zero_point) * scale # re-scale
591
+ y.append(x)
592
+ y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
593
+ y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels
594
+
595
+ if isinstance(y, (list, tuple)):
596
+ return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
597
+ else:
598
+ return self.from_numpy(y)
599
+
600
+ def from_numpy(self, x):
601
+ return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
602
+
603
+ def warmup(self, imgsz=(1, 3, 640, 640)):
604
+ # Warmup model by running inference once
605
+ warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton
606
+ if any(warmup_types) and (self.device.type != 'cpu' or self.triton):
607
+ im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input
608
+ for _ in range(2 if self.jit else 1): #
609
+ self.forward(im) # warmup
610
+
611
+ @staticmethod
612
+ def _model_type(p='path/to/model.pt'):
613
+ # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
614
+ # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
615
+ from export import export_formats
616
+ from utils.downloads import is_url
617
+ sf = list(export_formats().Suffix) # export suffixes
618
+ if not is_url(p, check=False):
619
+ check_suffix(p, sf) # checks
620
+ url = urlparse(p) # if url may be Triton inference server
621
+ types = [s in Path(p).name for s in sf]
622
+ types[8] &= not types[9] # tflite &= not edgetpu
623
+ triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])
624
+ return types + [triton]
625
+
626
+ @staticmethod
627
+ def _load_metadata(f=Path('path/to/meta.yaml')):
628
+ # Load metadata from meta.yaml if it exists
629
+ if f.exists():
630
+ d = yaml_load(f)
631
+ return d['stride'], d['names'] # assign stride, names
632
+ return None, None
633
+
634
+
635
+ class AutoShape(nn.Module):
636
+ # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
637
+ conf = 0.25 # NMS confidence threshold
638
+ iou = 0.45 # NMS IoU threshold
639
+ agnostic = False # NMS class-agnostic
640
+ multi_label = False # NMS multiple labels per box
641
+ classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
642
+ max_det = 1000 # maximum number of detections per image
643
+ amp = False # Automatic Mixed Precision (AMP) inference
644
+
645
+ def __init__(self, model, verbose=True):
646
+ super().__init__()
647
+ if verbose:
648
+ LOGGER.info('Adding AutoShape... ')
649
+ copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes
650
+ self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance
651
+ self.pt = not self.dmb or model.pt # PyTorch model
652
+ self.model = model.eval()
653
+ if self.pt:
654
+ m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
655
+ m.inplace = False # Detect.inplace=False for safe multithread inference
656
+ m.export = True # do not output loss values
657
+
658
+ def _apply(self, fn):
659
+ # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
660
+ self = super()._apply(fn)
661
+ if self.pt:
662
+ m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
663
+ m.stride = fn(m.stride)
664
+ m.grid = list(map(fn, m.grid))
665
+ if isinstance(m.anchor_grid, list):
666
+ m.anchor_grid = list(map(fn, m.anchor_grid))
667
+ return self
668
+
669
+ @smart_inference_mode()
670
+ def forward(self, ims, size=640, augment=False, profile=False):
671
+ # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:
672
+ # file: ims = 'data/images/zidane.jpg' # str or PosixPath
673
+ # URI: = 'https://ultralytics.com/images/zidane.jpg'
674
+ # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
675
+ # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
676
+ # numpy: = np.zeros((640,1280,3)) # HWC
677
+ # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
678
+ # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
679
+
680
+ dt = (Profile(), Profile(), Profile())
681
+ with dt[0]:
682
+ if isinstance(size, int): # expand
683
+ size = (size, size)
684
+ p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param
685
+ autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference
686
+ if isinstance(ims, torch.Tensor): # torch
687
+ with amp.autocast(autocast):
688
+ return self.model(ims.to(p.device).type_as(p), augment=augment) # inference
689
+
690
+ # Pre-process
691
+ n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images
692
+ shape0, shape1, files = [], [], [] # image and inference shapes, filenames
693
+ for i, im in enumerate(ims):
694
+ f = f'image{i}' # filename
695
+ if isinstance(im, (str, Path)): # filename or uri
696
+ im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
697
+ im = np.asarray(exif_transpose(im))
698
+ elif isinstance(im, Image.Image): # PIL Image
699
+ im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
700
+ files.append(Path(f).with_suffix('.jpg').name)
701
+ if im.shape[0] < 5: # image in CHW
702
+ im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
703
+ im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input
704
+ s = im.shape[:2] # HWC
705
+ shape0.append(s) # image shape
706
+ g = max(size) / max(s) # gain
707
+ shape1.append([int(y * g) for y in s])
708
+ ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
709
+ shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] # inf shape
710
+ x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad
711
+ x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW
712
+             x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32
+
+         with amp.autocast(autocast):
+             # Inference
+             with dt[1]:
+                 y = self.model(x, augment=augment)  # forward
+
+             # Post-process
+             with dt[2]:
+                 y = non_max_suppression(y if self.dmb else y[0],
+                                         self.conf,
+                                         self.iou,
+                                         self.classes,
+                                         self.agnostic,
+                                         self.multi_label,
+                                         max_det=self.max_det)  # NMS
+                 for i in range(n):
+                     scale_boxes(shape1, y[i][:, :4], shape0[i])
+
+             return Detections(ims, y, files, dt, self.names, x.shape)
+
+
+ class Detections:
+     # YOLOv5 detections class for inference results
+     def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
+         super().__init__()
+         d = pred[0].device  # device
+         gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims]  # normalizations
+         self.ims = ims  # list of images as numpy arrays
+         self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
+         self.names = names  # class names
+         self.files = files  # image filenames
+         self.times = times  # profiling times
+         self.xyxy = pred  # xyxy pixels
+         self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
+         self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
+         self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
+         self.n = len(self.pred)  # number of images (batch size)
+         self.t = tuple(x.t / self.n * 1E3 for x in times)  # timestamps (ms)
+         self.s = tuple(shape)  # inference BCHW shape
+
+     def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):
+         s, crops = '', []
+         for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
+             s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # string
+             if pred.shape[0]:
+                 for c in pred[:, -1].unique():
+                     n = (pred[:, -1] == c).sum()  # detections per class
+                     s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
+                 s = s.rstrip(', ')
+                 if show or save or render or crop:
+                     annotator = Annotator(im, example=str(self.names))
+                     for *box, conf, cls in reversed(pred):  # xyxy, confidence, class
+                         label = f'{self.names[int(cls)]} {conf:.2f}'
+                         if crop:
+                             file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
+                             crops.append({
+                                 'box': box,
+                                 'conf': conf,
+                                 'cls': cls,
+                                 'label': label,
+                                 'im': save_one_box(box, im, file=file, save=save)})
+                         else:  # all others
+                             annotator.box_label(box, label if labels else '', color=colors(cls))
+                     im = annotator.im
+             else:
+                 s += '(no detections)'
+
+             im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im  # from np
+             if show:
+                 if is_jupyter():
+                     from IPython.display import display
+                     display(im)
+                 else:
+                     im.show(self.files[i])
+             if save:
+                 f = self.files[i]
+                 im.save(save_dir / f)  # save
+                 if i == self.n - 1:
+                     LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
+             if render:
+                 self.ims[i] = np.asarray(im)
+         if pprint:
+             s = s.lstrip('\n')
+             return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t
+         if crop:
+             if save:
+                 LOGGER.info(f'Saved results to {save_dir}\n')
+             return crops
+
+     @TryExcept('Showing images is not supported in this environment')
+     def show(self, labels=True):
+         self._run(show=True, labels=labels)  # show results
+
+     def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False):
+         save_dir = increment_path(save_dir, exist_ok, mkdir=True)  # increment save_dir
+         self._run(save=True, labels=labels, save_dir=save_dir)  # save results
+
+     def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False):
+         save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
+         return self._run(crop=True, save=save, save_dir=save_dir)  # crop results
+
+     def render(self, labels=True):
+         self._run(render=True, labels=labels)  # render results
+         return self.ims
+
+     def pandas(self):
+         # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
+         new = copy(self)  # return copy
+         ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name'  # xyxy columns
+         cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name'  # xywh columns
+         for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
+             a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)]  # update
+             setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
+         return new
+
+     def tolist(self):
+         # return a list of Detections objects, i.e. 'for result in results.tolist():'
+         r = range(self.n)  # iterable
+         x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
+         # for d in x:
+         #    for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
+         #        setattr(d, k, getattr(d, k)[0])  # pop out of list
+         return x
+
+     def print(self):
+         LOGGER.info(self.__str__())
+
+     def __len__(self):  # override len(results)
+         return self.n
+
+     def __str__(self):  # override print(results)
+         return self._run(pprint=True)  # print results
+
+     def __repr__(self):
+         return f'YOLOv5 {self.__class__} instance\n' + self.__str__()
+
+
+ class Proto(nn.Module):
+     # YOLOv5 mask Proto module for segmentation models
+     def __init__(self, c1, c_=256, c2=32):  # ch_in, number of protos, number of masks
+         super().__init__()
+         self.cv1 = Conv(c1, c_, k=3)
+         self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
+         self.cv2 = Conv(c_, c_, k=3)
+         self.cv3 = Conv(c_, c2)
+
+     def forward(self, x):
+         return self.cv3(self.cv2(self.upsample(self.cv1(x))))
+
+
+ class Classify(nn.Module):
+     # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2)
+     def __init__(self,
+                  c1,
+                  c2,
+                  k=1,
+                  s=1,
+                  p=None,
+                  g=1,
+                  dropout_p=0.0):  # ch_in, ch_out, kernel, stride, padding, groups, dropout probability
+         super().__init__()
+         c_ = 1280  # efficientnet_b0 size
+         self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
+         self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b,c_,1,1)
+         self.drop = nn.Dropout(p=dropout_p, inplace=True)
+         self.linear = nn.Linear(c_, c2)  # to x(b,c2)
+
+     def forward(self, x):
+         if isinstance(x, list):
+             x = torch.cat(x, 1)
+         return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
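Usage note: callers rarely touch Detections internals; the convenience methods above cover most needs. A minimal sketch, assuming an AutoShape-wrapped model loaded via torch.hub and an illustrative local image path:

    import torch
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # AutoShape-wrapped detector
    results = model('image.jpg')             # AutoShape.forward() returns a Detections instance
    results.print()                          # per-class counts + speed line from _run(pprint=True)
    df = results.pandas().xyxy[0]            # DataFrame: xmin, ymin, xmax, ymax, confidence, class, name
    crops = results.crop(save=False)         # list of dicts with 'box', 'conf', 'cls', 'label', 'im'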
models/experimental.py ADDED
@@ -0,0 +1,111 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+ """
+ Experimental modules
+ """
+ import math
+
+ import numpy as np
+ import torch
+ import torch.nn as nn
+
+ from utils.downloads import attempt_download
+
+
+ class Sum(nn.Module):
+     # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
+     def __init__(self, n, weight=False):  # n: number of inputs
+         super().__init__()
+         self.weight = weight  # apply weights boolean
+         self.iter = range(n - 1)  # iter object
+         if weight:
+             self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True)  # layer weights
+
+     def forward(self, x):
+         y = x[0]  # no weight
+         if self.weight:
+             w = torch.sigmoid(self.w) * 2
+             for i in self.iter:
+                 y = y + x[i + 1] * w[i]
+         else:
+             for i in self.iter:
+                 y = y + x[i + 1]
+         return y
+
+
+ class MixConv2d(nn.Module):
+     # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
+     def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):  # ch_in, ch_out, kernel, stride, ch_strategy
+         super().__init__()
+         n = len(k)  # number of convolutions
+         if equal_ch:  # equal c_ per group
+             i = torch.linspace(0, n - 1E-6, c2).floor()  # c2 indices
+             c_ = [(i == g).sum() for g in range(n)]  # intermediate channels
+         else:  # equal weight.numel() per group
+             b = [c2] + [0] * n
+             a = np.eye(n + 1, n, k=-1)
+             a -= np.roll(a, 1, axis=1)
+             a *= np.array(k) ** 2
+             a[0] = 1
+             c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b
+
+         self.m = nn.ModuleList([
+             nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
+         self.bn = nn.BatchNorm2d(c2)
+         self.act = nn.SiLU()
+
+     def forward(self, x):
+         return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
+
+
+ class Ensemble(nn.ModuleList):
+     # Ensemble of models
+     def __init__(self):
+         super().__init__()
+
+     def forward(self, x, augment=False, profile=False, visualize=False):
+         y = [module(x, augment, profile, visualize)[0] for module in self]
+         # y = torch.stack(y).max(0)[0]  # max ensemble
+         # y = torch.stack(y).mean(0)  # mean ensemble
+         y = torch.cat(y, 1)  # nms ensemble
+         return y, None  # inference, train output
+
+
+ def attempt_load(weights, device=None, inplace=True, fuse=True):
+     # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
+     from models.yolo import Detect, Model
+
+     model = Ensemble()
+     for w in weights if isinstance(weights, list) else [weights]:
+         ckpt = torch.load(attempt_download(w), map_location='cpu')  # load
+         ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model
+
+         # Model compatibility updates
+         if not hasattr(ckpt, 'stride'):
+             ckpt.stride = torch.tensor([32.])
+         if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
+             ckpt.names = dict(enumerate(ckpt.names))  # convert to dict
+
+         model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode
+
+     # Module updates
+     for m in model.modules():
+         t = type(m)
+         if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
+             m.inplace = inplace
+             if t is Detect and not isinstance(m.anchor_grid, list):
+                 delattr(m, 'anchor_grid')
+                 setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
+         elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
+             m.recompute_scale_factor = None  # torch 1.11.0 compatibility
+
+     # Return model
+     if len(model) == 1:
+         return model[-1]
+
+     # Return detection ensemble
+     print(f'Ensemble created with {weights}\n')
+     for k in 'names', 'nc', 'yaml':
+         setattr(model, k, getattr(model[0], k))
+     model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
+     assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
+     return model
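A hedged usage sketch for attempt_load (weight filenames are illustrative; passing a list builds an Ensemble whose per-model outputs are concatenated before NMS, as in Ensemble.forward above):

    from models.experimental import attempt_load
    model = attempt_load('yolov5s.pt', device='cpu')        # single model, fused and in eval mode
    ensemble = attempt_load(['yolov5s.pt', 'yolov5m.pt'])   # Ensemble; shared names/nc/stride attached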
models/hub/anchors.yaml ADDED
@@ -0,0 +1,59 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+ # Default anchors for COCO data
+
+
+ # P5 -------------------------------------------------------------------------------------------------------------------
+ # P5-640:
+ anchors_p5_640:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+
+ # P6 -------------------------------------------------------------------------------------------------------------------
+ # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387
+ anchors_p6_640:
+   - [9,11, 21,19, 17,41]  # P3/8
+   - [43,32, 39,70, 86,64]  # P4/16
+   - [65,131, 134,130, 120,265]  # P5/32
+   - [282,180, 247,354, 512,387]  # P6/64
+
+ # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792
+ anchors_p6_1280:
+   - [19,27, 44,40, 38,94]  # P3/8
+   - [96,68, 86,152, 180,137]  # P4/16
+   - [140,301, 303,264, 238,542]  # P5/32
+   - [436,615, 739,380, 925,792]  # P6/64
+
+ # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187
+ anchors_p6_1920:
+   - [28,41, 67,59, 57,141]  # P3/8
+   - [144,103, 129,227, 270,205]  # P4/16
+   - [209,452, 455,396, 358,812]  # P5/32
+   - [653,922, 1109,570, 1387,1187]  # P6/64
+
+
+ # P7 -------------------------------------------------------------------------------------------------------------------
+ # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372
+ anchors_p7_640:
+   - [11,11, 13,30, 29,20]  # P3/8
+   - [30,46, 61,38, 39,92]  # P4/16
+   - [78,80, 146,66, 79,163]  # P5/32
+   - [149,150, 321,143, 157,303]  # P6/64
+   - [257,402, 359,290, 524,372]  # P7/128
+
+ # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818
+ anchors_p7_1280:
+   - [19,22, 54,36, 32,77]  # P3/8
+   - [70,83, 138,71, 75,173]  # P4/16
+   - [165,159, 148,334, 375,151]  # P5/32
+   - [334,317, 251,626, 499,474]  # P6/64
+   - [750,326, 534,814, 1079,818]  # P7/128
+
+ # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227
+ anchors_p7_1920:
+   - [29,34, 81,55, 47,115]  # P3/8
+   - [105,124, 207,107, 113,259]  # P4/16
+   - [247,238, 222,500, 563,227]  # P5/32
+   - [501,476, 376,939, 749,711]  # P6/64
+   - [1126,489, 801,1222, 1618,1227]  # P7/128
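For reference, models/yolo.py consumes each anchor row as flat (width, height) pixel pairs, three per output layer. A small sketch of that reshape (names are illustrative):

    import numpy as np
    anchors_p5_640 = [[10, 13, 16, 30, 33, 23],
                      [30, 61, 62, 45, 59, 119],
                      [116, 90, 156, 198, 373, 326]]
    per_layer = [np.array(a).reshape(-1, 2) for a in anchors_p5_640]  # 3 layers x 3 anchors x (w, h)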
models/hub/yolov3-spp.yaml ADDED
@@ -0,0 +1,51 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # darknet53 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [32, 3, 1]],  # 0
+    [-1, 1, Conv, [64, 3, 2]],  # 1-P1/2
+    [-1, 1, Bottleneck, [64]],
+    [-1, 1, Conv, [128, 3, 2]],  # 3-P2/4
+    [-1, 2, Bottleneck, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 5-P3/8
+    [-1, 8, Bottleneck, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 7-P4/16
+    [-1, 8, Bottleneck, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 9-P5/32
+    [-1, 4, Bottleneck, [1024]],  # 10
+   ]
+
+ # YOLOv3-SPP head
+ head:
+   [[-1, 1, Bottleneck, [1024, False]],
+    [-1, 1, SPP, [512, [5, 9, 13]]],
+    [-1, 1, Conv, [1024, 3, 1]],
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, Conv, [1024, 3, 1]],  # 15 (P5/32-large)
+
+    [-2, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+    [-1, 1, Bottleneck, [512, False]],
+    [-1, 1, Bottleneck, [512, False]],
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, Conv, [512, 3, 1]],  # 22 (P4/16-medium)
+
+    [-2, 1, Conv, [128, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P3
+    [-1, 1, Bottleneck, [256, False]],
+    [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)
+
+    [[27, 22, 15], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
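The SPP layer above pools one feature map with parallel max-pool kernels (5, 9, 13) at stride 1 and concatenates the results with the input. A self-contained sketch of the pooling core (the real SPP in models/common.py also wraps this in 1x1 convs):

    import torch
    import torch.nn as nn

    class SPPSketch(nn.Module):
        # Parallel stride-1 max-pools at several kernel sizes, concatenated channel-wise
        def __init__(self, ks=(5, 9, 13)):
            super().__init__()
            self.pools = nn.ModuleList(nn.MaxPool2d(k, stride=1, padding=k // 2) for k in ks)

        def forward(self, x):
            return torch.cat([x] + [p(x) for p in self.pools], 1)  # channels: c * (1 + len(ks))

    y = SPPSketch()(torch.randn(1, 512, 20, 20))  # -> (1, 2048, 20, 20)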
models/hub/yolov3-tiny.yaml ADDED
@@ -0,0 +1,41 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors:
+   - [10,14, 23,27, 37,58]  # P4/16
+   - [81,82, 135,169, 344,319]  # P5/32
+
+ # YOLOv3-tiny backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [16, 3, 1]],  # 0
+    [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 1-P1/2
+    [-1, 1, Conv, [32, 3, 1]],
+    [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 3-P2/4
+    [-1, 1, Conv, [64, 3, 1]],
+    [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 5-P3/8
+    [-1, 1, Conv, [128, 3, 1]],
+    [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 7-P4/16
+    [-1, 1, Conv, [256, 3, 1]],
+    [-1, 1, nn.MaxPool2d, [2, 2, 0]],  # 9-P5/32
+    [-1, 1, Conv, [512, 3, 1]],
+    [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]],  # 11
+    [-1, 1, nn.MaxPool2d, [2, 1, 0]],  # 12
+   ]
+
+ # YOLOv3-tiny head
+ head:
+   [[-1, 1, Conv, [1024, 3, 1]],
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, Conv, [512, 3, 1]],  # 15 (P5/32-large)
+
+    [-2, 1, Conv, [128, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+    [-1, 1, Conv, [256, 3, 1]],  # 19 (P4/16-medium)
+
+    [[19, 15], 1, Detect, [nc, anchors]],  # Detect(P4, P5)
+   ]
models/hub/yolov3.yaml ADDED
@@ -0,0 +1,51 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # darknet53 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [32, 3, 1]],  # 0
+    [-1, 1, Conv, [64, 3, 2]],  # 1-P1/2
+    [-1, 1, Bottleneck, [64]],
+    [-1, 1, Conv, [128, 3, 2]],  # 3-P2/4
+    [-1, 2, Bottleneck, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 5-P3/8
+    [-1, 8, Bottleneck, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 7-P4/16
+    [-1, 8, Bottleneck, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 9-P5/32
+    [-1, 4, Bottleneck, [1024]],  # 10
+   ]
+
+ # YOLOv3 head
+ head:
+   [[-1, 1, Bottleneck, [1024, False]],
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, Conv, [1024, 3, 1]],
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, Conv, [1024, 3, 1]],  # 15 (P5/32-large)
+
+    [-2, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]],  # cat backbone P4
+    [-1, 1, Bottleneck, [512, False]],
+    [-1, 1, Bottleneck, [512, False]],
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, Conv, [512, 3, 1]],  # 22 (P4/16-medium)
+
+    [-2, 1, Conv, [128, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P3
+    [-1, 1, Bottleneck, [256, False]],
+    [-1, 2, Bottleneck, [256, False]],  # 27 (P3/8-small)
+
+    [[27, 22, 15], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
models/hub/yolov5-bifpn.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 BiFPN head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14, 6], 1, Concat, [1]],  # cat P4 <--- BiFPN change
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
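Relative to the PANet head, the only structural change is the three-way Concat at layer 19, which additionally fuses backbone P4. A full BiFPN would fuse with learnable normalized weights instead of concatenation; the Sum module in models/experimental.py (earlier in this diff) implements that weighted sum. A shape-only sketch with random stand-in feature maps:

    import torch
    from models.experimental import Sum

    p4_a, p4_b, p4_c = (torch.randn(1, 512, 40, 40) for _ in range(3))  # three P4-sized inputs
    fuse = Sum(n=3, weight=True)         # learnable sigmoid-scaled weights over the inputs
    p4_fused = fuse([p4_a, p4_b, p4_c])  # same shape as each input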
models/hub/yolov5-fpn.yaml ADDED
@@ -0,0 +1,42 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 FPN head
+ head:
+   [[-1, 3, C3, [1024, False]],  # 10 (P5/32-large)
+
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 3, C3, [512, False]],  # 14 (P4/16-medium)
+
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 3, C3, [256, False]],  # 18 (P3/8-small)
+
+    [[18, 14, 10], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
models/hub/yolov5-p2.yaml ADDED
@@ -0,0 +1,54 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors: 3  # AutoAnchor evolves 3 anchors per P output layer
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [128, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 2], 1, Concat, [1]],  # cat backbone P2
+    [-1, 1, C3, [128, False]],  # 21 (P2/4-xsmall)
+
+    [-1, 1, Conv, [128, 3, 2]],
+    [[-1, 18], 1, Concat, [1]],  # cat head P3
+    [-1, 3, C3, [256, False]],  # 24 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 27 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 30 (P5/32-large)
+
+    [[21, 24, 27, 30], 1, Detect, [nc, anchors]],  # Detect(P2, P3, P4, P5)
+   ]
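Adding the P2 output halves the smallest detection stride relative to the default P3-P5 head; the grid sizes at a 640x640 input are simple arithmetic:

    img = 640
    for name, stride in {'P2': 4, 'P3': 8, 'P4': 16, 'P5': 32}.items():
        print(name, f'stride {stride}', f'grid {img // stride}x{img // stride}')
    # P2 stride 4 grid 160x160 ... P5 stride 32 grid 20x20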
models/hub/yolov5-p34.yaml ADDED
@@ -0,0 +1,41 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.33  # model depth multiple
+ width_multiple: 0.50  # layer channel multiple
+ anchors: 3  # AutoAnchor evolves 3 anchors per P output layer
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ],  # 0-P1/2
+     [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4
+     [ -1, 3, C3, [ 128 ] ],
+     [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8
+     [ -1, 6, C3, [ 256 ] ],
+     [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 5-P4/16
+     [ -1, 9, C3, [ 512 ] ],
+     [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 7-P5/32
+     [ -1, 3, C3, [ 1024 ] ],
+     [ -1, 1, SPPF, [ 1024, 5 ] ],  # 9
+   ]
+
+ # YOLOv5 v6.0 head with (P3, P4) outputs
+ head:
+   [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+     [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+     [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4
+     [ -1, 3, C3, [ 512, False ] ],  # 13
+
+     [ -1, 1, Conv, [ 256, 1, 1 ] ],
+     [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+     [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3
+     [ -1, 3, C3, [ 256, False ] ],  # 17 (P3/8-small)
+
+     [ -1, 1, Conv, [ 256, 3, 2 ] ],
+     [ [ -1, 14 ], 1, Concat, [ 1 ] ],  # cat head P4
+     [ -1, 3, C3, [ 512, False ] ],  # 20 (P4/16-medium)
+
+     [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4)
+   ]
models/hub/yolov5-p6.yaml ADDED
@@ -0,0 +1,56 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors: 3  # AutoAnchor evolves 3 anchors per P output layer
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [768]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 11
+   ]
+
+ # YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs
+ head:
+   [[-1, 1, Conv, [768, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+    [-1, 3, C3, [768, False]],  # 15
+
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 19
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 20], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 16], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+    [-1, 1, Conv, [768, 3, 2]],
+    [[-1, 12], 1, Concat, [1]],  # cat head P6
+    [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+    [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+   ]
models/hub/yolov5-p7.yaml ADDED
@@ -0,0 +1,67 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors: 3  # AutoAnchor evolves 3 anchors per P output layer
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [768]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+    [-1, 3, C3, [1024]],
+    [-1, 1, Conv, [1280, 3, 2]],  # 11-P7/128
+    [-1, 3, C3, [1280]],
+    [-1, 1, SPPF, [1280, 5]],  # 13
+   ]
+
+ # YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs
+ head:
+   [[-1, 1, Conv, [1024, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 10], 1, Concat, [1]],  # cat backbone P6
+    [-1, 3, C3, [1024, False]],  # 17
+
+    [-1, 1, Conv, [768, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+    [-1, 3, C3, [768, False]],  # 21
+
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 25
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 29 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 26], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 32 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 22], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [768, False]],  # 35 (P5/32-large)
+
+    [-1, 1, Conv, [768, 3, 2]],
+    [[-1, 18], 1, Concat, [1]],  # cat head P6
+    [-1, 3, C3, [1024, False]],  # 38 (P6/64-xlarge)
+
+    [-1, 1, Conv, [1024, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P7
+    [-1, 3, C3, [1280, False]],  # 41 (P7/128-xxlarge)
+
+    [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6, P7)
+   ]
models/hub/yolov5-panet.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 PANet head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
models/hub/yolov5l6.yaml ADDED
@@ -0,0 +1,60 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors:
+   - [19,27, 44,40, 38,94]  # P3/8
+   - [96,68, 86,152, 180,137]  # P4/16
+   - [140,301, 303,264, 238,542]  # P5/32
+   - [436,615, 739,380, 925,792]  # P6/64
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [768]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 11
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [768, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+    [-1, 3, C3, [768, False]],  # 15
+
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 19
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 20], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 16], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+    [-1, 1, Conv, [768, 3, 2]],
+    [[-1, 12], 1, Concat, [1]],  # cat head P6
+    [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+    [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+   ]
models/hub/yolov5m6.yaml ADDED
@@ -0,0 +1,60 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.67  # model depth multiple
+ width_multiple: 0.75  # layer channel multiple
+ anchors:
+   - [19,27, 44,40, 38,94]  # P3/8
+   - [96,68, 86,152, 180,137]  # P4/16
+   - [140,301, 303,264, 238,542]  # P5/32
+   - [436,615, 739,380, 925,792]  # P6/64
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [768]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 11
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [768, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+    [-1, 3, C3, [768, False]],  # 15
+
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 19
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 20], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 16], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+    [-1, 1, Conv, [768, 3, 2]],
+    [[-1, 12], 1, Concat, [1]],  # cat head P6
+    [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+    [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+   ]
models/hub/yolov5n6.yaml ADDED
@@ -0,0 +1,60 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.33  # model depth multiple
+ width_multiple: 0.25  # layer channel multiple
+ anchors:
+   - [19,27, 44,40, 38,94]  # P3/8
+   - [96,68, 86,152, 180,137]  # P4/16
+   - [140,301, 303,264, 238,542]  # P5/32
+   - [436,615, 739,380, 925,792]  # P6/64
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [768]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 11
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [768, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+    [-1, 3, C3, [768, False]],  # 15
+
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 19
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 20], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 16], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+    [-1, 1, Conv, [768, 3, 2]],
+    [[-1, 12], 1, Concat, [1]],  # cat head P6
+    [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+    [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+   ]
models/hub/yolov5s-LeakyReLU.yaml ADDED
@@ -0,0 +1,49 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ activation: nn.LeakyReLU(0.1)  # <----- Conv() activation used throughout entire YOLOv5 model
+ depth_multiple: 0.33  # model depth multiple
+ width_multiple: 0.50  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
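The activation key is read once by parse_model in models/yolo.py (also in this commit) and applied to every Conv by overriding the class default, roughly like this sketch:

    import torch.nn as nn
    from models.common import Conv

    act = 'nn.LeakyReLU(0.1)'         # value of the YAML 'activation' key
    if act:
        Conv.default_act = eval(act)  # redefine the default activation for all Conv modules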
models/hub/yolov5s-ghost.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.33  # model depth multiple
+ width_multiple: 0.50  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, GhostConv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3Ghost, [128]],
+    [-1, 1, GhostConv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3Ghost, [256]],
+    [-1, 1, GhostConv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3Ghost, [512]],
+    [-1, 1, GhostConv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3Ghost, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, GhostConv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3Ghost, [512, False]],  # 13
+
+    [-1, 1, GhostConv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3Ghost, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, GhostConv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3Ghost, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, GhostConv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3Ghost, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
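GhostConv (defined in models/common.py, also added in this commit) follows the GhostNet idea: produce half the output channels with a dense convolution and the other half with a cheap depthwise convolution over them. A self-contained sketch of the pattern (kernel and channel choices are illustrative, not the exact module):

    import torch
    import torch.nn as nn

    class GhostConvSketch(nn.Module):
        # Half the outputs from a dense 1x1 conv, the rest from a cheap depthwise 5x5 over that half
        def __init__(self, c1, c2):
            super().__init__()
            c_ = c2 // 2
            self.primary = nn.Conv2d(c1, c_, 1, bias=False)
            self.cheap = nn.Conv2d(c_, c_, 5, padding=2, groups=c_, bias=False)

        def forward(self, x):
            y = self.primary(x)
            return torch.cat((y, self.cheap(y)), 1)

    y = GhostConvSketch(64, 128)(torch.randn(1, 64, 32, 32))  # -> (1, 128, 32, 32)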
models/hub/yolov5s-transformer.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.33  # model depth multiple
+ width_multiple: 0.50  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3TR, [1024]],  # 8 <--- C3TR() Transformer module
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
models/hub/yolov5s6.yaml ADDED
@@ -0,0 +1,60 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.33  # model depth multiple
+ width_multiple: 0.50  # layer channel multiple
+ anchors:
+   - [19,27, 44,40, 38,94]  # P3/8
+   - [96,68, 86,152, 180,137]  # P4/16
+   - [140,301, 303,264, 238,542]  # P5/32
+   - [436,615, 739,380, 925,792]  # P6/64
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [768]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 11
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [768, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+    [-1, 3, C3, [768, False]],  # 15
+
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 19
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 20], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 16], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+    [-1, 1, Conv, [768, 3, 2]],
+    [[-1, 12], 1, Concat, [1]],  # cat head P6
+    [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+    [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+   ]
models/hub/yolov5x6.yaml ADDED
@@ -0,0 +1,60 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.33  # model depth multiple
+ width_multiple: 1.25  # layer channel multiple
+ anchors:
+   - [19,27, 44,40, 38,94]  # P3/8
+   - [96,68, 86,152, 180,137]  # P4/16
+   - [140,301, 303,264, 238,542]  # P5/32
+   - [436,615, 739,380, 925,792]  # P6/64
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [768, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [768]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 9-P6/64
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 11
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [768, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]],  # cat backbone P5
+    [-1, 3, C3, [768, False]],  # 15
+
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 19
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 23 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 20], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 26 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 16], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [768, False]],  # 29 (P5/32-large)
+
+    [-1, 1, Conv, [768, 3, 2]],
+    [[-1, 12], 1, Concat, [1]],  # cat head P6
+    [-1, 3, C3, [1024, False]],  # 32 (P6/64-xlarge)
+
+    [[23, 26, 29, 32], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5, P6)
+   ]
models/segment/yolov5l-seg.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
+   ]
models/segment/yolov5m-seg.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.67  # model depth multiple
+ width_multiple: 0.75  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
+   ]
models/segment/yolov5n-seg.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.33  # model depth multiple
+ width_multiple: 0.25  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
+   ]
models/segment/yolov5s-seg.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.33  # model depth multiple
+ width_multiple: 0.5  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
+   ]
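In the Segment head arguments [nc, anchors, 32, 256], 32 is the number of mask coefficients per detection (nm) and 256 the hidden width of the prototype branch (npr); the Proto module shown earlier in models/common.py emits nm prototype masks, and each instance mask is a sigmoid of a linear combination of prototypes weighted by that detection's coefficients. A shape-only sketch with random stand-in tensors:

    import torch

    n_det, nm, h, w = 5, 32, 160, 160
    protos = torch.randn(nm, h, w)       # Proto output for one image
    coeffs = torch.randn(n_det, nm)      # per-detection mask coefficients from Segment
    masks = (coeffs @ protos.view(nm, -1)).sigmoid().view(n_det, h, w)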
models/segment/yolov5x-seg.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.33  # model depth multiple
+ width_multiple: 1.25  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]],  # Detect(P3, P4, P5)
+   ]
models/tf.py ADDED
@@ -0,0 +1,608 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+ """
+ TensorFlow, Keras and TFLite versions of YOLOv5
+ Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127
+
+ Usage:
+     $ python models/tf.py --weights yolov5s.pt
+
+ Export:
+     $ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs
+ """
+
+ import argparse
+ import sys
+ from copy import deepcopy
+ from pathlib import Path
+
+ FILE = Path(__file__).resolve()
+ ROOT = FILE.parents[1]  # YOLOv5 root directory
+ if str(ROOT) not in sys.path:
+     sys.path.append(str(ROOT))  # add ROOT to PATH
+ # ROOT = ROOT.relative_to(Path.cwd())  # relative
+
+ import numpy as np
+ import tensorflow as tf
+ import torch
+ import torch.nn as nn
+ from tensorflow import keras
+
+ from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv,
+                            DWConvTranspose2d, Focus, autopad)
+ from models.experimental import MixConv2d, attempt_load
+ from models.yolo import Detect, Segment
+ from utils.activations import SiLU
+ from utils.general import LOGGER, make_divisible, print_args
+
+
+ class TFBN(keras.layers.Layer):
+     # TensorFlow BatchNormalization wrapper
+     def __init__(self, w=None):
+         super().__init__()
+         self.bn = keras.layers.BatchNormalization(
+             beta_initializer=keras.initializers.Constant(w.bias.numpy()),
+             gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
+             moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),
+             moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),
+             epsilon=w.eps)
+
+     def call(self, inputs):
+         return self.bn(inputs)
+
+
+ class TFPad(keras.layers.Layer):
+     # Pad inputs in spatial dimensions 1 and 2
+     def __init__(self, pad):
+         super().__init__()
+         if isinstance(pad, int):
+             self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
+         else:  # tuple/list
+             self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]])
+
+     def call(self, inputs):
+         return tf.pad(inputs, self.pad, mode='constant', constant_values=0)
+
+
+ class TFConv(keras.layers.Layer):
+     # Standard convolution
+     def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
+         # ch_in, ch_out, weights, kernel, stride, padding, groups
+         super().__init__()
+         assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
+         # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
+         # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch
+         conv = keras.layers.Conv2D(
+             filters=c2,
+             kernel_size=k,
+             strides=s,
+             padding='SAME' if s == 1 else 'VALID',
+             use_bias=not hasattr(w, 'bn'),
+             kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
+             bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
+         self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
+         self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
+         self.act = activations(w.act) if act else tf.identity
+
+     def call(self, inputs):
+         return self.act(self.bn(self.conv(inputs)))
+
+
90
+ class TFDWConv(keras.layers.Layer):
91
+ # Depthwise convolution
92
+ def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
93
+ # ch_in, ch_out, weights, kernel, stride, padding, groups
94
+ super().__init__()
95
+ assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels'
96
+ conv = keras.layers.DepthwiseConv2D(
97
+ kernel_size=k,
98
+ depth_multiplier=c2 // c1,
99
+ strides=s,
100
+ padding='SAME' if s == 1 else 'VALID',
101
+ use_bias=not hasattr(w, 'bn'),
102
+ depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
103
+ bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
104
+ self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
105
+ self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
106
+ self.act = activations(w.act) if act else tf.identity
107
+
108
+ def call(self, inputs):
109
+ return self.act(self.bn(self.conv(inputs)))
110
+
111
+
112
+ class TFDWConvTranspose2d(keras.layers.Layer):
113
+ # Depthwise ConvTranspose2d
114
+ def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
115
+ # ch_in, ch_out, weights, kernel, stride, padding, groups
116
+ super().__init__()
117
+ assert c1 == c2, f'TFDWConv() output={c2} must be equal to input={c1} channels'
118
+ assert k == 4 and p1 == 1, 'TFDWConv() only valid for k=4 and p1=1'
119
+ weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy()
120
+ self.c1 = c1
121
+ self.conv = [
122
+ keras.layers.Conv2DTranspose(filters=1,
123
+ kernel_size=k,
124
+ strides=s,
125
+ padding='VALID',
126
+ output_padding=p2,
127
+ use_bias=True,
128
+ kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]),
129
+ bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)]
130
+
131
+ def call(self, inputs):
132
+ return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1]
133
+
134
+
135
+ class TFFocus(keras.layers.Layer):
136
+ # Focus wh information into c-space
137
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
138
+ # ch_in, ch_out, kernel, stride, padding, groups
139
+ super().__init__()
140
+ self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)
141
+
142
+ def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c)
143
+ # inputs = inputs / 255 # normalize 0-255 to 0-1
144
+ inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]]
145
+ return self.conv(tf.concat(inputs, 3))
146
+
147
+
148
+ class TFBottleneck(keras.layers.Layer):
149
+ # Standard bottleneck
150
+ def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion
151
+ super().__init__()
152
+ c_ = int(c2 * e) # hidden channels
153
+ self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
154
+ self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
155
+ self.add = shortcut and c1 == c2
156
+
157
+ def call(self, inputs):
158
+ return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
159
+
160
+
161
+ class TFCrossConv(keras.layers.Layer):
162
+ # Cross Convolution
163
+ def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
164
+ super().__init__()
165
+ c_ = int(c2 * e) # hidden channels
166
+ self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1)
167
+ self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2)
168
+ self.add = shortcut and c1 == c2
169
+
170
+ def call(self, inputs):
171
+ return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
172
+
173
+
174
+ class TFConv2d(keras.layers.Layer):
175
+ # Substitution for PyTorch nn.Conv2D
176
+ def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
177
+ super().__init__()
178
+ assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
179
+ self.conv = keras.layers.Conv2D(filters=c2,
180
+ kernel_size=k,
181
+ strides=s,
182
+ padding='VALID',
183
+ use_bias=bias,
184
+ kernel_initializer=keras.initializers.Constant(
185
+ w.weight.permute(2, 3, 1, 0).numpy()),
186
+ bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None)
187
+
188
+ def call(self, inputs):
189
+ return self.conv(inputs)
190
+
191
+
192
+ class TFBottleneckCSP(keras.layers.Layer):
193
+ # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
194
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
195
+ # ch_in, ch_out, number, shortcut, groups, expansion
196
+ super().__init__()
197
+ c_ = int(c2 * e) # hidden channels
198
+ self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
199
+ self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
200
+ self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3)
201
+ self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4)
202
+ self.bn = TFBN(w.bn)
203
+ self.act = lambda x: keras.activations.swish(x)
204
+ self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
205
+
206
+ def call(self, inputs):
207
+ y1 = self.cv3(self.m(self.cv1(inputs)))
208
+ y2 = self.cv2(inputs)
209
+ return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))
210
+
211
+
212
+ class TFC3(keras.layers.Layer):
213
+ # CSP Bottleneck with 3 convolutions
214
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
215
+ # ch_in, ch_out, number, shortcut, groups, expansion
216
+ super().__init__()
217
+ c_ = int(c2 * e) # hidden channels
218
+ self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
219
+ self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
220
+ self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
221
+ self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
222
+
223
+ def call(self, inputs):
224
+ return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
225
+
226
+
227
+ class TFC3x(keras.layers.Layer):
228
+ # 3 module with cross-convolutions
229
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
230
+ # ch_in, ch_out, number, shortcut, groups, expansion
231
+ super().__init__()
232
+ c_ = int(c2 * e) # hidden channels
233
+ self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
234
+ self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
235
+ self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
236
+ self.m = keras.Sequential([
237
+ TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)])
238
+
239
+ def call(self, inputs):
240
+ return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
241
+
242
+
243
+ class TFSPP(keras.layers.Layer):
244
+ # Spatial pyramid pooling layer used in YOLOv3-SPP
245
+ def __init__(self, c1, c2, k=(5, 9, 13), w=None):
246
+ super().__init__()
247
+ c_ = c1 // 2 # hidden channels
248
+ self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
249
+ self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
250
+ self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k]
251
+
252
+ def call(self, inputs):
253
+ x = self.cv1(inputs)
254
+ return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))
255
+
256
+
257
+ class TFSPPF(keras.layers.Layer):
258
+ # Spatial pyramid pooling-Fast layer
259
+ def __init__(self, c1, c2, k=5, w=None):
260
+ super().__init__()
261
+ c_ = c1 // 2 # hidden channels
262
+ self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
263
+ self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
264
+ self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME')
265
+
266
+ def call(self, inputs):
267
+ x = self.cv1(inputs)
268
+ y1 = self.m(x)
269
+ y2 = self.m(y1)
270
+ return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))
271
+
272
+
273
+ class TFDetect(keras.layers.Layer):
274
+ # TF YOLOv5 Detect layer
275
+ def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer
276
+ super().__init__()
277
+ self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
278
+ self.nc = nc # number of classes
279
+ self.no = nc + 5 # number of outputs per anchor
280
+ self.nl = len(anchors) # number of detection layers
281
+ self.na = len(anchors[0]) // 2 # number of anchors
282
+ self.grid = [tf.zeros(1)] * self.nl # init grid
283
+ self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32)
284
+ self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2])
285
+ self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]
286
+ self.training = False # set to False after building model
287
+ self.imgsz = imgsz
288
+ for i in range(self.nl):
289
+ ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
290
+ self.grid[i] = self._make_grid(nx, ny)
291
+
292
+ def call(self, inputs):
293
+ z = [] # inference output
294
+ x = []
295
+ for i in range(self.nl):
296
+ x.append(self.m[i](inputs[i]))
297
+ # x(bs,20,20,255) to x(bs,3,20,20,85)
298
+ ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
299
+ x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no])
300
+
301
+ if not self.training: # inference
302
+ y = x[i]
303
+ grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5
304
+ anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4
305
+ xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i] # xy
306
+ wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid
307
+ # Normalize xywh to 0-1 to reduce calibration error
308
+ xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
309
+ wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
310
+ y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1)
311
+ z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))
312
+
313
+ return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), )
314
+
315
+ @staticmethod
316
+ def _make_grid(nx=20, ny=20):
317
+ # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
318
+ # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
319
+ xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))
320
+ return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)
321
+
322
+
323
+ class TFSegment(TFDetect):
324
+ # YOLOv5 Segment head for segmentation models
325
+ def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None):
326
+ super().__init__(nc, anchors, ch, imgsz, w)
327
+ self.nm = nm # number of masks
328
+ self.npr = npr # number of protos
329
+ self.no = 5 + nc + self.nm # number of outputs per anchor
330
+ self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] # output conv
331
+ self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto) # protos
332
+ self.detect = TFDetect.call
333
+
334
+ def call(self, x):
335
+ p = self.proto(x[0])
336
+ # p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0])) # (optional) full-size protos
337
+ p = tf.transpose(p, [0, 3, 1, 2]) # from shape(1,160,160,32) to shape(1,32,160,160)
338
+ x = self.detect(self, x)
339
+ return (x, p) if self.training else (x[0], p)
340
+
341
+
342
+ class TFProto(keras.layers.Layer):
343
+
344
+ def __init__(self, c1, c_=256, c2=32, w=None):
345
+ super().__init__()
346
+ self.cv1 = TFConv(c1, c_, k=3, w=w.cv1)
347
+ self.upsample = TFUpsample(None, scale_factor=2, mode='nearest')
348
+ self.cv2 = TFConv(c_, c_, k=3, w=w.cv2)
349
+ self.cv3 = TFConv(c_, c2, w=w.cv3)
350
+
351
+ def call(self, inputs):
352
+ return self.cv3(self.cv2(self.upsample(self.cv1(inputs))))
353
+
354
+
355
+ class TFUpsample(keras.layers.Layer):
356
+ # TF version of torch.nn.Upsample()
357
+ def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w'
358
+ super().__init__()
359
+ assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2'
360
+ self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode)
361
+ # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
362
+ # with default arguments: align_corners=False, half_pixel_centers=False
363
+ # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,
364
+ # size=(x.shape[1] * 2, x.shape[2] * 2))
365
+
366
+ def call(self, inputs):
367
+ return self.upsample(inputs)
368
+
369
+
370
+ class TFConcat(keras.layers.Layer):
371
+ # TF version of torch.concat()
372
+ def __init__(self, dimension=1, w=None):
373
+ super().__init__()
374
+ assert dimension == 1, 'convert only NCHW to NHWC concat'
375
+ self.d = 3
376
+
377
+ def call(self, inputs):
378
+ return tf.concat(inputs, self.d)
379
+
380
+
381
+ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
382
+ LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
383
+ anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
384
+ na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
385
+ no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
386
+
387
+ layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
388
+ for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
389
+ m_str = m
390
+ m = eval(m) if isinstance(m, str) else m # eval strings
391
+ for j, a in enumerate(args):
392
+ try:
393
+ args[j] = eval(a) if isinstance(a, str) else a # eval strings
394
+ except NameError:
395
+ pass
396
+
397
+ n = max(round(n * gd), 1) if n > 1 else n # depth gain
398
+ if m in [
399
+ nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv,
400
+ BottleneckCSP, C3, C3x]:
401
+ c1, c2 = ch[f], args[0]
402
+ c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
403
+
404
+ args = [c1, c2, *args[1:]]
405
+ if m in [BottleneckCSP, C3, C3x]:
406
+ args.insert(2, n)
407
+ n = 1
408
+ elif m is nn.BatchNorm2d:
409
+ args = [ch[f]]
410
+ elif m is Concat:
411
+ c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
412
+ elif m in [Detect, Segment]:
413
+ args.append([ch[x + 1] for x in f])
414
+ if isinstance(args[1], int): # number of anchors
415
+ args[1] = [list(range(args[1] * 2))] * len(f)
416
+ if m is Segment:
417
+ args[3] = make_divisible(args[3] * gw, 8)
418
+ args.append(imgsz)
419
+ else:
420
+ c2 = ch[f]
421
+
422
+ tf_m = eval('TF' + m_str.replace('nn.', ''))
423
+ m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
424
+ else tf_m(*args, w=model.model[i]) # module
425
+
426
+ torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
427
+ t = str(m)[8:-2].replace('__main__.', '') # module type
428
+ np = sum(x.numel() for x in torch_m_.parameters()) # number params
429
+ m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
430
+ LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print
431
+ save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
432
+ layers.append(m_)
433
+ ch.append(c2)
434
+ return keras.Sequential(layers), sorted(save)
435
+
436
+
437
+ class TFModel:
438
+ # TF YOLOv5 model
439
+ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes
440
+ super().__init__()
441
+ if isinstance(cfg, dict):
442
+ self.yaml = cfg # model dict
443
+ else: # is *.yaml
444
+ import yaml # for torch hub
445
+ self.yaml_file = Path(cfg).name
446
+ with open(cfg) as f:
447
+ self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
448
+
449
+ # Define model
450
+ if nc and nc != self.yaml['nc']:
451
+ LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
452
+ self.yaml['nc'] = nc # override yaml value
453
+ self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)
454
+
455
+ def predict(self,
456
+ inputs,
457
+ tf_nms=False,
458
+ agnostic_nms=False,
459
+ topk_per_class=100,
460
+ topk_all=100,
461
+ iou_thres=0.45,
462
+ conf_thres=0.25):
463
+ y = [] # outputs
464
+ x = inputs
465
+ for m in self.model.layers:
466
+ if m.f != -1: # if not from previous layer
467
+ x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
468
+
469
+ x = m(x) # run
470
+ y.append(x if m.i in self.savelist else None) # save output
471
+
472
+ # Add TensorFlow NMS
473
+ if tf_nms:
474
+ boxes = self._xywh2xyxy(x[0][..., :4])
475
+ probs = x[0][:, :, 4:5]
476
+ classes = x[0][:, :, 5:]
477
+ scores = probs * classes
478
+ if agnostic_nms:
479
+ nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres)
480
+ else:
481
+ boxes = tf.expand_dims(boxes, 2)
482
+ nms = tf.image.combined_non_max_suppression(boxes,
483
+ scores,
484
+ topk_per_class,
485
+ topk_all,
486
+ iou_thres,
487
+ conf_thres,
488
+ clip_boxes=False)
489
+ return (nms, )
490
+ return x # output [1,6300,85] = [xywh, conf, class0, class1, ...]
491
+ # x = x[0] # [x(1,6300,85), ...] to x(6300,85)
492
+ # xywh = x[..., :4] # x(6300,4) boxes
493
+ # conf = x[..., 4:5] # x(6300,1) confidences
494
+ # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes
495
+ # return tf.concat([conf, cls, xywh], 1)
496
+
497
+ @staticmethod
498
+ def _xywh2xyxy(xywh):
499
+ # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
500
+ x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)
501
+ return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)
502
+
503
+
504
+ class AgnosticNMS(keras.layers.Layer):
505
+ # TF Agnostic NMS
506
+ def call(self, input, topk_all, iou_thres, conf_thres):
507
+ # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450
508
+ return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres),
509
+ input,
510
+ fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),
511
+ name='agnostic_nms')
512
+
513
+ @staticmethod
514
+ def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS
515
+ boxes, classes, scores = x
516
+ class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)
517
+ scores_inp = tf.reduce_max(scores, -1)
518
+ selected_inds = tf.image.non_max_suppression(boxes,
519
+ scores_inp,
520
+ max_output_size=topk_all,
521
+ iou_threshold=iou_thres,
522
+ score_threshold=conf_thres)
523
+ selected_boxes = tf.gather(boxes, selected_inds)
524
+ padded_boxes = tf.pad(selected_boxes,
525
+ paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]],
526
+ mode='CONSTANT',
527
+ constant_values=0.0)
528
+ selected_scores = tf.gather(scores_inp, selected_inds)
529
+ padded_scores = tf.pad(selected_scores,
530
+ paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
531
+ mode='CONSTANT',
532
+ constant_values=-1.0)
533
+ selected_classes = tf.gather(class_inds, selected_inds)
534
+ padded_classes = tf.pad(selected_classes,
535
+ paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
536
+ mode='CONSTANT',
537
+ constant_values=-1.0)
538
+ valid_detections = tf.shape(selected_inds)[0]
539
+ return padded_boxes, padded_scores, padded_classes, valid_detections
540
+
541
+
542
+ def activations(act=nn.SiLU):
543
+ # Returns TF activation from input PyTorch activation
544
+ if isinstance(act, nn.LeakyReLU):
545
+ return lambda x: keras.activations.relu(x, alpha=0.1)
546
+ elif isinstance(act, nn.Hardswish):
547
+ return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667
548
+ elif isinstance(act, (nn.SiLU, SiLU)):
549
+ return lambda x: keras.activations.swish(x)
550
+ else:
551
+ raise Exception(f'no matching TensorFlow activation found for PyTorch activation {act}')
552
+
553
+
554
+ def representative_dataset_gen(dataset, ncalib=100):
555
+ # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays
556
+ for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):
557
+ im = np.transpose(img, [1, 2, 0])
558
+ im = np.expand_dims(im, axis=0).astype(np.float32)
559
+ im /= 255
560
+ yield [im]
561
+ if n >= ncalib:
562
+ break
563
+
564
+
565
+ def run(
566
+ weights=ROOT / 'yolov5s.pt', # weights path
567
+ imgsz=(640, 640), # inference size h,w
568
+ batch_size=1, # batch size
569
+ dynamic=False, # dynamic batch size
570
+ ):
571
+ # PyTorch model
572
+ im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image
573
+ model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False)
574
+ _ = model(im) # inference
575
+ model.info()
576
+
577
+ # TensorFlow model
578
+ im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image
579
+ tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
580
+ _ = tf_model.predict(im) # inference
581
+
582
+ # Keras model
583
+ im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
584
+ keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im))
585
+ keras_model.summary()
586
+
587
+ LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.')
588
+
589
+
590
+ def parse_opt():
591
+ parser = argparse.ArgumentParser()
592
+ parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
593
+ parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
594
+ parser.add_argument('--batch-size', type=int, default=1, help='batch size')
595
+ parser.add_argument('--dynamic', action='store_true', help='dynamic batch size')
596
+ opt = parser.parse_args()
597
+ opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
598
+ print_args(vars(opt))
599
+ return opt
600
+
601
+
602
+ def main(opt):
603
+ run(**vars(opt))
604
+
605
+
606
+ if __name__ == '__main__':
607
+ opt = parse_opt()
608
+ main(opt)
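
Beyond the CLI shown in run()/parse_opt() above, the module can be driven programmatically. A minimal sketch, assuming a yolov5s.pt checkpoint is present in the working directory:

# Sketch: rebuild a PyTorch checkpoint as a TF graph and run a dummy forward pass.
import tensorflow as tf
import torch

from models.experimental import attempt_load
from models.tf import TFModel

pt_model = attempt_load('yolov5s.pt', device=torch.device('cpu'), fuse=False)  # assumed local checkpoint
tf_model = TFModel(cfg=pt_model.yaml, model=pt_model, nc=pt_model.nc, imgsz=(640, 640))
pred = tf_model.predict(tf.zeros((1, 640, 640, 3)))  # BHWC dummy image, values in 0-1
print(pred[0].shape)  # (1, 25200, 85) for an 80-class model at 640x640
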
models/yolo.py ADDED
@@ -0,0 +1,391 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+ """
+ YOLO-specific modules
+
+ Usage:
+     $ python models/yolo.py --cfg yolov5s.yaml
+ """
+
+ import argparse
+ import contextlib
+ import os
+ import platform
+ import sys
+ from copy import deepcopy
+ from pathlib import Path
+
+ FILE = Path(__file__).resolve()
+ ROOT = FILE.parents[1]  # YOLOv5 root directory
+ if str(ROOT) not in sys.path:
+     sys.path.append(str(ROOT))  # add ROOT to PATH
+ if platform.system() != 'Windows':
+     ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+ from models.common import *  # noqa
+ from models.experimental import *  # noqa
+ from utils.autoanchor import check_anchor_order
+ from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
+ from utils.plots import feature_visualization
+ from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
+                                time_sync)
+
+ try:
+     import thop  # for FLOPs computation
+ except ImportError:
+     thop = None
+
+
+ class Detect(nn.Module):
+     # YOLOv5 Detect head for detection models
+     stride = None  # strides computed during build
+     dynamic = False  # force grid reconstruction
+     export = False  # export mode
+
+     def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detection layer
+         super().__init__()
+         self.nc = nc  # number of classes
+         self.no = nc + 5  # number of outputs per anchor
+         self.nl = len(anchors)  # number of detection layers
+         self.na = len(anchors[0]) // 2  # number of anchors
+         self.grid = [torch.empty(0) for _ in range(self.nl)]  # init grid
+         self.anchor_grid = [torch.empty(0) for _ in range(self.nl)]  # init anchor grid
+         self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2))  # shape(nl,na,2)
+         self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
+         self.inplace = inplace  # use inplace ops (e.g. slice assignment)
+
+     def forward(self, x):
+         z = []  # inference output
+         for i in range(self.nl):
+             x[i] = self.m[i](x[i])  # conv
+             bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
+             x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
+
+             if not self.training:  # inference
+                 if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
+                     self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
+
+                 if isinstance(self, Segment):  # (boxes + masks)
+                     xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
+                     xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i]  # xy
+                     wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i]  # wh
+                     y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
+                 else:  # Detect (boxes only)
+                     xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
+                     xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy
+                     wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh
+                     y = torch.cat((xy, wh, conf), 4)
+                 z.append(y.view(bs, self.na * nx * ny, self.no))
+
+         return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
+
+     def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
+         d = self.anchors[i].device
+         t = self.anchors[i].dtype
+         shape = 1, self.na, ny, nx, 2  # grid shape
+         y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
+         yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x)  # indexing arg requires torch>=1.10
+         grid = torch.stack((xv, yv), 2).expand(shape) - 0.5  # add grid offset, i.e. y = 2.0 * x - 0.5
+         anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
+         return grid, anchor_grid
+
+
+ class Segment(Detect):
+     # YOLOv5 Segment head for segmentation models
+     def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
+         super().__init__(nc, anchors, ch, inplace)
+         self.nm = nm  # number of masks
+         self.npr = npr  # number of protos
+         self.no = 5 + nc + self.nm  # number of outputs per anchor
+         self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
+         self.proto = Proto(ch[0], self.npr, self.nm)  # protos
+         self.detect = Detect.forward
+
+     def forward(self, x):
+         p = self.proto(x[0])
+         x = self.detect(self, x)
+         return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])
+
+
+ class BaseModel(nn.Module):
+     # YOLOv5 base model
+     def forward(self, x, profile=False, visualize=False):
+         return self._forward_once(x, profile, visualize)  # single-scale inference, train
+
+     def _forward_once(self, x, profile=False, visualize=False):
+         y, dt = [], []  # outputs
+         for m in self.model:
+             if m.f != -1:  # if not from previous layer
+                 x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
+             if profile:
+                 self._profile_one_layer(m, x, dt)
+             x = m(x)  # run
+             y.append(x if m.i in self.save else None)  # save output
+             if visualize:
+                 feature_visualization(x, m.type, m.i, save_dir=visualize)
+         return x
+
+     def _profile_one_layer(self, m, x, dt):
+         c = m == self.model[-1]  # is final layer, copy input as inplace fix
+         o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs
+         t = time_sync()
+         for _ in range(10):
+             m(x.copy() if c else x)
+         dt.append((time_sync() - t) * 100)
+         if m == self.model[0]:
+             LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s}  module")
+         LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f}  {m.type}')
+         if c:
+             LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s}  Total")
+
+     def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
+         LOGGER.info('Fusing layers... ')
+         for m in self.model.modules():
+             if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
+                 m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
+                 delattr(m, 'bn')  # remove batchnorm
+                 m.forward = m.forward_fuse  # update forward
+         self.info()
+         return self
+
+     def info(self, verbose=False, img_size=640):  # print model information
+         model_info(self, verbose, img_size)
+
+     def _apply(self, fn):
+         # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
+         self = super()._apply(fn)
+         m = self.model[-1]  # Detect()
+         if isinstance(m, (Detect, Segment)):
+             m.stride = fn(m.stride)
+             m.grid = list(map(fn, m.grid))
+             if isinstance(m.anchor_grid, list):
+                 m.anchor_grid = list(map(fn, m.anchor_grid))
+         return self
+
+
+ class DetectionModel(BaseModel):
+     # YOLOv5 detection model
+     def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
+         super().__init__()
+         if isinstance(cfg, dict):
+             self.yaml = cfg  # model dict
+         else:  # is *.yaml
+             import yaml  # for torch hub
+             self.yaml_file = Path(cfg).name
+             with open(cfg, encoding='ascii', errors='ignore') as f:
+                 self.yaml = yaml.safe_load(f)  # model dict
+
+         # Define model
+         ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
+         if nc and nc != self.yaml['nc']:
+             LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
+             self.yaml['nc'] = nc  # override yaml value
+         if anchors:
+             LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
+             self.yaml['anchors'] = round(anchors)  # override yaml value
+         self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
+         self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
+         self.inplace = self.yaml.get('inplace', True)
+
+         # Build strides, anchors
+         m = self.model[-1]  # Detect()
+         if isinstance(m, (Detect, Segment)):
+             s = 256  # 2x min stride
+             m.inplace = self.inplace
+             forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
+             m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))])  # forward
+             check_anchor_order(m)
+             m.anchors /= m.stride.view(-1, 1, 1)
+             self.stride = m.stride
+             self._initialize_biases()  # only run once
+
+         # Init weights, biases
+         initialize_weights(self)
+         self.info()
+         LOGGER.info('')
+
+     def forward(self, x, augment=False, profile=False, visualize=False):
+         if augment:
+             return self._forward_augment(x)  # augmented inference, None
+         return self._forward_once(x, profile, visualize)  # single-scale inference, train
+
+     def _forward_augment(self, x):
+         img_size = x.shape[-2:]  # height, width
+         s = [1, 0.83, 0.67]  # scales
+         f = [None, 3, None]  # flips (2-ud, 3-lr)
+         y = []  # outputs
+         for si, fi in zip(s, f):
+             xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
+             yi = self._forward_once(xi)[0]  # forward
+             # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
+             yi = self._descale_pred(yi, fi, si, img_size)
+             y.append(yi)
+         y = self._clip_augmented(y)  # clip augmented tails
+         return torch.cat(y, 1), None  # augmented inference, train
+
+     def _descale_pred(self, p, flips, scale, img_size):
+         # de-scale predictions following augmented inference (inverse operation)
+         if self.inplace:
+             p[..., :4] /= scale  # de-scale
+             if flips == 2:
+                 p[..., 1] = img_size[0] - p[..., 1]  # de-flip ud
+             elif flips == 3:
+                 p[..., 0] = img_size[1] - p[..., 0]  # de-flip lr
+         else:
+             x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale  # de-scale
+             if flips == 2:
+                 y = img_size[0] - y  # de-flip ud
+             elif flips == 3:
+                 x = img_size[1] - x  # de-flip lr
+             p = torch.cat((x, y, wh, p[..., 4:]), -1)
+         return p
+
+     def _clip_augmented(self, y):
+         # Clip YOLOv5 augmented inference tails
+         nl = self.model[-1].nl  # number of detection layers (P3-P5)
+         g = sum(4 ** x for x in range(nl))  # grid points
+         e = 1  # exclude layer count
+         i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e))  # indices
+         y[0] = y[0][:, :-i]  # large
+         i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e))  # indices
+         y[-1] = y[-1][:, i:]  # small
+         return y
+
+     def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
+         # https://arxiv.org/abs/1708.02002 section 3.3
+         # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
+         m = self.model[-1]  # Detect() module
+         for mi, s in zip(m.m, m.stride):  # from
+             b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
+             b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
+             b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum())  # cls
+             mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+
+ Model = DetectionModel  # retain YOLOv5 'Model' class for backwards compatibility
+
+
+ class SegmentationModel(DetectionModel):
+     # YOLOv5 segmentation model
+     def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):
+         super().__init__(cfg, ch, nc, anchors)
+
+
+ class ClassificationModel(BaseModel):
+     # YOLOv5 classification model
+     def __init__(self, cfg=None, model=None, nc=1000, cutoff=10):  # yaml, model, number of classes, cutoff index
+         super().__init__()
+         self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)
+
+     def _from_detection_model(self, model, nc=1000, cutoff=10):
+         # Create a YOLOv5 classification model from a YOLOv5 detection model
+         if isinstance(model, DetectMultiBackend):
+             model = model.model  # unwrap DetectMultiBackend
+         model.model = model.model[:cutoff]  # backbone
+         m = model.model[-1]  # last layer
+         ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels  # ch into module
+         c = Classify(ch, nc)  # Classify()
+         c.i, c.f, c.type = m.i, m.f, 'models.common.Classify'  # index, from, type
+         model.model[-1] = c  # replace
+         self.model = model.model
+         self.stride = model.stride
+         self.save = []
+         self.nc = nc
+
+     def _from_yaml(self, cfg):
+         # Create a YOLOv5 classification model from a *.yaml file
+         self.model = None
+
+
+ def parse_model(d, ch):  # model_dict, input_channels(3)
+     # Parse a YOLOv5 model.yaml dictionary
+     LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<40}{'arguments':<30}")
+     anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
+     if act:
+         Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
+         LOGGER.info(f"{colorstr('activation:')} {act}")  # print
+     na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
+     no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
+
+     layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
+     for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
+         m = eval(m) if isinstance(m, str) else m  # eval strings
+         for j, a in enumerate(args):
+             with contextlib.suppress(NameError):
+                 args[j] = eval(a) if isinstance(a, str) else a  # eval strings
+
+         n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain
+         if m in {
+                 Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
+                 BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
+             c1, c2 = ch[f], args[0]
+             if c2 != no:  # if not output
+                 c2 = make_divisible(c2 * gw, 8)
+
+             args = [c1, c2, *args[1:]]
+             if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:
+                 args.insert(2, n)  # number of repeats
+                 n = 1
+         elif m is nn.BatchNorm2d:
+             args = [ch[f]]
+         elif m is Concat:
+             c2 = sum(ch[x] for x in f)
+         # TODO: channel, gw, gd
+         elif m in {Detect, Segment}:
+             args.append([ch[x] for x in f])
+             if isinstance(args[1], int):  # number of anchors
+                 args[1] = [list(range(args[1] * 2))] * len(f)
+             if m is Segment:
+                 args[3] = make_divisible(args[3] * gw, 8)
+         elif m is Contract:
+             c2 = ch[f] * args[0] ** 2
+         elif m is Expand:
+             c2 = ch[f] // args[0] ** 2
+         else:
+             c2 = ch[f]
+
+         m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
+         t = str(m)[8:-2].replace('__main__.', '')  # module type
+         np = sum(x.numel() for x in m_.parameters())  # number params
+         m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
+         LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f}  {t:<40}{str(args):<30}')  # print
+         save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
+         layers.append(m_)
+         if i == 0:
+             ch = []
+         ch.append(c2)
+     return nn.Sequential(*layers), sorted(save)
+
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
+     parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
+     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+     parser.add_argument('--profile', action='store_true', help='profile model speed')
+     parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
+     parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
+     opt = parser.parse_args()
+     opt.cfg = check_yaml(opt.cfg)  # check YAML
+     print_args(vars(opt))
+     device = select_device(opt.device)
+
+     # Create model
+     im = torch.rand(opt.batch_size, 3, 640, 640).to(device)
+     model = Model(opt.cfg).to(device)
+
+     # Options
+     if opt.line_profile:  # profile layer by layer
+         model(im, profile=True)
+
+     elif opt.profile:  # profile forward-backward
+         results = profile(input=im, ops=[model], n=3)
+
+     elif opt.test:  # test all models
+         for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):
+             try:
+                 _ = Model(cfg)
+             except Exception as e:
+                 print(f'Error in {cfg}: {e}')
+
+     else:  # report fused model summary
+         model.fuse()
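
For programmatic use, the Model alias above (DetectionModel) accepts any of the configs that follow. A minimal sketch with an untrained graph, just to inspect output shapes and strides:

# Sketch: build a DetectionModel from a yaml and run a dummy inference pass.
import torch

from models.yolo import Model  # alias of DetectionModel

model = Model('models/yolov5s.yaml', ch=3, nc=80).eval()
with torch.no_grad():
    pred, _ = model(torch.zeros(1, 3, 640, 640))  # eval mode returns (detections, raw maps)
print(pred.shape)    # torch.Size([1, 25200, 85]): 4 box + 1 obj + 80 class scores
print(model.stride)  # tensor([ 8., 16., 32.])
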
models/yolov5l.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
models/yolov5m.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.67  # model depth multiple
+ width_multiple: 0.75  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
models/yolov5n.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.33  # model depth multiple
+ width_multiple: 0.25  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
models/yolov5s.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.33  # model depth multiple
+ width_multiple: 0.50  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
models/yolov5x.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.33  # model depth multiple
+ width_multiple: 1.25  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
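
The five detection configs above are identical apart from depth_multiple (gd) and width_multiple (gw), which parse_model applies per layer. A worked sketch of that arithmetic for the yolov5s values on the backbone entry [-1, 9, C3, [512]]:

# Sketch: how depth/width multiples scale one C3 layer (see parse_model in models/yolo.py).
from utils.general import make_divisible

gd, gw = 0.33, 0.50              # yolov5s depth/width multiples
n, c2 = 9, 512                   # repeats and output channels from the yaml row
n = max(round(n * gd), 1)        # depth gain: 9 repeats -> 3
c2 = make_divisible(c2 * gw, 8)  # width gain: 512 channels -> 256, kept divisible by 8
print(n, c2)                     # 3 256
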
requirements.txt ADDED
@@ -0,0 +1,51 @@
+ # YOLOv5 requirements
+ # Usage: pip install -r requirements.txt
+
+ gradio
+ torch
+ # Base ------------------------------------------------------------------------
+ gitpython>=3.1.30
+ matplotlib>=3.3
+ numpy>=1.22.2
+ opencv-python>=4.1.1
+ Pillow>=7.1.2
+ psutil  # system resources
+ PyYAML>=5.3.1
+ requests>=2.23.0
+ scipy>=1.4.1
+ thop>=0.1.1  # FLOPs computation
+ torch>=1.8.0  # see https://pytorch.org/get-started/locally (recommended)
+ torchvision>=0.9.0
+ tqdm>=4.64.0
+ ultralytics>=8.0.147
+ # protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012
+
+ # Logging ---------------------------------------------------------------------
+ # tensorboard>=2.4.1
+ # clearml>=1.2.0
+ # comet
+
+ # Plotting --------------------------------------------------------------------
+ pandas>=1.1.4
+ seaborn>=0.11.0
+
+ # Export ----------------------------------------------------------------------
+ # coremltools>=6.0  # CoreML export
+ # onnx>=1.10.0  # ONNX export
+ # onnx-simplifier>=0.4.1  # ONNX simplifier
+ # nvidia-pyindex  # TensorRT export
+ # nvidia-tensorrt  # TensorRT export
+ # scikit-learn<=1.1.2  # CoreML quantization
+ # tensorflow>=2.4.0  # TF exports (-cpu, -aarch64, -macos)
+ # tensorflowjs>=3.9.0  # TF.js export
+ # openvino-dev>=2023.0  # OpenVINO export
+
+ # Deploy ----------------------------------------------------------------------
+ setuptools>=65.5.1  # Snyk vulnerability fix
+ # tritonclient[all]~=2.24.0
+
+ # Extras ----------------------------------------------------------------------
+ # ipython  # interactive notebook
+ # mss  # screenshots
+ # albumentations>=1.0.3
+ # pycocotools>=2.0.6  # COCO mAP
utils/__init__.py ADDED
@@ -0,0 +1,86 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+ """
+ utils/initialization
+ """
+
+ import contextlib
+ import platform
+ import threading
+
+
+ def emojis(str=''):
+     # Return platform-dependent emoji-safe version of string
+     return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
+
+
+ class TryExcept(contextlib.ContextDecorator):
+     # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
+     def __init__(self, msg=''):
+         self.msg = msg
+
+     def __enter__(self):
+         pass
+
+     def __exit__(self, exc_type, value, traceback):
+         if value:
+             print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
+         return True
+
+
+ def threaded(func):
+     # Multi-threads a target function and returns thread. Usage: @threaded decorator
+     def wrapper(*args, **kwargs):
+         thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
+         thread.start()
+         return thread
+
+     return wrapper
+
+
+ def join_threads(verbose=False):
+     # Join all daemon threads, i.e. atexit.register(lambda: join_threads())
+     main_thread = threading.current_thread()
+     for t in threading.enumerate():
+         if t is not main_thread:
+             if verbose:
+                 print(f'Joining thread {t.name}')
+             t.join()
+
+
+ def notebook_init(verbose=True):
+     # Check system software and hardware
+     print('Checking setup...')
+
+     import os
+     import shutil
+
+     from ultralytics.utils.checks import check_requirements
+
+     from utils.general import check_font, is_colab
+     from utils.torch_utils import select_device  # imports
+
+     check_font()
+
+     import psutil
+
+     if check_requirements('wandb', install=False):
+         os.system('pip uninstall -y wandb')  # eliminate unexpected account creation prompt with infinite hang
+     if is_colab():
+         shutil.rmtree('/content/sample_data', ignore_errors=True)  # remove colab /sample_data directory
+
+     # System info
+     display = None
+     if verbose:
+         gb = 1 << 30  # bytes to GiB (1024 ** 3)
+         ram = psutil.virtual_memory().total
+         total, used, free = shutil.disk_usage('/')
+         with contextlib.suppress(Exception):  # clear display if ipython is installed
+             from IPython import display
+             display.clear_output()
+         s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'
+     else:
+         s = ''
+
+     select_device(newline=False)
+     print(emojis(f'Setup complete ✅ {s}'))
+     return display
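
A minimal usage sketch for the two decorators defined above: TryExcept prints and suppresses the exception, threaded launches the wrapped call on a daemon thread and returns the Thread object:

# Sketch: using TryExcept and threaded from utils/__init__.py.
from utils import TryExcept, threaded

@TryExcept('demo failed')
def flaky():
    raise ValueError('boom')  # printed as "demo failed: boom", not raised

@threaded
def background(n):
    print(f'crunching {n} items')

flaky()              # exception swallowed and printed
t = background(100)  # thread is already started
t.join()
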
utils/activations.py ADDED
@@ -0,0 +1,103 @@
1
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2
+ """
3
+ Activation functions
4
+ """
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+
10
+
11
+ class SiLU(nn.Module):
12
+ # SiLU activation https://arxiv.org/pdf/1606.08415.pdf
13
+ @staticmethod
14
+ def forward(x):
15
+ return x * torch.sigmoid(x)
16
+
17
+
18
+ class Hardswish(nn.Module):
19
+ # Hard-SiLU activation
20
+ @staticmethod
21
+ def forward(x):
22
+ # return x * F.hardsigmoid(x) # for TorchScript and CoreML
23
+ return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX
24
+
25
+
26
+ class Mish(nn.Module):
27
+ # Mish activation https://github.com/digantamisra98/Mish
28
+ @staticmethod
29
+ def forward(x):
30
+ return x * F.softplus(x).tanh()
31
+
32
+
33
+ class MemoryEfficientMish(nn.Module):
34
+ # Mish activation memory-efficient
35
+ class F(torch.autograd.Function):
36
+
37
+ @staticmethod
38
+ def forward(ctx, x):
39
+ ctx.save_for_backward(x)
40
+ return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
41
+
42
+ @staticmethod
43
+ def backward(ctx, grad_output):
44
+ x = ctx.saved_tensors[0]
45
+ sx = torch.sigmoid(x)
46
+ fx = F.softplus(x).tanh()
47
+ return grad_output * (fx + x * sx * (1 - fx * fx))
48
+
49
+ def forward(self, x):
50
+ return self.F.apply(x)
51
+
52
+
53
+ class FReLU(nn.Module):
54
+ # FReLU activation https://arxiv.org/abs/2007.11824
55
+ def __init__(self, c1, k=3): # ch_in, kernel
56
+ super().__init__()
57
+ self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
58
+ self.bn = nn.BatchNorm2d(c1)
59
+
60
+ def forward(self, x):
61
+ return torch.max(x, self.bn(self.conv(x)))
62
+
63
+
64
+ class AconC(nn.Module):
65
+ r""" ACON activation (activate or not)
66
+ AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
67
+ according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
68
+ """
69
+
70
+ def __init__(self, c1):
71
+ super().__init__()
72
+ self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
73
+ self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
74
+ self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
75
+
76
+ def forward(self, x):
77
+ dpx = (self.p1 - self.p2) * x
78
+ return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
79
+
80
+
81
+ class MetaAconC(nn.Module):
82
+ r""" ACON activation (activate or not)
83
+ MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
84
+ according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
85
+ """
86
+
87
+ def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r
88
+ super().__init__()
89
+ c2 = max(r, c1 // r)
90
+ self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
91
+ self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
92
+ self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
93
+ self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
94
+ # self.bn1 = nn.BatchNorm2d(c2)
95
+ # self.bn2 = nn.BatchNorm2d(c1)
96
+
97
+ def forward(self, x):
98
+ y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
99
+ # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
100
+ # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable
101
+ beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed
102
+ dpx = (self.p1 - self.p2) * x
103
+ return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
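A short smoke-test sketch for the modules above (not part of the commit): the stateless activations (SiLU, Hardswish, Mish) take no constructor arguments, while FReLU, AconC and MetaAconC are channel-aware and need ch_in.

import torch

from utils.activations import FReLU, MetaAconC, SiLU

x = torch.randn(2, 64, 32, 32)    # NCHW feature map
print(SiLU()(x).shape)            # stateless, static forward
print(FReLU(c1=64)(x).shape)      # per-channel depthwise conv + BN
print(MetaAconC(c1=64)(x).shape)  # beta generated from pooled features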
utils/augmentations.py ADDED
@@ -0,0 +1,397 @@
1
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2
+ """
3
+ Image augmentation functions
4
+ """
5
+
6
+ import math
7
+ import random
8
+
9
+ import cv2
10
+ import numpy as np
11
+ import torch
12
+ import torchvision.transforms as T
13
+ import torchvision.transforms.functional as TF
14
+
15
+ from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
16
+ from utils.metrics import bbox_ioa
17
+
18
+ IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean
19
+ IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation
20
+
21
+
22
+ class Albumentations:
23
+ # YOLOv5 Albumentations class (optional, only used if package is installed)
24
+ def __init__(self, size=640):
25
+ self.transform = None
26
+ prefix = colorstr('albumentations: ')
27
+ try:
28
+ import albumentations as A
29
+ check_version(A.__version__, '1.0.3', hard=True) # version requirement
30
+
31
+ T = [
32
+ A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0),
33
+ A.Blur(p=0.01),
34
+ A.MedianBlur(p=0.01),
35
+ A.ToGray(p=0.01),
36
+ A.CLAHE(p=0.01),
37
+ A.RandomBrightnessContrast(p=0.0),
38
+ A.RandomGamma(p=0.0),
39
+ A.ImageCompression(quality_lower=75, p=0.0)] # transforms
40
+ self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
41
+
42
+ LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
43
+ except ImportError: # package not installed, skip
44
+ pass
45
+ except Exception as e:
46
+ LOGGER.info(f'{prefix}{e}')
47
+
48
+ def __call__(self, im, labels, p=1.0):
49
+ if self.transform and random.random() < p:
50
+ new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed
51
+ im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
52
+ return im, labels
53
+
54
+
55
+ def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
56
+ # Normalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std
57
+ return TF.normalize(x, mean, std, inplace=inplace)
58
+
59
+
60
+ def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
61
+ # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = x * std + mean
62
+ for i in range(3):
63
+ x[:, i] = x[:, i] * std[i] + mean[i]
64
+ return x
65
+
66
+
67
+ def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
68
+ # HSV color-space augmentation
69
+ if hgain or sgain or vgain:
70
+ r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
71
+ hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
72
+ dtype = im.dtype # uint8
73
+
74
+ x = np.arange(0, 256, dtype=r.dtype)
75
+ lut_hue = ((x * r[0]) % 180).astype(dtype)
76
+ lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
77
+ lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
78
+
79
+ im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
80
+ cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed
81
+
82
+
83
+ def hist_equalize(im, clahe=True, bgr=False):
84
+ # Equalize histogram on BGR (bgr=True) or RGB (default) image 'im' with im.shape(n,m,3) and range 0-255
85
+ yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
86
+ if clahe:
87
+ c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
88
+ yuv[:, :, 0] = c.apply(yuv[:, :, 0])
89
+ else:
90
+ yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
91
+ return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
92
+
93
+
94
+ def replicate(im, labels):
95
+ # Replicate labels
96
+ h, w = im.shape[:2]
97
+ boxes = labels[:, 1:].astype(int)
98
+ x1, y1, x2, y2 = boxes.T
99
+ s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
100
+ for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
101
+ x1b, y1b, x2b, y2b = boxes[i]
102
+ bh, bw = y2b - y1b, x2b - x1b
103
+ yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
104
+ x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
105
+ im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax]
106
+ labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
107
+
108
+ return im, labels
109
+
110
+
111
+ def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
112
+ # Resize and pad image while meeting stride-multiple constraints
113
+ shape = im.shape[:2] # current shape [height, width]
114
+ if isinstance(new_shape, int):
115
+ new_shape = (new_shape, new_shape)
116
+
117
+ # Scale ratio (new / old)
118
+ r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
119
+ if not scaleup: # only scale down, do not scale up (for better val mAP)
120
+ r = min(r, 1.0)
121
+
122
+ # Compute padding
123
+ ratio = r, r # width, height ratios
124
+ new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
125
+ dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
126
+ if auto: # minimum rectangle
127
+ dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
128
+ elif scaleFill: # stretch
129
+ dw, dh = 0.0, 0.0
130
+ new_unpad = (new_shape[1], new_shape[0])
131
+ ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
132
+
133
+ dw /= 2 # divide padding into 2 sides
134
+ dh /= 2
135
+
136
+ if shape[::-1] != new_unpad: # resize
137
+ im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
138
+ top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
139
+ left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
140
+ im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
141
+ return im, ratio, (dw, dh)
142
+
143
+
144
+ def random_perspective(im,
145
+ targets=(),
146
+ segments=(),
147
+ degrees=10,
148
+ translate=.1,
149
+ scale=.1,
150
+ shear=10,
151
+ perspective=0.0,
152
+ border=(0, 0)):
153
+ # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
154
+ # targets = [cls, xyxy]
155
+
156
+ height = im.shape[0] + border[0] * 2 # shape(h,w,c)
157
+ width = im.shape[1] + border[1] * 2
158
+
159
+ # Center
160
+ C = np.eye(3)
161
+ C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
162
+ C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
163
+
164
+ # Perspective
165
+ P = np.eye(3)
166
+ P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
167
+ P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
168
+
169
+ # Rotation and Scale
170
+ R = np.eye(3)
171
+ a = random.uniform(-degrees, degrees)
172
+ # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
173
+ s = random.uniform(1 - scale, 1 + scale)
174
+ # s = 2 ** random.uniform(-scale, scale)
175
+ R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
176
+
177
+ # Shear
178
+ S = np.eye(3)
179
+ S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
180
+ S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
181
+
182
+ # Translation
183
+ T = np.eye(3)
184
+ T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
185
+ T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
186
+
187
+ # Combined rotation matrix
188
+ M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
189
+ if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
190
+ if perspective:
191
+ im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
192
+ else: # affine
193
+ im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
194
+
195
+ # Visualize
196
+ # import matplotlib.pyplot as plt
197
+ # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
198
+ # ax[0].imshow(im[:, :, ::-1]) # base
199
+ # ax[1].imshow(im2[:, :, ::-1]) # warped
200
+
201
+ # Transform label coordinates
202
+ n = len(targets)
203
+ if n:
204
+ use_segments = any(x.any() for x in segments) and len(segments) == n
205
+ new = np.zeros((n, 4))
206
+ if use_segments: # warp segments
207
+ segments = resample_segments(segments) # upsample
208
+ for i, segment in enumerate(segments):
209
+ xy = np.ones((len(segment), 3))
210
+ xy[:, :2] = segment
211
+ xy = xy @ M.T # transform
212
+ xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
213
+
214
+ # clip
215
+ new[i] = segment2box(xy, width, height)
216
+
217
+ else: # warp boxes
218
+ xy = np.ones((n * 4, 3))
219
+ xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
220
+ xy = xy @ M.T # transform
221
+ xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
222
+
223
+ # create new boxes
224
+ x = xy[:, [0, 2, 4, 6]]
225
+ y = xy[:, [1, 3, 5, 7]]
226
+ new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
227
+
228
+ # clip
229
+ new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
230
+ new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
231
+
232
+ # filter candidates
233
+ i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
234
+ targets = targets[i]
235
+ targets[:, 1:5] = new[i]
236
+
237
+ return im, targets
238
+
239
+
240
+ def copy_paste(im, labels, segments, p=0.5):
241
+ # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)
242
+ n = len(segments)
243
+ if p and n:
244
+ h, w, c = im.shape # height, width, channels
245
+ im_new = np.zeros(im.shape, np.uint8)
246
+ for j in random.sample(range(n), k=round(p * n)):
247
+ l, s = labels[j], segments[j]
248
+ box = w - l[3], l[2], w - l[1], l[4]
249
+ ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
250
+ if (ioa < 0.30).all(): # allow 30% obscuration of existing labels
251
+ labels = np.concatenate((labels, [[l[0], *box]]), 0)
252
+ segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
253
+ cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED)
254
+
255
+ result = cv2.flip(im, 1) # augment segments (flip left-right)
256
+ i = cv2.flip(im_new, 1).astype(bool)
257
+ im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug
258
+
259
+ return im, labels, segments
260
+
261
+
262
+ def cutout(im, labels, p=0.5):
263
+ # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
264
+ if random.random() < p:
265
+ h, w = im.shape[:2]
266
+ scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
267
+ for s in scales:
268
+ mask_h = random.randint(1, int(h * s)) # create random masks
269
+ mask_w = random.randint(1, int(w * s))
270
+
271
+ # box
272
+ xmin = max(0, random.randint(0, w) - mask_w // 2)
273
+ ymin = max(0, random.randint(0, h) - mask_h // 2)
274
+ xmax = min(w, xmin + mask_w)
275
+ ymax = min(h, ymin + mask_h)
276
+
277
+ # apply random color mask
278
+ im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
279
+
280
+ # return unobscured labels
281
+ if len(labels) and s > 0.03:
282
+ box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
283
+ ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h)) # intersection over area
284
+ labels = labels[ioa < 0.60] # remove >60% obscured labels
285
+
286
+ return labels
287
+
288
+
289
+ def mixup(im, labels, im2, labels2):
290
+ # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
291
+ r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
292
+ im = (im * r + im2 * (1 - r)).astype(np.uint8)
293
+ labels = np.concatenate((labels, labels2), 0)
294
+ return im, labels
295
+
296
+
297
+ def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
298
+ # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
299
+ w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
300
+ w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
301
+ ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
302
+ return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
303
+
304
+
305
+ def classify_albumentations(
306
+ augment=True,
307
+ size=224,
308
+ scale=(0.08, 1.0),
309
+ ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33
310
+ hflip=0.5,
311
+ vflip=0.0,
312
+ jitter=0.4,
313
+ mean=IMAGENET_MEAN,
314
+ std=IMAGENET_STD,
315
+ auto_aug=False):
316
+ # YOLOv5 classification Albumentations (optional, only used if package is installed)
317
+ prefix = colorstr('albumentations: ')
318
+ try:
319
+ import albumentations as A
320
+ from albumentations.pytorch import ToTensorV2
321
+ check_version(A.__version__, '1.0.3', hard=True) # version requirement
322
+ if augment: # Resize and crop
323
+ T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)]
324
+ if auto_aug:
325
+ # TODO: implement AugMix, AutoAug & RandAug in albumentation
326
+ LOGGER.info(f'{prefix}auto augmentations are currently not supported')
327
+ else:
328
+ if hflip > 0:
329
+ T += [A.HorizontalFlip(p=hflip)]
330
+ if vflip > 0:
331
+ T += [A.VerticalFlip(p=vflip)]
332
+ if jitter > 0:
333
+ color_jitter = (float(jitter), ) * 3 # repeat value for brightness, contrast, saturation, 0 hue
334
+ T += [A.ColorJitter(*color_jitter, 0)]
335
+ else: # Use fixed crop for eval set (reproducibility)
336
+ T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
337
+ T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor
338
+ LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
339
+ return A.Compose(T)
340
+
341
+ except ImportError: # package not installed, skip
342
+ LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)')
343
+ except Exception as e:
344
+ LOGGER.info(f'{prefix}{e}')
345
+
346
+
347
+ def classify_transforms(size=224):
348
+ # Transforms to apply if albumentations not installed
349
+ assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)'
350
+ # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
351
+ return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
352
+
353
+
354
+ class LetterBox:
355
+ # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
356
+ def __init__(self, size=(640, 640), auto=False, stride=32):
357
+ super().__init__()
358
+ self.h, self.w = (size, size) if isinstance(size, int) else size
359
+ self.auto = auto # pass max size integer, automatically solve for short side using stride
360
+ self.stride = stride # used with auto
361
+
362
+ def __call__(self, im): # im = np.array HWC
363
+ imh, imw = im.shape[:2]
364
+ r = min(self.h / imh, self.w / imw) # ratio of new/old
365
+ h, w = round(imh * r), round(imw * r) # resized image
366
+ hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w) # parentheses required so hs, ws unpack correctly when auto=True
367
+ top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
368
+ im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype)
369
+ im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
370
+ return im_out
371
+
372
+
373
+ class CenterCrop:
374
+ # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
375
+ def __init__(self, size=640):
376
+ super().__init__()
377
+ self.h, self.w = (size, size) if isinstance(size, int) else size
378
+
379
+ def __call__(self, im): # im = np.array HWC
380
+ imh, imw = im.shape[:2]
381
+ m = min(imh, imw) # min dimension
382
+ top, left = (imh - m) // 2, (imw - m) // 2
383
+ return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
384
+
385
+
386
+ class ToTensor:
387
+ # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
388
+ def __init__(self, half=False):
389
+ super().__init__()
390
+ self.half = half
391
+
392
+ def __call__(self, im): # im = np.array HWC in BGR order
393
+ im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous
394
+ im = torch.from_numpy(im) # to torch
395
+ im = im.half() if self.half else im.float() # uint8 to fp16/32
396
+ im /= 255.0 # 0-255 to 0.0-1.0
397
+ return im
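A minimal sketch of the letterbox() resize-and-pad step above, the same preprocessing detect-style inference uses (the image path is illustrative):

import cv2

from utils.augmentations import letterbox

im0 = cv2.imread('image_0.jpg')  # BGR HWC image of any size
im, ratio, (dw, dh) = letterbox(im0, new_shape=640, auto=True, stride=32)
print(im.shape, ratio, (dw, dh))  # aspect ratio preserved, padded up to a stride-32 multiple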
utils/autoanchor.py ADDED
@@ -0,0 +1,169 @@
1
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2
+ """
3
+ AutoAnchor utils
4
+ """
5
+
6
+ import random
7
+
8
+ import numpy as np
9
+ import torch
10
+ import yaml
11
+ from tqdm import tqdm
12
+
13
+ from utils import TryExcept
14
+ from utils.general import LOGGER, TQDM_BAR_FORMAT, colorstr
15
+
16
+ PREFIX = colorstr('AutoAnchor: ')
17
+
18
+
19
+ def check_anchor_order(m):
20
+ # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
21
+ a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer
22
+ da = a[-1] - a[0] # delta a
23
+ ds = m.stride[-1] - m.stride[0] # delta s
24
+ if da and (da.sign() != ds.sign()): # anchor order and stride order disagree
25
+ LOGGER.info(f'{PREFIX}Reversing anchor order')
26
+ m.anchors[:] = m.anchors.flip(0)
27
+
28
+
29
+ @TryExcept(f'{PREFIX}ERROR')
30
+ def check_anchors(dataset, model, thr=4.0, imgsz=640):
31
+ # Check anchor fit to data, recompute if necessary
32
+ m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
33
+ shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
34
+ scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
35
+ wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
36
+
37
+ def metric(k): # compute metric
38
+ r = wh[:, None] / k[None]
39
+ x = torch.min(r, 1 / r).min(2)[0] # ratio metric
40
+ best = x.max(1)[0] # best_x
41
+ aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold
42
+ bpr = (best > 1 / thr).float().mean() # best possible recall
43
+ return bpr, aat
44
+
45
+ stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides
46
+ anchors = m.anchors.clone() * stride # current anchors
47
+ bpr, aat = metric(anchors.cpu().view(-1, 2))
48
+ s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). '
49
+ if bpr > 0.98: # threshold to recompute
50
+ LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅')
51
+ else:
52
+ LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...')
53
+ na = m.anchors.numel() // 2 # number of anchors
54
+ anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
55
+ new_bpr = metric(anchors)[0]
56
+ if new_bpr > bpr: # replace anchors
57
+ anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
58
+ m.anchors[:] = anchors.clone().view_as(m.anchors)
59
+ check_anchor_order(m) # must be in pixel-space (not grid-space)
60
+ m.anchors /= stride
61
+ s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)'
62
+ else:
63
+ s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)'
64
+ LOGGER.info(s)
65
+
66
+
67
+ def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
68
+ """ Creates kmeans-evolved anchors from training dataset
69
+
70
+ Arguments:
71
+ dataset: path to data.yaml, or a loaded dataset
72
+ n: number of anchors
73
+ img_size: image size used for training
74
+ thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
75
+ gen: generations to evolve anchors using genetic algorithm
76
+ verbose: print all results
77
+
78
+ Return:
79
+ k: kmeans evolved anchors
80
+
81
+ Usage:
82
+ from utils.autoanchor import *; _ = kmean_anchors()
83
+ """
84
+ from scipy.cluster.vq import kmeans
85
+
86
+ npr = np.random
87
+ thr = 1 / thr
88
+
89
+ def metric(k, wh): # compute metrics
90
+ r = wh[:, None] / k[None]
91
+ x = torch.min(r, 1 / r).min(2)[0] # ratio metric
92
+ # x = wh_iou(wh, torch.tensor(k)) # iou metric
93
+ return x, x.max(1)[0] # x, best_x
94
+
95
+ def anchor_fitness(k): # mutation fitness
96
+ _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
97
+ return (best * (best > thr).float()).mean() # fitness
98
+
99
+ def print_results(k, verbose=True):
100
+ k = k[np.argsort(k.prod(1))] # sort small to large
101
+ x, best = metric(k, wh0)
102
+ bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
103
+ s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \
104
+ f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \
105
+ f'past_thr={x[x > thr].mean():.3f}-mean: '
106
+ for x in k:
107
+ s += '%i,%i, ' % (round(x[0]), round(x[1]))
108
+ if verbose:
109
+ LOGGER.info(s[:-2])
110
+ return k
111
+
112
+ if isinstance(dataset, str): # *.yaml file
113
+ with open(dataset, errors='ignore') as f:
114
+ data_dict = yaml.safe_load(f) # model dict
115
+ from utils.dataloaders import LoadImagesAndLabels
116
+ dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
117
+
118
+ # Get label wh
119
+ shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
120
+ wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
121
+
122
+ # Filter
123
+ i = (wh0 < 3.0).any(1).sum()
124
+ if i:
125
+ LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size')
126
+ wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels
127
+ # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1
128
+
129
+ # Kmeans init
130
+ try:
131
+ LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...')
132
+ assert n <= len(wh) # apply overdetermined constraint
133
+ s = wh.std(0) # sigmas for whitening
134
+ k = kmeans(wh / s, n, iter=30)[0] * s # points
135
+ assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar
136
+ except Exception:
137
+ LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init')
138
+ k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init
139
+ wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0))
140
+ k = print_results(k, verbose=False)
141
+
142
+ # Plot
143
+ # k, d = [None] * 20, [None] * 20
144
+ # for i in tqdm(range(1, 21)):
145
+ # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
146
+ # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
147
+ # ax = ax.ravel()
148
+ # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
149
+ # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
150
+ # ax[0].hist(wh[wh[:, 0]<100, 0],400)
151
+ # ax[1].hist(wh[wh[:, 1]<100, 1],400)
152
+ # fig.savefig('wh.png', dpi=200)
153
+
154
+ # Evolve
155
+ f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, anchor shape, mutation prob, sigma
156
+ pbar = tqdm(range(gen), bar_format=TQDM_BAR_FORMAT) # progress bar
157
+ for _ in pbar:
158
+ v = np.ones(sh)
159
+ while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
160
+ v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
161
+ kg = (k.copy() * v).clip(min=2.0)
162
+ fg = anchor_fitness(kg)
163
+ if fg > f:
164
+ f, k = fg, kg.copy()
165
+ pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
166
+ if verbose:
167
+ print_results(k, verbose)
168
+
169
+ return print_results(k).astype(np.float32)
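Expanding the docstring's own usage note, a sketch of evolving anchors from a dataset yaml (assumes ./data/coco128.yaml, the function default, exists locally):

from utils.autoanchor import kmean_anchors

k = kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000)
print(k.shape)  # (9, 2) anchor widths/heights in pixels, sorted small to large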
utils/autobatch.py ADDED
@@ -0,0 +1,72 @@
1
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2
+ """
3
+ Auto-batch utils
4
+ """
5
+
6
+ from copy import deepcopy
7
+
8
+ import numpy as np
9
+ import torch
10
+
11
+ from utils.general import LOGGER, colorstr
12
+ from utils.torch_utils import profile
13
+
14
+
15
+ def check_train_batch_size(model, imgsz=640, amp=True):
16
+ # Check YOLOv5 training batch size
17
+ with torch.cuda.amp.autocast(amp):
18
+ return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size
19
+
20
+
21
+ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16):
22
+ # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory
23
+ # Usage:
24
+ # import torch
25
+ # from utils.autobatch import autobatch
26
+ # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
27
+ # print(autobatch(model))
28
+
29
+ # Check device
30
+ prefix = colorstr('AutoBatch: ')
31
+ LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}')
32
+ device = next(model.parameters()).device # get model device
33
+ if device.type == 'cpu':
34
+ LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
35
+ return batch_size
36
+ if torch.backends.cudnn.benchmark:
37
+ LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}')
38
+ return batch_size
39
+
40
+ # Inspect CUDA memory
41
+ gb = 1 << 30 # bytes to GiB (1024 ** 3)
42
+ d = str(device).upper() # 'CUDA:0'
43
+ properties = torch.cuda.get_device_properties(device) # device properties
44
+ t = properties.total_memory / gb # GiB total
45
+ r = torch.cuda.memory_reserved(device) / gb # GiB reserved
46
+ a = torch.cuda.memory_allocated(device) / gb # GiB allocated
47
+ f = t - (r + a) # GiB free
48
+ LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
49
+
50
+ # Profile batch sizes
51
+ batch_sizes = [1, 2, 4, 8, 16]
52
+ try:
53
+ img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
54
+ results = profile(img, model, n=3, device=device)
55
+ except Exception as e:
56
+ LOGGER.warning(f'{prefix}{e}')
+ return batch_size # profiling failed; fall back to default (results is undefined past this point)
57
+
58
+ # Fit a solution
59
+ y = [x[2] for x in results if x] # memory [2]
60
+ p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit
61
+ b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size)
62
+ if None in results: # some sizes failed
63
+ i = results.index(None) # first fail index
64
+ if b >= batch_sizes[i]: # y intercept above failure point
65
+ b = batch_sizes[max(i - 1, 0)] # select prior safe point
66
+ if b < 1 or b > 1024: # b outside of safe range
67
+ b = batch_size
68
+ LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.')
69
+
70
+ fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted
71
+ LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
72
+ return b
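The inline usage comment above expands to this sketch; it assumes a CUDA runtime with torch.backends.cudnn.benchmark=False, otherwise autobatch() just returns the default batch size:

import torch

from utils.autobatch import autobatch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
print(autobatch(model.cuda(), imgsz=640, fraction=0.8))  # batch size targeting ~80% of available CUDA memory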
utils/aws/__init__.py ADDED
File without changes
utils/aws/mime.sh ADDED
@@ -0,0 +1,26 @@
1
+ # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
2
+ # This script will run on every instance restart, not only on first start
3
+ # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
4
+
5
+ Content-Type: multipart/mixed; boundary="//"
6
+ MIME-Version: 1.0
7
+
8
+ --//
9
+ Content-Type: text/cloud-config; charset="us-ascii"
10
+ MIME-Version: 1.0
11
+ Content-Transfer-Encoding: 7bit
12
+ Content-Disposition: attachment; filename="cloud-config.txt"
13
+
14
+ #cloud-config
15
+ cloud_final_modules:
16
+ - [scripts-user, always]
17
+
18
+ --//
19
+ Content-Type: text/x-shellscript; charset="us-ascii"
20
+ MIME-Version: 1.0
21
+ Content-Transfer-Encoding: 7bit
22
+ Content-Disposition: attachment; filename="userdata.txt"
23
+
24
+ #!/bin/bash
25
+ # --- paste contents of userdata.sh here ---
26
+ --//
utils/aws/resume.py ADDED
@@ -0,0 +1,40 @@
1
+ # Resume all interrupted trainings in yolov5/ dir including DDP trainings
2
+ # Usage: $ python utils/aws/resume.py
3
+
4
+ import os
5
+ import sys
6
+ from pathlib import Path
7
+
8
+ import torch
9
+ import yaml
10
+
11
+ FILE = Path(__file__).resolve()
12
+ ROOT = FILE.parents[2] # YOLOv5 root directory
13
+ if str(ROOT) not in sys.path:
14
+ sys.path.append(str(ROOT)) # add ROOT to PATH
15
+
16
+ port = 0 # --master_port
17
+ path = Path('').resolve()
18
+ for last in path.rglob('*/**/last.pt'):
19
+ ckpt = torch.load(last)
20
+ if ckpt['optimizer'] is None:
21
+ continue
22
+
23
+ # Load opt.yaml
24
+ with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
25
+ opt = yaml.safe_load(f)
26
+
27
+ # Get device count
28
+ d = opt['device'].split(',') # devices
29
+ nd = len(d) # number of devices
30
+ ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
31
+
32
+ if ddp: # multi-GPU
33
+ port += 1
34
+ cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
35
+ else: # single-GPU
36
+ cmd = f'python train.py --resume {last}'
37
+
38
+ cmd += ' > /dev/null 2>&1 &' # redirect output to /dev/null and run in background
39
+ print(cmd)
40
+ os.system(cmd)
utils/aws/userdata.sh ADDED
@@ -0,0 +1,27 @@
1
+ #!/bin/bash
2
+ # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
3
+ # This script will run only once on first instance start (for a re-start script see mime.sh)
4
+ # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
5
+ # Use >300 GB SSD
6
+
7
+ cd home/ubuntu
8
+ if [ ! -d yolov5 ]; then
9
+ echo "Running first-time script." # install dependencies, download COCO, pull Docker
10
+ git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5
11
+ cd yolov5
12
+ bash data/scripts/get_coco.sh && echo "COCO done." &
13
+ sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
14
+ python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
15
+ wait && echo "All tasks done." # finish background tasks
16
+ else
17
+ echo "Running re-start script." # resume interrupted runs
18
+ i=0
19
+ list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
20
+ while IFS= read -r id; do
21
+ ((i++))
22
+ echo "restarting container $i: $id"
23
+ sudo docker start $id
24
+ # sudo docker exec -it $id python train.py --resume # single-GPU
25
+ sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
26
+ done <<<"$list"
27
+ fi
utils/callbacks.py ADDED
@@ -0,0 +1,76 @@
1
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2
+ """
3
+ Callback utils
4
+ """
5
+
6
+ import threading
7
+
8
+
9
+ class Callbacks:
10
+ """"
11
+ Handles all registered callbacks for YOLOv5 Hooks
12
+ """
13
+
14
+ def __init__(self):
15
+ # Define the available callbacks
16
+ self._callbacks = {
17
+ 'on_pretrain_routine_start': [],
18
+ 'on_pretrain_routine_end': [],
19
+ 'on_train_start': [],
20
+ 'on_train_epoch_start': [],
21
+ 'on_train_batch_start': [],
22
+ 'optimizer_step': [],
23
+ 'on_before_zero_grad': [],
24
+ 'on_train_batch_end': [],
25
+ 'on_train_epoch_end': [],
26
+ 'on_val_start': [],
27
+ 'on_val_batch_start': [],
28
+ 'on_val_image_end': [],
29
+ 'on_val_batch_end': [],
30
+ 'on_val_end': [],
31
+ 'on_fit_epoch_end': [], # fit = train + val
32
+ 'on_model_save': [],
33
+ 'on_train_end': [],
34
+ 'on_params_update': [],
35
+ 'teardown': [], }
36
+ self.stop_training = False # set True to interrupt training
37
+
38
+ def register_action(self, hook, name='', callback=None):
39
+ """
40
+ Register a new action to a callback hook
41
+
42
+ Args:
43
+ hook: The callback hook name to register the action to
44
+ name: The name of the action for later reference
45
+ callback: The callback to fire
46
+ """
47
+ assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
48
+ assert callable(callback), f"callback '{callback}' is not callable"
49
+ self._callbacks[hook].append({'name': name, 'callback': callback})
50
+
51
+ def get_registered_actions(self, hook=None):
52
+ """"
53
+ Returns all the registered actions by callback hook
54
+
55
+ Args:
56
+ hook: The name of the hook to check, defaults to all
57
+ """
58
+ return self._callbacks[hook] if hook else self._callbacks
59
+
60
+ def run(self, hook, *args, thread=False, **kwargs):
61
+ """
62
+ Loop through the registered actions and fire all callbacks, on the main thread or in daemon threads
63
+
64
+ Args:
65
+ hook: The name of the hook to check, defaults to all
66
+ args: Arguments to receive from YOLOv5
67
+ thread: (boolean) Run callbacks in daemon thread
68
+ kwargs: Keyword Arguments to receive from YOLOv5
69
+ """
70
+
71
+ assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
72
+ for logger in self._callbacks[hook]:
73
+ if thread:
74
+ threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()
75
+ else:
76
+ logger['callback'](*args, **kwargs)
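A small sketch of the Callbacks API above (the action name and callback are illustrative):

from utils.callbacks import Callbacks

callbacks = Callbacks()
callbacks.register_action('on_train_start', name='greet', callback=lambda: print('training started'))
callbacks.run('on_train_start')               # fire on the main thread
callbacks.run('on_train_start', thread=True)  # or fire in a daemon thread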
utils/dataloaders.py ADDED
@@ -0,0 +1,1222 @@
1
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2
+ """
3
+ Dataloaders and dataset utils
4
+ """
5
+
6
+ import contextlib
7
+ import glob
8
+ import hashlib
9
+ import json
10
+ import math
11
+ import os
12
+ import random
13
+ import shutil
14
+ import time
15
+ from itertools import repeat
16
+ from multiprocessing.pool import Pool, ThreadPool
17
+ from pathlib import Path
18
+ from threading import Thread
19
+ from urllib.parse import urlparse
20
+
21
+ import numpy as np
22
+ import psutil
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torchvision
26
+ import yaml
27
+ from PIL import ExifTags, Image, ImageOps
28
+ from torch.utils.data import DataLoader, Dataset, dataloader, distributed
29
+ from tqdm import tqdm
30
+
31
+ from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
32
+ letterbox, mixup, random_perspective)
33
+ from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements,
34
+ check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy,
35
+ xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
36
+ from utils.torch_utils import torch_distributed_zero_first
37
+
38
+ # Parameters
39
+ HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data'
40
+ IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes
41
+ VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
42
+ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
43
+ RANK = int(os.getenv('RANK', -1))
44
+ PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders
45
+
46
+ # Get orientation exif tag
47
+ for orientation in ExifTags.TAGS.keys():
48
+ if ExifTags.TAGS[orientation] == 'Orientation':
49
+ break
50
+
51
+
52
+ def get_hash(paths):
53
+ # Returns a single hash value of a list of paths (files or dirs)
54
+ size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
55
+ h = hashlib.sha256(str(size).encode()) # hash sizes
56
+ h.update(''.join(paths).encode()) # hash paths
57
+ return h.hexdigest() # return hash
58
+
59
+
60
+ def exif_size(img):
61
+ # Returns exif-corrected PIL size
62
+ s = img.size # (width, height)
63
+ with contextlib.suppress(Exception):
64
+ rotation = dict(img._getexif().items())[orientation]
65
+ if rotation in [6, 8]: # rotation 270 or 90
66
+ s = (s[1], s[0])
67
+ return s
68
+
69
+
70
+ def exif_transpose(image):
71
+ """
72
+ Transpose a PIL image accordingly if it has an EXIF Orientation tag.
73
+ Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
74
+
75
+ :param image: The image to transpose.
76
+ :return: An image.
77
+ """
78
+ exif = image.getexif()
79
+ orientation = exif.get(0x0112, 1) # default 1
80
+ if orientation > 1:
81
+ method = {
82
+ 2: Image.FLIP_LEFT_RIGHT,
83
+ 3: Image.ROTATE_180,
84
+ 4: Image.FLIP_TOP_BOTTOM,
85
+ 5: Image.TRANSPOSE,
86
+ 6: Image.ROTATE_270,
87
+ 7: Image.TRANSVERSE,
88
+ 8: Image.ROTATE_90}.get(orientation)
89
+ if method is not None:
90
+ image = image.transpose(method)
91
+ del exif[0x0112]
92
+ image.info['exif'] = exif.tobytes()
93
+ return image
94
+
95
+
96
+ def seed_worker(worker_id):
97
+ # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader
98
+ worker_seed = torch.initial_seed() % 2 ** 32
99
+ np.random.seed(worker_seed)
100
+ random.seed(worker_seed)
101
+
102
+
103
+ def create_dataloader(path,
104
+ imgsz,
105
+ batch_size,
106
+ stride,
107
+ single_cls=False,
108
+ hyp=None,
109
+ augment=False,
110
+ cache=False,
111
+ pad=0.0,
112
+ rect=False,
113
+ rank=-1,
114
+ workers=8,
115
+ image_weights=False,
116
+ quad=False,
117
+ prefix='',
118
+ shuffle=False,
119
+ seed=0):
120
+ if rect and shuffle:
121
+ LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
122
+ shuffle = False
123
+ with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
124
+ dataset = LoadImagesAndLabels(
125
+ path,
126
+ imgsz,
127
+ batch_size,
128
+ augment=augment, # augmentation
129
+ hyp=hyp, # hyperparameters
130
+ rect=rect, # rectangular batches
131
+ cache_images=cache,
132
+ single_cls=single_cls,
133
+ stride=int(stride),
134
+ pad=pad,
135
+ image_weights=image_weights,
136
+ prefix=prefix)
137
+
138
+ batch_size = min(batch_size, len(dataset))
139
+ nd = torch.cuda.device_count() # number of CUDA devices
140
+ nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
141
+ sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
142
+ loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
143
+ generator = torch.Generator()
144
+ generator.manual_seed(6148914691236517205 + seed + RANK)
145
+ return loader(dataset,
146
+ batch_size=batch_size,
147
+ shuffle=shuffle and sampler is None,
148
+ num_workers=nw,
149
+ sampler=sampler,
150
+ pin_memory=PIN_MEMORY,
151
+ collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn,
152
+ worker_init_fn=seed_worker,
153
+ generator=generator), dataset
154
+
155
+
156
+ class InfiniteDataLoader(dataloader.DataLoader):
157
+ """ Dataloader that reuses workers
158
+
159
+ Uses same syntax as vanilla DataLoader
160
+ """
161
+
162
+ def __init__(self, *args, **kwargs):
163
+ super().__init__(*args, **kwargs)
164
+ object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
165
+ self.iterator = super().__iter__()
166
+
167
+ def __len__(self):
168
+ return len(self.batch_sampler.sampler)
169
+
170
+ def __iter__(self):
171
+ for _ in range(len(self)):
172
+ yield next(self.iterator)
173
+
174
+
175
+ class _RepeatSampler:
176
+ """ Sampler that repeats forever
177
+
178
+ Args:
179
+ sampler (Sampler)
180
+ """
181
+
182
+ def __init__(self, sampler):
183
+ self.sampler = sampler
184
+
185
+ def __iter__(self):
186
+ while True:
187
+ yield from iter(self.sampler)
188
+
189
+
190
+ class LoadScreenshots:
191
+ # YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"`
192
+ def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):
193
+ # source = [screen_number left top width height] (pixels)
194
+ check_requirements('mss')
195
+ import mss
196
+
197
+ source, *params = source.split()
198
+ self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0
199
+ if len(params) == 1:
200
+ self.screen = int(params[0])
201
+ elif len(params) == 4:
202
+ left, top, width, height = (int(x) for x in params)
203
+ elif len(params) == 5:
204
+ self.screen, left, top, width, height = (int(x) for x in params)
205
+ self.img_size = img_size
206
+ self.stride = stride
207
+ self.transforms = transforms
208
+ self.auto = auto
209
+ self.mode = 'stream'
210
+ self.frame = 0
211
+ self.sct = mss.mss()
212
+
213
+ # Parse monitor shape
214
+ monitor = self.sct.monitors[self.screen]
215
+ self.top = monitor['top'] if top is None else (monitor['top'] + top)
216
+ self.left = monitor['left'] if left is None else (monitor['left'] + left)
217
+ self.width = width or monitor['width']
218
+ self.height = height or monitor['height']
219
+ self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height}
220
+
221
+ def __iter__(self):
222
+ return self
223
+
224
+ def __next__(self):
225
+ # mss screen capture: get raw pixels from the screen as np array
226
+ im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR
227
+ s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: '
228
+
229
+ if self.transforms:
230
+ im = self.transforms(im0) # transforms
231
+ else:
232
+ im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
233
+ im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
234
+ im = np.ascontiguousarray(im) # contiguous
235
+ self.frame += 1
236
+ return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s
237
+
238
+
239
+ class LoadImages:
240
+ # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
241
+ def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
242
+ if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line
243
+ path = Path(path).read_text().rsplit()
244
+ files = []
245
+ for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
246
+ p = str(Path(p).resolve())
247
+ if '*' in p:
248
+ files.extend(sorted(glob.glob(p, recursive=True))) # glob
249
+ elif os.path.isdir(p):
250
+ files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir
251
+ elif os.path.isfile(p):
252
+ files.append(p) # files
253
+ else:
254
+ raise FileNotFoundError(f'{p} does not exist')
255
+
256
+ images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
257
+ videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
258
+ ni, nv = len(images), len(videos)
259
+
260
+ self.img_size = img_size
261
+ self.stride = stride
262
+ self.files = images + videos
263
+ self.nf = ni + nv # number of files
264
+ self.video_flag = [False] * ni + [True] * nv
265
+ self.mode = 'image'
266
+ self.auto = auto
267
+ self.transforms = transforms # optional
268
+ self.vid_stride = vid_stride # video frame-rate stride
269
+ if any(videos):
270
+ self._new_video(videos[0]) # new video
271
+ else:
272
+ self.cap = None
273
+ assert self.nf > 0, f'No images or videos found in {p}. ' \
274
+ f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
275
+
276
+ def __iter__(self):
277
+ self.count = 0
278
+ return self
279
+
280
+ def __next__(self):
281
+ if self.count == self.nf:
282
+ raise StopIteration
283
+ path = self.files[self.count]
284
+
285
+ if self.video_flag[self.count]:
286
+ # Read video
287
+ self.mode = 'video'
288
+ for _ in range(self.vid_stride):
289
+ self.cap.grab()
290
+ ret_val, im0 = self.cap.retrieve()
291
+ while not ret_val:
292
+ self.count += 1
293
+ self.cap.release()
294
+ if self.count == self.nf: # last video
295
+ raise StopIteration
296
+ path = self.files[self.count]
297
+ self._new_video(path)
298
+ ret_val, im0 = self.cap.read()
299
+
300
+ self.frame += 1
301
+ # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False
302
+ s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
303
+
304
+ else:
305
+ # Read image
306
+ self.count += 1
307
+ im0 = cv2.imread(path) # BGR
308
+ assert im0 is not None, f'Image Not Found {path}'
309
+ s = f'image {self.count}/{self.nf} {path}: '
310
+
311
+ if self.transforms:
312
+ im = self.transforms(im0) # transforms
313
+ else:
314
+ im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
315
+ im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
316
+ im = np.ascontiguousarray(im) # contiguous
317
+
318
+ return path, im, im0, self.cap, s
319
+
320
+ def _new_video(self, path):
321
+ # Create a new video capture object
322
+ self.frame = 0
323
+ self.cap = cv2.VideoCapture(path)
324
+ self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
325
+ self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees
326
+ # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493
327
+
328
+ def _cv2_rotate(self, im):
329
+ # Rotate a cv2 video manually
330
+ if self.orientation == 0:
331
+ return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
332
+ elif self.orientation == 180:
333
+ return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
334
+ elif self.orientation == 90:
335
+ return cv2.rotate(im, cv2.ROTATE_180)
336
+ return im
337
+
338
+ def __len__(self):
339
+ return self.nf # number of files
340
+
341
+
342
+ class LoadStreams:
343
+ # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
344
+ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
345
+ torch.backends.cudnn.benchmark = True # faster for fixed-size inference
346
+ self.mode = 'stream'
347
+ self.img_size = img_size
348
+ self.stride = stride
349
+ self.vid_stride = vid_stride # video frame-rate stride
350
+ sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
351
+ n = len(sources)
352
+ self.sources = [clean_str(x) for x in sources] # clean source names for later
353
+ self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
354
+ for i, s in enumerate(sources): # index, source
355
+ # Start thread to read frames from video stream
356
+ st = f'{i + 1}/{n}: {s}... '
357
+ if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video
358
+ # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'
359
+ check_requirements(('pafy', 'youtube_dl==2020.12.2'))
360
+ import pafy
361
+ s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL
362
+ s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
363
+ if s == 0:
364
+ assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'
365
+ assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.'
366
+ cap = cv2.VideoCapture(s)
367
+ assert cap.isOpened(), f'{st}Failed to open {s}'
368
+ w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
369
+ h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
370
+ fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan
371
+ self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
372
+ self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback
373
+
374
+ _, self.imgs[i] = cap.read() # guarantee first frame
375
+ self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
376
+ LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)')
377
+ self.threads[i].start()
378
+ LOGGER.info('') # newline
379
+
380
+ # check for common shapes
381
+ s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])
382
+ self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
383
+ self.auto = auto and self.rect
384
+ self.transforms = transforms # optional
385
+ if not self.rect:
386
+ LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')
387
+
388
+ def update(self, i, cap, stream):
389
+ # Read stream `i` frames in daemon thread
390
+ n, f = 0, self.frames[i] # frame number, frame array
391
+ while cap.isOpened() and n < f:
392
+ n += 1
393
+ cap.grab() # .read() = .grab() followed by .retrieve()
394
+ if n % self.vid_stride == 0:
395
+ success, im = cap.retrieve()
396
+ if success:
397
+ self.imgs[i] = im
398
+ else:
399
+ LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')
400
+ self.imgs[i] = np.zeros_like(self.imgs[i])
401
+ cap.open(stream) # re-open stream if signal was lost
402
+ time.sleep(0.0) # wait time
403
+
404
+ def __iter__(self):
405
+ self.count = -1
406
+ return self
407
+
408
+ def __next__(self):
409
+ self.count += 1
410
+ if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
411
+ cv2.destroyAllWindows()
412
+ raise StopIteration
413
+
414
+ im0 = self.imgs.copy()
415
+ if self.transforms:
416
+ im = np.stack([self.transforms(x) for x in im0]) # transforms
417
+ else:
418
+ im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize
419
+ im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
420
+ im = np.ascontiguousarray(im) # contiguous
421
+
422
+ return self.sources, im, im0, None, ''
423
+
424
+ def __len__(self):
425
+ return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
426
+
427
+
428
+ def img2label_paths(img_paths):
429
+ # Define label paths as a function of image paths
430
+ sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings
431
+ return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
432
+
433
+
434
+ class LoadImagesAndLabels(Dataset):
435
+ # YOLOv5 train_loader/val_loader, loads images and labels for training and validation
436
+ cache_version = 0.6 # dataset labels *.cache version
437
+ rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]
438
+
439
+ def __init__(self,
440
+ path,
441
+ img_size=640,
442
+ batch_size=16,
443
+ augment=False,
444
+ hyp=None,
445
+ rect=False,
446
+ image_weights=False,
447
+ cache_images=False,
448
+ single_cls=False,
449
+ stride=32,
450
+ pad=0.0,
451
+ min_items=0,
452
+ prefix=''):
453
+ self.img_size = img_size
454
+ self.augment = augment
455
+ self.hyp = hyp
456
+ self.image_weights = image_weights
457
+ self.rect = False if image_weights else rect
458
+ self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
459
+ self.mosaic_border = [-img_size // 2, -img_size // 2]
460
+ self.stride = stride
461
+ self.path = path
462
+ self.albumentations = Albumentations(size=img_size) if augment else None
463
+
464
+ try:
465
+ f = [] # image files
466
+ for p in path if isinstance(path, list) else [path]:
467
+ p = Path(p) # os-agnostic
468
+ if p.is_dir(): # dir
469
+ f += glob.glob(str(p / '**' / '*.*'), recursive=True)
470
+ # f = list(p.rglob('*.*')) # pathlib
471
+ elif p.is_file(): # file
472
+ with open(p) as t:
473
+ t = t.read().strip().splitlines()
474
+ parent = str(p.parent) + os.sep
475
+ f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path
476
+ # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib)
477
+ else:
478
+ raise FileNotFoundError(f'{prefix}{p} does not exist')
479
+ self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
480
+ # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
481
+ assert self.im_files, f'{prefix}No images found'
482
+ except Exception as e:
483
+ raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e
484
+
485
+ # Check cache
486
+ self.label_files = img2label_paths(self.im_files) # labels
487
+ cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
488
+ try:
489
+ cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
490
+ assert cache['version'] == self.cache_version # matches current version
491
+ assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash
492
+ except Exception:
493
+ cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops
494
+
495
+ # Display cache
496
+ nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total
497
+ if exists and LOCAL_RANK in {-1, 0}:
498
+ d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt'
499
+ tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results
500
+ if cache['msgs']:
501
+ LOGGER.info('\n'.join(cache['msgs'])) # display warnings
502
+ assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}'
503
+
504
+ # Read cache
505
+ for k in ('hash', 'version', 'msgs'):  # remove non-label items
+     cache.pop(k)
506
+ labels, shapes, self.segments = zip(*cache.values())
507
+ nl = len(np.concatenate(labels, 0)) # number of labels
508
+ assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}'
509
+ self.labels = list(labels)
510
+ self.shapes = np.array(shapes)
511
+ self.im_files = list(cache.keys()) # update
512
+ self.label_files = img2label_paths(cache.keys()) # update
513
+
514
+ # Filter images
515
+ if min_items:
516
+ include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int)
517
+ LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset')
518
+ self.im_files = [self.im_files[i] for i in include]
519
+ self.label_files = [self.label_files[i] for i in include]
520
+ self.labels = [self.labels[i] for i in include]
521
+ self.segments = [self.segments[i] for i in include]
522
+ self.shapes = self.shapes[include] # wh
523
+
524
+ # Create indices
525
+ n = len(self.shapes) # number of images
526
+ bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
527
+ nb = bi[-1] + 1 # number of batches
528
+ self.batch = bi # batch index of image
529
+ self.n = n
530
+ self.indices = range(n)
531
+
532
+ # Update labels
533
+ include_class = [] # filter labels to include only these classes (optional)
534
+ self.segments = list(self.segments)
535
+ include_class_array = np.array(include_class).reshape(1, -1)
536
+ for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
537
+ if include_class:
538
+ j = (label[:, 0:1] == include_class_array).any(1)
539
+ self.labels[i] = label[j]
540
+ if segment:
541
+ self.segments[i] = [segment[idx] for idx, elem in enumerate(j) if elem]
542
+ if single_cls: # single-class training, merge all classes into 0
543
+ self.labels[i][:, 0] = 0
544
+
545
+ # Rectangular Training
546
+ if self.rect:
547
+ # Sort by aspect ratio
548
+ s = self.shapes # wh
549
+ ar = s[:, 1] / s[:, 0] # aspect ratio
550
+ irect = ar.argsort()
551
+ self.im_files = [self.im_files[i] for i in irect]
552
+ self.label_files = [self.label_files[i] for i in irect]
553
+ self.labels = [self.labels[i] for i in irect]
554
+ self.segments = [self.segments[i] for i in irect]
555
+ self.shapes = s[irect] # wh
556
+ ar = ar[irect]
557
+
558
+ # Set training image shapes
559
+ shapes = [[1, 1]] * nb
560
+ for i in range(nb):
561
+ ari = ar[bi == i]
562
+ mini, maxi = ari.min(), ari.max()
563
+ if maxi < 1:
564
+ shapes[i] = [maxi, 1]
565
+ elif mini > 1:
566
+ shapes[i] = [1, 1 / mini]
567
+
568
+ self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
569
+
570
+ # Cache images into RAM/disk for faster training
571
+ if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix):
572
+ cache_images = False
573
+ self.ims = [None] * n
574
+ self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files]
575
+ if cache_images:
576
+ b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
577
+ self.im_hw0, self.im_hw = [None] * n, [None] * n
578
+ fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image
579
+ results = ThreadPool(NUM_THREADS).imap(fcn, range(n))
580
+ pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0)
581
+ for i, x in pbar:
582
+ if cache_images == 'disk':
583
+ b += self.npy_files[i].stat().st_size
584
+ else: # 'ram'
585
+ self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
586
+ b += self.ims[i].nbytes
587
+ pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})'
588
+ pbar.close()
589
+
590
+ def check_cache_ram(self, safety_margin=0.1, prefix=''):
591
+ # Check image caching requirements vs available memory
592
+ b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes
593
+ n = min(self.n, 30) # extrapolate from 30 random images
594
+ for _ in range(n):
595
+ im = cv2.imread(random.choice(self.im_files)) # sample image
596
+ ratio = self.img_size / max(im.shape[0], im.shape[1]) # img_size / max(h, w) ratio
597
+ b += im.nbytes * ratio ** 2
598
+ mem_required = b * self.n / n # bytes required to cache dataset into RAM
599
+ mem = psutil.virtual_memory()
600
+ cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question
601
+ if not cache:
602
+ LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, '
603
+ f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, '
604
+ f"{'caching images ✅' if cache else 'not caching images ⚠️'}")
605
+ return cache
606
+
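+ # Worked example (sketch, hypothetical numbers): if 30 sampled images average ~1.5 MB after
+ # the resize ratio is applied, b ≈ 45 MB; for n = 118,000 images, mem_required ≈
+ # 45 MB * 118000 / 30 ≈ 177 GB, so with the default 10% safety margin caching needs
+ # roughly 195 GB of free RAM to be enabled.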
607
+ def cache_labels(self, path=Path('./labels.cache'), prefix=''):
608
+ # Cache dataset labels, check images and read shapes
609
+ x = {} # dict
610
+ nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
611
+ desc = f'{prefix}Scanning {path.parent / path.stem}...'
612
+ with Pool(NUM_THREADS) as pool:
613
+ pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))),
614
+ desc=desc,
615
+ total=len(self.im_files),
616
+ bar_format=TQDM_BAR_FORMAT)
617
+ for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
618
+ nm += nm_f
619
+ nf += nf_f
620
+ ne += ne_f
621
+ nc += nc_f
622
+ if im_file:
623
+ x[im_file] = [lb, shape, segments]
624
+ if msg:
625
+ msgs.append(msg)
626
+ pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt'
627
+
628
+ pbar.close()
629
+ if msgs:
630
+ LOGGER.info('\n'.join(msgs))
631
+ if nf == 0:
632
+ LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}')
633
+ x['hash'] = get_hash(self.label_files + self.im_files)
634
+ x['results'] = nf, nm, ne, nc, len(self.im_files)
635
+ x['msgs'] = msgs # warnings
636
+ x['version'] = self.cache_version # cache version
637
+ try:
638
+ np.save(path, x) # save cache for next time
639
+ path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
640
+ LOGGER.info(f'{prefix}New cache created: {path}')
641
+ except Exception as e:
642
+ LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable
643
+ return x
644
+
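+ # The returned cache dict maps each image path to [labels, shape, segments], plus the
+ # bookkeeping keys 'hash', 'results', 'msgs' and 'version' that __init__ pops back out above.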
645
+ def __len__(self):
646
+ return len(self.im_files)
647
+
648
+ # def __iter__(self):
649
+ # self.count = -1
650
+ # print('ran dataset iter')
651
+ # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
652
+ # return self
653
+
654
+ def __getitem__(self, index):
655
+ index = self.indices[index] # linear, shuffled, or image_weights
656
+
657
+ hyp = self.hyp
658
+ mosaic = self.mosaic and random.random() < hyp['mosaic']
659
+ if mosaic:
660
+ # Load mosaic
661
+ img, labels = self.load_mosaic(index)
662
+ shapes = None
663
+
664
+ # MixUp augmentation
665
+ if random.random() < hyp['mixup']:
666
+ img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1)))
667
+
668
+ else:
669
+ # Load image
670
+ img, (h0, w0), (h, w) = self.load_image(index)
671
+
672
+ # Letterbox
673
+ shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
674
+ img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
675
+ shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
676
+
677
+ labels = self.labels[index].copy()
678
+ if labels.size: # normalized xywh to pixel xyxy format
679
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
680
+
681
+ if self.augment:
682
+ img, labels = random_perspective(img,
683
+ labels,
684
+ degrees=hyp['degrees'],
685
+ translate=hyp['translate'],
686
+ scale=hyp['scale'],
687
+ shear=hyp['shear'],
688
+ perspective=hyp['perspective'])
689
+
690
+ nl = len(labels) # number of labels
691
+ if nl:
692
+ labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
693
+
694
+ if self.augment:
695
+ # Albumentations
696
+ img, labels = self.albumentations(img, labels)
697
+ nl = len(labels) # update after albumentations
698
+
699
+ # HSV color-space
700
+ augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
701
+
702
+ # Flip up-down
703
+ if random.random() < hyp['flipud']:
704
+ img = np.flipud(img)
705
+ if nl:
706
+ labels[:, 2] = 1 - labels[:, 2]
707
+
708
+ # Flip left-right
709
+ if random.random() < hyp['fliplr']:
710
+ img = np.fliplr(img)
711
+ if nl:
712
+ labels[:, 1] = 1 - labels[:, 1]
713
+
714
+ # Cutouts
715
+ # labels = cutout(img, labels, p=0.5)
716
+ # nl = len(labels) # update after cutout
717
+
718
+ labels_out = torch.zeros((nl, 6))
719
+ if nl:
720
+ labels_out[:, 1:] = torch.from_numpy(labels)
721
+
722
+ # Convert
723
+ img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
724
+ img = np.ascontiguousarray(img)
725
+
726
+ return torch.from_numpy(img), labels_out, self.im_files[index], shapes
727
+
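+ # Output contract (sketch): __getitem__ returns a uint8 CHW RGB image tensor, labels_out of
+ # shape (nl, 6) laid out as (batch_index, class, x, y, w, h) with xywh normalized to [0, 1]
+ # (batch_index is filled in later by collate_fn), the image path, and the letterbox shapes
+ # (None under mosaic).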
728
+ def load_image(self, i):
729
+ # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)
730
+ im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
731
+ if im is None: # not cached in RAM
732
+ if fn.exists(): # load npy
733
+ im = np.load(fn)
734
+ else: # read image
735
+ im = cv2.imread(f) # BGR
736
+ assert im is not None, f'Image Not Found {f}'
737
+ h0, w0 = im.shape[:2] # orig hw
738
+ r = self.img_size / max(h0, w0) # ratio
739
+ if r != 1: # if sizes are not equal
740
+ interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA
741
+ im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp)
742
+ return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
743
+ return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized
744
+
745
+ def cache_images_to_disk(self, i):
746
+ # Saves an image as an *.npy file for faster loading
747
+ f = self.npy_files[i]
748
+ if not f.exists():
749
+ np.save(f.as_posix(), cv2.imread(self.im_files[i]))
750
+
751
+ def load_mosaic(self, index):
752
+ # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
753
+ labels4, segments4 = [], []
754
+ s = self.img_size
755
+ yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
756
+ indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
757
+ random.shuffle(indices)
758
+ for i, index in enumerate(indices):
759
+ # Load image
760
+ img, _, (h, w) = self.load_image(index)
761
+
762
+ # place img in img4
763
+ if i == 0: # top left
764
+ img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
765
+ x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
766
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
767
+ elif i == 1: # top right
768
+ x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
769
+ x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
770
+ elif i == 2: # bottom left
771
+ x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
772
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
773
+ elif i == 3: # bottom right
774
+ x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
775
+ x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
776
+
777
+ img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
778
+ padw = x1a - x1b
779
+ padh = y1a - y1b
780
+
781
+ # Labels
782
+ labels, segments = self.labels[index].copy(), self.segments[index].copy()
783
+ if labels.size:
784
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
785
+ segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
786
+ labels4.append(labels)
787
+ segments4.extend(segments)
788
+
789
+ # Concat/clip labels
790
+ labels4 = np.concatenate(labels4, 0)
791
+ for x in (labels4[:, 1:], *segments4):
792
+ np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
793
+ # img4, labels4 = replicate(img4, labels4) # replicate
794
+
795
+ # Augment
796
+ img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
797
+ img4, labels4 = random_perspective(img4,
798
+ labels4,
799
+ segments4,
800
+ degrees=self.hyp['degrees'],
801
+ translate=self.hyp['translate'],
802
+ scale=self.hyp['scale'],
803
+ shear=self.hyp['shear'],
804
+ perspective=self.hyp['perspective'],
805
+ border=self.mosaic_border) # border to remove
806
+
807
+ return img4, labels4
808
+
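+ # Mosaic geometry (sketch): four images are tiled around a random center (xc, yc) on a
+ # 2s x 2s canvas filled with gray value 114; labels are clipped to [0, 2s], and
+ # random_perspective() then crops back to s x s via the mosaic_border argument.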
809
+ def load_mosaic9(self, index):
810
+ # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
811
+ labels9, segments9 = [], []
812
+ s = self.img_size
813
+ indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
814
+ random.shuffle(indices)
815
+ hp, wp = -1, -1 # height, width previous
816
+ for i, index in enumerate(indices):
817
+ # Load image
818
+ img, _, (h, w) = self.load_image(index)
819
+
820
+ # place img in img9
821
+ if i == 0: # center
822
+ img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 9 tiles
823
+ h0, w0 = h, w
824
+ c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
825
+ elif i == 1: # top
826
+ c = s, s - h, s + w, s
827
+ elif i == 2: # top right
828
+ c = s + wp, s - h, s + wp + w, s
829
+ elif i == 3: # right
830
+ c = s + w0, s, s + w0 + w, s + h
831
+ elif i == 4: # bottom right
832
+ c = s + w0, s + hp, s + w0 + w, s + hp + h
833
+ elif i == 5: # bottom
834
+ c = s + w0 - w, s + h0, s + w0, s + h0 + h
835
+ elif i == 6: # bottom left
836
+ c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
837
+ elif i == 7: # left
838
+ c = s - w, s + h0 - h, s, s + h0
839
+ elif i == 8: # top left
840
+ c = s - w, s + h0 - hp - h, s, s + h0 - hp
841
+
842
+ padx, pady = c[:2]
843
+ x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
844
+
845
+ # Labels
846
+ labels, segments = self.labels[index].copy(), self.segments[index].copy()
847
+ if labels.size:
848
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
849
+ segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
850
+ labels9.append(labels)
851
+ segments9.extend(segments)
852
+
853
+ # Image
854
+ img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
855
+ hp, wp = h, w # height, width previous
856
+
857
+ # Offset
858
+ yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
859
+ img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
860
+
861
+ # Concat/clip labels
862
+ labels9 = np.concatenate(labels9, 0)
863
+ labels9[:, [1, 3]] -= xc
864
+ labels9[:, [2, 4]] -= yc
865
+ c = np.array([xc, yc]) # centers
866
+ segments9 = [x - c for x in segments9]
867
+
868
+ for x in (labels9[:, 1:], *segments9):
869
+ np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
870
+ # img9, labels9 = replicate(img9, labels9) # replicate
871
+
872
+ # Augment
873
+ img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste'])
874
+ img9, labels9 = random_perspective(img9,
875
+ labels9,
876
+ segments9,
877
+ degrees=self.hyp['degrees'],
878
+ translate=self.hyp['translate'],
879
+ scale=self.hyp['scale'],
880
+ shear=self.hyp['shear'],
881
+ perspective=self.hyp['perspective'],
882
+ border=self.mosaic_border) # border to remove
883
+
884
+ return img9, labels9
885
+
886
+ @staticmethod
887
+ def collate_fn(batch):
888
+ im, label, path, shapes = zip(*batch) # transposed
889
+ for i, lb in enumerate(label):
890
+ lb[:, 0] = i # add target image index for build_targets()
891
+ return torch.stack(im, 0), torch.cat(label, 0), path, shapes
892
+
893
+ @staticmethod
894
+ def collate_fn4(batch):
895
+ im, label, path, shapes = zip(*batch) # transposed
896
+ n = len(shapes) // 4
897
+ im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
898
+
899
+ ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
900
+ wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
901
+ s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale
902
+ for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
903
+ i *= 4
904
+ if random.random() < 0.5:
905
+ im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear',
906
+ align_corners=False)[0].type(im[i].type())
907
+ lb = label[i]
908
+ else:
909
+ im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2)
910
+ lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
911
+ im4.append(im1)
912
+ label4.append(lb)
913
+
914
+ for i, lb in enumerate(label4):
915
+ lb[:, 0] = i # add target image index for build_targets()
916
+
917
+ return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4
918
+
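+ # Usage sketch (paths hypothetical; assumes parallel images/ and labels/ trees):
+ #   dataset = LoadImagesAndLabels('datasets/coco128/images/train2017', img_size=640)
+ #   loader = torch.utils.data.DataLoader(dataset, batch_size=16,
+ #                                        collate_fn=LoadImagesAndLabels.collate_fn)
+ #   im, targets, paths, shapes = next(iter(loader))  # im: uint8 (16, 3, h, w), targets: (n, 6)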
919
+
920
+ # Ancillary functions --------------------------------------------------------------------------------------------------
921
+ def flatten_recursive(path=DATASETS_DIR / 'coco128'):
922
+ # Flatten a recursive directory by bringing all files to top level
923
+ new_path = Path(f'{str(path)}_flat')
924
+ if os.path.exists(new_path):
925
+ shutil.rmtree(new_path) # delete output folder
926
+ os.makedirs(new_path) # make new output folder
927
+ for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)):
928
+ shutil.copyfile(file, new_path / Path(file).name)
929
+
930
+
931
+ def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes()
932
+ # Convert detection dataset into classification dataset, with one directory per class
933
+ path = Path(path) # images dir
934
+ if (path / 'classifier').is_dir():
+     shutil.rmtree(path / 'classifier')  # remove existing output folder (crops are written to path / 'classifier' below)
935
+ files = list(path.rglob('*.*'))
936
+ n = len(files) # number of files
937
+ for im_file in tqdm(files, total=n):
938
+ if im_file.suffix[1:] in IMG_FORMATS:
939
+ # image
940
+ im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
941
+ h, w = im.shape[:2]
942
+
943
+ # labels
944
+ lb_file = Path(img2label_paths([str(im_file)])[0])
945
+ if lb_file.exists():
946
+ with open(lb_file) as f:
947
+ lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
948
+
949
+ for j, x in enumerate(lb):
950
+ c = int(x[0]) # class
951
+ f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
952
+ if not f.parent.is_dir():
953
+ f.parent.mkdir(parents=True)
954
+
955
+ b = x[1:] * [w, h, w, h] # box
956
+ # b[2:] = b[2:].max() # rectangle to square
957
+ b[2:] = b[2:] * 1.2 + 3 # pad box wh by 20% plus 3 pixels
958
+ b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
959
+
960
+ b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
961
+ b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
962
+ assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
963
+
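+ # Padding arithmetic above (sketch): for a 640 x 640 image and a normalized box
+ # (0.5, 0.5, 0.2, 0.2), b = (320, 320, 128, 128); padded wh = 128 * 1.2 + 3 ≈ 157 px,
+ # after which the box is converted to xyxy and clipped to the image before cropping.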
964
+
965
+ def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
966
+ """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
967
+ Usage: from utils.dataloaders import *; autosplit()
968
+ Arguments
969
+ path: Path to images directory
970
+ weights: Train, val, test weights (list, tuple)
971
+ annotated_only: Only use images with an annotated txt file
972
+ """
973
+ path = Path(path) # images dir
974
+ files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
975
+ n = len(files) # number of files
976
+ random.seed(0) # for reproducibility
977
+ indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
978
+
979
+ txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
980
+ for x in txt:
981
+ if (path.parent / x).exists():
982
+ (path.parent / x).unlink() # remove existing
983
+
984
+ print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
985
+ for i, img in tqdm(zip(indices, files), total=n):
986
+ if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
987
+ with open(path.parent / txt[i], 'a') as f:
988
+ f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file
989
+
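+ # With the default weights=(0.9, 0.1, 0.0), roughly 90% of images land in autosplit_train.txt,
+ # 10% in autosplit_val.txt and none in autosplit_test.txt; random.seed(0) keeps the assignment
+ # reproducible across runs.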
990
+
991
+ def verify_image_label(args):
992
+ # Verify one image-label pair
993
+ im_file, lb_file, prefix = args
994
+ nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
995
+ try:
996
+ # verify images
997
+ im = Image.open(im_file)
998
+ im.verify() # PIL verify
999
+ shape = exif_size(im) # image size
1000
+ assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
1001
+ assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
1002
+ if im.format.lower() in ('jpg', 'jpeg'):
1003
+ with open(im_file, 'rb') as f:
1004
+ f.seek(-2, 2)
1005
+ if f.read() != b'\xff\xd9': # corrupt JPEG
1006
+ ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
1007
+ msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved'
1008
+
1009
+ # verify labels
1010
+ if os.path.isfile(lb_file):
1011
+ nf = 1 # label found
1012
+ with open(lb_file) as f:
1013
+ lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
1014
+ if any(len(x) > 6 for x in lb): # is segment
1015
+ classes = np.array([x[0] for x in lb], dtype=np.float32)
1016
+ segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
1017
+ lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
1018
+ lb = np.array(lb, dtype=np.float32)
1019
+ nl = len(lb)
1020
+ if nl:
1021
+ assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
1022
+ assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
1023
+ assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
1024
+ _, i = np.unique(lb, axis=0, return_index=True)
1025
+ if len(i) < nl: # duplicate row check
1026
+ lb = lb[i] # remove duplicates
1027
+ if segments:
1028
+ segments = [segments[x] for x in i]
1029
+ msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed'
1030
+ else:
1031
+ ne = 1 # label empty
1032
+ lb = np.zeros((0, 5), dtype=np.float32)
1033
+ else:
1034
+ nm = 1 # label missing
1035
+ lb = np.zeros((0, 5), dtype=np.float32)
1036
+ return im_file, lb, shape, segments, nm, nf, ne, nc, msg
1037
+ except Exception as e:
1038
+ nc = 1
1039
+ msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}'
1040
+ return [None, None, None, None, nm, nf, ne, nc, msg]
1041
+
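+ # Return convention (sketch): on success, (im_file, lb, shape, segments, nm, nf, ne, nc, msg),
+ # where nm/nf/ne/nc are 0/1 counters (note an empty label file sets both nf and ne); on a
+ # corrupt pair the first four slots are None and nc == 1, so cache_labels() can sum the
+ # counters either way.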
1042
+
1043
+ class HUBDatasetStats:
1044
+ """ Class for generating HUB dataset JSON and `-hub` dataset directory
1045
+
1046
+ Arguments
1047
+ path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
1048
+ autodownload: Attempt to download dataset if not found locally
1049
+
1050
+ Usage
1051
+ from utils.dataloaders import HUBDatasetStats
1052
+ stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1
1053
+ stats = HUBDatasetStats('path/to/coco128.zip') # usage 2
1054
+ stats.get_json(save=False)
1055
+ stats.process_images()
1056
+ """
1057
+
1058
+ def __init__(self, path='coco128.yaml', autodownload=False):
1059
+ # Initialize class
1060
+ zipped, data_dir, yaml_path = self._unzip(Path(path))
1061
+ try:
1062
+ with open(check_yaml(yaml_path), errors='ignore') as f:
1063
+ data = yaml.safe_load(f) # data dict
1064
+ if zipped:
1065
+ data['path'] = data_dir
1066
+ except Exception as e:
1067
+ raise Exception('error/HUB/dataset_stats/yaml_load') from e
1068
+
1069
+ check_dataset(data, autodownload) # download dataset if missing
1070
+ self.hub_dir = Path(data['path'] + '-hub')
1071
+ self.im_dir = self.hub_dir / 'images'
1072
+ self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images
1073
+ self.stats = {'nc': data['nc'], 'names': list(data['names'].values())} # statistics dictionary
1074
+ self.data = data
1075
+
1076
+ @staticmethod
1077
+ def _find_yaml(dir):
1078
+ # Return data.yaml file
1079
+ files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive
1080
+ assert files, f'No *.yaml file found in {dir}'
1081
+ if len(files) > 1:
1082
+ files = [f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name
1083
+ assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed'
1084
+ assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}'
1085
+ return files[0]
1086
+
1087
+ def _unzip(self, path):
1088
+ # Unzip data.zip
1089
+ if not str(path).endswith('.zip'): # path is data.yaml
1090
+ return False, None, path
1091
+ assert Path(path).is_file(), f'Error unzipping {path}, file not found'
1092
+ unzip_file(path, path=path.parent)
1093
+ dir = path.with_suffix('') # dataset directory == zip name
1094
+ assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/'
1095
+ return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path
1096
+
1097
+ def _hub_ops(self, f, max_dim=1920):
1098
+ # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
1099
+ f_new = self.im_dir / Path(f).name # dataset-hub image filename
1100
+ try: # use PIL
1101
+ im = Image.open(f)
1102
+ r = max_dim / max(im.height, im.width) # ratio
1103
+ if r < 1.0: # image too large
1104
+ im = im.resize((int(im.width * r), int(im.height * r)))
1105
+ im.save(f_new, 'JPEG', quality=50, optimize=True) # save
1106
+ except Exception as e: # use OpenCV
1107
+ LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}')
1108
+ im = cv2.imread(f)
1109
+ im_height, im_width = im.shape[:2]
1110
+ r = max_dim / max(im_height, im_width) # ratio
1111
+ if r < 1.0: # image too large
1112
+ im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA)
1113
+ cv2.imwrite(str(f_new), im)
1114
+
1115
+ def get_json(self, save=False, verbose=False):
1116
+ # Return dataset JSON for Ultralytics HUB
1117
+ def _round(labels):
1118
+ # Update labels to integer class and 4 decimal place floats
1119
+ return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
1120
+
1121
+ for split in 'train', 'val', 'test':
1122
+ if self.data.get(split) is None:
1123
+ self.stats[split] = None # i.e. no test set
1124
+ continue
1125
+ dataset = LoadImagesAndLabels(self.data[split]) # load dataset
1126
+ x = np.array([
1127
+ np.bincount(label[:, 0].astype(int), minlength=self.data['nc'])
1128
+ for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')]) # shape(128x80)
1129
+ self.stats[split] = {
1130
+ 'instance_stats': {
1131
+ 'total': int(x.sum()),
1132
+ 'per_class': x.sum(0).tolist()},
1133
+ 'image_stats': {
1134
+ 'total': dataset.n,
1135
+ 'unlabelled': int(np.all(x == 0, 1).sum()),
1136
+ 'per_class': (x > 0).sum(0).tolist()},
1137
+ 'labels': [{
1138
+ str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]}
1139
+
1140
+ # Save, print and return
1141
+ if save:
1142
+ stats_path = self.hub_dir / 'stats.json'
1143
+ print(f'Saving {stats_path.resolve()}...')
1144
+ with open(stats_path, 'w') as f:
1145
+ json.dump(self.stats, f) # save stats.json
1146
+ if verbose:
1147
+ print(json.dumps(self.stats, indent=2, sort_keys=False))
1148
+ return self.stats
1149
+
1150
+ def process_images(self):
1151
+ # Compress images for Ultralytics HUB
1152
+ for split in 'train', 'val', 'test':
1153
+ if self.data.get(split) is None:
1154
+ continue
1155
+ dataset = LoadImagesAndLabels(self.data[split]) # load dataset
1156
+ desc = f'{split} images'
1157
+ for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc):
1158
+ pass
1159
+ print(f'Done. All images saved to {self.im_dir}')
1160
+ return self.im_dir
1161
+
1162
+
1163
+ # Classification dataloaders -------------------------------------------------------------------------------------------
1164
+ class ClassificationDataset(torchvision.datasets.ImageFolder):
1165
+ """
1166
+ YOLOv5 Classification Dataset.
1167
+ Arguments
1168
+ root: Dataset path
1169
+ augment: Apply Albumentations transforms if True (falls back to torchvision transforms otherwise)
+ imgsz: Target image size
+ cache: Cache images in RAM (True or 'ram') or on disk ('disk')
1171
+ """
1172
+
1173
+ def __init__(self, root, augment, imgsz, cache=False):
1174
+ super().__init__(root=root)
1175
+ self.torch_transforms = classify_transforms(imgsz)
1176
+ self.album_transforms = classify_albumentations(augment, imgsz) if augment else None
1177
+ self.cache_ram = cache is True or cache == 'ram'
1178
+ self.cache_disk = cache == 'disk'
1179
+ self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im
1180
+
1181
+ def __getitem__(self, i):
1182
+ f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image
1183
+ if self.cache_ram:
+     if im is None:  # cache miss: read once and store; a hit reuses the image unpacked above
+         im = self.samples[i][3] = cv2.imread(f)
1185
+ elif self.cache_disk:
1186
+ if not fn.exists(): # load npy
1187
+ np.save(fn.as_posix(), cv2.imread(f))
1188
+ im = np.load(fn)
1189
+ else: # read image
1190
+ im = cv2.imread(f) # BGR
1191
+ if self.album_transforms:
1192
+ sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image']
1193
+ else:
1194
+ sample = self.torch_transforms(im)
1195
+ return sample, j
1196
+
1197
+
1198
+ def create_classification_dataloader(path,
1199
+ imgsz=224,
1200
+ batch_size=16,
1201
+ augment=True,
1202
+ cache=False,
1203
+ rank=-1,
1204
+ workers=8,
1205
+ shuffle=True):
1206
+ # Returns Dataloader object to be used with YOLOv5 Classifier
1207
+ with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
1208
+ dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache)
1209
+ batch_size = min(batch_size, len(dataset))
1210
+ nd = torch.cuda.device_count()
1211
+ nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])
1212
+ sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
1213
+ generator = torch.Generator()
1214
+ generator.manual_seed(6148914691236517205 + RANK)
1215
+ return InfiniteDataLoader(dataset,
1216
+ batch_size=batch_size,
1217
+ shuffle=shuffle and sampler is None,
1218
+ num_workers=nw,
1219
+ sampler=sampler,
1220
+ pin_memory=PIN_MEMORY,
1221
+ worker_init_fn=seed_worker,
1222
+ generator=generator) # or DataLoader(persistent_workers=True)
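+ # Usage sketch (path hypothetical; expects an ImageFolder-style tree of one subdir per class):
+ #   loader = create_classification_dataloader('datasets/imagenette/train', imgsz=224,
+ #                                             batch_size=64, augment=True)
+ #   images, labels = next(iter(loader))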
utils/docker/Dockerfile ADDED
@@ -0,0 +1,73 @@
1
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
2
+ # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5
3
+ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference
4
+
5
+ # Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch
6
+ FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime
7
+
8
+ # Downloads to user config dir
9
+ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
10
+
11
+ # Install linux packages
12
+ ENV DEBIAN_FRONTEND noninteractive
13
+ RUN apt update
14
+ RUN TZ=Etc/UTC apt install -y tzdata
15
+ RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg
16
+ # RUN alias python=python3
17
+
18
+ # Security updates
19
+ # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796
20
+ RUN apt upgrade --no-install-recommends -y openssl
21
+
22
+ # Create working directory
23
+ RUN rm -rf /usr/src/app && mkdir -p /usr/src/app
24
+ WORKDIR /usr/src/app
25
+
26
+ # Copy contents
27
+ COPY . /usr/src/app
28
+
29
+ # Install pip packages
30
+ COPY requirements.txt .
31
+ RUN python3 -m pip install --upgrade pip wheel
32
+ RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \
33
+ coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0'
34
+ # tensorflow tensorflowjs
35
+
36
+ # Set environment variables
37
+ ENV OMP_NUM_THREADS=1
38
+
39
+ # Cleanup
40
+ ENV DEBIAN_FRONTEND teletype
41
+
42
+
43
+ # Usage Examples -------------------------------------------------------------------------------------------------------
44
+
45
+ # Build and Push
46
+ # t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t
47
+
48
+ # Pull and Run
49
+ # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
50
+
51
+ # Pull and Run with local directory access
52
+ # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t
53
+
54
+ # Kill all
55
+ # sudo docker kill $(sudo docker ps -q)
56
+
57
+ # Kill all image-based
58
+ # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)
59
+
60
+ # DockerHub tag update
61
+ # t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew
62
+
63
+ # Clean up
64
+ # sudo docker system prune -a --volumes
65
+
66
+ # Update Ubuntu drivers
67
+ # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/
68
+
69
+ # DDP test
70
+ # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
71
+
72
+ # GCP VM from Image
73
+ # docker.io/ultralytics/yolov5:latest