mlbench123 committed (verified)
Commit 1f0d4f2 · 1 Parent(s): 861107b

Upload 10 files

Files changed (9):
  1. augmentations.py +581 -0
  2. common.py +929 -0
  3. datasets.py +1841 -0
  4. defomable_conv.py +117 -0
  5. downloads.py +150 -0
  6. experimental.py +127 -0
  7. general.py +876 -0
  8. metrics.py +335 -0
  9. plots.py +525 -0
augmentations.py ADDED
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Image augmentation functions
"""

import logging
import math
import random

import cv2
import numpy as np
import torch

from general import LOGGER, check_version, colorstr, resample_segments, segment2box, xyxy2xywh as xyxy2cxcywh, clip_coords
from metrics import bbox_ioa, box_iou


class Albumentations:
    # YOLOv5 Albumentations class (optional, only used if package is installed)
    def __init__(self):
        self.transform = None
        try:
            import albumentations as A
            check_version(A.__version__, '1.0.3', hard=True)  # version requirement

            self.transform = A.Compose([
                A.Blur(p=0.01),
                A.MedianBlur(p=0.3),
                A.ToGray(p=0.01),
                A.CLAHE(p=0.3),
                A.RandomBrightnessContrast(p=0.3),
                A.RandomGamma(p=0.0),
                A.ImageCompression(quality_lower=75, p=0.0)],
                # bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])  # box transforms disabled
            )

            logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
        except ImportError:  # package not installed, skip
            pass
        except Exception as e:
            logging.info(colorstr('albumentations: ') + f'{e}')

    def __call__(self, im, labels, p=1.0):
        # Pixel-level transforms only; bbox_params is disabled above, so labels pass through unchanged
        if self.transform and random.random() < p:
            new = self.transform(image=im)  # transformed
            im = new['image']
        return im, labels

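# Usage sketch (illustrative): the wrapper degrades to a no-op when albumentations is
# not installed, and labels pass through untouched because only pixel-level transforms
# are enabled (bbox_params is commented out above):
#   aug = Albumentations()
#   im = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
#   labels = np.zeros((0, 5), dtype=np.float32)  # rows of (cls, x, y, w, h)
#   im, labels = aug(im, labels, p=1.0)
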
class AlbumentationsTemporal:
    # Clip-level Albumentations wrapper: applies one transform to every frame via
    # albumentations' additional_targets mechanism (optional, only used if package is installed)
    def __init__(self, num_frames):
        self.transform = None
        self.num_frames = num_frames
        try:
            import albumentations as A
            check_version(A.__version__, '1.0.3', hard=True)  # version requirement
            additional_targets = {f'image{i}': 'image' for i in range(1, num_frames)}  # extra frame keys
            self.transform = A.Compose([
                A.Blur(p=0.01),
                A.MedianBlur(p=0.3),
                A.ToGray(p=0.01),
                A.CLAHE(p=0.3),
                A.RandomBrightnessContrast(p=0.3),
                A.RandomGamma(p=0.0),
                A.ImageCompression(quality_lower=75, p=0.0)],
                # bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']),
                # additional_targets=additional_targets  # must be enabled for the image1..imageN keys below to be accepted
            )

            logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
        except ImportError:  # package not installed, skip
            pass
        except Exception as e:
            logging.info(colorstr('albumentations: ') + f'{e}')

        # Build the multi-frame call "self.transform(image=ims[0], image1=ims[1], ...)" as a string
        self.transformation_expression = "self.transform(image=ims[0], "
        for ti in range(1, self.num_frames):
            self.transformation_expression += f"image{ti}=ims[{ti}], "
        self.transformation_expression += ")"
        # self.transformation_expression += "bboxes=labels[:, 1:], class_labels=labels[:, 0])"

    def __call__(self, ims, labels, p=1.0):
        if self.transform and random.random() < p:
            try:
                new = eval(self.transformation_expression)  # transformed
            except Exception as e:
                LOGGER.critical(f"Error occurred {self.transformation_expression}, {labels[:, 1:]}, {str(e)}")
                exit()
            ims = [new['image']] + [new[f'image{ti}'] for ti in range(1, self.num_frames)]
            ims = np.stack(ims, 0)  # T x H x W x C
        return ims, labels

def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
    # HSV color-space augmentation
    if hgain or sgain or vgain:
        r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
        hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
        dtype = im.dtype  # uint8

        x = np.arange(0, 256, dtype=r.dtype)
        lut_hue = ((x * r[0]) % 180).astype(dtype)
        lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
        lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

        im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
        cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im)  # no return needed


def augment_hsv_temporal(im, hgain=0.5, sgain=0.5, vgain=0.5, frame_wise_aug=False):
    # HSV color-space augmentation over a T x H x W x C clip; gains are shared across
    # frames unless frame_wise_aug is set, in which case each frame draws new gains
    if hgain or sgain or vgain:
        r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
        dtype = im.dtype  # uint8
        x = np.arange(0, 256, dtype=r.dtype)
        lut_hue = ((x * r[0]) % 180).astype(dtype)
        lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
        lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
        for ti in range(len(im)):
            if frame_wise_aug:
                r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # new random gains per frame
                lut_hue = ((x * r[0]) % 180).astype(dtype)
                lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
                lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
            hue, sat, val = cv2.split(cv2.cvtColor(im[ti], cv2.COLOR_BGR2HSV))
            im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
            cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im[ti])

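# Usage sketch (illustrative): the augmentation is in place and works through 256-entry
# lookup tables — hue wraps modulo 180 (OpenCV hue range), saturation/value clip to 255:
#   im = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # any BGR uint8 image
#   augment_hsv(im, hgain=0.015, sgain=0.7, vgain=0.4)  # YOLOv5-style default gains
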
def hist_equalize(im, clahe=True, bgr=False):
    # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255
    yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
    if clahe:
        c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        yuv[:, :, 0] = c.apply(yuv[:, :, 0])
    else:
        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)  # convert YUV image to RGB


def replicate(im, labels):
    # Replicate labels
    h, w = im.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    s = ((x2 - x1) + (y2 - y1)) / 2  # side length (pixels)
    for i in s.argsort()[:round(s.size * 0.5)]:  # smallest indices
        x1b, y1b, x2b, y2b = boxes[i]
        bh, bw = y2b - y1b, x2b - x1b
        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # offset x, y
        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
        im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b]  # im4[ymin:ymax, xmin:xmax]
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)

    return im, labels

def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im, ratio, (dw, dh)

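# Usage sketch (illustrative): with auto=True the padded sides come out as stride
# multiples, and ratio/(dw, dh) let a caller map labels into the padded frame:
#   im0 = np.full((480, 640, 3), 200, dtype=np.uint8)
#   im, ratio, (dw, dh) = letterbox(im0, new_shape=640, auto=True, stride=32)
#   assert im.shape[0] % 32 == 0 and im.shape[1] % 32 == 0
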
def letterbox_temporal(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad every frame of a clip while meeting stride-multiple constraints
    shape = im[0].shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        for ti in range(len(im)):
            im[ti] = cv2.resize(im[ti], new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    for ti in range(len(im)):
        im[ti] = cv2.copyMakeBorder(im[ti], top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im, ratio, (dw, dh)

def random_perspective_temporal(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
                                border=(0, 0), frame_wise_aug=False):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy] per frame, shaped n_instance x T x 5
    if frame_wise_aug:
        # Augment each frame independently, then re-pack the per-frame labels into a
        # common max_n x T x enddim array (zero-padded where instances were dropped)
        max_n = -1
        _, t, enddim = targets.shape
        new_images, new_labels = [], []
        for ii in range(t):
            label_ = targets[:, ii, :]
            image, label_ = random_perspective(im[ii], label_, segments=segments, degrees=degrees, translate=translate, scale=scale, shear=shear, perspective=perspective, border=border)
            new_images.append(image)
            new_labels.append(label_)  # n x 5
            max_n = len(label_) if len(label_) > max_n else max_n

        new_labels_ = np.zeros((max_n, t, enddim), dtype=np.float32)
        for ti, label_ in enumerate(new_labels):
            n, enddim = label_.shape
            new_labels_[:n, ti, :] = label_
        new_images = np.stack(new_images, 0)
        return new_images, new_labels_

    t, h, w, c = im.shape
    height = h + border[0] * 2  # shape(h,w,c)
    width = w + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -w / 2  # x translation (pixels)
    C[1, 2] = -h / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    s = random.uniform(1 - scale, 1 + scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        new_images = []
        if perspective:
            for ii in range(len(im)):
                new_images.append(cv2.warpPerspective(im[ii], M, dsize=(width, height), borderValue=(114, 114, 114)))
        else:  # affine
            for ii in range(len(im)):
                new_images.append(cv2.warpAffine(im[ii], M[:2], dsize=(width, height), borderValue=(114, 114, 114)))
        new_images = np.stack(new_images, 0)
        assert len(new_images.shape) == 4
        im = new_images

    # Transform label coordinates
    n_instance, t, enddim = targets.shape
    targets = targets.reshape(n_instance * t, enddim)
    n = len(targets)
    if n:
        # segments not recoded
        use_segments = any(x.any() for x in segments)
        new = np.zeros((n, 4))
        if use_segments:  # warp segments
            segments = resample_segments(segments)  # upsample
            for i, segment in enumerate(segments):
                xy = np.ones((len(segment), 3))
                xy[:, :2] = segment
                xy = xy @ M.T  # transform
                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine
                # clip
                new[i] = segment2box(xy, width, height)

        else:  # warp boxes
            xy = np.ones((n * 4, 3))
            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine

            # create new boxes
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

            # clip
            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)

        # filter candidates: an instance survives only if it is a valid box in every frame
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
        i = i.reshape(n_instance, t)
        i_instance = np.prod(i, axis=-1).astype(bool)
        new = new.reshape(n_instance, t, -1)
        targets = targets.reshape(n_instance, t, enddim)
        new_targets = []
        for ni, ii in enumerate(i_instance):
            if ii:
                for ti in range(t):
                    # keep the class id; zero the box for frames that failed the candidate test
                    tt = [targets[ni, ti, 0]] + (new[ni, ti, :].tolist() if i[ni, ti] else [0.] * 4)
                    new_targets.append(tt)
        targets = np.array(new_targets).reshape(-1, t, enddim).astype(np.float32)
    return im, targets

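# Usage sketch (illustrative): all frames share one 3x3 matrix M composed right-to-left
# (center -> perspective -> rotation/scale -> shear -> translation), which keeps boxes
# temporally consistent; a corner (x, y) is warped in homogeneous coordinates:
#   M = T @ S @ R @ P @ C
#   p = np.array([x, y, 1.0]) @ M.T  # x, y are hypothetical pixel coordinates
#   p = p[:2] / p[2]                 # perspective divide (p[2] == 1 for pure affine)
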
def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
                       border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = im.shape[0] + border[0] * 2  # shape(h,w,c)
    width = im.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -im.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -im.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    s = random.uniform(1 - scale, 1 + scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Transform label coordinates
    n = len(targets)
    if n:
        use_segments = any(x.any() for x in segments)
        new = np.zeros((n, 4))
        if use_segments:  # warp segments
            segments = resample_segments(segments)  # upsample
            for i, segment in enumerate(segments):
                xy = np.ones((len(segment), 3))
                xy[:, :2] = segment
                xy = xy @ M.T  # transform
                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine

                # clip
                new[i] = segment2box(xy, width, height)

        else:  # warp boxes
            xy = np.ones((n * 4, 3))
            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine

            # create new boxes
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

            # clip
            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
        targets = targets[i]
        targets[:, 1:5] = new[i]

    return im, targets

def copy_paste(im, labels, segments, p=0.5):
    # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)
    n = len(segments)
    if p and n:
        h, w, c = im.shape  # height, width, channels
        im_new = np.zeros(im.shape, np.uint8)
        for j in random.sample(range(n), k=round(p * n)):
            l, s = labels[j], segments[j]
            box = w - l[3], l[2], w - l[1], l[4]  # left-right flipped source box
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            if (ioa < 0.30).all():  # allow 30% obscuration of existing labels
                labels = np.concatenate((labels, [[l[0], *box]]), 0)
                segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
                cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)

        result = cv2.bitwise_and(src1=im, src2=im_new)
        result = cv2.flip(result, 1)  # augment segments (flip left-right)
        i = result > 0  # pixels to replace
        im[i] = result[i]  # cv2.imwrite('debug.jpg', im)  # debug

    return im, labels, segments

def make_cuboid_from_temporal_annotation(labels):
    # Labels of shape n x T x 4 in x1,y1,x2,y2 format; returns the n x 4 enclosing box
    # (cuboid footprint) of each instance over the frames where it is present
    n, t = labels.shape[:2]
    labels = labels.reshape(n * t, 4)
    labels_with_wh = xyxy2cxcywh(labels)
    labels_with_wh = labels_with_wh.reshape(n, t, 4)[..., 2:]
    labels = labels.reshape(n, t, 4)
    new_labels = []
    for ni in range(n):
        temporal_candidates = labels_with_wh[ni].all(axis=-1)  # frames with non-zero w and h
        labels_at_n = labels[ni, temporal_candidates].reshape(-1, 4)
        x1, y1, x2, y2 = labels_at_n[:, 0].min(), labels_at_n[:, 1].min(), labels_at_n[:, 2].max(), labels_at_n[:, 3].max()
        new_labels.append([x1, y1, x2, y2])
    new_labels = np.array(new_labels).reshape(-1, 4)
    assert new_labels.shape[0] == n, "in cuboid formation number of instances not matching"
    return new_labels

def mixup_drones(im, labels1, im2, labels2):
    # Labels of shape n x T x 5 with (cls, x1, y1, x2, y2) rows; blend instances from
    # clip 2 into clip 1 wherever their temporal cuboids do not overlap existing ones
    h, w, c = im[-1].shape
    cuboid_labels1, cuboid_labels2 = make_cuboid_from_temporal_annotation(labels1[:, :, 1:]), make_cuboid_from_temporal_annotation(labels2[:, :, 1:])
    cuboid_labels1, cuboid_labels2 = torch.tensor(cuboid_labels1), torch.tensor(cuboid_labels2)
    ious = box_iou(cuboid_labels2, cuboid_labels1).numpy()
    mergable_candidates = ~ious.any(axis=-1)  # clip-2 instances with zero IoU against every clip-1 cuboid
    labels2 = labels2[mergable_candidates]
    n2, t, enddim = labels2.shape
    labels2[..., [1, 3]] = labels2[..., [1, 3]].clip(0, w)  # x1, x2
    labels2[..., [2, 4]] = labels2[..., [2, 4]].clip(0, h)  # y1, y2
    r = np.random.beta(32.0, 32.0)  # mixup ratio
    if n2:
        for ti in range(t):
            for ni in range(n2):
                x1, y1, x2, y2 = labels2[ni, ti, 1:]
                x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
                im[ti][y1:y2, x1:x2, :] = (r * im[ti][y1:y2, x1:x2, :] + (1 - r) * im2[ti][y1:y2, x1:x2, :]).astype(np.uint8)
        labels = np.concatenate((labels1, labels2), 0).reshape(-1, t, enddim)
    else:
        labels = labels1

    return im, labels

def cutout(im, labels, p=0.5):
    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
    if random.random() < p:
        h, w = im.shape[:2]
        scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
        for s in scales:
            mask_h = random.randint(1, int(h * s))  # create random masks
            mask_w = random.randint(1, int(w * s))

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # apply random color mask
            im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

            # return unobscured labels
            if len(labels) and s > 0.03:
                box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
                ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
                labels = labels[ioa < 0.60]  # remove >60% obscured labels

    return labels

def mixup(im, labels, im2, labels2):
    # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0
    im = (im * r + im2 * (1 - r)).astype(np.uint8)
    labels = np.concatenate((labels, labels2), 0)
    return im, labels


def mixup_temporal(im, labels, im2, labels2, frame_wise_aug=False):
    # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf frame by frame
    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0
    t = len(im)
    for ti in range(t):
        if frame_wise_aug:
            r = np.random.beta(32.0, 32.0)
        im[ti] = (im[ti] * r + im2[ti] * (1 - r)).astype(np.uint8)
    enddim = labels.shape[-1]
    labels = np.concatenate((labels, labels2), 0).reshape(-1, t, enddim)

    return im, labels


def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
    # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates
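
Taken together, these functions form the per-sample training pipeline: letterbox for geometric normalization, augment_hsv for photometric jitter, and random_perspective for a warp that transforms boxes with the image and drops degenerate ones via box_candidates. A minimal sketch of how they compose, assuming the file is importable as the module augmentations with its dependencies on the path:

import numpy as np
from augmentations import augment_hsv, letterbox, random_perspective

im = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)       # dummy BGR frame
labels = np.array([[0, 100., 120., 220., 260.]], dtype=np.float32)  # (cls, x1, y1, x2, y2)

im, ratio, (dw, dh) = letterbox(im, new_shape=640, auto=True)       # resize + pad to stride multiple
labels[:, [1, 3]] = labels[:, [1, 3]] * ratio[0] + dw               # map boxes into the padded frame
labels[:, [2, 4]] = labels[:, [2, 4]] * ratio[1] + dh
augment_hsv(im)                                                     # in-place color jitter
im, labels = random_perspective(im, labels)                         # warp image and boxes together
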
common.py ADDED
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Common modules
"""

import logging
import math
import warnings
from copy import copy
from pathlib import Path
from typing import Optional

import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torch.cuda import amp

from datasets import exif_transpose, letterbox
from general import (colorstr, increment_path, make_divisible, non_max_suppression, save_one_box, scale_coords,
                     xyxy2xywh)
from plots import Annotator, colors
from torch_utils import time_sync
from defomable_conv import DeformConv  # note: module filename is spelled 'defomable_conv' in this repo

LOGGER = logging.getLogger(__name__)


def autopad(k, p=None):  # kernel, padding
    # Pad to 'same'
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
    return p

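# Usage sketch (illustrative): for odd kernels autopad returns k // 2, which keeps
# spatial size unchanged at stride 1, e.g. autopad(3) == 1 and autopad(5) == 2,
# so Conv(c1, c2, 3) below preserves H x W.
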
class Conv(nn.Module):
    # Standard convolution
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def forward_fuse(self, x):
        return self.act(self.conv(x))


class DConv(nn.Module):
    # Deformable convolution: same Conv -> BN -> SiLU layout, with DeformConv in place of nn.Conv2d
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super().__init__()
        self.conv = DeformConv(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def forward_fuse(self, x):
        return self.act(self.conv(x))


class DWConv(Conv):
    # Depth-wise convolution class
    def __init__(self, c1, c2, k=1, s=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act)

class ChannelAttentionModule(nn.Module):
    def __init__(self, c1, reduction=16):
        super(ChannelAttentionModule, self).__init__()
        mid_channel = c1 // reduction
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.shared_MLP = nn.Sequential(
            nn.Linear(in_features=c1, out_features=mid_channel),
            nn.ReLU(),
            nn.Linear(in_features=mid_channel, out_features=c1)
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avgout = self.shared_MLP(self.avg_pool(x).view(x.size(0), -1)).unsqueeze(2).unsqueeze(3)
        maxout = self.shared_MLP(self.max_pool(x).view(x.size(0), -1)).unsqueeze(2).unsqueeze(3)
        return self.sigmoid(avgout + maxout)


class SpatialAttentionModule(nn.Module):
    def __init__(self):
        super(SpatialAttentionModule, self).__init__()
        self.conv2d = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=7, stride=1, padding=3)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avgout = torch.mean(x, dim=1, keepdim=True)
        maxout, _ = torch.max(x, dim=1, keepdim=True)
        out = torch.cat([avgout, maxout], dim=1)
        out = self.sigmoid(self.conv2d(out))
        return out


class CBAM(nn.Module):
    # Convolutional Block Attention Module: channel gating followed by spatial gating
    def __init__(self, c1, c2):
        super(CBAM, self).__init__()
        self.channel_attention = ChannelAttentionModule(c1)
        self.spatial_attention = SpatialAttentionModule()

    def forward(self, x):
        out = self.channel_attention(x) * x
        out = self.spatial_attention(out) * out
        return out

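# Usage sketch (illustrative): CBAM is shape-preserving (channel gate, then spatial
# gate), so it can sit between any two stages with a matching channel count:
#   cbam = CBAM(64, 64)
#   y = cbam(torch.randn(2, 64, 40, 40))  # y.shape == (2, 64, 40, 40)
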
class TransformerLayer(nn.Module):
    # Pre-norm transformer encoder layer: multi-head self-attention + 2-layer MLP, each with a residual connection
    def __init__(self, c, num_heads):
        super().__init__()
        self.ln1 = nn.LayerNorm(c)
        self.q = nn.Linear(c, c, bias=False)
        self.k = nn.Linear(c, c, bias=False)
        self.v = nn.Linear(c, c, bias=False)
        self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
        self.ln2 = nn.LayerNorm(c)
        self.fc1 = nn.Linear(c, 4 * c, bias=False)
        self.fc2 = nn.Linear(4 * c, c, bias=False)
        self.dropout = nn.Dropout(0.1)
        self.act = nn.ReLU(True)

    def forward(self, x):
        x_ = self.ln1(x)
        x = self.dropout(self.ma(self.q(x_), self.k(x_), self.v(x_))[0]) + x
        x_ = self.ln2(x)
        x_ = self.fc2(self.dropout(self.act(self.fc1(x_))))
        x = x + self.dropout(x_)
        return x


class TransformerBlock(nn.Module):
    # Vision Transformer https://arxiv.org/abs/2010.11929
    def __init__(self, c1, c2, num_heads, num_layers):
        super().__init__()
        self.conv = None
        if c1 != c2:
            self.conv = Conv(c1, c2)
        self.linear = nn.Linear(c2, c2)  # learnable position embedding
        self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
        self.c2 = c2

    def forward(self, x):
        if self.conv is not None:
            x = self.conv(x)
        b, _, w, h = x.shape
        p = x.flatten(2).unsqueeze(0).transpose(0, 3).squeeze(3)
        return self.tr(p + self.linear(p)).unsqueeze(3).transpose(0, 3).reshape(b, self.c2, w, h)

def drop_path_f(x, drop_prob: float = 0., training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).

    This is the same as the DropConnect impl created for EfficientNet etc. networks; however,
    the original name is misleading, as 'Drop Connect' is a different form of dropout from a
    separate paper. See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956.
    The layer and argument names use 'drop path' rather than mixing DropConnect as a layer name
    with 'survival rate' as the argument.
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize
    output = x.div(keep_prob) * random_tensor
    return output


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path_f(x, self.drop_prob, self.training)

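# Usage sketch (illustrative): each sample's residual branch is zeroed with probability
# drop_prob and scaled by 1 / keep_prob otherwise, so the expectation is unchanged:
# E[output] = keep_prob * x / keep_prob + drop_prob * 0 = x.
#   dp = DropPath(0.1)
#   dp.train(); y = dp(torch.ones(8, 16))  # rows are either 0 or 1/0.9
#   dp.eval();  assert torch.equal(dp(torch.ones(8, 16)), torch.ones(8, 16))  # identity at eval
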
def window_partition(x, window_size: int):
    """
    Partition a feature map into non-overlapping windows of size window_size.
    Args:
        x: (B, H, W, C)
        window_size (int): window size (M)

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    # permute: [B, H//Mh, Mh, W//Mw, Mw, C] -> [B, H//Mh, W//Mw, Mh, Mw, C]
    # view: [B, H//Mh, W//Mw, Mh, Mw, C] -> [B*num_windows, Mh, Mw, C]
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size: int, H: int, W: int):
    """
    Reassemble windows back into a full feature map.
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): window size (M)
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    # view: [B*num_windows, Mh, Mw, C] -> [B, H//Mh, W//Mw, Mh, Mw, C]
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    # permute: [B, H//Mh, W//Mw, Mh, Mw, C] -> [B, H//Mh, Mh, W//Mw, Mw, C]
    # view: [B, H//Mh, Mh, W//Mw, Mw, C] -> [B, H, W, C]
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x

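# Usage sketch (illustrative): the two functions are inverses when H and W are
# multiples of window_size:
#   x = torch.randn(2, 16, 16, 32)                       # (B, H, W, C)
#   w = window_partition(x, 8)                           # (2*4, 8, 8, 32)
#   assert torch.equal(window_reverse(w, 8, 16, 16), x)  # exact round trip
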
class Mlp(nn.Module):
    """MLP as used in Vision Transformer, MLP-Mixer and related networks."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features

        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.drop1 = nn.Dropout(drop)
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop2 = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop1(x)
        x = self.fc2(x)
        x = self.drop2(x)
        return x

class WindowAttention(nn.Module):
    r"""Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # [Mh, Mw]
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5

        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # [2*Mh-1 * 2*Mw-1, nH]

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing="ij"))  # [2, Mh, Mw]
        coords_flatten = torch.flatten(coords, 1)  # [2, Mh*Mw]
        # [2, Mh*Mw, 1] - [2, 1, Mh*Mw]
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # [2, Mh*Mw, Mh*Mw]
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # [Mh*Mw, Mh*Mw, 2]
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # [Mh*Mw, Mh*Mw]
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        nn.init.trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask: Optional[torch.Tensor] = None):
        """
        Args:
            x: input features with shape of (num_windows*B, Mh*Mw, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        # [batch_size*num_windows, Mh*Mw, total_embed_dim]
        B_, N, C = x.shape
        # qkv(): -> [batch_size*num_windows, Mh*Mw, 3 * total_embed_dim]
        # reshape: -> [batch_size*num_windows, Mh*Mw, 3, num_heads, embed_dim_per_head]
        # permute: -> [3, batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        # [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        # transpose: -> [batch_size*num_windows, num_heads, embed_dim_per_head, Mh*Mw]
        # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, Mh*Mw]
        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        # relative_position_bias_table.view: [Mh*Mw*Mh*Mw,nH] -> [Mh*Mw,Mh*Mw,nH]
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # [nH, Mh*Mw, Mh*Mw]
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # mask: [nW, Mh*Mw, Mh*Mw]
            nW = mask.shape[0]  # num_windows
            # attn.view: [batch_size, num_windows, num_heads, Mh*Mw, Mh*Mw]
            # mask.unsqueeze: [1, nW, 1, Mh*Mw, Mh*Mw]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        # @: multiply -> [batch_size*num_windows, num_heads, Mh*Mw, embed_dim_per_head]
        # transpose: -> [batch_size*num_windows, Mh*Mw, num_heads, embed_dim_per_head]
        # reshape: -> [batch_size*num_windows, Mh*Mw, total_embed_dim]
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

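# Usage sketch (illustrative): the bias table has (2*Mh - 1) * (2*Mw - 1) rows because
# the row/column offsets between two tokens in an Mh x Mw window each range over
# [-(M-1), M-1]; relative_position_index maps every token pair to its table row.
#   wa = WindowAttention(dim=96, window_size=(7, 7), num_heads=3)
#   y = wa(torch.randn(4, 49, 96))  # (num_windows*B, Mh*Mw, C) -> same shape
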
class SwinTransformerLayer(nn.Module):
    # Swin Transformer layer https://arxiv.org/abs/2103.14030 (window attention, optionally shifted)
    def __init__(self, c, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4, qkv_bias=False, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        if num_heads > 10:
            drop_path = 0.1
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio

        self.norm1 = norm_layer(c)
        self.attn = WindowAttention(
            c, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(c)
        mlp_hidden_dim = int(c * mlp_ratio)
        self.mlp = Mlp(in_features=c, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def create_mask(self, x, H, W):
        # calculate attention mask for SW-MSA
        # Ensure Hp and Wp are integer multiples of window_size
        Hp = int(np.ceil(H / self.window_size)) * self.window_size
        Wp = int(np.ceil(W / self.window_size)) * self.window_size
        # Same channel layout as the feature map, for the window_partition below
        img_mask = torch.zeros((1, Hp, Wp, 1), dtype=self.attn.qkv.weight.dtype, device=x.device)  # [1, Hp, Wp, 1]
        h_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        w_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1

        mask_windows = window_partition(img_mask, self.window_size)  # [nW, Mh, Mw, 1]
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)  # [nW, Mh*Mw]
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)  # [nW, 1, Mh*Mw] - [nW, Mh*Mw, 1]
        # [nW, Mh*Mw, Mh*Mw]
        attn_mask = attn_mask.masked_fill(attn_mask != 0, torch.tensor(-100.0)).masked_fill(attn_mask == 0, torch.tensor(0.0))
        return attn_mask

    def forward(self, x):
        b, c, w, h = x.shape
        x = x.permute(0, 3, 2, 1).contiguous()  # [b,h,w,c]

        attn_mask = self.create_mask(x, h, w)  # [nW, Mh*Mw, Mh*Mw]
        shortcut = x
        x = self.norm1(x)

        pad_l = pad_t = 0
        pad_r = (self.window_size - w % self.window_size) % self.window_size
        pad_b = (self.window_size - h % self.window_size) % self.window_size
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, hp, wp, _ = x.shape

        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
            attn_mask = None

        x_windows = window_partition(shifted_x, self.window_size)  # [nW*B, Mh, Mw, C]
        x_windows = x_windows.view(-1, self.window_size * self.window_size, c)  # [nW*B, Mh*Mw, C]

        attn_windows = self.attn(x_windows, mask=attn_mask)  # [nW*B, Mh*Mw, C]

        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, c)  # [nW*B, Mh, Mw, C]
        shifted_x = window_reverse(attn_windows, self.window_size, hp, wp)  # [B, H', W', C]

        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x

        if pad_r > 0 or pad_b > 0:
            # Remove the padding added earlier
            x = x[:, :h, :w, :].contiguous()

        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        x = x.permute(0, 3, 2, 1).contiguous()
        return x  # (b, self.c2, w, h)

class SwinTransformerBlock(nn.Module):
    def __init__(self, c1, c2, num_heads, num_layers, window_size=8):
        super().__init__()
        self.conv = None
        if c1 != c2:
            self.conv = Conv(c1, c2)

        self.window_size = window_size
        self.shift_size = window_size // 2
        # alternate between regular (W-MSA) and shifted (SW-MSA) window attention
        self.tr = nn.Sequential(*(SwinTransformerLayer(c2, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0) else self.shift_size) for i in range(num_layers)))

    def forward(self, x):
        if self.conv is not None:
            x = self.conv(x)
        x = self.tr(x)
        return x

from models.video_swin_transformer import SwinTransformerLayer3D, SwinTransformerBlock3D  # , _init_weights as initswin3d

# Superseded local implementation, kept for reference:
# class SwinTransformerBlock3D(nn.Module):
#     def __init__(self, c1, c2, num_frames, num_heads, num_layers=2):
#         super().__init__()
#         self.conv = None
#         if c1 != c2:
#             self.conv = Conv(c1, c2)
#         window_size = 8
#         self.num_frames = num_frames
#         self.window_size = (num_frames, window_size, window_size)
#         self.shift_size = (0, window_size // 2, window_size // 2)
#         self.tr = nn.Sequential(*(SwinTransformerLayer3D(c2, num_heads=num_heads, window_size=self.window_size, shift_size=(0, 0, 0) if (i % 2 == 0) else self.shift_size) for i in range(num_layers)))
#
#     def reshape_frames(self, x, mode: int = 0):
#         if mode == 0:
#             b, c, h, w = x.shape
#             b_new = b // self.num_frames
#             x = x.reshape(b_new, self.num_frames, c, h, w)
#         elif mode == 1:
#             b, t, c, h, w = x.shape
#             x = x.reshape(b * t, c, h, w)
#         return x
#
#     def forward(self, x):
#         if self.conv is not None:
#             x = self.conv(x)
#         x = self.reshape_frames(x, 0)
#         x = self.tr(x)
#         x = self.reshape_frames(x, 1)
#         return x

class Bottleneck(nn.Module):
    # Standard bottleneck
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class DeformableBottleneck(nn.Module):
    # Bottleneck built from deformable convolutions
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = DConv(c1, c_, 1, 1)
        self.cv2 = DConv(c_, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class BottleneckCSP(nn.Module):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))

class C3(nn.Module):
    # CSP Bottleneck with 3 convolutions
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # act=FReLU(c2)
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))


class C3D(nn.Module):
    # CSP Bottleneck with 3 deformable convolutions
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = DConv(c1, c_, 1, 1)
        self.cv2 = DConv(c1, c_, 1, 1)
        self.cv3 = DConv(2 * c_, c2, 1)  # act=FReLU(c2)
        self.m = nn.Sequential(*(DeformableBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))

class C3TR(C3):
    # C3 module with TransformerBlock()
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = TransformerBlock(c_, c_, 4, n)


class C3STR(C3):
    # C3 module with SwinTransformerBlock()
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = SwinTransformerBlock(c_, c_, c_ // 32, n)


class C3DSTR(C3D):
    # C3D module with SwinTransformerBlock()
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = SwinTransformerBlock(c_, c_, c_ // 32, n)


class C3STTR(C3):
    # C3 module with SwinTransformerBlock3D()
    def __init__(self, c1, c2, num_frames, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = SwinTransformerBlock3D(c_, c_, num_frames, c_ // 32, n)


class C3Temporal(nn.Module):
    # C3 followed by a temporal (3D Swin) transformer block
    def __init__(self, c1, c2, num_frames, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        self.c3 = C3(c1, c2, n, shortcut, g, e)
        self.temporaltransformer = SwinTransformerBlock3D(c2, num_frames)

    def forward(self, x):
        return self.temporaltransformer(self.c3(x))


class C3SPP(C3):
    # C3 module with SPP()
    def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = SPP(c_, c_, k)

class C3Ghost(C3):
    # C3 module with GhostBottleneck()
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)  # hidden channels
        self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))


class SPP(nn.Module):
    # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
    def __init__(self, c1, c2, k=(5, 9, 13)):
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

    def forward(self, x):
        x = self.cv1(x)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
            return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))

class ASPP(nn.Module):
    # Atrous Spatial Pyramid Pooling (ASPP) layer
    def __init__(self, c1, c2, k=(5, 9, 13)):
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.m = nn.ModuleList([nn.Conv2d(c_, c_, kernel_size=3, stride=1, padding=(x - 1) // 2, dilation=(x - 1) // 2, bias=False) for x in k])
        self.cv2 = Conv(c_ * (len(k) + 2), c2, 1, 1)

    def forward(self, x):
        x = self.cv1(x)
        return self.cv2(torch.cat([x] + [self.maxpool(x)] + [m(x) for m in self.m], 1))


class SPPF(nn.Module):
    # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
    def __init__(self, c1, c2, k=5):  # equivalent to SPP(k=(5, 9, 13))
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * 4, c2, 1, 1)
        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

    def forward(self, x):
        x = self.cv1(x)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
            y1 = self.m(x)
            y2 = self.m(y1)
            return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))

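# Usage sketch (illustrative): SPPF(k=5) matches SPP(k=(5, 9, 13)) because stacking
# 5x5 max-pools grows the effective kernel — m(x) ~ 5x5, m(m(x)) ~ 9x9, m(m(m(x))) ~ 13x13 —
# while reusing intermediate results makes it faster.
#   sppf = SPPF(256, 256)
#   y = sppf(torch.randn(1, 256, 20, 20))  # y.shape == (1, 256, 20, 20)
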
658
+ class Focus(nn.Module):
659
+ # Focus wh information into c-space
660
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
661
+ super().__init__()
662
+ self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
663
+ # self.contract = Contract(gain=2)
664
+
665
+ def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
666
+ return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
667
+ # return self.conv(self.contract(x))
668
+
669
+
670
+ class GhostConv(nn.Module):
671
+ # Ghost Convolution https://github.com/huawei-noah/ghostnet
672
+ def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
673
+ super().__init__()
674
+ c_ = c2 // 2 # hidden channels
675
+ self.cv1 = Conv(c1, c_, k, s, None, g, act)
676
+ self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)
677
+
678
+ def forward(self, x):
679
+ y = self.cv1(x)
680
+ return torch.cat([y, self.cv2(y)], 1)
681
+
682
+
683
+ class GhostBottleneck(nn.Module):
684
+ # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
685
+ def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
686
+ super().__init__()
687
+ c_ = c2 // 2
688
+ self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
689
+ DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
690
+ GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
691
+ self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
692
+ Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
693
+
694
+ def forward(self, x):
695
+ return self.conv(x) + self.shortcut(x)
696
+
697
+
698
+ class Contract(nn.Module):
699
+ # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
700
+ def __init__(self, gain=2):
701
+ super().__init__()
702
+ self.gain = gain
703
+
704
+ def forward(self, x):
705
+ b, c, h, w = x.size() # assert h % s == 0 and w % s == 0, 'Indivisible gain'
706
+ s = self.gain
707
+ x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)
708
+ x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
709
+ return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)
710
+
711
+
712
+ class Expand(nn.Module):
713
+ # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
714
+ def __init__(self, gain=2):
715
+ super().__init__()
716
+ self.gain = gain
717
+
718
+ def forward(self, x):
719
+ b, c, h, w = x.size() # assert c % s ** 2 == 0, 'Indivisible gain'
720
+ s = self.gain
721
+ x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80)
722
+ x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
723
+ return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160)
724
+
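+ # Round-trip sketch (added; `_contract_expand_roundtrip` is a hypothetical
+ # helper): Expand(gain=s) exactly inverts Contract(gain=s) because the two
+ # permutations are mutual inverses, so the original tensor is recovered bitwise.
+ def _contract_expand_roundtrip():
+ x = torch.rand(1, 64, 80, 80)
+ y = Contract(gain=2)(x) # (1, 256, 40, 40)
+ z = Expand(gain=2)(y) # (1, 64, 80, 80)
+ assert torch.equal(z, x)
+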
725
+
726
+ class Concat(nn.Module):
727
+ # Concatenate a list of tensors along dimension
728
+ def __init__(self, dimension=1):
729
+ super().__init__()
730
+ self.d = dimension
731
+
732
+ def forward(self, x):
733
+ return torch.cat(x, self.d)
734
+
735
+
736
+ class AutoShape(nn.Module):
737
+ # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
738
+ conf = 0.25 # NMS confidence threshold
739
+ iou = 0.45 # NMS IoU threshold
740
+ classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
741
+ multi_label = False # NMS multiple labels per box
742
+ max_det = 1000 # maximum number of detections per image
743
+
744
+ def __init__(self, model):
745
+ super().__init__()
746
+ self.model = model.eval()
747
+
748
+ def autoshape(self):
749
+ LOGGER.info('AutoShape already enabled, skipping... ') # model already converted to model.autoshape()
750
+ return self
751
+
752
+ def _apply(self, fn):
753
+ # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
754
+ self = super()._apply(fn)
755
+ m = self.model.model[-1] # Detect()
756
+ m.stride = fn(m.stride)
757
+ m.grid = list(map(fn, m.grid))
758
+ if isinstance(m.anchor_grid, list):
759
+ m.anchor_grid = list(map(fn, m.anchor_grid))
760
+ return self
761
+
762
+ @torch.no_grad()
763
+ def forward(self, imgs, size=640, augment=False, profile=False):
764
+ # Inference from various sources. For height=640, width=1280, RGB images example inputs are:
765
+ # file: imgs = 'data/images/zidane.jpg' # str or PosixPath
766
+ # URI: = 'https://ultralytics.com/images/zidane.jpg'
767
+ # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
768
+ # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
769
+ # numpy: = np.zeros((640,1280,3)) # HWC
770
+ # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
771
+ # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
772
+
773
+ t = [time_sync()]
774
+ p = next(self.model.parameters()) # for device and type
775
+ if isinstance(imgs, torch.Tensor): # torch
776
+ with amp.autocast(enabled=p.device.type != 'cpu'):
777
+ return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
778
+
779
+ # Pre-process
780
+ n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
781
+ shape0, shape1, files = [], [], [] # image and inference shapes, filenames
782
+ for i, im in enumerate(imgs):
783
+ f = f'image{i}' # filename
784
+ if isinstance(im, (str, Path)): # filename or uri
785
+ im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
786
+ im = np.asarray(exif_transpose(im))
787
+ elif isinstance(im, Image.Image): # PIL Image
788
+ im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
789
+ files.append(Path(f).with_suffix('.jpg').name)
790
+ if im.shape[0] < 5: # image in CHW
791
+ im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
792
+ im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input
793
+ s = im.shape[:2] # HWC
794
+ shape0.append(s) # image shape
795
+ g = (size / max(s)) # gain
796
+ shape1.append([y * g for y in s])
797
+ imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
798
+ shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
799
+ x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
800
+ x = np.stack(x, 0) if n > 1 else x[0][None] # stack
801
+ x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
802
+ x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32
803
+ t.append(time_sync())
804
+
805
+ with amp.autocast(enabled=p.device.type != 'cpu'):
806
+ # Inference
807
+ y = self.model(x, augment, profile)[0] # forward
808
+ t.append(time_sync())
809
+
810
+ # Post-process
811
+ y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes,
812
+ multi_label=self.multi_label, max_det=self.max_det) # NMS
813
+ for i in range(n):
814
+ scale_coords(shape1, y[i][:, :4], shape0[i])
815
+
816
+ t.append(time_sync())
817
+ return Detections(imgs, y, files, t, self.names, x.shape)
818
+
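+ # Usage sketch (illustrative; assumes the standard Ultralytics torch.hub entry
+ # point and a sample image are available):
+ # model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # AutoShape-wrapped model
+ # results = model('data/images/zidane.jpg', size=640) # Detections object
+ # results.print() # per-stage speed summary
+ # df = results.pandas().xyxy[0] # xmin/ymin/xmax/ymax/confidence/class/name
+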
819
+
820
+ class Detections:
821
+ # YOLOv5 detections class for inference results
822
+ def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
823
+ super().__init__()
824
+ d = pred[0].device # device
825
+ gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations
826
+ self.imgs = imgs # list of images as numpy arrays
827
+ self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
828
+ self.names = names # class names
829
+ self.files = files # image filenames
830
+ self.xyxy = pred # xyxy pixels
831
+ self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
832
+ self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
833
+ self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
834
+ self.n = len(self.pred) # number of images (batch size)
835
+ self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) if times is not None else (0.0, 0.0, 0.0) # timestamps (ms); guard the default times=None
836
+ self.s = shape # inference BCHW shape
837
+
838
+ def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
839
+ crops = []
840
+ for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
841
+ s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string
842
+ if pred.shape[0]:
843
+ for c in pred[:, -1].unique():
844
+ n = (pred[:, -1] == c).sum() # detections per class
845
+ s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
846
+ if show or save or render or crop:
847
+ annotator = Annotator(im, example=str(self.names))
848
+ for *box, conf, cls in reversed(pred): # xyxy, confidence, class
849
+ label = f'{self.names[int(cls)]} {conf:.2f}'
850
+ if crop:
851
+ file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
852
+ crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label,
853
+ 'im': save_one_box(box, im, file=file, save=save)})
854
+ else: # all others
855
+ annotator.box_label(box, label, color=colors(cls))
856
+ im = annotator.im
857
+ else:
858
+ s += '(no detections)'
859
+
860
+ im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
861
+ if pprint:
862
+ LOGGER.info(s.rstrip(', '))
863
+ if show:
864
+ im.show(self.files[i]) # show
865
+ if save:
866
+ f = self.files[i]
867
+ im.save(save_dir / f) # save
868
+ if i == self.n - 1:
869
+ LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
870
+ if render:
871
+ self.imgs[i] = np.asarray(im)
872
+ if crop:
873
+ if save:
874
+ LOGGER.info(f'Saved results to {save_dir}\n')
875
+ return crops
876
+
877
+ def print(self):
878
+ self.display(pprint=True) # print results
879
+ LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' %
880
+ self.t)
881
+
882
+ def show(self):
883
+ self.display(show=True) # show results
884
+
885
+ def save(self, save_dir='runs/detect/exp'):
886
+ save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir
887
+ self.display(save=True, save_dir=save_dir) # save results
888
+
889
+ def crop(self, save=True, save_dir='runs/detect/exp'):
890
+ save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None
891
+ return self.display(crop=True, save=save, save_dir=save_dir) # crop results
892
+
893
+ def render(self):
894
+ self.display(render=True) # render results
895
+ return self.imgs
896
+
897
+ def pandas(self):
898
+ # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
899
+ new = copy(self) # return copy
900
+ ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
901
+ cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
902
+ for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
903
+ a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
904
+ setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
905
+ return new
906
+
907
+ def tolist(self):
908
+ # return a list of Detections objects, i.e. 'for result in results.tolist():'
909
+ x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], None, self.names, self.s) for i in range(self.n)] # pass files/names/shape to the correct positional slots
910
+ for d in x:
911
+ for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
912
+ setattr(d, k, getattr(d, k)[0]) # pop out of list
913
+ return x
914
+
915
+ def __len__(self):
916
+ return self.n
917
+
918
+
919
+ class Classify(nn.Module):
920
+ # Classification head, i.e. x(b,c1,20,20) to x(b,c2)
921
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
922
+ super().__init__()
923
+ self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
924
+ self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)
925
+ self.flat = nn.Flatten()
926
+
927
+ def forward(self, x):
928
+ z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
929
+ return self.flat(self.conv(z)) # flatten to x(b,c2)
datasets.py ADDED
@@ -0,0 +1,1841 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+ """
3
+ Dataloaders and dataset utils
4
+ """
5
+
6
+ import glob, sys
7
+ import hashlib
8
+ import json
9
+ import logging
10
+ import os
11
+ from posixpath import basename
12
+ import random
13
+ import shutil
14
+ import time
15
+ from itertools import repeat
16
+ from multiprocessing.pool import Pool, ThreadPool
17
+ from pathlib import Path
18
+ from threading import Thread
19
+ from unittest.mock import patch
20
+ from zipfile import ZipFile
21
+
22
+ import cv2
23
+ import numpy as np
24
+ import torch
25
+ import torch.nn.functional as F
26
+ import yaml
27
+ from PIL import ExifTags, Image, ImageOps
28
+ from torch.utils.data import Dataset
29
+ from tqdm import tqdm
30
+
31
+ from augmentations import Albumentations, AlbumentationsTemporal, augment_hsv, augment_hsv_temporal, copy_paste, letterbox, letterbox_temporal, mixup, mixup_temporal, random_perspective, random_perspective_temporal, mixup_drones
32
+ from general import (LOGGER, check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, xyn2xy,
33
+ xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
34
+ from plots import Annotator, plot_images_temporal
35
+ from torch_utils import torch_distributed_zero_first
36
+ from general import colorstr
37
+
38
+ # Parameters
39
+ HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
40
+ IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
41
+ VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
42
+ NUM_THREADS = min(4, os.cpu_count()) # number of multiprocessing threads
43
+
44
+ # Get orientation exif tag
45
+ for orientation in ExifTags.TAGS.keys():
46
+ if ExifTags.TAGS[orientation] == 'Orientation':
47
+ break
48
+
49
+
50
+ def get_hash(paths):
51
+ # Returns a single hash value of a list of paths (files or dirs)
52
+ size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
53
+ h = hashlib.md5(str(size).encode()) # hash sizes
54
+ h.update(''.join(paths).encode()) # hash paths
55
+ return h.hexdigest() # return hash
56
+
57
+
58
+ def exif_size(img):
59
+ # Returns exif-corrected PIL size
60
+ s = img.size # (width, height)
61
+ try:
62
+ rotation = dict(img._getexif().items())[orientation]
63
+ if rotation == 6: # rotation 270
64
+ s = (s[1], s[0])
65
+ elif rotation == 8: # rotation 90
66
+ s = (s[1], s[0])
67
+ except Exception:
68
+ pass
69
+
70
+ return s
71
+
72
+
73
+ def exif_transpose(image):
74
+ """
75
+ Transpose a PIL image accordingly if it has an EXIF Orientation tag.
76
+ Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
77
+
78
+ :param image: The image to transpose.
79
+ :return: An image.
80
+ """
81
+ exif = image.getexif()
82
+ orientation = exif.get(0x0112, 1) # default 1
83
+ if orientation > 1:
84
+ method = {2: Image.FLIP_LEFT_RIGHT,
85
+ 3: Image.ROTATE_180,
86
+ 4: Image.FLIP_TOP_BOTTOM,
87
+ 5: Image.TRANSPOSE,
88
+ 6: Image.ROTATE_270,
89
+ 7: Image.TRANSVERSE,
90
+ 8: Image.ROTATE_90,
91
+ }.get(orientation)
92
+ if method is not None:
93
+ image = image.transpose(method)
94
+ del exif[0x0112]
95
+ image.info["exif"] = exif.tobytes()
96
+ return image
97
+
98
+ def seed_worker(worker_id):
99
+ # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader
100
+ logging.info(f"{colorstr('train: ')} printing from the worker id {worker_id}")
101
+ worker_seed = torch.initial_seed() % 2 ** 32
102
+ np.random.seed(worker_seed)
103
+ random.seed(worker_seed)
104
+
105
+ def create_dataloader(path, annotation_path, video_root_path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
106
+ rect=False, rank=-1, workers=3, image_weights=False, quad=False, prefix='', is_training=True, num_frames=5, makestreamloader=False):
107
+ # Make sure only the first process in DDP process the dataset first, and the following others can use the cache
108
+ with torch_distributed_zero_first(rank):
109
+ #LoadImagesAndLabels
110
+ if makestreamloader:
111
+ dataset = LoadClipsStream(path, annotation_path, video_root_path, imgsz, batch_size,
112
+ augment=augment, # augment images
113
+ hyp=hyp, # augmentation hyperparameters
114
+ rect=rect, # rectangular training
115
+ cache_images=cache,
116
+ single_cls=single_cls,
117
+ stride=int(stride),
118
+ pad=pad,
119
+ image_weights=image_weights,
120
+ prefix=prefix,
121
+ is_training=is_training,
122
+ num_frames=num_frames
123
+ )
124
+ else:
125
+ dataset = LoadClipsAndLabels(path, annotation_path, video_root_path, imgsz, batch_size,
126
+ augment=augment, # augment images
127
+ hyp=hyp, # augmentation hyperparameters
128
+ rect=rect, # rectangular training
129
+ cache_images=cache,
130
+ single_cls=single_cls,
131
+ stride=int(stride),
132
+ pad=pad,
133
+ image_weights=image_weights,
134
+ prefix=prefix,
135
+ is_training=is_training,
136
+ num_frames=num_frames
137
+ )
138
+ shuffle = is_training and not rect # rectangular training requires a fixed batch order
140
+ batch_size = min(batch_size, len(dataset))
141
+ nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers]) # number of workers
142
+ sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=shuffle) if rank != -1 else None #torch.utils.data.RandomSampler(dataset)
143
+ loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
144
+ # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
145
+ #print("sampler", sampler)
146
+ shuffle = shuffle and sampler is None
147
+ generator = torch.Generator()
148
+ generator.manual_seed(0)
149
+ if rank in [0, -1]: print(f"data loader shuffle {shuffle}")
150
+ collate_fn = LoadClipsStream.collate_fn if makestreamloader else LoadClipsAndLabels.collate_fn
151
+ dataloader = loader(dataset,
152
+ batch_size=batch_size,
153
+ num_workers=nw,
154
+ sampler=sampler,
155
+ pin_memory=True,
156
+ drop_last=False,
157
+ collate_fn=LoadClipsAndLabels.collate_fn4 if quad else collate_fn,
158
+ generator=generator,
159
+ shuffle = shuffle,
160
+ worker_init_fn=seed_worker
161
+ #**kwargs
162
+ )
163
+ return dataloader, dataset
164
+
165
+
166
+ class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
167
+ """ Dataloader that reuses workers
168
+
169
+ Uses same syntax as vanilla DataLoader
170
+ """
171
+
172
+ def __init__(self, *args, **kwargs):
173
+ super().__init__(*args, **kwargs)
174
+ object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
175
+ self.iterator = super().__iter__()
176
+
177
+ def __len__(self):
178
+ return len(self.batch_sampler.sampler)
179
+
180
+ def __iter__(self):
181
+ for i in range(len(self)):
182
+ yield next(self.iterator)
183
+
184
+
185
+ class _RepeatSampler:
186
+ """ Sampler that repeats forever
187
+
188
+ Args:
189
+ sampler (Sampler)
190
+ """
191
+
192
+ def __init__(self, sampler):
193
+ self.sampler = sampler
194
+
195
+ def __iter__(self):
196
+ while True:
197
+ yield from iter(self.sampler)
198
+
199
+
200
+ class LoadImages:
201
+ # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
202
+ def __init__(self, path, img_size=640, stride=32, auto=True):
203
+ p = str(Path(path).resolve()) # os-agnostic absolute path
204
+ if '*' in p:
205
+ files = sorted(glob.glob(p, recursive=True)) # glob
206
+ elif os.path.isdir(p):
207
+ files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
208
+ elif os.path.isfile(p):
209
+ files = [p] # files
210
+ else:
211
+ raise Exception(f'ERROR: {p} does not exist')
212
+
213
+ images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
214
+ videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
215
+ ni, nv = len(images), len(videos)
216
+
217
+ self.img_size = img_size
218
+ self.stride = stride
219
+ self.files = images + videos
220
+ self.nf = ni + nv # number of files
221
+ self.video_flag = [False] * ni + [True] * nv
222
+ self.mode = 'image'
223
+ self.auto = auto
224
+ if any(videos):
225
+ self.new_video(videos[0]) # new video
226
+ else:
227
+ self.cap = None
228
+ assert self.nf > 0, f'No images or videos found in {p}. ' \
229
+ f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
230
+
231
+ def __iter__(self):
232
+ self.count = 0
233
+ return self
234
+
235
+ def __next__(self):
236
+ if self.count == self.nf:
237
+ raise StopIteration
238
+ path = self.files[self.count]
239
+
240
+ if self.video_flag[self.count]:
241
+ # Read video
242
+ self.mode = 'video'
243
+ ret_val, img0 = self.cap.read()
244
+ if not ret_val:
245
+ self.count += 1
246
+ self.cap.release()
247
+ if self.count == self.nf: # last video
248
+ raise StopIteration
249
+ else:
250
+ path = self.files[self.count]
251
+ self.new_video(path)
252
+ ret_val, img0 = self.cap.read()
253
+
254
+ self.frame += 1
255
+ s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
256
+
257
+ else:
258
+ # Read image
259
+ self.count += 1
260
+ img0 = cv2.imread(path) # BGR
261
+ assert img0 is not None, f'Image Not Found {path}'
262
+ s = f'image {self.count}/{self.nf} {path}: '
263
+
264
+ # Padded resize
265
+ img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
266
+
267
+ # Convert
268
+ img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
269
+ img = np.ascontiguousarray(img)
270
+
271
+ return path, img, img0, self.cap, s
272
+
273
+ def new_video(self, path):
274
+ self.frame = 0
275
+ self.cap = cv2.VideoCapture(path)
276
+ self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
277
+
278
+ def __len__(self):
279
+ return self.nf # number of files
280
+
281
+
282
+ class LoadWebcam: # for inference
283
+ # YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0`
284
+ def __init__(self, pipe='0', img_size=640, stride=32):
285
+ self.img_size = img_size
286
+ self.stride = stride
287
+ self.pipe = int(pipe) if pipe.isnumeric() else pipe
288
+ self.cap = cv2.VideoCapture(self.pipe) # video capture object
289
+ self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
290
+
291
+ def __iter__(self):
292
+ self.count = -1
293
+ return self
294
+
295
+ def __next__(self):
296
+ self.count += 1
297
+ if cv2.waitKey(1) == ord('q'): # q to quit
298
+ self.cap.release()
299
+ cv2.destroyAllWindows()
300
+ raise StopIteration
301
+
302
+ # Read frame
303
+ ret_val, img0 = self.cap.read()
304
+ img0 = cv2.flip(img0, 1) # flip left-right
305
+
306
+ # Print
307
+ assert ret_val, f'Camera Error {self.pipe}'
308
+ img_path = 'webcam.jpg'
309
+ s = f'webcam {self.count}: '
310
+
311
+ # Padded resize
312
+ img = letterbox(img0, self.img_size, stride=self.stride)[0]
313
+
314
+ # Convert
315
+ img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
316
+ img = np.ascontiguousarray(img)
317
+
318
+ return img_path, img, img0, None, s
319
+
320
+ def __len__(self):
321
+ return 0
322
+
323
+
324
+ class LoadStreams:
325
+ # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
326
+ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
327
+ self.mode = 'stream'
328
+ self.img_size = img_size
329
+ self.stride = stride
330
+
331
+ if os.path.isfile(sources):
332
+ with open(sources) as f:
333
+ sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
334
+ else:
335
+ sources = [sources]
336
+
337
+ n = len(sources)
338
+ self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
339
+ self.sources = [clean_str(x) for x in sources] # clean source names for later
340
+ self.auto = auto
341
+ for i, s in enumerate(sources): # index, source
342
+ # Start thread to read frames from video stream
343
+ st = f'{i + 1}/{n}: {s}... '
344
+ if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
345
+ check_requirements(('pafy', 'youtube_dl'))
346
+ import pafy
347
+ s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
348
+ s = int(s) if s.isnumeric() else s # i.e. s = '0' local webcam
349
+ cap = cv2.VideoCapture(s)
350
+ assert cap.isOpened(), f'{st}Failed to open {s}'
351
+ w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
352
+ h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
353
+ self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback
354
+ self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
355
+
356
+ _, self.imgs[i] = cap.read() # guarantee first frame
357
+ self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
358
+ LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
359
+ self.threads[i].start()
360
+ LOGGER.info('') # newline
361
+
362
+ # check for common shapes
363
+ s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
364
+ self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
365
+ if not self.rect:
366
+ LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')
367
+
368
+ def update(self, i, cap, stream):
369
+ # Read stream `i` frames in daemon thread
370
+ n, f, read = 0, self.frames[i], 1 # frame number, frame count, inference every 'read' frame
371
+ while cap.isOpened() and n < f:
372
+ n += 1
373
+ # _, self.imgs[index] = cap.read()
374
+ cap.grab()
375
+ if n % read == 0:
376
+ success, im = cap.retrieve()
377
+ if success:
378
+ self.imgs[i] = im
379
+ else:
380
+ LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')
381
+ self.imgs[i] *= 0
382
+ cap.open(stream) # re-open stream if signal was lost
383
+ time.sleep(1 / self.fps[i]) # wait time
384
+
385
+ def __iter__(self):
386
+ self.count = -1
387
+ return self
388
+
389
+ def __next__(self):
390
+ self.count += 1
391
+ if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
392
+ cv2.destroyAllWindows()
393
+ raise StopIteration
394
+
395
+ # Letterbox
396
+ img0 = self.imgs.copy()
397
+ img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
398
+
399
+ # Stack
400
+ img = np.stack(img, 0)
401
+
402
+ # Convert
403
+ img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
404
+ img = np.ascontiguousarray(img)
405
+
406
+ return self.sources, img, img0, None, ''
407
+
408
+ def __len__(self):
409
+ return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
410
+
411
+
412
+ # NOTE: this one-argument version is shadowed by the two-argument
+ # img2label_paths(img_paths, annotation_dir) defined below; it is kept for reference only.
+ def img2label_paths(img_paths):
413
+ # Define label paths as a function of image paths
414
+ sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
415
+ return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
416
+
417
+ # def img2label_paths(img_paths, annotation_dir):
418
+ # #return [os.path.join(annotation_dir, os.path.basename(img_path).replace(".jpg", ".txt")) for img_path in img_paths ]
419
+ # get_clip_id = lambda x: os.path.basename(x).split(".")[0].split("_")[1]
420
+ # get_frame_id = lambda x: str(int(os.path.basename(x).replace("frame", "").split(".")[0].split("_")[-1]) - 1).zfill(5) if annotation_dir.find("NPS") > -1 else str(int(os.path.basename(x).replace("frame", "").split(".")[0].split("_")[-1])).zfill(5)
421
+
422
+ # meta_paths =[ [str(Path(x).parent.parent), get_clip_id(x), get_frame_id(x)] for x in img_paths]
423
+ # #print(meta_paths[0])
424
+ # return [ os.path.join(annotation_dir, f"Clip_{clip_id}_{frame_id}.txt") for (directory, clip_id, frame_id) in meta_paths]
425
+ def img2label_paths(img_paths, annotation_dir):
426
+ label_paths = []
427
+ for img_path in img_paths:
428
+ # Extract video ID from path: .../002/_002_00000.jpg -> 002
429
+ video_id = Path(img_path).parent.name
430
+ # Extract frame ID: _002_00000.jpg -> 00000
431
+ frame_id = Path(img_path).stem.split('_')[-1]
432
+ # Construct label path
433
+ label_path = os.path.join(annotation_dir, video_id, f"_{video_id}_{frame_id}.txt")
434
+ label_paths.append(label_path)
435
+ return label_paths
436
+
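+ # Example mapping (illustrative, following the comments above): an image at
+ # '/data/images/002/_002_00017.jpg' with annotation_dir='/data/labels' maps to
+ # '/data/labels/002/_002_00017.txt'.
+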
437
+ import pickle
438
+ def get_video_length(video_root_path, split='train'):
439
+ video_id_length = {}
440
+ cache_file = os.path.join(video_root_path, f"video_length_dict_{split}.pkl")
441
+
442
+ if os.path.exists(cache_file):
443
+ with open(cache_file, "rb") as f: video_id_length = pickle.load(f)
444
+ else:
445
+ # Look in the correct split directory
446
+ videos_path = glob.glob(f"{video_root_path}/*")
447
+ assert len(videos_path) > 0, f"no videos found in {video_root_path}"
448
+
449
+ for video_path in videos_path:
450
+ video_id = os.path.basename(video_path).split(".")[0] # 001.mp4 -> 001
451
+ cap = cv2.VideoCapture(video_path)
452
+ n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
453
+ video_id_length[video_id] = n_frames
454
+ cap.release()
455
+
456
+ # Save cache with split suffix
457
+ with open(cache_file, "wb") as f: pickle.dump(video_id_length, f)
458
+
459
+ return video_id_length
460
+
461
+ class LoadClipsStream(Dataset):
462
+ # YOLOv5 train_loader/val_loader, loads clips and labels for training and validation
463
+ cache_version = 0.6 # dataset labels *.cache version
464
+
465
+ def __init__(self, path, annotation_path, video_root_path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
466
+ cache_images=False, single_cls=False, stride=32, pad=0.0, prefix='', is_training=True, num_frames=5):
467
+ self.img_size = img_size
468
+ self.augment = augment
469
+ self.hyp = hyp
470
+ self.image_weights = image_weights
471
+ self.rect = False if image_weights else rect
472
+ self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
473
+ self.mosaic_border = [-img_size // 2, -img_size // 2]
474
+ self.stride = stride
475
+ self.pad = pad
476
+ #self.path = path
477
+ self.is_training = is_training
478
+ self.frame_wise_aug = False
479
+ if self.hyp:
480
+ self.frame_wise_aug = int(self.hyp.get("frame_wise", 0)) == 1 # parenthesized: the bare ternary previously bound '== 1' to the else branch only
481
+ print(f"Frame wise augmentation set to {self.frame_wise_aug}")
482
+ self.video_root_path = video_root_path
483
+ #self.video_length_dict = get_video_length(self.video_root_path)
484
+ self.num_frames = num_frames #int(self.hyp['num_frames'])
485
+ self.skip_frames = int(self.hyp["skip_rate"]) if self.hyp else self.num_frames - 1
486
+ self.albumentations = None
487
+ self.video_cap = None
488
+ if augment:
489
+ self.albumentations = AlbumentationsTemporal(self.num_frames) if not self.frame_wise_aug else Albumentations()
490
+
491
+ def __len__(self):
492
+ #return sys.maxsize
493
+ #comment this & uncomment sys.maxsize
494
+ video_path = "Videos/Clip_50.mov"
495
+ cap = cv2.VideoCapture(video_path)
496
+ n = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
497
+ cap.release()
498
+ return n
499
+
500
+ def sample_temporal_frames_from_stream(self, num_of_frames):
501
+ #make streamable interface here, get num_of_frames
502
+ video_path = "Videos/Clip_50.mov"
503
+ self.video_cap = cv2.VideoCapture(video_path) if self.video_cap is None else self.video_cap
504
+ if not self.video_cap.isOpened():
505
+ raise FileNotFoundError(f"unable to open video file: {video_path}") # a bare return would make the caller unpack None
507
+ imgs, orig_shapes, resized_shapes = [], [], []
508
+ for i in range(num_of_frames):
509
+ ret, im = self.video_cap.read()
+ assert ret, f'failed to read frame {i} from {video_path}' # fail clearly instead of crashing on im.shape below
510
+ h0, w0 = im.shape[:2] # orig hw
511
+ r = self.img_size / max(h0, w0) # ratio
512
+ if r != 1: # if sizes are not equal
513
+ im = cv2.resize(im, (int(w0 * r), int(h0 * r)),
514
+ interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
515
+ imgs.append(im)
516
+ orig_shapes.append((h0, w0))
517
+ resized_shapes.append(im.shape[:2][::-1])#wh
518
+ resized_shapes = np.array(resized_shapes, dtype=int).reshape(-1, 2)
519
+ return imgs, orig_shapes, resized_shapes
520
+
521
+ def __del__(self):
522
+ if self.video_cap is not None:
523
+ self.video_cap.release()
524
+
525
+ def __getitem__(self, index):
526
+ #print(index, self.indices[index])
527
+
528
+ hyp = self.hyp
529
+ mosaic = self.mosaic and hyp is not None and random.random() < hyp['mosaic'] # unused in stream mode; guarded so hyp=None cannot crash
530
+
531
+ #do temporal sampling
532
+ imgs, orig_shapes, shapes = self.sample_temporal_frames_from_stream(self.num_frames)
533
+ (h0, w0) = orig_shapes[0]
534
+ w, h = shapes[0]
535
+ # # Letterbox
536
+ if self.rect:
537
+ # Sort by aspect ratio
538
+ s = shapes # wh
539
+ ar = s[:, 1] / s[:, 0] # aspect ratio
540
+ irect = ar.argsort()
541
+ imgs = [imgs[i] for i in irect]
542
+ shapes = s[irect] # wh
543
+
544
+ ar = ar[irect]
545
+
546
+ # Set training image shapes
547
+ shapes = [[1, 1]] # single batch
+ mini, maxi = ar.min(), ar.max()
+ if maxi < 1:
+ shapes[0] = [maxi, 1]
+ elif mini > 1:
+ shapes[0] = [1, 1 / mini]
555
+
556
+ batch_shapes = np.ceil(np.array(shapes) * self.img_size / self.stride + self.pad).astype(int) * self.stride
557
+ shape = batch_shapes[0] if self.rect else self.img_size # final letterboxed shape
558
+ imgs, ratio, pad = letterbox_temporal(imgs, shape, auto=False, scaleup=self.augment)
559
+ shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
560
+
561
+
562
+ img = np.stack(imgs, 0) # t x h X w X C
563
+ labels = np.zeros((0, len(imgs), 5), dtype=np.float32) # stream inference has no annotations; an empty array keeps the augment path below valid
564
+ if self.augment:
565
+ img, labels = random_perspective_temporal(img, labels,
566
+ degrees=hyp['degrees'],
567
+ translate=hyp['translate'],
568
+ scale=hyp['scale'],
569
+ shear=hyp['shear'],
570
+ perspective=hyp['perspective'], frame_wise_aug=self.frame_wise_aug)
571
+
572
+ #plot_images_temporal(img, [labels], fname=os.path.basename(temporal_frames_path[0]), n_batch=1, LOGGER=LOGGER)
573
+
574
+
575
+ t = len(imgs)
576
+ # Convert
577
+ img = [np.ascontiguousarray(img[ti].transpose((2, 0, 1))[::-1]) for ti in range(t)] # HWC to CHW, BGR to RGB
578
+ img = np.stack(img, axis=0)
579
+
580
+ return torch.from_numpy(img), shapes
581
+
582
+ @staticmethod
583
+ def collate_fn(batch):
584
+ img, shapes = zip(*batch) # transposed: B tuples of (T x C x H x W tensor, shapes)
585
+
586
+ T = img[0].shape[0]
587
+ new_shapes = []
588
+
589
+ for shape in shapes:
590
+ new_shapes += [shape for _ in range(T)]
591
+
592
+ shapes = tuple(new_shapes)
593
+
594
+ img = torch.stack(img, 0) #B X T X C X H X W -> B*TXCXHXW
595
+ B, T, C, H, W = img.shape
596
+
597
+ assert len(shapes) == B*T, f"in collate function: collected {len(shapes)} shapes for {B*T} images"
598
+ # B [n_i x T X 6]
599
+ #print(label)
600
+ img = img.reshape(B*T, C, H, W)
601
+
602
+ return img, shapes
603
+
604
+ @staticmethod
605
+ def collate_fn4(batch):
606
+ raise NotImplementedError("collate_fn4 is for quad training and has not been rewritten for temporal data")
608
+
609
+
610
+ class LoadClipsAndLabels(Dataset):
611
+ # YOLOv5 train_loader/val_loader, loads clips and labels for training and validation
612
+ cache_version = 0.6 # dataset labels *.cache version
613
+
614
+ def __init__(self, path, annotation_path, video_root_path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
615
+ cache_images=False, single_cls=False, stride=32, pad=0.0, prefix='', is_training=True, num_frames=5):
616
+ self.img_size = img_size
617
+ self.augment = augment
618
+ self.hyp = hyp
619
+ self.image_weights = image_weights
620
+ self.rect = False if image_weights else rect
621
+ self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
622
+ self.mosaic_border = [-img_size // 2, -img_size // 2]
623
+ self.stride = stride
624
+ self.path = path
625
+ self.is_training = is_training
626
+ self.frame_wise_aug = False
627
+ if self.hyp:
628
+ self.frame_wise_aug = int(self.hyp.get("frame_wise", 0)) == 1 # parenthesized: the bare ternary previously bound '== 1' to the else branch only
629
+ print(f"Frame wise augmentation set to {self.frame_wise_aug}")
630
+ self.video_root_path = video_root_path
631
+ self.video_length_dict = get_video_length(self.video_root_path)
632
+ self.num_frames = num_frames #int(self.hyp['num_frames'])
633
+ self.skip_frames = int(self.hyp["skip_rate"]) if self.hyp else self.num_frames - 1
634
+ self.albumentations = None
635
+ if augment:
636
+ self.albumentations = AlbumentationsTemporal(self.num_frames) if not self.frame_wise_aug else Albumentations()
637
+ self.annotation_path = annotation_path
638
+ #print(path, annotation_path)
639
+
640
+ # Check cache
641
+ cache_path = Path(annotation_path).with_suffix('.cache')
642
+ try:
643
+ cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
644
+ assert cache['version'] == self.cache_version # same version
645
+ #assert cache['hash'] == get_hash(self.label_files + self.img_files) # same hash
646
+ except Exception:
647
+ exists = False
648
+ if not exists:
649
+ try:
650
+ f = [] # image files
651
+ for p in path if isinstance(path, list) else [path]:
652
+ p = Path(p) # os-agnostic
653
+ if p.is_dir(): # dir
654
+ f += glob.glob(str(p / '**' / '*.*'), recursive=True)
655
+ # f = list(p.rglob('*.*')) # pathlib
656
+ elif p.is_file(): # file
657
+ with open(p) as t:
658
+ t = t.read().strip().splitlines()
659
+ parent = str(p.parent) + os.sep
660
+ f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
661
+ # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
662
+ else:
663
+ raise Exception(f'{prefix}{p} does not exist')
664
+ #logging.info("reached here")
665
+ self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
666
+ # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
667
+ assert self.img_files, f'{prefix}No images found'
668
+ except Exception as e:
669
+ raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
670
+
671
+ self.label_files = img2label_paths(self.img_files, self.annotation_path) # labels
672
+
673
+ if not exists:
674
+ cache, exists = self.cache_labels(cache_path, prefix), False # cache
675
+ # Display cache
676
+ nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
677
+ if exists:
678
+ d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
679
+ tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
680
+ if cache['msgs']:
681
+ logging.info('\n'.join(cache['msgs'])) # display warnings
682
+ assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
683
+
684
+ # Read cache
685
+ [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
686
+
687
+ labels, instances, shapes, self.segments = zip(*cache.values())
688
+ self.labels = list(labels)
689
+ self.instances = list(instances)
690
+ self.shapes = np.array(shapes, dtype=np.float64)
691
+ self.img_files = list(cache.keys())
692
+ self.label_files = img2label_paths(cache.keys(), self.annotation_path) # update
693
+
694
+ n = len(shapes) # number of images
695
+ bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
696
+ nb = bi[-1] + 1 # number of batches
697
+ self.batch = bi # batch index of image
698
+ self.n = n
699
+ self.indices = list(range(n))
700
+
701
+ # Update labels
702
+ include_class = [] # filter labels to include only these classes (optional)
703
+ include_class_array = np.array(include_class).reshape(1, -1)
704
+ for i, (label, segment, instance) in enumerate(zip(self.labels, self.segments, self.instances)):
705
+ if include_class:
706
+ j = (label[:, 0:1] == include_class_array).any(1)
707
+ self.labels[i] = label[j]
708
+ self.instances[i] = instance[j]
709
+ if segment:
710
+ self.segments[i] = segment[j]
711
+ if single_cls: # single-class training, merge all classes into 0
712
+ self.labels[i][:, 0] = 0
713
+ if segment:
714
+ self.segments[i][:, 0] = 0
715
+
716
+ # # Rectangular Training
717
+ assert len(self.img_files) == len(self.labels)
718
+ if self.rect:
719
+ # Sort by aspect ratio
720
+ s = self.shapes # wh
721
+ ar = s[:, 1] / s[:, 0] # aspect ratio
722
+ irect = ar.argsort()
723
+ self.img_files = [self.img_files[i] for i in irect]
724
+ self.labels = [self.labels[i] for i in irect]
725
+ self.instances = [self.instances[i] for i in irect]
726
+ self.label_files = [self.label_files[i] for i in irect]
727
+ self.shapes = s[irect] # wh
728
+
729
+ ar = ar[irect]
730
+
731
+ # Set training image shapes
732
+ shapes = [[1, 1]] * nb
733
+ for i in range(nb):
734
+ ari = ar[bi == i]
735
+ mini, maxi = ari.min(), ari.max()
736
+ if maxi < 1:
737
+ shapes[i] = [maxi, 1]
738
+ elif mini > 1:
739
+ shapes[i] = [1, 1 / mini]
740
+
741
+ self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
742
+
743
+
744
+
745
+ self.img_file_to_indices_mapping = {str(image_path):index for index, image_path in enumerate(self.img_files) }
746
+ if self.is_training and not self.rect:
747
+ print("Shuffling indices because training")
748
+ random.shuffle(self.indices)
749
+
750
+ # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
751
+ self.imgs, self.img_npy = [None] * n, [None] * n
752
+ if cache_images:
753
+ if cache_images == 'disk':
754
+ self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
755
+ self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
756
+ self.im_cache_dir.mkdir(parents=True, exist_ok=True)
757
+ gb = 0 # Gigabytes of cached images
758
+ self.img_hw0, self.img_hw = [None] * n, [None] * n
759
+ results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
760
+ pbar = tqdm(enumerate(results), total=n)
761
+ for i, x in pbar:
762
+ if cache_images == 'disk':
763
+ if not self.img_npy[i].exists():
764
+ np.save(self.img_npy[i].as_posix(), x[0])
765
+ gb += self.img_npy[i].stat().st_size
766
+ else:
767
+ self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
768
+ gb += self.imgs[i].nbytes
769
+ pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
770
+ pbar.close()
771
+
772
+
773
+ def cache_labels(self, path=Path('./labels.cache'), prefix='', ):
774
+ # Cache dataset labels, check images and read shapes
775
+ x = {} # dict
776
+ nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
777
+ desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... (training mode: {self.is_training})"
778
+ with Pool(NUM_THREADS) as pool:
779
+ pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
780
+ desc=desc, total=len(self.img_files))
781
+ for im_file, l, i, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
782
+ nm += nm_f
783
+ nf += nf_f
784
+ ne += ne_f
785
+ nc += nc_f
786
+ if im_file:
787
+ if self.is_training:
788
+ if nm_f == 0 and nf_f == 1 and ne_f == 0 and nc_f == 0:
789
+ x[im_file] = [l, i, shape, segments]
790
+ else:
791
+ #if nm_f == 0 and nf_f == 1 and ne_f == 0 and nc_f == 0:###Please remove later
792
+ x[im_file] = [l, i, shape, segments]
793
+ assert len(l) == len(i), f"len(labels) {len(l)} does not match len(instances) {len(i)} for image file {im_file}"
794
+ if msg:
795
+ msgs.append(msg)
796
+ pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
797
+
798
+ pbar.close()
799
+ if msgs:
800
+ logging.info('\n'.join(msgs))
801
+ if nf == 0:
802
+ logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
803
+ x['hash'] = get_hash(self.label_files + self.img_files)
804
+ x['results'] = nf, nm, ne, nc, len(self.img_files)
805
+ x['msgs'] = msgs # warnings
806
+ x['version'] = self.cache_version # cache version
807
+ try:
808
+ np.save(path, x) # save cache for next time
809
+ path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
810
+ logging.info(f'{prefix}New cache created: {path}')
811
+ except Exception as e:
812
+ logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable
813
+ return x
814
+
815
+ def __len__(self):
816
+ return len(self.img_files)
817
+
818
+ def get_video_length(self, index):
819
+ img_file = self.img_files[index]
820
+ video_id = Path(img_file).parent.name # Extract from parent directory
821
+ return int(self.video_length_dict[video_id])
822
+
823
+ def get_temporal_labels(self, temporal_indices):
824
+
825
+ instances = []
826
+ for tii in temporal_indices:
827
+ if tii > -1:
828
+ if self.instances[tii].shape[0] > 0: #if not null labels
829
+ instances += self.instances[tii].tolist()
830
+
831
+ instances = np.sort(np.unique(np.array(instances)))
832
+ instance_normalize_dict = {int(i):int(ii) for ii, i in enumerate(instances)} #map n instance number to local 0, 1, 2
833
+ #print(f"temporal labels {[self.labels[tindex] for tindex in temporal_indices if tindex > -1]}, instances {instances} paths {[self.label_files[tindex] for tindex in temporal_indices if tindex > -1]}")
834
+ #LOGGER.info(f"Temporal Label : {temporal_indices}, instanc dict {instance_normalize_dict}, {instances}")
835
+ n_instance = len(instance_normalize_dict)
836
+ labels = np.zeros((n_instance, self.num_frames, 5), dtype=np.float32)
837
+ if n_instance == 0:
838
+ return labels
839
+ for tii, tindex in enumerate(temporal_indices):
840
+ if tindex > -1:
841
+ if self.labels[tindex].shape[0] > 0:
842
+ assert self.labels[tindex].shape[0] == self.instances[tindex].shape[0], f"in label sampling, labels shape {self.labels[tindex].shape} does not match instances shape {self.instances[tindex].shape}"
843
+ instances_id = np.array([instance_normalize_dict[int(instance)] for instance in self.instances[tindex]])
844
+ labels[instances_id, tii] = self.labels[tindex]
845
+
846
+ return labels
847
+
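+ # Worked example (illustrative): if the sampled frames carry instance ids
+ # {3, 7} and {7}, then instances -> [3, 7] and instance_normalize_dict ->
+ # {3: 0, 7: 1}; the returned (2, num_frames, 5) array keeps one fixed row per
+ # track across time, with all-zero rows where an instance is absent from a frame.
+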
848
+ def sample_temporal_frames(self, index):
849
+ # n_frames = self.get_video_length(index)
850
+ # current_frame_id = int(float(os.path.basename(self.img_files[index]).split(".")[0].split("_")[-1]))
851
+ n_frames = self.get_video_length(index)
852
+ img_file = self.img_files[index]
853
+
854
+ # Extract video ID: .../002/_002_00000.jpg -> 002
855
+ video_id = Path(img_file).parent.name
856
+ # Extract current frame ID: _002_00000.jpg -> 00000
857
+ current_frame_id = int(Path(img_file).stem.split('_')[-1])
858
+
859
+ clip_id = int(os.path.basename(self.img_files[index]).split(".")[0].split("_")[1])
860
+ skip_frames = np.random.randint(self.skip_frames+1) if self.is_training else 0 #generate random skip rate
861
+ #skip_frames = self.skip_frames #if self.is_training else 0
862
+ max_sample_window = (skip_frames + 1)*(self.num_frames -1) + 1
863
+ sample_frame_ids = None
864
+ if current_frame_id >= max_sample_window+1:
865
+ #sample from current_frame_id-sample_window to current_frame_id
866
+ #Random Sampling
867
+ #range_to_pick_from = np.arange(current_frame_id - max_sample_window + 1, current_frame_id)
868
+ #sample_frame_ids = sorted(np.random.choice(range_to_pick_from, size=self.num_frames-1, replace=False).tolist()) + [current_frame_id]
869
+ #Fix Skip rate with uniform sampling
870
+ sample_frame_ids = [ i for i in range(current_frame_id - max_sample_window+1, current_frame_id+1, skip_frames + 1)]
871
+
872
+ else: #(n_frames - current_frame_id + 1) >= max_sample_window
873
+ #sample from current_frame_id to current_frame_id + sample_window
874
+ #range_to_pick_from = np.arange(current_frame_id + 1, current_frame_id+max_sample_window+1)
875
+ #sample_frame_ids = [current_frame_id] + sorted(np.random.choice(range_to_pick_from, size=self.num_frames-1, replace=False).tolist())
876
+ #Fix Skip rate with uniform sampling
877
+ sample_frame_ids = [ i for i in range(current_frame_id, current_frame_id+max_sample_window, skip_frames+1)]
878
+ # image_file_parent_path = Path(self.img_files[index]).parents[0]
879
+ # sample_frame_paths = [str(Path.joinpath(image_file_parent_path, f"Clip_{clip_id}_{str(sample_frame_id).zfill(5)}.jpg")) for sample_frame_id in sample_frame_ids]
880
+ # #print(f"Sampled frame ids {sample_frame_ids}, principal frame id {current_frame_id}, clip id {clip_id}")
881
+ image_file_parent_path = Path(img_file).parent
882
+ sample_frame_paths = [
883
+ str(image_file_parent_path / f"_{video_id}_{str(frame_id).zfill(5)}.jpg")
884
+ for frame_id in sample_frame_ids
885
+ ]
886
+ sample_frame_ids = [self.img_file_to_indices_mapping[str(img_file_path)] if str(img_file_path) in self.img_file_to_indices_mapping else -1 for img_file_path in sample_frame_paths ]
887
+ assert self.img_files[index] in sample_frame_paths, f"temporal sampling: principal key frame {self.img_files[index]} missing from {sample_frame_paths}, total frames {n_frames}"
888
+
889
+ return sample_frame_paths, sample_frame_ids
890
+
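+ # Worked example (illustrative): with num_frames=5 and a drawn skip_frames=1,
+ # max_sample_window = (1 + 1) * (5 - 1) + 1 = 9. A key frame id of 20 takes the
+ # backward branch and samples [12, 14, 16, 18, 20]; a key frame id of 3 takes
+ # the forward branch and samples [3, 5, 7, 9, 11]. At eval time skip_frames=0,
+ # so the clip is always num_frames consecutive frames anchored at the key frame.
+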
891
+ def __getitem__(self, index):
892
+ #print(index, self.indices[index])
893
+ index = self.indices[index] # linear, shuffled, or image_weights
894
+ temporal_frames_path = None
895
+
896
+ hyp = self.hyp
897
+ mosaic = self.mosaic and random.random() < hyp['mosaic']
898
+ if mosaic:
899
+ # Load mosaic
900
+ img, labels, temporal_frames_path = load_mosaic_temporal(self, index, if_return_frame_paths=True, do_plot=False) #image is txhxwxc, labels are nxtx5
901
+ shapes = None
902
+
903
+ # MixUp augmentation
904
+ if random.random() < hyp['mixup']:
905
+ img, labels = mixup_temporal(img, labels, *load_mosaic_temporal(self, random.randint(0, self.n - 1), if_return_frame_paths=False, do_plot=False), self.frame_wise_aug)
906
+ #img, labels = mixup_drones(img, labels, *load_mosaic_temporal(self, random.randint(0, self.n - 1), if_return_frame_paths=False, do_plot=False))
907
+ if random.random() < hyp['double_mixup']:
908
+ if random.random() < hyp['mixup']:
909
+ img, labels = mixup_drones(img, labels, *load_mosaic_temporal(self, random.randint(0, self.n - 1), if_return_frame_paths=False, do_plot=False))
910
+ assert temporal_frames_path is not None, f"temporal frame paths are None with mosaic, index {index}"
911
+ else:
912
+
913
+ #do temporal sampling
914
+ temporal_frames_path, temporal_indices = self.sample_temporal_frames(index)
915
+ assert temporal_frames_path is not None, f"temporal frame paths are None without mosaic, index {index}"
916
+ # Load image
917
+ imgs = []
918
+
919
+ for frame_path in temporal_frames_path:
920
+ img, (h0, w0), (h, w) = load_image_by_path(self, frame_path)
921
+ imgs.append(img)
922
+
923
+ # # Letterbox
924
+ shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
925
+ imgs, ratio, pad = letterbox_temporal(imgs, shape, auto=False, scaleup=self.augment)
926
+ shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
927
+
928
+ temporal_labels = self.get_temporal_labels(temporal_indices) # n X T x 5
929
+
930
+ n_ins, t, enddim = temporal_labels.shape
931
+ # labels = self.labels[index].copy()
932
+ if temporal_labels.size: # normalized xywh to pixel xyxy format
933
+ temporal_labels = temporal_labels.reshape(n_ins*t, enddim)
934
+ temporal_labels[ :, 1:] = xywhn2xyxy(temporal_labels[ :, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
935
+ temporal_labels = temporal_labels.reshape(n_ins, t, enddim)
936
+ img = np.stack(imgs, 0) # t x h X w X C
937
+ labels = temporal_labels
938
+ if self.augment:
939
+ img, labels = random_perspective_temporal(img, labels,
940
+ degrees=hyp['degrees'],
941
+ translate=hyp['translate'],
942
+ scale=hyp['scale'],
943
+ shear=hyp['shear'],
944
+ perspective=hyp['perspective'], frame_wise_aug=self.frame_wise_aug)
945
+
946
+ #plot_images_temporal(img, [labels], fname=os.path.basename(temporal_frames_path[0]), n_batch=1, LOGGER=LOGGER)
947
+
948
+
949
+ n_ins, t, enddim = labels.shape
950
+
951
+ nl = len(labels) # number of labels
952
+ if nl:
953
+ labels = labels.reshape(n_ins*t, enddim)
954
+ labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[2], h=img.shape[1], clip=True, eps=1E-3)
955
+ labels = labels.reshape(n_ins, t, enddim)
956
+ if self.augment:
957
+ # Albumentations
958
+ labels = labels.reshape(n_ins*t, enddim)
959
+ if self.frame_wise_aug:
960
+ for ti in range(t):
961
+ img[ti], labels = self.albumentations(img[ti], labels)
962
+ else:
963
+ img, labels = self.albumentations(img, labels)
964
+ labels = labels.reshape(-1, t, enddim)
965
+ nl = len(labels) # update after albumentations
966
+
967
+ # HSV color-space
968
+ augment_hsv_temporal(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'], frame_wise_aug=self.frame_wise_aug)
969
+
970
+ # Flip up-down
971
+ if random.random() < hyp['flipud']:
972
+ for ti in range(t):
973
+ img[ti] = np.flipud(img[ti])
974
+
975
+ if nl:
976
+ for ti in range(t):
977
+ labels[:, ti, 2] = 1 - labels[:, ti, 2]
978
+
979
+ # Flip left-right
980
+ if random.random() < hyp['fliplr']:
981
+
982
+ for ti in range(t):
983
+ img[ti] = np.fliplr(img[ti])
984
+ if nl:
985
+ for ti in range(t):
986
+ labels[:, ti, 1] = 1 - labels[:, ti, 1]
987
+
988
+
989
+ # Cutouts
990
+ # labels = cutout(img, labels, p=0.5)
991
+
992
+ labels_out = torch.zeros((nl, t, 6))
993
+ if nl:
994
+ labels_out[:, :, 1:] = torch.from_numpy(labels)
995
+
996
+ # Convert
997
+ img = [np.ascontiguousarray(img[ti].transpose((2, 0, 1))[::-1]) for ti in range(t)] # HWC to CHW, BGR to RGB
998
+ img = np.stack(img, axis=0)
999
+
1000
+ main_frame_path = os.path.basename(self.img_files[index])
1001
+ main_frameid_id = -1
1002
+ for tii, tfp in enumerate(temporal_frames_path):
1003
+ if os.path.basename(tfp) == main_frame_path:
1004
+ main_frameid_id = tii
1005
+ break
1006
+ assert main_frameid_id > -1, print(f"In data loader, couldn't find main image path {main_frame_path}, temporal paths {temporal_frames_path} ")
1007
+ label_paths = [self.label_files[self.img_file_to_indices_mapping[tfp]] if tfp in self.img_file_to_indices_mapping else 0 for tfp in temporal_frames_path ]
1008
+ return torch.from_numpy(img), labels_out, temporal_frames_path, shapes, main_frameid_id, label_paths
1009
+
1010
+ @staticmethod
1011
+ def collate_fn(batch):
1012
+ img, label, path, shapes, main_frameid_ids, label_paths = zip(*batch) # transposed; label -> B [n x T x 6]
1013
+
1014
+ main_frame_ids = [] # note the principal frame id (relative to the batch) around which the temporal sample is generated
1015
+ for i, l in enumerate(label):
1016
+ t = l.shape[1]
1017
+ for ti in range(t):
1018
+ if main_frameid_ids[i] == ti: main_frame_ids.append((i * t) + ti)
1019
+ l[:, ti, 0] = (i*t) + ti # add target image index for build_targets()
1020
+
1021
+ T = img[0].shape[0]
1022
+ new_paths, new_shapes = [], []
1023
+
1024
+ for shape in shapes:
1025
+ new_shapes += [shape for _ in range(T)]
1026
+
1027
+ for path_temporal in path:
1028
+ new_paths += [p_ for p_ in path_temporal]
1029
+
1030
+ path = tuple(new_paths)
1031
+ shapes = tuple(new_shapes)
1032
+
1033
+ img = torch.stack(img, 0) # B x T x C x H x W (flattened to B*T x C x H x W below)
1034
+ B, T, C, H, W = img.shape
1035
+ assert len(main_frame_ids) == B, f"in collate function, len(main_frame_ids) {len(main_frame_ids)} must match outer batch size {B}"
1036
+ assert len(shapes) == B*T, f"in collate function, collected shapes {len(shapes)} must match images collected {B*T}"
1037
+ assert len(path) == B*T, f"in collate function, collected paths {len(path)} must match images collected {B*T}"
1038
+ assert len(label) == B, f"in collate function, collected labels {len(label)} must match images collected {B}"
1039
+ # B [n_i x T X 6]
1040
+ #print(label)
1041
+ img = img.reshape(B*T, C, H, W)
1042
+ label = torch.cat(label, 0)
1043
+ label = label.reshape(label.shape[0]*T, 6)
1044
+ # previous_len = label.shape[0]
1045
+ # label = [l for l in label if xywhn2xyxy(l[2:].reshape(-1, 4), img[int(l[0])].shape[-1], img[int(l[0])].shape[-2]).any()]
1046
+ # if len(label) != previous_len:
1047
+ # print("removed empty targets")
1048
+ # label = torch.cat(label, 0).reshape(-1, 6) if len(label) > 0 else torch.zeros((0, 6))
1049
+
1050
+ #print(label[:, 0], B*T)
1051
+
1052
+ new_label_paths = []
1053
+ for label_path_set in label_paths:
1054
+ new_label_paths += label_path_set
1055
+ return img, label, path, shapes, main_frame_ids, new_label_paths
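A minimal sketch (editor's illustration, not repository code) of the flattening this collate performs, assuming toy sizes of B=2 clips with T=3 frames each:

    # Hedged sketch: the temporal collate stacks B clips of T frames into one
    # B*T image batch and writes a flat frame index into column 0 of each label.
    import torch
    B, T, C, H, W = 2, 3, 3, 64, 64                      # toy sizes (assumption)
    imgs = [torch.zeros(T, C, H, W) for _ in range(B)]   # per-sample frame stacks
    labels = [torch.zeros(4, T, 6) for _ in range(B)]    # 4 instances per sample
    for i, l in enumerate(labels):
        for ti in range(T):
            l[:, ti, 0] = i * T + ti                     # index into the flat batch
    batch = torch.stack(imgs, 0).reshape(B * T, C, H, W)  # (6, 3, 64, 64)
    targets = torch.cat(labels, 0).reshape(-1, 6)         # (B*4*T, 6) = (24, 6)
    print(batch.shape, targets.shape)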
1056
+ @staticmethod
1057
+ def collate_fn4(batch):
1058
+ print("shouldn't come here, this collate function is for quad training & haven't been rewritten for temporal")
1059
+ pass
1060
+
1061
+
1062
+ class LoadImagesAndLabels(Dataset):
1063
+ # YOLOv5 train_loader/val_loader, loads images and labels for training and validation
1064
+ cache_version = 0.6 # dataset labels *.cache version
1065
+
1066
+ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
1067
+ cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
1068
+ self.img_size = img_size
1069
+ self.augment = augment
1070
+ self.hyp = hyp
1071
+ self.image_weights = image_weights
1072
+ self.rect = False if image_weights else rect
1073
+ self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
1074
+ self.mosaic_border = [-img_size // 2, -img_size // 2]
1075
+ self.stride = stride
1076
+ self.path = path
1077
+ self.albumentations = Albumentations() if augment else None
1078
+
1079
+ try:
1080
+ f = [] # image files
1081
+ for p in path if isinstance(path, list) else [path]:
1082
+ p = Path(p) # os-agnostic
1083
+ if p.is_dir(): # dir
1084
+ f += glob.glob(str(p / '**' / '*.*'), recursive=True)
1085
+ # f = list(p.rglob('*.*')) # pathlib
1086
+ elif p.is_file(): # file
1087
+ with open(p) as t:
1088
+ t = t.read().strip().splitlines()
1089
+ parent = str(p.parent) + os.sep
1090
+ f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
1091
+ # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
1092
+ else:
1093
+ raise Exception(f'{prefix}{p} does not exist')
1094
+ self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
1095
+ # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
1096
+ assert self.img_files, f'{prefix}No images found'
1097
+ except Exception as e:
1098
+ raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
1099
+
1100
+ # Check cache
1101
+ self.label_files = img2label_paths(self.img_files) # labels
1102
+ cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
1103
+ try:
1104
+ cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
1105
+ assert cache['version'] == self.cache_version # same version
1106
+ assert cache['hash'] == get_hash(self.label_files + self.img_files) # same hash
1107
+ except:
1108
+ cache, exists = self.cache_labels(cache_path, prefix), False # cache
1109
+
1110
+ # Display cache
1111
+ nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
1112
+ if exists:
1113
+ d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
1114
+ tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
1115
+ if cache['msgs']:
1116
+ logging.info('\n'.join(cache['msgs'])) # display warnings
1117
+ assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
1118
+
1119
+ # Read cache
1120
+ [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
1121
+ labels, shapes, self.segments = zip(*cache.values())
1122
+ self.labels = list(labels)
1123
+ self.shapes = np.array(shapes, dtype=np.float64)
1124
+ self.img_files = list(cache.keys()) # update
1125
+ self.label_files = img2label_paths(cache.keys()) # update
1126
+ n = len(shapes) # number of images
1127
+ bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index
1128
+ nb = bi[-1] + 1 # number of batches
1129
+ self.batch = bi # batch index of image
1130
+ self.n = n
1131
+ self.indices = range(n)
1132
+
1133
+ # Update labels
1134
+ include_class = [] # filter labels to include only these classes (optional)
1135
+ include_class_array = np.array(include_class).reshape(1, -1)
1136
+ for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
1137
+ if include_class:
1138
+ j = (label[:, 0:1] == include_class_array).any(1)
1139
+ self.labels[i] = label[j]
1140
+ if segment:
1141
+ self.segments[i] = segment[j]
1142
+ if single_cls: # single-class training, merge all classes into 0
1143
+ self.labels[i][:, 0] = 0
1144
+ if segment:
1145
+ self.segments[i][:, 0] = 0
1146
+
1147
+ # Rectangular Training
1148
+ if self.rect:
1149
+ # Sort by aspect ratio
1150
+ s = self.shapes # wh
1151
+ ar = s[:, 1] / s[:, 0] # aspect ratio
1152
+ irect = ar.argsort()
1153
+ self.img_files = [self.img_files[i] for i in irect]
1154
+ self.label_files = [self.label_files[i] for i in irect]
1155
+ self.labels = [self.labels[i] for i in irect]
1156
+ self.shapes = s[irect] # wh
1157
+ ar = ar[irect]
1158
+
1159
+ # Set training image shapes
1160
+ shapes = [[1, 1]] * nb
1161
+ for i in range(nb):
1162
+ ari = ar[bi == i]
1163
+ mini, maxi = ari.min(), ari.max()
1164
+ if maxi < 1:
1165
+ shapes[i] = [maxi, 1]
1166
+ elif mini > 1:
1167
+ shapes[i] = [1, 1 / mini]
1168
+
1169
+ self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
1170
+
1171
+ # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
1172
+ self.imgs, self.img_npy = [None] * n, [None] * n
1173
+ if cache_images:
1174
+ if cache_images == 'disk':
1175
+ self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
1176
+ self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
1177
+ self.im_cache_dir.mkdir(parents=True, exist_ok=True)
1178
+ gb = 0 # Gigabytes of cached images
1179
+ self.img_hw0, self.img_hw = [None] * n, [None] * n
1180
+ results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
1181
+ pbar = tqdm(enumerate(results), total=n)
1182
+ for i, x in pbar:
1183
+ if cache_images == 'disk':
1184
+ if not self.img_npy[i].exists():
1185
+ np.save(self.img_npy[i].as_posix(), x[0])
1186
+ gb += self.img_npy[i].stat().st_size
1187
+ else:
1188
+ self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
1189
+ gb += self.imgs[i].nbytes
1190
+ pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
1191
+ pbar.close()
1192
+
1193
+ def cache_labels(self, path=Path('./labels.cache'), prefix=''):
1194
+ # Cache dataset labels, check images and read shapes
1195
+ x = {} # dict
1196
+ nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
1197
+ desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
1198
+ with Pool(NUM_THREADS) as pool:
1199
+ pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
1200
+ desc=desc, total=len(self.img_files))
1201
+ for im_file, l, i, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:  # verify_image_label in this fork also returns instance indices i
1202
+ nm += nm_f
1203
+ nf += nf_f
1204
+ ne += ne_f
1205
+ nc += nc_f
1206
+ if im_file:
1207
+ x[im_file] = [l, shape, segments]
1208
+ if msg:
1209
+ msgs.append(msg)
1210
+ pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
1211
+
1212
+ pbar.close()
1213
+ if msgs:
1214
+ logging.info('\n'.join(msgs))
1215
+ if nf == 0:
1216
+ logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
1217
+ x['hash'] = get_hash(self.label_files + self.img_files)
1218
+ x['results'] = nf, nm, ne, nc, len(self.img_files)
1219
+ x['msgs'] = msgs # warnings
1220
+ x['version'] = self.cache_version # cache version
1221
+ try:
1222
+ np.save(path, x) # save cache for next time
1223
+ path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
1224
+ logging.info(f'{prefix}New cache created: {path}')
1225
+ except Exception as e:
1226
+ logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable
1227
+ return x
1228
+
1229
+ def __len__(self):
1230
+ return len(self.img_files)
1231
+
1232
+ # def __iter__(self):
1233
+ # self.count = -1
1234
+ # print('ran dataset iter')
1235
+ # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
1236
+ # return self
1237
+
1238
+ def __getitem__(self, index):
1239
+ index = self.indices[index] # linear, shuffled, or image_weights
1240
+
1241
+ hyp = self.hyp
1242
+ mosaic = self.mosaic and random.random() < hyp['mosaic']
1243
+ if mosaic:
1244
+ # Load mosaic
1245
+ img, labels = load_mosaic(self, index)
1246
+ shapes = None
1247
+
1248
+ # MixUp augmentation
1249
+ if random.random() < hyp['mixup']:
1250
+ img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))
1251
+
1252
+ else:
1253
+ # Load image
1254
+ img, (h0, w0), (h, w) = load_image(self, index)
1255
+
1256
+ # Letterbox
1257
+ shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
1258
+ img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
1259
+ shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
1260
+
1261
+ labels = self.labels[index].copy()
1262
+ if labels.size: # normalized xywh to pixel xyxy format
1263
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
1264
+
1265
+ if self.augment:
1266
+ img, labels = random_perspective(img, labels,
1267
+ degrees=hyp['degrees'],
1268
+ translate=hyp['translate'],
1269
+ scale=hyp['scale'],
1270
+ shear=hyp['shear'],
1271
+ perspective=hyp['perspective'])
1272
+
1273
+ nl = len(labels) # number of labels
1274
+ if nl:
1275
+ labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
1276
+
1277
+ if self.augment:
1278
+ # Albumentations
1279
+ img, labels = self.albumentations(img, labels)
1280
+ nl = len(labels) # update after albumentations
1281
+
1282
+ # HSV color-space
1283
+ augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
1284
+
1285
+ # Flip up-down
1286
+ if random.random() < hyp['flipud']:
1287
+ img = np.flipud(img)
1288
+ if nl:
1289
+ labels[:, 2] = 1 - labels[:, 2]
1290
+
1291
+ # Flip left-right
1292
+ if random.random() < hyp['fliplr']:
1293
+ img = np.fliplr(img)
1294
+ if nl:
1295
+ labels[:, 1] = 1 - labels[:, 1]
1296
+
1297
+ # Cutouts
1298
+ # labels = cutout(img, labels, p=0.5)
1299
+
1300
+ labels_out = torch.zeros((nl, 6))
1301
+ if nl:
1302
+ labels_out[:, 1:] = torch.from_numpy(labels)
1303
+
1304
+ # Convert
1305
+ img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
1306
+ img = np.ascontiguousarray(img)
1307
+
1308
+ return torch.from_numpy(img), labels_out, self.img_files[index], shapes
1309
+
1310
+ @staticmethod
1311
+ def collate_fn(batch):
1312
+ img, label, path, shapes = zip(*batch) # transposed
1313
+ for i, l in enumerate(label):
1314
+ l[:, 0] = i # add target image index for build_targets()
1315
+ return torch.stack(img, 0), torch.cat(label, 0), path, shapes
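For context, a hedged sketch of attaching this collate to a PyTorch DataLoader; the image directory and batch size are placeholder assumptions:

    # Hedged sketch: wiring LoadImagesAndLabels.collate_fn into a DataLoader.
    # 'data/images' is a placeholder path, not a path from this repository.
    from torch.utils.data import DataLoader
    dataset = LoadImagesAndLabels('data/images', img_size=640, batch_size=16)
    loader = DataLoader(dataset, batch_size=16, shuffle=True,
                        collate_fn=LoadImagesAndLabels.collate_fn)
    for imgs, targets, paths, shapes in loader:
        # imgs: (B, 3, H, W); targets: (N, 6) = (image_idx, cls, x, y, w, h)
        break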
1316
+
1317
+ @staticmethod
1318
+ def collate_fn4(batch):
1319
+ img, label, path, shapes = zip(*batch) # transposed
1320
+ n = len(shapes) // 4
1321
+ img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
1322
+
1323
+ ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
1324
+ wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
1325
+ s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale
1326
+ for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
1327
+ i *= 4
1328
+ if random.random() < 0.5:
1329
+ im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[
1330
+ 0].type(img[i].type())
1331
+ l = label[i]
1332
+ else:
1333
+ im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
1334
+ l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
1335
+ img4.append(im)
1336
+ label4.append(l)
1337
+
1338
+ for i, l in enumerate(label4):
1339
+ l[:, 0] = i # add target image index for build_targets()
1340
+
1341
+ return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
1342
+
1343
+
1344
+ # Ancillary functions --------------------------------------------------------------------------------------------------
1345
+
1346
+ def load_image_by_path(self, path:str):
1347
+
1348
+ # path adjustment to run on Akash's PC
1349
+ # parts = path.split("/")
1350
+ # parts.insert(2, "aakashkumar")
1351
+ # path = "/".join(parts)
1352
+
1353
+ im = cv2.imread(path) # BGR
1354
+ assert im is not None, f'Image Not Found {path}'
1355
+ h0, w0 = im.shape[:2] # orig hw
1356
+ r = self.img_size / max(h0, w0) # ratio
1357
+ if r != 1: # if sizes are not equal
1358
+ im = cv2.resize(im, (int(w0 * r), int(h0 * r)),
1359
+ interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
1360
+ return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
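A quick worked example of the long-side resize above (illustrative numbers only):

    # Hedged example: a 1080x1920 (h0 x w0) frame with img_size=640 gives
    # r = 640/1920 = 1/3, so the frame is resized to 640x360 (w x h).
    h0, w0, img_size = 1080, 1920, 640
    r = img_size / max(h0, w0)
    print(int(w0 * r), int(h0 * r))  # 640 360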
1361
+
1362
+ def load_image(self, i:int):
1363
+ # loads 1 image from dataset index 'i', returns im, original hw, resized hw
1364
+ im = self.imgs[i]
1365
+ if im is None: # not cached in ram
1366
+ npy = self.img_npy[i]
1367
+ if npy and npy.exists(): # load npy
1368
+ im = np.load(npy)
1369
+ else: # read image
1370
+ path = self.img_files[i]
1371
+ im = cv2.imread(path) # BGR
1372
+ assert im is not None, f'Image Not Found {path}'
1373
+ h0, w0 = im.shape[:2] # orig hw
1374
+ r = self.img_size / max(h0, w0) # ratio
1375
+ if r != 1: # if sizes are not equal
1376
+ im = cv2.resize(im, (int(w0 * r), int(h0 * r)),
1377
+ interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
1378
+ return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
1379
+ else:
1380
+ return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized
1381
+
1382
+
1383
+ def load_mosaic_temporal(self, index, if_return_frame_paths=False, do_plot=False):
1384
+ labels4, segments4 = [], []
1385
+ s = self.img_size
1386
+ main_temporal_frame_paths = None
1387
+ yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
1388
+ indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
1389
+ random.shuffle(indices)
1390
+ mainindex = index
1391
+ #temporal_frames_per_indices = [self.sample_temporal_frames(index) for index in indices]
1392
+ for i, index in enumerate(indices):
1393
+ # Load image
1394
+ temporal_frame_paths, temporal_frame_indices = self.sample_temporal_frames(index)
1395
+ main_temporal_frame_paths = temporal_frame_paths if index == mainindex else main_temporal_frame_paths
1396
+ temporal_images = [load_image_by_path(self, frame_path)[0] for frame_path in temporal_frame_paths]
1397
+ (h, w, c) = temporal_images[0].shape[:3]
1398
+ num_frames = self.num_frames
1399
+ temporal_images = np.stack(temporal_images, axis=0).reshape(-1, h, w, c)
1400
+ #img, _, (h, w) = load_image(self, index)
1401
+
1402
+ # place img in img4
1403
+ if i == 0: # top left
1404
+ img4 = np.full((num_frames, s * 2, s * 2, c), 114, dtype=np.uint8) # base image with 4 tiles
1405
+ x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
1406
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
1407
+ elif i == 1: # top right
1408
+ x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
1409
+ x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
1410
+ elif i == 2: # bottom left
1411
+ x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
1412
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
1413
+ elif i == 3: # bottom right
1414
+ x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
1415
+ x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
1416
+
1417
+ img4[:, y1a:y2a, x1a:x2a] = temporal_images[:, y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
1418
+ padw = x1a - x1b
1419
+ padh = y1a - y1b
1420
+
1421
+ # Labels
1422
+ temporal_labels = self.get_temporal_labels(temporal_frame_indices)
1423
+ segments = self.segments[index]
1424
+ #print(f"{temporal_labels}, {temporal_frame_paths}")
1425
+
1426
+ #
1427
+ if temporal_labels.size:
1428
+ n_ins, t, enddim = temporal_labels.shape
1429
+ temporal_labels = temporal_labels.reshape(n_ins*t, enddim)
1430
+ temporal_labels[:, 1:] = xywhn2xyxy(temporal_labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
1431
+ segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
1432
+ temporal_labels = temporal_labels.reshape(n_ins, t, enddim)
1433
+
1434
+ labels4.append(temporal_labels)
1435
+ segments4.extend(segments)
1436
+
1437
+
1438
+ # Concat/clip labels
1439
+ labels4 = np.concatenate(labels4, 0).reshape(-1, self.num_frames, 5)
1440
+ for x in (labels4[:, :, 1:], *segments4):
1441
+ np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
1442
+ # img4, labels4 = replicate(img4, labels4) # replicate
1443
+
1444
+ # Augment
1445
+ img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) # no need to rewrite; it won't be applied since it depends on segment length
1446
+ img4, labels4 = random_perspective_temporal(img4, labels4, segments4,
1447
+ degrees=self.hyp['degrees'],
1448
+ translate=self.hyp['translate'],
1449
+ scale=self.hyp['scale'],
1450
+ shear=self.hyp['shear'],
1451
+ perspective=self.hyp['perspective'],
1452
+ border=self.mosaic_border,
1453
+ frame_wise_aug=self.frame_wise_aug
1454
+ ) # border to remove
1455
+ if do_plot:
1456
+ plot_images_temporal(img4, [labels4], fname="dataloadingimage.jpg", n_batch=1, LOGGER=LOGGER)
1457
+ drawing = Annotator(img4[0], line_width=2)
1458
+ for box in labels4[:, 0, 1:]:
1459
+ drawing.box_label(box, color=(255, 0, 0))
1460
+ cv2.imwrite("mosaic_0.jpg" , drawing.im )
1461
+ exit()
1462
+ # exit()
1463
+
1464
+ #
1465
+ if if_return_frame_paths:
1466
+ return img4, labels4, main_temporal_frame_paths
1467
+ else:
1468
+ return img4, labels4
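A hedged shape sketch of the canvas built above: one 2s x 2s mosaic grid shared across all T frames of each tile (toy sizes, not repository values):

    # Hedged sketch: the temporal mosaic pastes the same spatial region of every
    # frame of a clip into a single (T, 2s, 2s, C) canvas.
    import numpy as np
    T, s, C = 3, 640, 3
    img4 = np.full((T, s * 2, s * 2, C), 114, dtype=np.uint8)  # gray canvas
    clip = np.zeros((T, 480, 640, C), dtype=np.uint8)          # one loaded clip
    img4[:, 0:480, 0:640] = clip                               # top-left tile
    print(img4.shape)  # (3, 1280, 1280, 3)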
1469
+
1470
+ def load_mosaic(self, index):
1471
+ # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
1472
+ labels4, segments4 = [], []
1473
+ s = self.img_size
1474
+ yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
1475
+ indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
1476
+ random.shuffle(indices)
1477
+ for i, index in enumerate(indices):
1478
+ # Load image
1479
+ img, _, (h, w) = load_image(self, index)
1480
+
1481
+ # place img in img4
1482
+ if i == 0: # top left
1483
+ img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
1484
+ x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
1485
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
1486
+ elif i == 1: # top right
1487
+ x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
1488
+ x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
1489
+ elif i == 2: # bottom left
1490
+ x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
1491
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
1492
+ elif i == 3: # bottom right
1493
+ x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
1494
+ x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
1495
+
1496
+ img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
1497
+ padw = x1a - x1b
1498
+ padh = y1a - y1b
1499
+
1500
+ # Labels
1501
+ labels, segments = self.labels[index].copy(), self.segments[index].copy()
1502
+ if labels.size:
1503
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
1504
+ segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
1505
+ labels4.append(labels)
1506
+ segments4.extend(segments)
1507
+
1508
+ # Concat/clip labels
1509
+ labels4 = np.concatenate(labels4, 0)
1510
+ for x in (labels4[:, 1:], *segments4):
1511
+ np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
1512
+ # img4, labels4 = replicate(img4, labels4) # replicate
1513
+
1514
+ # Augment
1515
+ img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
1516
+ img4, labels4 = random_perspective(img4, labels4, segments4,
1517
+ degrees=self.hyp['degrees'],
1518
+ translate=self.hyp['translate'],
1519
+ scale=self.hyp['scale'],
1520
+ shear=self.hyp['shear'],
1521
+ perspective=self.hyp['perspective'],
1522
+ border=self.mosaic_border) # border to remove
1523
+
1524
+ return img4, labels4
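To make the tile arithmetic concrete, a hedged numeric check of the top-left placement (values chosen for illustration):

    # Hedged example: top-left tile coordinates for s=640, mosaic center
    # (xc, yc) = (700, 500) and a 480x640 (h x w) source image.
    s, xc, yc, h, w = 640, 700, 500, 480, 640
    x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc   # canvas region
    x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h   # source crop
    print((x1a, y1a, x2a, y2a), (x1b, y1b, x2b, y2b))
    # (60, 20, 700, 500) (0, 0, 640, 480): both regions are 640x480, as required.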
1525
+
1526
+
1527
+ def load_mosaic9(self, index):
1528
+ # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
1529
+ labels9, segments9 = [], []
1530
+ s = self.img_size
1531
+ indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
1532
+ random.shuffle(indices)
1533
+ for i, index in enumerate(indices):
1534
+ # Load image
1535
+ img, _, (h, w) = load_image(self, index)
1536
+
1537
+ # place img in img9
1538
+ if i == 0: # center
1539
+ img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 9 tiles
1540
+ h0, w0 = h, w
1541
+ c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
1542
+ elif i == 1: # top
1543
+ c = s, s - h, s + w, s
1544
+ elif i == 2: # top right
1545
+ c = s + wp, s - h, s + wp + w, s
1546
+ elif i == 3: # right
1547
+ c = s + w0, s, s + w0 + w, s + h
1548
+ elif i == 4: # bottom right
1549
+ c = s + w0, s + hp, s + w0 + w, s + hp + h
1550
+ elif i == 5: # bottom
1551
+ c = s + w0 - w, s + h0, s + w0, s + h0 + h
1552
+ elif i == 6: # bottom left
1553
+ c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
1554
+ elif i == 7: # left
1555
+ c = s - w, s + h0 - h, s, s + h0
1556
+ elif i == 8: # top left
1557
+ c = s - w, s + h0 - hp - h, s, s + h0 - hp
1558
+
1559
+ padx, pady = c[:2]
1560
+ x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
1561
+
1562
+ # Labels
1563
+ labels, segments = self.labels[index].copy(), self.segments[index].copy()
1564
+ if labels.size:
1565
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
1566
+ segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
1567
+ labels9.append(labels)
1568
+ segments9.extend(segments)
1569
+
1570
+ # Image
1571
+ img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
1572
+ hp, wp = h, w # height, width previous
1573
+
1574
+ # Offset
1575
+ yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
1576
+ img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
1577
+
1578
+ # Concat/clip labels
1579
+ labels9 = np.concatenate(labels9, 0)
1580
+ labels9[:, [1, 3]] -= xc
1581
+ labels9[:, [2, 4]] -= yc
1582
+ c = np.array([xc, yc]) # centers
1583
+ segments9 = [x - c for x in segments9]
1584
+
1585
+ for x in (labels9[:, 1:], *segments9):
1586
+ np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
1587
+ # img9, labels9 = replicate(img9, labels9) # replicate
1588
+
1589
+ # Augment
1590
+ img9, labels9 = random_perspective(img9, labels9, segments9,
1591
+ degrees=self.hyp['degrees'],
1592
+ translate=self.hyp['translate'],
1593
+ scale=self.hyp['scale'],
1594
+ shear=self.hyp['shear'],
1595
+ perspective=self.hyp['perspective'],
1596
+ border=self.mosaic_border) # border to remove
1597
+
1598
+ return img9, labels9
1599
+
1600
+
1601
+ def create_folder(path='./new'):
1602
+ # Create folder
1603
+ if os.path.exists(path):
1604
+ shutil.rmtree(path) # delete output folder
1605
+ os.makedirs(path) # make new output folder
1606
+
1607
+
1608
+ def flatten_recursive(path='../datasets/coco128'):
1609
+ # Flatten a recursive directory by bringing all files to top level
1610
+ new_path = Path(path + '_flat')
1611
+ create_folder(new_path)
1612
+ for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
1613
+ shutil.copyfile(file, new_path / Path(file).name)
1614
+
1615
+
1616
+ def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes()
1617
+ # Convert detection dataset into classification dataset, with one directory per class
1618
+ path = Path(path) # images dir
1619
+ shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
1620
+ files = list(path.rglob('*.*'))
1621
+ n = len(files) # number of files
1622
+ for im_file in tqdm(files, total=n):
1623
+ if im_file.suffix[1:] in IMG_FORMATS:
1624
+ # image
1625
+ im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
1626
+ h, w = im.shape[:2]
1627
+
1628
+ # labels
1629
+ lb_file = Path(img2label_paths([str(im_file)])[0])
1630
+ if Path(lb_file).exists():
1631
+ with open(lb_file) as f:
1632
+ lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
1633
+
1634
+ for j, x in enumerate(lb):
1635
+ c = int(x[0]) # class
1636
+ f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
1637
+ if not f.parent.is_dir():
1638
+ f.parent.mkdir(parents=True)
1639
+
1640
+ b = x[1:] * [w, h, w, h] # box
1641
+ # b[2:] = b[2:].max() # rectangle to square
1642
+ b[2:] = b[2:] * 1.2 + 3 # pad
1643
+ b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
1644
+
1645
+ b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
1646
+ b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
1647
+ assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
1648
+
1649
+
1650
+ def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
1651
+ """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
1652
+ Usage: from utils.datasets import *; autosplit()
1653
+ Arguments
1654
+ path: Path to images directory
1655
+ weights: Train, val, test weights (list, tuple)
1656
+ annotated_only: Only use images with an annotated txt file
1657
+ """
1658
+ path = Path(path) # images dir
1659
+ files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
1660
+ n = len(files) # number of files
1661
+ random.seed(0) # for reproducibility
1662
+ indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
1663
+
1664
+ txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
1665
+ [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing
1666
+
1667
+ print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
1668
+ for i, img in tqdm(zip(indices, files), total=n):
1669
+ if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
1670
+ with open(path.parent / txt[i], 'a') as f:
1671
+ f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file
1672
+
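A hedged invocation of autosplit; the dataset path is a placeholder:

    # Hedged sketch: write autosplit_{train,val,test}.txt files next to the
    # images directory, keeping only annotated images.
    autosplit(path='../datasets/coco128/images', weights=(0.8, 0.1, 0.1),
              annotated_only=True)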
1673
+
1674
+ def verify_image_label(args):
1675
+ # Verify one image-label pair
1676
+ im_file, lb_file, prefix = args
1677
+ nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
1678
+ try:
1679
+ # verify images
1680
+ im = Image.open(im_file)
1681
+ im.verify() # PIL verify
1682
+ shape = exif_size(im) # image size
1683
+ assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
1684
+ assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
1685
+ if im.format.lower() in ('jpg', 'jpeg'):
1686
+ with open(im_file, 'rb') as f:
1687
+ f.seek(-2, 2)
1688
+ if f.read() != b'\xff\xd9': # corrupt JPEG
1689
+ ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
1690
+ msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved'
1691
+
1692
+ # verify labels
1693
+ if os.path.isfile(lb_file):
1694
+ nf = 1 # label found
1695
+ with open(lb_file) as f:
1696
+ #l = [x.split() for x in f.read().strip().splitlines() if len(x)]
1697
+ l = [x.split() for x in f.read().strip().splitlines() if len(x)] # for BRIAR conversion issue, added ',' separator
1698
+ i = None
1699
+ # if any([len(x) > 8 for x in l]): # is segment
1700
+ # classes = np.array([x[0] for x in l], dtype=np.float32)
1701
+ # segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
1702
+ # l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
1703
+ if all(len(x) == 5 for x in l): # adjustment for Ishan's annotations
1704
+
1705
+ #i = [int( float(x[-1]) ) for x in l] #seperate instances
1706
+ #l = [x[:-1] for x in l]
1707
+ # adjust to Ishan's annotations
1708
+ l = [x for x in l]
1709
+ i = list(range(len(l)))
1710
+ #print("coming here 3", [len(x) for x in l], len(i), len(l), flush=True)
1711
+ assert len(i) == len(l), "length of instances does not match number of bboxes"
1712
+ # else:
1713
+ # i = np.ones((len(l),), dtype=np.int32)
1714
+ l = np.array(l, dtype=np.float32)
1715
+ i = np.array(i).reshape(-1)
1716
+ nl = len(l)
1717
+ if nl:
1718
+ assert l.shape[1] == 5, f'labels require 5 columns, {l.shape[1]} columns detected'
1719
+ assert (l >= 0).all(), f'negative label values {l[l < 0]}'
1720
+ assert (l[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {l[:, 1:][l[:, 1:] > 1]}'
1721
+ l = np.unique(l, axis=0) # remove duplicate rows
1722
+ if len(l) < nl:
1723
+ segments = np.unique(segments, axis=0)
1724
+ msg = f'{prefix}WARNING: {im_file}: {nl - len(l)} duplicate labels removed'
1725
+ else:
1726
+ ne = 1 # label empty
1727
+ l = np.zeros((0, 5), dtype=np.float32)
1728
+ i = np.zeros((0,), dtype=np.int32)
1729
+ else:
1730
+ nm = 1 # label missing
1731
+ l = np.zeros((0, 5), dtype=np.float32)
1732
+ i = np.zeros((0,), dtype=np.int32)
1733
+
1734
+ return im_file, l, i, shape, segments, nm, nf, ne, nc, msg
1735
+ except Exception as e:
1736
+ print(e)
1737
+ nc = 1
1738
+ msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}'
1739
+ return [None, None, None, None, None, nm, nf, ne, nc, msg]
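A hedged single-pair call to the verifier above; both file paths are placeholders:

    # Hedged sketch: verify one image/label pair outside the multiprocessing Pool.
    # Note the 10-tuple return (including instance indices i) used by this fork.
    res = verify_image_label(('data/images/0001.jpg', 'data/labels/0001.txt', ''))
    im_file, l, i, shape, segments, nm, nf, ne, nc, msg = res
    print(nf, nm, ne, nc, msg)  # found/missing/empty/corrupt flags plus warning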
1740
+
1741
+
1742
+ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
1743
+ """ Return dataset statistics dictionary with images and instances counts per split per class
1744
+ To run in parent directory: export PYTHONPATH="$PWD/yolov5"
1745
+ Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
1746
+ Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')
1747
+ Arguments
1748
+ path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
1749
+ autodownload: Attempt to download dataset if not found locally
1750
+ verbose: Print stats dictionary
1751
+ """
1752
+
1753
+ def round_labels(labels):
1754
+ # Update labels to integer class and 6 decimal place floats
1755
+ return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
1756
+
1757
+ def unzip(path):
1758
+ # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
1759
+ if str(path).endswith('.zip'): # path is data.zip
1760
+ assert Path(path).is_file(), f'Error unzipping {path}, file not found'
1761
+ ZipFile(path).extractall(path=path.parent) # unzip
1762
+ dir = path.with_suffix('') # dataset directory == zip name
1763
+ return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path
1764
+ else: # path is data.yaml
1765
+ return False, None, path
1766
+
1767
+ def hub_ops(f, max_dim=1920):
1768
+ # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
1769
+ f_new = im_dir / Path(f).name # dataset-hub image filename
1770
+ try: # use PIL
1771
+ im = Image.open(f)
1772
+ r = max_dim / max(im.height, im.width) # ratio
1773
+ if r < 1.0: # image too large
1774
+ im = im.resize((int(im.width * r), int(im.height * r)))
1775
+ im.save(f_new, quality=75) # save
1776
+ except Exception as e: # use OpenCV
1777
+ print(f'WARNING: HUB ops PIL failure {f}: {e}')
1778
+ im = cv2.imread(f)
1779
+ im_height, im_width = im.shape[:2]
1780
+ r = max_dim / max(im_height, im_width) # ratio
1781
+ if r < 1.0: # image too large
1782
+ im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_LINEAR)
1783
+ cv2.imwrite(str(f_new), im)
1784
+
1785
+ zipped, data_dir, yaml_path = unzip(Path(path))
1786
+ with open(check_yaml(yaml_path), errors='ignore') as f:
1787
+ data = yaml.safe_load(f) # data dict
1788
+ if zipped:
1789
+ data['path'] = data_dir # TODO: should this be dir.resolve()?
1790
+ check_dataset(data, autodownload) # download dataset if missing
1791
+ hub_dir = Path(data['path'] + ('-hub' if hub else ''))
1792
+ stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary
1793
+ for split in 'train', 'val', 'test':
1794
+ if data.get(split) is None:
1795
+ stats[split] = None # i.e. no test set
1796
+ continue
1797
+ x = []
1798
+ dataset = LoadImagesAndLabels(data[split]) # load dataset
1799
+ for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
1800
+ x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
1801
+ x = np.array(x) # shape(128x80)
1802
+ stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
1803
+ 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
1804
+ 'per_class': (x > 0).sum(0).tolist()},
1805
+ 'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
1806
+ zip(dataset.img_files, dataset.labels)]}
1807
+
1808
+ if hub:
1809
+ im_dir = hub_dir / 'images'
1810
+ im_dir.mkdir(parents=True, exist_ok=True)
1811
+ for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):
1812
+ pass
1813
+
1814
+ # Profile
1815
+ stats_path = hub_dir / 'stats.json'
1816
+ if profile:
1817
+ for _ in range(1):
1818
+ file = stats_path.with_suffix('.npy')
1819
+ t1 = time.time()
1820
+ np.save(file, stats)
1821
+ t2 = time.time()
1822
+ x = np.load(file, allow_pickle=True)
1823
+ print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
1824
+
1825
+ file = stats_path.with_suffix('.json')
1826
+ t1 = time.time()
1827
+ with open(file, 'w') as f:
1828
+ json.dump(stats, f) # save stats *.json
1829
+ t2 = time.time()
1830
+ with open(file) as f:
1831
+ x = json.load(f) # load hyps dict
1832
+ print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
1833
+
1834
+ # Save, print and return
1835
+ if hub:
1836
+ print(f'Saving {stats_path.resolve()}...')
1837
+ with open(stats_path, 'w') as f:
1838
+ json.dump(stats, f) # save stats.json
1839
+ if verbose:
1840
+ print(json.dumps(stats, indent=2, sort_keys=False))
1841
+ return stats
defomable_conv.py ADDED
@@ -0,0 +1,117 @@
1
+ from torchvision.ops import deform_conv2d
2
+ from torch import nn
3
+ import torch
4
+ from torch.nn.modules.utils import _pair
5
+
6
+ class _NewEmptyTensorOp(torch.autograd.Function):
7
+ @staticmethod
8
+ def forward(ctx, x, new_shape):
9
+ ctx.shape = x.shape
10
+ return x.new_empty(new_shape)
11
+
12
+ @staticmethod
13
+ def backward(ctx, grad):
14
+ shape = ctx.shape
15
+ return _NewEmptyTensorOp.apply(grad, shape), None
16
+
17
+ class DeformConv(nn.Module):
18
+ def __init__(
19
+ self,
20
+ in_channels,
21
+ out_channels,
22
+ kernel_size,
23
+ stride=1,
24
+ padding=0,
25
+ dilation=1,
26
+ groups=1,
27
+ deformable_groups=1,
28
+ bias=False,
29
+ norm=None,
30
+ activation=None,
31
+ ):
32
+ """
33
+ Deformable convolution from :paper:`deformconv`.
34
+
35
+ Arguments are similar to :class:`Conv2D`. Extra arguments:
36
+
37
+ Args:
38
+ deformable_groups (int): number of groups used in deformable convolution.
39
+ norm (nn.Module, optional): a normalization layer
40
+ activation (callable(Tensor) -> Tensor): a callable activation function
41
+ """
42
+ super(DeformConv, self).__init__()
43
+
44
+ assert not bias
45
+ assert in_channels % groups == 0, "in_channels {} is not divisible by groups {}".format(
46
+ in_channels, groups
47
+ )
48
+ assert (
49
+ out_channels % groups == 0
50
+ ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups)
51
+
52
+ self.in_channels = in_channels
53
+ self.out_channels = out_channels
54
+ self.kernel_size = _pair(kernel_size)
55
+ self.stride = _pair(stride)
56
+ self.padding = _pair(padding)
57
+ self.dilation = _pair(dilation)
58
+ self.groups = groups
59
+ self.deformable_groups = deformable_groups
60
+ self.norm = norm
61
+ self.activation = activation
62
+
63
+ self.weight = nn.Parameter(
64
+ torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size)
65
+ )
66
+ self.bias = None
67
+
68
+ offset_out_channels = 2*self.groups*self.kernel_size[0]*self.kernel_size[1]
69
+ self.conv_offset = torch.nn.Conv2d(self.in_channels, offset_out_channels, 1, groups=self.groups)#, bias=False)
70
+
71
+ nn.init.constant_(self.conv_offset.weight, 0)
72
+ nn.init.constant_(self.conv_offset.bias, 0)
73
+ nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
74
+
75
+ def forward(self, x):
76
+ if x.numel() == 0:
77
+ # When input is empty, we want to return an empty tensor with the "correct" shape,
78
+ # so that the following operations will not panic
79
+ # if they check for the shape of the tensor.
80
+ # This computes the height and width of the output tensor
81
+ output_shape = [
82
+ (i + 2 * p - (di * (k - 1) + 1)) // s + 1
83
+ for i, p, di, k, s in zip(
84
+ x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
85
+ )
86
+ ]
87
+ output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
88
+ return _NewEmptyTensorOp.apply(x, output_shape)
89
+
90
+ offset = self.conv_offset(x)
91
+ x = deform_conv2d(
92
+ x,
93
+ offset,
94
+ self.weight,
95
+ self.bias,
96
+ self.stride,
97
+ self.padding,
98
+ self.dilation,
99
+ None
100
+ )
101
+ if self.norm is not None:
102
+ x = self.norm(x)
103
+ if self.activation is not None:
104
+ x = self.activation(x)
105
+ return x
106
+
107
+ def extra_repr(self):
108
+ tmpstr = "in_channels=" + str(self.in_channels)
109
+ tmpstr += ", out_channels=" + str(self.out_channels)
110
+ tmpstr += ", kernel_size=" + str(self.kernel_size)
111
+ tmpstr += ", stride=" + str(self.stride)
112
+ tmpstr += ", padding=" + str(self.padding)
113
+ tmpstr += ", dilation=" + str(self.dilation)
114
+ tmpstr += ", groups=" + str(self.groups)
115
+ tmpstr += ", deformable_groups=" + str(self.deformable_groups)
116
+ tmpstr += ", bias=False"
117
+ return tmpstr
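A hedged usage sketch for the module above; sizes are arbitrary:

    # Hedged sketch: forward a dummy feature map through DeformConv. Because
    # conv_offset is zero-initialized, it initially acts like a plain convolution.
    import torch
    conv = DeformConv(in_channels=16, out_channels=32, kernel_size=3, padding=1)
    x = torch.randn(2, 16, 40, 40)
    y = conv(x)
    print(y.shape)  # torch.Size([2, 32, 40, 40])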
downloads.py ADDED
@@ -0,0 +1,150 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+ """
3
+ Download utils
4
+ """
5
+
6
+ import os
7
+ import platform
8
+ import subprocess
9
+ import time
10
+ import urllib
11
+ from pathlib import Path
12
+ from zipfile import ZipFile
13
+
14
+ import requests
15
+ import torch
16
+
17
+
18
+ def gsutil_getsize(url=''):
19
+ # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
20
+ s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
21
+ return eval(s.split(' ')[0]) if len(s) else 0 # bytes
22
+
23
+
24
+ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''):
25
+ # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes
26
+ file = Path(file)
27
+ assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}"
28
+ try: # url1
29
+ print(f'Downloading {url} to {file}...')
30
+ torch.hub.download_url_to_file(url, str(file))
31
+ assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check
32
+ except Exception as e: # url2
33
+ file.unlink(missing_ok=True) # remove partial downloads
34
+ print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...')
35
+ os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail
36
+ finally:
37
+ if not file.exists() or file.stat().st_size < min_bytes: # check
38
+ file.unlink(missing_ok=True) # remove partial downloads
39
+ print(f"ERROR: {assert_msg}\n{error_msg}")
40
+ print('')
41
+
42
+
43
+ def attempt_download(file, repo='ultralytics/yolov5'): # from utils.downloads import *; attempt_download()
44
+ # Attempt file download if does not exist
45
+ file = Path(str(file).strip().replace("'", ''))
46
+
47
+ if not file.exists():
48
+ # URL specified
49
+ name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc.
50
+ if str(file).startswith(('http:/', 'https:/')): # download
51
+ url = str(file).replace(':/', '://') # Pathlib turns :// -> :/
52
+ name = name.split('?')[0] # parse authentication https://url.com/file.txt?auth...
53
+ safe_download(file=name, url=url, min_bytes=1E5)
54
+ return name
55
+
56
+ # GitHub assets
57
+ file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required)
58
+ try:
59
+ response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api
60
+ assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
61
+ tag = response['tag_name'] # i.e. 'v1.0'
62
+ except: # fallback plan
63
+ assets = ['yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt',
64
+ 'yolov5n6.pt', 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
65
+ try:
66
+ tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
67
+ except:
68
+ tag = 'v6.0' # current release
69
+
70
+ if name in assets:
71
+ safe_download(file,
72
+ url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
73
+ # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional)
74
+ min_bytes=1E5,
75
+ error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/')
76
+
77
+ return str(file)
78
+
79
+
80
+ def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
81
+ # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download()
82
+ t = time.time()
83
+ file = Path(file)
84
+ cookie = Path('cookie') # gdrive cookie
85
+ print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
86
+ file.unlink(missing_ok=True) # remove existing file
87
+ cookie.unlink(missing_ok=True) # remove existing cookie
88
+
89
+ # Attempt file download
90
+ out = "NUL" if platform.system() == "Windows" else "/dev/null"
91
+ os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
92
+ if os.path.exists('cookie'): # large file
93
+ s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
94
+ else: # small file
95
+ s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
96
+ r = os.system(s) # execute, capture return
97
+ cookie.unlink(missing_ok=True) # remove existing cookie
98
+
99
+ # Error check
100
+ if r != 0:
101
+ file.unlink(missing_ok=True) # remove partial
102
+ print('Download error ') # raise Exception('Download error')
103
+ return r
104
+
105
+ # Unzip if archive
106
+ if file.suffix == '.zip':
107
+ print('unzipping... ', end='')
108
+ ZipFile(file).extractall(path=file.parent) # unzip
109
+ file.unlink() # remove zip
110
+
111
+ print(f'Done ({time.time() - t:.1f}s)')
112
+ return r
113
+
114
+
115
+ def get_token(cookie="./cookie"):
116
+ with open(cookie) as f:
117
+ for line in f:
118
+ if "download" in line:
119
+ return line.split()[-1]
120
+ return ""
121
+
122
+ # Google utils: https://cloud.google.com/storage/docs/reference/libraries ----------------------------------------------
123
+ #
124
+ #
125
+ # def upload_blob(bucket_name, source_file_name, destination_blob_name):
126
+ # # Uploads a file to a bucket
127
+ # # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
128
+ #
129
+ # storage_client = storage.Client()
130
+ # bucket = storage_client.get_bucket(bucket_name)
131
+ # blob = bucket.blob(destination_blob_name)
132
+ #
133
+ # blob.upload_from_filename(source_file_name)
134
+ #
135
+ # print('File {} uploaded to {}.'.format(
136
+ # source_file_name,
137
+ # destination_blob_name))
138
+ #
139
+ #
140
+ # def download_blob(bucket_name, source_blob_name, destination_file_name):
141
+ # # Uploads a blob from a bucket
142
+ # storage_client = storage.Client()
143
+ # bucket = storage_client.get_bucket(bucket_name)
144
+ # blob = bucket.blob(source_blob_name)
145
+ #
146
+ # blob.download_to_filename(destination_file_name)
147
+ #
148
+ # print('Blob {} downloaded to {}.'.format(
149
+ # source_blob_name,
150
+ # destination_file_name))
experimental.py ADDED
@@ -0,0 +1,127 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+ """
3
+ Experimental modules
4
+ """
5
+ import math
6
+
7
+ import numpy as np
8
+ import torch
9
+ import torch.nn as nn
10
+
11
+ from common import Conv
12
+ from downloads import attempt_download
13
+
14
+
15
+ class CrossConv(nn.Module):
16
+ # Cross Convolution Downsample
17
+ def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
18
+ # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
19
+ super().__init__()
20
+ c_ = int(c2 * e) # hidden channels
21
+ self.cv1 = Conv(c1, c_, (1, k), (1, s))
22
+ self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
23
+ self.add = shortcut and c1 == c2
24
+
25
+ def forward(self, x):
26
+ return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
27
+
28
+
29
+ class Sum(nn.Module):
30
+ # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
31
+ def __init__(self, n, weight=False): # n: number of inputs
32
+ super().__init__()
33
+ self.weight = weight # apply weights boolean
34
+ self.iter = range(n - 1) # iter object
35
+ if weight:
36
+ self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights
37
+
38
+ def forward(self, x):
39
+ y = x[0] # no weight
40
+ if self.weight:
41
+ w = torch.sigmoid(self.w) * 2
42
+ for i in self.iter:
43
+ y = y + x[i + 1] * w[i]
44
+ else:
45
+ for i in self.iter:
46
+ y = y + x[i + 1]
47
+ return y
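A brief hedged example of the weighted variant:

    # Hedged sketch: learnable weighted fusion of three equal-shape feature maps,
    # y = x0 + w0*x1 + w1*x2 with w = 2*sigmoid(self.w).
    import torch
    fuse = Sum(n=3, weight=True)
    feats = [torch.randn(1, 8, 16, 16) for _ in range(3)]
    print(fuse(feats).shape)  # torch.Size([1, 8, 16, 16])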
48
+
49
+
50
+ class MixConv2d(nn.Module):
51
+ # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
52
+ def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy
53
+ super().__init__()
54
+ n = len(k) # number of convolutions
55
+ if equal_ch: # equal c_ per group
56
+ i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices
57
+ c_ = [(i == g).sum() for g in range(n)] # intermediate channels
58
+ else: # equal weight.numel() per group
59
+ b = [c2] + [0] * n
60
+ a = np.eye(n + 1, n, k=-1)
61
+ a -= np.roll(a, 1, axis=1)
62
+ a *= np.array(k) ** 2
63
+ a[0] = 1
64
+ c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b
65
+
66
+ self.m = nn.ModuleList(
67
+ [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
68
+ self.bn = nn.BatchNorm2d(c2)
69
+ self.act = nn.SiLU()
70
+
71
+ def forward(self, x):
72
+ return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
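A hedged illustration of the equal-channel split used when equal_ch=True:

    # Hedged example: splitting c2=8 output channels across n=2 kernels (1 and 3).
    import torch
    n, c2 = 2, 8
    i = torch.linspace(0, n - 1E-6, c2).floor()  # group id per output channel
    c_ = [(i == g).sum() for g in range(n)]      # [tensor(4), tensor(4)]
    print(c_)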
73
+
74
+
75
+ class Ensemble(nn.ModuleList):
76
+ # Ensemble of models
77
+ def __init__(self):
78
+ super().__init__()
79
+
80
+ def forward(self, x, augment=False, profile=False, visualize=False):
81
+ y = []
82
+ for module in self:
83
+ y.append(module(x, augment, profile, visualize)[0])
84
+ # y = torch.stack(y).max(0)[0] # max ensemble
85
+ # y = torch.stack(y).mean(0) # mean ensemble
86
+ y = torch.cat(y, 1) # nms ensemble
87
+ return y, None # inference, train output
88
+
89
+
90
+ def attempt_load(weights, map_location=None, inplace=True, fuse=True, return_epoch_number=False):
91
+ from models.yolo import Detect, Model
92
+
93
+ # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
94
+ model = Ensemble()
95
+ for w in weights if isinstance(weights, list) else [weights]:
96
+ ckpt = torch.load(attempt_download(w), map_location=map_location, weights_only=False) # load
97
+ if fuse:
98
+ model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model
99
+ else:
100
+ model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse
101
+
102
+ # Compatibility updates
103
+ for m in model.modules():
104
+ if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:
105
+ m.inplace = inplace # pytorch 1.7.0 compatibility
106
+ if type(m) is Detect:
107
+ if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility
108
+ delattr(m, 'anchor_grid')
109
+ setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
110
+ elif type(m) is Conv:
111
+ m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
112
+
113
+ if len(model) == 1:
114
+ if not return_epoch_number:
115
+ return model[-1] # return ensemble
116
+ else:
117
+ return model[-1], ckpt["epoch"]
118
+ else:
119
+ print(f'Ensemble created with {weights}\n')
120
+ for k in ['names']:
121
+ setattr(model, k, getattr(model[-1], k))
122
+ model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride
123
+ if not return_epoch_number:
124
+ return model # return ensemble
125
+ else:
126
+ return model, ckpt["epoch"]
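A hedged usage sketch; the weight filenames are placeholders:

    # Hedged sketch: load a single checkpoint, an ensemble, or also recover the
    # stored epoch. 'yolov5s.pt'/'yolov5m.pt' are placeholder weight files.
    model = attempt_load('yolov5s.pt', map_location='cpu')
    ensemble = attempt_load(['yolov5s.pt', 'yolov5m.pt'], map_location='cpu')
    model, epoch = attempt_load('yolov5s.pt', map_location='cpu',
                                return_epoch_number=True)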
127
+
general.py ADDED
@@ -0,0 +1,876 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+ """
3
+ General utils
4
+ """
5
+
6
+ import contextlib
7
+ import glob
8
+ import logging
9
+ import math
10
+ import os
11
+ import platform
12
+ import random
13
+ import re
14
+ import signal
15
+ import time
16
+ import urllib
17
+ from itertools import repeat
18
+ from multiprocessing.pool import ThreadPool
19
+ from pathlib import Path
20
+ from subprocess import check_output
21
+ from zipfile import ZipFile
22
+
23
+ import cv2
24
+ import numpy as np
25
+ import pandas as pd
26
+ import pkg_resources as pkg
27
+ import torch
28
+ import torchvision
29
+ import yaml
30
+
31
+ from downloads import gsutil_getsize
32
+ from metrics import box_iou, fitness
33
+
34
+ # Settings
35
+ torch.set_printoptions(linewidth=320, precision=5, profile='long')
36
+ np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
37
+ pd.options.display.max_columns = 10
38
+ cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
39
+ os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads
40
+
41
+ FILE = Path(__file__).resolve()
42
+ ROOT = FILE.parents[1] # YOLOv5 root directory
43
+
44
+
45
+ def set_logging(name=None, verbose=True):
46
+ # Sets level and returns logger
47
+ rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings
48
+ logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARN)
49
+ return logging.getLogger(name)
50
+
51
+
52
+ LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)
53
+
54
+
55
+ class Profile(contextlib.ContextDecorator):
56
+ # Usage: @Profile() decorator or 'with Profile():' context manager
57
+ def __enter__(self):
58
+ self.start = time.time()
59
+
60
+ def __exit__(self, type, value, traceback):
61
+ print(f'Profile results: {time.time() - self.start:.5f}s')
62
+
63
+
64
+ class Timeout(contextlib.ContextDecorator):
65
+ # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager
66
+ def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):
67
+ self.seconds = int(seconds)
68
+ self.timeout_message = timeout_msg
69
+ self.suppress = bool(suppress_timeout_errors)
70
+
71
+ def _timeout_handler(self, signum, frame):
72
+ raise TimeoutError(self.timeout_message)
73
+
74
+ def __enter__(self):
75
+ signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM
76
+ signal.alarm(self.seconds) # start countdown for SIGALRM to be raised
77
+
78
+ def __exit__(self, exc_type, exc_val, exc_tb):
79
+ signal.alarm(0) # Cancel SIGALRM if it's scheduled
80
+ if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError
81
+ return True
82
+
83
+
84
+ class WorkingDirectory(contextlib.ContextDecorator):
85
+ # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager
86
+ def __init__(self, new_dir):
87
+ self.dir = new_dir # new dir
88
+ self.cwd = Path.cwd().resolve() # current dir
89
+
90
+ def __enter__(self):
91
+ os.chdir(self.dir)
92
+
93
+ def __exit__(self, exc_type, exc_val, exc_tb):
94
+ os.chdir(self.cwd)
95
+
96
+
97
+ def try_except(func):
98
+ # try-except function. Usage: @try_except decorator
99
+ def handler(*args, **kwargs):
100
+ try:
101
+ func(*args, **kwargs)
102
+ except Exception as e:
103
+ print(e)
104
+
105
+ return handler
106
+
107
+
108
+ def methods(instance):
109
+ # Get class/instance methods
110
+ return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]
111
+
112
+
113
+ def print_args(name, opt):
114
+ # Print argparser arguments
115
+ LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
116
+
117
+
118
+ def init_seeds(seed=0):
119
+ # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
120
+ # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible
121
+ import torch.backends.cudnn as cudnn
122
+ random.seed(seed)
123
+ np.random.seed(seed)
124
+ torch.manual_seed(seed)
125
+ cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False)
126
+
127
+
128
+ def get_latest_run(search_dir='.'):
129
+ # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
130
+ last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
131
+ return max(last_list, key=os.path.getctime) if last_list else ''
132
+
133
+
134
+ def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
135
+ # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required.
136
+ env = os.getenv(env_var)
137
+ if env:
138
+ path = Path(env) # use environment variable
139
+ else:
140
+ cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs
141
+ path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir
142
+ path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable
143
+ path.mkdir(exist_ok=True) # make if required
144
+ return path
145
+
146
+
147
+ def is_writeable(dir, test=False):
148
+ # Return True if directory has write permissions, test opening a file with write permissions if test=True
149
+ if test: # method 1
150
+ file = Path(dir) / 'tmp.txt'
151
+ try:
152
+ with open(file, 'w'): # open file with write permissions
153
+ pass
154
+ file.unlink() # remove file
155
+ return True
156
+ except OSError:
157
+ return False
158
+ else: # method 2
159
+ return os.access(dir, os.R_OK) # possible issues on Windows
160
+
161
+
162
+ def is_docker():
163
+ # Is environment a Docker container?
164
+ return Path('/workspace').exists() # or Path('/.dockerenv').exists()
165
+
166
+
167
+ def is_colab():
168
+ # Is environment a Google Colab instance?
169
+ try:
170
+ import google.colab
171
+ return True
172
+ except ImportError:
173
+ return False
174
+
175
+
176
+ def is_pip():
177
+ # Is file in a pip package?
178
+ return 'site-packages' in Path(__file__).resolve().parts
179
+
180
+
181
+ def is_ascii(s=''):
182
+ # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
183
+ s = str(s) # convert list, tuple, None, etc. to str
184
+ return len(s.encode().decode('ascii', 'ignore')) == len(s)
185
+
186
+
187
+ def is_chinese(s='人工智能'):
188
+ # Is string composed of any Chinese characters?
189
+ return re.search('[\u4e00-\u9fff]', s)
190
+
191
+
192
+ def emojis(str=''):
193
+ # Return platform-dependent emoji-safe version of string
194
+ return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
195
+
196
+
197
+ def file_size(path):
198
+ # Return file/dir size (MB)
199
+ path = Path(path)
200
+ if path.is_file():
201
+ return path.stat().st_size / 1E6
202
+ elif path.is_dir():
203
+ return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6
204
+ else:
205
+ return 0.0
206
+
207
+
208
+ def check_online():
209
+ # Check internet connectivity
210
+ import socket
211
+ try:
212
+ socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility
213
+ return True
214
+ except OSError:
215
+ return False
216
+
217
+
218
+ @try_except
219
+ @WorkingDirectory(ROOT)
220
+ def check_git_status():
221
+ # Recommend 'git pull' if code is out of date
222
+ msg = ', for updates see https://github.com/ultralytics/yolov5'
223
+ print(colorstr('github: '), end='')
224
+ assert Path('.git').exists(), 'skipping check (not a git repository)' + msg
225
+ assert not is_docker(), 'skipping check (Docker image)' + msg
226
+ assert check_online(), 'skipping check (offline)' + msg
227
+
228
+ cmd = 'git fetch && git config --get remote.origin.url'
229
+ url = re.sub(r'\.git$', '', check_output(cmd, shell=True, timeout=5).decode().strip()) # git fetch, drop trailing .git (rstrip('.git') strips characters, not the suffix)
230
+ branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
231
+ n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind
232
+ if n > 0:
233
+ s = f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update."
234
+ else:
235
+ s = f'up to date with {url} ✅'
236
+ print(emojis(s)) # emoji-safe
237
+
238
+
239
+ def check_python(minimum='3.6.2'):
240
+ # Check current python version vs. required python version
241
+ check_version(platform.python_version(), minimum, name='Python ', hard=True)
242
+
243
+
244
+ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False):
245
+ # Check version vs. required version
246
+ current, minimum = (pkg.parse_version(x) for x in (current, minimum))
247
+ result = (current == minimum) if pinned else (current >= minimum) # bool
248
+ if hard: # assert min requirements met
249
+ assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed'
250
+ else:
251
+ return result
252
+
253
+
254
+ @try_except
255
+ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):
256
+ # Check installed dependencies meet requirements (pass *.txt file or list of packages)
257
+ prefix = colorstr('red', 'bold', 'requirements:')
258
+ check_python() # check python version
259
+ if isinstance(requirements, (str, Path)): # requirements.txt file
260
+ file = Path(requirements)
261
+ assert file.exists(), f"{prefix} {file.resolve()} not found, check failed."
262
+ requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
263
+ else: # list or tuple of packages
264
+ requirements = [x for x in requirements if x not in exclude]
265
+
266
+ n = 0 # number of packages updates
267
+ for r in requirements:
268
+ try:
269
+ pkg.require(r)
270
+ except Exception as e: # DistributionNotFound or VersionConflict if requirements not met
271
+ s = f"{prefix} {r} not found and is required by YOLOv5"
272
+ if install:
273
+ print(f"{s}, attempting auto-update...")
274
+ try:
275
+ assert check_online(), f"'pip install {r}' skipped (offline)"
276
+ print(check_output(f"pip install '{r}'", shell=True).decode())
277
+ n += 1
278
+ except Exception as e:
279
+ print(f'{prefix} {e}')
280
+ else:
281
+ print(f'{s}. Please install and rerun your command.')
282
+
283
+ if n: # if packages updated
284
+ source = file.resolve() if 'file' in locals() else requirements
285
+ s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
286
+ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
287
+ print(emojis(s))
288
+
289
+
290
+ def check_img_size(imgsz, s=32, floor=0):
291
+ # Verify image size is a multiple of stride s in each dimension
292
+ if isinstance(imgsz, int): # integer i.e. img_size=640
293
+ new_size = max(make_divisible(imgsz, int(s)), floor)
294
+ else: # list i.e. img_size=[640, 480]
295
+ new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]
296
+ if new_size != imgsz:
297
+ print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
298
+ return new_size
299
+
300
+
301
+ def check_imshow():
302
+ # Check if environment supports image displays
303
+ try:
304
+ assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'
305
+ assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'
306
+ cv2.imshow('test', np.zeros((1, 1, 3)))
307
+ cv2.waitKey(1)
308
+ cv2.destroyAllWindows()
309
+ cv2.waitKey(1)
310
+ return True
311
+ except Exception as e:
312
+ print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
313
+ return False
314
+
315
+
316
+ def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
317
+ # Check file(s) for acceptable suffix
318
+ if file and suffix:
319
+ if isinstance(suffix, str):
320
+ suffix = [suffix]
321
+ for f in file if isinstance(file, (list, tuple)) else [file]:
322
+ s = Path(f).suffix.lower() # file suffix
323
+ if len(s):
324
+ assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
325
+
326
+
327
+ def check_yaml(file, suffix=('.yaml', '.yml')):
328
+ # Search/download YAML file (if necessary) and return path, checking suffix
329
+ return check_file(file, suffix)
330
+
331
+
332
+ def check_file(file, suffix=''):
333
+ # Search/download file (if necessary) and return path
334
+ check_suffix(file, suffix) # optional
335
+ file = str(file) # convert to str()
336
+ if Path(file).is_file() or file == '': # exists
337
+ return file
338
+ elif file.startswith(('http:/', 'https:/')): # download
339
+ url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/
340
+ file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth
341
+ if Path(file).is_file():
342
+ print(f'Found {url} locally at {file}') # file already exists
343
+ else:
344
+ print(f'Downloading {url} to {file}...')
345
+ torch.hub.download_url_to_file(url, file)
346
+ assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check
347
+ return file
348
+ else: # search
349
+ files = []
350
+ for d in 'data', 'models', 'utils': # search directories
351
+ files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file
352
+ assert len(files), f'File not found: {file}' # assert file was found
353
+ assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
354
+ return files[0] # return file
355
+
356
+
357
+ def check_dataset(data, autodownload=True, streamable_hence_skip=False):
358
+ # Download and/or unzip dataset if not found locally
359
+ # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip
360
+
361
+ # Download (optional)
362
+ extract_dir = ''
363
+ if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip
364
+ download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1)
365
+ data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml'))
366
+ extract_dir, autodownload = data.parent, False
367
+
368
+ # Read yaml (optional)
369
+ if isinstance(data, (str, Path)):
370
+ with open(data, errors='ignore') as f:
371
+ data = yaml.safe_load(f) # dictionary
372
+
373
+ # Parse yaml
374
+ path = extract_dir or Path(data.get('path') or '') # optional 'path' default to '.'
375
+ for k in 'train', 'val', 'test', 'inference':
376
+ if data.get(k): # prepend path
377
+ data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]]
378
+
379
+ assert 'nc' in data, "Dataset 'nc' key missing."
380
+ if 'names' not in data:
381
+ data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing
382
+ train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))
383
+
384
+ if 'annotation_path' in data:
385
+ annotation_path = Path(data.get('annotation_path') or '')
386
+ for k in 'annotation_train', 'annotation_val', 'annotation_test':
387
+ if data.get(k): # prepend path
388
+ data[k] = str(annotation_path / data[k]) if isinstance(data[k], str) else [str(annotation_path / x) for x in data[k]]
389
+
390
+ if 'video_root_path' in data:
391
+ video_root_path = Path(data.get('video_root_path') or '')
392
+ for k in 'video_root_path_train', 'video_root_path_val', 'video_root_path_test', 'video_root_path_inference':
393
+ if data.get(k): # prepend path
394
+ data[k] = str(video_root_path / data[k]) if isinstance(data[k], str) else [str(video_root_path / x) for x in data[k]]
395
+
396
+ if val:
397
+ val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
398
+ if not all(x.exists() for x in val):
399
+ print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
400
+ if s and autodownload: # download script
401
+ root = path.parent if 'path' in data else '..' # unzip directory i.e. '../'
402
+ if s.startswith('http') and s.endswith('.zip'): # URL
403
+ f = Path(s).name # filename
404
+ print(f'Downloading {s} to {f}...')
405
+ torch.hub.download_url_to_file(s, f)
406
+ Path(root).mkdir(parents=True, exist_ok=True) # create root
407
+ ZipFile(f).extractall(path=root) # unzip
408
+ Path(f).unlink() # remove zip
409
+ r = None # success
410
+ elif s.startswith('bash '): # bash script
411
+ print(f'Running {s} ...')
412
+ r = os.system(s)
413
+ else: # python script
414
+ r = exec(s, {'yaml': data}) # return None
415
+ print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n")
416
+ else:
417
+ if not streamable_hence_skip:
418
+ raise Exception('Dataset not found.')
419
+
420
+ return data # dictionary
421
+
422
+
423
+ def url2file(url):
424
+ # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
425
+ url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/
426
+ file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth
427
+ return file
428
+
429
+
430
+ def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):
431
+ # Multi-threaded file download and unzip function, used in data.yaml for autodownload
432
+ def download_one(url, dir):
433
+ # Download 1 file
434
+ f = dir / Path(url).name # filename
435
+ if Path(url).is_file(): # exists in current path
436
+ Path(url).rename(f) # move to dir
437
+ elif not f.exists():
438
+ print(f'Downloading {url} to {f}...')
439
+ if curl:
440
+ os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail
441
+ else:
442
+ torch.hub.download_url_to_file(url, f, progress=True) # torch download
443
+ if unzip and f.suffix in ('.zip', '.gz'):
444
+ print(f'Unzipping {f}...')
445
+ if f.suffix == '.zip':
446
+ ZipFile(f).extractall(path=dir) # unzip
447
+ elif f.suffix == '.gz':
448
+ os.system(f'tar xfz {f} --directory {f.parent}') # unzip
449
+ if delete:
450
+ f.unlink() # remove zip
451
+
452
+ dir = Path(dir)
453
+ dir.mkdir(parents=True, exist_ok=True) # make directory
454
+ if threads > 1:
455
+ pool = ThreadPool(threads)
456
+ pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded
457
+ pool.close()
458
+ pool.join()
459
+ else:
460
+ for u in [url] if isinstance(url, (str, Path)) else url:
461
+ download_one(u, dir)
462
+
463
+
464
+ def make_divisible(x, divisor):
465
+ # Returns x evenly divisible by divisor
466
+ return math.ceil(x / divisor) * divisor
467
+
468
+
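A quick sanity sketch for make_divisible above (editor's illustration, not part of the uploaded file):

assert make_divisible(640, 32) == 640   # already a multiple of the stride
assert make_divisible(641, 32) == 672   # rounded up: math.ceil(641 / 32) * 32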
469
+ def clean_str(s):
470
+ # Cleans a string by replacing special characters with underscore _
471
+ return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
472
+
473
+
474
+ def one_cycle(y1=0.0, y2=1.0, steps=100):
475
+ # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf
476
+ return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
477
+
478
+
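Evaluating the one_cycle ramp at a few points shows the cosine schedule it encodes (illustrative sketch; lrf=0.1 over 100 epochs is an assumed, typical setting):

lf = one_cycle(1.0, 0.1, steps=100)  # cosine ramp from 1.0 down to 0.1
print(lf(0), lf(50), lf(100))        # 1.0, 0.55, 0.1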
479
+ def colorstr(*input):
480
+ # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
481
+ *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string
482
+ colors = {'black': '\033[30m', # basic colors
483
+ 'red': '\033[31m',
484
+ 'green': '\033[32m',
485
+ 'yellow': '\033[33m',
486
+ 'blue': '\033[34m',
487
+ 'magenta': '\033[35m',
488
+ 'cyan': '\033[36m',
489
+ 'white': '\033[37m',
490
+ 'bright_black': '\033[90m', # bright colors
491
+ 'bright_red': '\033[91m',
492
+ 'bright_green': '\033[92m',
493
+ 'bright_yellow': '\033[93m',
494
+ 'bright_blue': '\033[94m',
495
+ 'bright_magenta': '\033[95m',
496
+ 'bright_cyan': '\033[96m',
497
+ 'bright_white': '\033[97m',
498
+ 'end': '\033[0m', # misc
499
+ 'bold': '\033[1m',
500
+ 'underline': '\033[4m'}
501
+ return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
502
+
503
+
504
+ def labels_to_class_weights(labels, nc=80):
505
+ # Get class weights (inverse frequency) from training labels
506
+ if labels[0] is None: # no labels loaded
507
+ return torch.Tensor()
508
+
509
+ labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
510
+ classes = labels[:, 0].astype(int) # labels = [class xywh]
511
+ weights = np.bincount(classes, minlength=nc) # occurrences per class
512
+
513
+ # Prepend gridpoint count (for uCE training)
514
+ # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
515
+ # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
516
+
517
+ weights[weights == 0] = 1 # replace empty bins with 1
518
+ weights = 1 / weights # number of targets per class
519
+ weights /= weights.sum() # normalize
520
+ return torch.from_numpy(weights)
521
+
522
+
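A toy check of labels_to_class_weights (editor's sketch; rows are [class, x, y, w, h] and the hypothetical class counts below are 3, 1, 0):

lbls = [np.array([[0, .5, .5, .1, .1]] * 3 + [[1, .5, .5, .1, .1]])]
print(labels_to_class_weights(lbls, nc=3))  # rarer classes get larger weight; the empty class-2 bin counts as 1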
523
+ def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
524
+ # Produces image weights based on class_weights and image contents
525
+ class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
526
+ image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
527
+ # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
528
+ return image_weights
529
+
530
+
531
+ def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
532
+ # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
533
+ # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
534
+ # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
535
+ # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
536
+ # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
537
+ x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
538
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
539
+ 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
540
+ return x
541
+
542
+
543
+ def xyxy2xywh(x):
544
+ # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
545
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
546
+ y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
547
+ y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
548
+ y[:, 2] = x[:, 2] - x[:, 0] # width
549
+ y[:, 3] = x[:, 3] - x[:, 1] # height
550
+ return y
551
+
552
+
553
+ def xywh2xyxy(x):
554
+ # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
555
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
556
+ y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
557
+ y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
558
+ y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
559
+ y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
560
+ return y
561
+
562
+
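The xyxy/xywh converters above are exact inverses, which this round-trip check illustrates (editor's sketch, toy values, numpy already imported in this file):

b = np.array([[10., 20., 110., 220.]])          # one xyxy box
assert np.allclose(xywh2xyxy(xyxy2xywh(b)), b)  # xyxy -> cxcywh -> xyxy round-trips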
563
+ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
564
+ # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
565
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
566
+ y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
567
+ y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
568
+ y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
569
+ y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
570
+ return y
571
+
572
+
573
+ def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
574
+ # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
575
+ if clip:
576
+ clip_coords(x, (h - eps, w - eps)) # warning: inplace clip
577
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
578
+ y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center
579
+ y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center
580
+ y[:, 2] = (x[:, 2] - x[:, 0]) / w # width
581
+ y[:, 3] = (x[:, 3] - x[:, 1]) / h # height
582
+ return y
583
+
584
+
585
+ def xyn2xy(x, w=640, h=640, padw=0, padh=0):
586
+ # Convert normalized segments into pixel segments, shape (n,2)
587
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
588
+ y[:, 0] = w * x[:, 0] + padw # top left x
589
+ y[:, 1] = h * x[:, 1] + padh # top left y
590
+ return y
591
+
592
+
593
+ def segment2box(segment, width=640, height=640):
594
+ # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
595
+ x, y = segment.T # segment xy
596
+ inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
597
+ x, y = x[inside], y[inside]
598
+ return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
599
+
600
+
601
+ def segments2boxes(segments):
602
+ # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
603
+ boxes = []
604
+ for s in segments:
605
+ x, y = s.T # segment xy
606
+ boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
607
+ return xyxy2xywh(np.array(boxes)) # cls, xywh
608
+
609
+
610
+ def resample_segments(segments, n=1000):
611
+ # Up-sample an (n,2) segment
612
+ for i, s in enumerate(segments):
613
+ x = np.linspace(0, len(s) - 1, n)
614
+ xp = np.arange(len(s))
615
+ segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
616
+ return segments
617
+
618
+ def extend_iou(annotations):
619
+ for i in range(len(annotations)):
620
+ x1, y1, x2, y2 = annotations[i]
621
+ orig_box_width, orig_box_height = x2 - x1, y2 - y1
622
+ if orig_box_width * orig_box_height < 100:
623
+ orig_aspect_ratio = float(orig_box_width / orig_box_height)
624
+ extended_width = math.sqrt(100 * orig_aspect_ratio)
625
+ extended_height = 100 / extended_width
626
+ delta_width = extended_width - orig_box_width
627
+ delta_height = extended_height - orig_box_height
628
+ x1 -= delta_width / 2
629
+ x2 = x1 + extended_width
630
+ y1 -= delta_height / 2
631
+ y2 = y1 + extended_height
632
+ annotations[i][0], annotations[i][1], annotations[i][2], annotations[i][3] = x1, y1, x2, y2
633
+ return annotations
634
+
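extend_iou grows any box smaller than 100 px^2 to exactly 100 px^2 around its centre while keeping the aspect ratio; a toy check (editor's sketch, hypothetical values):

boxes = np.array([[0., 0., 5., 10.]])                  # area 50 < 100
out = extend_iou(boxes.copy())                         # copy: the function mutates in place
w, h = out[0, 2] - out[0, 0], out[0, 3] - out[0, 1]
print(round(float(w * h), 2), round(float(w / h), 2))  # ~100.0 area, 0.5 aspect preserved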
635
+ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
636
+ # Rescale coords (xyxy) from img1_shape to img0_shape
637
+ if ratio_pad is None: # calculate from img0_shape
638
+ gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
639
+ pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
640
+ else:
641
+ gain = ratio_pad[0][0]
642
+ pad = ratio_pad[1]
643
+
644
+ coords[:, [0, 2]] -= pad[0] # x padding
645
+ coords[:, [1, 3]] -= pad[1] # y padding
646
+ coords[:, :4] /= gain
647
+ clip_coords(coords, img0_shape)
648
+ return coords
649
+
650
+
651
+ def clip_coords(boxes, shape):
652
+ # Clip bounding xyxy bounding boxes to image shape (height, width)
653
+ if isinstance(boxes, torch.Tensor): # faster individually
654
+ boxes[:, 0].clamp_(0, shape[1]) # x1
655
+ boxes[:, 1].clamp_(0, shape[0]) # y1
656
+ boxes[:, 2].clamp_(0, shape[1]) # x2
657
+ boxes[:, 3].clamp_(0, shape[0]) # y2
658
+ else: # np.array (faster grouped)
659
+ boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2
660
+ boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2
661
+
662
+
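scale_coords undoes the letterbox transform, e.g. mapping a box from a 640x640 network input back to a 720x1280 frame (editor's sketch; shapes are (h, w) and all values are hypothetical):

det = torch.tensor([[100., 240., 300., 440.]])     # xyxy in letterboxed 640x640 space
print(scale_coords((640, 640), det, (720, 1280)))  # tensor([[200., 200., 600., 600.]])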
663
+ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
664
+ labels=(), max_det=300):
665
+ """Runs Non-Maximum Suppression (NMS) on inference results
666
+
667
+ Returns:
668
+ list of detections, on (n,6) tensor per image [xyxy, conf, cls]
669
+ """
670
+
671
+ nc = prediction.shape[2] - 5 # number of classes
672
+ xc = prediction[..., 4] > conf_thres # candidates
673
+
674
+ # Checks
675
+ assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
676
+ assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'
677
+
678
+ # Settings
679
+ min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
680
+ max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
681
+ time_limit = 10.0 # seconds to quit after
682
+ redundant = True # require redundant detections
683
+ multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
684
+ merge = False # use merge-NMS
685
+
686
+ t = time.time()
687
+ output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
688
+ for xi, x in enumerate(prediction): # image index, image inference
689
+ # Apply constraints
690
+ # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
691
+ x = x[xc[xi]] # confidence
692
+
693
+ # Cat apriori labels if autolabelling
694
+ if labels and len(labels[xi]):
695
+ l = labels[xi]
696
+ v = torch.zeros((len(l), nc + 5), device=x.device)
697
+ v[:, :4] = l[:, 1:5] # box
698
+ v[:, 4] = 1.0 # conf
699
+ v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
700
+ x = torch.cat((x, v), 0)
701
+
702
+ # If none remain process next image
703
+ if not x.shape[0]:
704
+ continue
705
+
706
+ # Compute conf
707
+ x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
708
+
709
+ # Box (center x, center y, width, height) to (x1, y1, x2, y2)
710
+ box = xywh2xyxy(x[:, :4])
711
+
712
+ # Detections matrix nx6 (xyxy, conf, cls)
713
+ if multi_label:
714
+ i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
715
+ x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
716
+ else: # best class only
717
+ conf, j = x[:, 5:].max(1, keepdim=True)
718
+ x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
719
+
720
+ # Filter by class
721
+ if classes is not None:
722
+ x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
723
+
724
+ # Apply finite constraint
725
+ # if not torch.isfinite(x).all():
726
+ # x = x[torch.isfinite(x).all(1)]
727
+
728
+ # Check shape
729
+ n = x.shape[0] # number of boxes
730
+ if not n: # no boxes
731
+ continue
732
+ elif n > max_nms: # excess boxes
733
+ x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
734
+
735
+ # Batched NMS
736
+ c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
737
+ boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
738
+ i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
739
+ if i.shape[0] > max_det: # limit detections
740
+ i = i[:max_det]
741
+ if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
742
+ # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
743
+ iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
744
+ weights = iou * scores[None] # box weights
745
+ x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
746
+ if redundant:
747
+ i = i[iou.sum(1) > 1] # require redundancy
748
+
749
+ output[xi] = x[i]
750
+ if (time.time() - t) > time_limit:
751
+ print(f'WARNING: NMS time limit {time_limit}s exceeded')
752
+ break # time limit exceeded
753
+
754
+ return output
755
+
756
+
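A toy run of non_max_suppression (editor's sketch; with nc=1 each row is cx, cy, w, h, obj_conf, cls_conf, and all values are hypothetical):

pred = torch.tensor([[[100., 100., 50., 50., 0.90, 0.8],    # kept
                      [102., 102., 50., 50., 0.70, 0.8],    # suppressed, IoU ~0.85 with box 1
                      [300., 300., 40., 40., 0.10, 0.8]]])  # dropped, obj_conf below conf_thres
out = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
print(out[0])  # one (x1, y1, x2, y2, conf, cls) row, conf = 0.9 * 0.8 = 0.72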
757
+ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
758
+ # Strip optimizer from 'f' to finalize training, optionally save as 's'
759
+ x = torch.load(f, map_location=torch.device('cpu'))
760
+ if x.get('ema'):
761
+ x['model'] = x['ema'] # replace model with ema
762
+ for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys
763
+ if k in x:
764
+ x[k] = None
765
+ x['epoch'] = -1
766
+ x['model'].half() # to FP16
767
+ for p in x['model'].parameters():
768
+ p.requires_grad = False
769
+ torch.save(x, s or f)
770
+ mb = os.path.getsize(s or f) / 1E6 # filesize
771
+ print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
772
+
773
+
774
+
775
+ def print_mutation(results, hyp, save_dir, bucket):
776
+ evolve_csv, results_csv, evolve_yaml = save_dir / 'evolve.csv', save_dir / 'results.csv', save_dir / 'hyp_evolve.yaml'
777
+ keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
778
+ 'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps]
779
+ keys = tuple(x.strip() for x in keys)
780
+ vals = results + tuple(hyp.values())
781
+ n = len(keys)
782
+
783
+ # Download (optional)
784
+ if bucket:
785
+ url = f'gs://{bucket}/evolve.csv'
786
+ if gsutil_getsize(url) > (os.path.getsize(evolve_csv) if os.path.exists(evolve_csv) else 0):
787
+ os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local
788
+
789
+ # Log to evolve.csv
790
+ s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header
791
+ with open(evolve_csv, 'a') as f:
792
+ f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n')
793
+
794
+ # Print to screen
795
+ print(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys))
796
+ print(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals), end='\n\n\n')
797
+
798
+ # Save yaml
799
+ with open(evolve_yaml, 'w') as f:
800
+ data = pd.read_csv(evolve_csv)
801
+ data = data.rename(columns=lambda x: x.strip()) # strip keys
802
+ i = np.argmax(fitness(data.values[:, :7])) # index of best generation
803
+ f.write('# YOLOv5 Hyperparameter Evolution Results\n' +
804
+ f'# Best generation: {i}\n' +
805
+ f'# Last generation: {len(data)}\n' +
806
+ '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' +
807
+ '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n')
808
+ yaml.safe_dump(hyp, f, sort_keys=False)
809
+
810
+ if bucket:
811
+ os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload
812
+
813
+
814
+ def apply_classifier(x, model, img, im0):
815
+ # Apply a second stage classifier to yolo outputs
816
+ im0 = [im0] if isinstance(im0, np.ndarray) else im0
817
+ for i, d in enumerate(x): # per image
818
+ if d is not None and len(d):
819
+ d = d.clone()
820
+
821
+ # Reshape and pad cutouts
822
+ b = xyxy2xywh(d[:, :4]) # boxes
823
+ b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
824
+ b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
825
+ d[:, :4] = xywh2xyxy(b).long()
826
+
827
+ # Rescale boxes from img_size to im0 size
828
+ scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
829
+
830
+ # Classes
831
+ pred_cls1 = d[:, 5].long()
832
+ ims = []
833
+ for j, a in enumerate(d): # per item
834
+ cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
835
+ im = cv2.resize(cutout, (224, 224)) # BGR
836
+ # cv2.imwrite('example%i.jpg' % j, cutout)
837
+
838
+ im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
839
+ im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
840
+ im /= 255 # 0 - 255 to 0.0 - 1.0
841
+ ims.append(im)
842
+
843
+ pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
844
+ x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
845
+
846
+ return x
847
+
848
+
849
+ def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):
850
+ # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
851
+ xyxy = torch.tensor(xyxy).view(-1, 4)
852
+ b = xyxy2xywh(xyxy) # boxes
853
+ if square:
854
+ b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
855
+ b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
856
+ xyxy = xywh2xyxy(b).long()
857
+ clip_coords(xyxy, im.shape)
858
+ crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
859
+ if save:
860
+ cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop)
861
+ return crop
862
+
863
+
864
+ def increment_path(path, exist_ok=False, sep='', mkdir=False):
865
+ # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
866
+ path = Path(path) # os-agnostic
867
+ if path.exists() and not exist_ok:
868
+ path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')
869
+ dirs = glob.glob(f"{path}{sep}*") # similar paths
870
+ matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
871
+ i = [int(m.groups()[0]) for m in matches if m] # indices
872
+ n = max(i) + 1 if i else 2 # increment number
873
+ path = Path(f"{path}{sep}{n}{suffix}") # increment path
874
+ if mkdir:
875
+ path.mkdir(parents=True, exist_ok=True) # make directory
876
+ return path
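increment_path gives each run a fresh directory (editor's sketch; paths are hypothetical):

p = increment_path('runs/exp')  # 'runs/exp' if unused, else 'runs/exp2', 'runs/exp3', ...
print(p)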
metrics.py ADDED
@@ -0,0 +1,335 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+ """
3
+ Model validation metrics
4
+ """
5
+
6
+ import math
7
+ import warnings
8
+ from pathlib import Path
9
+
10
+ import matplotlib.pyplot as plt
11
+ import numpy as np
12
+ import torch
13
+
14
+
15
+ def fitness(x):
16
+ # Model fitness as a weighted combination of metrics
17
+ w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
18
+ return (x[:, :4] * w).sum(1)
19
+
20
+
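fitness reduces a results row to one scalar, weighting mAP@0.5:0.95 most heavily (editor's sketch, toy numbers):

x = np.array([[0.8, 0.7, 0.6, 0.4]])  # [P, R, mAP@0.5, mAP@0.5:0.95]
print(fitness(x))                     # [0.42] = 0.1 * 0.6 + 0.9 * 0.4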
21
+ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()):
22
+ """ Compute the average precision, given the recall and precision curves.
23
+ Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
24
+ # Arguments
25
+ tp: True positives (nparray, nx1 or nx10).
26
+ conf: Objectness value from 0-1 (nparray).
27
+ pred_cls: Predicted object classes (nparray).
28
+ target_cls: True object classes (nparray).
29
+ plot: Plot precision-recall curve at mAP@0.5
30
+ save_dir: Plot save directory
31
+ # Returns
32
+ The average precision as computed in py-faster-rcnn.
33
+ """
34
+
35
+ # Sort by objectness
36
+ i = np.argsort(-conf)
37
+ tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
38
+
39
+ # Find unique classes
40
+ unique_classes = np.unique(target_cls)
41
+ nc = unique_classes.shape[0] # number of classes, number of detections
42
+
43
+ # Create Precision-Recall curve and compute AP for each class
44
+ px, py = np.linspace(0, 1, 1000), [] # for plotting
45
+ ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
46
+ for ci, c in enumerate(unique_classes):
47
+ i = pred_cls == c
48
+ n_l = (target_cls == c).sum() # number of labels
49
+ n_p = i.sum() # number of predictions
50
+
51
+ if n_p == 0 or n_l == 0:
52
+ continue
53
+ else:
54
+ # Accumulate FPs and TPs
55
+ fpc = (1 - tp[i]).cumsum(0)
56
+ tpc = tp[i].cumsum(0)
57
+
58
+ # Recall
59
+ recall = tpc / (n_l + 1e-16) # recall curve
60
+ r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases
61
+
62
+ # Precision
63
+ precision = tpc / (tpc + fpc) # precision curve
64
+ p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score
65
+
66
+ # AP from recall-precision curve
67
+ for j in range(tp.shape[1]):
68
+ ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
69
+ if plot and j == 0:
70
+ py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
71
+
72
+ # Compute F1 (harmonic mean of precision and recall)
73
+ f1 = 2 * p * r / (p + r + 1e-16)
74
+ names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data
75
+ names = {i: v for i, v in enumerate(names)} # to dict
76
+ if plot:
77
+ plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.jpg', names)
78
+ plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.jpg', names, ylabel='F1')
79
+ plot_mc_curve(px, p, Path(save_dir) / 'P_curve.jpg', names, ylabel='Precision')
80
+ plot_mc_curve(px, r, Path(save_dir) / 'R_curve.jpg', names, ylabel='Recall')
81
+
82
+ i = f1.mean(0).argmax() # max F1 index
83
+ return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')
84
+
85
+
86
+ def compute_ap(recall, precision):
87
+ """ Compute the average precision, given the recall and precision curves
88
+ # Arguments
89
+ recall: The recall curve (list)
90
+ precision: The precision curve (list)
91
+ # Returns
92
+ Average precision, precision curve, recall curve
93
+ """
94
+
95
+ # Append sentinel values to beginning and end
96
+ mrec = np.concatenate(([0.0], recall, [1.0]))
97
+ mpre = np.concatenate(([1.0], precision, [0.0]))
98
+
99
+ # Compute the precision envelope
100
+ mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
101
+
102
+ # Integrate area under curve
103
+ method = 'interp' # methods: 'continuous', 'interp'
104
+ if method == 'interp':
105
+ x = np.linspace(0, 1, 101) # 101-point interp (COCO)
106
+ ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
107
+ else: # 'continuous'
108
+ i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
109
+ ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
110
+
111
+ return ap, mpre, mrec
112
+
113
+
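For a detector whose precision stays at 1.0 over all recall, compute_ap should return AP close to 1 (editor's sketch; the small gap comes from the 101-point interpolation and the appended sentinel values):

ap, mpre, mrec = compute_ap(np.array([0.5, 1.0]), np.array([1.0, 1.0]))
print(round(float(ap), 3))  # ~0.995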
114
+ class ConfusionMatrix:
115
+ # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
116
+ def __init__(self, nc, conf=0.25, iou_thres=0.45):
117
+ self.matrix = np.zeros((nc + 1, nc + 1))
118
+ self.nc = nc # number of classes
119
+ self.conf = conf
120
+ self.iou_thres = iou_thres
121
+
122
+ def process_batch(self, detections, labels):
123
+ """
124
+ Return intersection-over-union (Jaccard index) of boxes.
125
+ Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
126
+ Arguments:
127
+ detections (Array[N, 6]), x1, y1, x2, y2, conf, class
128
+ labels (Array[M, 5]), class, x1, y1, x2, y2
129
+ Returns:
130
+ None, updates confusion matrix accordingly
131
+ """
132
+ detections = detections[detections[:, 4] > self.conf]
133
+ gt_classes = labels[:, 0].int()
134
+ detection_classes = detections[:, 5].int()
135
+ iou = box_iou(labels[:, 1:], detections[:, :4])
136
+
137
+ x = torch.where(iou > self.iou_thres)
138
+ if x[0].shape[0]:
139
+ matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
140
+ if x[0].shape[0] > 1:
141
+ matches = matches[matches[:, 2].argsort()[::-1]]
142
+ matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
143
+ matches = matches[matches[:, 2].argsort()[::-1]]
144
+ matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
145
+ else:
146
+ matches = np.zeros((0, 3))
147
+
148
+ n = matches.shape[0] > 0
149
+ m0, m1, _ = matches.transpose().astype(np.int16)
150
+ for i, gc in enumerate(gt_classes):
151
+ j = m0 == i
152
+ if n and sum(j) == 1:
153
+ self.matrix[detection_classes[m1[j]], gc] += 1 # correct
154
+ else:
155
+ self.matrix[self.nc, gc] += 1 # background FP
156
+
157
+ if n:
158
+ for i, dc in enumerate(detection_classes):
159
+ if not any(m1 == i):
160
+ self.matrix[dc, self.nc] += 1 # background FN
161
+
162
+ def matrix(self):
163
+ return self.matrix
164
+
165
+ def plot(self, normalize=True, save_dir='', names=()):
166
+ try:
167
+ import seaborn as sn
168
+
169
+ array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-6) if normalize else 1) # normalize columns
170
+ array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)
171
+
172
+ fig = plt.figure(figsize=(12, 9), tight_layout=True)
173
+ sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size
174
+ labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels
175
+ with warnings.catch_warnings():
176
+ warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered
177
+ sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
178
+ xticklabels=names + ['background FP'] if labels else "auto",
179
+ yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
180
+ fig.axes[0].set_xlabel('True')
181
+ fig.axes[0].set_ylabel('Predicted')
182
+ fig.savefig(Path(save_dir) / 'confusion_matrix.jpg', dpi=250)
183
+ plt.close()
184
+ except Exception as e:
185
+ print(f'WARNING: ConfusionMatrix plot failure: {e}')
186
+
187
+ def print(self):
188
+ for i in range(self.nc + 1):
189
+ print(' '.join(map(str, self.matrix[i])))
190
+
191
+
192
+ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
193
+ # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
194
+ box2 = box2.T
195
+
196
+ # Get the coordinates of bounding boxes
197
+ if x1y1x2y2: # x1, y1, x2, y2 = box1
198
+ b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
199
+ b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
200
+ else: # transform from xywh to xyxy
201
+ b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
202
+ b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
203
+ b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
204
+ b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
205
+
206
+ # Intersection area
207
+ inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
208
+ (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
209
+
210
+ # Union Area
211
+ w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
212
+ w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
213
+ union = w1 * h1 + w2 * h2 - inter + eps
214
+
215
+ iou = inter / union
216
+ if GIoU or DIoU or CIoU:
217
+ cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
218
+ ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
219
+ if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
220
+ c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
221
+ rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
222
+ (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
223
+ if DIoU:
224
+ return iou - rho2 / c2 # DIoU
225
+ elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
226
+ v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
227
+ with torch.no_grad():
228
+ alpha = v / (v - iou + (1 + eps))
229
+ return iou - (rho2 / c2 + v * alpha) # CIoU
230
+ else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
231
+ c_area = cw * ch + eps # convex area
232
+ return iou - (c_area - union) / c_area # GIoU
233
+ else:
234
+ return iou # IoU
235
+
236
+
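Comparing plain IoU with CIoU on two partially overlapping boxes (editor's sketch; box1 is a single xyxy box, box2 is nx4, values are toy numbers):

b1 = torch.tensor([0., 0., 10., 10.])
b2 = torch.tensor([[5., 5., 15., 15.]])
print(bbox_iou(b1, b2))             # ~0.1429 (25 / 175)
print(bbox_iou(b1, b2, CIoU=True))  # ~0.032, penalised by centre distance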
237
+ def box_iou(box1, box2):
238
+ # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
239
+ """
240
+ Return intersection-over-union (Jaccard index) of boxes.
241
+ Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
242
+ Arguments:
243
+ box1 (Tensor[N, 4])
244
+ box2 (Tensor[M, 4])
245
+ Returns:
246
+ iou (Tensor[N, M]): the NxM matrix containing the pairwise
247
+ IoU values for every element in boxes1 and boxes2
248
+ """
249
+
250
+ def box_area(box):
251
+ # box = 4xn
252
+ return (box[2] - box[0]) * (box[3] - box[1])
253
+
254
+ area1 = box_area(box1.T)
255
+ area2 = box_area(box2.T)
256
+
257
+ # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
258
+ inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
259
+ return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
260
+
261
+
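box_iou returns the full NxM IoU matrix between two box sets (editor's sketch, toy boxes):

a = torch.tensor([[0., 0., 10., 10.], [20., 20., 30., 30.]])
b = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
print(box_iou(a, b))  # 2x2 matrix; [0, 0] = 1.0 (identical), [0, 1] ~ 0.143, row 1 is all zeros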
262
+ def bbox_ioa(box1, box2, eps=1E-7):
263
+ """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2
264
+ box1: np.array of shape(4)
265
+ box2: np.array of shape(nx4)
266
+ returns: np.array of shape(n)
267
+ """
268
+
269
+ box2 = box2.transpose()
270
+
271
+ # Get the coordinates of bounding boxes
272
+ b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
273
+ b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
274
+
275
+ # Intersection area
276
+ inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
277
+ (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
278
+
279
+ # box2 area
280
+ box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps
281
+
282
+ # Intersection over box2 area
283
+ return inter_area / box2_area
284
+
285
+
286
+ def wh_iou(wh1, wh2):
287
+ # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
288
+ wh1 = wh1[:, None] # [N,1,2]
289
+ wh2 = wh2[None] # [1,M,2]
290
+ inter = torch.min(wh1, wh2).prod(2) # [N,M]
291
+ return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
292
+
293
+
294
+ # Plots ----------------------------------------------------------------------------------------------------------------
295
+
296
+ def plot_pr_curve(px, py, ap, save_dir='pr_curve.jpg', names=()):
297
+ # Precision-recall curve
298
+ fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
299
+ py = np.stack(py, axis=1)
300
+
301
+ if 0 < len(names) < 21: # display per-class legend if < 21 classes
302
+ for i, y in enumerate(py.T):
303
+ ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision)
304
+ else:
305
+ ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision)
306
+
307
+ ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
308
+ ax.set_xlabel('Recall')
309
+ ax.set_ylabel('Precision')
310
+ ax.set_xlim(0, 1)
311
+ ax.set_ylim(0, 1)
312
+ plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
313
+ fig.savefig(Path(save_dir), dpi=250)
314
+ plt.close()
315
+
316
+
317
+ def plot_mc_curve(px, py, save_dir='mc_curve.jpg', names=(), xlabel='Confidence', ylabel='Metric'):
318
+ # Metric-confidence curve
319
+ fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
320
+
321
+ if 0 < len(names) < 21: # display per-class legend if < 21 classes
322
+ for i, y in enumerate(py):
323
+ ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric)
324
+ else:
325
+ ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric)
326
+
327
+ y = py.mean(0)
328
+ ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
329
+ ax.set_xlabel(xlabel)
330
+ ax.set_ylabel(ylabel)
331
+ ax.set_xlim(0, 1)
332
+ ax.set_ylim(0, 1)
333
+ plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
334
+ fig.savefig(Path(save_dir), dpi=250)
335
+ plt.close()
plots.py ADDED
@@ -0,0 +1,525 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+ """
3
+ Plotting utils
4
+ """
5
+
6
+ import math
7
+ import os
8
+ from copy import copy
9
+ from pathlib import Path
10
+
11
+ import cv2
12
+ import matplotlib
13
+ import matplotlib.pyplot as plt
14
+ import numpy as np
15
+ import pandas as pd
16
+ #from models.common import LOGGER
17
+ import seaborn as sn
18
+ import torch
19
+ from PIL import Image, ImageDraw, ImageFont
20
+
21
+ from general import is_ascii, is_chinese, user_config_dir, xywh2xyxy, xyxy2xywh
22
+ from metrics import fitness
23
+
24
+ # Settings
25
+ CONFIG_DIR = user_config_dir() # Ultralytics settings dir
26
+ RANK = int(os.getenv('RANK', -1))
27
+ matplotlib.rc('font', **{'size': 11})
28
+ matplotlib.use('Agg') # for writing to files only
29
+
30
+
31
+ class Colors:
32
+ # Ultralytics color palette https://ultralytics.com/
33
+ def __init__(self):
34
+ # hex = matplotlib.colors.TABLEAU_COLORS.values()
35
+ hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
36
+ '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
37
+ self.palette = [self.hex2rgb('#' + c) for c in hex]
38
+ self.n = len(self.palette)
39
+
40
+ def __call__(self, i, bgr=False):
41
+ c = self.palette[int(i) % self.n]
42
+ return (c[2], c[1], c[0]) if bgr else c
43
+
44
+ @staticmethod
45
+ def hex2rgb(h): # rgb order (PIL)
46
+ return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
47
+
48
+
49
+ colors = Colors() # create instance for 'from utils.plots import colors'
50
+
51
+
52
+ def check_font(font='Arial.ttf', size=10):
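The palette is indexed modulo its length, with an optional BGR flip for OpenCV drawing (editor's sketch):

print(colors(0))            # (255, 56, 56), RGB decoded from hex 'FF3838'
print(colors(0, bgr=True))  # (56, 56, 255), same colour in cv2 channel order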
53
+ # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary
54
+ font = Path(font)
55
+ font = font if font.exists() else (CONFIG_DIR / font.name)
56
+ try:
57
+ return ImageFont.truetype(str(font) if font.exists() else font.name, size)
58
+ except Exception as e: # download if missing
59
+ url = "https://ultralytics.com/assets/" + font.name
60
+ print(f'Downloading {url} to {font}...')
61
+ torch.hub.download_url_to_file(url, str(font), progress=False)
62
+ return ImageFont.truetype(str(font), size)
63
+
64
+
65
+ class Annotator:
66
+ if RANK in (-1, 0):
67
+ check_font() # download TTF if necessary
68
+
69
+ # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
70
+ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
71
+ assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
72
+ self.pil = pil or not is_ascii(example) or is_chinese(example)
73
+ if self.pil: # use PIL
74
+ self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
75
+ self.draw = ImageDraw.Draw(self.im)
76
+ self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
77
+ size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
78
+ else: # use cv2
79
+ self.im = im
80
+ self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
81
+
82
+ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
83
+ # Add one xyxy box to image with label
84
+ if self.pil or not is_ascii(label):
85
+ self.draw.rectangle(box, width=self.lw, outline=color) # box
86
+ if label:
87
+ w, h = self.font.getsize(label) # text width, height
88
+ outside = box[1] - h >= 0 # label fits outside box
89
+ self.draw.rectangle([box[0],
90
+ box[1] - h if outside else box[1],
91
+ box[0] + w + 1,
92
+ box[1] + 1 if outside else box[1] + h + 1], fill=color)
93
+ # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0
94
+ self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
95
+ else: # cv2
96
+ p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
97
+ cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
98
+ if label:
99
+ tf = max(self.lw - 1, 1) # font thickness
100
+ w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height
101
+ outside = p1[1] - h - 3 >= 0 # label fits outside box
102
+ p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
103
+ cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled
104
+ cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color,
105
+ thickness=tf, lineType=cv2.LINE_AA)
106
+
107
+ def rectangle(self, xy, fill=None, outline=None, width=1):
108
+ # Add rectangle to image (PIL-only)
109
+ self.draw.rectangle(xy, fill, outline, width)
110
+
111
+ def text(self, xy, text, txt_color=(255, 255, 255)):
112
+ # Add text to image (PIL-only)
113
+ w, h = self.font.getsize(text) # text width, height
114
+ self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font)
115
+
116
+ def result(self):
117
+ # Return annotated image as array
118
+ return np.asarray(self.im)
119
+
120
+
121
+ def hist2d(x, y, n=100):
122
+ # 2d histogram used in labels.jpg and evolve.jpg
123
+ xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
124
+ hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
125
+ xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
126
+ yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
127
+ return np.log(hist[xidx, yidx])
128
+
129
+
130
+ def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
131
+ from scipy.signal import butter, filtfilt
132
+
133
+ # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
134
+ def butter_lowpass(cutoff, fs, order):
135
+ nyq = 0.5 * fs
136
+ normal_cutoff = cutoff / nyq
137
+ return butter(order, normal_cutoff, btype='low', analog=False)
138
+
139
+ b, a = butter_lowpass(cutoff, fs, order=order)
140
+ return filtfilt(b, a, data) # forward-backward filter
141
+
142
+
143
+ def output_to_target(output):
144
+ # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
145
+ targets = []
146
+ for i, o in enumerate(output):
147
+ for *box, conf, cls in o.cpu().numpy():
148
+ targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
149
+ return np.array(targets)
150
+
151
+
152
+ import random
153
+ from torchvision.utils import draw_bounding_boxes, make_grid, save_image
154
+ from torchvision.ops import box_convert
155
+
+ def plot_images_temporal(images, targets, fname='images.jpg', n_batch=1, LOGGER=None):
+     # Plot a grid of temporal image sequences with their per-frame boxes (one row per clip)
+     temporal_window = targets[0].shape[1]
+     # if LOGGER: LOGGER.info(f"Images shape before plot {images.shape}, {len(targets)}, {targets}")
+     if isinstance(images, np.ndarray):
+         images = images.transpose((0, 3, 1, 2))  # NHWC -> NCHW
+         images = np.stack([image[::-1] for image in images], 0)  # reverse channel order
+         images = torch.from_numpy(images)
+
+     b_t, c, h, w = images.shape
+
+     images = images.reshape(-1, temporal_window, c, h, w)
+     images, targets = images[:n_batch], targets[:n_batch]
+
+     if isinstance(images, torch.Tensor):
+         images = images.cpu().float()  # B x T x C x H x W
+     if isinstance(targets[0], np.ndarray):
+         targets = [torch.from_numpy(target).cpu() for target in targets]  # list of B [n_instances x T x 6]
+     if isinstance(targets[0], torch.Tensor):
+         targets = [target.cpu() for target in targets]  # list of B [n_instances x T x 6]
+
+     if torch.max(images[0]) <= 1:
+         images *= 255  # de-normalise (optional)
+
+     images_list = []
+     images = images.to(torch.uint8)
+     for ii, image_temporal in enumerate(images):
+         for ti, image in enumerate(image_temporal):
+             classes = targets[ii][:, ti, 0].numpy().astype(str).tolist()
+             boxes = targets[ii][:, ti, 1:]  # * torch.tensor([w, h, w, h])[None, :]
+             # boxes = box_convert(boxes, in_fmt="cxcywh", out_fmt="xyxy")
+             image = draw_bounding_boxes(image, boxes, classes, colors="red", width=7)
+             images_list.append(image)
+
+     # make grid and save
+     if LOGGER:  # LOGGER defaults to None, so guard before logging
+         LOGGER.info(f"in plot: {len(images_list)} images, targets {targets}, {fname}, {image.shape}")
+     images_grid = make_grid(images_list, nrow=temporal_window).float() / 255.
+     save_image(images_grid, fname)
+
+
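+ # Illustrative usage sketch (shapes are assumptions): images hold B clips of T stacked frames,
+ # and each target is an (n_instances, T, 1 + 4) tensor of [cls, x1, y1, x2, y2] per frame, in
+ # pixels, matching the draw_bounding_boxes call above.
+ #   imgs = torch.rand(2 * 5, 3, 320, 320)  # 2 clips x 5 frames
+ #   tgts = [torch.tensor([[[0, 10, 10, 50, 50]] * 5], dtype=torch.float)] * 2
+ #   plot_images_temporal(imgs, tgts, fname='clips.jpg', n_batch=2)
+
+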
+ def plot_images(images, targets, paths=None, fname='images.jpg', num_frames=5, names=None, max_size=1920, max_subplots=25):
+     # Plot image grid with labels
+     if isinstance(images, torch.Tensor):
+         images = images.cpu().float().numpy()
+     if isinstance(targets, torch.Tensor):
+         targets = targets.cpu().numpy()
+     if np.max(images[0]) <= 1:
+         images *= 255  # de-normalise (optional)
+     bs, _, h, w = images.shape  # batch size, _, height, width
+     bs = min(bs, max_subplots)  # limit plot images
+     ns = np.ceil(bs ** 0.5)  # number of subplots (square)
+     # print(f"NS {ns}, bs {bs}")
+
+     # Build image mosaic
+     mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init
+     for i, im in enumerate(images):
+         if i == max_subplots:  # plot at most max_subplots images
+             break
+         x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
+         im = im.transpose(1, 2, 0)
+         mosaic[y:y + h, x:x + w, :] = im
+
+     # Resize (optional)
+     scale = max_size / ns / max(h, w)
+     if scale < 1:
+         h = math.ceil(scale * h)
+         w = math.ceil(scale * w)
+         mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))
+
+     # Annotate
+     fs = int((h + w) * ns * 0.01)  # font size
+     annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True)
+     for i in range(i + 1):  # annotate each plotted image
+         x, y = int(w * (i // ns)), int(h * (i % ns))  # block origin
+         annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2)  # borders
+         if paths:
+             annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220))  # filenames
+         if len(targets) > 0:
+             ti = targets[targets[:, 0] == i]  # image targets
+             boxes = xywh2xyxy(ti[:, 2:6]).T
+             classes = ti[:, 1].astype('int')
+             labels = ti.shape[1] == 6  # labels if no conf column
+             conf = None if labels else ti[:, 6]  # check for confidence presence (label vs pred)
+
+             if boxes.shape[1]:
+                 if boxes.max() <= 1.01:  # if normalized with tolerance 0.01
+                     boxes[[0, 2]] *= w  # scale to pixels
+                     boxes[[1, 3]] *= h
+                 elif scale < 1:  # absolute coords need scale if image scales
+                     boxes *= scale
+             boxes[[0, 2]] += x
+             boxes[[1, 3]] += y
+             for j, box in enumerate(boxes.T.tolist()):
+                 cls = classes[j]
+                 color = colors(cls)
+                 cls = names[cls] if names else cls
+                 if labels or conf[j] > 0.25:  # 0.25 conf thresh
+                     label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
+                     annotator.box_label(box, label, color=color)
+     annotator.im.save(fname)  # save
+
+
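+ # Illustrative usage sketch: target rows are [image_idx, cls, x, y, w, h(, conf)], with xywh
+ # either normalized 0-1 or in pixels (both handled above); values below are hypothetical.
+ #   imgs = torch.rand(4, 3, 320, 320)
+ #   tgts = np.array([[0, 1, 0.5, 0.5, 0.2, 0.3]])  # one normalized label on image 0
+ #   plot_images(imgs, tgts, fname='train_batch0.jpg', names=['cat', 'dog'])
+
+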
+ def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
+     # Plot LR simulating training for full epochs
+     optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
+     y = []
+     for _ in range(epochs):
+         scheduler.step()
+         y.append(optimizer.param_groups[0]['lr'])
+     plt.plot(y, '.-', label='LR')
+     plt.xlabel('epoch')
+     plt.ylabel('LR')
+     plt.grid()
+     plt.xlim(0, epochs)
+     plt.ylim(0)
+     plt.savefig(Path(save_dir) / 'LR.jpg', dpi=200)
+     plt.close()
+
+
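+ # Illustrative usage sketch: preview a schedule before training; the model, optimizer and
+ # lambda below are hypothetical.
+ #   m = torch.nn.Linear(2, 2)
+ #   opt = torch.optim.SGD(m.parameters(), lr=0.01)
+ #   sch = torch.optim.lr_scheduler.LambdaLR(opt, lambda e: 1 - 0.9 * e / 300)
+ #   plot_lr_scheduler(opt, sch, epochs=300, save_dir='.')
+
+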
+ def plot_val_txt():  # from utils.plots import *; plot_val_txt()
+     # Plot val.txt histograms
+     x = np.loadtxt('val.txt', dtype=np.float32)
+     box = xyxy2xywh(x[:, :4])
+     cx, cy = box[:, 0], box[:, 1]
+
+     fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
+     ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
+     ax.set_aspect('equal')
+     plt.savefig('hist2d.jpg', dpi=300)
+
+     fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
+     ax[0].hist(cx, bins=600)
+     ax[1].hist(cy, bins=600)
+     plt.savefig('hist1d.jpg', dpi=200)
+
+
+ def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
+     # Plot targets.txt histograms
+     x = np.loadtxt('targets.txt', dtype=np.float32).T
+     s = ['x targets', 'y targets', 'width targets', 'height targets']
+     fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
+     ax = ax.ravel()
+     for i in range(4):
+         ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
+         ax[i].legend()
+         ax[i].set_title(s[i])
+     plt.savefig('targets.jpg', dpi=200)
+
+
+ def plot_val_study(file='', dir='', x=None):  # from utils.plots import *; plot_val_study()
+     # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)
+     save_dir = Path(file).parent if file else Path(dir)
+     plot2 = False  # plot additional results
+     if plot2:
+         ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
+
+     fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
+     # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
+     for f in sorted(save_dir.glob('study*.txt')):
+         y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
+         x = np.arange(y.shape[1]) if x is None else np.array(x)
+         if plot2:
+             s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
+             for i in range(7):
+                 ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
+                 ax[i].set_title(s[i])
+
+         j = y[3].argmax() + 1
+         ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
+                  label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
+
+     ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
+              'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
+
+     ax2.grid(alpha=0.2)
+     ax2.set_yticks(np.arange(20, 60, 5))
+     ax2.set_xlim(0, 57)
+     ax2.set_ylim(25, 55)
+     ax2.set_xlabel('GPU Speed (ms/img)')
+     ax2.set_ylabel('COCO AP val')
+     ax2.legend(loc='lower right')
+     f = save_dir / 'study.jpg'
+     print(f'Saving {f}...')
+     plt.savefig(f, dpi=300)
+
+
+ def plot_labels(labels, names=(), save_dir=Path('')):
+     # Plot dataset labels
+     print('Plotting labels... ')
+     c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes
+     nc = int(c.max() + 1)  # number of classes
+     x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
+
+     # seaborn correlogram
+     sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
+     plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
+     plt.close()
+
+     # matplotlib labels
+     matplotlib.use('svg')  # faster
+     ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
+     y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
+     # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)]  # update colors bug #3195
+     ax[0].set_ylabel('instances')
+     if 0 < len(names) < 30:
+         ax[0].set_xticks(range(len(names)))
+         ax[0].set_xticklabels(names, rotation=90, fontsize=10)
+     else:
+         ax[0].set_xlabel('classes')
+     sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
+     sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
+
+     # rectangles
+     labels[:, 1:3] = 0.5  # center
+     labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
+     img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
+     for cls, *box in labels[:1000]:
+         ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls))  # plot
+     ax[1].imshow(img)
+     ax[1].axis('off')
+
+     for a in [0, 1, 2, 3]:
+         for s in ['top', 'right', 'left', 'bottom']:
+             ax[a].spines[s].set_visible(False)
+
+     plt.savefig(save_dir / 'labels.jpg', dpi=200)
+     matplotlib.use('Agg')
+     plt.close()
+
+
+ def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
+     # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
+     ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
+     s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
+     files = list(Path(save_dir).glob('frames*.txt'))
+     for fi, f in enumerate(files):
+         try:
+             results = np.loadtxt(f, ndmin=2).T[:, 90:-30]  # clip first and last rows
+             n = results.shape[1]  # number of rows
+             x = np.arange(start, min(stop, n) if stop else n)
+             results = results[:, x]
+             t = (results[0] - results[0].min())  # set t0=0s
+             results[0] = x
+             for i, a in enumerate(ax):
+                 if i < len(results):
+                     label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
+                     a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
+                     a.set_title(s[i])
+                     a.set_xlabel('time (s)')
+                     # if fi == len(files) - 1:
+                     #     a.set_ylim(bottom=0)
+                     for side in ['top', 'right']:
+                         a.spines[side].set_visible(False)
+                 else:
+                     a.remove()
+         except Exception as e:
+             print(f'Warning: Plotting error for {f}; {e}')
+     ax[1].legend()
+     plt.savefig(Path(save_dir) / 'idetection_profile.jpg', dpi=200)
+
+
+ def plot_evolve(evolve_csv='path/to/evolve.csv'):  # from utils.plots import *; plot_evolve()
+     # Plot evolve.csv hyp evolution results
+     evolve_csv = Path(evolve_csv)
+     data = pd.read_csv(evolve_csv)
+     keys = [x.strip() for x in data.columns]
+     x = data.values
+     f = fitness(x)
+     j = np.argmax(f)  # max fitness index
+     plt.figure(figsize=(10, 12), tight_layout=True)
+     matplotlib.rc('font', **{'size': 8})
+     for i, k in enumerate(keys[7:]):
+         v = x[:, 7 + i]
+         mu = v[j]  # best single result
+         plt.subplot(6, 5, i + 1)
+         plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
+         plt.plot(mu, f.max(), 'k+', markersize=15)
+         plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9})  # limit to 40 characters
+         if i % 5 != 0:
+             plt.yticks([])
+         print(f'{k:>15}: {mu:.3g}')
+     f = evolve_csv.with_suffix('.jpg')  # filename
+     plt.savefig(f, dpi=200)
+     plt.close()
+     print(f'Saved {f}')
+
+
+ def plot_results(file='path/to/results.csv', dir=''):
+     # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
+     save_dir = Path(file).parent if file else Path(dir)
+     fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
+     ax = ax.ravel()
+     files = list(save_dir.glob('results*.csv'))
+     assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
+     for fi, f in enumerate(files):
+         try:
+             data = pd.read_csv(f)
+             s = [x.strip() for x in data.columns]
+             x = data.values[:, 0]
+             for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
+                 y = data.values[:, j]
+                 # y[y == 0] = np.nan  # don't show zero values
+                 ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8)
+                 ax[i].set_title(s[j], fontsize=12)
+                 # if j in [8, 9, 10]:  # share train and val loss y axes
+                 #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
+         except Exception as e:
+             print(f'Warning: Plotting error for {f}: {e}')
+     ax[1].legend()
+     fig.savefig(save_dir / 'results.jpg', dpi=200)
+     plt.close()
+
+
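+ # Illustrative usage sketch: plots every results*.csv found next to the given file;
+ # the run directory below is hypothetical.
+ #   plot_results(file='runs/train/exp/results.csv')
+
+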
472
+ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
473
+ """
474
+ x: Features to be visualized
475
+ module_type: Module type
476
+ stage: Module stage within model
477
+ n: Maximum number of feature maps to plot
478
+ save_dir: Directory to save results
479
+ """
480
+ if 'Detect' not in module_type:
481
+ batch, channels, height, width = x.shape # batch, channels, height, width
482
+ if height > 1 and width > 1:
483
+ f = f"stage{stage}_{module_type.split('.')[-1]}_features.jpg" # filename
484
+
485
+ blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels
486
+ n = min(n, channels) # number of plots
487
+ fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols
488
+ ax = ax.ravel()
489
+ plt.subplots_adjust(wspace=0.05, hspace=0.05)
490
+ for i in range(n):
491
+ ax[i].imshow(blocks[i].squeeze()) # cmap='gray'
492
+ ax[i].axis('off')
493
+
494
+ print(f'Saving {save_dir / f}... ({n}/{channels})')
495
+ plt.savefig(save_dir / f, dpi=300, bbox_inches='tight')
496
+ plt.close()
497
+
498
+
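+ # Illustrative usage sketch: dump channel maps of one intermediate activation; the tensor and
+ # module name below are hypothetical, and save_dir must already exist.
+ #   feats = torch.rand(1, 64, 40, 40)
+ #   feature_visualization(feats, 'models.common.C3', stage=2, n=32, save_dir=Path('.'))
+
+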
+ def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):
+     # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
+     xyxy = torch.tensor(xyxy).view(-1, 4)
+     b = xyxy2xywh(xyxy)  # boxes
+     if square:
+         b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square
+     b[:, 2:] = b[:, 2:] * gain + pad  # box wh * gain + pad
+     xyxy = xywh2xyxy(b).long()
+     clip_coords(xyxy, im.shape)
+     crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
+     if save:
+         file = Path(file)  # accept str or Path; .parent/.with_suffix below require a Path
+         file.parent.mkdir(parents=True, exist_ok=True)  # make directory
+         f = str(increment_path(file).with_suffix('.jpg'))
+         cv2.imwrite(f, crop)  # chroma subsampling issue on Ubuntu
+     return crop
+
+
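+ # Illustrative usage sketch: crop one xyxy detection from a BGR frame read with cv2;
+ # the image path is hypothetical.
+ #   frame = cv2.imread('bus.jpg')
+ #   crop = save_one_box([100, 50, 300, 400], frame, file=Path('crops/person.jpg'), BGR=True)
+
+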
+ def clip_coords(boxes, shape):
+     # Clip xyxy bounding boxes to image shape (height, width)
+     if isinstance(boxes, torch.Tensor):  # faster individually
+         boxes[:, 0].clamp_(0, shape[1])  # x1
+         boxes[:, 1].clamp_(0, shape[0])  # y1
+         boxes[:, 2].clamp_(0, shape[1])  # x2
+         boxes[:, 3].clamp_(0, shape[0])  # y2
+     else:  # np.array (faster grouped)
+         boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1])  # x1, x2
+         boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0])  # y1, y2
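+
+ # Illustrative usage sketch: clamp out-of-frame boxes to a 480x640 image, in place.
+ #   preds = torch.tensor([[-5., 10., 700., 500.]])
+ #   clip_coords(preds, (480, 640))  # -> [[0., 10., 640., 480.]]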