id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
14,655
import math

import numpy as np
import requests
import torch
import torch.nn as nn
from PIL import Image, ImageDraw

from utils.datasets import letterbox
from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh
from utils.plots import color_list


def channel_shuffle(x, groups):
    """Interleave channels across groups (ShuffleNet-style channel shuffle).

    Args:
        x: 4-D tensor of shape (batch, channels, height, width); channels must
           be divisible by `groups`.
        groups: number of channel groups to shuffle between.

    Returns:
        Tensor of the same shape with channels reordered group-wise.
    """
    batch, channels, h, w = x.data.size()
    per_group = channels // groups
    # Split channels into (groups, per_group), swap those two axes, then
    # flatten back so channels from different groups alternate.
    shuffled = x.view(batch, groups, per_group, h, w)
    shuffled = torch.transpose(shuffled, 1, 2).contiguous()
    return shuffled.view(batch, -1, h, w)
null
14,656
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first


def get_hash(files):
    """Return a cheap aggregate "hash" for a list of paths: the summed byte
    sizes of those entries that exist as regular files."""
    total = 0
    for path in files:
        if os.path.isfile(path):
            total += os.path.getsize(path)
    return total
null
14,657
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first

# Find the EXIF tag id whose name is 'Orientation' (used by exif_size below).
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break


def exif_size(img):
    """Return the EXIF-corrected (width, height) of a PIL image.

    Orientation values 6 (rotated 270) and 8 (rotated 90) mean the stored
    pixels are transposed relative to the display orientation, so width and
    height are swapped. Images without EXIF data keep their raw size.
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation in (6, 8):  # 270- or 90-degree rotation swaps w/h
            s = (s[1], s[0])
    # Narrowed from a bare `except:` — _getexif() may return None
    # (AttributeError) or lack the Orientation tag (KeyError); anything else
    # should not be silently swallowed.
    except (AttributeError, KeyError, IndexError, TypeError):
        pass
    return s
null
14,658
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first


def showlabels(img, boxs, landmarks):
    """Draw normalized center-xywh boxes (green) and 5-point landmarks (red)
    on `img`, then display it in an OpenCV window until a key is pressed."""
    img_h, img_w = img.shape[0], img.shape[1]
    for box in boxs:
        # box holds normalized (cx, cy, w, h); scale to pixels.
        cx, cy = box[0] * img_w, box[1] * img_h
        bw, bh = box[2] * img_w, box[3] * img_h
        top_left = (int(cx - bw / 2), int(cy - bh / 2))
        bottom_right = (int(cx + bw / 2), int(cy + bh / 2))
        cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), 2)
    for landmark in landmarks:
        # landmark holds 5 normalized (x, y) pairs, flattened.
        for i in range(5):
            pt = (int(landmark[2 * i] * img_w), int(landmark[2 * i + 1] * img_h))
            cv2.circle(img, pt, 3, (0, 0, 255), -1)
    cv2.imshow('test', img)
    cv2.waitKey(0)
null
14,659
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first


def load_image(self, index):
    # loads 1 image from dataset, returns img, original hw, resized hw
    img = self.imgs[index]
    if img is None:  # not cached
        path = self.img_files[index]
        img = cv2.imread(path)  # BGR
        assert img is not None, 'Image Not Found ' + path
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # resize image to img_size
        if r != 1:  # always resize down, only resize up if training with augmentation
            interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
    else:
        return self.imgs[index], self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized


def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
    # Random affine/perspective warp of `img` and its labels.
    # targets rows are [cls, x1, y1, x2, y2, lm1x, lm1y, ..., lm5x, lm5y]
    # (pixel xyxy box plus 5 landmark points; -1 marks a missing landmark).
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center: translate so the warp is applied about the image center.
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points: 4 box corners + 5 landmarks = 9 homogeneous points/label
        #xy = np.ones((n * 4, 3))
        xy = np.ones((n * 9, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]].reshape(n * 9, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = xy @ M.T  # transform
        if perspective:
            xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 18)  # rescale
        else:  # affine
            xy = xy[:, :2].reshape(n, 18)

        # create new boxes: min/max of the 4 warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]

        landmarks = xy[:, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17]]
        # mask is 1 where a landmark coordinate was valid (>0), else 0;
        # multiplying then adding (mask - 1) keeps valid coords and maps
        # previously-missing ones to -1.
        mask = np.array(targets[:, 5:] > 0, dtype=np.int32)
        landmarks = landmarks * mask
        landmarks = landmarks + mask - 1

        # Any landmark warped out of bounds is also marked missing (-1).
        landmarks = np.where(landmarks < 0, -1, landmarks)
        landmarks[:, [0, 2, 4, 6, 8]] = np.where(landmarks[:, [0, 2, 4, 6, 8]] > width, -1, landmarks[:, [0, 2, 4, 6, 8]])
        landmarks[:, [1, 3, 5, 7, 9]] = np.where(landmarks[:, [1, 3, 5, 7, 9]] > height, -1, landmarks[:, [1, 3, 5, 7, 9]])

        # Keep x/y of each landmark consistent: if one coordinate is missing,
        # invalidate its partner as well.
        landmarks[:, 0] = np.where(landmarks[:, 1] == -1, -1, landmarks[:, 0])
        landmarks[:, 1] = np.where(landmarks[:, 0] == -1, -1, landmarks[:, 1])
        landmarks[:, 2] = np.where(landmarks[:, 3] == -1, -1, landmarks[:, 2])
        landmarks[:, 3] = np.where(landmarks[:, 2] == -1, -1, landmarks[:, 3])
        landmarks[:, 4] = np.where(landmarks[:, 5] == -1, -1, landmarks[:, 4])
        landmarks[:, 5] = np.where(landmarks[:, 4] == -1, -1, landmarks[:, 5])
        landmarks[:, 6] = np.where(landmarks[:, 7] == -1, -1, landmarks[:, 6])
        landmarks[:, 7] = np.where(landmarks[:, 6] == -1, -1, landmarks[:, 7])
        landmarks[:, 8] = np.where(landmarks[:, 9] == -1, -1, landmarks[:, 8])
        landmarks[:, 9] = np.where(landmarks[:, 8] == -1, -1, landmarks[:, 9])

        targets[:, 5:] = landmarks

        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # clip boxes
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)

        # filter candidates
        # NOTE(review): box_candidates is defined elsewhere in this module;
        # presumably it drops degenerate/over-shrunk boxes — confirm there.
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
        targets = targets[i]
        targets[:, 1:5] = xy[i]

    return img, targets


def load_mosaic_face(self, index):
    # loads images in a mosaic: 4 images are tiled around a random center
    # (yc, xc) in a 2s x 2s canvas; labels (boxes + 5 landmarks) are shifted
    # into mosaic coordinates, then the whole mosaic is randomly warped.
    labels4 = []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4; the a-coordinates index the mosaic canvas, the
        # b-coordinates index the source image.
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        x = self.labels[index]
        labels = x.copy()
        if x.size > 0:  # Normalized xywh to pixel xyxy format
            #box, x1,y1,x2,y2
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
            #10 landmarks: scale+shift valid coords (>0); missing ones
            # (the `> 0` mask is 0) come out as -1 via the `mask - 1` term.
            labels[:, 5] = np.array(x[:, 5] > 0, dtype=np.int32) * (w * x[:, 5] + padw) + (np.array(x[:, 5] > 0, dtype=np.int32) - 1)
            labels[:, 6] = np.array(x[:, 6] > 0, dtype=np.int32) * (h * x[:, 6] + padh) + (np.array(x[:, 6] > 0, dtype=np.int32) - 1)
            labels[:, 7] = np.array(x[:, 7] > 0, dtype=np.int32) * (w * x[:, 7] + padw) + (np.array(x[:, 7] > 0, dtype=np.int32) - 1)
            labels[:, 8] = np.array(x[:, 8] > 0, dtype=np.int32) * (h * x[:, 8] + padh) + (np.array(x[:, 8] > 0, dtype=np.int32) - 1)
            labels[:, 9] = np.array(x[:, 9] > 0, dtype=np.int32) * (w * x[:, 9] + padw) + (np.array(x[:, 9] > 0, dtype=np.int32) - 1)
            labels[:, 10] = np.array(x[:, 10] > 0, dtype=np.int32) * (h * x[:, 10] + padh) + (np.array(x[:, 10] > 0, dtype=np.int32) - 1)
            labels[:, 11] = np.array(x[:, 11] > 0, dtype=np.int32) * (w * x[:, 11] + padw) + (np.array(x[:, 11] > 0, dtype=np.int32) - 1)
            labels[:, 12] = np.array(x[:, 12] > 0, dtype=np.int32) * (h * x[:, 12] + padh) + (np.array(x[:, 12] > 0, dtype=np.int32) - 1)
            labels[:, 13] = np.array(x[:, 13] > 0, dtype=np.int32) * (w * x[:, 13] + padw) + (np.array(x[:, 13] > 0, dtype=np.int32) - 1)
            labels[:, 14] = np.array(x[:, 14] > 0, dtype=np.int32) * (h * x[:, 14] + padh) + (np.array(x[:, 14] > 0, dtype=np.int32) - 1)
        labels4.append(labels)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        np.clip(labels4[:, 1:5], 0, 2 * s, out=labels4[:, 1:5])  # use with random_perspective
        # img4, labels4 = replicate(img4, labels4)  # replicate

        #landmarks: out-of-canvas coords become -1, and each (x, y) pair is
        # invalidated together.
        labels4[:, 5:] = np.where(labels4[:, 5:] < 0, -1, labels4[:, 5:])
        labels4[:, 5:] = np.where(labels4[:, 5:] > 2 * s, -1, labels4[:, 5:])

        labels4[:, 5] = np.where(labels4[:, 6] == -1, -1, labels4[:, 5])
        labels4[:, 6] = np.where(labels4[:, 5] == -1, -1, labels4[:, 6])

        labels4[:, 7] = np.where(labels4[:, 8] == -1, -1, labels4[:, 7])
        labels4[:, 8] = np.where(labels4[:, 7] == -1, -1, labels4[:, 8])

        labels4[:, 9] = np.where(labels4[:, 10] == -1, -1, labels4[:, 9])
        labels4[:, 10] = np.where(labels4[:, 9] == -1, -1, labels4[:, 10])

        labels4[:, 11] = np.where(labels4[:, 12] == -1, -1, labels4[:, 11])
        labels4[:, 12] = np.where(labels4[:, 11] == -1, -1, labels4[:, 12])

        labels4[:, 13] = np.where(labels4[:, 14] == -1, -1, labels4[:, 13])
        labels4[:, 14] = np.where(labels4[:, 13] == -1, -1, labels4[:, 14])

    # Augment
    img4, labels4 = random_perspective(img4, labels4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4
null
14,660
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first


def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Randomly jitter hue/saturation/value of a BGR image, writing back into
    `img` in place (no return value)."""
    gains = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    # Build one lookup table per channel instead of scaling every pixel.
    table = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((table * gains[0]) % 180).astype(dtype)  # OpenCV hue wraps at 180
    lut_sat = np.clip(table * gains[1], 0, 255).astype(dtype)
    lut_val = np.clip(table * gains[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # convert back into the caller's buffer
null
14,661
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first


def replicate(img, labels):
    """Duplicate the smaller half of the labeled boxes by pasting their pixels
    at random locations in `img`, appending matching label rows."""
    img_h, img_w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    side = ((x2 - x1) + (y2 - y1)) / 2  # mean side length per box (pixels)
    # Replicate the smallest 50% of boxes.
    for idx in side.argsort()[:round(side.size * 0.5)]:
        x1b, y1b, x2b, y2b = boxes[idx]
        bh, bw = y2b - y1b, x2b - x1b
        # Random top-left offset such that the pasted box stays inside img.
        yc, xc = int(random.uniform(0, img_h - bh)), int(random.uniform(0, img_w - bw))
        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
        img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # paste source pixels
        labels = np.append(labels, [[labels[idx, 0], x1a, y1a, x2a, y2a]], axis=0)
    return img, labels
null
14,662
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    """Resize `img` to fit `new_shape` keeping aspect ratio, padding the rest.

    Resize image to a 32-pixel-multiple rectangle
    https://github.com/ultralytics/yolov3/issues/232

    Returns (padded image, (w_ratio, h_ratio), (dw, dh) padding per side).
    """
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale factor (new / old); optionally never upscale (better test mAP).
    gain = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:
        gain = min(gain, 1.0)

    ratio = gain, gain  # width, height ratios
    unpadded = int(round(shape[1] * gain)), int(round(shape[0] * gain))  # (w, h)
    dw = new_shape[1] - unpadded[0]  # width padding
    dh = new_shape[0] - unpadded[1]  # height padding
    if auto:
        # Pad only up to the nearest 64-pixel multiple (minimum rectangle).
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)
    elif scaleFill:
        # Stretch to exactly new_shape, no padding at all.
        dw, dh = 0.0, 0.0
        unpadded = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]

    # Split padding evenly between the two sides.
    dw /= 2
    dh /= 2

    if shape[::-1] != unpadded:
        img = cv2.resize(img, unpadded, interpolation=cv2.INTER_LINEAR)
    # The +/-0.1 rounding puts any odd pixel of padding on the bottom/right.
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return img, ratio, (dw, dh)
null
14,663
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first


def cutout(image, labels):
    """Apply cutout augmentation (https://arxiv.org/abs/1708.04552): paint
    random rectangles over `image` in place and drop labels that end up
    mostly hidden. Returns the surviving labels."""
    img_h, img_w = image.shape[:2]

    def bbox_ioa(box1, box2):
        # Intersection over box2 area; box1 is a single x1y1x2y2 box, box2 is nx4.
        box2 = box2.transpose()
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
        inter = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
        area2 = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16  # avoid div-by-zero
        return inter / area2

    # One half-size mask, two quarter-size, four eighth-size, ... (fractions of image size)
    fractions = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
    for frac in fractions:
        mask_h = random.randint(1, int(img_h * frac))
        mask_w = random.randint(1, int(img_w * frac))

        # Random mask rectangle, clipped to the image.
        xmin = max(0, random.randint(0, img_w) - mask_w // 2)
        ymin = max(0, random.randint(0, img_h) - mask_h // 2)
        xmax = min(img_w, xmin + mask_w)
        ymax = min(img_h, ymin + mask_h)

        # Fill with a random mid-range color.
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # Drop labels that the larger masks obscure by more than 60%.
        if len(labels) and frac > 0.03:
            mask_box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(mask_box, labels[:, 1:5])
            labels = labels[ioa < 0.60]

    return labels
null
14,664
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first


def create_folder(path='./new'):
    """Create an empty folder at `path`, deleting any existing one first."""
    if os.path.exists(path):
        shutil.rmtree(path)  # delete output folder
    os.makedirs(path)  # make new output folder


def flatten_recursive(path='../coco128'):
    """Copy every file under `path` (recursively) into a fresh sibling
    directory named '<path>_flat', discarding the directory structure."""
    flat_dir = Path(path + '_flat')
    create_folder(flat_dir)
    for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
        shutil.copyfile(file, flat_dir / Path(file).name)
null
14,665
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.general import xyxy2xywh, xywh2xyxy, clean_str
from utils.torch_utils import torch_distributed_zero_first

# Accepted image file suffixes (lowercase, no dot).
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng']


def img2label_paths(img_paths):
    """Map image paths to label paths: .../images/x.jpg -> .../labels/x.txt."""
    sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ substrings
    return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]


def xywh2xyxy(x):
    """Convert nx4 boxes from [cx, cy, w, h] to [x1, y1, x2, y2]
    (xy1 = top-left, xy2 = bottom-right). Accepts torch tensors or ndarrays."""
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y


def extract_boxes(path='../coco128/'):  # from utils.datasets import *; extract_boxes('../coco128')
    """Convert a detection dataset into a classification dataset by cropping
    each labeled box into <path>/classifier/<class>/ image files."""
    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in img_formats:  # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box (normalized xywh -> pixel xywh)
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    # FIX: np.int was a deprecated alias for the builtin int and
                    # was removed in NumPy 1.24; use int directly.
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
null
14,666
import glob import logging import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from threading import Thread import cv2 import numpy as np import torch from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.general import xyxy2xywh, xywh2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] The provided code snippet includes necessary dependencies for implementing the `autosplit` function. Write a Python function `def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0))` to solve the following problem: Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files # Arguments path: Path to images directory weights: Train, val, test weights (list) Here is the function: def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128') """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files # Arguments path: Path to images directory weights: Train, val, test weights (list) """ path = Path(path) # images dir files = list(path.rglob('*.*')) n = len(files) # number of files indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing for i, img in tqdm(zip(indices, files), total=n): if img.suffix[1:] in img_formats: with open(path / txt[i], 'a') as f: f.write(str(img) + '\n') # add image to txt file
Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files # Arguments path: Path to images directory weights: Train, val, test weights (list)
14,667
import json
import sys
from pathlib import Path

import torch
import yaml
from tqdm import tqdm

from utils.datasets import LoadImagesAndLabels
from utils.datasets import img2label_paths
from utils.general import colorstr, xywh2xyxy, check_dataset


def check_wandb_config_file(data_config_file):
    """Return the '_wandb'-suffixed variant of a data yaml path if that file
    exists on disk, otherwise return the original path unchanged."""
    # e.g. 'data/coco.yaml' -> 'data/coco_wandb.yaml'
    candidate = '_wandb.'.join(data_config_file.rsplit('.', 1))
    return candidate if Path(candidate).is_file() else data_config_file
null
14,668
import json
import sys
from pathlib import Path

import torch
import yaml
from tqdm import tqdm

from utils.datasets import LoadImagesAndLabels
from utils.datasets import img2label_paths
from utils.general import colorstr, xywh2xyxy, check_dataset

# wandb is an optional dependency; all callers must cope with wandb being None.
try:
    import wandb
    from wandb import init, finish
except ImportError:
    wandb = None

# Prefix marking dataset/model paths stored as W&B artifacts rather than on disk.
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'


def get_run_info(run_path):
    # Parse a 'wandb-artifact://<project>/<run_id>' path into
    # (run_id, project, model_artifact_name).
    # NOTE(review): relies on `remove_prefix`, defined elsewhere in this module.
    run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
    run_id = run_path.stem
    project = run_path.parent.stem
    model_artifact_name = 'run_' + run_id + '_model'
    return run_id, project, model_artifact_name


def process_wandb_config_ddp_mode(opt):
    # Download W&B dataset artifacts referenced by opt.data and rewrite the
    # data yaml with local paths, so non-zero DDP ranks can load the data.
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
    train_dir, val_dir = None, None
    if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
        api = wandb.Api()
        # NOTE(review): remove_prefix is called with one argument here but with
        # two in get_run_info — presumably its prefix defaults to
        # WANDB_ARTIFACT_PREFIX; confirm at its definition.
        train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias)
        train_dir = train_artifact.download()
        train_path = Path(train_dir) / 'data/images/'
        data_dict['train'] = str(train_path)
    if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX):
        api = wandb.Api()
        val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias)
        val_dir = val_artifact.download()
        val_path = Path(val_dir) / 'data/images/'
        data_dict['val'] = str(val_path)
    if train_dir or val_dir:
        # Persist the localized config and point opt.data at it.
        # NOTE(review): uses val_dir for the output location — fails if only
        # the train split was an artifact; flagging, not changing.
        ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')
        with open(ddp_data_path, 'w') as f:
            yaml.dump(data_dict, f)
        opt.data = ddp_data_path


def check_wandb_resume(opt):
    # Return True when opt.resume names a W&B artifact run (downloading the
    # last.pt checkpoint on non-zero DDP ranks and updating opt.weights),
    # otherwise None.
    process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None
    if isinstance(opt.resume, str):
        if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
            if opt.global_rank not in [-1, 0]:  # For resuming DDP runs
                run_id, project, model_artifact_name = get_run_info(opt.resume)
                api = wandb.Api()
                artifact = api.artifact(project + '/' + model_artifact_name + ':latest')
                modeldir = artifact.download()
                opt.weights = str(Path(modeldir) / "last.pt")
            return True
    return None
null
14,669
import argparse import yaml from wandb_utils import WandbLogger class WandbLogger(): def __init__(self, opt, name, run_id, data_dict, job_type='Training'): # Pre-training routine -- self.job_type = job_type self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): run_id, project, model_artifact_name = get_run_info(opt.resume) model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name assert wandb, 'install wandb to resume wandb runs' # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config self.wandb_run = wandb.init(id=run_id, project=project, resume='allow') opt.resume = model_artifact_name elif self.wandb: self.wandb_run = wandb.init(config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, name=name, job_type=job_type, id=run_id) if not wandb.run else wandb.run if self.wandb_run: if self.job_type == 'Training': if not opt.resume: wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict # Info useful for resuming from artifacts self.wandb_run.config.opt = vars(opt) self.wandb_run.config.data_dict = wandb_data_dict self.data_dict = self.setup_training(opt, data_dict) if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) else: prefix = colorstr('wandb: ') print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") def check_and_upload_dataset(self, opt): assert wandb, 'Install wandb to upload dataset' check_dataset(self.data_dict) config_path = self.log_dataset_artifact(opt.data, opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) print("Created dataset config file ", 
config_path) with open(config_path) as f: wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader) return wandb_data_dict def setup_training(self, opt, data_dict): self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants self.bbox_interval = opt.bbox_interval if isinstance(opt.resume, str): modeldir, _ = self.download_model_artifact(opt) if modeldir: self.weights = Path(modeldir) / "last.pt" config = self.wandb_run.config opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \ config.opt['hyp'] data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), opt.artifact_alias) self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), opt.artifact_alias) self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None if self.train_artifact_path is not None: train_path = Path(self.train_artifact_path) / 'data/images/' data_dict['train'] = str(train_path) if self.val_artifact_path is not None: val_path = Path(self.val_artifact_path) / 'data/images/' data_dict['val'] = str(val_path) self.val_table = self.val_artifact.get("val") self.map_val_table_path() if self.val_artifact is not None: self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 return data_dict def download_dataset_artifact(self, path, alias): if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): 
dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() return datadir, dataset_artifact return None, None def download_model_artifact(self, opt): if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' modeldir = model_artifact.download() epochs_trained = model_artifact.metadata.get('epochs_trained') total_epochs = model_artifact.metadata.get('total_epochs') assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to resume.' % ( total_epochs) return modeldir, model_artifact return None, None def log_model(self, path, opt, epoch, fitness_score, best_model=False): model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ 'original_url': str(path), 'epochs_trained': epoch + 1, 'save period': opt.save_period, 'project': opt.project, 'total_epochs': opt.epochs, 'fitness_score': fitness_score }) model_artifact.add_file(str(path / 'last.pt'), name='last.pt') wandb.log_artifact(model_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) print("Saving model artifact on epoch ", epoch + 1) def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): with open(data_file) as f: data = yaml.load(f, Loader=yaml.SafeLoader) # data dict nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) names = {k: v for k, v in enumerate(names)} # to index dictionary self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( data['train']), names, name='train') if data.get('train') else None self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( data['val']), names, name='val') if data.get('val') 
else None if data.get('train'): data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') if data.get('val'): data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path data.pop('download', None) with open(path, 'w') as f: yaml.dump(data, f) if self.job_type == 'Training': # builds correct artifact pipeline graph self.wandb_run.use_artifact(self.val_artifact) self.wandb_run.use_artifact(self.train_artifact) self.val_artifact.wait() self.val_table = self.val_artifact.get('val') self.map_val_table_path() else: self.wandb_run.log_artifact(self.train_artifact) self.wandb_run.log_artifact(self.val_artifact) return path def map_val_table_path(self): self.val_table_map = {} print("Mapping dataset") for i, data in enumerate(tqdm(self.val_table.data)): self.val_table_map[data[3]] = data[0] def create_dataset_table(self, dataset, class_to_id, name='dataset'): # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging artifact = wandb.Artifact(name=name, type="dataset") img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None img_files = tqdm(dataset.img_files) if not img_files else img_files for img_file in img_files: if Path(img_file).is_dir(): artifact.add_dir(img_file, name='data/images') labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) artifact.add_dir(labels_path, name='data/labels') else: artifact.add_file(img_file, name='data/images/' + Path(img_file).name) label_file = Path(img2label_paths([img_file])[0]) artifact.add_file(str(label_file), name='data/labels/' + label_file.name) if label_file.exists() else None table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) for si, (img, labels, paths, shapes) in 
enumerate(tqdm(dataset)): height, width = shapes[0] labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) * torch.Tensor([width, height, width, height]) box_data, img_classes = [], {} for cls, *xyxy in labels[:, 1:].tolist(): cls = int(cls) box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": cls, "box_caption": "%s" % (class_to_id[cls]), "scores": {"acc": 1}, "domain": "pixel"}) img_classes[cls] = class_to_id[cls] boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes), Path(paths).name) artifact.add(table, name) return artifact def log_training_progress(self, predn, path, names): if self.val_table and self.result_table: class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) box_data = [] total_conf = 0 for *xyxy, conf, cls in predn.tolist(): if conf >= 0.25: box_data.append( {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"}) total_conf = total_conf + conf boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space id = self.val_table_map[Path(path).name] self.result_table.add_data(self.current_epoch, id, wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), total_conf / max(1, len(box_data)) ) def log(self, log_dict): if self.wandb_run: for key, value in log_dict.items(): self.log_dict[key] = value def end_epoch(self, best_result=False): if self.wandb_run: wandb.log(self.log_dict) self.log_dict = {} if self.result_artifact: train_results = wandb.JoinedTable(self.val_table, self.result_table, "id") self.result_artifact.add(train_results, 'result') wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch), 
('best' if best_result else '')]) self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): if self.wandb_run: if self.log_dict: wandb.log(self.log_dict) wandb.run.finish() def create_dataset_artifact(opt): with open(opt.data) as f: data = yaml.load(f, Loader=yaml.SafeLoader) # data dict logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')
null
14,670
import logging import math import os import subprocess import time from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F import torchvision logger = logging.getLogger(__name__) def git_describe(): # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe if Path('.git').exists(): return subprocess.check_output('git describe --tags --long --always', shell=True).decode('utf-8')[:-1] else: return '' def select_device(device='', batch_size=None): # device = 'cpu' or '0' or '0,1,2,3' s = f'YOLOv5 {git_describe()} torch {torch.__version__} ' # string cpu = device.lower() == 'cpu' if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability cuda = not cpu and torch.cuda.is_available() if cuda: n = torch.cuda.device_count() if n > 1 and batch_size: # check that batch_size is compatible with device_count assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' space = ' ' * len(s) for i, d in enumerate(device.split(',') if device else range(n)): p = torch.cuda.get_device_properties(i) s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB else: s += 'CPU\n' logger.info(s) # skip a line return torch.device('cuda:0' if cuda else 'cpu')
null
14,671
import logging import math import os import subprocess import time from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F import torchvision def initialize_weights(model): for m in model.modules(): t = type(m) if t is nn.Conv2d: pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif t is nn.BatchNorm2d: m.eps = 1e-3 m.momentum = 0.03 elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: m.inplace = True
null
14,672
import logging import math import os import subprocess import time from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F import torchvision def find_modules(model, mclass=nn.Conv2d): # Finds layer indices matching module class 'mclass' return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
null
14,673
import logging import math import os import subprocess import time from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F import torchvision def sparsity(model): # Return global model sparsity a, b = 0., 0. for p in model.parameters(): a += p.numel() b += (p == 0).sum() return b / a def prune(model, amount=0.3): # Prune model to requested global sparsity import torch.nn.utils.prune as prune print('Pruning model... ', end='') for name, m in model.named_modules(): if isinstance(m, nn.Conv2d): prune.l1_unstructured(m, name='weight', amount=amount) # prune prune.remove(m, 'weight') # make permanent print(' %.3g global sparsity' % sparsity(model))
null
14,674
import logging import math import os import subprocess import time from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F import torchvision def fuse_conv_and_bn(conv, bn): # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ fusedconv = nn.Conv2d(conv.in_channels, conv.out_channels, kernel_size=conv.kernel_size, stride=conv.stride, padding=conv.padding, groups=conv.groups, bias=True).requires_grad_(False).to(conv.weight.device) # prepare filters w_conv = conv.weight.clone().view(conv.out_channels, -1) w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size())) # prepare spatial bias b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) return fusedconv
null
14,675
import logging import math import os import subprocess import time from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F import torchvision try: import thop # for FLOPS computation except ImportError: thop = None logger = logging.getLogger(__name__) def profile(x, ops, n=100, device=None): def model_info(model, verbose=False, img_size=640): # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320] n_p = sum(x.numel() for x in model.parameters()) # number parameters n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients if verbose: print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) for i, (name, p) in enumerate(model.named_parameters()): name = name.replace('module_list.', '') print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) try: # FLOPS from thop import profile stride = int(model.stride.max()) if hasattr(model, 'stride') else 32 img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS except (ImportError, Exception): fs = '' logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
null
14,676
import logging import math import os import subprocess import time from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F import torchvision def load_classifier(name='resnet101', n=2): # Loads a pretrained model reshaped to n-class output model = torchvision.models.__dict__[name](pretrained=True) # ResNet model properties # input_size = [3, 224, 224] # input_space = 'RGB' # input_range = [0, 1] # mean = [0.485, 0.456, 0.406] # std = [0.229, 0.224, 0.225] # Reshape output to n classes filters = model.fc.weight.shape[1] model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) model.fc.out_features = n return model
null
14,677
import logging import math import os import subprocess import time from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F import torchvision def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) # scales img(bs,3,y,x) by ratio constrained to gs-multiple if ratio == 1.0: return img else: h, w = img.shape[2:] s = (int(h * ratio), int(w * ratio)) # new size img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize if not same_shape: # pad/crop img h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
null
14,678
import logging import math import os import subprocess import time from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as F import torchvision def copy_attr(a, b, include=(), exclude=()): # Copy attributes from b to a, options to only include [...] and to exclude [...] for k, v in b.__dict__.items(): if (len(include) and k not in include) or k.startswith('_') or k in exclude: continue else: setattr(a, k, v)
null
14,679
import glob import logging import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from threading import Thread import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first def get_hash(files): # Returns a single hash value of a list of files return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
null
14,680
import glob import logging import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from threading import Thread import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first for orientation in ExifTags.TAGS.keys(): if ExifTags.TAGS[orientation] == 'Orientation': break def exif_size(img): # Returns exif-corrected PIL size s = img.size # (width, height) try: rotation = dict(img._getexif().items())[orientation] if rotation == 6: # rotation 270 s = (s[1], s[0]) elif rotation == 8: # rotation 90 s = (s[1], s[0]) except: pass return s
null
14,681
import glob import logging import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from threading import Thread import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): """ Dataloader that reuses workers Uses same syntax as vanilla DataLoader """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) self.iterator = super().__iter__() def __len__(self): return len(self.batch_sampler.sampler) def __iter__(self): for i in range(len(self)): yield next(self.iterator) class LoadImagesAndLabels(Dataset): # for training/testing def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): self.img_size = img_size self.augment = augment self.hyp = hyp self.image_weights = image_weights self.rect = False if image_weights else rect self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride try: f = [] # image files for p in path if isinstance(path, list) else [path]: p = Path(p) # os-agnostic if p.is_dir(): # dir f += glob.glob(str(p / '**' / '*.*'), recursive=True) elif p.is_file(): # file with open(p, 'r') as t: t = t.read().strip().splitlines() parent = str(p.parent) + os.sep f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path else: raise Exception(f'{prefix}{p} does not exist') self.img_files = 
sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) assert self.img_files, f'{prefix}No images found' except Exception as e: raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') # Check cache self.label_files = img2label_paths(self.img_files) # labels cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels if cache_path.is_file(): cache = torch.load(cache_path) # load if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed cache = self.cache_labels(cache_path, prefix) # re-cache else: cache = self.cache_labels(cache_path, prefix) # cache # Display cache [nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" tqdm(None, desc=prefix + desc, total=n, initial=n) assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. 
See {help_url}' # Read cache cache.pop('hash') # remove hash labels, shapes = zip(*cache.values()) self.labels = list(labels) self.shapes = np.array(shapes, dtype=np.float64) self.img_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update if single_cls: for x in self.labels: x[:, 0] = 0 n = len(shapes) # number of images bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index nb = bi[-1] + 1 # number of batches self.batch = bi # batch index of image self.n = n self.indices = range(n) # Rectangular Training if self.rect: # Sort by aspect ratio s = self.shapes # wh ar = s[:, 1] / s[:, 0] # aspect ratio irect = ar.argsort() self.img_files = [self.img_files[i] for i in irect] self.label_files = [self.label_files[i] for i in irect] self.labels = [self.labels[i] for i in irect] self.shapes = s[irect] # wh ar = ar[irect] # Set training image shapes shapes = [[1, 1]] * nb for i in range(nb): ari = ar[bi == i] mini, maxi = ari.min(), ari.max() if maxi < 1: shapes[i] = [maxi, 1] elif mini > 1: shapes[i] = [1, 1 / mini] self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) self.imgs = [None] * n if cache_images: gb = 0 # Gigabytes of cached images self.img_hw0, self.img_hw = [None] * n, [None] * n results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads pbar = tqdm(enumerate(results), total=n) for i, x in pbar: self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) gb += self.imgs[i].nbytes pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate pbar = tqdm(zip(self.img_files, self.label_files), 
desc='Scanning images', total=len(self.img_files)) for i, (im_file, lb_file) in enumerate(pbar): try: # verify images im = Image.open(im_file) im.verify() # PIL verify shape = exif_size(im) # image size assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels' # verify labels if os.path.isfile(lb_file): nf += 1 # label found with open(lb_file, 'r') as f: l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels if len(l): assert l.shape[1] == 5, 'labels require 5 columns each' assert (l >= 0).all(), 'negative labels' assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' else: ne += 1 # label empty l = np.zeros((0, 5), dtype=np.float32) else: nm += 1 # label missing l = np.zeros((0, 5), dtype=np.float32) x[im_file] = [l, shape] except Exception as e: nc += 1 print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \ f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" if nf == 0: print(f'{prefix}WARNING: No labels found in {path}. 
See {help_url}') x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = [nf, nm, ne, nc, i + 1] torch.save(x, path) # save for next time logging.info(f'{prefix}New cache created: {path}') return x def __len__(self): return len(self.img_files) # def __iter__(self): # self.count = -1 # print('ran dataset iter') # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) # return self def __getitem__(self, index): index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp mosaic = self.mosaic and random.random() < hyp['mosaic'] if mosaic: # Load mosaic img, labels = load_mosaic(self, index) shapes = None # MixUp https://arxiv.org/pdf/1710.09412.pdf if random.random() < hyp['mixup']: img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1)) r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 img = (img * r + img2 * (1 - r)).astype(np.uint8) labels = np.concatenate((labels, labels2), 0) else: # Load image img, (h0, w0), (h, w) = load_image(self, index) # Letterbox shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling labels = self.labels[index].copy() if labels.size: # normalized xywh to pixel xyxy format labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) if self.augment: # Augment imagespace if not mosaic: img, labels = random_perspective(img, labels, degrees=hyp['degrees'], translate=hyp['translate'], scale=hyp['scale'], shear=hyp['shear'], perspective=hyp['perspective']) # Augment colorspace augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Apply cutouts # if random.random() < 0.9: # labels = cutout(img, labels) nL = len(labels) # number of labels if nL: labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh 
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 if self.augment: # flip up-down if random.random() < hyp['flipud']: img = np.flipud(img) if nL: labels[:, 2] = 1 - labels[:, 2] # flip left-right if random.random() < hyp['fliplr']: img = np.fliplr(img) if nL: labels[:, 1] = 1 - labels[:, 1] labels_out = torch.zeros((nL, 6)) if nL: labels_out[:, 1:] = torch.from_numpy(labels) # Convert img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 img = np.ascontiguousarray(img) return torch.from_numpy(img), labels_out, self.img_files[index], shapes def collate_fn(batch): img, label, path, shapes = zip(*batch) # transposed for i, l in enumerate(label): l[:, 0] = i # add target image index for build_targets() return torch.stack(img, 0), torch.cat(label, 0), path, shapes def collate_fn4(batch): img, label, path, shapes = zip(*batch) # transposed n = len(shapes) // 4 img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] ho = torch.tensor([[0., 0, 0, 1, 0, 0]]) wo = torch.tensor([[0., 0, 1, 0, 0, 0]]) s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW i *= 4 if random.random() < 0.5: im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[ 0].type(img[i].type()) l = label[i] else: im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s img4.append(im) label4.append(l) for i, l in enumerate(label4): l[:, 0] = i # add target image index for build_targets() return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 def torch_distributed_zero_first(local_rank: int): """ Decorator to make all processes in distributed training wait for each local_master to do something. 
""" if local_rank not in [-1, 0]: torch.distributed.barrier() yield if local_rank == 0: torch.distributed.barrier() def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): # Make sure only the first process in DDP process the dataset first, and the following others can use the cache with torch_distributed_zero_first(rank): dataset = LoadImagesAndLabels(path, imgsz, batch_size, augment=augment, # augment images hyp=hyp, # augmentation hyperparameters rect=rect, # rectangular training cache_images=cache, single_cls=opt.single_cls, stride=int(stride), pad=pad, image_weights=image_weights, prefix=prefix) batch_size = min(batch_size, len(dataset)) nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() dataloader = loader(dataset, batch_size=batch_size, num_workers=nw, sampler=sampler, pin_memory=True, collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) return dataloader, dataset
null
14,682
import glob import logging import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from threading import Thread import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) dtype = img.dtype # uint8 x = np.arange(0, 256, dtype=np.int16) lut_hue = ((x * r[0]) % 180).astype(dtype) lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) lut_val = np.clip(x * r[2], 0, 255).astype(dtype) img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed # Histogram equalization # if random.random() < 0.2: # for i in range(3): # img[:, :, i] = cv2.equalizeHist(img[:, :, i])
null
14,683
import glob import logging import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from threading import Thread import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first def load_image(self, index): # loads 1 image from dataset, returns img, original hw, resized hw img = self.imgs[index] if img is None: # not cached path = self.img_files[index] img = cv2.imread(path) # BGR assert img is not None, 'Image Not Found ' + path h0, w0 = img.shape[:2] # orig hw r = self.img_size / max(h0, w0) # resize image to img_size if r != 1: # always resize down, only resize up if training with augmentation interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized else: return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] height = img.shape[0] + border[0] * 2 # shape(h,w,c) width = img.shape[1] + border[1] * 2 # Center C = np.eye(3) C[0, 2] = -img.shape[1] / 2 # x translation (pixels) C[1, 2] = -img.shape[0] / 2 # y translation (pixels) # Perspective P = np.eye(3) P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) # Rotation and Scale R = np.eye(3) a = random.uniform(-degrees, degrees) # a += 
random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations s = random.uniform(1 - scale, 1 + scale) # s = 2 ** random.uniform(-scale, scale) R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) # Shear S = np.eye(3) S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) # Translation T = np.eye(3) T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) # Combined rotation matrix M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed if perspective: img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) else: # affine img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) # Visualize # import matplotlib.pyplot as plt # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() # ax[0].imshow(img[:, :, ::-1]) # base # ax[1].imshow(img2[:, :, ::-1]) # warped # Transform label coordinates n = len(targets) if n: # warp points xy = np.ones((n * 4, 3)) xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 xy = xy @ M.T # transform if perspective: xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale else: # affine xy = xy[:, :2].reshape(n, 8) # create new boxes x = xy[:, [0, 2, 4, 6]] y = xy[:, [1, 3, 5, 7]] xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T # # apply angle-based reduction of bounding boxes # radians = a * math.pi / 180 # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5 # x = (xy[:, 2] + xy[:, 0]) / 2 # y = (xy[:, 3] + xy[:, 1]) / 2 # w = (xy[:, 2] - xy[:, 0]) * reduction # h = (xy[:, 3] - xy[:, 1]) * reduction # xy = np.concatenate((x - w / 2, y - h / 2, x 
+ w / 2, y + h / 2)).reshape(4, n).T # clip boxes xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width) xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height) # filter candidates i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T) targets = targets[i] targets[:, 1:5] = xy[i] return img, targets def xywhn2xyxy(x, w=640, h=640, padw=32, padh=32): # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y return y def load_mosaic(self, index): # loads images in a 4-mosaic labels4 = [] s = self.img_size yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices for i, index in enumerate(indices): # Load image img, _, (h, w) = load_image(self, index) # place img in img4 if i == 0: # top left img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) elif i == 1: # top right x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h elif i == 2: # bottom left x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) elif i == 3: # bottom right x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, 
xmin:xmax] padw = x1a - x1b padh = y1a - y1b # Labels labels = self.labels[index].copy() if labels.size: labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format labels4.append(labels) # Concat/clip labels if len(labels4): labels4 = np.concatenate(labels4, 0) np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective # img4, labels4 = replicate(img4, labels4) # replicate # Augment img4, labels4 = random_perspective(img4, labels4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], shear=self.hyp['shear'], perspective=self.hyp['perspective'], border=self.mosaic_border) # border to remove return img4, labels4
null
14,684
import glob import logging import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from threading import Thread import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first def load_image(self, index): # loads 1 image from dataset, returns img, original hw, resized hw img = self.imgs[index] if img is None: # not cached path = self.img_files[index] img = cv2.imread(path) # BGR assert img is not None, 'Image Not Found ' + path h0, w0 = img.shape[:2] # orig hw r = self.img_size / max(h0, w0) # resize image to img_size if r != 1: # always resize down, only resize up if training with augmentation interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized else: return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] height = img.shape[0] + border[0] * 2 # shape(h,w,c) width = img.shape[1] + border[1] * 2 # Center C = np.eye(3) C[0, 2] = -img.shape[1] / 2 # x translation (pixels) C[1, 2] = -img.shape[0] / 2 # y translation (pixels) # Perspective P = np.eye(3) P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) # Rotation and Scale R = np.eye(3) a = random.uniform(-degrees, degrees) # a += 
random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations s = random.uniform(1 - scale, 1 + scale) # s = 2 ** random.uniform(-scale, scale) R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) # Shear S = np.eye(3) S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) # Translation T = np.eye(3) T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) # Combined rotation matrix M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed if perspective: img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) else: # affine img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) # Visualize # import matplotlib.pyplot as plt # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() # ax[0].imshow(img[:, :, ::-1]) # base # ax[1].imshow(img2[:, :, ::-1]) # warped # Transform label coordinates n = len(targets) if n: # warp points xy = np.ones((n * 4, 3)) xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 xy = xy @ M.T # transform if perspective: xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale else: # affine xy = xy[:, :2].reshape(n, 8) # create new boxes x = xy[:, [0, 2, 4, 6]] y = xy[:, [1, 3, 5, 7]] xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T # # apply angle-based reduction of bounding boxes # radians = a * math.pi / 180 # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5 # x = (xy[:, 2] + xy[:, 0]) / 2 # y = (xy[:, 3] + xy[:, 1]) / 2 # w = (xy[:, 2] - xy[:, 0]) * reduction # h = (xy[:, 3] - xy[:, 1]) * reduction # xy = np.concatenate((x - w / 2, y - h / 2, x 
+ w / 2, y + h / 2)).reshape(4, n).T # clip boxes xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width) xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height) # filter candidates i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T) targets = targets[i] targets[:, 1:5] = xy[i] return img, targets def xywhn2xyxy(x, w=640, h=640, padw=32, padh=32): # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y return y def load_mosaic9(self, index): # loads images in a 9-mosaic labels9 = [] s = self.img_size indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices for i, index in enumerate(indices): # Load image img, _, (h, w) = load_image(self, index) # place img in img9 if i == 0: # center img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles h0, w0 = h, w c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates elif i == 1: # top c = s, s - h, s + w, s elif i == 2: # top right c = s + wp, s - h, s + wp + w, s elif i == 3: # right c = s + w0, s, s + w0 + w, s + h elif i == 4: # bottom right c = s + w0, s + hp, s + w0 + w, s + hp + h elif i == 5: # bottom c = s + w0 - w, s + h0, s + w0, s + h0 + h elif i == 6: # bottom left c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h elif i == 7: # left c = s - w, s + h0 - h, s, s + h0 elif i == 8: # top left c = s - w, s + h0 - hp - h, s, s + h0 - hp padx, pady = c[:2] x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords # Labels labels = self.labels[index].copy() if labels.size: labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format 
labels9.append(labels) # Image img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] hp, wp = h, w # height, width previous # Offset yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] # Concat/clip labels if len(labels9): labels9 = np.concatenate(labels9, 0) labels9[:, [1, 3]] -= xc labels9[:, [2, 4]] -= yc np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective # img9, labels9 = replicate(img9, labels9) # replicate # Augment img9, labels9 = random_perspective(img9, labels9, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], shear=self.hyp['shear'], perspective=self.hyp['perspective'], border=self.mosaic_border) # border to remove return img9, labels9
null
14,685
import glob import logging import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from threading import Thread import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first def replicate(img, labels): # Replicate labels h, w = img.shape[:2] boxes = labels[:, 1:].astype(int) x1, y1, x2, y2 = boxes.T s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices x1b, y1b, x2b, y2b = boxes[i] bh, bw = y2b - y1b, x2b - x1b yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) return img, labels
null
14,686
import glob import logging import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from threading import Thread import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first def cutout(image, labels): # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 h, w = image.shape[:2] def bbox_ioa(box1, box2): # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2 box2 = box2.transpose() # Get the coordinates of bounding boxes b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] # Intersection area inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) # box2 area box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 # Intersection over box2 area return inter_area / box2_area # create random masks scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction for s in scales: mask_h = random.randint(1, int(h * s)) mask_w = random.randint(1, int(w * s)) # box xmin = max(0, random.randint(0, w) - mask_w // 2) ymin = max(0, random.randint(0, h) - mask_h // 2) xmax = min(w, xmin + mask_w) ymax = min(h, ymin + mask_h) # apply random color mask image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] # return unobscured labels if len(labels) and s > 0.03: box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area labels = labels[ioa < 0.60] # remove >60% obscured labels return labels
null
14,687
import glob import logging import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from threading import Thread import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first def create_folder(path='./new'): # Create folder if os.path.exists(path): shutil.rmtree(path) # delete output folder os.makedirs(path) # make new output folder def flatten_recursive(path='../coco128'): # Flatten a recursive directory by bringing all files to top level new_path = Path(path + '_flat') create_folder(new_path) for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): shutil.copyfile(file, new_path / Path(file).name)
null
14,688
import glob import logging import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from threading import Thread import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] def img2label_paths(img_paths): # Define label paths as a function of image paths sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths] def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y return y def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128') # Convert detection dataset into classification dataset, with one directory per class path = Path(path) # images dir shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing files = list(path.rglob('*.*')) n = len(files) # number of files for im_file in tqdm(files, total=n): if im_file.suffix[1:] in img_formats: # image im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB h, w = im.shape[:2] # labels lb_file = Path(img2label_paths([str(im_file)])[0]) if Path(lb_file).exists(): with open(lb_file, 'r') as f: lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels for j, x in enumerate(lb): c = 
int(x[0]) # class f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename if not f.parent.is_dir(): f.parent.mkdir(parents=True) b = x[1:] * [w, h, w, h] # box # b[2:] = b[2:].max() # rectangle to square b[2:] = b[2:] * 1.2 + 3 # pad b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image b[[1, 3]] = np.clip(b[[1, 3]], 0, h) assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
null
14,689
import glob import logging import math import os import random import shutil import time from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from threading import Thread import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] The provided code snippet includes necessary dependencies for implementing the `autosplit` function. Write a Python function `def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0))` to solve the following problem: Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files # Arguments path: Path to images directory weights: Train, val, test weights (list) Here is the function: def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128') """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files # Arguments path: Path to images directory weights: Train, val, test weights (list) """ path = Path(path) # images dir files = list(path.rglob('*.*')) n = len(files) # number of files indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing for i, img in tqdm(zip(indices, files), total=n): if img.suffix[1:] in img_formats: with open(path / txt[i], 'a') as f: f.write(str(img) + '\n') # add image to txt file
Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files # Arguments path: Path to images directory weights: Train, val, test weights (list)
14,690
import os import platform import subprocess import time from pathlib import Path import requests import torch def get_token(cookie="./cookie"): with open(cookie) as f: for line in f: if "download" in line: return line.split()[-1] return "" def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download() t = time.time() file = Path(file) cookie = Path('cookie') # gdrive cookie print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='') file.unlink(missing_ok=True) # remove existing file cookie.unlink(missing_ok=True) # remove existing cookie # Attempt file download out = "NUL" if platform.system() == "Windows" else "/dev/null" os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') if os.path.exists('cookie'): # large file s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' else: # small file s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' r = os.system(s) # execute, capture return cookie.unlink(missing_ok=True) # remove existing cookie # Error check if r != 0: file.unlink(missing_ok=True) # remove partial print('Download error ') # raise Exception('Download error') return r # Unzip if archive if file.suffix == '.zip': print('unzipping... ', end='') os.system(f'unzip -q {file}') # unzip file.unlink() # remove zip to free space print(f'Done ({time.time() - t:.1f}s)') return r
null
14,691
import glob import math import os import random from copy import copy from pathlib import Path import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import torch import yaml from PIL import Image, ImageDraw from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy def butter_lowpass(cutoff, fs, order): nyq = 0.5 * fs normal_cutoff = cutoff / nyq return butter(order, normal_cutoff, btype='low', analog=False) b, a = butter_lowpass(cutoff, fs, order=order) return filtfilt(b, a, data) # forward-backward filter
null
14,692
import glob import math import os import random from copy import copy from pathlib import Path import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import torch import yaml from PIL import Image, ImageDraw from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() # Compares the two methods for width-height anchor multiplication # https://github.com/ultralytics/yolov3/issues/168 x = np.arange(-4.0, 4.0, .1) ya = np.exp(x) yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 fig = plt.figure(figsize=(6, 3), tight_layout=True) plt.plot(x, ya, '.-', label='YOLOv3') plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2') plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6') plt.xlim(left=-4, right=4) plt.ylim(bottom=0, top=6) plt.xlabel('input') plt.ylabel('output') plt.grid() plt.legend() fig.savefig('comparison.png', dpi=200)
null
14,693
import glob import math import os import random from copy import copy from pathlib import Path import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import torch import yaml from PIL import Image, ImageDraw from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness def xyxy2xywh(x): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center y[:, 2] = x[:, 2] - x[:, 0] # width y[:, 3] = x[:, 3] - x[:, 1] # height return y def output_to_target(output): # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] targets = [] for i, o in enumerate(output): for *box, conf, cls in o.cpu().numpy(): targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) return np.array(targets)
null
14,694
import glob import math import os import random from copy import copy from pathlib import Path import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import torch import yaml from PIL import Image, ImageDraw from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): # Plot LR simulating training for full epochs optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals y = [] for _ in range(epochs): scheduler.step() y.append(optimizer.param_groups[0]['lr']) plt.plot(y, '.-', label='LR') plt.xlabel('epoch') plt.ylabel('LR') plt.grid() plt.xlim(0, epochs) plt.ylim(0) plt.savefig(Path(save_dir) / 'LR.png', dpi=200) plt.close()
null
14,695
import glob import math import os import random from copy import copy from pathlib import Path import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import torch import yaml from PIL import Image, ImageDraw from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness def hist2d(x, y, n=100): # 2d histogram used in labels.png and evolve.png xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) return np.log(hist[xidx, yidx]) def xyxy2xywh(x): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center y[:, 2] = x[:, 2] - x[:, 0] # width y[:, 3] = x[:, 3] - x[:, 1] # height return y def plot_test_txt(): # from utils.plots import *; plot_test() # Plot test.txt histograms x = np.loadtxt('test.txt', dtype=np.float32) box = xyxy2xywh(x[:, :4]) cx, cy = box[:, 0], box[:, 1] fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) ax.set_aspect('equal') plt.savefig('hist2d.png', dpi=300) fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) ax[0].hist(cx, bins=600) ax[1].hist(cy, bins=600) plt.savefig('hist1d.png', dpi=200)
null
14,696
import glob import math import os import random from copy import copy from pathlib import Path import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import torch import yaml from PIL import Image, ImageDraw from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() # Plot targets.txt histograms x = np.loadtxt('targets.txt', dtype=np.float32).T s = ['x targets', 'y targets', 'width targets', 'height targets'] fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) ax = ax.ravel() for i in range(4): ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) ax[i].legend() ax[i].set_title(s[i]) plt.savefig('targets.jpg', dpi=200)
null
14,697
import glob import math import os import random from copy import copy from pathlib import Path import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import torch import yaml from PIL import Image, ImageDraw from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness def plot_study_txt(path='study/', x=None): # from utils.plots import *; plot_study_txt() # Plot study.txt generated by test.py fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) ax = ax.ravel() fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]: y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T x = np.arange(y.shape[1]) if x is None else np.array(x) s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] for i in range(7): ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) ax[i].set_title(s[i]) j = y[3].argmax() + 1 ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8, label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') ax2.grid() ax2.set_yticks(np.arange(30, 60, 5)) ax2.set_xlim(0, 30) ax2.set_ylim(29, 51) ax2.set_xlabel('GPU Speed (ms/img)') ax2.set_ylabel('COCO AP val') ax2.legend(loc='lower right') plt.savefig('test_study.png', dpi=300)
null
14,698
import glob import math import os import random from copy import copy from pathlib import Path import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import torch import yaml from PIL import Image, ImageDraw from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') def hist2d(x, y, n=100): # 2d histogram used in labels.png and evolve.png xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) return np.log(hist[xidx, yidx]) def fitness(x): # Model fitness as a weighted combination of metrics w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] return (x[:, :4] * w).sum(1) def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() # Plot hyperparameter evolution results in evolve.txt with open(yaml_file) as f: hyp = yaml.load(f, Loader=yaml.SafeLoader) x = np.loadtxt('evolve.txt', ndmin=2) f = fitness(x) # weights = (f - f.min()) ** 2 # for weighted results plt.figure(figsize=(10, 12), tight_layout=True) matplotlib.rc('font', **{'size': 8}) for i, (k, v) in enumerate(hyp.items()): y = x[:, i + 7] # mu = (y * weights).sum() / weights.sum() # best weighted result mu = y[f.argmax()] # best single result plt.subplot(6, 5, i + 1) plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none') plt.plot(mu, f.max(), 'k+', markersize=15) plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters if i % 5 != 0: plt.yticks([]) print('%15s: %.3g' % (k, mu)) plt.savefig('evolve.png', dpi=200) print('\nPlot saved as evolve.png')
null
14,699
import glob import math import os import random from copy import copy from pathlib import Path import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import torch import yaml from PIL import Image, ImageDraw from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness def profile_idetection(start=0, stop=0, labels=(), save_dir=''): # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] files = list(Path(save_dir).glob('frames*.txt')) for fi, f in enumerate(files): try: results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows n = results.shape[1] # number of rows x = np.arange(start, min(stop, n) if stop else n) results = results[:, x] t = (results[0] - results[0].min()) # set t0=0s results[0] = x for i, a in enumerate(ax): if i < len(results): label = labels[fi] if len(labels) else f.stem.replace('frames_', '') a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) a.set_title(s[i]) a.set_xlabel('time (s)') # if fi == len(files) - 1: # a.set_ylim(bottom=0) for side in ['top', 'right']: a.spines[side].set_visible(False) else: a.remove() except Exception as e: print('Warning: Plotting error for %s; %s' % (f, e)) ax[1].legend() plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
null
14,700
import glob import math import os import random from copy import copy from pathlib import Path import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import torch import yaml from PIL import Image, ImageDraw from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay() # Plot training 'results*.txt', overlaying train and val losses s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T n = results.shape[1] # number of rows x = range(start, min(stop, n) if stop else n) fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) ax = ax.ravel() for i in range(5): for j in [i, i + 5]: y = results[j, x] ax[i].plot(x, y, marker='.', label=s[j]) # y_smooth = butter_lowpass_filtfilt(y) # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) ax[i].set_title(t[i]) ax[i].legend() ax[i].set_ylabel(f) if i == 0 else None # add filename fig.savefig(f.replace('.txt', '.png'), dpi=200)
null
14,701
import torch def decode_infer(output, stride): # logging.info(torch.tensor(output.shape[0])) # logging.info(output.shape) # # bz is batch-size # bz = tuple(torch.tensor(output.shape[0])) # gridsize = tuple(torch.tensor(output.shape[-1])) # logging.info(gridsize) sh = torch.tensor(output.shape) bz = sh[0] gridsize = sh[-1] output = output.permute(0, 2, 3, 1) output = output.view(bz, gridsize, gridsize, self.gt_per_grid, 5+self.numclass) x1y1, x2y2, conf, prob = torch.split( output, [2, 2, 1, self.numclass], dim=4) shiftx = torch.arange(0, gridsize, dtype=torch.float32) shifty = torch.arange(0, gridsize, dtype=torch.float32) shifty, shiftx = torch.meshgrid([shiftx, shifty]) shiftx = shiftx.unsqueeze(-1).repeat(bz, 1, 1, self.gt_per_grid) shifty = shifty.unsqueeze(-1).repeat(bz, 1, 1, self.gt_per_grid) xy_grid = torch.stack([shiftx, shifty], dim=4).cuda() x1y1 = (xy_grid+0.5-torch.exp(x1y1))*stride x2y2 = (xy_grid+0.5+torch.exp(x2y2))*stride xyxy = torch.cat((x1y1, x2y2), dim=4) conf = torch.sigmoid(conf) prob = torch.sigmoid(prob) output = torch.cat((xyxy, conf, prob), 4) output = output.view(bz, -1, 5+self.numclass) return output
null
14,702
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds def set_logging(rank=-1): logging.basicConfig( format="%(message)s", level=logging.INFO if rank in [-1, 0] else logging.WARN)
null
14,703
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) def get_latest_run(search_dir='.'): # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) return max(last_list, key=os.path.getctime) if last_list else ''
null
14,704
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds def check_online(): # Check internet connectivity import socket try: socket.create_connection(("1.1.1.1", 53)) # check host accesability return True except OSError: return False def colorstr(*input): # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string colors = {'black': '\033[30m', # basic colors 'red': '\033[31m', 'green': '\033[32m', 'yellow': '\033[33m', 'blue': '\033[34m', 'magenta': '\033[35m', 'cyan': '\033[36m', 'white': '\033[37m', 'bright_black': '\033[90m', # bright colors 'bright_red': '\033[91m', 'bright_green': '\033[92m', 'bright_yellow': '\033[93m', 'bright_blue': '\033[94m', 'bright_magenta': '\033[95m', 'bright_cyan': '\033[96m', 'bright_white': '\033[97m', 'end': '\033[0m', # misc 'bold': '\033[1m', 'underline': '\033[4m'} return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] def check_git_status(): # Recommend 'git pull' if code is out of date print(colorstr('github: '), end='') try: assert Path('.git').exists(), 'skipping check (not a git repository)' assert not Path('/workspace').exists(), 'skipping check (Docker image)' # not Path('/.dockerenv').exists() assert check_online(), 'skipping check (offline)' cmd = 'git fetch && git config --get remote.origin.url' # github repo url url = subprocess.check_output(cmd, shell=True).decode()[:-1] cmd = 'git rev-list $(git rev-parse --abbrev-ref HEAD)..origin/master --count' # commits behind n = int(subprocess.check_output(cmd, shell=True)) if n > 0: print(f"⚠️ WARNING: code is out of date by {n} {'commits' if n > 1 else 
'commmit'}. " f"Use 'git pull' to update or 'git clone {url}' to download latest.") else: print(f'up to date with {url} ✅') except Exception as e: print(e)
null
14,705
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds def check_requirements(file='requirements.txt'): # Check installed dependencies meet requirements import pkg_resources requirements = pkg_resources.parse_requirements(Path(file).open()) requirements = [x.name + ''.join(*x.specs) if len(x.specs) else x.name for x in requirements] pkg_resources.require(requirements) # DistributionNotFound or VersionConflict exception if requirements not met
null
14,706
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) def check_file(file): # Search for file if not found if os.path.isfile(file) or file == '': return file else: files = glob.glob('./**/' + file, recursive=True) # find file assert len(files), 'File Not Found: %s' % file # assert file was found assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique return files[0] # return file
null
14,707
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds def clean_str(s): # Cleans a string by replacing special characters with underscore _ return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
null
14,708
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds def one_cycle(y1=0.0, y2=1.0, steps=100): # lambda function for sinusoidal ramp from y1 to y2 return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
null
14,709
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] return x
null
14,710
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds torch.set_printoptions(linewidth=320, precision=5, profile='long') def wh_iou(wh1, wh2): # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 wh1 = wh1[:, None] # [N,1,2] wh2 = wh2[None] # [1,M,2] inter = torch.min(wh1, wh2).prod(2) # [N,M] # iou = inter / (area1 + area2 - inter) return inter / (wh1.prod(2) + wh2.prod(2) - inter)
null
14,711
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds torch.set_printoptions(linewidth=320, precision=5, profile='long') def jaccard_diou(box_a, box_b, iscrowd:bool=False): use_batch = True if box_a.dim() == 2: use_batch = False box_a = box_a[None, ...] box_b = box_b[None, ...] inter = intersect(box_a, box_b) area_a = ((box_a[:, :, 2]-box_a[:, :, 0]) * (box_a[:, :, 3]-box_a[:, :, 1])).unsqueeze(2).expand_as(inter) # [A,B] area_b = ((box_b[:, :, 2]-box_b[:, :, 0]) * (box_b[:, :, 3]-box_b[:, :, 1])).unsqueeze(1).expand_as(inter) # [A,B] union = area_a + area_b - inter x1 = ((box_a[:, :, 2]+box_a[:, :, 0]) / 2).unsqueeze(2).expand_as(inter) y1 = ((box_a[:, :, 3]+box_a[:, :, 1]) / 2).unsqueeze(2).expand_as(inter) x2 = ((box_b[:, :, 2]+box_b[:, :, 0]) / 2).unsqueeze(1).expand_as(inter) y2 = ((box_b[:, :, 3]+box_b[:, :, 1]) / 2).unsqueeze(1).expand_as(inter) t1 = box_a[:, :, 1].unsqueeze(2).expand_as(inter) b1 = box_a[:, :, 3].unsqueeze(2).expand_as(inter) l1 = box_a[:, :, 0].unsqueeze(2).expand_as(inter) r1 = box_a[:, :, 2].unsqueeze(2).expand_as(inter) t2 = box_b[:, :, 1].unsqueeze(1).expand_as(inter) b2 = box_b[:, :, 3].unsqueeze(1).expand_as(inter) l2 = box_b[:, :, 0].unsqueeze(1).expand_as(inter) r2 = box_b[:, :, 2].unsqueeze(1).expand_as(inter) cr = torch.max(r1, r2) cl = torch.min(l1, l2) ct = torch.min(t1, t2) cb = torch.max(b1, b2) D = (((x2 - x1)**2 + (y2 - y1)**2) / ((cr-cl)**2 + (cb-ct)**2 + 1e-7)) out = inter / area_a if iscrowd else inter / (union + 1e-7) - D ** 0.7 return out if use_batch else out.squeeze(0)
null
14,712
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds torch.set_printoptions(linewidth=320, precision=5, profile='long') def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y return y def box_iou(box1, box2): # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py """ Return intersection-over-union (Jaccard index) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) format. Arguments: box1 (Tensor[N, 4]) box2 (Tensor[M, 4]) Returns: iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 """ def box_area(box): # box = 4xn return (box[2] - box[0]) * (box[3] - box[1]) area1 = box_area(box1.T) area2 = box_area(box2.T) # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) # iou = inter / (area1 + area2 - inter) return inter / (area1[:, None] + area2 - inter) The provided code snippet includes necessary dependencies for implementing the `non_max_suppression` function. 
Write a Python function `def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=())` to solve the following problem: Performs Non-Maximum Suppression (NMS) on inference results Returns: detections with shape: nx6 (x1, y1, x2, y2, conf, cls) Here is the function: def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()): """Performs Non-Maximum Suppression (NMS) on inference results Returns: detections with shape: nx6 (x1, y1, x2, y2, conf, cls) """ nc = prediction.shape[2] - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates # Settings # (pixels) minimum and maximum box width and height min_wh, max_wh = 2, 4096 #max_det = 300 # maximum number of detections per image #max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() time_limit = 10.0 # seconds to quit after redundant = True # require redundant detections multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img) merge = False # use merge-NMS t = time.time() output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] for xi, x in enumerate(prediction): # image index, image inference # Apply constraints # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height x = x[xc[xi]] # confidence # Cat apriori labels if autolabelling if labels and len(labels[xi]): l = labels[xi] v = torch.zeros((len(l), nc + 5), device=x.device) v[:, :4] = l[:, 1:5] # box v[:, 4] = 1.0 # conf v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls x = torch.cat((x, v), 0) # If none remain process next image if not x.shape[0]: continue # Compute conf x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf # Box (center x, center y, width, height) to (x1, y1, x2, y2) box = xywh2xyxy(x[:, :4]) # Detections matrix nx6 (xyxy, conf, cls) if multi_label: i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 
1) else: # best class only conf, j = x[:, 5:].max(1, keepdim=True) x = torch.cat((box, conf, j.float()), 1)[ conf.view(-1) > conf_thres] # Filter by class if classes is not None: x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] # Apply finite constraint # if not torch.isfinite(x).all(): # x = x[torch.isfinite(x).all(1)] # Check shape n = x.shape[0] # number of boxes if not n: # no boxes continue #elif n > max_nms: # excess boxes # x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence x = x[x[:, 4].argsort(descending=True)] # sort by confidence # Batched NMS c = x[:, 5:6] * (0 if agnostic else max_wh) # classes boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS #if i.shape[0] > max_det: # limit detections # i = i[:max_det] if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix weights = iou * scores[None] # box weights x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes if redundant: i = i[iou.sum(1) > 1] # require redundancy output[xi] = x[i] if (time.time() - t) > time_limit: print(f'WARNING: NMS time limit {time_limit}s exceeded') break # time limit exceeded return output
Performs Non-Maximum Suppression (NMS) on inference results Returns: detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
14,713
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) def gsutil_getsize(url=''): # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') return eval(s.split(' ')[0]) if len(s) else 0 # bytes def fitness(x): # Model fitness as a weighted combination of metrics w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] return (x[:, :4] * w).sum(1) def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): # Print mutation results to evolve.txt (for use with train.py --evolve) a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) if bucket: url = 'gs://%s/evolve.txt' % bucket if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): os.system('gsutil cp %s .' 
% url) # download evolve.txt if larger than local with open('evolve.txt', 'a') as f: # append result f.write(c + b + '\n') x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows x = x[np.argsort(-fitness(x))] # sort np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness # Save yaml for i, k in enumerate(hyp.keys()): hyp[k] = float(x[0, i + 7]) with open(yaml_file, 'w') as f: results = tuple(x[0, :7]) c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') yaml.dump(hyp, f, sort_keys=False) if bucket: os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload
null
14,714
import glob import logging import math import os import random import re import subprocess import time from pathlib import Path import cv2 import numpy as np import torch import torchvision import yaml from utils.google_utils import gsutil_getsize from utils.metrics import fitness from utils.torch_utils import init_torch_seeds torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) cv2.setNumThreads(0) def xyxy2xywh(x): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center y[:, 2] = x[:, 2] - x[:, 0] # width y[:, 3] = x[:, 3] - x[:, 1] # height return y def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y return y def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): # Rescale coords (xyxy) from img1_shape to img0_shape if ratio_pad is None: # calculate from img0_shape gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding else: gain = ratio_pad[0][0] pad = ratio_pad[1] coords[:, [0, 2]] -= pad[0] # x padding coords[:, [1, 3]] -= pad[1] # y padding coords[:, :4] /= gain clip_coords(coords, img0_shape) return coords def apply_classifier(x, model, img, im0): # applies a second stage classifier to yolo outputs im0 = [im0] if isinstance(im0, np.ndarray) else im0 for i, d in enumerate(x): # per image if d is not None and len(d): d = 
d.clone() # Reshape and pad cutouts b = xyxy2xywh(d[:, :4]) # boxes b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad d[:, :4] = xywh2xyxy(b).long() # Rescale boxes from img_size to im0 size scale_coords(img.shape[2:], d[:, :4], im0[i].shape) # Classes pred_cls1 = d[:, 5].long() ims = [] for j, a in enumerate(d): # per item cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] im = cv2.resize(cutout, (224, 224)) # BGR # cv2.imwrite('test%i.jpg' % j, cutout) # BGR to RGB, to 3x416x416 im = im[:, :, ::-1].transpose(2, 0, 1) im = np.ascontiguousarray( im, dtype=np.float32) # uint8 to float32 im /= 255.0 # 0 - 255 to 0.0 - 1.0 ims.append(im) pred_cls2 = model(torch.Tensor(ims).to(d.device) ).argmax(1) # classifier prediction # retain matching class detections x[i] = x[i][pred_cls1 == pred_cls2] return x
null
14,715
from pathlib import Path import matplotlib.pyplot as plt import numpy as np import torch from . import general def compute_ap(recall, precision): """ Compute the average precision, given the recall and precision curves # Arguments recall: The recall curve (list) precision: The precision curve (list) # Returns Average precision, precision curve, recall curve """ # Append sentinel values to beginning and end mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) mpre = np.concatenate(([1.], precision, [0.])) # Compute the precision envelope mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) # Integrate area under curve method = 'interp' # methods: 'continuous', 'interp' if method == 'interp': x = np.linspace(0, 1, 101) # 101-point interp (COCO) ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate else: # 'continuous' i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve return ap, mpre, mrec def plot_pr_curve(px, py, ap, save_dir='.', names=()): fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) py = np.stack(py, axis=1) if 0 < len(names) < 21: # show mAP in legend if < 10 classes for i, y in enumerate(py.T): ax.plot(px, y, linewidth=1, label=f'{names[i]} %.3f' % ap[i, 0]) # plot(recall, precision) else: ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) ax.set_xlabel('Recall') ax.set_ylabel('Precision') ax.set_xlim(0, 1) ax.set_ylim(0, 1) plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") fig.savefig(Path(save_dir) / 'precision_recall_curve.png', dpi=250) The provided code snippet includes necessary dependencies for implementing the `ap_per_class` function. 
Write a Python function `def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[])` to solve the following problem: Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments tp: True positives (nparray, nx1 or nx10). conf: Objectness value from 0-1 (nparray). pred_cls: Predicted object classes (nparray). target_cls: True object classes (nparray). plot: Plot precision-recall curve at mAP@0.5 save_dir: Plot save directory # Returns The average precision as computed in py-faster-rcnn. Here is the function: def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments tp: True positives (nparray, nx1 or nx10). conf: Objectness value from 0-1 (nparray). pred_cls: Predicted object classes (nparray). target_cls: True object classes (nparray). plot: Plot precision-recall curve at mAP@0.5 save_dir: Plot save directory # Returns The average precision as computed in py-faster-rcnn. """ # Sort by objectness i = np.argsort(-conf) tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] # Find unique classes unique_classes = np.unique(target_cls) # Create Precision-Recall curve and compute AP for each class px, py = np.linspace(0, 1, 1000), [] # for plotting pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898 s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 
10 for mAP0.5...0.95) ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s) for ci, c in enumerate(unique_classes): i = pred_cls == c n_l = (target_cls == c).sum() # number of labels n_p = i.sum() # number of predictions if n_p == 0 or n_l == 0: continue else: # Accumulate FPs and TPs fpc = (1 - tp[i]).cumsum(0) tpc = tp[i].cumsum(0) # Recall recall = tpc / (n_l + 1e-16) # recall curve r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases # Precision precision = tpc / (tpc + fpc) # precision curve p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score # AP from recall-precision curve for j in range(tp.shape[1]): ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) if plot and (j == 0): py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 # Compute F1 score (harmonic mean of precision and recall) f1 = 2 * p * r / (p + r + 1e-16) if plot: plot_pr_curve(px, py, ap, save_dir, names) return p, r, ap, f1, unique_classes.astype('int32')
Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments tp: True positives (nparray, nx1 or nx10). conf: Objectness value from 0-1 (nparray). pred_cls: Predicted object classes (nparray). target_cls: True object classes (nparray). plot: Plot precision-recall curve at mAP@0.5 save_dir: Plot save directory # Returns The average precision as computed in py-faster-rcnn.
14,717
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `find_inside_bboxes` function. Write a Python function `def find_inside_bboxes(bboxes: Tensor, img_h: int, img_w: int) -> Tensor` to solve the following problem: Find bboxes as long as a part of bboxes is inside the image. Args: bboxes (Tensor): Shape (N, 4). img_h (int): Image height. img_w (int): Image width. Returns: Tensor: Index of the remaining bboxes. Here is the function: def find_inside_bboxes(bboxes: Tensor, img_h: int, img_w: int) -> Tensor: """Find bboxes as long as a part of bboxes is inside the image. Args: bboxes (Tensor): Shape (N, 4). img_h (int): Image height. img_w (int): Image width. Returns: Tensor: Index of the remaining bboxes. """ inside_inds = (bboxes[:, 0] < img_w) & (bboxes[:, 2] > 0) \ & (bboxes[:, 1] < img_h) & (bboxes[:, 3] > 0) return inside_inds
Find bboxes as long as a part of bboxes is inside the image. Args: bboxes (Tensor): Shape (N, 4). img_h (int): Image height. img_w (int): Image width. Returns: Tensor: Index of the remaining bboxes.
14,718
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes def bbox_flip(bboxes: Tensor, img_shape: Tuple[int], direction: str = 'horizontal') -> Tensor: """Flip bboxes horizontally or vertically. Args: bboxes (Tensor): Shape (..., 4*k) img_shape (Tuple[int]): Image shape. direction (str): Flip direction, options are "horizontal", "vertical", "diagonal". Default: "horizontal" Returns: Tensor: Flipped bboxes. """ assert bboxes.shape[-1] % 4 == 0 assert direction in ['horizontal', 'vertical', 'diagonal'] flipped = bboxes.clone() if direction == 'horizontal': flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] elif direction == 'vertical': flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] else: flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] return flipped The provided code snippet includes necessary dependencies for implementing the `bbox_mapping` function. Write a Python function `def bbox_mapping(bboxes: Tensor, img_shape: Tuple[int], scale_factor: Union[float, Tuple[float]], flip: bool, flip_direction: str = 'horizontal') -> Tensor` to solve the following problem: Map bboxes from the original image scale to testing scale. Here is the function: def bbox_mapping(bboxes: Tensor, img_shape: Tuple[int], scale_factor: Union[float, Tuple[float]], flip: bool, flip_direction: str = 'horizontal') -> Tensor: """Map bboxes from the original image scale to testing scale.""" new_bboxes = bboxes * bboxes.new_tensor(scale_factor) if flip: new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction) return new_bboxes
Map bboxes from the original image scale to testing scale.
14,719
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes def bbox_flip(bboxes: Tensor, img_shape: Tuple[int], direction: str = 'horizontal') -> Tensor: """Flip bboxes horizontally or vertically. Args: bboxes (Tensor): Shape (..., 4*k) img_shape (Tuple[int]): Image shape. direction (str): Flip direction, options are "horizontal", "vertical", "diagonal". Default: "horizontal" Returns: Tensor: Flipped bboxes. """ assert bboxes.shape[-1] % 4 == 0 assert direction in ['horizontal', 'vertical', 'diagonal'] flipped = bboxes.clone() if direction == 'horizontal': flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] elif direction == 'vertical': flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] else: flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] return flipped The provided code snippet includes necessary dependencies for implementing the `bbox_mapping_back` function. Write a Python function `def bbox_mapping_back(bboxes: Tensor, img_shape: Tuple[int], scale_factor: Union[float, Tuple[float]], flip: bool, flip_direction: str = 'horizontal') -> Tensor` to solve the following problem: Map bboxes from testing scale to original image scale. Here is the function: def bbox_mapping_back(bboxes: Tensor, img_shape: Tuple[int], scale_factor: Union[float, Tuple[float]], flip: bool, flip_direction: str = 'horizontal') -> Tensor: """Map bboxes from testing scale to original image scale.""" new_bboxes = bbox_flip(bboxes, img_shape, flip_direction) if flip else bboxes new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor) return new_bboxes.view(bboxes.shape)
Map bboxes from testing scale to original image scale.
14,720
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes def get_box_tensor(boxes: Union[Tensor, BaseBoxes]) -> Tensor: """Get tensor data from box type boxes. Args: boxes (Tensor or BaseBoxes): boxes with type of tensor or box type. If its type is a tensor, the boxes will be directly returned. If its type is a box type, the `boxes.tensor` will be returned. Returns: Tensor: boxes tensor. """ if isinstance(boxes, BaseBoxes): boxes = boxes.tensor return boxes The provided code snippet includes necessary dependencies for implementing the `bbox2roi` function. Write a Python function `def bbox2roi(bbox_list: List[Union[Tensor, BaseBoxes]]) -> Tensor` to solve the following problem: Convert a list of bboxes to roi format. Args: bbox_list (List[Union[Tensor, :obj:`BaseBoxes`]): a list of bboxes corresponding to a batch of images. Returns: Tensor: shape (n, box_dim + 1), where ``box_dim`` depends on the different box types. For example, If the box type in ``bbox_list`` is HorizontalBoxes, the output shape is (n, 5). Each row of data indicates [batch_ind, x1, y1, x2, y2]. Here is the function: def bbox2roi(bbox_list: List[Union[Tensor, BaseBoxes]]) -> Tensor: """Convert a list of bboxes to roi format. Args: bbox_list (List[Union[Tensor, :obj:`BaseBoxes`]): a list of bboxes corresponding to a batch of images. Returns: Tensor: shape (n, box_dim + 1), where ``box_dim`` depends on the different box types. For example, If the box type in ``bbox_list`` is HorizontalBoxes, the output shape is (n, 5). Each row of data indicates [batch_ind, x1, y1, x2, y2]. """ rois_list = [] for img_id, bboxes in enumerate(bbox_list): bboxes = get_box_tensor(bboxes) img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) rois = torch.cat([img_inds, bboxes], dim=-1) rois_list.append(rois) rois = torch.cat(rois_list, 0) return rois
Convert a list of bboxes to roi format. Args: bbox_list (List[Union[Tensor, :obj:`BaseBoxes`]): a list of bboxes corresponding to a batch of images. Returns: Tensor: shape (n, box_dim + 1), where ``box_dim`` depends on the different box types. For example, If the box type in ``bbox_list`` is HorizontalBoxes, the output shape is (n, 5). Each row of data indicates [batch_ind, x1, y1, x2, y2].
14,721
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `roi2bbox` function. Write a Python function `def roi2bbox(rois: Tensor) -> List[Tensor]` to solve the following problem: Convert rois to bounding box format. Args: rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. Returns: List[Tensor]: Converted boxes of corresponding rois. Here is the function: def roi2bbox(rois: Tensor) -> List[Tensor]: """Convert rois to bounding box format. Args: rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. Returns: List[Tensor]: Converted boxes of corresponding rois. """ bbox_list = [] img_ids = torch.unique(rois[:, 0].cpu(), sorted=True) for img_id in img_ids: inds = (rois[:, 0] == img_id.item()) bbox = rois[inds, 1:] bbox_list.append(bbox) return bbox_list
Convert rois to bounding box format. Args: rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. Returns: List[Tensor]: Converted boxes of corresponding rois.
14,722
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `bbox2result` function. Write a Python function `def bbox2result(bboxes: Union[Tensor, np.ndarray], labels: Union[Tensor, np.ndarray], num_classes: int) -> List[np.ndarray]` to solve the following problem: Convert detection results to a list of numpy arrays. Args: bboxes (Tensor | np.ndarray): shape (n, 5) labels (Tensor | np.ndarray): shape (n, ) num_classes (int): class number, including background class Returns: List(np.ndarray]): bbox results of each class Here is the function: def bbox2result(bboxes: Union[Tensor, np.ndarray], labels: Union[Tensor, np.ndarray], num_classes: int) -> List[np.ndarray]: """Convert detection results to a list of numpy arrays. Args: bboxes (Tensor | np.ndarray): shape (n, 5) labels (Tensor | np.ndarray): shape (n, ) num_classes (int): class number, including background class Returns: List(np.ndarray]): bbox results of each class """ if bboxes.shape[0] == 0: return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)] else: if isinstance(bboxes, torch.Tensor): bboxes = bboxes.detach().cpu().numpy() labels = labels.detach().cpu().numpy() return [bboxes[labels == i, :] for i in range(num_classes)]
Convert detection results to a list of numpy arrays. Args: bboxes (Tensor | np.ndarray): shape (n, 5) labels (Tensor | np.ndarray): shape (n, ) num_classes (int): class number, including background class Returns: List(np.ndarray]): bbox results of each class
14,723
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `distance2bbox` function. Write a Python function `def distance2bbox( points: Tensor, distance: Tensor, max_shape: Optional[Union[Sequence[int], Tensor, Sequence[Sequence[int]]]] = None ) -> Tensor` to solve the following problem: Decode distance prediction to bounding box. Args: points (Tensor): Shape (B, N, 2) or (N, 2). distance (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Union[Sequence[int], Tensor, Sequence[Sequence[int]]], optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. Returns: Tensor: Boxes with shape (N, 4) or (B, N, 4) Here is the function: def distance2bbox( points: Tensor, distance: Tensor, max_shape: Optional[Union[Sequence[int], Tensor, Sequence[Sequence[int]]]] = None ) -> Tensor: """Decode distance prediction to bounding box. Args: points (Tensor): Shape (B, N, 2) or (N, 2). distance (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Union[Sequence[int], Tensor, Sequence[Sequence[int]]], optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. 
Returns: Tensor: Boxes with shape (N, 4) or (B, N, 4) """ x1 = points[..., 0] - distance[..., 0] y1 = points[..., 1] - distance[..., 1] x2 = points[..., 0] + distance[..., 2] y2 = points[..., 1] + distance[..., 3] bboxes = torch.stack([x1, y1, x2, y2], -1) if max_shape is not None: if bboxes.dim() == 2 and not torch.onnx.is_in_onnx_export(): # speed up bboxes[:, 0::2].clamp_(min=0, max=max_shape[1]) bboxes[:, 1::2].clamp_(min=0, max=max_shape[0]) return bboxes # clip bboxes with dynamic `min` and `max` for onnx if torch.onnx.is_in_onnx_export(): # TODO: delete from mmdet.core.export import dynamic_clip_for_onnx x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape) bboxes = torch.stack([x1, y1, x2, y2], dim=-1) return bboxes if not isinstance(max_shape, torch.Tensor): max_shape = x1.new_tensor(max_shape) max_shape = max_shape[..., :2].type_as(x1) if max_shape.ndim == 2: assert bboxes.ndim == 3 assert max_shape.size(0) == bboxes.size(0) min_xy = x1.new_tensor(0) max_xy = torch.cat([max_shape, max_shape], dim=-1).flip(-1).unsqueeze(-2) bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) return bboxes
Decode distance prediction to bounding box. Args: points (Tensor): Shape (B, N, 2) or (N, 2). distance (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Union[Sequence[int], Tensor, Sequence[Sequence[int]]], optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. Returns: Tensor: Boxes with shape (N, 4) or (B, N, 4)
14,724
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `bbox2distance` function. Write a Python function `def bbox2distance(points: Tensor, bbox: Tensor, max_dis: Optional[float] = None, eps: float = 0.1) -> Tensor` to solve the following problem: Decode bounding box based on distances. Args: points (Tensor): Shape (n, 2) or (b, n, 2), [x, y]. bbox (Tensor): Shape (n, 4) or (b, n, 4), "xyxy" format max_dis (float, optional): Upper bound of the distance. eps (float): a small value to ensure target < max_dis, instead <= Returns: Tensor: Decoded distances. Here is the function: def bbox2distance(points: Tensor, bbox: Tensor, max_dis: Optional[float] = None, eps: float = 0.1) -> Tensor: """Decode bounding box based on distances. Args: points (Tensor): Shape (n, 2) or (b, n, 2), [x, y]. bbox (Tensor): Shape (n, 4) or (b, n, 4), "xyxy" format max_dis (float, optional): Upper bound of the distance. eps (float): a small value to ensure target < max_dis, instead <= Returns: Tensor: Decoded distances. """ left = points[..., 0] - bbox[..., 0] top = points[..., 1] - bbox[..., 1] right = bbox[..., 2] - points[..., 0] bottom = bbox[..., 3] - points[..., 1] if max_dis is not None: left = left.clamp(min=0, max=max_dis - eps) top = top.clamp(min=0, max=max_dis - eps) right = right.clamp(min=0, max=max_dis - eps) bottom = bottom.clamp(min=0, max=max_dis - eps) return torch.stack([left, top, right, bottom], -1)
Decode bounding box based on distances. Args: points (Tensor): Shape (n, 2) or (b, n, 2), [x, y]. bbox (Tensor): Shape (n, 4) or (b, n, 4), "xyxy" format max_dis (float, optional): Upper bound of the distance. eps (float): a small value to ensure target < max_dis, instead <= Returns: Tensor: Decoded distances.
14,725
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `bbox_rescale` function. Write a Python function `def bbox_rescale(bboxes: Tensor, scale_factor: float = 1.0) -> Tensor` to solve the following problem: Rescale bounding box w.r.t. scale_factor. Args: bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois scale_factor (float): rescale factor Returns: Tensor: Rescaled bboxes. Here is the function: def bbox_rescale(bboxes: Tensor, scale_factor: float = 1.0) -> Tensor: """Rescale bounding box w.r.t. scale_factor. Args: bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois scale_factor (float): rescale factor Returns: Tensor: Rescaled bboxes. """ if bboxes.size(1) == 5: bboxes_ = bboxes[:, 1:] inds_ = bboxes[:, 0] else: bboxes_ = bboxes cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5 cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5 w = bboxes_[:, 2] - bboxes_[:, 0] h = bboxes_[:, 3] - bboxes_[:, 1] w = w * scale_factor h = h * scale_factor x1 = cx - 0.5 * w x2 = cx + 0.5 * w y1 = cy - 0.5 * h y2 = cy + 0.5 * h if bboxes.size(1) == 5: rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1) else: rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) return rescaled_bboxes
Rescale bounding box w.r.t. scale_factor. Args: bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois scale_factor (float): rescale factor Returns: Tensor: Rescaled bboxes.
14,726
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `bbox_cxcywh_to_xyxy` function. Write a Python function `def bbox_cxcywh_to_xyxy(bbox: Tensor) -> Tensor` to solve the following problem: Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2). Args: bbox (Tensor): Shape (n, 4) for bboxes. Returns: Tensor: Converted bboxes. Here is the function: def bbox_cxcywh_to_xyxy(bbox: Tensor) -> Tensor: """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2). Args: bbox (Tensor): Shape (n, 4) for bboxes. Returns: Tensor: Converted bboxes. """ cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1) bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)] return torch.cat(bbox_new, dim=-1)
Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2). Args: bbox (Tensor): Shape (n, 4) for bboxes. Returns: Tensor: Converted bboxes.
14,727
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `bbox_xyxy_to_cxcywh` function. Write a Python function `def bbox_xyxy_to_cxcywh(bbox: Tensor) -> Tensor` to solve the following problem: Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h). Args: bbox (Tensor): Shape (n, 4) for bboxes. Returns: Tensor: Converted bboxes. Here is the function: def bbox_xyxy_to_cxcywh(bbox: Tensor) -> Tensor: """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h). Args: bbox (Tensor): Shape (n, 4) for bboxes. Returns: Tensor: Converted bboxes. """ x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1) bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)] return torch.cat(bbox_new, dim=-1)
Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h). Args: bbox (Tensor): Shape (n, 4) for bboxes. Returns: Tensor: Converted bboxes.
14,728
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes def bbox2corner(bboxes: torch.Tensor) -> torch.Tensor: """Convert bbox coordinates from (x1, y1, x2, y2) to corners ((x1, y1), (x2, y1), (x1, y2), (x2, y2)). Args: bboxes (Tensor): Shape (n, 4) for bboxes. Returns: Tensor: Shape (n*4, 2) for corners. """ x1, y1, x2, y2 = torch.split(bboxes, 1, dim=1) return torch.cat([x1, y1, x2, y1, x1, y2, x2, y2], dim=1).reshape(-1, 2) def corner2bbox(corners: torch.Tensor) -> torch.Tensor: """Convert bbox coordinates from corners ((x1, y1), (x2, y1), (x1, y2), (x2, y2)) to (x1, y1, x2, y2). Args: corners (Tensor): Shape (n*4, 2) for corners. Returns: Tensor: Shape (n, 4) for bboxes. """ corners = corners.reshape(-1, 4, 2) min_xy = corners.min(dim=1)[0] max_xy = corners.max(dim=1)[0] return torch.cat([min_xy, max_xy], dim=1) The provided code snippet includes necessary dependencies for implementing the `bbox_project` function. Write a Python function `def bbox_project( bboxes: Union[torch.Tensor, np.ndarray], homography_matrix: Union[torch.Tensor, np.ndarray], img_shape: Optional[Tuple[int, int]] = None ) -> Union[torch.Tensor, np.ndarray]` to solve the following problem: Geometric transformation for bbox. Args: bboxes (Union[torch.Tensor, np.ndarray]): Shape (n, 4) for bboxes. homography_matrix (Union[torch.Tensor, np.ndarray]): Shape (3, 3) for geometric transformation. img_shape (Tuple[int, int], optional): Image shape. Defaults to None. Returns: Union[torch.Tensor, np.ndarray]: Converted bboxes. Here is the function: def bbox_project( bboxes: Union[torch.Tensor, np.ndarray], homography_matrix: Union[torch.Tensor, np.ndarray], img_shape: Optional[Tuple[int, int]] = None ) -> Union[torch.Tensor, np.ndarray]: """Geometric transformation for bbox. Args: bboxes (Union[torch.Tensor, np.ndarray]): Shape (n, 4) for bboxes. 
homography_matrix (Union[torch.Tensor, np.ndarray]): Shape (3, 3) for geometric transformation. img_shape (Tuple[int, int], optional): Image shape. Defaults to None. Returns: Union[torch.Tensor, np.ndarray]: Converted bboxes. """ bboxes_type = type(bboxes) if bboxes_type is np.ndarray: bboxes = torch.from_numpy(bboxes) if isinstance(homography_matrix, np.ndarray): homography_matrix = torch.from_numpy(homography_matrix) corners = bbox2corner(bboxes) corners = torch.cat( [corners, corners.new_ones(corners.shape[0], 1)], dim=1) corners = torch.matmul(homography_matrix, corners.t()).t() # Convert to homogeneous coordinates by normalization corners = corners[:, :2] / corners[:, 2:3] bboxes = corner2bbox(corners) if img_shape is not None: bboxes[:, 0::2] = bboxes[:, 0::2].clamp(0, img_shape[1]) bboxes[:, 1::2] = bboxes[:, 1::2].clamp(0, img_shape[0]) if bboxes_type is np.ndarray: bboxes = bboxes.numpy() return bboxes
Geometric transformation for bbox. Args: bboxes (Union[torch.Tensor, np.ndarray]): Shape (n, 4) for bboxes. homography_matrix (Union[torch.Tensor, np.ndarray]): Shape (3, 3) for geometric transformation. img_shape (Tuple[int, int], optional): Image shape. Defaults to None. Returns: Union[torch.Tensor, np.ndarray]: Converted bboxes.
14,729
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `cat_boxes` function. Write a Python function `def cat_boxes(data_list: List[Union[Tensor, BaseBoxes]], dim: int = 0) -> Union[Tensor, BaseBoxes]` to solve the following problem: Concatenate boxes with type of tensor or box type. Args: data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors or box types need to be concatenated. dim (int): The dimension over which the box are concatenated. Defaults to 0. Returns: Union[Tensor, :obj`BaseBoxes`]: Concatenated results. Here is the function: def cat_boxes(data_list: List[Union[Tensor, BaseBoxes]], dim: int = 0) -> Union[Tensor, BaseBoxes]: """Concatenate boxes with type of tensor or box type. Args: data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors or box types need to be concatenated. dim (int): The dimension over which the box are concatenated. Defaults to 0. Returns: Union[Tensor, :obj`BaseBoxes`]: Concatenated results. """ if data_list and isinstance(data_list[0], BaseBoxes): return data_list[0].cat(data_list, dim=dim) else: return torch.cat(data_list, dim=dim)
Concatenate boxes with type of tensor or box type. Args: data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors or box types need to be concatenated. dim (int): The dimension over which the box are concatenated. Defaults to 0. Returns: Union[Tensor, :obj`BaseBoxes`]: Concatenated results.
14,730
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `stack_boxes` function. Write a Python function `def stack_boxes(data_list: List[Union[Tensor, BaseBoxes]], dim: int = 0) -> Union[Tensor, BaseBoxes]` to solve the following problem: Stack boxes with type of tensor or box type. Args: data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors or box types need to be stacked. dim (int): The dimension over which the box are stacked. Defaults to 0. Returns: Union[Tensor, :obj`BaseBoxes`]: Stacked results. Here is the function: def stack_boxes(data_list: List[Union[Tensor, BaseBoxes]], dim: int = 0) -> Union[Tensor, BaseBoxes]: """Stack boxes with type of tensor or box type. Args: data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors or box types need to be stacked. dim (int): The dimension over which the box are stacked. Defaults to 0. Returns: Union[Tensor, :obj`BaseBoxes`]: Stacked results. """ if data_list and isinstance(data_list[0], BaseBoxes): return data_list[0].stack(data_list, dim=dim) else: return torch.stack(data_list, dim=dim)
Stack boxes with type of tensor or box type. Args: data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors or box types need to be stacked. dim (int): The dimension over which the box are stacked. Defaults to 0. Returns: Union[Tensor, :obj`BaseBoxes`]: Stacked results.
14,731
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `scale_boxes` function. Write a Python function `def scale_boxes(boxes: Union[Tensor, BaseBoxes], scale_factor: Tuple[float, float]) -> Union[Tensor, BaseBoxes]` to solve the following problem: Scale boxes with type of tensor or box type. Args: boxes (Tensor or :obj:`BaseBoxes`): boxes need to be scaled. Its type can be a tensor or a box type. scale_factor (Tuple[float, float]): factors for scaling boxes. The length should be 2. Returns: Union[Tensor, :obj:`BaseBoxes`]: Scaled boxes. Here is the function: def scale_boxes(boxes: Union[Tensor, BaseBoxes], scale_factor: Tuple[float, float]) -> Union[Tensor, BaseBoxes]: """Scale boxes with type of tensor or box type. Args: boxes (Tensor or :obj:`BaseBoxes`): boxes need to be scaled. Its type can be a tensor or a box type. scale_factor (Tuple[float, float]): factors for scaling boxes. The length should be 2. Returns: Union[Tensor, :obj:`BaseBoxes`]: Scaled boxes. """ if isinstance(boxes, BaseBoxes): boxes.rescale_(scale_factor) return boxes else: # Tensor boxes will be treated as horizontal boxes repeat_num = int(boxes.size(-1) / 2) scale_factor = boxes.new_tensor(scale_factor).repeat((1, repeat_num)) return boxes * scale_factor
Scale boxes with type of tensor or box type. Args: boxes (Tensor or :obj:`BaseBoxes`): boxes need to be scaled. Its type can be a tensor or a box type. scale_factor (Tuple[float, float]): factors for scaling boxes. The length should be 2. Returns: Union[Tensor, :obj:`BaseBoxes`]: Scaled boxes.
14,732
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `get_box_wh` function. Write a Python function `def get_box_wh(boxes: Union[Tensor, BaseBoxes]) -> Tuple[Tensor, Tensor]` to solve the following problem: Get the width and height of boxes with type of tensor or box type. Args: boxes (Tensor or :obj:`BaseBoxes`): boxes with type of tensor or box type. Returns: Tuple[Tensor, Tensor]: the width and height of boxes. Here is the function: def get_box_wh(boxes: Union[Tensor, BaseBoxes]) -> Tuple[Tensor, Tensor]: """Get the width and height of boxes with type of tensor or box type. Args: boxes (Tensor or :obj:`BaseBoxes`): boxes with type of tensor or box type. Returns: Tuple[Tensor, Tensor]: the width and height of boxes. """ if isinstance(boxes, BaseBoxes): w = boxes.widths h = boxes.heights else: # Tensor boxes will be treated as horizontal boxes by defaults w = boxes[:, 2] - boxes[:, 0] h = boxes[:, 3] - boxes[:, 1] return w, h
Get the width and height of boxes with type of tensor or box type. Args: boxes (Tensor or :obj:`BaseBoxes`): boxes with type of tensor or box type. Returns: Tuple[Tensor, Tensor]: the width and height of boxes.
14,733
from typing import List, Optional, Sequence, Tuple, Union import numpy as np import torch from torch import Tensor from mmdet.structures.bbox import BaseBoxes The provided code snippet includes necessary dependencies for implementing the `empty_box_as` function. Write a Python function `def empty_box_as(boxes: Union[Tensor, BaseBoxes]) -> Union[Tensor, BaseBoxes]` to solve the following problem: Generate empty box according to input ``boxes` type and device. Args: boxes (Tensor or :obj:`BaseBoxes`): boxes with type of tensor or box type. Returns: Union[Tensor, BaseBoxes]: Generated empty box. Here is the function: def empty_box_as(boxes: Union[Tensor, BaseBoxes]) -> Union[Tensor, BaseBoxes]: """Generate empty box according to input ``boxes` type and device. Args: boxes (Tensor or :obj:`BaseBoxes`): boxes with type of tensor or box type. Returns: Union[Tensor, BaseBoxes]: Generated empty box. """ if isinstance(boxes, BaseBoxes): return boxes.empty_boxes() else: # Tensor boxes will be treated as horizontal boxes by defaults return boxes.new_zeros(0, 4)
Generate empty box according to input ``boxes` type and device. Args: boxes (Tensor or :obj:`BaseBoxes`): boxes with type of tensor or box type. Returns: Union[Tensor, BaseBoxes]: Generated empty box.
14,734
from typing import Callable, Optional, Tuple, Type, Union import numpy as np import torch from torch import Tensor from .base_boxes import BaseBoxes def _register_box(name: str, box_type: Type, force: bool = False) -> None: """Register a box type. Args: name (str): The name of box type. box_type (type): Box mode class to be registered. force (bool): Whether to override an existing class with the same name. Defaults to False. """ assert issubclass(box_type, BaseBoxes) name = name.lower() if not force and (name in box_types or box_type in _box_type_to_name): raise KeyError(f'box type {name} has been registered') elif name in box_types: _box_type = box_types.pop(name) _box_type_to_name.pop(_box_type) elif box_type in _box_type_to_name: _name = _box_type_to_name.pop(box_type) box_types.pop(_name) box_types[name] = box_type _box_type_to_name[box_type] = name The provided code snippet includes necessary dependencies for implementing the `register_box` function. Write a Python function `def register_box(name: str, box_type: Type = None, force: bool = False) -> Union[Type, Callable]` to solve the following problem: Register a box type. A record will be added to ``bbox_types``, whose key is the box type name and value is the box type itself. Simultaneously, a reverse dictionary ``_box_type_to_name`` will be updated. It can be used as a decorator or a normal function. Args: name (str): The name of box type. bbox_type (type, Optional): Box type class to be registered. Defaults to None. force (bool): Whether to override the existing box type with the same name. Defaults to False. 
Examples: >>> from mmdet.structures.bbox import register_box >>> from mmdet.structures.bbox import BaseBoxes >>> # as a decorator >>> @register_box('hbox') >>> class HorizontalBoxes(BaseBoxes): >>> pass >>> # as a normal function >>> class RotatedBoxes(BaseBoxes): >>> pass >>> register_box('rbox', RotatedBoxes) Here is the function: def register_box(name: str, box_type: Type = None, force: bool = False) -> Union[Type, Callable]: """Register a box type. A record will be added to ``bbox_types``, whose key is the box type name and value is the box type itself. Simultaneously, a reverse dictionary ``_box_type_to_name`` will be updated. It can be used as a decorator or a normal function. Args: name (str): The name of box type. bbox_type (type, Optional): Box type class to be registered. Defaults to None. force (bool): Whether to override the existing box type with the same name. Defaults to False. Examples: >>> from mmdet.structures.bbox import register_box >>> from mmdet.structures.bbox import BaseBoxes >>> # as a decorator >>> @register_box('hbox') >>> class HorizontalBoxes(BaseBoxes): >>> pass >>> # as a normal function >>> class RotatedBoxes(BaseBoxes): >>> pass >>> register_box('rbox', RotatedBoxes) """ if not isinstance(force, bool): raise TypeError(f'force must be a boolean, but got {type(force)}') # use it as a normal method: register_box(name, box_type=BoxCls) if box_type is not None: _register_box(name=name, box_type=box_type, force=force) return box_type # use it as a decorator: @register_box(name) def _register(cls): _register_box(name=name, box_type=cls, force=force) return cls return _register
Register a box type. A record will be added to ``bbox_types``, whose key is the box type name and value is the box type itself. Simultaneously, a reverse dictionary ``_box_type_to_name`` will be updated. It can be used as a decorator or a normal function. Args: name (str): The name of box type. bbox_type (type, Optional): Box type class to be registered. Defaults to None. force (bool): Whether to override the existing box type with the same name. Defaults to False. Examples: >>> from mmdet.structures.bbox import register_box >>> from mmdet.structures.bbox import BaseBoxes >>> # as a decorator >>> @register_box('hbox') >>> class HorizontalBoxes(BaseBoxes): >>> pass >>> # as a normal function >>> class RotatedBoxes(BaseBoxes): >>> pass >>> register_box('rbox', RotatedBoxes)
14,735
from typing import Callable, Optional, Tuple, Type, Union import numpy as np import torch from torch import Tensor from .base_boxes import BaseBoxes def _register_box_converter(src_type: Union[str, type], dst_type: Union[str, type], converter: Callable, force: bool = False) -> None: """Register a box converter. Args: src_type (str or type): source box type name or class. dst_type (str or type): destination box type name or class. converter (Callable): Convert function. force (bool): Whether to override the existing box type with the same name. Defaults to False. """ assert callable(converter) src_type_name, _ = get_box_type(src_type) dst_type_name, _ = get_box_type(dst_type) converter_name = src_type_name + '2' + dst_type_name if not force and converter_name in box_converters: raise KeyError(f'The box converter from {src_type_name} to ' f'{dst_type_name} has been registered.') box_converters[converter_name] = converter The provided code snippet includes necessary dependencies for implementing the `register_box_converter` function. Write a Python function `def register_box_converter(src_type: Union[str, type], dst_type: Union[str, type], converter: Optional[Callable] = None, force: bool = False) -> Callable` to solve the following problem: Register a box converter. A record will be added to ``box_converter``, whose key is '{src_type_name}2{dst_type_name}' and value is the convert function. It can be used as a decorator or a normal function. Args: src_type (str or type): source box type name or class. dst_type (str or type): destination box type name or class. converter (Callable): Convert function. Defaults to None. force (bool): Whether to override the existing box type with the same name. Defaults to False. 
Examples: >>> from mmdet.structures.bbox import register_box_converter >>> # as a decorator >>> @register_box_converter('hbox', 'rbox') >>> def converter_A(boxes): >>> pass >>> # as a normal function >>> def converter_B(boxes): >>> pass >>> register_box_converter('rbox', 'hbox', converter_B) Here is the function: def register_box_converter(src_type: Union[str, type], dst_type: Union[str, type], converter: Optional[Callable] = None, force: bool = False) -> Callable: """Register a box converter. A record will be added to ``box_converter``, whose key is '{src_type_name}2{dst_type_name}' and value is the convert function. It can be used as a decorator or a normal function. Args: src_type (str or type): source box type name or class. dst_type (str or type): destination box type name or class. converter (Callable): Convert function. Defaults to None. force (bool): Whether to override the existing box type with the same name. Defaults to False. Examples: >>> from mmdet.structures.bbox import register_box_converter >>> # as a decorator >>> @register_box_converter('hbox', 'rbox') >>> def converter_A(boxes): >>> pass >>> # as a normal function >>> def converter_B(boxes): >>> pass >>> register_box_converter('rbox', 'hbox', converter_B) """ if not isinstance(force, bool): raise TypeError(f'force must be a boolean, but got {type(force)}') # use it as a normal method: # register_box_converter(src_type, dst_type, converter=Func) if converter is not None: _register_box_converter( src_type=src_type, dst_type=dst_type, converter=converter, force=force) return converter # use it as a decorator: @register_box_converter(name) def _register(func): _register_box_converter( src_type=src_type, dst_type=dst_type, converter=func, force=force) return func return _register
Register a box converter. A record will be added to ``box_converter``, whose key is '{src_type_name}2{dst_type_name}' and value is the convert function. It can be used as a decorator or a normal function. Args: src_type (str or type): source box type name or class. dst_type (str or type): destination box type name or class. converter (Callable): Convert function. Defaults to None. force (bool): Whether to override the existing box type with the same name. Defaults to False. Examples: >>> from mmdet.structures.bbox import register_box_converter >>> # as a decorator >>> @register_box_converter('hbox', 'rbox') >>> def converter_A(boxes): >>> pass >>> # as a normal function >>> def converter_B(boxes): >>> pass >>> register_box_converter('rbox', 'hbox', converter_B)
14,736
from typing import Callable, Optional, Tuple, Type, Union import numpy as np import torch from torch import Tensor from .base_boxes import BaseBoxes BoxType = Union[np.ndarray, Tensor, BaseBoxes] box_converters: dict = {} def get_box_type(box_type: Union[str, type]) -> Tuple[str, type]: """get both box type name and class. Args: box_type (str or type): Single box type name or class. Returns: Tuple[str, type]: A tuple of box type name and class. """ if isinstance(box_type, str): type_name = box_type.lower() assert type_name in box_types, \ f"Box type {type_name} hasn't been registered in box_types." type_cls = box_types[type_name] elif issubclass(box_type, BaseBoxes): assert box_type in _box_type_to_name, \ f"Box type {box_type} hasn't been registered in box_types." type_name = _box_type_to_name[box_type] type_cls = box_type else: raise KeyError('box_type must be a str or class inheriting from ' f'BaseBoxes, but got {type(box_type)}.') return type_name, type_cls class BaseBoxes(metaclass=ABCMeta): """The base class for 2D box types. The functions of ``BaseBoxes`` lie in three fields: - Verify the boxes shape. - Support tensor-like operations. - Define abstract functions for 2D boxes. In ``__init__`` , ``BaseBoxes`` verifies the validity of the data shape w.r.t ``box_dim``. The tensor with the dimension >= 2 and the length of the last dimension being ``box_dim`` will be regarded as valid. ``BaseBoxes`` will restore them at the field ``tensor``. It's necessary to override ``box_dim`` in subclass to guarantee the data shape is correct. There are many basic tensor-like functions implemented in ``BaseBoxes``. In most cases, users can operate ``BaseBoxes`` instance like a normal tensor. To protect the validity of data shape, All tensor-like functions cannot modify the last dimension of ``self.tensor``. When creating a new box type, users need to inherit from ``BaseBoxes`` and override abstract methods and specify the ``box_dim``. 
Then, register the new box type by using the decorator ``register_box_type``. Args: data (Tensor or np.ndarray or Sequence): The box data with shape (..., box_dim). dtype (torch.dtype, Optional): data type of boxes. Defaults to None. device (str or torch.device, Optional): device of boxes. Default to None. clone (bool): Whether clone ``boxes`` or not. Defaults to True. """ # Used to verify the last dimension length # Should override it in subclass. box_dim: int = 0 def __init__(self, data: Union[Tensor, np.ndarray, Sequence], dtype: Optional[torch.dtype] = None, device: Optional[DeviceType] = None, clone: bool = True) -> None: if isinstance(data, (np.ndarray, Tensor, Sequence)): data = torch.as_tensor(data) else: raise TypeError('boxes should be Tensor, ndarray, or Sequence, ', f'but got {type(data)}') if device is not None or dtype is not None: data = data.to(dtype=dtype, device=device) # Clone the data to avoid potential bugs if clone: data = data.clone() # handle the empty input like [] if data.numel() == 0: data = data.reshape((-1, self.box_dim)) assert data.dim() >= 2 and data.size(-1) == self.box_dim, \ ('The boxes dimension must >= 2 and the length of the last ' f'dimension must be {self.box_dim}, but got boxes with ' f'shape {data.shape}.') self.tensor = data def convert_to(self, dst_type: Union[str, type]) -> 'BaseBoxes': """Convert self to another box type. Args: dst_type (str or type): destination box type. Returns: :obj:`BaseBoxes`: destination box type object . """ from .box_type import convert_box_type return convert_box_type(self, dst_type=dst_type) def empty_boxes(self: T, dtype: Optional[torch.dtype] = None, device: Optional[DeviceType] = None) -> T: """Create empty box. Args: dtype (torch.dtype, Optional): data type of boxes. device (str or torch.device, Optional): device of boxes. Returns: T: empty boxes with shape of (0, box_dim). 
""" empty_box = self.tensor.new_zeros( 0, self.box_dim, dtype=dtype, device=device) return type(self)(empty_box, clone=False) def fake_boxes(self: T, sizes: Tuple[int], fill: float = 0, dtype: Optional[torch.dtype] = None, device: Optional[DeviceType] = None) -> T: """Create fake boxes with specific sizes and fill values. Args: sizes (Tuple[int]): The size of fake boxes. The last value must be equal with ``self.box_dim``. fill (float): filling value. Defaults to 0. dtype (torch.dtype, Optional): data type of boxes. device (str or torch.device, Optional): device of boxes. Returns: T: Fake boxes with shape of ``sizes``. """ fake_boxes = self.tensor.new_full( sizes, fill, dtype=dtype, device=device) return type(self)(fake_boxes, clone=False) def __getitem__(self: T, index: IndexType) -> T: """Rewrite getitem to protect the last dimension shape.""" boxes = self.tensor if isinstance(index, np.ndarray): index = torch.as_tensor(index, device=self.device) if isinstance(index, Tensor) and index.dtype == torch.bool: assert index.dim() < boxes.dim() elif isinstance(index, tuple): assert len(index) < boxes.dim() # `Ellipsis`(...) is commonly used in index like [None, ...]. # When `Ellipsis` is in index, it must be the last item. if Ellipsis in index: assert index[-1] is Ellipsis boxes = boxes[index] if boxes.dim() == 1: boxes = boxes.reshape(1, -1) return type(self)(boxes, clone=False) def __setitem__(self: T, index: IndexType, values: Union[Tensor, T]) -> T: """Rewrite setitem to protect the last dimension shape.""" assert type(values) is type(self), \ 'The value to be set must be the same box type as self' values = values.tensor if isinstance(index, np.ndarray): index = torch.as_tensor(index, device=self.device) if isinstance(index, Tensor) and index.dtype == torch.bool: assert index.dim() < self.tensor.dim() elif isinstance(index, tuple): assert len(index) < self.tensor.dim() # `Ellipsis`(...) is commonly used in index like [None, ...]. 
# When `Ellipsis` is in index, it must be the last item. if Ellipsis in index: assert index[-1] is Ellipsis self.tensor[index] = values def __len__(self) -> int: """Return the length of self.tensor first dimension.""" return self.tensor.size(0) def __deepcopy__(self, memo): """Only clone the ``self.tensor`` when applying deepcopy.""" cls = self.__class__ other = cls.__new__(cls) memo[id(self)] = other other.tensor = self.tensor.clone() return other def __repr__(self) -> str: """Return a strings that describes the object.""" return self.__class__.__name__ + '(\n' + str(self.tensor) + ')' def new_tensor(self, *args, **kwargs) -> Tensor: """Reload ``new_tensor`` from self.tensor.""" return self.tensor.new_tensor(*args, **kwargs) def new_full(self, *args, **kwargs) -> Tensor: """Reload ``new_full`` from self.tensor.""" return self.tensor.new_full(*args, **kwargs) def new_empty(self, *args, **kwargs) -> Tensor: """Reload ``new_empty`` from self.tensor.""" return self.tensor.new_empty(*args, **kwargs) def new_ones(self, *args, **kwargs) -> Tensor: """Reload ``new_ones`` from self.tensor.""" return self.tensor.new_ones(*args, **kwargs) def new_zeros(self, *args, **kwargs) -> Tensor: """Reload ``new_zeros`` from self.tensor.""" return self.tensor.new_zeros(*args, **kwargs) def size(self, dim: Optional[int] = None) -> Union[int, torch.Size]: """Reload new_zeros from self.tensor.""" # self.tensor.size(dim) cannot work when dim=None. 
return self.tensor.size() if dim is None else self.tensor.size(dim) def dim(self) -> int: """Reload ``dim`` from self.tensor.""" return self.tensor.dim() def device(self) -> torch.device: """Reload ``device`` from self.tensor.""" return self.tensor.device def dtype(self) -> torch.dtype: """Reload ``dtype`` from self.tensor.""" return self.tensor.dtype def shape(self) -> torch.Size: return self.tensor.shape def numel(self) -> int: """Reload ``numel`` from self.tensor.""" return self.tensor.numel() def numpy(self) -> np.ndarray: """Reload ``numpy`` from self.tensor.""" return self.tensor.numpy() def to(self: T, *args, **kwargs) -> T: """Reload ``to`` from self.tensor.""" return type(self)(self.tensor.to(*args, **kwargs), clone=False) def cpu(self: T) -> T: """Reload ``cpu`` from self.tensor.""" return type(self)(self.tensor.cpu(), clone=False) def cuda(self: T, *args, **kwargs) -> T: """Reload ``cuda`` from self.tensor.""" return type(self)(self.tensor.cuda(*args, **kwargs), clone=False) def clone(self: T) -> T: """Reload ``clone`` from self.tensor.""" return type(self)(self.tensor) def detach(self: T) -> T: """Reload ``detach`` from self.tensor.""" return type(self)(self.tensor.detach(), clone=False) def view(self: T, *shape: Tuple[int]) -> T: """Reload ``view`` from self.tensor.""" return type(self)(self.tensor.view(shape), clone=False) def reshape(self: T, *shape: Tuple[int]) -> T: """Reload ``reshape`` from self.tensor.""" return type(self)(self.tensor.reshape(shape), clone=False) def expand(self: T, *sizes: Tuple[int]) -> T: """Reload ``expand`` from self.tensor.""" return type(self)(self.tensor.expand(sizes), clone=False) def repeat(self: T, *sizes: Tuple[int]) -> T: """Reload ``repeat`` from self.tensor.""" return type(self)(self.tensor.repeat(sizes), clone=False) def transpose(self: T, dim0: int, dim1: int) -> T: """Reload ``transpose`` from self.tensor.""" ndim = self.tensor.dim() assert dim0 != -1 and dim0 != ndim - 1 assert dim1 != -1 and dim1 != ndim - 1 
return type(self)(self.tensor.transpose(dim0, dim1), clone=False) def permute(self: T, *dims: Tuple[int]) -> T: """Reload ``permute`` from self.tensor.""" assert dims[-1] == -1 or dims[-1] == self.tensor.dim() - 1 return type(self)(self.tensor.permute(dims), clone=False) def split(self: T, split_size_or_sections: Union[int, Sequence[int]], dim: int = 0) -> List[T]: """Reload ``split`` from self.tensor.""" assert dim != -1 and dim != self.tensor.dim() - 1 boxes_list = self.tensor.split(split_size_or_sections, dim=dim) return [type(self)(boxes, clone=False) for boxes in boxes_list] def chunk(self: T, chunks: int, dim: int = 0) -> List[T]: """Reload ``chunk`` from self.tensor.""" assert dim != -1 and dim != self.tensor.dim() - 1 boxes_list = self.tensor.chunk(chunks, dim=dim) return [type(self)(boxes, clone=False) for boxes in boxes_list] def unbind(self: T, dim: int = 0) -> T: """Reload ``unbind`` from self.tensor.""" assert dim != -1 and dim != self.tensor.dim() - 1 boxes_list = self.tensor.unbind(dim=dim) return [type(self)(boxes, clone=False) for boxes in boxes_list] def flatten(self: T, start_dim: int = 0, end_dim: int = -2) -> T: """Reload ``flatten`` from self.tensor.""" assert end_dim != -1 and end_dim != self.tensor.dim() - 1 return type(self)(self.tensor.flatten(start_dim, end_dim), clone=False) def squeeze(self: T, dim: Optional[int] = None) -> T: """Reload ``squeeze`` from self.tensor.""" boxes = self.tensor.squeeze() if dim is None else \ self.tensor.squeeze(dim) return type(self)(boxes, clone=False) def unsqueeze(self: T, dim: int) -> T: """Reload ``unsqueeze`` from self.tensor.""" assert dim != -1 and dim != self.tensor.dim() return type(self)(self.tensor.unsqueeze(dim), clone=False) def cat(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T: """Cancatenates a box instance list into one single box instance. Similar to ``torch.cat``. Args: box_list (Sequence[T]): A sequence of box instances. 
dim (int): The dimension over which the box are concatenated. Defaults to 0. Returns: T: Concatenated box instance. """ assert isinstance(box_list, Sequence) if len(box_list) == 0: raise ValueError('box_list should not be a empty list.') assert dim != -1 and dim != box_list[0].dim() - 1 assert all(isinstance(boxes, cls) for boxes in box_list) th_box_list = [boxes.tensor for boxes in box_list] return cls(torch.cat(th_box_list, dim=dim), clone=False) def stack(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T: """Concatenates a sequence of tensors along a new dimension. Similar to ``torch.stack``. Args: box_list (Sequence[T]): A sequence of box instances. dim (int): Dimension to insert. Defaults to 0. Returns: T: Concatenated box instance. """ assert isinstance(box_list, Sequence) if len(box_list) == 0: raise ValueError('box_list should not be a empty list.') assert dim != -1 and dim != box_list[0].dim() assert all(isinstance(boxes, cls) for boxes in box_list) th_box_list = [boxes.tensor for boxes in box_list] return cls(torch.stack(th_box_list, dim=dim), clone=False) def centers(self) -> Tensor: """Return a tensor representing the centers of boxes.""" pass def areas(self) -> Tensor: """Return a tensor representing the areas of boxes.""" pass def widths(self) -> Tensor: """Return a tensor representing the widths of boxes.""" pass def heights(self) -> Tensor: """Return a tensor representing the heights of boxes.""" pass def flip_(self, img_shape: Tuple[int, int], direction: str = 'horizontal') -> None: """Flip boxes horizontally or vertically in-place. Args: img_shape (Tuple[int, int]): A tuple of image height and width. direction (str): Flip direction, options are "horizontal", "vertical" and "diagonal". Defaults to "horizontal" """ pass def translate_(self, distances: Tuple[float, float]) -> None: """Translate boxes in-place. Args: distances (Tuple[float, float]): translate distances. The first is horizontal distance and the second is vertical distance. 
""" pass def clip_(self, img_shape: Tuple[int, int]) -> None: """Clip boxes according to the image shape in-place. Args: img_shape (Tuple[int, int]): A tuple of image height and width. """ pass def rotate_(self, center: Tuple[float, float], angle: float) -> None: """Rotate all boxes in-place. Args: center (Tuple[float, float]): Rotation origin. angle (float): Rotation angle represented in degrees. Positive values mean clockwise rotation. """ pass def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None: """Geometric transformat boxes in-place. Args: homography_matrix (Tensor or np.ndarray]): Shape (3, 3) for geometric transformation. """ pass def rescale_(self, scale_factor: Tuple[float, float]) -> None: """Rescale boxes w.r.t. rescale_factor in-place. Note: Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes w.r.t ``scale_facotr``. The difference is that ``resize_`` only changes the width and the height of boxes, but ``rescale_`` also rescales the box centers simultaneously. Args: scale_factor (Tuple[float, float]): factors for scaling boxes. The length should be 2. """ pass def resize_(self, scale_factor: Tuple[float, float]) -> None: """Resize the box width and height w.r.t scale_factor in-place. Note: Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes w.r.t ``scale_facotr``. The difference is that ``resize_`` only changes the width and the height of boxes, but ``rescale_`` also rescales the box centers simultaneously. Args: scale_factor (Tuple[float, float]): factors for scaling box shapes. The length should be 2. """ pass def is_inside(self, img_shape: Tuple[int, int], all_inside: bool = False, allowed_border: int = 0) -> BoolTensor: """Find boxes inside the image. Args: img_shape (Tuple[int, int]): A tuple of image height and width. all_inside (bool): Whether the boxes are all inside the image or part inside the image. Defaults to False. 
allowed_border (int): Boxes that extend beyond the image shape boundary by more than ``allowed_border`` are considered "outside" Defaults to 0. Returns: BoolTensor: A BoolTensor indicating whether the box is inside the image. Assuming the original boxes have shape (m, n, box_dim), the output has shape (m, n). """ pass def find_inside_points(self, points: Tensor, is_aligned: bool = False) -> BoolTensor: """Find inside box points. Boxes dimension must be 2. Args: points (Tensor): Points coordinates. Has shape of (m, 2). is_aligned (bool): Whether ``points`` has been aligned with boxes or not. If True, the length of boxes and ``points`` should be the same. Defaults to False. Returns: BoolTensor: A BoolTensor indicating whether a point is inside boxes. Assuming the boxes has shape of (n, box_dim), if ``is_aligned`` is False. The index has shape of (m, n). If ``is_aligned`` is True, m should be equal to n and the index has shape of (m, ). """ pass def overlaps(boxes1: 'BaseBoxes', boxes2: 'BaseBoxes', mode: str = 'iou', is_aligned: bool = False, eps: float = 1e-6) -> Tensor: """Calculate overlap between two set of boxes with their types converted to the present box type. Args: boxes1 (:obj:`BaseBoxes`): BaseBoxes with shape of (m, box_dim) or empty. boxes2 (:obj:`BaseBoxes`): BaseBoxes with shape of (n, box_dim) or empty. mode (str): "iou" (intersection over union), "iof" (intersection over foreground). Defaults to "iou". is_aligned (bool): If True, then m and n must be equal. Defaults to False. eps (float): A value added to the denominator for numerical stability. Defaults to 1e-6. Returns: Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) """ pass def from_instance_masks(masks: MaskType) -> 'BaseBoxes': """Create boxes from instance masks. Args: masks (:obj:`BitmapMasks` or :obj:`PolygonMasks`): BitmapMasks or PolygonMasks instance with length of n. Returns: :obj:`BaseBoxes`: Converted boxes with shape of (n, box_dim). 
""" pass The provided code snippet includes necessary dependencies for implementing the `convert_box_type` function. Write a Python function `def convert_box_type(boxes: BoxType, *, src_type: Union[str, type] = None, dst_type: Union[str, type] = None) -> BoxType` to solve the following problem: Convert boxes from source type to destination type. If ``boxes`` is a instance of BaseBoxes, the ``src_type`` will be set as the type of ``boxes``. Args: boxes (np.ndarray or Tensor or :obj:`BaseBoxes`): boxes need to convert. src_type (str or type, Optional): source box type. Defaults to None. dst_type (str or type, Optional): destination box type. Defaults to None. Returns: Union[np.ndarray, Tensor, :obj:`BaseBoxes`]: Converted boxes. It's type is consistent with the input's type. Here is the function: def convert_box_type(boxes: BoxType, *, src_type: Union[str, type] = None, dst_type: Union[str, type] = None) -> BoxType: """Convert boxes from source type to destination type. If ``boxes`` is a instance of BaseBoxes, the ``src_type`` will be set as the type of ``boxes``. Args: boxes (np.ndarray or Tensor or :obj:`BaseBoxes`): boxes need to convert. src_type (str or type, Optional): source box type. Defaults to None. dst_type (str or type, Optional): destination box type. Defaults to None. Returns: Union[np.ndarray, Tensor, :obj:`BaseBoxes`]: Converted boxes. It's type is consistent with the input's type. 
""" assert dst_type is not None dst_type_name, dst_type_cls = get_box_type(dst_type) is_box_cls = False is_numpy = False if isinstance(boxes, BaseBoxes): src_type_name, _ = get_box_type(type(boxes)) is_box_cls = True elif isinstance(boxes, (Tensor, np.ndarray)): assert src_type is not None src_type_name, _ = get_box_type(src_type) if isinstance(boxes, np.ndarray): is_numpy = True else: raise TypeError('boxes must be a instance of BaseBoxes, Tensor or ' f'ndarray, but get {type(boxes)}.') if src_type_name == dst_type_name: return boxes converter_name = src_type_name + '2' + dst_type_name assert converter_name in box_converters, \ "Convert function hasn't been registered in box_converters." converter = box_converters[converter_name] if is_box_cls: boxes = converter(boxes.tensor) return dst_type_cls(boxes) elif is_numpy: boxes = converter(torch.from_numpy(boxes)) return boxes.numpy() else: return converter(boxes)
Convert boxes from source type to destination type. If ``boxes`` is a instance of BaseBoxes, the ``src_type`` will be set as the type of ``boxes``. Args: boxes (np.ndarray or Tensor or :obj:`BaseBoxes`): boxes need to convert. src_type (str or type, Optional): source box type. Defaults to None. dst_type (str or type, Optional): destination box type. Defaults to None. Returns: Union[np.ndarray, Tensor, :obj:`BaseBoxes`]: Converted boxes. It's type is consistent with the input's type.
14,737
from typing import Callable, Optional, Tuple, Type, Union import numpy as np import torch from torch import Tensor from .base_boxes import BaseBoxes def get_box_type(box_type: Union[str, type]) -> Tuple[str, type]: """get both box type name and class. Args: box_type (str or type): Single box type name or class. Returns: Tuple[str, type]: A tuple of box type name and class. """ if isinstance(box_type, str): type_name = box_type.lower() assert type_name in box_types, \ f"Box type {type_name} hasn't been registered in box_types." type_cls = box_types[type_name] elif issubclass(box_type, BaseBoxes): assert box_type in _box_type_to_name, \ f"Box type {box_type} hasn't been registered in box_types." type_name = _box_type_to_name[box_type] type_cls = box_type else: raise KeyError('box_type must be a str or class inheriting from ' f'BaseBoxes, but got {type(box_type)}.') return type_name, type_cls class BaseBoxes(metaclass=ABCMeta): """The base class for 2D box types. The functions of ``BaseBoxes`` lie in three fields: - Verify the boxes shape. - Support tensor-like operations. - Define abstract functions for 2D boxes. In ``__init__`` , ``BaseBoxes`` verifies the validity of the data shape w.r.t ``box_dim``. The tensor with the dimension >= 2 and the length of the last dimension being ``box_dim`` will be regarded as valid. ``BaseBoxes`` will restore them at the field ``tensor``. It's necessary to override ``box_dim`` in subclass to guarantee the data shape is correct. There are many basic tensor-like functions implemented in ``BaseBoxes``. In most cases, users can operate ``BaseBoxes`` instance like a normal tensor. To protect the validity of data shape, All tensor-like functions cannot modify the last dimension of ``self.tensor``. When creating a new box type, users need to inherit from ``BaseBoxes`` and override abstract methods and specify the ``box_dim``. Then, register the new box type by using the decorator ``register_box_type``. 
Args: data (Tensor or np.ndarray or Sequence): The box data with shape (..., box_dim). dtype (torch.dtype, Optional): data type of boxes. Defaults to None. device (str or torch.device, Optional): device of boxes. Default to None. clone (bool): Whether clone ``boxes`` or not. Defaults to True. """ # Used to verify the last dimension length # Should override it in subclass. box_dim: int = 0 def __init__(self, data: Union[Tensor, np.ndarray, Sequence], dtype: Optional[torch.dtype] = None, device: Optional[DeviceType] = None, clone: bool = True) -> None: if isinstance(data, (np.ndarray, Tensor, Sequence)): data = torch.as_tensor(data) else: raise TypeError('boxes should be Tensor, ndarray, or Sequence, ', f'but got {type(data)}') if device is not None or dtype is not None: data = data.to(dtype=dtype, device=device) # Clone the data to avoid potential bugs if clone: data = data.clone() # handle the empty input like [] if data.numel() == 0: data = data.reshape((-1, self.box_dim)) assert data.dim() >= 2 and data.size(-1) == self.box_dim, \ ('The boxes dimension must >= 2 and the length of the last ' f'dimension must be {self.box_dim}, but got boxes with ' f'shape {data.shape}.') self.tensor = data def convert_to(self, dst_type: Union[str, type]) -> 'BaseBoxes': """Convert self to another box type. Args: dst_type (str or type): destination box type. Returns: :obj:`BaseBoxes`: destination box type object . """ from .box_type import convert_box_type return convert_box_type(self, dst_type=dst_type) def empty_boxes(self: T, dtype: Optional[torch.dtype] = None, device: Optional[DeviceType] = None) -> T: """Create empty box. Args: dtype (torch.dtype, Optional): data type of boxes. device (str or torch.device, Optional): device of boxes. Returns: T: empty boxes with shape of (0, box_dim). 
""" empty_box = self.tensor.new_zeros( 0, self.box_dim, dtype=dtype, device=device) return type(self)(empty_box, clone=False) def fake_boxes(self: T, sizes: Tuple[int], fill: float = 0, dtype: Optional[torch.dtype] = None, device: Optional[DeviceType] = None) -> T: """Create fake boxes with specific sizes and fill values. Args: sizes (Tuple[int]): The size of fake boxes. The last value must be equal with ``self.box_dim``. fill (float): filling value. Defaults to 0. dtype (torch.dtype, Optional): data type of boxes. device (str or torch.device, Optional): device of boxes. Returns: T: Fake boxes with shape of ``sizes``. """ fake_boxes = self.tensor.new_full( sizes, fill, dtype=dtype, device=device) return type(self)(fake_boxes, clone=False) def __getitem__(self: T, index: IndexType) -> T: """Rewrite getitem to protect the last dimension shape.""" boxes = self.tensor if isinstance(index, np.ndarray): index = torch.as_tensor(index, device=self.device) if isinstance(index, Tensor) and index.dtype == torch.bool: assert index.dim() < boxes.dim() elif isinstance(index, tuple): assert len(index) < boxes.dim() # `Ellipsis`(...) is commonly used in index like [None, ...]. # When `Ellipsis` is in index, it must be the last item. if Ellipsis in index: assert index[-1] is Ellipsis boxes = boxes[index] if boxes.dim() == 1: boxes = boxes.reshape(1, -1) return type(self)(boxes, clone=False) def __setitem__(self: T, index: IndexType, values: Union[Tensor, T]) -> T: """Rewrite setitem to protect the last dimension shape.""" assert type(values) is type(self), \ 'The value to be set must be the same box type as self' values = values.tensor if isinstance(index, np.ndarray): index = torch.as_tensor(index, device=self.device) if isinstance(index, Tensor) and index.dtype == torch.bool: assert index.dim() < self.tensor.dim() elif isinstance(index, tuple): assert len(index) < self.tensor.dim() # `Ellipsis`(...) is commonly used in index like [None, ...]. 
# When `Ellipsis` is in index, it must be the last item. if Ellipsis in index: assert index[-1] is Ellipsis self.tensor[index] = values def __len__(self) -> int: """Return the length of self.tensor first dimension.""" return self.tensor.size(0) def __deepcopy__(self, memo): """Only clone the ``self.tensor`` when applying deepcopy.""" cls = self.__class__ other = cls.__new__(cls) memo[id(self)] = other other.tensor = self.tensor.clone() return other def __repr__(self) -> str: """Return a strings that describes the object.""" return self.__class__.__name__ + '(\n' + str(self.tensor) + ')' def new_tensor(self, *args, **kwargs) -> Tensor: """Reload ``new_tensor`` from self.tensor.""" return self.tensor.new_tensor(*args, **kwargs) def new_full(self, *args, **kwargs) -> Tensor: """Reload ``new_full`` from self.tensor.""" return self.tensor.new_full(*args, **kwargs) def new_empty(self, *args, **kwargs) -> Tensor: """Reload ``new_empty`` from self.tensor.""" return self.tensor.new_empty(*args, **kwargs) def new_ones(self, *args, **kwargs) -> Tensor: """Reload ``new_ones`` from self.tensor.""" return self.tensor.new_ones(*args, **kwargs) def new_zeros(self, *args, **kwargs) -> Tensor: """Reload ``new_zeros`` from self.tensor.""" return self.tensor.new_zeros(*args, **kwargs) def size(self, dim: Optional[int] = None) -> Union[int, torch.Size]: """Reload new_zeros from self.tensor.""" # self.tensor.size(dim) cannot work when dim=None. 
return self.tensor.size() if dim is None else self.tensor.size(dim) def dim(self) -> int: """Reload ``dim`` from self.tensor.""" return self.tensor.dim() def device(self) -> torch.device: """Reload ``device`` from self.tensor.""" return self.tensor.device def dtype(self) -> torch.dtype: """Reload ``dtype`` from self.tensor.""" return self.tensor.dtype def shape(self) -> torch.Size: return self.tensor.shape def numel(self) -> int: """Reload ``numel`` from self.tensor.""" return self.tensor.numel() def numpy(self) -> np.ndarray: """Reload ``numpy`` from self.tensor.""" return self.tensor.numpy() def to(self: T, *args, **kwargs) -> T: """Reload ``to`` from self.tensor.""" return type(self)(self.tensor.to(*args, **kwargs), clone=False) def cpu(self: T) -> T: """Reload ``cpu`` from self.tensor.""" return type(self)(self.tensor.cpu(), clone=False) def cuda(self: T, *args, **kwargs) -> T: """Reload ``cuda`` from self.tensor.""" return type(self)(self.tensor.cuda(*args, **kwargs), clone=False) def clone(self: T) -> T: """Reload ``clone`` from self.tensor.""" return type(self)(self.tensor) def detach(self: T) -> T: """Reload ``detach`` from self.tensor.""" return type(self)(self.tensor.detach(), clone=False) def view(self: T, *shape: Tuple[int]) -> T: """Reload ``view`` from self.tensor.""" return type(self)(self.tensor.view(shape), clone=False) def reshape(self: T, *shape: Tuple[int]) -> T: """Reload ``reshape`` from self.tensor.""" return type(self)(self.tensor.reshape(shape), clone=False) def expand(self: T, *sizes: Tuple[int]) -> T: """Reload ``expand`` from self.tensor.""" return type(self)(self.tensor.expand(sizes), clone=False) def repeat(self: T, *sizes: Tuple[int]) -> T: """Reload ``repeat`` from self.tensor.""" return type(self)(self.tensor.repeat(sizes), clone=False) def transpose(self: T, dim0: int, dim1: int) -> T: """Reload ``transpose`` from self.tensor.""" ndim = self.tensor.dim() assert dim0 != -1 and dim0 != ndim - 1 assert dim1 != -1 and dim1 != ndim - 1 
return type(self)(self.tensor.transpose(dim0, dim1), clone=False) def permute(self: T, *dims: Tuple[int]) -> T: """Reload ``permute`` from self.tensor.""" assert dims[-1] == -1 or dims[-1] == self.tensor.dim() - 1 return type(self)(self.tensor.permute(dims), clone=False) def split(self: T, split_size_or_sections: Union[int, Sequence[int]], dim: int = 0) -> List[T]: """Reload ``split`` from self.tensor.""" assert dim != -1 and dim != self.tensor.dim() - 1 boxes_list = self.tensor.split(split_size_or_sections, dim=dim) return [type(self)(boxes, clone=False) for boxes in boxes_list] def chunk(self: T, chunks: int, dim: int = 0) -> List[T]: """Reload ``chunk`` from self.tensor.""" assert dim != -1 and dim != self.tensor.dim() - 1 boxes_list = self.tensor.chunk(chunks, dim=dim) return [type(self)(boxes, clone=False) for boxes in boxes_list] def unbind(self: T, dim: int = 0) -> T: """Reload ``unbind`` from self.tensor.""" assert dim != -1 and dim != self.tensor.dim() - 1 boxes_list = self.tensor.unbind(dim=dim) return [type(self)(boxes, clone=False) for boxes in boxes_list] def flatten(self: T, start_dim: int = 0, end_dim: int = -2) -> T: """Reload ``flatten`` from self.tensor.""" assert end_dim != -1 and end_dim != self.tensor.dim() - 1 return type(self)(self.tensor.flatten(start_dim, end_dim), clone=False) def squeeze(self: T, dim: Optional[int] = None) -> T: """Reload ``squeeze`` from self.tensor.""" boxes = self.tensor.squeeze() if dim is None else \ self.tensor.squeeze(dim) return type(self)(boxes, clone=False) def unsqueeze(self: T, dim: int) -> T: """Reload ``unsqueeze`` from self.tensor.""" assert dim != -1 and dim != self.tensor.dim() return type(self)(self.tensor.unsqueeze(dim), clone=False) def cat(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T: """Cancatenates a box instance list into one single box instance. Similar to ``torch.cat``. Args: box_list (Sequence[T]): A sequence of box instances. 
dim (int): The dimension over which the box are concatenated. Defaults to 0. Returns: T: Concatenated box instance. """ assert isinstance(box_list, Sequence) if len(box_list) == 0: raise ValueError('box_list should not be a empty list.') assert dim != -1 and dim != box_list[0].dim() - 1 assert all(isinstance(boxes, cls) for boxes in box_list) th_box_list = [boxes.tensor for boxes in box_list] return cls(torch.cat(th_box_list, dim=dim), clone=False) def stack(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T: """Concatenates a sequence of tensors along a new dimension. Similar to ``torch.stack``. Args: box_list (Sequence[T]): A sequence of box instances. dim (int): Dimension to insert. Defaults to 0. Returns: T: Concatenated box instance. """ assert isinstance(box_list, Sequence) if len(box_list) == 0: raise ValueError('box_list should not be a empty list.') assert dim != -1 and dim != box_list[0].dim() assert all(isinstance(boxes, cls) for boxes in box_list) th_box_list = [boxes.tensor for boxes in box_list] return cls(torch.stack(th_box_list, dim=dim), clone=False) def centers(self) -> Tensor: """Return a tensor representing the centers of boxes.""" pass def areas(self) -> Tensor: """Return a tensor representing the areas of boxes.""" pass def widths(self) -> Tensor: """Return a tensor representing the widths of boxes.""" pass def heights(self) -> Tensor: """Return a tensor representing the heights of boxes.""" pass def flip_(self, img_shape: Tuple[int, int], direction: str = 'horizontal') -> None: """Flip boxes horizontally or vertically in-place. Args: img_shape (Tuple[int, int]): A tuple of image height and width. direction (str): Flip direction, options are "horizontal", "vertical" and "diagonal". Defaults to "horizontal" """ pass def translate_(self, distances: Tuple[float, float]) -> None: """Translate boxes in-place. Args: distances (Tuple[float, float]): translate distances. The first is horizontal distance and the second is vertical distance. 
""" pass def clip_(self, img_shape: Tuple[int, int]) -> None: """Clip boxes according to the image shape in-place. Args: img_shape (Tuple[int, int]): A tuple of image height and width. """ pass def rotate_(self, center: Tuple[float, float], angle: float) -> None: """Rotate all boxes in-place. Args: center (Tuple[float, float]): Rotation origin. angle (float): Rotation angle represented in degrees. Positive values mean clockwise rotation. """ pass def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None: """Geometric transformat boxes in-place. Args: homography_matrix (Tensor or np.ndarray]): Shape (3, 3) for geometric transformation. """ pass def rescale_(self, scale_factor: Tuple[float, float]) -> None: """Rescale boxes w.r.t. rescale_factor in-place. Note: Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes w.r.t ``scale_facotr``. The difference is that ``resize_`` only changes the width and the height of boxes, but ``rescale_`` also rescales the box centers simultaneously. Args: scale_factor (Tuple[float, float]): factors for scaling boxes. The length should be 2. """ pass def resize_(self, scale_factor: Tuple[float, float]) -> None: """Resize the box width and height w.r.t scale_factor in-place. Note: Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes w.r.t ``scale_facotr``. The difference is that ``resize_`` only changes the width and the height of boxes, but ``rescale_`` also rescales the box centers simultaneously. Args: scale_factor (Tuple[float, float]): factors for scaling box shapes. The length should be 2. """ pass def is_inside(self, img_shape: Tuple[int, int], all_inside: bool = False, allowed_border: int = 0) -> BoolTensor: """Find boxes inside the image. Args: img_shape (Tuple[int, int]): A tuple of image height and width. all_inside (bool): Whether the boxes are all inside the image or part inside the image. Defaults to False. 
allowed_border (int): Boxes that extend beyond the image shape boundary by more than ``allowed_border`` are considered "outside" Defaults to 0. Returns: BoolTensor: A BoolTensor indicating whether the box is inside the image. Assuming the original boxes have shape (m, n, box_dim), the output has shape (m, n). """ pass def find_inside_points(self, points: Tensor, is_aligned: bool = False) -> BoolTensor: """Find inside box points. Boxes dimension must be 2. Args: points (Tensor): Points coordinates. Has shape of (m, 2). is_aligned (bool): Whether ``points`` has been aligned with boxes or not. If True, the length of boxes and ``points`` should be the same. Defaults to False. Returns: BoolTensor: A BoolTensor indicating whether a point is inside boxes. Assuming the boxes has shape of (n, box_dim), if ``is_aligned`` is False. The index has shape of (m, n). If ``is_aligned`` is True, m should be equal to n and the index has shape of (m, ). """ pass def overlaps(boxes1: 'BaseBoxes', boxes2: 'BaseBoxes', mode: str = 'iou', is_aligned: bool = False, eps: float = 1e-6) -> Tensor: """Calculate overlap between two set of boxes with their types converted to the present box type. Args: boxes1 (:obj:`BaseBoxes`): BaseBoxes with shape of (m, box_dim) or empty. boxes2 (:obj:`BaseBoxes`): BaseBoxes with shape of (n, box_dim) or empty. mode (str): "iou" (intersection over union), "iof" (intersection over foreground). Defaults to "iou". is_aligned (bool): If True, then m and n must be equal. Defaults to False. eps (float): A value added to the denominator for numerical stability. Defaults to 1e-6. Returns: Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) """ pass def from_instance_masks(masks: MaskType) -> 'BaseBoxes': """Create boxes from instance masks. Args: masks (:obj:`BitmapMasks` or :obj:`PolygonMasks`): BitmapMasks or PolygonMasks instance with length of n. Returns: :obj:`BaseBoxes`: Converted boxes with shape of (n, box_dim). 
""" pass The provided code snippet includes necessary dependencies for implementing the `autocast_box_type` function. Write a Python function `def autocast_box_type(dst_box_type='hbox') -> Callable` to solve the following problem: A decorator which automatically casts results['gt_bboxes'] to the destination box type. It commenly used in mmdet.datasets.transforms to make the transforms up- compatible with the np.ndarray type of results['gt_bboxes']. The speed of processing of np.ndarray and BaseBoxes data are the same: - np.ndarray: 0.0509 img/s - BaseBoxes: 0.0551 img/s Args: dst_box_type (str): Destination box type. Here is the function: def autocast_box_type(dst_box_type='hbox') -> Callable: """A decorator which automatically casts results['gt_bboxes'] to the destination box type. It commenly used in mmdet.datasets.transforms to make the transforms up- compatible with the np.ndarray type of results['gt_bboxes']. The speed of processing of np.ndarray and BaseBoxes data are the same: - np.ndarray: 0.0509 img/s - BaseBoxes: 0.0551 img/s Args: dst_box_type (str): Destination box type. 
""" _, box_type_cls = get_box_type(dst_box_type) def decorator(func: Callable) -> Callable: def wrapper(self, results: dict, *args, **kwargs) -> dict: if ('gt_bboxes' not in results or isinstance(results['gt_bboxes'], BaseBoxes)): return func(self, results) elif isinstance(results['gt_bboxes'], np.ndarray): results['gt_bboxes'] = box_type_cls( results['gt_bboxes'], clone=False) if 'mix_results' in results: for res in results['mix_results']: if isinstance(res['gt_bboxes'], np.ndarray): res['gt_bboxes'] = box_type_cls( res['gt_bboxes'], clone=False) _results = func(self, results, *args, **kwargs) # In some cases, the function will process gt_bboxes in-place # Simultaneously convert inputting and outputting gt_bboxes # back to np.ndarray if isinstance(_results, dict) and 'gt_bboxes' in _results: if isinstance(_results['gt_bboxes'], BaseBoxes): _results['gt_bboxes'] = _results['gt_bboxes'].numpy() if isinstance(results['gt_bboxes'], BaseBoxes): results['gt_bboxes'] = results['gt_bboxes'].numpy() return _results else: raise TypeError( "auto_box_type requires results['gt_bboxes'] to " 'be BaseBoxes or np.ndarray, but got ' f"{type(results['gt_bboxes'])}") return wrapper return decorator
A decorator which automatically casts results['gt_bboxes'] to the destination box type. It commenly used in mmdet.datasets.transforms to make the transforms up- compatible with the np.ndarray type of results['gt_bboxes']. The speed of processing of np.ndarray and BaseBoxes data are the same: - np.ndarray: 0.0509 img/s - BaseBoxes: 0.0551 img/s Args: dst_box_type (str): Destination box type.
14,738
import torch def fp16_clamp(x, min=None, max=None): if not x.is_cuda and x.dtype == torch.float16: # clamp for cpu float16, tensor fp16 has no clamp implementation return x.float().clamp(min, max).half() return x.clamp(min, max) The provided code snippet includes necessary dependencies for implementing the `bbox_overlaps` function. Write a Python function `def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6)` to solve the following problem: Calculate overlap between two set of bboxes. FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889 Note: Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou', there are some new generated variable when calculating IOU using bbox_overlaps function: 1) is_aligned is False area1: M x 1 area2: N x 1 lt: M x N x 2 rb: M x N x 2 wh: M x N x 2 overlap: M x N x 1 union: M x N x 1 ious: M x N x 1 Total memory: S = (9 x N x M + N + M) * 4 Byte, When using FP16, we can reduce: R = (9 x N x M + N + M) * 4 / 2 Byte R large than (N + M) * 4 * 2 is always true when N and M >= 1. Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2, N + 1 < 3 * N, when N or M is 1. Given M = 40 (ground truth), N = 400000 (three anchor boxes in per grid, FPN, R-CNNs), R = 275 MB (one times) A special case (dense detection), M = 512 (ground truth), R = 3516 MB = 3.43 GB When the batch size is B, reduce: B x R Therefore, CUDA memory runs out frequently. 
Experiments on GeForce RTX 2080Ti (11019 MiB): | dtype | M | N | Use | Real | Ideal | |:----:|:----:|:----:|:----:|:----:|:----:| | FP32 | 512 | 400000 | 8020 MiB | -- | -- | | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB | | FP32 | 40 | 400000 | 1540 MiB | -- | -- | | FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB | 2) is_aligned is True area1: N x 1 area2: N x 1 lt: N x 2 rb: N x 2 wh: N x 2 overlap: N x 1 union: N x 1 ious: N x 1 Total memory: S = 11 x N * 4 Byte When using FP16, we can reduce: R = 11 x N * 4 / 2 Byte So do the 'giou' (large than 'iou'). Time-wise, FP16 is generally faster than FP32. When gpu_assign_thr is not -1, it takes more time on cpu but not reduce memory. There, we can reduce half the memory and keep the speed. If ``is_aligned`` is ``False``, then calculate the overlaps between each bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned pair of bboxes1 and bboxes2. Args: bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty. bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty. B indicates the batch dim, in shape (B1, B2, ..., Bn). If ``is_aligned`` is ``True``, then m and n must be equal. mode (str): "iou" (intersection over union), "iof" (intersection over foreground) or "giou" (generalized intersection over union). Default "iou". is_aligned (bool, optional): If True, then m and n must be equal. Default False. eps (float, optional): A value added to the denominator for numerical stability. Default 1e-6. 
Returns: Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) Example: >>> bboxes1 = torch.FloatTensor([ >>> [0, 0, 10, 10], >>> [10, 10, 20, 20], >>> [32, 32, 38, 42], >>> ]) >>> bboxes2 = torch.FloatTensor([ >>> [0, 0, 10, 20], >>> [0, 10, 10, 19], >>> [10, 10, 20, 20], >>> ]) >>> overlaps = bbox_overlaps(bboxes1, bboxes2) >>> assert overlaps.shape == (3, 3) >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) >>> assert overlaps.shape == (3, ) Example: >>> empty = torch.empty(0, 4) >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]]) >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) Here is the function: def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6): """Calculate overlap between two set of bboxes. FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889 Note: Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou', there are some new generated variable when calculating IOU using bbox_overlaps function: 1) is_aligned is False area1: M x 1 area2: N x 1 lt: M x N x 2 rb: M x N x 2 wh: M x N x 2 overlap: M x N x 1 union: M x N x 1 ious: M x N x 1 Total memory: S = (9 x N x M + N + M) * 4 Byte, When using FP16, we can reduce: R = (9 x N x M + N + M) * 4 / 2 Byte R large than (N + M) * 4 * 2 is always true when N and M >= 1. Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2, N + 1 < 3 * N, when N or M is 1. Given M = 40 (ground truth), N = 400000 (three anchor boxes in per grid, FPN, R-CNNs), R = 275 MB (one times) A special case (dense detection), M = 512 (ground truth), R = 3516 MB = 3.43 GB When the batch size is B, reduce: B x R Therefore, CUDA memory runs out frequently. 
Experiments on GeForce RTX 2080Ti (11019 MiB): | dtype | M | N | Use | Real | Ideal | |:----:|:----:|:----:|:----:|:----:|:----:| | FP32 | 512 | 400000 | 8020 MiB | -- | -- | | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB | | FP32 | 40 | 400000 | 1540 MiB | -- | -- | | FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB | 2) is_aligned is True area1: N x 1 area2: N x 1 lt: N x 2 rb: N x 2 wh: N x 2 overlap: N x 1 union: N x 1 ious: N x 1 Total memory: S = 11 x N * 4 Byte When using FP16, we can reduce: R = 11 x N * 4 / 2 Byte So do the 'giou' (large than 'iou'). Time-wise, FP16 is generally faster than FP32. When gpu_assign_thr is not -1, it takes more time on cpu but not reduce memory. There, we can reduce half the memory and keep the speed. If ``is_aligned`` is ``False``, then calculate the overlaps between each bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned pair of bboxes1 and bboxes2. Args: bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty. bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty. B indicates the batch dim, in shape (B1, B2, ..., Bn). If ``is_aligned`` is ``True``, then m and n must be equal. mode (str): "iou" (intersection over union), "iof" (intersection over foreground) or "giou" (generalized intersection over union). Default "iou". is_aligned (bool, optional): If True, then m and n must be equal. Default False. eps (float, optional): A value added to the denominator for numerical stability. Default 1e-6. 
Returns: Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) Example: >>> bboxes1 = torch.FloatTensor([ >>> [0, 0, 10, 10], >>> [10, 10, 20, 20], >>> [32, 32, 38, 42], >>> ]) >>> bboxes2 = torch.FloatTensor([ >>> [0, 0, 10, 20], >>> [0, 10, 10, 19], >>> [10, 10, 20, 20], >>> ]) >>> overlaps = bbox_overlaps(bboxes1, bboxes2) >>> assert overlaps.shape == (3, 3) >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) >>> assert overlaps.shape == (3, ) Example: >>> empty = torch.empty(0, 4) >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]]) >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) """ assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}' # Either the boxes are empty or the length of boxes' last dimension is 4 assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0) assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0) # Batch dim must be the same # Batch dim: (B1, B2, ... 
Bn) assert bboxes1.shape[:-2] == bboxes2.shape[:-2] batch_shape = bboxes1.shape[:-2] rows = bboxes1.size(-2) cols = bboxes2.size(-2) if is_aligned: assert rows == cols if rows * cols == 0: if is_aligned: return bboxes1.new(batch_shape + (rows, )) else: return bboxes1.new(batch_shape + (rows, cols)) area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * ( bboxes1[..., 3] - bboxes1[..., 1]) area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * ( bboxes2[..., 3] - bboxes2[..., 1]) if is_aligned: lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2] rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2] wh = fp16_clamp(rb - lt, min=0) overlap = wh[..., 0] * wh[..., 1] if mode in ['iou', 'giou']: union = area1 + area2 - overlap else: union = area1 if mode == 'giou': enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2]) enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:]) else: lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]) # [B, rows, cols, 2] rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]) # [B, rows, cols, 2] wh = fp16_clamp(rb - lt, min=0) overlap = wh[..., 0] * wh[..., 1] if mode in ['iou', 'giou']: union = area1[..., None] + area2[..., None, :] - overlap else: union = area1[..., None] if mode == 'giou': enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]) enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]) eps = union.new_tensor([eps]) union = torch.max(union, eps) ious = overlap / union if mode in ['iou', 'iof']: return ious # calculate gious enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0) enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] enclose_area = torch.max(enclose_area, eps) gious = ious - (enclose_area - union) / enclose_area return gious
Calculate overlap between two set of bboxes. FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889 Note: Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou', there are some new generated variable when calculating IOU using bbox_overlaps function: 1) is_aligned is False area1: M x 1 area2: N x 1 lt: M x N x 2 rb: M x N x 2 wh: M x N x 2 overlap: M x N x 1 union: M x N x 1 ious: M x N x 1 Total memory: S = (9 x N x M + N + M) * 4 Byte, When using FP16, we can reduce: R = (9 x N x M + N + M) * 4 / 2 Byte R large than (N + M) * 4 * 2 is always true when N and M >= 1. Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2, N + 1 < 3 * N, when N or M is 1. Given M = 40 (ground truth), N = 400000 (three anchor boxes in per grid, FPN, R-CNNs), R = 275 MB (one times) A special case (dense detection), M = 512 (ground truth), R = 3516 MB = 3.43 GB When the batch size is B, reduce: B x R Therefore, CUDA memory runs out frequently. Experiments on GeForce RTX 2080Ti (11019 MiB): | dtype | M | N | Use | Real | Ideal | |:----:|:----:|:----:|:----:|:----:|:----:| | FP32 | 512 | 400000 | 8020 MiB | -- | -- | | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB | | FP32 | 40 | 400000 | 1540 MiB | -- | -- | | FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB | 2) is_aligned is True area1: N x 1 area2: N x 1 lt: N x 2 rb: N x 2 wh: N x 2 overlap: N x 1 union: N x 1 ious: N x 1 Total memory: S = 11 x N * 4 Byte When using FP16, we can reduce: R = 11 x N * 4 / 2 Byte So do the 'giou' (large than 'iou'). Time-wise, FP16 is generally faster than FP32. When gpu_assign_thr is not -1, it takes more time on cpu but not reduce memory. There, we can reduce half the memory and keep the speed. If ``is_aligned`` is ``False``, then calculate the overlaps between each bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned pair of bboxes1 and bboxes2. Args: bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty. 
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty. B indicates the batch dim, in shape (B1, B2, ..., Bn). If ``is_aligned`` is ``True``, then m and n must be equal. mode (str): "iou" (intersection over union), "iof" (intersection over foreground) or "giou" (generalized intersection over union). Default "iou". is_aligned (bool, optional): If True, then m and n must be equal. Default False. eps (float, optional): A value added to the denominator for numerical stability. Default 1e-6. Returns: Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) Example: >>> bboxes1 = torch.FloatTensor([ >>> [0, 0, 10, 10], >>> [10, 10, 20, 20], >>> [32, 32, 38, 42], >>> ]) >>> bboxes2 = torch.FloatTensor([ >>> [0, 0, 10, 20], >>> [0, 10, 10, 19], >>> [10, 10, 20, 20], >>> ]) >>> overlaps = bbox_overlaps(bboxes1, bboxes2) >>> assert overlaps.shape == (3, 3) >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) >>> assert overlaps.shape == (3, ) Example: >>> empty = torch.empty(0, 4) >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]]) >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
14,739
def split_combined_polys(polys, poly_lens, polys_per_mask):
    """Split the combined 1-D polys into masks.

    A mask is represented as a list of polys, and a poly is represented as
    a 1-D array. In dataset, all masks are concatenated into a single 1-D
    tensor; this undoes that concatenation in two stages: first into
    individual polys, then into per-mask groups of polys.

    Args:
        polys (list): a list (length = image num) of 1-D tensors
        poly_lens (list): a list (length = image num) of poly length
        polys_per_mask (list): a list (length = image num) of poly number
            of each mask

    Returns:
        list: a list (length = image num) of list (length = mask num) of
            list (length = poly num) of numpy array.
    """
    results = []
    for idx in range(len(polys)):
        # First split the flat tensor into individual polys, then group
        # those polys by how many each mask owns.
        pieces = slice_list(polys[idx], poly_lens[idx].tolist())
        results.append(slice_list(pieces, polys_per_mask[idx].tolist()))
    return results
Split the combined 1-D polys into masks. A mask is represented as a list of polys, and a poly is represented as a 1-D array. In dataset, all masks are concatenated into a single 1-D tensor. Here we need to split the tensor into original representations. Args: polys (list): a list (length = image num) of 1-D tensors poly_lens (list): a list (length = image num) of poly length polys_per_mask (list): a list (length = image num) of poly number of each mask Returns: list: a list (length = image num) of list (length = mask num) of \ list (length = poly num) of numpy array.
14,740
def encode_mask_results(mask_results):
    """Encode bitmap mask to RLE code.

    Args:
        mask_results (list): bitmap mask results.

    Returns:
        list | tuple: RLE encoded mask.
    """
    # Fortran order with a trailing channel axis is what pycocotools'
    # RLE encoder expects.
    return [
        mask_util.encode(
            np.array(mask[:, :, np.newaxis], order='F',
                     dtype='uint8'))[0]  # encoded with RLE
        for mask in mask_results
    ]
Encode bitmap mask to RLE code. Args: mask_results (list): bitmap mask results. Returns: list | tuple: RLE encoded mask.
14,741
def mask2bbox(masks):
    """Obtain tight bounding boxes of binary masks.

    Args:
        masks (Tensor): Binary mask of shape (n, h, w).

    Returns:
        Tensor: Bboxes with shape (n, 4) of the positive region in each
            binary mask; rows for empty masks stay all-zero.
    """
    num_masks = masks.shape[0]
    boxes = masks.new_zeros((num_masks, 4), dtype=torch.float32)
    # Reduce over rows/cols to find which columns and rows contain any
    # foreground pixel.
    col_any = torch.any(masks, dim=1)  # (n, w)
    row_any = torch.any(masks, dim=2)  # (n, h)
    for idx in range(num_masks):
        cols = col_any[idx].nonzero(as_tuple=False).flatten()
        rows = row_any[idx].nonzero(as_tuple=False).flatten()
        if cols.numel() > 0 and rows.numel() > 0:
            # +1 makes the box exclusive on the bottom-right corner.
            boxes[idx] = boxes.new_tensor(
                [cols[0], rows[0], cols[-1] + 1, rows[-1] + 1])
    return boxes
Obtain tight bounding boxes of binary masks. Args: masks (Tensor): Binary mask of shape (n, h, w). Returns: Tensor: Bboxes with shape (n, 4) of \ positive region in binary mask.
14,742
def polygon_to_bitmap(polygons, height, width):
    """Convert masks from the form of polygons to bitmaps.

    Args:
        polygons (list[ndarray]): masks in polygon representation
        height (int): mask height
        width (int): mask width

    Return:
        ndarray: the converted masks in bitmap representation
    """
    # Rasterize every polygon to RLE, union them, then decode to a
    # boolean bitmap.
    rle_objs = maskUtils.frPyObjects(polygons, height, width)
    merged_rle = maskUtils.merge(rle_objs)
    return maskUtils.decode(merged_rle).astype(bool)
Convert masks from the form of polygons to bitmaps. Args: polygons (list[ndarray]): masks in polygon representation height (int): mask height width (int): mask width Return: ndarray: the converted masks in bitmap representation
14,743
def bitmap_to_polygon(bitmap):
    """Convert masks from the form of bitmaps to polygons.

    Args:
        bitmap (ndarray): masks in bitmap representation.

    Return:
        list[ndarray]: the converted mask in polygon representation.
        bool: whether the mask has holes.
    """
    bitmap = np.ascontiguousarray(bitmap).astype(np.uint8)
    # cv2.RETR_CCOMP: two-level hierarchy — external boundaries at the
    # top level, hole boundaries at the second level.
    # cv2.CHAIN_APPROX_NONE: keep every contour point.
    # Index from the end so this works on both OpenCV 3.x (3 return
    # values) and 4.x (2 return values).
    found = cv2.findContours(bitmap, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
    contours, hierarchy = found[-2], found[-1]
    if hierarchy is None:
        return [], False
    # hierarchy[i] holds [next, previous, parent, nested]; a non-negative
    # parent index means the contour is a hole boundary.
    with_hole = (hierarchy.reshape(-1, 4)[:, 3] >= 0).any()
    return [contour.reshape(-1, 2) for contour in contours], with_hole
Convert masks from the form of bitmaps to polygons. Args: bitmap (ndarray): masks in bitmap representation. Return: list[ndarray]: the converted mask in polygon representation. bool: whether the mask has holes.
14,744
def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
    """Compute mask targets for each positive proposal in one image.

    Args:
        pos_proposals (Tensor): Positive proposals, shape (num_pos, 4).
        pos_assigned_gt_inds (Tensor): Assigned GT inds of positive
            proposals, shape (num_pos,).
        gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of
            Bitmap or Polygon.
        cfg (dict): Config dict that indicates the mask size.

    Returns:
        Tensor: Mask target of each positive proposal in the image,
            shape (num_pos, *mask_size).
    """
    device = pos_proposals.device
    out_size = _pair(cfg.mask_size)
    # Soft targets keep fractional values instead of thresholding to 0/1.
    binarize = not cfg.get('soft_mask_target', False)
    if pos_proposals.size(0) == 0:
        return pos_proposals.new_zeros((0, ) + out_size)

    boxes = pos_proposals.cpu().numpy()
    max_h, max_w = gt_masks.height, gt_masks.width
    # Clamp boxes to the image so crop_and_resize never reads out of
    # bounds.
    boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, max_w)
    boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, max_h)
    assigned = pos_assigned_gt_inds.cpu().numpy()

    targets = gt_masks.crop_and_resize(
        boxes,
        out_size,
        device=device,
        inds=assigned,
        binarize=binarize).to_ndarray()
    return torch.from_numpy(targets).float().to(device)


def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
                cfg):
    """Compute mask targets for positive proposals in multiple images.

    Args:
        pos_proposals_list (list[Tensor]): Positive proposals in multiple
            images, each has shape (num_pos, 4).
        pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for
            each positive proposal, each has shape (num_pos,).
        gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks
            of each image.
        cfg (dict): Config dict that specifies the mask size.

    Returns:
        Tensor: Mask target of every image concatenated along dim 0, has
            shape (total_num_pos, w, h); an empty list if the input lists
            are empty.
    """
    per_image = [
        mask_target_single(proposals, assigned, masks, cfg)
        for proposals, assigned, masks in zip(pos_proposals_list,
                                              pos_assigned_gt_inds_list,
                                              gt_masks_list)
    ]
    if len(per_image) > 0:
        per_image = torch.cat(per_image)
    return per_image
Compute mask target for positive proposals in multiple images. Args: pos_proposals_list (list[Tensor]): Positive proposals in multiple images, each has shape (num_pos, 4). pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each positive proposals, each has shape (num_pos,). gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of each image. cfg (dict): Config dict that specifies the mask size. Returns: Tensor: Mask target of each image, has shape (num_pos, w, h). Example: >>> from mmengine.config import Config >>> import mmdet >>> from mmdet.data_elements.mask import BitmapMasks >>> from mmdet.data_elements.mask.mask_target import * >>> H, W = 17, 18 >>> cfg = Config({'mask_size': (13, 14)}) >>> rng = np.random.RandomState(0) >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image >>> pos_proposals_list = [ >>> torch.Tensor([ >>> [ 7.2425, 5.5929, 13.9414, 14.9541], >>> [ 7.3241, 3.6170, 16.3850, 15.3102], >>> ]), >>> torch.Tensor([ >>> [ 4.8448, 6.4010, 7.0314, 9.7681], >>> [ 5.9790, 2.6989, 7.4416, 4.8580], >>> [ 0.0000, 0.0000, 0.1398, 9.8232], >>> ]), >>> ] >>> # Corresponding class index for each proposal for each image >>> pos_assigned_gt_inds_list = [ >>> torch.LongTensor([7, 0]), >>> torch.LongTensor([5, 4, 1]), >>> ] >>> # Ground truth mask for each true object for each image >>> gt_masks_list = [ >>> BitmapMasks(rng.rand(8, H, W), height=H, width=W), >>> BitmapMasks(rng.rand(6, H, W), height=H, width=W), >>> ] >>> mask_targets = mask_target( >>> pos_proposals_list, pos_assigned_gt_inds_list, >>> gt_masks_list, cfg) >>> assert mask_targets.shape == (5,) + cfg['mask_size']
14,745
def init_detector(
    config: Union[str, Path, Config],
    checkpoint: Optional[str] = None,
    palette: str = 'none',
    device: str = 'cuda:0',
    cfg_options: Optional[dict] = None,
) -> nn.Module:
    """Initialize a detector from config file.

    Args:
        config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file
            path, :obj:`Path`, or the config object.
        checkpoint (str, optional): Checkpoint path. If left as None, the
            model will not load any weights.
        palette (str): Color palette used for visualization. If palette is
            stored in checkpoint, use checkpoint's palette first, otherwise
            use externally passed palette. Currently, supports 'coco', 'voc',
            'citys' and 'random'. Defaults to none.
        device (str): The device where the anchors will be put on.
            Defaults to cuda:0.
        cfg_options (dict, optional): Options to override some settings in
            the used config.

    Returns:
        nn.Module: The constructed detector.
    """
    if isinstance(config, (str, Path)):
        config = Config.fromfile(config)
    elif not isinstance(config, Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    if cfg_options is not None:
        config.merge_from_dict(cfg_options)
    elif 'init_cfg' in config.model.backbone:
        # NOTE(review): init_cfg is only cleared when no cfg_options are
        # supplied — confirm this asymmetry is intended.
        config.model.backbone.init_cfg = None

    model = MODELS.build(config.model)

    if checkpoint is None:
        warnings.simplefilter('once')
        warnings.warn('checkpoint is None, use COCO classes by default.')
        model.dataset_meta = {'classes': get_classes('coco')}
    else:
        ckpt = load_checkpoint(model, checkpoint, map_location='cpu')
        # Weights converted from elsewhere may not have meta fields.
        meta = ckpt.get('meta', {})
        if 'dataset_meta' in meta:
            # mmdet 3.x checkpoints: all keys should be lowercase.
            model.dataset_meta = {
                key.lower(): value
                for key, value in meta['dataset_meta'].items()
            }
        elif 'CLASSES' in meta:
            # Checkpoints from mmdet < 3.x only carry class names.
            model.dataset_meta = {'classes': meta['CLASSES']}
        else:
            warnings.simplefilter('once')
            warnings.warn(
                'dataset_meta or class names are not saved in the '
                'checkpoint\'s meta data, use COCO classes by default.')
            model.dataset_meta = {'classes': get_classes('coco')}

    # Priority: args.palette -> config -> checkpoint
    if palette != 'none':
        model.dataset_meta['palette'] = palette
    else:
        dataset_cfg = copy.deepcopy(config.test_dataloader.dataset)
        # lazy init: only the metainfo is needed, not the full dataset.
        dataset_cfg['lazy_init'] = True
        metainfo = DATASETS.build(dataset_cfg).metainfo
        cfg_palette = metainfo.get('palette', None)
        if cfg_palette is not None:
            model.dataset_meta['palette'] = cfg_palette
        elif 'palette' not in model.dataset_meta:
            warnings.warn('palette does not exist, random is used by default. '
                          'You can also set the palette to customize.')
            model.dataset_meta['palette'] = 'random'

    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
Initialize a detector from config file. Args: config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path, :obj:`Path`, or the config object. checkpoint (str, optional): Checkpoint path. If left as None, the model will not load any weights. palette (str): Color palette used for visualization. If palette is stored in checkpoint, use checkpoint's palette first, otherwise use externally passed palette. Currently, supports 'coco', 'voc', 'citys' and 'random'. Defaults to none. device (str): The device where the anchors will be put on. Defaults to cuda:0. cfg_options (dict, optional): Options to override some settings in the used config. Returns: nn.Module: The constructed detector.
14,746
def inference_detector(
        model: nn.Module,
        imgs: ImagesType,
        test_pipeline: Optional[Compose] = None
) -> Union[DetDataSample, SampleList]:
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        imgs (str, ndarray, Sequence[str/ndarray]):
            Either image files or loaded images.
        test_pipeline (:obj:`Compose`): Test pipeline.

    Returns:
        :obj:`DetDataSample` or list[:obj:`DetDataSample`]:
        If imgs is a list or tuple, the same length list type results
        will be returned, otherwise return the detection results directly.
    """
    is_batch = isinstance(imgs, (list, tuple))
    if not is_batch:
        imgs = [imgs]

    cfg = model.cfg
    if test_pipeline is None:
        cfg = cfg.copy()
        pipeline_cfg = get_test_pipeline_cfg(cfg)
        if isinstance(imgs[0], np.ndarray):
            # Calling this method across libraries will result
            # in module unregistered error if not prefixed with mmdet.
            pipeline_cfg[0].type = 'mmdet.LoadImageFromNDArray'
        test_pipeline = Compose(pipeline_cfg)

    if model.data_preprocessor.device.type == 'cpu':
        for module in model.modules():
            assert not isinstance(
                module, RoIPool
            ), 'CPU inference with RoIPool is not supported currently.'

    results = []
    for img in imgs:
        # prepare data; TODO: remove img_id.
        if isinstance(img, np.ndarray):
            sample = dict(img=img, img_id=0)
        else:
            sample = dict(img_path=img, img_id=0)
        # build the data pipeline and wrap into single-element batches
        sample = test_pipeline(sample)
        sample['inputs'] = [sample['inputs']]
        sample['data_samples'] = [sample['data_samples']]
        # forward the model
        with torch.no_grad():
            results.append(model.test_step(sample)[0])

    return results if is_batch else results[0]
Inference image(s) with the detector. Args: model (nn.Module): The loaded detector. imgs (str, ndarray, Sequence[str/ndarray]): Either image files or loaded images. test_pipeline (:obj:`Compose`): Test pipeline. Returns: :obj:`DetDataSample` or list[:obj:`DetDataSample`]: If imgs is a list or tuple, the same length list type results will be returned, otherwise return the detection results directly.
14,747
async def async_inference_detector(model, imgs):
    """Async inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        imgs (str | ndarray): Either image files or loaded images.

    Returns:
        Awaitable detection results.
    """
    if not isinstance(imgs, (list, tuple)):
        imgs = [imgs]

    cfg = model.cfg
    if isinstance(imgs[0], np.ndarray):
        cfg = cfg.copy()
        # Loaded arrays skip file reading in the pipeline.
        cfg.data.test.pipeline[0].type = 'LoadImageFromNDArray'

    test_pipeline = Compose(cfg.data.test.pipeline)

    datas = []
    for img in imgs:
        if isinstance(img, np.ndarray):
            # directly add the loaded image
            data = dict(img=img)
        else:
            # add file information into dict
            data = dict(img_info=dict(filename=img), img_prefix=None)
        data = test_pipeline(data)
        datas.append(data)

    for module in model.modules():
        assert not isinstance(
            module,
            RoIPool), 'CPU inference with RoIPool is not supported currently.'

    # We don't restore `torch.is_grad_enabled()` value during concurrent
    # inference since execution can overlap.
    torch.set_grad_enabled(False)
    # NOTE(review): only the last prepared sample (`data`) is forwarded even
    # though `datas` collects one entry per image — confirm whether a
    # collate/batch step is missing here.
    results = await model.aforward_test(data, rescale=True)
    return results
Async inference image(s) with the detector. Args: model (nn.Module): The loaded detector. img (str | ndarray): Either image files or loaded images. Returns: Awaitable detection results.
14,748
def palette_val(palette: List[tuple]) -> List[tuple]:
    """Convert palette to matplotlib palette.

    Args:
        palette (List[tuple]): A list of color tuples.

    Returns:
        List[tuple[float]]: A list of RGB matplotlib color tuples.
    """
    # matplotlib expects channel values in [0, 1] rather than [0, 255].
    return [tuple(channel / 255 for channel in color) for color in palette]
Convert palette to matplotlib palette. Args: palette (List[tuple]): A list of color tuples. Returns: List[tuple[float]]: A list of RGB matplotlib color tuples.
14,749
def get_palette(palette: Union[List[tuple], str, tuple],
                num_classes: int) -> List[Tuple[int]]:
    """Get palette from various inputs.

    Args:
        palette (list[tuple] | str | tuple): palette inputs.
        num_classes (int): the number of classes.

    Returns:
        list[tuple[int]]: A list of color tuples.
    """
    assert isinstance(num_classes, int)

    if isinstance(palette, list):
        colors = palette
    elif isinstance(palette, tuple):
        colors = [palette] * num_classes
    elif palette == 'random' or palette is None:
        # Draw reproducible random colors without disturbing the global
        # RNG stream.
        state = np.random.get_state()
        np.random.seed(42)
        drawn = np.random.randint(0, 256, size=(num_classes, 3))
        np.random.set_state(state)
        colors = [tuple(c) for c in drawn]
    elif palette == 'coco':
        from mmdet.datasets import CocoDataset, CocoPanopticDataset
        colors = CocoDataset.METAINFO['palette']
        if len(colors) < num_classes:
            colors = CocoPanopticDataset.METAINFO['palette']
    elif palette == 'citys':
        from mmdet.datasets import CityscapesDataset
        colors = CityscapesDataset.METAINFO['palette']
    elif palette == 'voc':
        from mmdet.datasets import VOCDataset
        colors = VOCDataset.METAINFO['palette']
    elif is_str(palette):
        # A single named color (BGR from mmcv, reversed to RGB) repeated
        # for every class.
        colors = [mmcv.color_val(palette)[::-1]] * num_classes
    else:
        raise TypeError(f'Invalid type for palette: {type(palette)}')

    assert len(colors) >= num_classes, \
        'The length of palette should not be less than `num_classes`.'
    return colors
Get palette from various inputs. Args: palette (list[tuple] | str | tuple): palette inputs. num_classes (int): the number of classes. Returns: list[tuple[int]]: A list of color tuples.
14,750
from typing import List, Tuple, Union import mmcv import numpy as np from mmengine.utils import is_str The provided code snippet includes necessary dependencies for implementing the `_get_adaptive_scales` function. Write a Python function `def _get_adaptive_scales(areas: np.ndarray, min_area: int = 800, max_area: int = 30000) -> np.ndarray` to solve the following problem: Get adaptive scales according to areas. The scale range is [0.5, 1.0]. When the area is less than ``min_area``, the scale is 0.5 while the area is larger than ``max_area``, the scale is 1.0. Args: areas (ndarray): The areas of bboxes or masks with the shape of (n, ). min_area (int): Lower bound areas for adaptive scales. Defaults to 800. max_area (int): Upper bound areas for adaptive scales. Defaults to 30000. Returns: ndarray: The adaotive scales with the shape of (n, ). Here is the function: def _get_adaptive_scales(areas: np.ndarray, min_area: int = 800, max_area: int = 30000) -> np.ndarray: """Get adaptive scales according to areas. The scale range is [0.5, 1.0]. When the area is less than ``min_area``, the scale is 0.5 while the area is larger than ``max_area``, the scale is 1.0. Args: areas (ndarray): The areas of bboxes or masks with the shape of (n, ). min_area (int): Lower bound areas for adaptive scales. Defaults to 800. max_area (int): Upper bound areas for adaptive scales. Defaults to 30000. Returns: ndarray: The adaotive scales with the shape of (n, ). """ scales = 0.5 + (areas - min_area) / (max_area - min_area) scales = np.clip(scales, 0.5, 1.0) return scales
Get adaptive scales according to areas. The scale range is [0.5, 1.0]. When the area is less than ``min_area``, the scale is 0.5 while the area is larger than ``max_area``, the scale is 1.0. Args: areas (ndarray): The areas of bboxes or masks with the shape of (n, ). min_area (int): Lower bound areas for adaptive scales. Defaults to 800. max_area (int): Upper bound areas for adaptive scales. Defaults to 30000. Returns: ndarray: The adaptive scales with the shape of (n, ).
14,751
from typing import List, Tuple, Union import mmcv import numpy as np from mmengine.utils import is_str The provided code snippet includes necessary dependencies for implementing the `jitter_color` function. Write a Python function `def jitter_color(color: tuple) -> tuple` to solve the following problem: Randomly jitter the given color in order to better distinguish instances with the same class. Args: color (tuple): The RGB color tuple. Each value is between [0, 255]. Returns: tuple: The jittered color tuple. Here is the function: def jitter_color(color: tuple) -> tuple: """Randomly jitter the given color in order to better distinguish instances with the same class. Args: color (tuple): The RGB color tuple. Each value is between [0, 255]. Returns: tuple: The jittered color tuple. """ jitter = np.random.rand(3) jitter = (jitter / np.linalg.norm(jitter) - 0.5) * 0.5 * 255 color = np.clip(jitter + color, 0, 255).astype(np.uint8) return tuple(color)
Randomly jitter the given color in order to better distinguish instances with the same class. Args: color (tuple): The RGB color tuple. Each value is between [0, 255]. Returns: tuple: The jittered color tuple.
14,752
import datetime import itertools import os.path as osp import tempfile from typing import Dict, Optional, Sequence, Tuple, Union import mmcv import numpy as np from mmengine.evaluator import BaseMetric from mmengine.fileio import FileClient, dump, load from mmengine.logging import MMLogger, print_log from terminaltables import AsciiTable from mmdet.datasets.api_wrappers import COCOPanoptic from mmdet.registry import METRICS from ..functional import (INSTANCE_OFFSET, pq_compute_multi_core, pq_compute_single_core) The provided code snippet includes necessary dependencies for implementing the `parse_pq_results` function. Write a Python function `def parse_pq_results(pq_results: dict) -> dict` to solve the following problem: Parse the Panoptic Quality results. Args: pq_results (dict): Panoptic Quality results. Returns: dict: Panoptic Quality results parsed. Here is the function: def parse_pq_results(pq_results: dict) -> dict: """Parse the Panoptic Quality results. Args: pq_results (dict): Panoptic Quality results. Returns: dict: Panoptic Quality results parsed. """ result = dict() result['PQ'] = 100 * pq_results['All']['pq'] result['SQ'] = 100 * pq_results['All']['sq'] result['RQ'] = 100 * pq_results['All']['rq'] result['PQ_th'] = 100 * pq_results['Things']['pq'] result['SQ_th'] = 100 * pq_results['Things']['sq'] result['RQ_th'] = 100 * pq_results['Things']['rq'] result['PQ_st'] = 100 * pq_results['Stuff']['pq'] result['SQ_st'] = 100 * pq_results['Stuff']['sq'] result['RQ_st'] = 100 * pq_results['Stuff']['rq'] return result
Parse the Panoptic Quality results. Args: pq_results (dict): Panoptic Quality results. Returns: dict: Panoptic Quality results parsed.
14,753
import datetime import itertools import os.path as osp import tempfile from typing import Dict, Optional, Sequence, Tuple, Union import mmcv import numpy as np from mmengine.evaluator import BaseMetric from mmengine.fileio import FileClient, dump, load from mmengine.logging import MMLogger, print_log from terminaltables import AsciiTable from mmdet.datasets.api_wrappers import COCOPanoptic from mmdet.registry import METRICS from ..functional import (INSTANCE_OFFSET, pq_compute_multi_core, pq_compute_single_core) The provided code snippet includes necessary dependencies for implementing the `print_panoptic_table` function. Write a Python function `def print_panoptic_table( pq_results: dict, classwise_results: Optional[dict] = None, logger: Optional[Union['MMLogger', str]] = None) -> None` to solve the following problem: Print the panoptic evaluation results table. Args: pq_results(dict): The Panoptic Quality results. classwise_results(dict, optional): The classwise Panoptic Quality. results. The keys are class names and the values are metrics. Defaults to None. logger (:obj:`MMLogger` | str, optional): Logger used for printing related information during evaluation. Default: None. Here is the function: def print_panoptic_table( pq_results: dict, classwise_results: Optional[dict] = None, logger: Optional[Union['MMLogger', str]] = None) -> None: """Print the panoptic evaluation results table. Args: pq_results(dict): The Panoptic Quality results. classwise_results(dict, optional): The classwise Panoptic Quality. results. The keys are class names and the values are metrics. Defaults to None. logger (:obj:`MMLogger` | str, optional): Logger used for printing related information during evaluation. Default: None. 
""" headers = ['', 'PQ', 'SQ', 'RQ', 'categories'] data = [headers] for name in ['All', 'Things', 'Stuff']: numbers = [ f'{(pq_results[name][k] * 100):0.3f}' for k in ['pq', 'sq', 'rq'] ] row = [name] + numbers + [pq_results[name]['n']] data.append(row) table = AsciiTable(data) print_log('Panoptic Evaluation Results:\n' + table.table, logger=logger) if classwise_results is not None: class_metrics = [(name, ) + tuple(f'{(metrics[k] * 100):0.3f}' for k in ['pq', 'sq', 'rq']) for name, metrics in classwise_results.items()] num_columns = min(8, len(class_metrics) * 4) results_flatten = list(itertools.chain(*class_metrics)) headers = ['category', 'PQ', 'SQ', 'RQ'] * (num_columns // 4) results_2d = itertools.zip_longest( *[results_flatten[i::num_columns] for i in range(num_columns)]) data = [headers] data += [result for result in results_2d] table = AsciiTable(data) print_log( 'Classwise Panoptic Evaluation Results:\n' + table.table, logger=logger)
Print the panoptic evaluation results table. Args: pq_results(dict): The Panoptic Quality results. classwise_results(dict, optional): The classwise Panoptic Quality. results. The keys are class names and the values are metrics. Defaults to None. logger (:obj:`MMLogger` | str, optional): Logger used for printing related information during evaluation. Default: None.
14,754
from mmengine.utils import is_str The provided code snippet includes necessary dependencies for implementing the `wider_face_classes` function. Write a Python function `def wider_face_classes() -> list` to solve the following problem: Class names of WIDERFace. Here is the function: def wider_face_classes() -> list: """Class names of WIDERFace.""" return ['face']
Class names of WIDERFace.
14,755
from mmengine.utils import is_str The provided code snippet includes necessary dependencies for implementing the `voc_classes` function. Write a Python function `def voc_classes() -> list` to solve the following problem: Class names of PASCAL VOC. Here is the function: def voc_classes() -> list: """Class names of PASCAL VOC.""" return [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ]
Class names of PASCAL VOC.