code
stringlengths
17
6.64M
class MethodAveragePrecision(Enum):
    """Enumerates the interpolation methods used to compute Average Precision.

    EveryPointInterpolation: integrate precision over every recall point.
    ElevenPointInterpolation: average precision at 11 equally spaced recall
    levels (PASCAL VOC 2007 style).

    The original docstring was copy-pasted from CoordinatesType and described
    relative/absolute coordinates instead of AP interpolation.

    Developed by: Rafael Padilla
    Last modification: Apr 28 2018
    """
    EveryPointInterpolation = 1
    ElevenPointInterpolation = 2
class CoordinatesType(Enum):
    """Indicates whether bounding-box coordinates are expressed relative to
    the image size or as absolute pixel values.

    Developed by: Rafael Padilla
    Last modification: Apr 28 2018
    """
    Relative = 1
    Absolute = 2
class BBType(Enum):
    """Distinguishes ground-truth bounding boxes from detected ones.

    Developed by: Rafael Padilla
    Last modification: May 24 2018
    """
    GroundTruth = 1
    Detected = 2
class BBFormat(Enum):
    """Represents the storage format of a bounding box:
    (X, Y, width, height) => XYWH, or (X1, Y1, X2, Y2) => XYX2Y2.

    Developed by: Rafael Padilla
    Last modification: May 24 2018
    """
    XYWH = 1
    XYX2Y2 = 2
def convertToRelativeValues(size, box):
    """Convert an absolute box (xmin, xmax, ymin, ymax) into relative
    (x_center, y_center, width, height) normalized by the image size.

    Args:
        size: (image_width, image_height) tuple.
        box: (xmin, xmax, ymin, ymax) absolute coordinates.

    Returns:
        Tuple (x_center, y_center, width, height) with values scaled by the
        reciprocal of the corresponding image dimension.
    """
    img_w, img_h = size
    x_min, x_max, y_min, y_max = box
    # Multiply by reciprocals (matches the original arithmetic exactly).
    scale_x = 1.0 / img_w
    scale_y = 1.0 / img_h
    center_x = (x_max + x_min) / 2.0
    center_y = (y_max + y_min) / 2.0
    box_w = x_max - x_min
    box_h = y_max - y_min
    return (center_x * scale_x, center_y * scale_y, box_w * scale_x, box_h * scale_y)
def convertToAbsoluteValues(size, box):
    """Convert a relative (x_center, y_center, width, height) box into
    absolute (xmin, ymin, xmax, ymax) pixel coordinates clamped to the image.

    Args:
        size: (image_width, image_height) tuple.
        box: (x_center, y_center, width, height), all relative to image size.

    Returns:
        Tuple (xmin, ymin, xmax, ymax) of integers, clamped so that the
        minimum corner is >= 0 and the maximum corner is < the image extent.
    """
    img_w, img_h = size
    # xmin = (x_center - width/2) * img_w, written so that Python's banker's
    # rounding is applied at exactly the same points as the original code.
    x_min = round((2 * float(box[0]) - float(box[2])) * img_w / 2)
    y_min = round((2 * float(box[1]) - float(box[3])) * img_h / 2)
    x_max = x_min + round(float(box[2]) * img_w)
    y_max = y_min + round(float(box[3]) * img_h)
    # Clamp to the image boundaries (note: x_max/y_max were computed from the
    # unclamped minimum corner, as in the original).
    x_min = max(x_min, 0)
    y_min = max(y_min, 0)
    if x_max >= img_w:
        x_max = img_w - 1
    if y_max >= img_h:
        y_max = img_h - 1
    return (x_min, y_min, x_max, y_max)
def add_bb_into_image(image, bb, color=(255, 0, 0), thickness=2, label=None):
    """Draw a bounding box (and an optional label banner) onto an image.

    Args:
        image: image array as used by OpenCV drawing primitives.
        bb: bounding box object exposing getAbsoluteBoundingBox().
        color: (R, G, B) tuple; channels are reordered to (B, G, R) for the
            cv2 calls.
        thickness: rectangle line thickness in pixels.
        label: optional text drawn inside a filled banner at the box top.

    Returns:
        The same image with the drawings applied.
    """
    red, green, blue = int(color[0]), int(color[1]), int(color[2])
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 0.5
    font_thickness = 1
    x1, y1, x2, y2 = bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    cv2.rectangle(image, (x1, y1), (x2, y2), (blue, green, red), thickness)
    if label is not None:
        text_w, text_h = cv2.getTextSize(label, font, font_scale, font_thickness)[0]
        # Anchor the label just inside the top-left corner of the box.
        text_x = x1 + thickness
        text_y = (y1 - text_h) + int(12.5 * font_scale)
        if text_y - text_h <= 0:
            # Too close to the image top; draw the text below the box line.
            text_y = y1 + text_h
        banner_x = x1 - int(thickness / 2)
        banner_y = (y1 - text_h) - int(thickness / 2)
        # Filled banner behind the text (negative thickness = filled).
        cv2.rectangle(
            image,
            (banner_x, banner_y - thickness),
            (banner_x + text_w + thickness * 3, banner_y + text_h + int(12.5 * font_scale)),
            (blue, green, red),
            -1,
        )
        cv2.putText(image, label, (text_x, text_y), font, font_scale, (0, 0, 0), font_thickness, cv2.LINE_AA)
    return image
def ValidateFormats(argFormat, argName, errors):
    """Map a command-line box-format string onto a BBFormat value.

    Accepts 'xywh', 'xyrb', or None (which defaults to XYWH). Any other
    value appends an error message to `errors` and returns None.
    """
    if argFormat == 'xywh':
        return BBFormat.XYWH
    if argFormat == 'xyrb':
        return BBFormat.XYX2Y2
    if argFormat is None:
        # Unspecified format defaults to (x, y, width, height).
        return BBFormat.XYWH
    errors.append("argument %s: invalid value. It must be either 'xywh' or 'xyrb'" % argName)
def ValidateMandatoryArgs(arg, argName, errors):
    """Return True when a mandatory argument is present.

    When `arg` is None, a 'required argument' message is appended to
    `errors` and None is returned (falsy, like the original implicit return).
    """
    if arg is None:
        errors.append('argument %s: required argument' % argName)
        return None
    return True
def ValidateImageSize(arg, argName, argInformed, errors):
    """Parse and validate a 'width,height' image-size argument.

    Args:
        arg: raw string such as '600,400' or '(600,400)', or None.
        argName: name of this argument, used in error messages.
        argInformed: name of the related argument that makes this one
            mandatory, used in error messages.
        errors: list that collects validation error messages.

    Returns:
        (width, height) as an int tuple when valid, otherwise None.
    """
    errorMsg = 'argument %s: required argument if %s is relative' % (argName, argInformed)
    ret = None
    if arg is None:
        errors.append(errorMsg)
    else:
        # Tolerate optional surrounding parentheses, e.g. '(600,400)'.
        arg = arg.replace('(', '').replace(')', '')
        args = arg.split(',')
        if len(args) != 2:
            errors.append("%s. It must be in the format 'width,height' (e.g. '600,400')" % errorMsg)
        elif not args[0].isdigit() or not args[1].isdigit():
            # Fixed garbled word in the original message ('INdiaTEGER').
            errors.append("%s. It must be in INTEGER the format 'width,height' (e.g. '600,400')" % errorMsg)
        else:
            ret = (int(args[0]), int(args[1]))
    return ret
def ValidateCoordinatesTypes(arg, argName, errors):
    """Map a command-line coordinates-type string onto a CoordinatesType.

    Accepts 'abs', 'rel', or None (defaults to Absolute). Any other value
    appends an error message to `errors` and returns None.
    """
    if arg == 'abs':
        return CoordinatesType.Absolute
    if arg == 'rel':
        return CoordinatesType.Relative
    if arg is None:
        # Unspecified type defaults to absolute pixel coordinates.
        return CoordinatesType.Absolute
    errors.append("argument %s: invalid value. It must be either 'rel' or 'abs'" % argName)
def ValidatePaths(arg, nameArg, errors):
    """Validate a directory argument and resolve it against `currentPath`.

    Appends an error to `errors` when `arg` is None or the directory exists
    neither as given nor relative to the module-level `currentPath`;
    otherwise returns the path joined with `currentPath`.
    """
    if arg is None:
        errors.append('argument %s: invalid directory' % nameArg)
    elif not os.path.isdir(arg) and not os.path.isdir(os.path.join(currentPath, arg)):
        errors.append("argument %s: directory does not exist '%s'" % (nameArg, arg))
    else:
        # NOTE: joining an absolute path discards currentPath, so absolute
        # directories pass through unchanged.
        arg = os.path.join(currentPath, arg)
    return arg
def getBoundingBoxes(directory, isGT, bbFormat, coordType, allBoundingBoxes=None, allClasses=None, imgSize=(0, 0)):
    """Read txt files containing bounding boxes (ground truth and detections).

    Each .txt file in `directory` is named after its image and holds one box
    per line: `class x y w h` for ground truth, or `class confidence x y w h`
    for detections.

    Args:
        directory: folder containing the annotation .txt files. NOTE: the
            process working directory is changed to this folder (original
            behaviour, preserved for compatibility).
        isGT: True when reading ground-truth files, False for detections.
        bbFormat: BBFormat of the stored coordinates.
        coordType: CoordinatesType (relative or absolute coordinates).
        allBoundingBoxes: optional BoundingBoxes collection to append to.
        allClasses: optional list of class ids to extend.
        imgSize: (width, height), needed when coordinates are relative.

    Returns:
        Tuple (allBoundingBoxes, allClasses) with the parsed boxes and every
        class id encountered.
    """
    if allBoundingBoxes is None:
        allBoundingBoxes = BoundingBoxes()
    if allClasses is None:
        allClasses = []
    os.chdir(directory)
    files = glob.glob('*.txt')
    files.sort()
    for f in files:
        nameOfImage = f.replace('.txt', '')
        # 'with' guarantees the handle is closed even if a line fails to
        # parse; the original open/close pair leaked the file on error.
        with open(f, 'r') as fh1:
            for line in fh1:
                line = line.replace('\n', '')
                if line.replace(' ', '') == '':
                    continue
                splitLine = line.split(' ')
                if isGT:
                    idClass = splitLine[0]
                    x = float(splitLine[1])
                    y = float(splitLine[2])
                    w = float(splitLine[3])
                    h = float(splitLine[4])
                    bb = BoundingBox(nameOfImage, idClass, x, y, w, h, coordType, imgSize, BBType.GroundTruth, format=bbFormat)
                else:
                    idClass = splitLine[0]
                    confidence = float(splitLine[1])
                    x = float(splitLine[2])
                    y = float(splitLine[3])
                    w = float(splitLine[4])
                    h = float(splitLine[5])
                    bb = BoundingBox(nameOfImage, idClass, x, y, w, h, coordType, imgSize, BBType.Detected, confidence, format=bbFormat)
                allBoundingBoxes.addBoundingBox(bb)
                if idClass not in allClasses:
                    allClasses.append(idClass)
    return (allBoundingBoxes, allClasses)
class DatasetCatalog(object):
    """Static catalog mapping dataset names to their on-disk resources."""

    DATA_DIR = 'data'
    DATASETS = {
        'jhmdb_train': {
            'video_root': 'jhmdb/videos',
            'ann_file': 'jhmdb/annotations/jhmdb_train_gt_min.json',
            'box_file': '',
            'eval_file_paths': {'labelmap_file': ''},
            'object_file': 'jhmdb/annotations/train_object_detection.json',
            'keypoints_file': 'jhmdb/annotations/jhmdb_train_person_bbox_kpts.json',
        },
        'jhmdb_val': {
            'video_root': 'jhmdb/videos',
            'ann_file': 'jhmdb/annotations/jhmdb_test_gt_min.json',
            'box_file': 'jhmdb/annotations/jhmdb_test_yowo_det_score.json',
            'eval_file_paths': {'csv_gt_file': '', 'labelmap_file': ''},
            'object_file': 'jhmdb/annotations/test_object_detection.json',
            'keypoints_file': 'jhmdb/annotations/jhmdb_test_person_bbox_kpts.json',
        },
    }

    @staticmethod
    def get(name):
        """Resolve a dataset name into a factory name and constructor args.

        Args:
            name: key into DATASETS.

        Returns:
            dict(factory='DatasetEngine', args=...) with all paths joined
            against DATA_DIR (an empty box_file is kept empty).

        Raises:
            RuntimeError: if `name` is not in the catalog. In the original,
            this raise was unreachable dead code after the return, so an
            unknown name surfaced as a bare KeyError instead.
        """
        if name not in DatasetCatalog.DATASETS:
            raise RuntimeError('Dataset not available: {}'.format(name))
        data_dir = DatasetCatalog.DATA_DIR
        attrs = DatasetCatalog.DATASETS[name]
        # An empty box_file entry means "no precomputed detection boxes";
        # keep it empty instead of joining it with data_dir.
        if attrs['box_file'] == '':
            box_file = ''
        else:
            box_file = os.path.join(data_dir, attrs['box_file'])
        args = dict(
            video_root=os.path.join(data_dir, attrs['video_root']),
            ann_file=os.path.join(data_dir, attrs['ann_file']),
            box_file=box_file,
            eval_file_paths={key: os.path.join(data_dir, attrs['eval_file_paths'][key]) for key in attrs['eval_file_paths']},
            object_file=os.path.join(data_dir, attrs['object_file']),
            keypoints_file=os.path.join(data_dir, attrs['keypoints_file']),
        )
        return dict(factory='DatasetEngine', args=args)
def build_dataset(cfg, dataset_list, transforms, dataset_catalog, is_train=True, object_transforms=None):
    """Instantiate the datasets named in `dataset_list`.

    Args:
        cfg: config object for the experiment.
        dataset_list (list[str]): names of the datasets to build, e.g.
            ava_video_train_v2.2, ava_video_val_v2.2.
        transforms (callable): transforms applied to each (clip, target) pair.
        dataset_catalog (DatasetCatalog): resolves names to factory args.
        is_train (bool): build for training (True) or testing (False).
        object_transforms: transforms applied to object boxes.

    Returns:
        For testing, the list of individual datasets; for training, a
        one-element list holding the (possibly concatenated) dataset.

    Raises:
        RuntimeError: if `dataset_list` is not a list/tuple.
    """
    if not isinstance(dataset_list, (list, tuple)):
        raise RuntimeError('dataset_list should be a list of strings, got {}'.format(dataset_list))
    built = []
    for name in dataset_list:
        data = dataset_catalog.get(name)
        factory = getattr(D, data['factory'])
        args = data['args']
        if data['factory'] == 'DatasetEngine':
            args['remove_clips_without_annotations'] = is_train
            args['frame_span'] = cfg.INPUT.FRAME_NUM * cfg.INPUT.FRAME_SAMPLE_RATE
            if is_train:
                # Training never consumes precomputed detection boxes.
                args['box_file'] = None
            else:
                args['box_thresh'] = cfg.TEST.BOX_THRESH
                args['action_thresh'] = cfg.TEST.ACTION_THRESH
            if has_object(cfg.MODEL.HIT_STRUCTURE):
                args['object_transforms'] = object_transforms
            else:
                args['object_file'] = None
        args['transforms'] = transforms
        built.append(factory(**args))
    if not is_train:
        # Evaluation keeps the datasets separate.
        return built
    merged = built[0]
    if len(built) > 1:
        merged = D.ConcatDataset(built)
    return [merged]
def make_data_sampler(dataset, shuffle, distributed):
    """Build an index sampler for `dataset`.

    Distributed runs get a DistributedSampler; otherwise a random or
    sequential sampler is chosen based on `shuffle`.
    """
    if distributed:
        return samplers.DistributedSampler(dataset, shuffle=shuffle)
    sampler_cls = (
        torch.utils.data.sampler.RandomSampler
        if shuffle
        else torch.utils.data.sampler.SequentialSampler
    )
    return sampler_cls(dataset)
def _quantize(x, bins): bins = copy.copy(bins) bins = sorted(bins) quantized = list(map((lambda y: bisect.bisect_right(bins, y)), x)) return quantized
def _compute_aspect_ratios(dataset): aspect_ratios = [] for i in range(len(dataset)): video_info = dataset.get_video_info(i) aspect_ratio = (float(video_info['height']) / float(video_info['width'])) aspect_ratios.append(aspect_ratio) return aspect_ratios
def make_batch_data_sampler(dataset, sampler, aspect_grouping, videos_per_batch, num_iters=None, start_iter=0, drop_last=False):
    """Wrap `sampler` into a batch sampler.

    When `aspect_grouping` is truthy, samples are grouped into batches of
    similar aspect ratio; otherwise a plain BatchSampler is used. When
    `num_iters` is given, the batch sampler is further wrapped to yield a
    fixed number of iterations starting at `start_iter`.
    """
    if not aspect_grouping:
        batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, videos_per_batch, drop_last=drop_last)
    else:
        groups = aspect_grouping if isinstance(aspect_grouping, (list, tuple)) else [aspect_grouping]
        group_ids = _quantize(_compute_aspect_ratios(dataset), groups)
        batch_sampler = samplers.GroupedBatchSampler(sampler, group_ids, videos_per_batch, drop_uneven=drop_last)
    if num_iters is not None:
        batch_sampler = samplers.IterationBasedBatchSampler(batch_sampler, num_iters, start_iter)
    return batch_sampler
def make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0):
    """Build the train/test DataLoader(s) for the experiment.

    Resolves the datasets from the paths catalog, wires up (distributed)
    samplers, optional aspect-ratio batch grouping, and the batch collator.
    Returns a single DataLoader for training and a list of DataLoaders for
    testing (one per test dataset).
    """
    num_gpus = get_world_size()
    if is_train:
        videos_per_batch = cfg.SOLVER.VIDEOS_PER_BATCH
        # The global batch size must split evenly across GPUs.
        assert ((videos_per_batch % num_gpus) == 0), 'SOLVER.VIDEOS_PER_BATCH ({}) must be divisible by the number ' 'of GPUs ({}) used.'.format(videos_per_batch, num_gpus)
        videos_per_gpu = (videos_per_batch // num_gpus)
        shuffle = True
        drop_last = True
        num_iters = cfg.SOLVER.MAX_ITER
    else:
        videos_per_batch = cfg.TEST.VIDEOS_PER_BATCH
        assert ((videos_per_batch % num_gpus) == 0), 'TEST.VIDEOS_PER_BATCH ({}) must be divisible by the number ' 'of GPUs ({}) used.'.format(videos_per_batch, num_gpus)
        videos_per_gpu = (videos_per_batch // num_gpus)
        # NOTE(review): shuffle is enabled for distributed evaluation
        # (presumably so the DistributedSampler shards evenly) and disabled
        # for single-process evaluation — confirm against samplers module.
        shuffle = (False if (not is_distributed) else True)
        drop_last = False
        num_iters = None
        start_iter = 0
    # Group batches by aspect ratio bucket [1] when enabled in the config.
    aspect_grouping = ([1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else [])
    DatasetCatalog = paths_catalog.DatasetCatalog
    dataset_list = (cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST)
    transforms = build_transforms(cfg, is_train)
    # Object-box transforms are only needed when the model structure
    # actually consumes object features.
    if has_object(cfg.MODEL.HIT_STRUCTURE):
        object_transforms = build_object_transforms(cfg, is_train=is_train)
    else:
        object_transforms = None
    datasets = build_dataset(cfg, dataset_list, transforms, DatasetCatalog, is_train, object_transforms)
    data_loaders = []
    for dataset in datasets:
        sampler = make_data_sampler(dataset, shuffle, is_distributed)
        batch_sampler = make_batch_data_sampler(dataset, sampler, aspect_grouping, videos_per_gpu, num_iters, start_iter, drop_last)
        collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = torch.utils.data.DataLoader(dataset, num_workers=num_workers, batch_sampler=batch_sampler, collate_fn=collator)
        data_loaders.append(data_loader)
    if is_train:
        # build_dataset concatenates training datasets into one, so exactly
        # one loader is expected here.
        assert (len(data_loaders) == 1)
        return data_loaders[0]
    return data_loaders
class ConcatDataset(_ConcatDataset):
    """Same as torch.utils.dataset.dataset.ConcatDataset, but exposes an
    extra method for querying the sizes of the image."""

    def get_idxs(self, idx):
        """Map a global sample index to (dataset_idx, local_sample_idx)."""
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            return (0, idx)
        return (dataset_idx, idx - self.cumulative_sizes[dataset_idx - 1])

    def get_video_info(self, idx):
        """Delegate get_video_info() to the member dataset owning `idx`."""
        dataset_idx, sample_idx = self.get_idxs(idx)
        return self.datasets[dataset_idx].get_video_info(sample_idx)
def evaluate(dataset, predictions, output_folder, **kwargs):
    """Evaluate a dataset, dispatching on the dataset's type.

    Args:
        dataset: Dataset object.
        predictions (list[BoxList]): per-image prediction results.
        output_folder: folder where evaluation files or results are saved.
        **kwargs: forwarded to the type-specific evaluator.

    Returns:
        The evaluation result.

    Raises:
        NotImplementedError: when no evaluator exists for the dataset type.
    """
    eval_args = dict(dataset=dataset, predictions=predictions, output_folder=output_folder, **kwargs)
    if isinstance(dataset, datasets.DatasetEngine):
        return jhmdb_evaluation(**eval_args)
    raise NotImplementedError('Unsupported dataset type {}.'.format(dataset.__class__.__name__))
def jhmdb_evaluation(dataset, predictions, output_folder, **_):
    """Run the JHMDB evaluation by delegating to save_jhmdb_results."""
    logger = logging.getLogger('hit.inference')
    logger.info('performing jhmdb evaluation.')
    return save_jhmdb_results(
        dataset=dataset,
        predictions=predictions,
        output_folder=output_folder,
        logger=logger,
    )
def _validate_label_map(label_map): 'Checks if a label map is valid.\n\n Args:\n label_map: StringIntLabelMap to validate.\n\n Raises:\n ValueError: if label map is invalid.\n ' for item in label_map.item: if (item.id < 1): raise ValueError('Label map ids should be >= 1.')
def create_category_index(categories):
    """Creates a dict of COCO-compatible categories keyed by category id.

    Args:
        categories: a list of dicts, each with an 'id' (int, unique) and a
            'name' (str) key, e.g. 'cat', 'dog', 'pizza'.

    Returns:
        A dict containing the same entries as `categories`, keyed by each
        category's 'id' field.
    """
    return {cat['id']: cat for cat in categories}
def get_max_label_map_index(label_map):
    """Return the maximum item id present in a StringIntLabelMapProto."""
    return max(entry.id for entry in label_map.item)
def convert_label_map_to_categories(label_map, max_num_classes, use_display_name=True):
    """Load a label map proto and return an eval-compatible categories list.

    Each category is a dict with keys 'id' and 'name'. Only items whose id
    lies in (0, max_num_classes] are kept; duplicate ids keep the first
    occurrence. A falsy `label_map` yields a default list of
    `max_num_classes` generic categories.

    Args:
        label_map: a StringIntLabelMapProto or None.
        max_num_classes: maximum number of (consecutive) label indices.
        use_display_name: prefer the 'display_name' field over 'name' when
            it is present on an item.

    Returns:
        A list of {'id': int, 'name': str} dicts.
    """
    if not label_map:
        # No label map given: fabricate ids 1..max_num_classes.
        offset = 1
        return [
            {'id': cid + offset, 'name': 'category_{}'.format(cid + offset)}
            for cid in range(max_num_classes)
        ]
    categories = []
    seen_ids = []
    for item in label_map.item:
        if not (0 < item.id <= max_num_classes):
            logger.info('Ignore item %d since it falls outside of requested label range.', item.id)
            continue
        if use_display_name and item.HasField('display_name'):
            name = item.display_name
        else:
            name = item.name
        if item.id not in seen_ids:
            seen_ids.append(item.id)
            categories.append({'id': item.id, 'name': name})
    return categories
def load_labelmap(path):
    """Load a StringIntLabelMap proto from the text file at `path`.

    Falls back to binary parsing when the text-format parse fails, then
    validates the map before returning it.

    Args:
        path: path to a StringIntLabelMap proto text file.

    Returns:
        A StringIntLabelMapProto.
    """
    with open(path, 'r') as fid:
        raw = fid.read()
    label_map = string_int_label_map_pb2.StringIntLabelMap()
    try:
        text_format.Merge(raw, label_map)
    except text_format.ParseError:
        label_map.ParseFromString(raw)
    _validate_label_map(label_map)
    return label_map
def get_label_map_dict(label_map_path, use_display_name=False):
    """Read a label map and return a mapping from label name to id.

    Args:
        label_map_path: path to the label map file.
        use_display_name: key the result by the items' display names
            instead of their names.

    Returns:
        A dict mapping label names to integer ids.
    """
    label_map = load_labelmap(label_map_path)
    result = {}
    for item in label_map.item:
        key = item.display_name if use_display_name else item.name
        result[key] = item.id
    return result
def create_category_index_from_labelmap(label_map_path):
    """Read a label map file and return a category index keyed by id.

    Args:
        label_map_path: path to a `StringIntLabelMap` proto text file.

    Returns:
        A dict mapping integer ids to category dicts, e.g.
        {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}.
    """
    label_map = load_labelmap(label_map_path)
    num_classes = max(item.id for item in label_map.item)
    return create_category_index(convert_label_map_to_categories(label_map, num_classes))
def create_class_agnostic_category_index():
    """Create a category index holding a single generic `object` class."""
    return {1: {'id': 1, 'name': 'object'}}
def compute_precision_recall(scores, labels, num_gt):
    """Compute precision and recall curves from detection scores.

    Args:
        scores: 1-D float numpy array of detection scores.
        labels: 1-D boolean numpy array of true/false-positive flags.
        num_gt: number of ground-truth instances.

    Raises:
        ValueError: if the inputs are malformed (wrong type/shape, more
            true positives than ground truth, or mismatched lengths).

    Returns:
        precision: fraction of positives among detections ranked by
            decreasing score, or None when num_gt == 0.
        recall: fraction of ground truth recovered at each rank, or None
            when num_gt == 0.
    """
    # np.bool was removed from NumPy (1.24+); compare against builtin bool,
    # which matches the np.bool_ dtype exactly.
    if not isinstance(labels, np.ndarray) or labels.dtype != bool or len(labels.shape) != 1:
        raise ValueError('labels must be single dimension bool numpy array')
    if not isinstance(scores, np.ndarray) or len(scores.shape) != 1:
        raise ValueError('scores must be single dimension numpy array')
    if num_gt < np.sum(labels):
        raise ValueError('Number of true positives must be smaller than num_gt.')
    if len(scores) != len(labels):
        raise ValueError('scores and labels must be of the same size.')
    if num_gt == 0:
        return (None, None)
    # Rank detections by decreasing score, then accumulate TP/FP counts.
    sorted_indices = np.argsort(scores)[::-1]
    true_positive_labels = labels.astype(int)[sorted_indices]
    false_positive_labels = 1 - true_positive_labels
    cum_true_positives = np.cumsum(true_positive_labels)
    cum_false_positives = np.cumsum(false_positive_labels)
    precision = cum_true_positives.astype(float) / (cum_true_positives + cum_false_positives)
    recall = cum_true_positives.astype(float) / num_gt
    return (precision, recall)
def compute_average_precision(precision, recall):
    """Compute Average Precision per the VOCdevkit definition.

    Precision is first made monotonically non-increasing with recall, then
    the area under the precision-recall curve is integrated over the recall
    steps.

    Args:
        precision: float [N] numpy array of precisions in [0, 1].
        recall: float [N] numpy array of non-decreasing recalls in [0, 1].

    Raises:
        ValueError: if the inputs are malformed or out of range.

    Returns:
        The area under the precision-recall curve, or NaN when both inputs
        are None.
    """
    if precision is None:
        if recall is not None:
            raise ValueError('If precision is None, recall must also be None')
        # np.NAN was removed in NumPy 2.0; np.nan is the canonical spelling.
        return np.nan
    if not isinstance(precision, np.ndarray) or not isinstance(recall, np.ndarray):
        raise ValueError('precision and recall must be numpy array')
    # np.float was removed from NumPy (1.24+); builtin float matches float64.
    if precision.dtype != float or recall.dtype != float:
        raise ValueError('input must be float numpy array.')
    if len(precision) != len(recall):
        raise ValueError('precision and recall must be of the same size.')
    if not precision.size:
        return 0.0
    if np.amin(precision) < 0 or np.amax(precision) > 1:
        raise ValueError('Precision must be in the range of [0, 1].')
    if np.amin(recall) < 0 or np.amax(recall) > 1:
        raise ValueError('recall must be in the range of [0, 1].')
    if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
        raise ValueError('recall must be a non-decreasing array')
    # Pad the curve so it spans recall 0..1, then enforce that precision
    # never increases as recall increases (right-to-left running max).
    recall = np.concatenate([[0], recall, [1]])
    precision = np.concatenate([[0], precision, [0]])
    for i in range(len(precision) - 2, -1, -1):
        precision[i] = np.maximum(precision[i], precision[i + 1])
    # Sum precision over the recall steps where recall actually increases.
    indices = np.where(recall[1:] != recall[:-1])[0] + 1
    average_precision = np.sum((recall[indices] - recall[indices - 1]) * precision[indices])
    return average_precision
def compute_cor_loc(num_gt_imgs_per_class, num_images_correctly_detected_per_class):
    """Compute per-class CorLoc as defined in Deselaers et al., ECCV 2010
    (https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf).

    Args:
        num_gt_imgs_per_class: 1-D array, number of images containing at
            least one instance of each class.
        num_images_correctly_detected_per_class: 1-D array, number of images
            with at least one correctly detected instance per class.

    Returns:
        Float numpy array of per-class CorLoc scores; NaN for classes with
        no ground-truth images.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        ratio = num_images_correctly_detected_per_class / num_gt_imgs_per_class
        return np.where(num_gt_imgs_per_class == 0, np.nan, ratio)
class BoxMaskList(np_box_list.BoxList):
    """BoxList that also carries per-box instance masks.

    Extends np_box_list.BoxList so each box is paired with a binary mask
    covering the full image.
    """

    def __init__(self, box_data, mask_data):
        """Construct the collection.

        Args:
            box_data: numpy array of shape [N, 4] with box coordinates.
            mask_data: numpy uint8 array of shape [N, height, width] with
                values in {0, 1}; height and width equal the image size.

        Raises:
            ValueError: if mask_data is not a rank-3 uint8 numpy array whose
                leading dimension matches box_data.
        """
        super(BoxMaskList, self).__init__(box_data)
        if not isinstance(mask_data, np.ndarray):
            raise ValueError('Mask dataset must be a numpy array.')
        if len(mask_data.shape) != 3:
            raise ValueError('Invalid dimensions for mask dataset.')
        if mask_data.dtype != np.uint8:
            raise ValueError('Invalid dataset type for mask dataset: uint8 is required.')
        if mask_data.shape[0] != box_data.shape[0]:
            raise ValueError('There should be the same number of boxes and masks.')
        self.data['masks'] = mask_data

    def get_masks(self):
        """Return the [N, height, width] mask array."""
        return self.get_field('masks')
def area(masks):
    """Compute the area (pixel count) of each mask.

    Args:
        masks: uint8 numpy array [N, height, width] with values in {0, 1}.

    Returns:
        float32 numpy array of shape [N] holding the mask areas.

    Raises:
        ValueError: if masks.dtype is not np.uint8.
    """
    if masks.dtype != np.uint8:
        raise ValueError('Masks type should be np.uint8')
    # Summing a {0,1} mask over its spatial axes yields its pixel count.
    return masks.sum(axis=(1, 2), dtype=np.float32)
def intersection(masks1, masks2):
    """Compute pairwise intersection areas between two mask collections.

    Args:
        masks1: uint8 numpy array [N, height, width], values in {0, 1}.
        masks2: uint8 numpy array [M, height, width], values in {0, 1}.

    Returns:
        float32 numpy array of shape [N, M] with pairwise intersection areas.

    Raises:
        ValueError: if either input is not of type np.uint8.
    """
    if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
        raise ValueError('masks1 and masks2 should be of type np.uint8')
    n = masks1.shape[0]
    m = masks2.shape[0]
    result = np.zeros([n, m], dtype=np.float32)
    for row in range(n):
        for col in range(m):
            # Elementwise minimum of two {0,1} masks is their logical AND.
            result[row, col] = np.sum(np.minimum(masks1[row], masks2[col]), dtype=np.float32)
    return result
def iou(masks1, masks2):
    """Compute pairwise intersection-over-union between mask collections.

    Args:
        masks1: uint8 numpy array [N, height, width], values in {0, 1}.
        masks2: uint8 numpy array [M, height, width], values in {0, 1}.

    Returns:
        float32 numpy array of shape [N, M] with pairwise IoU scores.

    Raises:
        ValueError: if either input is not of type np.uint8.
    """
    if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
        raise ValueError('masks1 and masks2 should be of type np.uint8')
    intersect = intersection(masks1, masks2)
    area1 = area(masks1)
    area2 = area(masks2)
    # |A ∪ B| = |A| + |B| - |A ∩ B|, broadcast onto the [N, M] grid; EPSILON
    # guards the division when both masks are empty.
    union = np.expand_dims(area1, axis=1) + np.expand_dims(area2, axis=0) - intersect
    return intersect / np.maximum(union, EPSILON)
def ioa(masks1, masks2):
    """Compute pairwise intersection-over-area between mask collections.

    IOA(mask1, mask2) is their intersection area divided by mask2's area;
    note it is not symmetric: IOA(mask1, mask2) != IOA(mask2, mask1).

    Args:
        masks1: uint8 numpy array [N, height, width], values in {0, 1}.
        masks2: uint8 numpy array [M, height, width], values in {0, 1}.

    Returns:
        float32 numpy array of shape [N, M] with pairwise IOA scores.

    Raises:
        ValueError: if either input is not of type np.uint8.
    """
    if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
        raise ValueError('masks1 and masks2 should be of type np.uint8')
    intersect = intersection(masks1, masks2)
    # EPSILON avoids division by zero for empty masks in masks2.
    areas = np.expand_dims(area(masks2), axis=0)
    return intersect / (areas + EPSILON)
class DetectionEvaluator(object):
    """Interface for object detection evaluation classes.

    Example usage of the Evaluator:
    ------------------------------
    evaluator = DetectionEvaluator(categories)

    # Detections and groundtruth for image 1.
    evaluator.add_single_ground_truth_image_info(...)
    evaluator.add_single_detected_image_info(...)

    # Detections and groundtruth for image 2.
    evaluator.add_single_ground_truth_image_info(...)
    evaluator.add_single_detected_image_info(...)

    metrics_dict = evaluator.evaluate()
    """
    # NOTE(review): `__metaclass__` is the Python 2 spelling and has no
    # effect on Python 3, so the @abstractmethod decorators below are not
    # enforced there. The Python 3 form would be
    # `class DetectionEvaluator(metaclass=ABCMeta)`; left unchanged here to
    # avoid altering instantiation behaviour of unseen subclasses.
    __metaclass__ = ABCMeta

    def __init__(self, categories):
        """Constructor.

        Args:
            categories: A list of dicts, each of which has the following keys -
                'id': (required) an integer id uniquely identifying this category.
                'name': (required) string representing category name e.g., 'cat', 'dog'.
        """
        self._categories = categories

    @abstractmethod
    def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
        """Adds groundtruth for a single image to be used for evaluation.

        Args:
            image_id: A unique string/integer identifier for the image.
            groundtruth_dict: A dictionary of groundtruth numpy arrays required
                for evaluations.
        """
        pass

    @abstractmethod
    def add_single_detected_image_info(self, image_id, detections_dict):
        """Adds detections for a single image to be used for evaluation.

        Args:
            image_id: A unique string/integer identifier for the image.
            detections_dict: A dictionary of detection numpy arrays required
                for evaluation.
        """
        pass

    @abstractmethod
    def evaluate(self):
        """Evaluates detections and returns a dictionary of metrics."""
        pass

    @abstractmethod
    def clear(self):
        """Clears the state to prepare for a fresh evaluation."""
        pass
class ObjectDetectionEvaluator(DetectionEvaluator):
    """A class to evaluate detections.

    Accumulates per-image groundtruth and detections into an internal
    ObjectDetectionEvaluation instance and reports PASCAL-style metrics.
    """

    def __init__(self, categories, matching_iou_threshold=0.5, evaluate_corlocs=False, metric_prefix=None, use_weighted_mean_ap=False, evaluate_masks=False):
        """Constructor.

        Args:
          categories: A list of dicts, each of which has the following keys -
            'id': (required) an integer id uniquely identifying this category.
            'name': (required) string representing category name e.g., 'cat', 'dog'.
          matching_iou_threshold: IOU threshold to use for matching groundtruth
            boxes to detection boxes.
          evaluate_corlocs: (optional) boolean which determines if corloc scores
            are to be returned or not.
          metric_prefix: (optional) string prefix for metric name; if None, no
            prefix is used.
          use_weighted_mean_ap: (optional) boolean which determines if the mean
            average precision is computed directly from the scores and
            tp_fp_labels of all classes.
          evaluate_masks: If False, evaluation will be performed based on boxes.
            If True, mask evaluation will be performed instead.

        Raises:
          ValueError: If the category ids are not 1-indexed.
        """
        super(ObjectDetectionEvaluator, self).__init__(categories)
        # Ids are assumed 1-indexed, so the maximum id doubles as the class count.
        self._num_classes = max([cat['id'] for cat in categories])
        if min((cat['id'] for cat in categories)) < 1:
            raise ValueError('Classes should be 1-indexed.')
        self._matching_iou_threshold = matching_iou_threshold
        self._use_weighted_mean_ap = use_weighted_mean_ap
        # Subtracted from incoming 1-indexed labels to produce 0-indexed labels
        # for the internal evaluation object.
        self._label_id_offset = 1
        self._evaluate_masks = evaluate_masks
        self._evaluation = ObjectDetectionEvaluation(num_groundtruth_classes=self._num_classes, matching_iou_threshold=self._matching_iou_threshold, use_weighted_mean_ap=self._use_weighted_mean_ap, label_id_offset=self._label_id_offset)
        self._image_ids = set([])
        self._evaluate_corlocs = evaluate_corlocs
        self._metric_prefix = ((metric_prefix + '_') if metric_prefix else '')

    def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
        """Adds groundtruth for a single image to be used for evaluation.

        Args:
          image_id: A unique string/integer identifier for the image.
          groundtruth_dict: A dictionary containing -
            standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
              of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
              the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
            standard_fields.InputDataFields.groundtruth_classes: integer numpy array
              of shape [num_boxes] containing 1-indexed groundtruth classes for the
              boxes.
            standard_fields.InputDataFields.groundtruth_difficult: Optional length
              M numpy boolean array denoting whether a ground truth box is a
              difficult instance or not. This field is optional to support the case
              that no boxes are difficult.
            standard_fields.InputDataFields.groundtruth_instance_masks: Optional
              numpy array of shape [num_boxes, height, width] with values in {0, 1}.

        Raises:
          ValueError: On adding groundtruth for an image more than once. Will also
            raise error if instance masks are not in groundtruth dictionary.
        """
        if image_id in self._image_ids:
            raise ValueError('Image with id {} already added.'.format(image_id))
        groundtruth_classes = (groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] - self._label_id_offset)
        # Use the difficult flags only when they are present and non-empty, or
        # when there are no groundtruth boxes at all (then "empty" is valid).
        if ((standard_fields.InputDataFields.groundtruth_difficult in groundtruth_dict.keys()) and (groundtruth_dict[standard_fields.InputDataFields.groundtruth_difficult].size or (not groundtruth_classes.size))):
            groundtruth_difficult = groundtruth_dict[standard_fields.InputDataFields.groundtruth_difficult]
        else:
            groundtruth_difficult = None
            # Warn only once per 1000 added images to avoid flooding the log.
            if not (len(self._image_ids) % 1000):
                logger.warning('image %s does not have groundtruth difficult flag specified', image_id)
        groundtruth_masks = None
        if self._evaluate_masks:
            if (standard_fields.InputDataFields.groundtruth_instance_masks not in groundtruth_dict):
                raise ValueError('Instance masks not in groundtruth dictionary.')
            groundtruth_masks = groundtruth_dict[standard_fields.InputDataFields.groundtruth_instance_masks]
        self._evaluation.add_single_ground_truth_image_info(image_key=image_id, groundtruth_boxes=groundtruth_dict[standard_fields.InputDataFields.groundtruth_boxes], groundtruth_class_labels=groundtruth_classes, groundtruth_is_difficult_list=groundtruth_difficult, groundtruth_masks=groundtruth_masks)
        self._image_ids.update([image_id])

    def add_single_detected_image_info(self, image_id, detections_dict):
        """Adds detections for a single image to be used for evaluation.

        Args:
          image_id: A unique string/integer identifier for the image.
          detections_dict: A dictionary containing -
            standard_fields.DetectionResultFields.detection_boxes: float32 numpy
              array of shape [num_boxes, 4] containing `num_boxes` detection boxes
              of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
            standard_fields.DetectionResultFields.detection_scores: float32 numpy
              array of shape [num_boxes] containing detection scores for the boxes.
            standard_fields.DetectionResultFields.detection_classes: integer numpy
              array of shape [num_boxes] containing 1-indexed detection classes for
              the boxes.
            standard_fields.DetectionResultFields.detection_masks: uint8 numpy
              array of shape [num_boxes, height, width] containing `num_boxes` masks
              of values ranging between 0 and 1.

        Raises:
          ValueError: If detection masks are not in detections dictionary.
        """
        detection_classes = (detections_dict[standard_fields.DetectionResultFields.detection_classes] - self._label_id_offset)
        detection_masks = None
        if self._evaluate_masks:
            if (standard_fields.DetectionResultFields.detection_masks not in detections_dict):
                raise ValueError('Detection masks not in detections dictionary.')
            detection_masks = detections_dict[standard_fields.DetectionResultFields.detection_masks]
        self._evaluation.add_single_detected_image_info(image_key=image_id, detected_boxes=detections_dict[standard_fields.DetectionResultFields.detection_boxes], detected_scores=detections_dict[standard_fields.DetectionResultFields.detection_scores], detected_class_labels=detection_classes, detected_masks=detection_masks)

    def evaluate(self):
        """Compute evaluation result.

        Returns:
          A dictionary of metrics with the following fields -

          1. summary_metrics:
            'Precision/mAP@<matching_iou_threshold>IOU': mean average precision at
            the specified IOU threshold.

          2. per_category_ap: category specific results with keys of the form
            'PerformanceByCategory/mAP@<matching_iou_threshold>IOU/category'.
        """
        (per_class_ap, mean_ap, _, _, per_class_corloc, mean_corloc) = self._evaluation.evaluate()
        pascal_metrics = {(self._metric_prefix + 'Precision/mAP@{}IOU'.format(self._matching_iou_threshold)): mean_ap}
        if self._evaluate_corlocs:
            pascal_metrics[(self._metric_prefix + 'Precision/meanCorLoc@{}IOU'.format(self._matching_iou_threshold))] = mean_corloc
        category_index = label_map_util.create_category_index(self._categories)
        # per_class_ap is 0-indexed; add the label offset back to find the
        # category's display name in the 1-indexed category index.
        for idx in range(per_class_ap.size):
            if (idx + self._label_id_offset) in category_index:
                display_name = (self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(self._matching_iou_threshold, category_index[(idx + self._label_id_offset)]['name']))
                pascal_metrics[display_name] = per_class_ap[idx]
                if self._evaluate_corlocs:
                    display_name = (self._metric_prefix + 'PerformanceByCategory/CorLoc@{}IOU/{}'.format(self._matching_iou_threshold, category_index[(idx + self._label_id_offset)]['name']))
                    pascal_metrics[display_name] = per_class_corloc[idx]
        return pascal_metrics

    def clear(self):
        """Clears the state to prepare for a fresh evaluation."""
        self._evaluation = ObjectDetectionEvaluation(num_groundtruth_classes=self._num_classes, matching_iou_threshold=self._matching_iou_threshold, use_weighted_mean_ap=self._use_weighted_mean_ap, label_id_offset=self._label_id_offset)
        self._image_ids.clear()
class PascalDetectionEvaluator(ObjectDetectionEvaluator):
    """Evaluates box detections with the standard PASCAL VOC metrics."""

    def __init__(self, categories, matching_iou_threshold=0.5):
        """Configure the base evaluator with PASCAL box defaults."""
        super(PascalDetectionEvaluator, self).__init__(
            categories,
            matching_iou_threshold=matching_iou_threshold,
            evaluate_corlocs=False,
            metric_prefix='PascalBoxes',
            use_weighted_mean_ap=False)
class WeightedPascalDetectionEvaluator(ObjectDetectionEvaluator):
    """Evaluates detections with weighted PASCAL metrics.

    Weighted PASCAL metrics compute mean average precision as the average
    precision over the pooled scores and tp_fp_labels of all classes, rather
    than the mean of per-class average precisions. This is similar to (but not
    generally equal to) a class-frequency-weighted mean of per-class APs,
    because average precision is not a linear function of the scores and
    tp_fp_labels.
    """

    def __init__(self, categories, matching_iou_threshold=0.5):
        """Configure the base evaluator for weighted PASCAL box metrics."""
        super(WeightedPascalDetectionEvaluator, self).__init__(
            categories,
            matching_iou_threshold=matching_iou_threshold,
            evaluate_corlocs=False,
            metric_prefix='WeightedPascalBoxes',
            use_weighted_mean_ap=True)
class PascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
    """Evaluates instance masks with the standard PASCAL VOC metrics."""

    def __init__(self, categories, matching_iou_threshold=0.5):
        """Configure the base evaluator for PASCAL mask evaluation."""
        super(PascalInstanceSegmentationEvaluator, self).__init__(
            categories,
            matching_iou_threshold=matching_iou_threshold,
            evaluate_corlocs=False,
            metric_prefix='PascalMasks',
            use_weighted_mean_ap=False,
            evaluate_masks=True)
class WeightedPascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
    """Evaluates instance masks with weighted PASCAL metrics.

    Weighted PASCAL metrics compute mean average precision as the average
    precision over the pooled scores and tp_fp_labels of all classes, rather
    than the mean of per-class average precisions. This is similar to (but not
    generally equal to) a class-frequency-weighted mean of per-class APs,
    because average precision is not a linear function of the scores and
    tp_fp_labels.
    """

    def __init__(self, categories, matching_iou_threshold=0.5):
        """Configure the base evaluator for weighted PASCAL mask metrics."""
        super(WeightedPascalInstanceSegmentationEvaluator, self).__init__(
            categories,
            matching_iou_threshold=matching_iou_threshold,
            evaluate_corlocs=False,
            metric_prefix='WeightedPascalMasks',
            use_weighted_mean_ap=True,
            evaluate_masks=True)
class OpenImagesDetectionEvaluator(ObjectDetectionEvaluator):
    """Evaluates detections using Open Images V2 metrics.

    Open Images V2 introduces a group_of flavor of bounding box; this
    evaluator forwards that flag so those boxes are handled appropriately.
    """

    def __init__(self, categories, matching_iou_threshold=0.5, evaluate_corlocs=False):
        """Constructor.

        Args:
          categories: A list of dicts, each of which has the following keys -
            'id': (required) an integer id uniquely identifying this category.
            'name': (required) string representing category name,
              e.g. 'cat', 'dog'.
          matching_iou_threshold: IOU threshold to use for matching groundtruth
            boxes to detection boxes.
          evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
        """
        super(OpenImagesDetectionEvaluator, self).__init__(
            categories,
            matching_iou_threshold,
            evaluate_corlocs,
            metric_prefix='OpenImagesV2')

    def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
        """Adds groundtruth for a single image to be used for evaluation.

        Args:
          image_id: A unique string/integer identifier for the image.
          groundtruth_dict: A dictionary containing -
            standard_fields.InputDataFields.groundtruth_boxes: float32 numpy
              array of shape [num_boxes, 4] containing `num_boxes` groundtruth
              boxes of the format [ymin, xmin, ymax, xmax] in absolute image
              coordinates.
            standard_fields.InputDataFields.groundtruth_classes: integer numpy
              array of shape [num_boxes] containing 1-indexed groundtruth
              classes for the boxes.
            standard_fields.InputDataFields.groundtruth_group_of: Optional
              length M numpy boolean array denoting whether a groundtruth box
              contains a group of instances.

        Raises:
          ValueError: On adding groundtruth for an image more than once.
        """
        if image_id in self._image_ids:
            raise ValueError('Image with id {} already added.'.format(image_id))
        input_fields = standard_fields.InputDataFields
        groundtruth_classes = (
            groundtruth_dict[input_fields.groundtruth_classes]
            - self._label_id_offset)
        # Accept the group_of flags only when they are present and non-empty,
        # or when there are no groundtruth boxes at all.
        has_group_of = (
            input_fields.groundtruth_group_of in groundtruth_dict.keys()
            and (groundtruth_dict[input_fields.groundtruth_group_of].size
                 or (not groundtruth_classes.size)))
        if has_group_of:
            groundtruth_group_of = groundtruth_dict[input_fields.groundtruth_group_of]
        else:
            groundtruth_group_of = None
            # Only log once per 1000 added images to keep the log readable.
            if not (len(self._image_ids) % 1000):
                logger.warning('image %s does not have groundtruth group_of flag specified', image_id)
        self._evaluation.add_single_ground_truth_image_info(
            image_id,
            groundtruth_dict[input_fields.groundtruth_boxes],
            groundtruth_classes,
            groundtruth_is_difficult_list=None,
            groundtruth_is_group_of_list=groundtruth_group_of)
        self._image_ids.update([image_id])
class ObjectDetectionEvaluation(object):
    """Internal implementation of Pascal object detection metrics.

    Accumulates groundtruth and detections per image, then computes per-class
    average precision, CorLoc and their means.
    """

    def __init__(self, num_groundtruth_classes, matching_iou_threshold=0.5,
                 nms_iou_threshold=1.0, nms_max_output_boxes=10000,
                 use_weighted_mean_ap=False, label_id_offset=0):
        """Constructor.

        Args:
          num_groundtruth_classes: number of groundtruth classes (0-indexed
            labels are expected everywhere below).
          matching_iou_threshold: IOU threshold for matching detections to
            groundtruth boxes.
          nms_iou_threshold: IOU threshold for non-max suppression.
          nms_max_output_boxes: maximum boxes retained by non-max suppression.
          use_weighted_mean_ap: if True, mAP is computed from the pooled
            scores/tp_fp_labels of all classes instead of averaging per-class APs.
          label_id_offset: offset already subtracted from incoming labels.

        Raises:
          ValueError: if num_groundtruth_classes is smaller than 1.
        """
        if num_groundtruth_classes < 1:
            raise ValueError('Need at least 1 groundtruth class for evaluation.')
        self.per_image_eval = per_image_evaluation.PerImageEvaluation(
            num_groundtruth_classes=num_groundtruth_classes,
            matching_iou_threshold=matching_iou_threshold,
            nms_iou_threshold=nms_iou_threshold,
            nms_max_output_boxes=nms_max_output_boxes)
        self.num_class = num_groundtruth_classes
        self.use_weighted_mean_ap = use_weighted_mean_ap
        self.label_id_offset = label_id_offset
        # Per-image groundtruth state, keyed by image key.
        self.groundtruth_boxes = {}
        self.groundtruth_class_labels = {}
        self.groundtruth_masks = {}
        self.groundtruth_is_difficult_list = {}
        self.groundtruth_is_group_of_list = {}
        # Per-class groundtruth statistics (difficult/group-of excluded from
        # instance counts; see _update_ground_truth_statistics).
        self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=int)
        self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)
        self._initialize_detections()

    def _initialize_detections(self):
        """Reset all detection-side accumulators."""
        self.detection_keys = set()
        self.scores_per_class = [[] for _ in range(self.num_class)]
        self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
        self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
        self.average_precision_per_class = np.empty(self.num_class, dtype=float)
        self.average_precision_per_class.fill(np.nan)
        self.precisions_per_class = []
        self.recalls_per_class = []
        self.corloc_per_class = np.ones(self.num_class, dtype=float)

    def clear_detections(self):
        """Drop all accumulated detections (groundtruth is kept)."""
        self._initialize_detections()

    def add_single_ground_truth_image_info(self, image_key, groundtruth_boxes,
                                           groundtruth_class_labels,
                                           groundtruth_is_difficult_list=None,
                                           groundtruth_is_group_of_list=None,
                                           groundtruth_masks=None):
        """Adds groundtruth for a single image to be used for evaluation.

        Args:
          image_key: A unique string/integer identifier for the image.
          groundtruth_boxes: float32 numpy array of shape [num_boxes, 4]
            containing `num_boxes` groundtruth boxes of the format
            [ymin, xmin, ymax, xmax] in absolute image coordinates.
          groundtruth_class_labels: integer numpy array of shape [num_boxes]
            containing 0-indexed groundtruth classes for the boxes.
          groundtruth_is_difficult_list: A length M numpy boolean array denoting
            whether a ground truth box is a difficult instance or not. Defaults
            to None (no boxes difficult).
          groundtruth_is_group_of_list: A length M numpy boolean array denoting
            whether a ground truth box is a group-of box or not. Defaults to
            None (no group-of boxes).
          groundtruth_masks: uint8 numpy array of shape
            [num_boxes, height, width] containing `num_boxes` groundtruth masks
            with values ranging from 0 to 1.
        """
        if image_key in self.groundtruth_boxes:
            logger.warning('image %s has already been added to the ground truth database.', image_key)
            return
        self.groundtruth_boxes[image_key] = groundtruth_boxes
        self.groundtruth_class_labels[image_key] = groundtruth_class_labels
        self.groundtruth_masks[image_key] = groundtruth_masks
        # Default missing flags to all-False arrays of matching length.
        if groundtruth_is_difficult_list is None:
            num_boxes = groundtruth_boxes.shape[0]
            groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool)
        self.groundtruth_is_difficult_list[image_key] = groundtruth_is_difficult_list.astype(dtype=bool)
        if groundtruth_is_group_of_list is None:
            num_boxes = groundtruth_boxes.shape[0]
            groundtruth_is_group_of_list = np.zeros(num_boxes, dtype=bool)
        self.groundtruth_is_group_of_list[image_key] = groundtruth_is_group_of_list.astype(dtype=bool)
        self._update_ground_truth_statistics(
            groundtruth_class_labels,
            groundtruth_is_difficult_list.astype(dtype=bool),
            groundtruth_is_group_of_list.astype(dtype=bool))

    def add_single_detected_image_info(self, image_key, detected_boxes,
                                       detected_scores, detected_class_labels,
                                       detected_masks=None):
        """Adds detections for a single image to be used for evaluation.

        Args:
          image_key: A unique string/integer identifier for the image.
          detected_boxes: float32 numpy array of shape [num_boxes, 4]
            containing `num_boxes` detection boxes of the format
            [ymin, xmin, ymax, xmax] in absolute image coordinates.
          detected_scores: float32 numpy array of shape [num_boxes] containing
            detection scores for the boxes.
          detected_class_labels: integer numpy array of shape [num_boxes]
            containing 0-indexed detection classes for the boxes.
          detected_masks: np.uint8 numpy array of shape
            [num_boxes, height, width] containing `num_boxes` detection masks
            with values ranging between 0 and 1.

        Raises:
          ValueError: if the number of boxes, scores and class labels differ in
            length.
        """
        if (len(detected_boxes) != len(detected_scores)
                or len(detected_boxes) != len(detected_class_labels)):
            # BUGFIX: the original applied '%' to a single length and passed the
            # other lengths as extra ValueError args, raising TypeError instead.
            raise ValueError(
                'detected_boxes, detected_scores and detected_class_labels should all have same lengths. Got[%d, %d, %d]'
                % (len(detected_boxes), len(detected_scores), len(detected_class_labels)))
        if image_key in self.detection_keys:
            logger.warning('image %s has already been added to the detection result database', image_key)
            return
        self.detection_keys.add(image_key)
        if image_key in self.groundtruth_boxes:
            groundtruth_boxes = self.groundtruth_boxes[image_key]
            groundtruth_class_labels = self.groundtruth_class_labels[image_key]
            # pop() releases the (potentially large) mask array once consumed.
            groundtruth_masks = self.groundtruth_masks.pop(image_key)
            groundtruth_is_difficult_list = self.groundtruth_is_difficult_list[image_key]
            groundtruth_is_group_of_list = self.groundtruth_is_group_of_list[image_key]
        else:
            # No groundtruth for this image: evaluate against empty arrays so
            # every detection counts as a false positive.
            groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
            groundtruth_class_labels = np.array([], dtype=int)
            if detected_masks is None:
                groundtruth_masks = None
            else:
                groundtruth_masks = np.empty(shape=[0, 1, 1], dtype=float)
            groundtruth_is_difficult_list = np.array([], dtype=bool)
            groundtruth_is_group_of_list = np.array([], dtype=bool)
        (scores, tp_fp_labels, is_class_correctly_detected_in_image) = (
            self.per_image_eval.compute_object_detection_metrics(
                detected_boxes=detected_boxes,
                detected_scores=detected_scores,
                detected_class_labels=detected_class_labels,
                groundtruth_boxes=groundtruth_boxes,
                groundtruth_class_labels=groundtruth_class_labels,
                groundtruth_is_difficult_list=groundtruth_is_difficult_list,
                groundtruth_is_group_of_list=groundtruth_is_group_of_list,
                detected_masks=detected_masks,
                groundtruth_masks=groundtruth_masks))
        for i in range(self.num_class):
            if scores[i].shape[0] > 0:
                self.scores_per_class[i].append(scores[i])
                self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
        self.num_images_correctly_detected_per_class += is_class_correctly_detected_in_image

    def _update_ground_truth_statistics(self, groundtruth_class_labels,
                                        groundtruth_is_difficult_list,
                                        groundtruth_is_group_of_list):
        """Update ground truth statistics.

        1. Difficult boxes are ignored when counting the number of ground truth
           instances, as done in the Pascal VOC devkit.
        2. Difficult boxes are treated as normal boxes when computing CorLoc
           related statistics.

        Args:
          groundtruth_class_labels: An integer numpy array of length M,
            representing M class labels of object instances in ground truth.
          groundtruth_is_difficult_list: A boolean numpy array of length M
            denoting whether a ground truth box is a difficult instance or not.
          groundtruth_is_group_of_list: A boolean numpy array of length M
            denoting whether a ground truth box is a group-of box or not.
        """
        for class_index in range(self.num_class):
            # Instance counts exclude difficult and group-of boxes.
            num_gt_instances = np.sum(
                groundtruth_class_labels[
                    (~groundtruth_is_difficult_list)
                    & (~groundtruth_is_group_of_list)] == class_index)
            self.num_gt_instances_per_class[class_index] += num_gt_instances
            # Image counts include every box of the class (CorLoc convention).
            if np.any(groundtruth_class_labels == class_index):
                self.num_gt_imgs_per_class[class_index] += 1

    def evaluate(self):
        """Compute evaluation result.

        Returns:
          A named tuple with the following fields -
            average_precision: float numpy array of average precision for
              each class.
            mean_ap: mean average precision of all classes, float scalar.
            precisions: List of precisions, each precision being a float
              numpy array.
            recalls: List of recalls, each recall being a float numpy array.
            corloc: numpy float array of per-class CorLoc scores.
            mean_corloc: mean CorLoc score over classes, float scalar.
        """
        if (self.num_gt_instances_per_class == 0).any():
            logger.info('The following classes have no ground truth examples: %s',
                        (np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0))
                         + self.label_id_offset))
        if self.use_weighted_mean_ap:
            all_scores = np.array([], dtype=float)
            all_tp_fp_labels = np.array([], dtype=bool)
        for class_index in range(self.num_class):
            if self.num_gt_instances_per_class[class_index] == 0:
                continue
            if not self.scores_per_class[class_index]:
                scores = np.array([], dtype=float)
                tp_fp_labels = np.array([], dtype=bool)
            else:
                scores = np.concatenate(self.scores_per_class[class_index])
                tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
            if self.use_weighted_mean_ap:
                all_scores = np.append(all_scores, scores)
                all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
            (precision, recall) = metrics.compute_precision_recall(
                scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
            self.precisions_per_class.append(precision)
            self.recalls_per_class.append(recall)
            average_precision = metrics.compute_average_precision(precision, recall)
            self.average_precision_per_class[class_index] = average_precision
        self.corloc_per_class = metrics.compute_cor_loc(
            self.num_gt_imgs_per_class,
            self.num_images_correctly_detected_per_class)
        if self.use_weighted_mean_ap:
            # Pool scores/labels across classes and compute a single AP.
            num_gt_instances = np.sum(self.num_gt_instances_per_class)
            (precision, recall) = metrics.compute_precision_recall(
                all_scores, all_tp_fp_labels, num_gt_instances)
            mean_ap = metrics.compute_average_precision(precision, recall)
        else:
            mean_ap = np.nanmean(self.average_precision_per_class)
        mean_corloc = np.nanmean(self.corloc_per_class)
        return ObjectDetectionEvalMetrics(
            self.average_precision_per_class, mean_ap, self.precisions_per_class,
            self.recalls_per_class, self.corloc_per_class, mean_corloc)
class InputDataFields(object):
    """Names for the input tensors.

    Holds the standard dataset field names used to identify input tensors.
    Decoders use these keys for the returned tensor_dict of input tensors,
    and models use them to identify the tensors they need.

    Attributes:
      image: image.
      original_image: image in the original input size.
      key: unique key corresponding to image.
      source_id: source of the original image.
      filename: original filename of the dataset (without common path).
      groundtruth_image_classes: image-level class labels.
      groundtruth_boxes: coordinates of the ground truth boxes in the image.
      groundtruth_classes: box-level class labels.
      groundtruth_label_types: box-level label types (e.g. explicit negative).
      groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead]
        is the groundtruth a single object or a crowd.
      groundtruth_area: area of a groundtruth segment.
      groundtruth_difficult: is a `difficult` object.
      groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of
        the same class, forming a connected group, where instances are heavily
        occluding each other.
      proposal_boxes: coordinates of object proposal boxes.
      proposal_objectness: objectness score of each proposal.
      groundtruth_instance_masks: ground truth instance masks.
      groundtruth_instance_boundaries: ground truth instance boundaries.
      groundtruth_instance_classes: instance mask-level class labels.
      groundtruth_keypoints: ground truth keypoints.
      groundtruth_keypoint_visibilities: ground truth keypoint visibilities.
      groundtruth_label_scores: groundtruth label scores.
      groundtruth_weights: groundtruth weight factor for bounding boxes.
      num_groundtruth_boxes: number of groundtruth boxes.
      true_image_shape: true shape of the image within the (possibly
        zero-padded) resized image.
    """
    image = 'image'
    original_image = 'original_image'
    key = 'key'
    source_id = 'source_id'
    filename = 'filename'
    groundtruth_image_classes = 'groundtruth_image_classes'
    groundtruth_boxes = 'groundtruth_boxes'
    groundtruth_classes = 'groundtruth_classes'
    groundtruth_label_types = 'groundtruth_label_types'
    groundtruth_is_crowd = 'groundtruth_is_crowd'
    groundtruth_area = 'groundtruth_area'
    groundtruth_difficult = 'groundtruth_difficult'
    groundtruth_group_of = 'groundtruth_group_of'
    proposal_boxes = 'proposal_boxes'
    proposal_objectness = 'proposal_objectness'
    groundtruth_instance_masks = 'groundtruth_instance_masks'
    groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
    groundtruth_instance_classes = 'groundtruth_instance_classes'
    groundtruth_keypoints = 'groundtruth_keypoints'
    groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
    groundtruth_label_scores = 'groundtruth_label_scores'
    groundtruth_weights = 'groundtruth_weights'
    num_groundtruth_boxes = 'num_groundtruth_boxes'
    true_image_shape = 'true_image_shape'
class DetectionResultFields(object):
    """Naming conventions for storing the output of the detector.

    Attributes:
      source_id: source of the original image.
      key: unique key corresponding to image.
      detection_boxes: coordinates of the detection boxes in the image.
      detection_scores: detection scores for the detection boxes in the image.
      detection_classes: detection-level class labels.
      detection_masks: contains a segmentation mask for each detection box.
      detection_boundaries: contains an object boundary for each detection box.
      detection_keypoints: contains detection keypoints for each detection box.
      num_detections: number of detections in the batch.
    """
    source_id = 'source_id'
    key = 'key'
    detection_boxes = 'detection_boxes'
    detection_scores = 'detection_scores'
    detection_classes = 'detection_classes'
    detection_masks = 'detection_masks'
    detection_boundaries = 'detection_boundaries'
    detection_keypoints = 'detection_keypoints'
    num_detections = 'num_detections'
class BoxListFields(object):
    """Naming conventions for BoxLists.

    Attributes:
      boxes: bounding box coordinates.
      classes: classes per bounding box.
      scores: scores per bounding box.
      weights: sample weights per bounding box.
      objectness: objectness score per bounding box.
      masks: masks per bounding box.
      boundaries: boundaries per bounding box.
      keypoints: keypoints per bounding box.
      keypoint_heatmaps: keypoint heatmaps per bounding box.
    """
    boxes = 'boxes'
    classes = 'classes'
    scores = 'scores'
    weights = 'weights'
    objectness = 'objectness'
    masks = 'masks'
    boundaries = 'boundaries'
    keypoints = 'keypoints'
    keypoint_heatmaps = 'keypoint_heatmaps'
class TfExampleFields(object):
    """TF-example proto feature names for object detection.

    Holds the standard feature names to load from an Example proto for object
    detection.

    Attributes:
      image_encoded: JPEG encoded string.
      image_format: image format, e.g. "JPEG".
      filename: filename.
      channels: number of channels of image.
      colorspace: colorspace, e.g. "RGB".
      height: height of image in pixels, e.g. 462.
      width: width of image in pixels, e.g. 581.
      source_id: original source of the image.
      object_class_text: labels in text format, e.g. ["person", "cat"].
      object_class_label: labels in numbers, e.g. [16, 8].
      object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30.
      object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40.
      object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50.
      object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70.
      object_view: viewpoint of object, e.g. ["frontal", "left"].
      object_truncated: is object truncated, e.g. [true, false].
      object_occluded: is object occluded, e.g. [true, false].
      object_difficult: is object difficult, e.g. [true, false].
      object_group_of: is object a single object or a group of objects.
      object_depiction: is object a depiction.
      object_is_crowd: [DEPRECATED, use object_group_of instead]
        is the object a single object or a crowd.
      object_segment_area: the area of the segment.
      object_weight: a weight factor for the object's bounding box.
      instance_masks: instance segmentation masks.
      instance_boundaries: instance boundaries.
      instance_classes: Classes for each instance segmentation mask.
      detection_class_label: class label in numbers.
      detection_bbox_ymin: ymin coordinates of a detection box.
      detection_bbox_xmin: xmin coordinates of a detection box.
      detection_bbox_ymax: ymax coordinates of a detection box.
      detection_bbox_xmax: xmax coordinates of a detection box.
      detection_score: detection score for the class label and box.
    """
    image_encoded = 'image/encoded'
    image_format = 'image/format'
    filename = 'image/filename'
    channels = 'image/channels'
    colorspace = 'image/colorspace'
    height = 'image/height'
    width = 'image/width'
    source_id = 'image/source_id'
    object_class_text = 'image/object/class/text'
    object_class_label = 'image/object/class/label'
    object_bbox_ymin = 'image/object/bbox/ymin'
    object_bbox_xmin = 'image/object/bbox/xmin'
    object_bbox_ymax = 'image/object/bbox/ymax'
    object_bbox_xmax = 'image/object/bbox/xmax'
    object_view = 'image/object/view'
    object_truncated = 'image/object/truncated'
    object_occluded = 'image/object/occluded'
    object_difficult = 'image/object/difficult'
    object_group_of = 'image/object/group_of'
    object_depiction = 'image/object/depiction'
    object_is_crowd = 'image/object/is_crowd'
    object_segment_area = 'image/object/segment/area'
    object_weight = 'image/object/weight'
    instance_masks = 'image/segmentation/object'
    instance_boundaries = 'image/boundaries/object'
    instance_classes = 'image/segmentation/object/class'
    detection_class_label = 'image/detection/label'
    detection_bbox_ymin = 'image/detection/bbox/ymin'
    detection_bbox_xmin = 'image/detection/bbox/xmin'
    detection_bbox_ymax = 'image/detection/bbox/ymax'
    detection_bbox_xmax = 'image/detection/bbox/xmax'
    detection_score = 'image/detection/score'
class NpInfoDict(object):
    """A read-only mapping packed into parallel numpy arrays.

    Keys are sorted once at construction and stored in `key_arr`, with their
    values aligned in `val_arr`. Items are addressed by positional index;
    `convert_key` maps an original key back to its positional index.
    """

    def __init__(self, info_dict, key_type=None, value_type=None):
        """Pack `info_dict` into sorted, parallel numpy arrays.

        Args:
          info_dict: mapping of key -> value to pack.
          key_type: optional numpy dtype for the key array.
          value_type: optional numpy dtype for the value array.
        """
        sorted_keys = sorted(info_dict)
        self.key_arr = np.array(sorted_keys, dtype=key_type)
        self.val_arr = np.array([info_dict[key] for key in sorted_keys],
                                dtype=value_type)
        # Reverse lookup: original key -> positional index.
        self._key_idx_map = dict(zip(sorted_keys, range(len(sorted_keys))))

    def __getitem__(self, idx):
        """Return the (key, value) pair at positional index `idx`."""
        return (self.key_arr[idx], self.val_arr[idx])

    def __len__(self):
        return len(self.key_arr)

    def convert_key(self, org_key):
        """Return the positional index of the original key `org_key`."""
        return self._key_idx_map[org_key]
class NpBoxDict(object):
    """Per-image box annotations flattened into packed numpy arrays.

    All boxes of all images are concatenated field-by-field into flat arrays
    (one per field, stored as `vfield_<name>` attributes); `pointer_arr` holds
    the cumulative offsets delimiting each image's slice. Indexing by image
    position returns one array slice per field, in `value_types` order.
    """

    def __init__(self, id_to_box_dict, key_list=None, value_types=()):
        """Pack per-image box dicts into flat arrays.

        Args:
          id_to_box_dict: mapping of image key -> list of per-box dicts, each
            containing every field named in `value_types`.
          key_list: optional explicit key order; defaults to sorted keys of
            `id_to_box_dict`.
          value_types: sequence of (field_name, numpy_dtype) pairs; must
            include a 'bbox' field.
            BUGFIX: default changed from the mutable `[]` to the immutable
            `()` (mutable default arguments are shared across calls).
        """
        value_fields, value_dtypes = zip(*value_types)
        assert 'bbox' in value_fields
        if key_list is None:
            key_list = sorted(id_to_box_dict)
        self.length = len(key_list)
        # pointer_list[i]..pointer_list[i+1] delimits image i's boxes.
        pointer_list = [0]
        value_lists = {field: [] for field in value_fields}
        cursor = 0
        for key in key_list:
            box_infos = id_to_box_dict[key]
            cursor += len(box_infos)
            pointer_list.append(cursor)
            for box_info in box_infos:
                for field in value_fields:
                    value_lists[field].append(box_info[field])
        self.pointer_arr = np.array(pointer_list, dtype=np.int32)
        # One packed array attribute per field, e.g. self.vfield_bbox.
        self.attr_names = np.array(['vfield_' + field for field in value_fields])
        for field_name, dtype, attr_name in zip(value_fields, value_dtypes,
                                                self.attr_names):
            setattr(self, attr_name, np.array(value_lists[field_name], dtype=dtype))

    def __getitem__(self, idx):
        """Return a list of per-field array slices for image position `idx`."""
        lo = self.pointer_arr[idx]
        hi = self.pointer_arr[idx + 1]
        return [getattr(self, attr_name)[lo:hi] for attr_name in self.attr_names]

    def __len__(self):
        return self.length
class DatasetEngine(data.Dataset):
    """Spatio-temporal action-detection dataset over pre-extracted video frames.

    Annotations come from a COCO-style json dict; optional detection files
    supply person boxes, object boxes and keypoints.  Each item yields the
    transformed slow/fast frame tensors plus BoxList targets.
    """

    def __init__(self, video_root, ann_file, remove_clips_without_annotations, frame_span, box_file=None,
                 eval_file_paths=None, box_thresh=0.0, action_thresh=0.0, transforms=None,
                 object_file=None, object_transforms=None, keypoints_file=None):
        """
        Args:
            video_root: directory with one sub-folder of frame pngs per movie.
            ann_file: path to the json annotation dict.
            remove_clips_without_annotations: drop clips lacking GT boxes.
            frame_span: number of frames decoded around each timestamp.
            box_file: optional detected person boxes (used instead of GT).
            eval_file_paths: auxiliary files for evaluation.  Fix: was a
                mutable ``{}`` default; None now maps to a fresh dict.
            box_thresh: minimum detector score for person boxes.
            action_thresh: kept for evaluation-side score filtering.
            transforms: video transform pipeline.
            object_file / keypoints_file: optional extra detection jsons.
            object_transforms: transform pipeline for object/keypoint boxes.
        """
        print('loading annotations into memory...')
        tic = time.time()
        # Fix: close the annotation file deterministically (was a bare open()).
        with open(ann_file, 'r') as f:
            json_dict = json.load(f)
        # Fix: isinstance is the idiomatic type check (was type(...) == dict).
        assert isinstance(json_dict, dict), 'annotation file format {} not supported'.format(type(json_dict))
        print('Done (t={:0.2f}s)'.format(time.time() - tic))

        self.video_root = video_root
        self.transforms = transforms
        self.frame_span = frame_span
        self.eval_file_paths = {} if eval_file_paths is None else eval_file_paths
        self.action_thresh = action_thresh

        # Pack each clip's GT boxes together with a 21-slot multi-hot action vector.
        clip2ann = defaultdict(list)
        if 'annotations' in json_dict:
            for ann in json_dict['annotations']:
                action_ids = ann['action_ids']
                # Fix: np.bool was removed in NumPy 1.24; builtin bool is the
                # same dtype and works on all versions.
                one_hot = np.zeros(22, dtype=bool)
                one_hot[action_ids] = True
                packed_act = one_hot[1:]  # slot 0 is background, dropped
                clip2ann[ann['image_id']].append(dict(bbox=ann['bbox'], packed_act=packed_act))

        movies_size = {}
        clips_info = {}
        for img in json_dict['images']:
            mov = img['movie']
            if mov not in movies_size:
                movies_size[mov] = [img['width'], img['height']]
            clips_info[img['id']] = [mov, img['timestamp']]
        self.movie_info = NpInfoDict(movies_size, value_type=np.int32)
        clip_ids = sorted(list(clips_info.keys()))

        if remove_clips_without_annotations:
            clip_ids = [clip_id for clip_id in clip_ids if clip_id in clip2ann]

        if box_file:
            # Use detected person boxes (score-thresholded) instead of GT.
            imgToBoxes = self.load_box_file(box_file, box_thresh)
            clip_ids = [img_id for img_id in clip_ids if len(imgToBoxes[img_id]) > 0]
            self.det_persons = NpBoxDict(imgToBoxes, clip_ids,
                                         value_types=[('bbox', np.float32), ('score', np.float32)])
        else:
            self.det_persons = None

        if object_file:
            imgToObjects = self.load_box_file(object_file)
            self.det_objects = NpBoxDict(imgToObjects, clip_ids,
                                         value_types=[('bbox', np.float32), ('score', np.float32)])
        else:
            self.det_objects = None

        if keypoints_file:
            imgToBoxes = self.load_box_file(keypoints_file)
            self.det_keypoints = NpBoxDict(
                imgToBoxes, clip_ids,
                value_types=[('keypoints', np.float32), ('bbox', np.float32), ('score', np.float32)])
        else:
            self.det_keypoints = None

        self.object_transforms = object_transforms if object_transforms else None

        self.anns = NpBoxDict(clip2ann, clip_ids, value_types=[('bbox', np.float32), ('packed_act', np.uint8)])
        # Re-key clip info by positional movie index for compact storage.
        clips_info = {clip_id: [self.movie_info.convert_key(clips_info[clip_id][0]), clips_info[clip_id][1]]
                      for clip_id in clip_ids}
        self.clips_info = NpInfoDict(clips_info, value_type=np.int32)

    def __getitem__(self, idx):
        """Return (slow_video, fast_video, boxes, objects, keypoints, extras, idx)
        when transforms are configured, else the raw decoded frames."""
        _, clip_info = self.clips_info[idx]
        mov_id, timestamp = clip_info
        movie_id, movie_size = self.movie_info[mov_id]
        video_data = self._decode_video_data(movie_id, timestamp)
        im_w, im_h = movie_size

        if self.det_persons is None:
            # Training path: ground-truth boxes with multi-hot action labels.
            boxes, packed_act = self.anns[idx]
            boxes_tensor = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
            boxes = BoxList(boxes_tensor, (im_w, im_h), mode='xyxy')
            one_hot_label = torch.as_tensor(packed_act, dtype=torch.uint8)
            boxes.add_field('labels', one_hot_label)
        else:
            # Test path: detected person boxes carrying detector scores.
            boxes, box_score = self.det_persons[idx]
            boxes_tensor = torch.as_tensor(boxes).reshape(-1, 4)
            boxes = BoxList(boxes_tensor, (im_w, im_h), mode='xyxy')
            boxes.add_field('det_score', torch.as_tensor(box_score))
        boxes = boxes.clip_to_image(remove_empty=True)
        # Keypoint matching below needs the pre-transform coordinates.
        orig_boxes = boxes.bbox

        extras = {}
        if self.transforms is not None:
            video_data, boxes, transform_randoms = self.transforms(video_data, boxes)
            slow_video, fast_video = video_data

            objects = None
            if self.det_objects is not None:
                objects = self.get_objects(idx, im_w, im_h)
            if self.object_transforms is not None:
                # Replay the same random choices applied to the video.
                objects = self.object_transforms(objects, boxes, transform_randoms)

            keypoints = None
            if self.det_keypoints is not None:
                keypoints = self.get_keypoints(idx, im_w, im_h, orig_boxes)
            if self.object_transforms is not None:
                keypoints = self.object_transforms(keypoints, boxes, transform_randoms)

            extras['movie_id'] = movie_id
            extras['timestamp'] = timestamp
            return slow_video, fast_video, boxes, objects, keypoints, extras, idx

        # Fallback when no transform pipeline is configured.
        return video_data, boxes, idx, movie_id, timestamp

    def return_null_box(self, im_w, im_h):
        """An empty BoxList placeholder for clips without detections."""
        return BoxList(torch.zeros((0, 4)), (im_w, im_h), mode='xyxy')

    def get_objects(self, idx, im_w, im_h):
        """Detected object boxes (with scores) for clip ``idx``, or an empty BoxList."""
        obj_boxes = self.return_null_box(im_w, im_h)
        if hasattr(self, 'det_objects'):
            boxes, box_score = self.det_objects[idx]
            if len(box_score) == 0:
                return obj_boxes
            obj_boxes_tensor = torch.as_tensor(boxes).reshape(-1, 4)
            obj_boxes = BoxList(obj_boxes_tensor, (im_w, im_h), mode='xyxy')
            obj_boxes.add_field('scores', torch.as_tensor(box_score))
        return obj_boxes

    def get_keypoints(self, idx, im_w, im_h, orig_boxes):
        """Keypoint detections matched by IoU to the given person boxes.

        Falls back to all-zero keypoints when the clip has no keypoint
        detections.
        """
        keypoints, boxes, box_score = self.det_keypoints[idx]
        if len(box_score) == 0:
            kpts_boxes = BoxList(torch.zeros((orig_boxes.shape[0], 4)).reshape(-1, 4), (im_w, im_h), mode='xyxy')
            kpts_boxes.add_field('keypoints', np.zeros((orig_boxes.shape[0], 17, 3)))
            return kpts_boxes
        boxes = np.array(boxes)
        orig_boxes = orig_boxes.cpu().numpy()
        # For every person box keep the best-overlapping keypoint detection.
        idx_to_keep = np.argmax(iou(orig_boxes, boxes), 1)
        boxes = boxes[idx_to_keep]
        keypoints = np.array(keypoints)[idx_to_keep]
        kpts_boxes = BoxList(torch.as_tensor(boxes).reshape(-1, 4), (im_w, im_h), mode='xyxy')
        kpts_boxes.add_field('scores', torch.as_tensor(box_score))
        kpts_boxes.add_field('keypoints', keypoints)
        return kpts_boxes

    def get_video_info(self, index):
        """Lightweight clip metadata without decoding any frames."""
        _, clip_info = self.clips_info[index]
        mov_id, timestamp = clip_info
        movie_id, movie_size = self.movie_info[mov_id]
        w, h = movie_size
        return dict(width=w, height=h, movie=movie_id, timestamp=timestamp)

    def load_box_file(self, box_file, score_thresh=0.0):
        """Load a detection json and group boxes by image id, dropping low scores."""
        print('Loading box file into memory...')
        tic = time.time()
        # json is imported at module level; the previous local import was redundant.
        with open(box_file, 'r') as f:
            box_results = json.load(f)
        print('Done (t={:0.2f}s)'.format(time.time() - tic))
        imgToBoxes = defaultdict(list)
        for box in box_results:
            if box['score'] >= score_thresh:
                imgToBoxes[box['image_id']].append(box)
        return imgToBoxes

    def _decode_video_data(self, dirname, timestamp):
        """Decode up to ``frame_span`` RGB frames around ``timestamp`` as a numpy stack."""
        video_folder = os.path.join(self.video_root, dirname)
        right_span = self.frame_span // 2
        left_span = self.frame_span - right_span

        def _load_frame(t):
            # Frames are stored as zero-padded '<t>.png'.
            video_path = os.path.join(video_folder, '{}.png'.format(str(t).zfill(5)))
            try:
                with Image.open(video_path) as img:
                    return img.convert('RGB')
            except BaseException as e:
                raise RuntimeError('Caught "{}" when loading {}'.format(str(e), video_path))

        num_frames = len(os.listdir(video_folder))
        right_frames = []
        cur_t = timestamp
        while cur_t < num_frames:
            if cur_t - timestamp > right_span:
                break
            right_frames.append(_load_frame(cur_t))
            cur_t += 1

        left_frames = []
        cur_t = timestamp - 1
        while cur_t > 0:
            if timestamp - cur_t > left_span:
                break
            left_frames.append(_load_frame(cur_t))
            cur_t -= 1

        # NOTE(review): left_frames are collected in reverse time order
        # (t-1, t-2, ...) and are NOT re-reversed before stacking, matching
        # the original behavior — confirm this ordering is intended.
        frames = left_frames + right_frames
        return np.stack(frames)

    def __len__(self):
        return len(self.clips_info)

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Video Root Location: {}\n'.format(self.video_root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transforms.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
class DistributedSampler(Sampler):
    """Restricts dataset loading to a subset exclusive to the current process.

    Useful with :class:`torch.nn.parallel.DistributedDataParallel`: each
    process receives an equal-sized, non-overlapping slice of the (padded)
    index list.  The dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Every replica draws the same number of samples; pad to a multiple.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # Seed with the epoch so every replica shuffles identically.
            rng = torch.Generator()
            rng.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=rng).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()
        # Pad with a prefix copy so the list divides evenly among replicas.
        indices += indices[:self.total_size - len(indices)]
        assert len(indices) == self.total_size
        start = self.num_samples * self.rank
        chunk = indices[start:start + self.num_samples]
        assert len(chunk) == self.num_samples
        return iter(chunk)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
class GroupedBatchSampler(BatchSampler):
    """
    Wraps another sampler to yield a mini-batch of indices.
    It enforces that elements from the same group should appear in groups of batch_size.
    It also tries to provide mini-batches which follows an ordering which is
    as close as possible to the ordering from the original sampler.

    Arguments:
        sampler (Sampler): Base sampler.
        batch_size (int): Size of mini-batch.
        drop_uneven (bool): If ``True``, the sampler will drop the batches whose
            size is less than ``batch_size``

    """

    def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
        if not isinstance(sampler, Sampler):
            raise ValueError('sampler should be an instance of torch.utils.dataset.Sampler, but got sampler={}'.format(sampler))
        self.sampler = sampler
        # group_ids[i] is the group of dataset element i (1-D, one id per element).
        self.group_ids = torch.as_tensor(group_ids)
        assert self.group_ids.dim() == 1
        self.batch_size = batch_size
        self.drop_uneven = drop_uneven
        # Distinct group ids, in ascending order.
        self.groups = torch.unique(self.group_ids).sort(0)[0]

    def _prepare_batches(self):
        """Build the list of batches for one pass over the wrapped sampler."""
        dataset_size = len(self.group_ids)
        # order[i] = position of dataset index i in the sampler's order; -1 = not sampled.
        sampled_ids = torch.as_tensor(list(self.sampler))
        order = torch.full((dataset_size,), -1, dtype=torch.int64)
        order[sampled_ids] = torch.arange(len(sampled_ids))
        mask = order >= 0
        # One boolean mask per group id, restricted to elements the sampler produced.
        clusters = [(self.group_ids == i) & mask for i in self.groups]
        relative_order = [order[cluster] for cluster in clusters]
        # Within each cluster, arrange elements by their sampler order.
        permutation_ids = [s[s.sort()[1]] for s in relative_order]
        permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
        # Chunk each same-group cluster into batch_size pieces, then merge all.
        splits = [c.split(self.batch_size) for c in permuted_clusters]
        merged = tuple(itertools.chain.from_iterable(splits))
        # Reorder the merged batches so they follow the sampler order of each
        # batch's first element, approximating the original sampler ordering.
        first_element_of_batch = [t[0].item() for t in merged]
        inv_sampled_ids_map = {v: k for (k, v) in enumerate(sampled_ids.tolist())}
        first_index_of_batch = torch.as_tensor([inv_sampled_ids_map[s] for s in first_element_of_batch])
        permutation_order = first_index_of_batch.sort(0)[1].tolist()
        batches = [merged[i].tolist() for i in permutation_order]
        if self.drop_uneven:
            # Discard trailing batches smaller than batch_size.
            kept = []
            for batch in batches:
                if len(batch) == self.batch_size:
                    kept.append(batch)
            batches = kept
        return batches

    def __iter__(self):
        batches = self._prepare_batches()
        # Cache so a subsequent __len__ reflects this iteration's batches.
        self._batches = batches
        return iter(batches)

    def __len__(self):
        if not hasattr(self, '_batches'):
            self._batches = self._prepare_batches()
        return len(self._batches)
class IterationBasedBatchSampler(BatchSampler):
    """Re-iterates a wrapped BatchSampler until num_iterations batches were yielded."""

    def __init__(self, batch_sampler, num_iterations, start_iter=0):
        self.batch_sampler = batch_sampler
        self.num_iterations = num_iterations
        self.start_iter = start_iter

    def __iter__(self):
        current = self.start_iter
        while current <= self.num_iterations:
            # Keep epoch-dependent shuffling in sync with the iteration count.
            inner_sampler = self.batch_sampler.sampler
            if hasattr(inner_sampler, 'set_epoch'):
                inner_sampler.set_epoch(current)
            for batch in self.batch_sampler:
                current += 1
                if current > self.num_iterations:
                    break
                yield batch

    def __len__(self):
        return self.num_iterations
def build_transforms(cfg, is_train=True):
    """Assemble the video transform pipeline for training or evaluation.

    Training enables resizing jitter, optional color jitter, random flips and
    slow-pathway jitter; evaluation uses a deterministic pipeline.
    """
    if is_train:
        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        color_jitter = cfg.INPUT.COLOR_JITTER
        flip_prob = 0.5
        slow_jitter = cfg.INPUT.SLOW_JITTER
    else:
        # Deterministic settings at test time.
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        color_jitter = False
        flip_prob = 0
        slow_jitter = False

    frame_num = cfg.INPUT.FRAME_NUM
    sample_rate = cfg.INPUT.FRAME_SAMPLE_RATE

    if color_jitter:
        color_transform = T.ColorJitter(cfg.INPUT.HUE_JITTER, cfg.INPUT.SAT_JITTER, cfg.INPUT.VAL_JITTER)
    else:
        color_transform = T.Identity()

    normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD,
                                      to_bgr=cfg.INPUT.TO_BGR)

    return T.Compose([
        T.TemporalCrop(frame_num, sample_rate),
        T.Resize(min_size, max_size),
        color_transform,
        T.RandomHorizontalFlip(flip_prob),
        T.ToTensor(),
        normalize_transform,
        T.SlowFastCrop(cfg.INPUT.TAU, cfg.INPUT.ALPHA, slow_jitter),
    ])
def build_object_transforms(cfg, is_train=True):
    """Assemble the transform pipeline applied to detected object boxes.

    Flips are only randomized during training.
    """
    flip_prob = 0.5 if is_train else 0
    return OT.Compose([
        OT.Resize(),
        OT.PickTop(cfg.MODEL.HIT_STRUCTURE.MAX_OBJECT),
        OT.RandomHorizontalFlip(flip_prob),
    ])
def compute_on_dataset_1stage(model, data_loader, device):
    """Single-stage inference: run the full model per batch, collect results on CPU.

    Returns a dict mapping video id -> model output.
    """
    cpu_device = torch.device('cpu')
    results_dict = {}
    if get_world_size() == 1:
        extra_args = {}
    else:
        # Label each rank's progress bar when running distributed.
        extra_args = dict(desc='rank {}'.format(get_rank()))
    for batch in tqdm(data_loader, **extra_args):
        slow_clips, fast_clips, boxes, objects, keypoints, extras, video_ids = batch
        slow_clips = slow_clips.to(device)
        fast_clips = fast_clips.to(device)
        boxes = [box.to(device) for box in boxes]
        objects = [None if box is None else box.to(device) for box in objects]
        keypoints = [None if box is None else box.to(device) for box in keypoints]
        with torch.no_grad():
            output = model(slow_clips, fast_clips, boxes, objects, keypoints, extras)
            output = [o.to(cpu_device) for o in output]
        results_dict.update({video_id: result for video_id, result in zip(video_ids, output)})
    return results_dict
def compute_on_dataset_2stage(model, data_loader, device, logger):
    """Two-stage memory-augmented inference.

    Stage 1 extracts per-clip features for every batch and gathers person
    features from all ranks into a shared pool.  Stage 2 re-runs the model
    heads with the pooled features to produce the final predictions.
    Returns a dict mapping video id -> prediction.
    """
    cpu_device = torch.device('cpu')
    num_devices = get_world_size()
    dataset = data_loader.dataset
    if num_devices == 1:
        extra_args = {}
    else:
        # Label each rank's progress bar when running distributed.
        rank = get_rank()
        extra_args = dict(desc='rank {}'.format(rank))
    loader_len = len(data_loader)
    person_feature_pool = MemoryPool()
    # Per-batch bookkeeping kept for stage 2 (features stay on CPU meanwhile).
    batch_info_list = [None] * loader_len
    logger.info('Stage 1: extracting clip features.')
    start_time = time.time()
    for i, batch in enumerate(tqdm(data_loader, **extra_args)):
        (slow_clips, fast_clips, boxes, objects, keypoints, extras, video_ids) = batch
        slow_clips = slow_clips.to(device)
        fast_clips = fast_clips.to(device)
        boxes = [box.to(device) for box in boxes]
        objects = [(None if (box is None) else box.to(device)) for box in objects]
        movie_ids = [e['movie_id'] for e in extras]
        timestamps = [e['timestamp'] for e in extras]
        with torch.no_grad():
            # part_forward=0: run only the backbone/feature-extraction part.
            feature = model(slow_clips, fast_clips, boxes, objects, keypoints, part_forward=0)
            person_feature = [ft.to(cpu_device) for ft in feature[0]]
            object_feature = [ft.to(cpu_device) for ft in feature[1]]
            hand_feature = [ft.to(cpu_device) for ft in feature[2]]
            poses_feature = [ft.to(cpu_device) for ft in feature[3]]
        # Index person features by (movie, timestamp) for stage-2 lookup.
        for j, (movie_id, timestamp, p_ft, o_ft) in enumerate(zip(movie_ids, timestamps, person_feature, object_feature)):
            person_feature_pool[(movie_id, timestamp)] = p_ft
        batch_info_list[i] = (movie_ids, timestamps, video_ids, object_feature, hand_feature, poses_feature, [b.extra_fields['det_score'] for b in boxes])
    synchronize()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=total_time))
    logger.info('Stage 1 time: {} ({} s / video per device, on {} devices)'.format(total_time_str, (total_time * num_devices) / len(dataset), num_devices))
    # Merge every rank's person features into one pool visible to all ranks.
    feature_pool = all_gather(person_feature_pool)
    all_feature_pool_p = MemoryPool()
    all_feature_pool_p.update_list(feature_pool)
    del feature_pool, person_feature_pool
    results_dict = {}
    logger.info('Stage 2: predicting with extracted feature.')
    start_time = time.time()
    for movie_ids, timestamps, video_ids, object_feature, hand_feature, poses_feature, det_scores in tqdm(batch_info_list, **extra_args):
        current_feat_p = [all_feature_pool_p[(movie_id, timestamp)].to(device) for movie_id, timestamp in zip(movie_ids, timestamps)]
        current_feat_o = [ft_o.to(device) for ft_o in object_feature]
        current_feat_h = [ft_k.to(device) for ft_k in hand_feature]
        current_feat_pose = [ft_k.to(device) for ft_k in poses_feature]
        extras = dict(person_pool=all_feature_pool_p, movie_ids=movie_ids, timestamps=timestamps, current_feat_p=current_feat_p, current_feat_o=current_feat_o, current_feat_h=current_feat_h, current_feat_pose=current_feat_pose)
        with torch.no_grad():
            # part_forward=1: run only the prediction heads on pooled features.
            output = model(None, None, None, None, extras=extras, part_forward=1)
            output = [o.to(cpu_device) for o in output]
            det_scores = [d.to(cpu_device) for d in det_scores]
        # Scale action scores by the person-detector confidence.
        for i, o in enumerate(output):
            output[i].extra_fields['scores'] = output[i].extra_fields['scores'] * det_scores[i].unsqueeze(1)
        results_dict.update({video_id: result for video_id, result in zip(video_ids, output)})
    synchronize()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=total_time))
    logger.info('Stage 2 time: {} ({} s / video per device, on {} devices)'.format(total_time_str, (total_time * num_devices) / len(dataset), num_devices))
    del batch_info_list, output, extras, movie_ids, timestamps, video_ids, object_feature
    return results_dict
def compute_on_dataset(model, data_loader, device, logger, mem_active):
    """Dispatch to memory-augmented (two-stage) or plain (one-stage) inference."""
    model.eval()
    if mem_active:
        return compute_on_dataset_2stage(model, data_loader, device, logger)
    return compute_on_dataset_1stage(model, data_loader, device)
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
    """Gather per-rank prediction dicts and flatten them into an id-ordered list.

    Returns the list on the main process, None elsewhere.
    """
    all_predictions = gather(predictions_per_gpu)
    if not is_main_process():
        return
    merged = {}
    for partial in all_predictions:
        merged.update(partial)
    video_ids = sorted(merged.keys())
    # A contiguous 0..N-1 id range means nothing was lost in the gather.
    if len(video_ids) != video_ids[-1] + 1:
        logger = logging.getLogger('hit.inference')
        logger.warning('Number of videos that were gathered from multiple processes is not a contiguous set. Some images might be missing from the evaluation')
    return [merged[i] for i in video_ids]
def inference(model, data_loader, dataset_name, mem_active=False, output_folder=None):
    """Run distributed inference on a dataset and evaluate the gathered predictions.

    Saves predictions to ``output_folder`` if given.  Returns the evaluation
    results on the main process, None elsewhere.
    """
    device = torch.device('cuda')
    num_devices = get_world_size()
    logger = logging.getLogger('hit.inference')
    dataset = data_loader.dataset
    logger.info('Start evaluation on {} dataset({} videos).'.format(dataset_name, len(dataset)))
    start_time = time.time()
    predictions = compute_on_dataset(model, data_loader, device, logger, mem_active)
    synchronize()
    elapsed = time.time() - start_time
    logger.info('Total inference time: {} ({} s / video per device, on {} devices)'.format(
        str(datetime.timedelta(seconds=elapsed)), (elapsed * num_devices) / len(dataset), num_devices))
    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    if not is_main_process():
        return
    if output_folder:
        torch.save(predictions, os.path.join(output_folder, 'predictions.pth'))
    return evaluate(dataset=dataset, predictions=predictions, output_folder=output_folder)
def do_train(model, data_loader, optimizer, scheduler, checkpointer, device, checkpoint_period, arguments, tblogger, val_period, dataset_names_val, data_loaders_val, distributed, mem_active):
    """Main training loop: forward/backward per batch, optional feature-memory
    updates, periodic logging, checkpointing and mid-training validation.

    ``arguments`` carries resumable state ('iteration', 'person_pool') and is
    mutated in place so checkpoints capture it.
    """
    logger = logging.getLogger('hit.trainer')
    logger.info('Start training')
    meters = MetricLogger(delimiter='  ')
    max_iter = len(data_loader)
    start_iter = arguments['iteration']
    person_pool = arguments['person_pool']
    model.train()
    start_training_time = time.time()
    end = time.time()
    # Last reduced loss; fed back into the memory pool as a staleness tag.
    losses_reduced = torch.tensor(0.0)
    for iteration, (slow_video, fast_video, boxes, objects, keypoints, extras, _) in enumerate(data_loader, start_iter):
        data_time = time.time() - end
        # Iterations are 1-based for logging/checkpoint names.
        iteration = iteration + 1
        arguments['iteration'] = iteration
        slow_video = slow_video.to(device)
        fast_video = fast_video.to(device)
        boxes = [box.to(device) for box in boxes]
        keypoints = [keypoint.to(device) for keypoint in keypoints]
        objects = [None if (box is None) else box.to(device) for box in objects]
        mem_extras = {}
        if mem_active:
            # Hand the cross-clip person-feature pool to the model.
            movie_ids = [e['movie_id'] for e in extras]
            timestamps = [e['timestamp'] for e in extras]
            mem_extras['person_pool'] = person_pool
            mem_extras['movie_ids'] = movie_ids
            mem_extras['timestamps'] = timestamps
            mem_extras['cur_loss'] = losses_reduced.item()
        loss_dict, weight_dict, metric_dict, pooled_feature = model(slow_video, fast_video, boxes, objects, keypoints, mem_extras)
        # Weighted sum of the individual loss terms.
        losses = sum([loss_dict[k] * weight_dict[k] for k in loss_dict])
        loss_dict['total_loss'] = losses.detach().clone()
        # Reduce across ranks so logged values reflect the global average.
        loss_dict_reduced = reduce_dict(loss_dict)
        metric_dict_reduced = reduce_dict(metric_dict)
        meters.update(**loss_dict_reduced, **metric_dict_reduced)
        losses_reduced = loss_dict_reduced.pop('total_loss')
        optimizer.zero_grad()
        losses.backward()
        optimizer.step()
        if mem_active:
            # Refresh the shared pool with this batch's person features.
            person_feature = [ft.to('cpu') for ft in pooled_feature[0]]
            feature_update = MemoryPool()
            for movie_id, timestamp, new_feature in zip(movie_ids, timestamps, person_feature):
                new_feature.add_field('loss_tag', losses_reduced.item())
                feature_update[(movie_id, timestamp)] = new_feature
            feature_update_list = all_gather(feature_update)
            person_pool.update_list(feature_update_list)
        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time, data=data_time)
        eta_seconds = meters.time.global_avg * (max_iter - iteration)
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        if iteration % 20 == 0 or iteration == max_iter:
            logger.info(meters.delimiter.join(['eta: {eta}', 'iter: {iter}', '{meters}', 'lr: {lr:.6f}', 'max mem: {memory:.0f}']).format(eta=eta_string, iter=iteration, meters=str(meters), lr=optimizer.param_groups[0]['lr'], memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0))
            if tblogger is not None:
                for name, meter in meters.meters.items():
                    tblogger.add_scalar(name, meter.median, iteration)
                tblogger.add_scalar('lr', optimizer.param_groups[0]['lr'], iteration)
        scheduler.step()
        if iteration % checkpoint_period == 0:
            checkpointer.save('model_{:07d}'.format(iteration), **arguments)
        if iteration == max_iter:
            # Drop the (large) memory pool before the final checkpoint.
            arguments.pop('person_pool', None)
            checkpointer.save('model_final', **arguments)
        if dataset_names_val and iteration % val_period == 0:
            val_in_train(model, dataset_names_val, data_loaders_val, tblogger, iteration, distributed, mem_active)
        end = time.time()
    total_training_time = time.time() - start_training_time
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info('Total training time: {} ({:.4f} s / it)'.format(total_time_str, total_training_time / max_iter))
def val_in_train(model, dataset_names_val, data_loaders_val, tblogger, iteration, distributed, mem_active):
    """Evaluate on each validation dataset mid-training and log mAP to tensorboard."""
    # Unwrap DistributedDataParallel so inference runs on the bare module.
    model_val = model.module if distributed else model
    for dataset_name, data_loader_val in zip(dataset_names_val, data_loaders_val):
        eval_res = inference(model_val, data_loader_val, dataset_name, mem_active)
        synchronize()
        if tblogger is not None:
            eval_res, _ = eval_res
            total_mAP = eval_res['PascalBoxes_Precision/mAP@0.5IOU']
            tblogger.add_scalar(dataset_name + '_mAP_0.5IOU', total_mAP, iteration)
    # Restore training mode after evaluation switched it off.
    model.train()
class _FrozenBatchNorm(nn.Module):
    """Batch normalization with all statistics and affine parameters frozen.

    weight/bias are registered as buffers (not Parameters) so the optimizer
    never updates them; forward applies a pure per-channel affine transform
    derived from the stored running statistics.
    """

    def __init__(self, num_features, eps=1e-05, affine=True, track_running_stats=True):
        super(_FrozenBatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            # Buffers, not Parameters: frozen weights receive no gradients.
            self.register_buffer('weight', torch.Tensor(num_features))
            self.register_buffer('bias', torch.Tensor(num_features))
        else:
            self.register_buffer('weight', None)
            self.register_buffer('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
        self.reset_parameters()

    def reset_running_stats(self):
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)

    def reset_parameters(self):
        self.reset_running_stats()
        if self.affine:
            self.weight.data.uniform_()
            self.bias.data.zero_()

    def _check_input_dim(self, input):
        # Concrete subclasses enforce the expected tensor rank.
        raise NotImplementedError

    def forward(self, input):
        self._check_input_dim(input)
        # Broadcast the per-channel affine over all remaining dimensions.
        view_shape = (1, self.num_features) + (1,) * (input.dim() - 2)
        if self.track_running_stats:
            scale = self.weight / (self.running_var + self.eps).sqrt()
            bias = self.bias - self.running_mean * scale
        else:
            scale = self.weight
            bias = self.bias
        return scale.view(*view_shape) * input + bias.view(*view_shape)

    def extra_repr(self):
        return '{num_features}, eps={eps}, affine={affine}, track_running_stats={track_running_stats}'.format(**self.__dict__)

    def _load_from_state_dict(self, state_dict, prefix, metadata, strict, missing_keys, unexpected_keys, error_msgs):
        # Regular BatchNorm checkpoints carry num_batches_tracked, which has
        # no counterpart here; drop it before delegating to nn.Module.
        num_batches_tracked_key = prefix + 'num_batches_tracked'
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]
        super(_FrozenBatchNorm, self)._load_from_state_dict(state_dict, prefix, metadata, strict, missing_keys, unexpected_keys, error_msgs)
class FrozenBatchNorm1d(_FrozenBatchNorm):
    """Frozen batch norm for 2D (N, C) or 3D (N, C, L) inputs."""

    def _check_input_dim(self, input):
        rank = input.dim()
        if rank not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'.format(rank))
class FrozenBatchNorm2d(_FrozenBatchNorm):
    """Frozen batch norm for 4D (N, C, H, W) inputs."""

    def _check_input_dim(self, input):
        rank = input.dim()
        if rank != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(rank))
class FrozenBatchNorm3d(_FrozenBatchNorm):
    """Frozen batch norm for 5D (N, C, T, H, W) inputs."""

    def _check_input_dim(self, input):
        rank = input.dim()
        if rank != 5:
            raise ValueError('expected 5D input (got {}D input)'.format(rank))
class _ROIAlign3d(Function):
    """Autograd wrapper around the custom C++/CUDA 3D ROIAlign kernels."""

    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
        # Stash everything backward() needs to invoke the backward kernel.
        ctx.save_for_backward(roi)
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        ctx.input_shape = input.size()
        return _C.roi_align_3d_forward(input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        (rois,) = ctx.saved_tensors
        bs, ch, l, h, w = ctx.input_shape
        grad_input = _C.roi_align_3d_backward(grad_output, rois, ctx.spatial_scale, ctx.output_size[0], ctx.output_size[1], bs, ch, l, h, w, ctx.sampling_ratio)
        # Gradient only w.r.t. the input feature map; other args are non-tensors.
        return (grad_input, None, None, None, None)
class ROIAlign3d(nn.Module):
    """Module wrapper for 3D ROIAlign pooling with fixed output size and scale."""

    def __init__(self, output_size, spatial_scale, sampling_ratio):
        super(ROIAlign3d, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    def forward(self, input, rois):
        return roi_align_3d(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio)

    def __repr__(self):
        return '{}(output_size={}, spatial_scale={}, sampling_ratio={})'.format(
            self.__class__.__name__, self.output_size, self.spatial_scale, self.sampling_ratio)
class _ROIPool3d(Function):
    """Autograd wrapper around the custom C++/CUDA 3D ROI max-pooling kernels."""

    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale):
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.input_shape = input.size()
        output, argmax = _C.roi_pool_3d_forward(input, roi, spatial_scale, output_size[0], output_size[1])
        # argmax records which input voxel won each max; backward routes
        # gradients through it.
        ctx.save_for_backward(input, roi, argmax)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, rois, argmax = ctx.saved_tensors
        bs, ch, l, h, w = ctx.input_shape
        grad_input = _C.roi_pool_3d_backward(grad_output, input, rois, argmax, ctx.spatial_scale, ctx.output_size[0], ctx.output_size[1], bs, ch, l, h, w)
        return (grad_input, None, None, None)
class ROIPool3d(nn.Module):
    """Module wrapper for 3D ROI max pooling with fixed output size and scale."""

    def __init__(self, output_size, spatial_scale):
        super(ROIPool3d, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    def forward(self, input, rois):
        return roi_pool_3d(input, rois, self.output_size, self.spatial_scale)

    def __repr__(self):
        return '{}(output_size={}, spatial_scale={})'.format(
            self.__class__.__name__, self.output_size, self.spatial_scale)
class _SigmoidFocalLoss(Function):
    """Autograd wrapper for the fused sigmoid focal-loss C++/CUDA kernels."""

    @staticmethod
    def forward(ctx, logits, targets, gamma, alpha):
        ctx.save_for_backward(logits, targets)
        ctx.gamma = gamma
        ctx.alpha = alpha
        return _C.sigmoid_focalloss_forward(logits, targets, gamma, alpha)

    @staticmethod
    @once_differentiable
    def backward(ctx, d_loss):
        logits, targets = ctx.saved_tensors
        d_logits = _C.sigmoid_focalloss_backward(logits, targets, d_loss, ctx.gamma, ctx.alpha)
        # No gradients for targets, gamma or alpha.
        return (d_logits, None, None, None)
def sigmoid_focal_loss(logits, targets, gamma, alpha, reduction='mean'):
    """Sigmoid focal loss with optional 'none'/'mean'/'sum' reduction.

    Inputs are cast to float before the fused kernel is applied.
    """
    assert reduction in ['none', 'mean', 'sum'], 'Unsupported reduction type "{}"'.format(reduction)
    per_element = _SigmoidFocalLoss.apply(logits.float(), targets.float(), gamma, alpha)
    if reduction == 'mean':
        return torch.mean(per_element)
    if reduction == 'sum':
        return torch.sum(per_element)
    return per_element
class SigmoidFocalLoss(nn.Module):
    """Module form of sigmoid_focal_loss with fixed gamma/alpha/reduction."""

    def __init__(self, gamma, alpha, reduction='mean'):
        super(SigmoidFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction

    def forward(self, logits, targets):
        return sigmoid_focal_loss(logits, targets, self.gamma, self.alpha, self.reduction)

    def __repr__(self):
        return '{}(gamma={}, alpha={})'.format(self.__class__.__name__, self.gamma, self.alpha)
class _SoftmaxFocalLoss(Function):
    """Autograd wrapper for the fused softmax focal-loss C++/CUDA kernels."""

    @staticmethod
    def forward(ctx, logits, targets, gamma, alpha):
        ctx.gamma = gamma
        ctx.alpha = alpha
        losses, P = _C.softmax_focalloss_forward(logits, targets, gamma, alpha)
        # P holds the softmax probabilities, reused by the backward kernel.
        ctx.save_for_backward(logits, targets, P)
        return losses

    @staticmethod
    @once_differentiable
    def backward(ctx, d_loss):
        logits, targets, P = ctx.saved_tensors
        d_logits = _C.softmax_focalloss_backward(logits, targets, P, d_loss, ctx.gamma, ctx.alpha)
        return (d_logits, None, None, None)
def softmax_focal_loss(logits, targets, gamma, alpha, reduction='mean'):
    """Softmax focal loss; targets are integer class indices.

    Supports 'none'/'mean'/'sum' reduction like sigmoid_focal_loss.
    """
    assert reduction in ['none', 'mean', 'sum'], 'Unsupported reduction type "{}"'.format(reduction)
    per_element = _SoftmaxFocalLoss.apply(logits.float(), targets.int(), gamma, alpha)
    if reduction == 'mean':
        return torch.mean(per_element)
    if reduction == 'sum':
        return torch.sum(per_element)
    return per_element
class SoftmaxFocalLoss(nn.Module):
    """Module form of softmax_focal_loss with fixed gamma/alpha/reduction."""

    def __init__(self, gamma, alpha, reduction='mean'):
        super(SoftmaxFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction

    def forward(self, logits, targets):
        return softmax_focal_loss(logits, targets, self.gamma, self.alpha, self.reduction)

    def __repr__(self):
        return '{}(gamma={}, alpha={})'.format(self.__class__.__name__, self.gamma, self.alpha)
@registry.BACKBONES.register('Slowfast-Resnet50')
@registry.BACKBONES.register('BERT')
@registry.BACKBONES.register('Slowfast-Resnet101')
def build_slowfast_resnet_backbone(cfg):
    """Construct a SlowFast backbone from the config."""
    return slowfast.SlowFast(cfg)
@registry.BACKBONES.register('I3D-Resnet50')
@registry.BACKBONES.register('I3D-Resnet101')
@registry.BACKBONES.register('I3D-Resnet50-Sparse')
@registry.BACKBONES.register('I3D-Resnet101-Sparse')
def build_i3d_resnet_backbone(cfg):
    """Build an I3D ResNet backbone; the depth and sparse variant are resolved
    inside I3D (via get_model_cfg) from cfg.MODEL.BACKBONE.CONV_BODY."""
    model = i3d.I3D(cfg)
    return model
def build_backbone(cfg):
    """Look up the builder registered under cfg.MODEL.BACKBONE.CONV_BODY and
    construct the backbone from the config."""
    conv_body = cfg.MODEL.BACKBONE.CONV_BODY
    assert conv_body in registry.BACKBONES, 'cfg.MODEL.BACKBONE.CONV_BODY: {} are not registered in registry'.format(conv_body)
    builder = registry.BACKBONES[conv_body]
    return builder(cfg)
def get_model_cfg(cfg):
    """Derive the I3D ResNet stage configuration from the config.

    Supports CONV_BODY values 'I3D-Resnet50[-Sparse]' / 'I3D-Resnet101[-Sparse]'.

    Returns a 4-tuple:
        block_config: residual block counts per stage, e.g. (3, 4, 6, 3).
        use_temp_convs_set: per-stage lists flagging temporal convs per block
            (index 0 is the stem).
        temp_strides_set: per-stage lists of temporal strides per block.
        pool_strides_set: [maxpool1 temporal stride, maxpool2 temporal stride,
            final average-pool temporal stride].

    Raises:
        KeyError: for any unsupported CONV_BODY value.
    """
    backbone_strs = cfg.MODEL.BACKBONE.CONV_BODY.split('-')[1:]
    error_msg = 'Model backbone {} is not supported.'.format(cfg.MODEL.BACKBONE.CONV_BODY)
    # Stem and the stages whose layout does not depend on depth.
    use_temp_convs_1 = [2]
    temp_strides_1 = [2]
    max_pool_stride_1 = 2
    use_temp_convs_2 = [1, 1, 1]
    temp_strides_2 = [1, 1, 1]
    max_pool_stride_2 = 2
    use_temp_convs_3 = [1, 0, 1, 0]
    temp_strides_3 = [1, 1, 1, 1]
    use_temp_convs_5 = [0, 1, 0]
    temp_strides_5 = [1, 1, 1]
    # Dense variant downsamples time by 8 overall before the average pool.
    avg_pool_stride = cfg.INPUT.FRAME_NUM // 8
    if backbone_strs[0] == 'Resnet50':
        block_config = (3, 4, 6, 3)
        use_temp_convs_4 = [1, 0, 1, 0, 1, 0]
        temp_strides_4 = [1] * 6
    elif backbone_strs[0] == 'Resnet101':
        block_config = (3, 4, 23, 3)
        # Alternate temporal convs: even-indexed conv4 blocks get one.
        use_temp_convs_4 = [1 if i % 2 == 0 else 0 for i in range(23)]
        temp_strides_4 = [1] * 23
    else:
        raise KeyError(error_msg)
    if len(backbone_strs) > 1:
        if len(backbone_strs) == 2 and backbone_strs[1] == 'Sparse':
            # Sparse variant: no temporal downsampling in the stem.
            temp_strides_1 = [1]
            max_pool_stride_1 = 1
            avg_pool_stride = cfg.INPUT.FRAME_NUM // 2
        else:
            raise KeyError(error_msg)
    use_temp_convs_set = [use_temp_convs_1, use_temp_convs_2, use_temp_convs_3, use_temp_convs_4, use_temp_convs_5]
    temp_strides_set = [temp_strides_1, temp_strides_2, temp_strides_3, temp_strides_4, temp_strides_5]
    pool_strides_set = [max_pool_stride_1, max_pool_stride_2, avg_pool_stride]
    return (block_config, use_temp_convs_set, temp_strides_set, pool_strides_set)
class I3D(nn.Module):
    """Inflated-3D ResNet backbone (I3D) with optional non-local blocks."""

    def __init__(self, cfg):
        super(I3D, self).__init__()
        self.cfg = cfg.clone()
        (block_config, use_temp_convs_set, temp_strides_set, pool_strides_set) = get_model_cfg(cfg)
        conv3_nonlocal = cfg.MODEL.BACKBONE.I3D.CONV3_NONLOCAL
        conv4_nonlocal = cfg.MODEL.BACKBONE.I3D.CONV4_NONLOCAL
        dim_inner = 64
        conv_dims = [64, 256, 512, 1024, 2048]
        # Feature dimension advertised to the ROI heads.
        # NOTE(review): 2304 = 2048 + 256 rather than conv_dims[-1] — presumably
        # accounts for an extra feature source downstream; confirm against the
        # ROI-head implementation.
        self.dim_out = 2304
        (n1, n2, n3, n4) = block_config
        layer_mod = 2
        conv3_nl_mod = layer_mod
        conv4_nl_mod = layer_mod
        # A huge modulus effectively disables non-local insertion for a stage.
        if (not conv3_nonlocal):
            conv3_nl_mod = 1000
        if (not conv4_nonlocal):
            conv4_nl_mod = 1000
        self.c2_mapping = None  # lazily built Caffe2 weight-name map
        data_dim = 3  # RGB input channels
        # Stem conv: temporal kernel is 1 or 5 depending on use_temp_convs_set[0][0]
        # (kernel = 1 + 2*flag), with matching temporal padding.
        self.conv1 = nn.Conv3d(data_dim, conv_dims[0], ((1 + (use_temp_convs_set[0][0] * 2)), 7, 7), stride=(temp_strides_set[0][0], 2, 2), padding=(use_temp_convs_set[0][0], 3, 3), bias=False)
        nn.init.kaiming_normal_(self.conv1.weight)
        if cfg.MODEL.BACKBONE.FROZEN_BN:
            # Frozen BN: affine params fixed at identity.
            self.bn1 = FrozenBatchNorm3d(conv_dims[0], eps=cfg.MODEL.BACKBONE.BN_EPSILON)
            nn.init.constant_(self.bn1.weight, 1.0)
            nn.init.constant_(self.bn1.bias, 0.0)
        else:
            self.bn1 = nn.BatchNorm3d(conv_dims[0], eps=cfg.MODEL.BACKBONE.BN_EPSILON, momentum=cfg.MODEL.BACKBONE.BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool1 = nn.MaxPool3d((pool_strides_set[0], 3, 3), stride=(pool_strides_set[0], 2, 2))
        self.res_nl1 = ResNLBlock(cfg, conv_dims[0], conv_dims[1], stride=1, num_blocks=n1, dim_inner=dim_inner, use_temp_convs=use_temp_convs_set[1], temp_strides=temp_strides_set[1])
        # Temporal-only pooling between conv2 and conv3.
        self.maxpool2 = nn.MaxPool3d((pool_strides_set[1], 1, 1), stride=(pool_strides_set[1], 1, 1))
        self.res_nl2 = ResNLBlock(cfg, conv_dims[1], conv_dims[2], stride=2, num_blocks=n2, dim_inner=(dim_inner * 2), use_temp_convs=use_temp_convs_set[2], temp_strides=temp_strides_set[2], nonlocal_mod=conv3_nl_mod, group_nonlocal=cfg.MODEL.BACKBONE.I3D.CONV3_GROUP_NL)
        self.res_nl3 = ResNLBlock(cfg, conv_dims[2], conv_dims[3], stride=2, num_blocks=n3, dim_inner=(dim_inner * 4), use_temp_convs=use_temp_convs_set[3], temp_strides=temp_strides_set[3], nonlocal_mod=conv4_nl_mod)
        # conv5 keeps spatial stride 1 and uses dilation 2 instead.
        self.res_nl4 = ResNLBlock(cfg, conv_dims[3], conv_dims[4], stride=1, num_blocks=n4, dim_inner=(dim_inner * 8), use_temp_convs=use_temp_convs_set[4], temp_strides=temp_strides_set[4], dilation=2)

    def forward(self, _, x):
        # First positional argument is unused — the slot keeps interface parity
        # with the two-input SlowFast backbone.
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.maxpool1(out)
        out = self.res_nl1(out)
        out = self.maxpool2(out)
        out = self.res_nl2(out)
        out = self.res_nl3(out)
        out = self.res_nl4(out)
        # (slow, fast)-shaped return for parity with SlowFast; slow slot unused.
        return (None, out)

    def c2_weight_mapping(self):
        # Build (once) the mapping from PyTorch parameter names to Caffe2 blob
        # names, then cache it.
        if (self.c2_mapping is None):
            weight_map = {'conv1.weight': 'conv1_w', 'bn1.weight': 'res_conv1_bn_s', 'bn1.bias': 'res_conv1_bn_b', 'bn1.running_mean': 'res_conv1_bn_rm', 'bn1.running_var': 'res_conv1_bn_riv'}
            for i in range(1, 5):
                name = 'res_nl{}'.format(i)
                child_map = getattr(self, name).c2_weight_mapping()
                for (key, val) in child_map.items():
                    new_key = ((name + '.') + key)
                    # Child values contain a '{}' placeholder; fill it with the
                    # Caffe2 stage number (stage numbering starts at res2).
                    weight_map[new_key] = val.format((i + 1))
            self.c2_mapping = weight_map
        return self.c2_mapping
def get_slow_model_cfg(cfg):
    """Stage configuration for the SlowFast slow pathway.

    Returns (block_config, use_temp_convs_set, temp_strides_set,
    pool_strides_set) where pool_strides_set is [maxpool1 stride,
    final average-pool temporal stride].

    Raises KeyError for unsupported CONV_BODY values.
    """
    conv_body = cfg.MODEL.BACKBONE.CONV_BODY
    backbone_strs = conv_body.split('-')[1:]
    error_msg = 'Model backbone {} is not supported.'.format(conv_body)
    # The slow path uses no temporal convs in the early stages.
    use_temp_convs_1, temp_strides_1 = [0], [1]
    max_pool_stride_1 = 1
    use_temp_convs_2, temp_strides_2 = [0, 0, 0], [1, 1, 1]
    use_temp_convs_3, temp_strides_3 = [0, 0, 0, 0], [1, 1, 1, 1]
    use_temp_convs_5, temp_strides_5 = [1, 1, 1], [1, 1, 1]
    slow_stride = cfg.INPUT.TAU
    avg_pool_stride = int(cfg.INPUT.FRAME_NUM / slow_stride)
    depth = backbone_strs[0]
    if depth == 'Resnet50':
        block_config = (3, 4, 6, 3)
    elif depth == 'Resnet101':
        block_config = (3, 4, 23, 3)
    else:
        raise KeyError(error_msg)
    # conv4 uses a temporal conv in every block; block count comes from the depth.
    use_temp_convs_4 = [1] * block_config[2]
    temp_strides_4 = [1] * block_config[2]
    # No suffix variants (e.g. '-Sparse') are supported for the slow path.
    if len(backbone_strs) > 1:
        raise KeyError(error_msg)
    use_temp_convs_set = [use_temp_convs_1, use_temp_convs_2, use_temp_convs_3, use_temp_convs_4, use_temp_convs_5]
    temp_strides_set = [temp_strides_1, temp_strides_2, temp_strides_3, temp_strides_4, temp_strides_5]
    pool_strides_set = [max_pool_stride_1, avg_pool_stride]
    return (block_config, use_temp_convs_set, temp_strides_set, pool_strides_set)
def get_fast_model_cfg(cfg):
    """Stage configuration for the SlowFast fast pathway.

    Returns (block_config, use_temp_convs_set, temp_strides_set,
    pool_strides_set) where pool_strides_set is [maxpool1 stride,
    final average-pool temporal stride].

    Raises KeyError for unsupported CONV_BODY values.
    """
    conv_body = cfg.MODEL.BACKBONE.CONV_BODY
    backbone_strs = conv_body.split('-')[1:]
    error_msg = 'Model backbone {} is not supported.'.format(conv_body)
    # The fast path keeps temporal convs throughout and never strides over time.
    use_temp_convs_1, temp_strides_1 = [2], [1]
    max_pool_stride_1 = 1
    use_temp_convs_2, temp_strides_2 = [1, 1, 1], [1, 1, 1]
    use_temp_convs_3, temp_strides_3 = [1, 1, 1, 1], [1, 1, 1, 1]
    use_temp_convs_5, temp_strides_5 = [1, 1, 1], [1, 1, 1]
    # Fast path samples every TAU/ALPHA-th frame.
    fast_stride = cfg.INPUT.TAU // cfg.INPUT.ALPHA
    avg_pool_stride = int(cfg.INPUT.FRAME_NUM / fast_stride)
    depth = backbone_strs[0]
    if depth == 'Resnet50':
        block_config = (3, 4, 6, 3)
    elif depth == 'Resnet101':
        block_config = (3, 4, 23, 3)
    else:
        raise KeyError(error_msg)
    # conv4 uses a temporal conv in every block; block count comes from the depth.
    use_temp_convs_4 = [1] * block_config[2]
    temp_strides_4 = [1] * block_config[2]
    # No suffix variants are supported for the fast path.
    if len(backbone_strs) > 1:
        raise KeyError(error_msg)
    use_temp_convs_set = [use_temp_convs_1, use_temp_convs_2, use_temp_convs_3, use_temp_convs_4, use_temp_convs_5]
    temp_strides_set = [temp_strides_1, temp_strides_2, temp_strides_3, temp_strides_4, temp_strides_5]
    pool_strides_set = [max_pool_stride_1, avg_pool_stride]
    return (block_config, use_temp_convs_set, temp_strides_set, pool_strides_set)
class LateralBlock(nn.Module):
    """Time-strided conv mapping fast-path features into the slow path.

    Doubles the channel count and temporally downsamples by ``alpha`` so the
    output lines up with the slow pathway's frame rate. Spatial dims are
    untouched (1x1 spatial kernel).
    """

    def __init__(self, conv_dim, alpha):
        super(LateralBlock, self).__init__()
        self.conv = nn.Conv3d(conv_dim, conv_dim * 2, kernel_size=(5, 1, 1), stride=(alpha, 1, 1), padding=(2, 0, 0), bias=True)
        # He init for the weight, zero bias.
        nn.init.kaiming_normal_(self.conv.weight)
        nn.init.constant_(self.conv.bias, 0.0)

    def forward(self, x):
        return self.conv(x)
class FastPath(nn.Module):
    """Fast pathway of SlowFast: high frame rate, low channel capacity."""

    def __init__(self, cfg):
        super(FastPath, self).__init__()
        self.cfg = cfg.clone()
        (block_config, use_temp_convs_set, temp_strides_set, pool_strides_set) = get_fast_model_cfg(cfg)
        conv3_nonlocal = cfg.MODEL.BACKBONE.SLOWFAST.FAST.CONV3_NONLOCAL
        conv4_nonlocal = cfg.MODEL.BACKBONE.SLOWFAST.FAST.CONV4_NONLOCAL
        # Fast path uses 1/8 the channels of the slow path (8..256 vs 64..2048).
        dim_inner = 8
        conv_dims = [8, 32, 64, 128, 256]
        self.dim_out = conv_dims[(- 1)]
        (n1, n2, n3, n4) = block_config
        layer_mod = 2
        conv3_nl_mod = layer_mod
        conv4_nl_mod = layer_mod
        # A huge modulus effectively disables non-local insertion for a stage.
        if (not conv3_nonlocal):
            conv3_nl_mod = 1000
        if (not conv4_nonlocal):
            conv4_nl_mod = 1000
        self.c2_mapping = None  # reserved for a lazily built Caffe2 name map
        # Stem conv: temporal kernel is 1 + 2*flag with matching padding.
        self.conv1 = nn.Conv3d(3, conv_dims[0], ((1 + (use_temp_convs_set[0][0] * 2)), 7, 7), stride=(temp_strides_set[0][0], 2, 2), padding=(use_temp_convs_set[0][0], 3, 3), bias=False)
        nn.init.kaiming_normal_(self.conv1.weight)
        if cfg.MODEL.BACKBONE.FROZEN_BN:
            self.bn1 = FrozenBatchNorm3d(conv_dims[0], eps=cfg.MODEL.BACKBONE.BN_EPSILON)
            nn.init.constant_(self.bn1.weight, 1.0)
            nn.init.constant_(self.bn1.bias, 0.0)
        else:
            self.bn1 = nn.BatchNorm3d(conv_dims[0], eps=cfg.MODEL.BACKBONE.BN_EPSILON, momentum=cfg.MODEL.BACKBONE.BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool1 = nn.MaxPool3d((pool_strides_set[0], 3, 3), stride=(pool_strides_set[0], 2, 2))
        self.res_nl1 = ResNLBlock(cfg, conv_dims[0], conv_dims[1], stride=1, num_blocks=n1, dim_inner=dim_inner, use_temp_convs=use_temp_convs_set[1], temp_strides=temp_strides_set[1])
        self.res_nl2 = ResNLBlock(cfg, conv_dims[1], conv_dims[2], stride=2, num_blocks=n2, dim_inner=(dim_inner * 2), use_temp_convs=use_temp_convs_set[2], temp_strides=temp_strides_set[2], nonlocal_mod=conv3_nl_mod, group_nonlocal=cfg.MODEL.BACKBONE.SLOWFAST.FAST.CONV3_GROUP_NL)
        self.res_nl3 = ResNLBlock(cfg, conv_dims[2], conv_dims[3], stride=2, num_blocks=n3, dim_inner=(dim_inner * 4), use_temp_convs=use_temp_convs_set[3], temp_strides=temp_strides_set[3], nonlocal_mod=conv4_nl_mod)
        # conv5: spatial stride 1 with dilation 2 (keeps resolution for detection).
        self.res_nl4 = ResNLBlock(cfg, conv_dims[3], conv_dims[4], stride=1, num_blocks=n4, dim_inner=(dim_inner * 8), use_temp_convs=use_temp_convs_set[4], temp_strides=temp_strides_set[4], dilation=2)
        if (cfg.MODEL.BACKBONE.SLOWFAST.LATERAL == 'tconv'):
            self._tconv(conv_dims)

    def _tconv(self, conv_dims):
        # Lateral time-strided convs feeding fast features into the slow path,
        # one per stage boundary.
        alpha = self.cfg.INPUT.ALPHA
        self.Tconv1 = LateralBlock(conv_dims[0], alpha)
        self.Tconv2 = LateralBlock(conv_dims[1], alpha)
        self.Tconv3 = LateralBlock(conv_dims[2], alpha)
        self.Tconv4 = LateralBlock(conv_dims[3], alpha)

    def forward(self, x):
        # Returns (final features, [lateral features for the slow path]).
        # NOTE(review): Tconv1..4 only exist when
        # cfg.MODEL.BACKBONE.SLOWFAST.LATERAL == 'tconv'; forward uses them
        # unconditionally — confirm other LATERAL modes never reach here.
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.maxpool1(out)
        tconv1 = self.Tconv1(out)
        out = self.res_nl1(out)
        tconv2 = self.Tconv2(out)
        out = self.res_nl2(out)
        tconv3 = self.Tconv3(out)
        out = self.res_nl3(out)
        tconv4 = self.Tconv4(out)
        out = self.res_nl4(out)
        return (out, [tconv1, tconv2, tconv3, tconv4])
class SlowPath(nn.Module):
    """Slow pathway of SlowFast: low frame rate, full channel capacity, with
    optional lateral connections concatenated in from the fast pathway."""

    def __init__(self, cfg):
        super(SlowPath, self).__init__()
        self.cfg = cfg.clone()
        (block_config, use_temp_convs_set, temp_strides_set, pool_strides_set) = get_slow_model_cfg(cfg)
        conv3_nonlocal = cfg.MODEL.BACKBONE.SLOWFAST.SLOW.CONV3_NONLOCAL
        conv4_nonlocal = cfg.MODEL.BACKBONE.SLOWFAST.SLOW.CONV4_NONLOCAL
        dim_inner = 64
        conv_dims = [64, 256, 512, 1024, 2048]
        self.dim_out = conv_dims[(- 1)]
        (n1, n2, n3, n4) = block_config
        layer_mod = 2
        conv3_nl_mod = layer_mod
        conv4_nl_mod = layer_mod
        # A huge modulus effectively disables non-local insertion for a stage.
        if (not conv3_nonlocal):
            conv3_nl_mod = 1000
        if (not conv4_nonlocal):
            conv4_nl_mod = 1000
        self.c2_mapping = None  # reserved for a lazily built Caffe2 name map
        # Stem conv: temporal kernel is 1 + 2*flag with matching padding.
        self.conv1 = nn.Conv3d(3, conv_dims[0], ((1 + (use_temp_convs_set[0][0] * 2)), 7, 7), stride=(temp_strides_set[0][0], 2, 2), padding=(use_temp_convs_set[0][0], 3, 3), bias=False)
        nn.init.kaiming_normal_(self.conv1.weight)
        if cfg.MODEL.BACKBONE.FROZEN_BN:
            self.bn1 = FrozenBatchNorm3d(conv_dims[0], eps=cfg.MODEL.BACKBONE.BN_EPSILON)
            nn.init.constant_(self.bn1.weight, 1.0)
            nn.init.constant_(self.bn1.bias, 0.0)
        else:
            self.bn1 = nn.BatchNorm3d(conv_dims[0], eps=cfg.MODEL.BACKBONE.BN_EPSILON, momentum=cfg.MODEL.BACKBONE.BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool1 = nn.MaxPool3d((pool_strides_set[0], 3, 3), stride=(pool_strides_set[0], 2, 2))
        # lateral=FAST.ACTIVE: when the fast path runs, each stage's first block
        # receives extra concatenated channels (see ResNLBlock's dim_in0 logic).
        self.res_nl1 = ResNLBlock(cfg, conv_dims[0], conv_dims[1], stride=1, num_blocks=n1, dim_inner=dim_inner, use_temp_convs=use_temp_convs_set[1], temp_strides=temp_strides_set[1], lateral=cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE)
        self.res_nl2 = ResNLBlock(cfg, conv_dims[1], conv_dims[2], stride=2, num_blocks=n2, dim_inner=(dim_inner * 2), use_temp_convs=use_temp_convs_set[2], temp_strides=temp_strides_set[2], nonlocal_mod=conv3_nl_mod, group_nonlocal=cfg.MODEL.BACKBONE.SLOWFAST.SLOW.CONV3_GROUP_NL, lateral=cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE)
        self.res_nl3 = ResNLBlock(cfg, conv_dims[2], conv_dims[3], stride=2, num_blocks=n3, dim_inner=(dim_inner * 4), use_temp_convs=use_temp_convs_set[3], temp_strides=temp_strides_set[3], nonlocal_mod=conv4_nl_mod, lateral=cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE)
        # conv5: spatial stride 1 with dilation 2.
        self.res_nl4 = ResNLBlock(cfg, conv_dims[3], conv_dims[4], stride=1, num_blocks=n4, dim_inner=(dim_inner * 8), use_temp_convs=use_temp_convs_set[4], temp_strides=temp_strides_set[4], lateral=cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE, dilation=2)

    def forward(self, x, lateral_connection=None):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.maxpool1(out)
        # Before each stage, concatenate the fast path's lateral features along
        # the channel dimension (when provided).
        if lateral_connection:
            out = torch.cat([out, lateral_connection[0]], dim=1)
        out = self.res_nl1(out)
        if lateral_connection:
            out = torch.cat([out, lateral_connection[1]], dim=1)
        out = self.res_nl2(out)
        if lateral_connection:
            out = torch.cat([out, lateral_connection[2]], dim=1)
        out = self.res_nl3(out)
        if lateral_connection:
            out = torch.cat([out, lateral_connection[3]], dim=1)
        out = self.res_nl4(out)
        return out
class SlowFast(nn.Module):
    """Two-pathway SlowFast backbone; either pathway can be disabled via config.

    Exposes ``dim_out`` (sum of the active pathways' output dims) for the
    ROI heads.
    """

    def __init__(self, cfg):
        super(SlowFast, self).__init__()
        self.cfg = cfg.clone()
        slow_active = cfg.MODEL.BACKBONE.SLOWFAST.SLOW.ACTIVE
        fast_active = cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE
        if slow_active:
            self.slow = SlowPath(cfg)
        if fast_active:
            self.fast = FastPath(cfg)
        # Concatenated feature dimension advertised to the ROI heads.
        if slow_active and fast_active:
            self.dim_out = self.slow.dim_out + self.fast.dim_out
        elif slow_active:
            self.dim_out = self.slow.dim_out
        elif fast_active:
            self.dim_out = self.fast.dim_out
        else:
            # Previously this silently left dim_out unset and failed much later
            # with an opaque AttributeError; fail fast at construction instead.
            raise ValueError('At least one of cfg.MODEL.BACKBONE.SLOWFAST.SLOW.ACTIVE or cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE must be True.')

    def forward(self, slow_x, fast_x):
        """Return (slow features, fast features); inactive pathways yield None."""
        cfg = self.cfg
        tconv = None
        slowout = None
        fastout = None
        # The fast path runs first so its lateral features can feed the slow path.
        if cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE:
            (fastout, tconv) = self.fast(fast_x)
        if cfg.MODEL.BACKBONE.SLOWFAST.SLOW.ACTIVE:
            slowout = self.slow(slow_x, tconv)
        return (slowout, fastout)
class Conv3dBN(nn.Module):
    """3D convolution followed by (optionally frozen) batch norm; no activation.

    ``init_weight``, when given, overrides the BN gamma (e.g. zero-init for the
    last BN of a residual branch).
    """

    def __init__(self, cfg, dim_in, dim_out, kernels, stride, padding, dilation=1, init_weight=None):
        super(Conv3dBN, self).__init__()
        self.conv = nn.Conv3d(dim_in, dim_out, kernels, stride=stride, padding=padding, dilation=dilation, bias=False)
        nn.init.kaiming_normal_(self.conv.weight)
        if cfg.MODEL.BACKBONE.FROZEN_BN:
            # Frozen BN: fixed statistics, affine params initialised to identity.
            self.bn = FrozenBatchNorm3d(dim_out, eps=cfg.MODEL.BACKBONE.BN_EPSILON)
            nn.init.constant_(self.bn.weight, 1.0)
            nn.init.constant_(self.bn.bias, 0.0)
        else:
            self.bn = nn.BatchNorm3d(dim_out, eps=cfg.MODEL.BACKBONE.BN_EPSILON, momentum=cfg.MODEL.BACKBONE.BN_MOMENTUM)
        if init_weight is not None:
            nn.init.constant_(self.bn.weight, init_weight)

    def forward(self, x):
        return self.bn(self.conv(x))

    def c2_weight_mapping(self):
        # Caffe2 blob-name suffixes for this conv+bn pair.
        return {'conv.weight': 'w', 'bn.weight': 'bn_s', 'bn.bias': 'bn_b', 'bn.running_mean': 'bn_rm', 'bn.running_var': 'bn_riv'}
class Bottleneck(nn.Module):
    """Three-conv residual bottleneck: temporal conv1, spatial 3x3 conv2,
    1x1x1 conv3.

    No final ReLU here — the enclosing ResBlock applies it after adding the
    shortcut.
    """

    def __init__(self, cfg, dim_in, dim_out, dim_inner, stride, dilation=1, use_temp_conv=1, temp_stride=1):
        super(Bottleneck, self).__init__()
        # Temporal kernel is 1 or 3 depending on the use_temp_conv flag.
        temp_kernel = 1 + use_temp_conv * 2
        self.conv1 = Conv3dBN(cfg, dim_in, dim_inner, (temp_kernel, 1, 1), stride=(temp_stride, 1, 1), padding=(use_temp_conv, 0, 0))
        self.conv2 = Conv3dBN(cfg, dim_inner, dim_inner, (1, 3, 3), stride=(1, stride, stride), dilation=(1, dilation, dilation), padding=(0, dilation, dilation))
        # Gamma-init on the last BN so the residual branch can start near zero.
        self.conv3 = Conv3dBN(cfg, dim_inner, dim_out, (1, 1, 1), stride=(1, 1, 1), padding=0, init_weight=cfg.MODEL.BACKBONE.BN_INIT_GAMMA)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu(self.conv1(x))
        out = self.relu(self.conv2(out))
        return self.conv3(out)

    def c2_weight_mapping(self):
        # conv1/conv2/conv3 map to Caffe2's branch2a/branch2b/branch2c prefixes.
        mapping = {}
        for idx, letter in zip(range(1, 4), 'abc'):
            layer_name = 'conv{}'.format(idx)
            for key, val in getattr(self, layer_name).c2_weight_mapping().items():
                mapping['{}.{}'.format(layer_name, key)] = 'branch2{}_{}'.format(letter, val)
        return mapping
class ResBlock(nn.Module):
    """Residual block: bottleneck branch plus an identity or projection shortcut."""

    def __init__(self, cfg, dim_in, dim_out, dim_inner, stride, dilation=1, use_temp_conv=0, temp_stride=1, need_shortcut=False):
        super(ResBlock, self).__init__()
        self.btnk = Bottleneck(cfg, dim_in, dim_out, dim_inner=dim_inner, stride=stride, dilation=dilation, use_temp_conv=use_temp_conv, temp_stride=temp_stride)
        # Identity shortcut unless dims/strides require a 1x1x1 projection.
        if need_shortcut:
            self.shortcut = Conv3dBN(cfg, dim_in, dim_out, (1, 1, 1), stride=(temp_stride, stride, stride), padding=0)
        else:
            self.shortcut = None
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        residual = self.btnk(x)
        identity = x if self.shortcut is None else self.shortcut(x)
        return self.relu(residual + identity)

    def c2_weight_mapping(self):
        weight_map = {}
        for name, child in self.named_children():
            if not child.state_dict():
                # Skip parameter-less children (the ReLU).
                continue
            # The projection shortcut maps to Caffe2's branch1 prefix.
            prefix = 'branch1_' if isinstance(child, Conv3dBN) else ''
            for key, val in child.c2_weight_mapping().items():
                weight_map['{}.{}'.format(name, key)] = prefix + val
        return weight_map
class ResNLBlock(nn.Module):
    """One ResNet stage: ``num_blocks`` ResBlocks with non-local (NL) blocks
    interleaved every ``nonlocal_mod`` blocks (1000 effectively disables them)."""

    def __init__(self, cfg, dim_in, dim_out, stride, num_blocks, dim_inner, use_temp_convs, temp_strides, dilation=1, nonlocal_mod=1000, group_nonlocal=False, lateral=False):
        super(ResNLBlock, self).__init__()
        self.blocks = []  # ordered child-module names, replayed in forward()
        for idx in range(num_blocks):
            block_name = 'res_{}'.format(idx)
            # Only the first block of a stage downsamples spatially.
            block_stride = (stride if (idx == 0) else 1)
            block_dilation = dilation
            # With a concatenating lateral connection, the first block receives
            # extra channels (2 * BETA * dim_in) from the fast pathway;
            # 'ttoc_sum' adds instead of concatenating, leaving dims unchanged.
            dim_in0 = ((dim_in + int(((dim_in * cfg.MODEL.BACKBONE.SLOWFAST.BETA) * 2))) if (lateral and (idx == 0) and (cfg.MODEL.BACKBONE.SLOWFAST.LATERAL != 'ttoc_sum')) else dim_in)
            # Projection shortcut needed on any dim or stride mismatch, or when
            # the stage dilates (first block only).
            need_shortcut = ((not ((dim_in0 == dim_out) and (temp_strides[idx] == 1) and (block_stride == 1))) or ((idx == 0) and (dilation != 1)))
            res_module = ResBlock(cfg, dim_in0, dim_out, dim_inner=dim_inner, stride=block_stride, dilation=block_dilation, use_temp_conv=use_temp_convs[idx], temp_stride=temp_strides[idx], need_shortcut=need_shortcut)
            self.add_module(block_name, res_module)
            self.blocks.append(block_name)
            dim_in = dim_out
            # Insert an NL block after every nonlocal_mod-th residual block.
            if ((idx % nonlocal_mod) == (nonlocal_mod - 1)):
                nl_block_name = 'nonlocal_{}'.format(idx)
                nl_module = NLBlock(dim_in, dim_in, int((dim_in / 2)), cfg.MODEL.NONLOCAL, group=group_nonlocal)
                self.add_module(nl_block_name, nl_module)
                self.blocks.append(nl_block_name)

    def forward(self, x):
        # Apply children strictly in insertion order.
        for layer_name in self.blocks:
            x = getattr(self, layer_name)(x)
        return x

    def c2_weight_mapping(self):
        weight_map = {}
        for (name, m_child) in self.named_children():
            # Block index recovered from the child name ('res_3' -> '3').
            idx = name.split('_')[(- 1)]
            if m_child.state_dict():
                child_map = m_child.c2_weight_mapping()
                for (key, val) in child_map.items():
                    new_key = ((name + '.') + key)
                    # The leading '{}' placeholder is later filled with the
                    # Caffe2 stage number by the parent (e.g. I3D's mapping).
                    if isinstance(m_child, NLBlock):
                        prefix = ('nonlocal_conv{}_' + '{}_'.format(idx))
                    else:
                        prefix = ('res{}_' + '{}_'.format(idx))
                    weight_map[new_key] = (prefix + val)
        return weight_map
class ActionDetector(nn.Module):
    """Top-level action-detection model: video backbone + 3D ROI heads."""

    def __init__(self, cfg):
        super(ActionDetector, self).__init__()
        self.backbone = build_backbone(cfg)
        self.roi_heads = build_3d_roi_heads(cfg, self.backbone.dim_out)

    def forward(self, slow_video, fast_video, boxes, objects=None, keypoints=None, extras=None, part_forward=-1):
        """Run detection.

        part_forward == 1 skips the backbone (features are expected to come in
        via ``extras``); any other value runs the full pipeline. In training
        mode returns (losses, loss_weight, metrics, result); otherwise the
        detection result only.
        """
        # Bug fix: the default used to be the mutable literal `extras={}`,
        # which is shared across calls and can leak state between forwards.
        if extras is None:
            extras = {}
        if part_forward == 1:
            slow_features = fast_features = None
        else:
            (slow_features, fast_features) = self.backbone(slow_video, fast_video)
        (result, detector_losses, loss_weight, detector_metrics) = self.roi_heads(slow_features, fast_features, boxes, objects, keypoints, extras, part_forward)
        if self.training:
            return (detector_losses, loss_weight, detector_metrics, result)
        return result

    def c2_weight_mapping(self):
        # Aggregate the children's Caffe2 name maps once and cache the result.
        if not hasattr(self, 'c2_mapping'):
            weight_map = {}
            for (name, m_child) in self.named_children():
                if m_child.state_dict() and hasattr(m_child, 'c2_weight_mapping'):
                    for (key, val) in m_child.c2_weight_mapping().items():
                        weight_map['{}.{}'.format(name, key)] = val
            self.c2_mapping = weight_map
        return self.c2_mapping
def build_detection_model(cfg):
    """Factory: construct the full ActionDetector from a config node."""
    return ActionDetector(cfg)
class NLBlock(nn.Module):
    """Non-local block: self-attention over space-time features with a
    residual connection (theta/phi/g projections, optional softmax and BN)."""

    def __init__(self, dim_in, dim_out, dim_inner, nl_cfg, group=False):
        super(NLBlock, self).__init__()
        self.nl_cfg = nl_cfg.clone()
        # group=True folds chunks of group_size frames into the batch dim so
        # attention is computed within each temporal chunk.
        self.group = group
        self.group_size = 4
        init_std = nl_cfg.CONV_INIT_STD
        bias = (not nl_cfg.NO_BIAS)
        pool_stride = 2
        # 1/sqrt(dim_inner) scaling for the dot-product attention logits.
        self.scale_value = (dim_inner ** (- 0.5))
        self.dim_inner = dim_inner
        self.theta = nn.Conv3d(dim_in, dim_inner, 1, bias=bias)
        nn.init.normal_(self.theta.weight, std=init_std)
        if bias:
            nn.init.constant_(self.theta.bias, 0)
        if nl_cfg.USE_MAXPOOL:
            # Spatial subsampling of keys/values cuts the attention cost by 4x.
            self.maxpool = nn.MaxPool3d((1, pool_stride, pool_stride), stride=(1, pool_stride, pool_stride))
        self.phi = nn.Conv3d(dim_in, dim_inner, 1, bias=bias)
        nn.init.normal_(self.phi.weight, std=init_std)
        if bias:
            nn.init.constant_(self.phi.bias, 0)
        self.g = nn.Conv3d(dim_in, dim_inner, 1, bias=bias)
        nn.init.normal_(self.g.weight, std=init_std)
        if bias:
            nn.init.constant_(self.g.bias, 0)
        if nl_cfg.USE_SOFTMAX:
            self.softmax = nn.Softmax(dim=2)
        self.out = nn.Conv3d(dim_inner, dim_out, 1, bias=bias)
        if nl_cfg.USE_ZERO_INIT_CONV:
            # Zero-init makes the whole block start as an identity mapping.
            nn.init.constant_(self.out.weight, 0)
        else:
            nn.init.normal_(self.out.weight, std=init_std)
        if bias:
            nn.init.constant_(self.out.bias, 0)
        if nl_cfg.USE_BN:
            if nl_cfg.FROZEN_BN:
                self.bn = FrozenBatchNorm3d(dim_out, eps=nl_cfg.BN_EPSILON)
            else:
                self.bn = nn.BatchNorm3d(dim_out, eps=nl_cfg.BN_EPSILON, momentum=nl_cfg.BN_MOMENTUM)
            nn.init.constant_(self.bn.weight, nl_cfg.BN_INIT_GAMMA)

    def forward(self, x):
        # x: expected (N, C, T, H, W).
        if (x.dim() != 5):
            raise ValueError('expected 4D or 5D input (got {}D input)'.format(x.dim()))
        if self.group:
            # Fold chunks of group_size frames into the batch dimension.
            x = x.transpose(1, 2)
            sz_before_group = list(x.shape)
            sz_after_group = sz_before_group.copy()
            sz_after_group[0] = (- 1)
            sz_after_group[1] = self.group_size
            x = x.contiguous().view(*sz_after_group)
            x = x.transpose(1, 2)
        batch_size = x.shape[0]
        theta = self.theta(x)
        if self.nl_cfg.USE_MAXPOOL:
            max_pool = self.maxpool(x)
        else:
            max_pool = x
        phi = self.phi(max_pool)
        g = self.g(max_pool)
        org_size = theta.size()
        mat_size = [batch_size, self.dim_inner, (- 1)]
        # Flatten space-time into matrices for batched attention.
        theta = theta.view(*mat_size)
        phi = phi.view(*mat_size)
        g = g.view(*mat_size)
        # Attention logits: (positions_q x positions_kv).
        theta_phi = torch.bmm(theta.transpose(1, 2), phi)
        if self.nl_cfg.USE_SOFTMAX:
            if self.nl_cfg.USE_SCALE:
                theta_phi_sc = (theta_phi * self.scale_value)
            else:
                theta_phi_sc = theta_phi
            p = self.softmax(theta_phi_sc)
        else:
            # Dot-product variant: normalize by the number of key positions.
            p = (theta_phi / theta_phi.shape[(- 1)])
        t = torch.bmm(g, p.transpose(1, 2))
        t = t.view(org_size)
        out = self.out(t)
        if self.nl_cfg.USE_BN:
            out = self.bn(out)
        # Residual connection.
        out = (out + x)
        if self.group:
            # Unfold the grouped frames back to the original layout.
            out = out.transpose(1, 2)
            out = out.contiguous().view(*sz_before_group)
            out = out.transpose(1, 2)
        return out

    def c2_weight_mapping(self):
        # Map child parameter names to Caffe2 blob suffixes by layer type.
        weight_map = {}
        for (name, m_child) in self.named_children():
            if m_child.state_dict():
                if isinstance(m_child, (nn.BatchNorm3d, FrozenBatchNorm3d)):
                    weight_map[(name + '.weight')] = '{}_s'.format(name)
                    weight_map[(name + '.running_mean')] = '{}_rm'.format(name)
                    weight_map[(name + '.running_var')] = '{}_riv'.format(name)
                elif isinstance(m_child, nn.GroupNorm):
                    weight_map[(name + '.weight')] = '{}_s'.format(name)
                else:
                    weight_map[(name + '.weight')] = '{}_w'.format(name)
                weight_map[(name + '.bias')] = '{}_b'.format(name)
        return weight_map
class Pooler3d(nn.Module):
    """Pools per-box 3D features via ROIAlign3d or ROIPool3d."""

    def __init__(self, output_size, scale, sampling_ratio=None, pooler_type='align3d'):
        super(Pooler3d, self).__init__()
        if pooler_type == 'align3d':
            assert sampling_ratio is not None, 'Sampling ratio should be specified for 3d roi align.'
            self.pooler = ROIAlign3d(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio)
        elif pooler_type == 'pooling3d':
            self.pooler = ROIPool3d(output_size, spatial_scale=scale)
        self.output_size = output_size

    def convert_to_roi_format(self, boxes, dtype, device):
        """Concatenate per-clip box lists into one (N, 5) roi tensor whose
        first column is the batch index, on the given dtype/device."""
        bbox_list = []
        ids_list = []
        for (i, b) in enumerate(boxes):
            if not b:
                # Empty box list for this clip: contribute zero-length tensors
                # so batch indices stay aligned with `boxes`.
                bbox_list.append(torch.zeros((0, 4), dtype=dtype, device=device))
                ids_list.append(torch.zeros((0, 1), dtype=dtype, device=device))
            else:
                # Bug fix: this used to hard-code `.to('cuda')`, which broke CPU
                # execution; move the boxes to the feature tensor's device instead.
                b.bbox = b.bbox.to(device)
                bbox_list.append(b.bbox)
                ids_list.append(torch.full((len(b), 1), i, dtype=dtype, device=device))
        concat_boxes = torch.cat(bbox_list, dim=0)
        ids = torch.cat(ids_list, dim=0)
        return torch.cat([ids, concat_boxes], dim=1)

    def forward(self, x, boxes):
        rois = self.convert_to_roi_format(boxes, x.dtype, x.device)
        return self.pooler(x, rois)
def make_3d_pooler(head_cfg):
    """Build a Pooler3d from a head config node (resolution, scale, sampling
    ratio, and pooler type)."""
    resolution = head_cfg.POOLER_RESOLUTION
    return Pooler3d(
        output_size=(resolution, resolution),
        scale=head_cfg.POOLER_SCALE,
        sampling_ratio=head_cfg.POOLER_SAMPLING_RATIO,
        pooler_type=head_cfg.POOLER_TYPE,
    )