code stringlengths 17 6.64M |
|---|
class Fnode(nn.Module):
    """Torchscript-typing-friendly stand-in for nn.Sequential.

    Wraps a `combine` module (List[Tensor] -> Tensor) followed by an
    `after_combine` module (Tensor -> Tensor) so the input/output types
    are explicit for scripting.
    """

    def __init__(self, combine: nn.Module, after_combine: nn.Module):
        super(Fnode, self).__init__()
        self.combine = combine
        self.after_combine = after_combine

    def forward(self, x: List[torch.Tensor]) -> torch.Tensor:
        fused = self.combine(x)
        return self.after_combine(fused)
|
class BiFpnLayer(nn.Module):
    """A single BiFPN cell: one Fnode (combine + conv) per fusion node in fpn_config."""

    def __init__(self, feature_info, feat_sizes, fpn_config, fpn_channels, num_levels=5, pad_type='',
                 downsample=None, upsample=None, norm_layer=nn.BatchNorm2d, act_layer=_ACT_LAYER,
                 apply_resample_bn=False, pre_act=True, separable_conv=True, redundant_bias=False):
        super(BiFpnLayer, self).__init__()
        self.num_levels = num_levels
        # Each fusion node contributes one new feature map entry after the inputs.
        node_info = [dict(num_chs=fpn_channels, size=feat_sizes[fc['feat_level']]) for fc in fpn_config.nodes]
        fpn_feature_info = feature_info + node_info
        self.fnode = nn.ModuleList()
        for i, fnode_cfg in enumerate(fpn_config.nodes):
            logging.debug('fnode {} : {}'.format(i, fnode_cfg))
            combine = FpnCombine(
                fpn_feature_info, fpn_channels, tuple(fnode_cfg['inputs_offsets']),
                output_size=feat_sizes[fnode_cfg['feat_level']], pad_type=pad_type,
                downsample=downsample, upsample=upsample, norm_layer=norm_layer,
                apply_resample_bn=apply_resample_bn, redundant_bias=redundant_bias,
                weight_method=fnode_cfg['weight_method'])
            after_combine = nn.Sequential()
            conv_kwargs = dict(
                in_channels=fpn_channels, out_channels=fpn_channels, kernel_size=3,
                padding=pad_type, bias=False, norm_layer=norm_layer, act_layer=act_layer)
            if pre_act:
                # pre-activation pattern: act before the conv, conv carries no act
                conv_kwargs['bias'] = redundant_bias
                conv_kwargs['act_layer'] = None
                after_combine.add_module('act', act_layer(inplace=True))
            conv_cls = SeparableConv2d if separable_conv else ConvBnAct2d
            after_combine.add_module('conv', conv_cls(**conv_kwargs))
            self.fnode.append(Fnode(combine=combine, after_combine=after_combine))
        # Only the last num_levels entries are the cell outputs.
        self.feature_info = fpn_feature_info[-num_levels:]

    def forward(self, x: List[torch.Tensor]):
        # Each fnode appends its fused output; the final num_levels entries are returned.
        for fn in self.fnode:
            x.append(fn(x))
        return x[-self.num_levels:]
|
class BiFpn(nn.Module):
    # Bi-directional FPN: extends backbone feature maps up to config.num_levels via
    # resampling, then stacks config.fpn_cell_repeats BiFpnLayer cells.
    def __init__(self, config, feature_info):
        super(BiFpn, self).__init__()
        self.num_levels = config.num_levels
        norm_layer = (config.norm_layer or nn.BatchNorm2d)
        if config.norm_kwargs:
            norm_layer = partial(norm_layer, **config.norm_kwargs)
        act_layer = (get_act_layer(config.act_type) or _ACT_LAYER)
        fpn_config = (config.fpn_config or get_fpn_config(config.fpn_name, min_level=config.min_level, max_level=config.max_level))
        feat_sizes = get_feat_sizes(config.image_size, max_level=config.max_level)
        prev_feat_size = feat_sizes[config.min_level]
        self.resample = nn.ModuleDict()
        for level in range(config.num_levels):
            feat_size = feat_sizes[(level + config.min_level)]
            if (level < len(feature_info)):
                # Level provided directly by the backbone: record its spatial size.
                in_chs = feature_info[level]['num_chs']
                feature_info[level]['size'] = feat_size
            else:
                # Extra pyramid level: resample from the previous level's output.
                # NOTE: in_chs and prev_feat_size deliberately carry over from the
                # previous loop iteration, chaining each new level off the last.
                self.resample[str(level)] = ResampleFeatureMap(in_channels=in_chs, out_channels=config.fpn_channels, input_size=prev_feat_size, output_size=feat_size, pad_type=config.pad_type, downsample=config.downsample_type, upsample=config.upsample_type, norm_layer=norm_layer, apply_bn=config.apply_resample_bn, redundant_bias=config.redundant_bias)
                in_chs = config.fpn_channels
                feature_info.append(dict(num_chs=in_chs, size=feat_size))
            prev_feat_size = feat_size
        self.cell = SequentialList()
        for rep in range(config.fpn_cell_repeats):
            logging.debug('building cell {}'.format(rep))
            fpn_layer = BiFpnLayer(feature_info=feature_info, feat_sizes=feat_sizes, fpn_config=fpn_config, fpn_channels=config.fpn_channels, num_levels=config.num_levels, pad_type=config.pad_type, downsample=config.downsample_type, upsample=config.upsample_type, norm_layer=norm_layer, act_layer=act_layer, separable_conv=config.separable_conv, apply_resample_bn=config.apply_resample_bn, pre_act=(not config.conv_bn_relu_pattern), redundant_bias=config.redundant_bias)
            self.cell.add_module(str(rep), fpn_layer)
            # Subsequent cells consume the previous cell's output feature info.
            feature_info = fpn_layer.feature_info

    def forward(self, x: List[torch.Tensor]):
        # Append the extra resampled levels (each built from the current last level),
        # then run the stacked BiFPN cells.
        for resample in self.resample.values():
            x.append(resample(x[(- 1)]))
        x = self.cell(x)
        return x
|
class HeadNet(nn.Module):
    # Class/box prediction head. Conv weights are shared across pyramid levels while
    # batchnorm is kept per level; a final conv predicts num_outputs * num_anchors
    # channels at each spatial location.
    def __init__(self, config, num_outputs):
        super(HeadNet, self).__init__()
        self.num_levels = config.num_levels
        # Layout of self.bn_rep: [level][repeat] when True, [repeat][level] otherwise.
        self.bn_level_first = getattr(config, 'head_bn_level_first', False)
        norm_layer = (config.norm_layer or nn.BatchNorm2d)
        if config.norm_kwargs:
            norm_layer = partial(norm_layer, **config.norm_kwargs)
        # Head may use its own activation type, falling back to the model-wide one.
        act_type = (config.head_act_type if getattr(config, 'head_act_type', None) else config.act_type)
        act_layer = (get_act_layer(act_type) or _ACT_LAYER)
        conv_fn = (SeparableConv2d if config.separable_conv else ConvBnAct2d)
        # Convs carry no norm/act of their own; normalization lives in bn_rep below.
        conv_kwargs = dict(in_channels=config.fpn_channels, out_channels=config.fpn_channels, kernel_size=3, padding=config.pad_type, bias=config.redundant_bias, act_layer=None, norm_layer=None)
        self.conv_rep = nn.ModuleList([conv_fn(**conv_kwargs) for _ in range(config.box_class_repeats)])
        self.bn_rep = nn.ModuleList()
        if self.bn_level_first:
            # Level-first nesting (required iteration order for torchscript).
            for _ in range(self.num_levels):
                self.bn_rep.append(nn.ModuleList([norm_layer(config.fpn_channels) for _ in range(config.box_class_repeats)]))
        else:
            # Repeat-first nesting; BN wrapped in a Sequential under key 'bn' so
            # state_dict names line up with the original pretrained weight layout.
            for _ in range(config.box_class_repeats):
                self.bn_rep.append(nn.ModuleList([nn.Sequential(OrderedDict([('bn', norm_layer(config.fpn_channels))])) for _ in range(self.num_levels)]))
        self.act = act_layer(inplace=True)
        num_anchors = (len(config.aspect_ratios) * config.num_scales)
        # Final predictor outputs num_outputs values per anchor, with bias enabled.
        predict_kwargs = dict(in_channels=config.fpn_channels, out_channels=(num_outputs * num_anchors), kernel_size=3, padding=config.pad_type, bias=True, norm_layer=None, act_layer=None)
        self.predict = conv_fn(**predict_kwargs)

    @torch.jit.ignore()
    def toggle_bn_level_first(self):
        """Toggle the batchnorm layers between feature level first vs repeat first access pattern.

        Limitations in torchscript require feature levels to be iterated over first.
        This function can be used to allow loading weights in the original order, and
        then toggle before jit scripting the model.
        """
        with torch.no_grad():
            new_bn_rep = nn.ModuleList()
            # Transpose the outer/inner nesting of bn_rep.
            for i in range(len(self.bn_rep[0])):
                bn_first = nn.ModuleList()
                for r in self.bn_rep.children():
                    m = r[i]
                    # Unwrap the Sequential('bn') when moving to level-first,
                    # re-wrap when moving back to repeat-first.
                    bn_first.append((m[0] if isinstance(m, nn.Sequential) else nn.Sequential(OrderedDict([('bn', m)]))))
                new_bn_rep.append(bn_first)
            self.bn_level_first = (not self.bn_level_first)
            self.bn_rep = new_bn_rep

    @torch.jit.ignore()
    def _forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
        # Repeat-first bn layout; not torchscript compatible (hence jit.ignore).
        outputs = []
        for level in range(self.num_levels):
            x_level = x[level]
            for (conv, bn) in zip(self.conv_rep, self.bn_rep):
                x_level = conv(x_level)
                # Select this level's BN inside the repeat's per-level list.
                x_level = bn[level](x_level)
                x_level = self.act(x_level)
            outputs.append(self.predict(x_level))
        return outputs

    def _forward_level_first(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
        # Level-first bn layout; torchscript compatible.
        outputs = []
        for (level, bn_rep) in enumerate(self.bn_rep):
            x_level = x[level]
            for (conv, bn) in zip(self.conv_rep, bn_rep):
                x_level = conv(x_level)
                x_level = bn(x_level)
                x_level = self.act(x_level)
            outputs.append(self.predict(x_level))
        return outputs

    def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
        # Dispatch on the current bn_rep layout; both paths produce one output per level.
        if self.bn_level_first:
            return self._forward_level_first(x)
        else:
            return self._forward(x)
|
def _init_weight(m, n=''):
    """Weight initialization as per Tensorflow official implementations.

    Head ('box_net'/'class_net') convs get variance-scaling / normal init with a
    focal-loss prior bias on the class predictor; all other convs get glorot
    uniform; BatchNorm is reset to identity.
    """

    def _fans(w, groups=1):
        # Compute (fan_in, fan_out) for a conv/linear weight tensor.
        if w.dim() < 2:
            raise ValueError('Fan in and fan out can not be computed for tensor with fewer than 2 dimensions')
        receptive_field = w[0][0].numel() if w.dim() > 2 else 1
        fan_in = w.size(1) * receptive_field
        fan_out = (w.size(0) * receptive_field) // groups
        return fan_in, fan_out

    def _glorot_uniform(w, gain=1, groups=1):
        fan_in, fan_out = _fans(w, groups)
        gain /= max(1.0, (fan_in + fan_out) / 2.0)
        limit = math.sqrt(3.0 * gain)
        w.data.uniform_(-limit, limit)

    def _variance_scaling(w, gain=1, groups=1):
        fan_in, _ = _fans(w, groups)
        # fan_in mode, truncated-normal-free variant: std = sqrt(gain / fan_in)
        std = math.sqrt(gain / max(1.0, fan_in))
        w.data.normal_(std=std)

    is_head = ('box_net' in n) or ('class_net' in n)
    if isinstance(m, SeparableConv2d):
        if is_head:
            _variance_scaling(m.conv_dw.weight, groups=m.conv_dw.groups)
            _variance_scaling(m.conv_pw.weight)
            if m.conv_pw.bias is not None:
                if 'class_net.predict' in n:
                    # Focal-loss prior: bias such that initial class prob ~= 0.01.
                    m.conv_pw.bias.data.fill_(-math.log((1 - 0.01) / 0.01))
                else:
                    m.conv_pw.bias.data.zero_()
        else:
            _glorot_uniform(m.conv_dw.weight, groups=m.conv_dw.groups)
            _glorot_uniform(m.conv_pw.weight)
            if m.conv_pw.bias is not None:
                m.conv_pw.bias.data.zero_()
    elif isinstance(m, ConvBnAct2d):
        if is_head:
            m.conv.weight.data.normal_(std=0.01)
            if m.conv.bias is not None:
                if 'class_net.predict' in n:
                    # Same focal-loss prior bias as the separable-conv path.
                    m.conv.bias.data.fill_(-math.log((1 - 0.01) / 0.01))
                else:
                    m.conv.bias.data.zero_()
        else:
            _glorot_uniform(m.conv.weight)
            if m.conv.bias is not None:
                m.conv.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm2d):
        # Identity-initialized batchnorm.
        m.weight.data.fill_(1.0)
        m.bias.data.zero_()
|
def _init_weight_alt(m, n=''):
' Weight initialization alternative, based on EfficientNet bacbkone init w/ class bias addition\n NOTE: this will likely be removed after some experimentation\n '
if isinstance(m, nn.Conv2d):
fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
if (m.bias is not None):
if ('class_net.predict' in n):
m.bias.data.fill_((- math.log(((1 - 0.01) / 0.01))))
else:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
|
def get_feature_info(backbone):
    """Return per-level dicts with 'num_chs' and 'reduction' for a backbone.

    Supports both the callable feature_info interface (older timm) and the
    FeatureInfo object interface (newer timm, via get_dicts).
    """
    fi = backbone.feature_info
    if isinstance(fi, Callable):
        return [{'num_chs': f['num_chs'], 'reduction': f['reduction']} for f in fi()]
    return fi.get_dicts(keys=['num_chs', 'reduction'])
|
class EfficientDet(nn.Module):
    # Complete EfficientDet model: timm backbone -> BiFPN -> class & box head nets.
    def __init__(self, config, pretrained_backbone=True, alternate_init=False):
        super(EfficientDet, self).__init__()
        self.config = config
        set_config_readonly(self.config)
        # features_only backbone returning feature maps at out_indices (default (2, 3, 4)).
        self.backbone = create_model(config.backbone_name, features_only=True, out_indices=(self.config.backbone_indices or (2, 3, 4)), pretrained=pretrained_backbone, **config.backbone_args)
        feature_info = get_feature_info(self.backbone)
        self.fpn = BiFpn(self.config, feature_info)
        self.class_net = HeadNet(self.config, num_outputs=self.config.num_classes)
        # Box head regresses 4 values per anchor.
        self.box_net = HeadNet(self.config, num_outputs=4)
        # Initialize all non-backbone weights; backbone keeps its own (pretrained) init.
        for (n, m) in self.named_modules():
            if ('backbone' not in n):
                if alternate_init:
                    _init_weight_alt(m, n)
                else:
                    _init_weight(m, n)

    @torch.jit.ignore()
    def reset_head(self, num_classes=None, aspect_ratios=None, num_scales=None, alternate_init=False):
        """Rebuild the class and/or box heads after overriding num_classes,
        aspect_ratios or num_scales in the (read-only) config."""
        reset_class_head = False
        reset_box_head = False
        # Temporarily unlock the config to apply the overrides.
        set_config_writeable(self.config)
        if (num_classes is not None):
            reset_class_head = True
            self.config.num_classes = num_classes
        if (aspect_ratios is not None):
            reset_box_head = True
            self.config.aspect_ratios = aspect_ratios
        if (num_scales is not None):
            reset_box_head = True
            self.config.num_scales = num_scales
        set_config_readonly(self.config)
        if reset_class_head:
            self.class_net = HeadNet(self.config, num_outputs=self.config.num_classes)
            # Prefix matches the full-model names so the 'class_net.predict'
            # init rules in _init_weight apply.
            for (n, m) in self.class_net.named_modules(prefix='class_net'):
                if alternate_init:
                    _init_weight_alt(m, n)
                else:
                    _init_weight(m, n)
        if reset_box_head:
            self.box_net = HeadNet(self.config, num_outputs=4)
            for (n, m) in self.box_net.named_modules(prefix='box_net'):
                if alternate_init:
                    _init_weight_alt(m, n)
                else:
                    _init_weight(m, n)

    @torch.jit.ignore()
    def toggle_head_bn_level_first(self):
        """Toggle the head batchnorm layers between being accessed with feature_level first vs repeat."""
        self.class_net.toggle_bn_level_first()
        self.box_net.toggle_bn_level_first()

    def forward(self, x):
        # Returns (class_outputs, box_outputs), each a list of per-level tensors.
        x = self.backbone(x)
        x = self.fpn(x)
        x_class = self.class_net(x)
        x_box = self.box_net(x)
        return (x_class, x_box)
|
def create_category_index(categories):
    """Create a dictionary of COCO-compatible categories keyed by category id.

    Args:
        categories: a list of dicts, each of which has the following keys:
            'id': (required) an integer id uniquely identifying this category.
            'name': (required) string representing category name, e.g. 'cat', 'dog'.

    Returns:
        A dict containing the same entries as categories, keyed by each
        category's 'id' field.
    """
    return {cat['id']: cat for cat in categories}
|
class DetectionEvaluator(metaclass=ABCMeta):
    """Interface for object detection evaluation classes.

    Example usage of the Evaluator:
    ------------------------------
    evaluator = DetectionEvaluator(categories)
    # Detections and groundtruth for image 1.
    evaluator.add_single_gt_image_info(...)
    evaluator.add_single_detected_image_info(...)
    # Detections and groundtruth for image 2.
    evaluator.add_single_gt_image_info(...)
    evaluator.add_single_detected_image_info(...)
    metrics_dict = evaluator.evaluation()
    """

    def __init__(self, categories):
        """Store the category list; subclasses implement the actual bookkeeping.

        Args:
            categories: a list of dicts, each with an integer 'id' uniquely
                identifying the category and a string 'name' (e.g. 'cat', 'dog').
        """
        self._categories = categories

    def observe_result_dict_for_single_example(self, eval_dict):
        """Observe an evaluation result dict for a single example.

        Optional hook used by eager / tf.estimator style evaluation loops;
        concrete evaluators may override it.

        Args:
            eval_dict: dict of tensors for evaluating a detection model.

        Returns:
            None when executing eagerly, or an update_op for estimator metrics.
        """
        raise NotImplementedError('Not implemented for this evaluator!')

    @abstractmethod
    def add_single_ground_truth_image_info(self, image_id, gt_dict):
        """Record groundtruth arrays for a single image.

        Args:
            image_id: unique string/integer identifier for the image.
            gt_dict: dict of groundtruth numpy arrays required for evaluation.
        """

    @abstractmethod
    def add_single_detected_image_info(self, image_id, detections_dict):
        """Record detection arrays for a single image.

        Args:
            image_id: unique string/integer identifier for the image.
            detections_dict: dict of detection numpy arrays required for evaluation.
        """

    @abstractmethod
    def evaluate(self):
        """Evaluate detections and return a dictionary of metrics."""

    @abstractmethod
    def clear(self):
        """Clear accumulated state to prepare for a fresh evaluation."""
|
class ObjectDetectionEvaluator(DetectionEvaluator):
    """Evaluates detections against groundtruth boxes (or masks).

    Computes (m)AP at a fixed IOU threshold, optionally restricted to a recall
    operating range, with optional CorLoc and per-class precision/recall output.
    """

    def __init__(self, categories, matching_iou_threshold=0.5, recall_lower_bound=0.0,
                 recall_upper_bound=1.0, evaluate_corlocs=False, evaluate_precision_recall=False,
                 metric_prefix=None, use_weighted_mean_ap=False, evaluate_masks=False,
                 group_of_weight=0.0):
        """Constructor.

        Args:
            categories: list of dicts with required keys 'id' (int, 1-indexed,
                unique) and 'name' (str, e.g. 'cat', 'dog').
            matching_iou_threshold: IOU threshold for matching groundtruth boxes
                to detection boxes.
            recall_lower_bound: lower bound of the recall operating area.
            recall_upper_bound: upper bound of the recall operating area.
            evaluate_corlocs: if True, also compute and return CorLoc scores.
            evaluate_precision_recall: if True, also return per-class precision
                and recall values.
            metric_prefix: optional string prefix for metric names.
            use_weighted_mean_ap: if True, compute mAP directly from the pooled
                scores and tp_fp_labels of all classes instead of averaging
                per-class APs.
            evaluate_masks: if True, evaluate masks instead of boxes.
            group_of_weight: weight of group-of boxes. 0 ignores detections of the
                correct class inside a group-of box; > 0 adds this weight to true
                positives when a detection matches, or to false negatives otherwise.

        Raises:
            ValueError: if the category ids are not 1-indexed.
        """
        super(ObjectDetectionEvaluator, self).__init__(categories)
        self._num_classes = max([cat['id'] for cat in categories])
        if (min((cat['id'] for cat in categories)) < 1):
            raise ValueError('Classes should be 1-indexed.')
        self._matching_iou_threshold = matching_iou_threshold
        self._recall_lower_bound = recall_lower_bound
        self._recall_upper_bound = recall_upper_bound
        self._use_weighted_mean_ap = use_weighted_mean_ap
        # External class ids are 1-indexed; internal indices are 0-based.
        self._label_id_offset = 1
        self._evaluate_masks = evaluate_masks
        self._group_of_weight = group_of_weight
        self._evaluation = self._new_evaluation()
        self._image_ids = set([])
        self._evaluate_corlocs = evaluate_corlocs
        self._evaluate_precision_recall = evaluate_precision_recall
        self._metric_prefix = ((metric_prefix + '_') if metric_prefix else '')
        self._build_metric_names()

    def _new_evaluation(self):
        # Build the underlying evaluation state from the current settings.
        # Shared by __init__ and clear() so both use identical parameters.
        return ObjectDetectionEvaluation(
            num_gt_classes=self._num_classes,
            matching_iou_threshold=self._matching_iou_threshold,
            recall_lower_bound=self._recall_lower_bound,
            recall_upper_bound=self._recall_upper_bound,
            use_weighted_mean_ap=self._use_weighted_mean_ap,
            label_id_offset=self._label_id_offset,
            group_of_weight=self._group_of_weight)

    def _build_metric_names(self):
        """Builds the list of metric names reported by evaluate()."""
        if ((self._recall_lower_bound > 0.0) or (self._recall_upper_bound < 1.0)):
            self._metric_names = [(self._metric_prefix + 'Precision/mAP@{}IOU@[{:.1f},{:.1f}]Recall'.format(self._matching_iou_threshold, self._recall_lower_bound, self._recall_upper_bound))]
        else:
            self._metric_names = [(self._metric_prefix + 'Precision/mAP@{}IOU'.format(self._matching_iou_threshold))]
        if self._evaluate_corlocs:
            self._metric_names.append((self._metric_prefix + 'Precision/meanCorLoc@{}IOU'.format(self._matching_iou_threshold)))
        category_index = create_category_index(self._categories)
        for idx in range(self._num_classes):
            if ((idx + self._label_id_offset) in category_index):
                category_name = category_index[(idx + self._label_id_offset)]['name']
                # Normalize to composed unicode so names are stable metric keys.
                category_name = unicodedata.normalize('NFKD', category_name)
                self._metric_names.append((self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(self._matching_iou_threshold, category_name)))
                if self._evaluate_corlocs:
                    self._metric_names.append((self._metric_prefix + 'PerformanceByCategory/CorLoc@{}IOU/{}'.format(self._matching_iou_threshold, category_name)))

    def add_single_ground_truth_image_info(self, image_id, gt_dict):
        """Adds groundtruth for a single image to be used for evaluation.

        Args:
            image_id: unique string/integer identifier for the image.
            gt_dict: dict containing InputDataFields.gt_boxes (float32 [num_boxes, 4],
                [ymin, xmin, ymax, xmax] absolute coords), InputDataFields.gt_classes
                (int [num_boxes], 1-indexed), optional InputDataFields.gt_difficult
                (bool [num_boxes]) and, when evaluating masks,
                InputDataFields.gt_instance_masks ([num_boxes, height, width] in {0, 1}).

        Raises:
            ValueError: if mask evaluation is enabled but masks are missing.
        """
        # Silently skip duplicates; groundtruth for an image is only counted once.
        if (image_id in self._image_ids):
            return
        gt_classes = (gt_dict[InputDataFields.gt_classes] - self._label_id_offset)
        # Use the difficult flags only when present and consistent with the boxes.
        if ((InputDataFields.gt_difficult in gt_dict) and (gt_dict[InputDataFields.gt_difficult].size or (not gt_classes.size))):
            gt_difficult = gt_dict[InputDataFields.gt_difficult]
        else:
            gt_difficult = None
        gt_masks = None
        if self._evaluate_masks:
            if (InputDataFields.gt_instance_masks not in gt_dict):
                raise ValueError('Instance masks not in groundtruth dictionary.')
            gt_masks = gt_dict[InputDataFields.gt_instance_masks]
        self._evaluation.add_single_ground_truth_image_info(image_key=image_id, gt_boxes=gt_dict[InputDataFields.gt_boxes], gt_class_labels=gt_classes, gt_is_difficult_list=gt_difficult, gt_masks=gt_masks)
        self._image_ids.update([image_id])

    def add_single_detected_image_info(self, image_id, detections_dict):
        """Adds detections for a single image to be used for evaluation.

        Args:
            image_id: unique string/integer identifier for the image.
            detections_dict: dict containing DetectionResultFields.detection_boxes
                (float32 [num_boxes, 4], absolute coords),
                DetectionResultFields.detection_scores (float32 [num_boxes]),
                DetectionResultFields.detection_classes (int [num_boxes], 1-indexed)
                and, when evaluating masks, DetectionResultFields.detection_masks
                ([num_boxes, height, width]).

        Raises:
            ValueError: if mask evaluation is enabled but masks are missing.
        """
        detection_classes = (detections_dict[DetectionResultFields.detection_classes] - self._label_id_offset)
        detection_masks = None
        if self._evaluate_masks:
            if (DetectionResultFields.detection_masks not in detections_dict):
                raise ValueError('Detection masks not in detections dictionary.')
            detection_masks = detections_dict[DetectionResultFields.detection_masks]
        self._evaluation.add_single_detected_image_info(image_key=image_id, detected_boxes=detections_dict[DetectionResultFields.detection_boxes], detected_scores=detections_dict[DetectionResultFields.detection_scores], detected_class_labels=detection_classes, detected_masks=detection_masks)

    def evaluate(self):
        """Compute evaluation result.

        Returns:
            A dict of metrics: the mAP summary metric (first metric name), optional
            mean CorLoc, and per-category AP (plus optional precision/recall and
            CorLoc) keyed as
            '<prefix>PerformanceByCategory/...@<matching_iou_threshold>IOU/<name>'.
        """
        metrics = self._evaluation.evaluate()
        pascal_metrics = {self._metric_names[0]: metrics['mean_ap']}
        if self._evaluate_corlocs:
            pascal_metrics[self._metric_names[1]] = metrics['mean_corloc']
        category_index = create_category_index(self._categories)
        for idx in range(metrics['per_class_ap'].size):
            if ((idx + self._label_id_offset) in category_index):
                category_name = category_index[(idx + self._label_id_offset)]['name']
                category_name = unicodedata.normalize('NFKD', category_name)
                display_name = (self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(self._matching_iou_threshold, category_name))
                pascal_metrics[display_name] = metrics['per_class_ap'][idx]
                if self._evaluate_precision_recall:
                    display_name = (self._metric_prefix + 'PerformanceByCategory/Precision@{}IOU/{}'.format(self._matching_iou_threshold, category_name))
                    pascal_metrics[display_name] = metrics['per_class_precision'][idx]
                    display_name = (self._metric_prefix + 'PerformanceByCategory/Recall@{}IOU/{}'.format(self._matching_iou_threshold, category_name))
                    pascal_metrics[display_name] = metrics['per_class_recall'][idx]
                if self._evaluate_corlocs:
                    display_name = (self._metric_prefix + 'PerformanceByCategory/CorLoc@{}IOU/{}'.format(self._matching_iou_threshold, category_name))
                    pascal_metrics[display_name] = metrics['per_class_corloc'][idx]
        return pascal_metrics

    def clear(self):
        """Clears the state to prepare for a fresh evaluation.

        BUGFIX: previously the evaluation was recreated without recall_lower_bound,
        recall_upper_bound and group_of_weight, so subclasses configured with those
        parameters (e.g. PrecisionAtRecallDetectionEvaluator) silently changed
        behavior after clear(). Now the same construction as __init__ is reused.
        """
        self._evaluation = self._new_evaluation()
        self._image_ids.clear()
|
class PascalDetectionEvaluator(ObjectDetectionEvaluator):
    """Evaluates detections with the standard PASCAL VOC box metrics."""

    def __init__(self, categories, matching_iou_threshold=0.5):
        super(PascalDetectionEvaluator, self).__init__(
            categories,
            matching_iou_threshold=matching_iou_threshold,
            evaluate_corlocs=False,
            metric_prefix='PascalBoxes',
            use_weighted_mean_ap=False)
|
class WeightedPascalDetectionEvaluator(ObjectDetectionEvaluator):
    """Evaluates detections with weighted PASCAL metrics.

    Weighted PASCAL metrics compute the mean average precision as the average
    precision over the pooled scores and tp_fp_labels of all classes, rather
    than the mean of per-class average precisions. This resembles (but is not
    identical to) a class-frequency-weighted mean of per-class APs, since AP is
    not a linear function of scores and tp_fp_labels.
    """

    def __init__(self, categories, matching_iou_threshold=0.5):
        super(WeightedPascalDetectionEvaluator, self).__init__(
            categories,
            matching_iou_threshold=matching_iou_threshold,
            evaluate_corlocs=False,
            metric_prefix='WeightedPascalBoxes',
            use_weighted_mean_ap=True)
|
class PrecisionAtRecallDetectionEvaluator(ObjectDetectionEvaluator):
    """Evaluates detections with precision@recall metrics over a recall range."""

    def __init__(self, categories, matching_iou_threshold=0.5,
                 recall_lower_bound=0.0, recall_upper_bound=1.0):
        super(PrecisionAtRecallDetectionEvaluator, self).__init__(
            categories,
            matching_iou_threshold=matching_iou_threshold,
            recall_lower_bound=recall_lower_bound,
            recall_upper_bound=recall_upper_bound,
            evaluate_corlocs=False,
            metric_prefix='PrecisionAtRecallBoxes',
            use_weighted_mean_ap=False)
|
class OpenImagesDetectionEvaluator(ObjectDetectionEvaluator):
    """Evaluates detections with Open Images V2 metrics.

    Open Images V2 introduces group-of bounding boxes; this evaluator handles
    those boxes appropriately via the group_of machinery.
    """

    def __init__(self, categories, matching_iou_threshold=0.5, evaluate_masks=False,
                 evaluate_corlocs=False, metric_prefix='OpenImagesV5', group_of_weight=0.0):
        """Constructor.

        Args:
            categories: list of dicts with 'id' (int, unique) and 'name' (str) keys.
            matching_iou_threshold: IOU threshold for matching groundtruth to detections.
            evaluate_masks: if True, evaluate masks instead of boxes.
            evaluate_corlocs: if True, additionally evaluate and return CorLoc.
            metric_prefix: prefix used in reported metric names.
            group_of_weight: weight of group-of boxes; 0 (the Open Images V2
                protocol default) ignores in-group detections of the correct class,
                while > 0 credits/penalizes them with this weight.
        """
        super(OpenImagesDetectionEvaluator, self).__init__(
            categories, matching_iou_threshold, evaluate_corlocs,
            metric_prefix=metric_prefix, group_of_weight=group_of_weight,
            evaluate_masks=evaluate_masks)

    def add_single_ground_truth_image_info(self, image_id, gt_dict):
        """Adds groundtruth for a single image to be used for evaluation.

        Args:
            image_id: unique string/integer identifier for the image.
            gt_dict: dict with InputDataFields.gt_boxes (float32 [num_boxes, 4],
                absolute [ymin, xmin, ymax, xmax]), InputDataFields.gt_classes
                (int [num_boxes], 1-indexed) and optional
                InputDataFields.gt_group_of (bool, length M) flags.
        """
        # Duplicate groundtruth for an image is ignored.
        if image_id in self._image_ids:
            return
        gt_classes = gt_dict[InputDataFields.gt_classes] - self._label_id_offset
        # Use group_of flags only when present and consistent with the boxes.
        group_of = None
        if InputDataFields.gt_group_of in gt_dict:
            flags = gt_dict[InputDataFields.gt_group_of]
            if flags.size or (not gt_classes.size):
                group_of = flags
        gt_masks = gt_dict[InputDataFields.gt_instance_masks] if self._evaluate_masks else None
        self._evaluation.add_single_ground_truth_image_info(
            image_id, gt_dict[InputDataFields.gt_boxes], gt_classes,
            gt_is_difficult_list=None, gt_is_group_of_list=group_of, gt_masks=gt_masks)
        self._image_ids.update([image_id])
|
class OpenImagesChallengeEvaluator(OpenImagesDetectionEvaluator):
'A class implements Open Images Challenge metrics.\n Both Detection and Instance Segmentation evaluation metrics are implemented.\n Open Images Challenge Detection metric has two major changes in comparison\n with Open Images V2 detection metric:\n - a custom weight might be specified for detecting an object contained in a group-of box.\n - verified image-level labels should be explicitly provided for evaluation: in case an\n image has neither positive nor negative image level label of class c, all detections of\n this class on this image will be ignored.\n\n Open Images Challenge Instance Segmentation metric allows to measure performance\n of models in case of incomplete annotations: some instances are\n annotations only on box level and some - on image-level. In addition,\n image-level labels are taken into account as in detection metric.\n\n Open Images Challenge Detection metric default parameters:\n evaluate_masks = False\n group_of_weight = 1.0\n\n Open Images Challenge Instance Segmentation metric default parameters:\n evaluate_masks = True\n (group_of_weight will not matter)\n '
def __init__(self, categories, evaluate_masks=False, matching_iou_threshold=0.5, evaluate_corlocs=False, group_of_weight=1.0):
    """Constructor.

    Args:
        categories: a list of dicts, each with an integer 'id' uniquely
            identifying the category and a string 'name' (e.g. 'cat', 'dog').
        evaluate_masks: True for the instance segmentation metric, False for
            the detection metric.
        matching_iou_threshold: IOU threshold for matching groundtruth boxes
            to detection boxes.
        evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
        group_of_weight: weight of group-of boxes. If 0, detections of the
            correct class within a group-of box are ignored; if > 0, a matching
            detection adds this weight to true positives, otherwise it is added
            to false negatives.
    """
    # Metric prefix depends on the evaluation mode (detection vs segmentation).
    if (not evaluate_masks):
        metrics_prefix = 'OpenImagesDetectionChallenge'
    else:
        metrics_prefix = 'OpenImagesInstanceSegmentationChallenge'
    super(OpenImagesChallengeEvaluator, self).__init__(categories, matching_iou_threshold, evaluate_masks=evaluate_masks, evaluate_corlocs=evaluate_corlocs, group_of_weight=group_of_weight, metric_prefix=metrics_prefix)
    # Maps image_id -> array of class indices that may be evaluated on that image
    # (classes with verified image-level labels or groundtruth boxes).
    self._evaluatable_labels = {}
def add_single_ground_truth_image_info(self, image_id, gt_dict):
    """Adds groundtruth for a single image to be used for evaluation.

    Args:
        image_id: A unique string/integer identifier for the image.
        gt_dict: A dictionary containing -
            InputDataFields.gt_boxes: float32 numpy array of shape [num_boxes, 4]
                of [ymin, xmin, ymax, xmax] boxes in absolute image coordinates.
            InputDataFields.gt_classes: integer numpy array of shape [num_boxes]
                with 1-indexed groundtruth classes for the boxes.
            InputDataFields.gt_image_classes: integer 1D numpy array of all
                classes for which image-level labels are verified.
            InputDataFields.gt_group_of: optional length-M boolean array marking
                group-of boxes.

    Raises:
        ValueError: On adding groundtruth for an image more than once.
    """
    super(OpenImagesChallengeEvaluator, self).add_single_ground_truth_image_info(image_id, gt_dict)
    input_fields = InputDataFields
    gt_classes = gt_dict[input_fields.gt_classes] - self._label_id_offset
    image_classes = np.array([], dtype=int)
    if input_fields.gt_image_classes in gt_dict:
        image_classes = gt_dict[input_fields.gt_image_classes]
    elif input_fields.gt_labeled_classes in gt_dict:
        image_classes = gt_dict[input_fields.gt_labeled_classes]
    # Bug fix: the original used in-place `image_classes -= offset`, which
    # mutated the caller's array stored in gt_dict. Use a non-mutating
    # subtraction so gt_dict is left untouched.
    image_classes = image_classes - self._label_id_offset
    # A class is evaluatable on this image if it appears either as an
    # image-level verified label or as a box-level groundtruth label.
    self._evaluatable_labels[image_id] = np.unique(np.concatenate((image_classes, gt_classes)))
def add_single_detected_image_info(self, image_id, detections_dict):
    """Adds detections for a single image to be used for evaluation.

    Args:
        image_id: A unique string/integer identifier for the image.
        detections_dict: A dictionary containing -
            DetectionResultFields.detection_boxes: float32 numpy array of shape
                [num_boxes, 4] of [ymin, xmin, ymax, xmax] boxes in absolute
                image coordinates.
            DetectionResultFields.detection_scores: float32 numpy array of shape
                [num_boxes] with detection scores.
            DetectionResultFields.detection_classes: integer numpy array of shape
                [num_boxes] with 1-indexed detection classes.

    Raises:
        ValueError: If detection masks are not in detections dictionary.
    """
    if image_id not in self._image_ids:
        # No groundtruth was registered for this image: record it with an
        # empty evaluatable-label set so all its detections are filtered out.
        self._image_ids.update([image_id])
        self._evaluatable_labels[image_id] = np.array([])
    det_classes = detections_dict[DetectionResultFields.detection_classes] - self._label_id_offset
    # Keep only detections whose class has a verified image-level or box-level label.
    keep = np.where(np.isin(det_classes, self._evaluatable_labels[image_id]))
    det_classes = det_classes[keep]
    det_boxes = detections_dict[DetectionResultFields.detection_boxes][keep]
    det_scores = detections_dict[DetectionResultFields.detection_scores][keep]
    det_masks = (detections_dict[DetectionResultFields.detection_masks][keep]
                 if self._evaluate_masks else None)
    self._evaluation.add_single_detected_image_info(
        image_key=image_id,
        detected_boxes=det_boxes,
        detected_scores=det_scores,
        detected_class_labels=det_classes,
        detected_masks=det_masks)
def clear(self):
    'Clears stored data.'
    # Reset the base evaluator state (image ids, accumulated stats, ...).
    super(OpenImagesChallengeEvaluator, self).clear()
    # Also drop the per-image evaluatable-label cache kept by this subclass.
    self._evaluatable_labels.clear()
|
class InputDataFields(object):
    """Names for the input tensors.

    Holds the standard data field names used to identify input tensors. These
    keys should be used by decoders when building the returned tensor_dict and
    by models to look up the tensors they need.

    Attributes:
        image: image.
        key: unique key corresponding to image.
        filename: original filename of the dataset (without common path).
        gt_boxes: coordinates of the ground truth boxes in the image.
        gt_classes: box-level class labels.
        gt_confidences: box-level class confidences (same shape as gt_classes).
        gt_label_types: box-level label types (e.g. explicit negative).
        gt_image_classes: image-level class labels.
        gt_image_confidences: image-level class confidences.
        gt_labeled_classes: image-level annotation of the classes for which an
            image has been labeled.
        gt_is_crowd: [DEPRECATED, use gt_group_of instead] whether the
            groundtruth is a single object or a crowd.
        gt_area: area of a groundtruth segment.
        gt_difficult: is a `difficult` object.
        gt_group_of: is a `group_of` objects, e.g. multiple heavily-occluding
            instances of the same class forming a connected group.
        gt_instance_masks: ground truth instance masks.
        gt_instance_boundaries: ground truth instance boundaries.
        gt_instance_classes: instance mask-level class labels.
        image_height: height of images, used to decode.
        image_width: width of images, used to decode.
        image_size: image size, used to decode.
    """
    image = 'image'
    key = 'image_id'
    filename = 'filename'
    gt_boxes = 'bbox'
    gt_classes = 'cls'
    gt_confidences = 'confidences'
    gt_label_types = 'label_types'
    gt_image_classes = 'img_cls'
    gt_image_confidences = 'img_confidences'
    gt_labeled_classes = 'labeled_cls'
    gt_is_crowd = 'is_crowd'
    gt_area = 'area'
    gt_difficult = 'difficult'
    gt_group_of = 'group_of'
    gt_instance_masks = 'instance_masks'
    gt_instance_boundaries = 'instance_boundaries'
    gt_instance_classes = 'instance_classes'
    image_height = 'img_height'
    image_width = 'img_width'
    image_size = 'img_size'
|
class DetectionResultFields(object):
    """Naming conventions for storing the output of the detector.

    Attributes:
        key: unique key corresponding to image.
        detection_boxes: coordinates of the detection boxes in the image.
        detection_scores: detection scores for the detection boxes in the image.
        detection_classes: detection-level class labels.
        detection_masks: contains a segmentation mask for each detection box.
    """
    key = 'image_id'
    detection_boxes = 'bbox'
    detection_scores = 'score'
    detection_classes = 'cls'
    detection_masks = 'masks'
|
class BoxListFields(object):
    """Naming conventions for BoxLists.

    Attributes:
        boxes: bounding box coordinates.
        classes: classes per bounding box.
        scores: scores per bounding box.
        weights: sample weights per bounding box.
        confidences: confidences per bounding box.
        objectness: objectness score per bounding box.
        masks: masks per bounding box.
        boundaries: boundaries per bounding box.
        keypoints: keypoints per bounding box.
        keypoint_visibilities: keypoint visibilities per bounding box.
        keypoint_heatmaps: keypoint heatmaps per bounding box.
        is_crowd: is_crowd annotation per bounding box.
        group_of: group_of annotation per bounding box.
    """
    boxes = 'boxes'
    classes = 'classes'
    scores = 'scores'
    weights = 'weights'
    confidences = 'confidences'
    objectness = 'objectness'
    masks = 'masks'
    boundaries = 'boundaries'
    keypoints = 'keypoints'
    keypoint_visibilities = 'keypoint_visibilities'
    keypoint_heatmaps = 'keypoint_heatmaps'
    is_crowd = 'is_crowd'
    group_of = 'group_of'
|
def compute_precision_recall(scores, labels, num_gt):
    """Compute precision and recall curves from scored detections.

    Args:
        scores: A float numpy array representing detection scores.
        labels: A float or bool numpy array representing weighted
            true/false positive labels.
        num_gt: Number of ground truth instances.

    Raises:
        ValueError: if the input is not of the correct format.

    Returns:
        precision: Fraction of positive instances over detected ones, at each
            score threshold. None if no ground truth labels are present.
        recall: Fraction of detected positive instances over all positive
            instances, at each score threshold. None if no ground truth labels
            are present.
    """
    if not isinstance(labels, np.ndarray) or labels.ndim != 1:
        raise ValueError('labels must be single dimension numpy array')
    # np.float64 replaces the np.float_ alias that was removed in NumPy 2.0.
    if labels.dtype != np.float64 and labels.dtype != np.bool_:
        raise ValueError('labels type must be either bool or float')
    if not isinstance(scores, np.ndarray) or scores.ndim != 1:
        raise ValueError('scores must be single dimension numpy array')
    if num_gt < np.sum(labels):
        raise ValueError('Number of true positives must be smaller than num_gt.')
    if len(scores) != len(labels):
        raise ValueError('scores and labels must be of the same size.')
    if num_gt == 0:
        return None, None
    # Walk detections from highest to lowest score, accumulating TP/FP counts.
    sorted_indices = np.argsort(scores)[::-1]
    true_positive_labels = labels[sorted_indices]
    false_positive_labels = (true_positive_labels <= 0).astype(float)
    cum_true_positives = np.cumsum(true_positive_labels)
    cum_false_positives = np.cumsum(false_positive_labels)
    precision = cum_true_positives.astype(float) / (cum_true_positives + cum_false_positives)
    recall = cum_true_positives.astype(float) / num_gt
    return precision, recall
|
def compute_average_precision(precision, recall):
    """Compute Average Precision according to the definition in VOCdevkit.

    Precision is modified to ensure that it does not decrease as recall
    decreases.

    Args:
        precision: A float [N, 1] numpy array of precisions.
        recall: A float [N, 1] numpy array of recalls.

    Raises:
        ValueError: if the input is not of the correct format.

    Returns:
        average_precision: The area under the precision-recall curve. NaN if
            precision and recall are None.
    """
    if precision is None:
        if recall is not None:
            raise ValueError('If precision is None, recall must also be None')
        # np.nan replaces np.NAN, which was removed in NumPy 2.0.
        return np.nan
    if not isinstance(precision, np.ndarray) or not isinstance(recall, np.ndarray):
        raise ValueError('precision and recall must be numpy array')
    # np.float64 replaces the np.float_ alias that was removed in NumPy 2.0.
    if precision.dtype != np.float64 or recall.dtype != np.float64:
        raise ValueError('input must be float numpy array.')
    if len(precision) != len(recall):
        raise ValueError('precision and recall must be of the same size.')
    if not precision.size:
        return 0.0
    if np.amin(precision) < 0 or np.amax(precision) > 1:
        raise ValueError('Precision must be in the range of [0, 1].')
    if np.amin(recall) < 0 or np.amax(recall) > 1:
        raise ValueError('recall must be in the range of [0, 1].')
    # Vectorized monotonicity check replaces the original Python-level loop.
    if not np.all(recall[1:] >= recall[:-1]):
        raise ValueError('recall must be a non-decreasing array')
    recall = np.concatenate([[0], recall, [1]])
    precision = np.concatenate([[0], precision, [0]])
    # Make precision monotonically non-increasing by sweeping from the right.
    for i in range(len(precision) - 2, -1, -1):
        precision[i] = np.maximum(precision[i], precision[i + 1])
    # Sum the areas of the rectangles under the interpolated curve.
    indices = np.where(recall[1:] != recall[:-1])[0] + 1
    average_precision = np.sum((recall[indices] - recall[indices - 1]) * precision[indices])
    return average_precision
|
def compute_cor_loc(num_gt_imgs_per_class, num_images_correctly_detected_per_class):
    """Compute CorLoc according to the definition in the following paper.

    https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf

    Returns nan if there are no ground truth images for a class.

    Args:
        num_gt_imgs_per_class: 1D array, number of images containing at least
            one object instance of a particular class.
        num_images_correctly_detected_per_class: 1D array, number of images in
            which at least one object instance of a particular class was
            correctly detected.

    Returns:
        corloc_per_class: A float numpy array with the CorLoc score per class.
    """
    # np.where evaluates both branches, so the division runs even for classes
    # with zero gt images; suppress the spurious divide-by-zero warnings.
    with np.errstate(divide='ignore', invalid='ignore'):
        ratio = num_images_correctly_detected_per_class / num_gt_imgs_per_class
    return np.where(num_gt_imgs_per_class == 0, np.nan, ratio)
|
def compute_median_rank_at_k(tp_fp_list, k):
    """Computes MedianRank@k, where k is the top-scoring labels.

    Args:
        tp_fp_list: a list of numpy arrays; each array holds all detections of
            a single image sorted by descending score. Elements with value > 0
            (or True) are true positives; anything else is a false positive.
        k: number of top-scoring proposals to take.

    Returns:
        median_rank: median rank of all true positive proposals among top k by score.
    """
    # For each image, collect the ranks (within the top k) of its true positives.
    positive_ranks = [
        np.where(tp_fp[:min(k, tp_fp.shape[0])] > 0)[0]
        for tp_fp in tp_fp_list
    ]
    return np.median(np.concatenate(positive_ranks))
|
def compute_recall_at_k(tp_fp_list, num_gt, k):
    """Computes Recall@k, where k is the top-scoring labels.

    Args:
        tp_fp_list: a list of numpy arrays; each array holds all detections of
            a single image sorted by descending score. Elements with value > 0
            (or True) are true positives; anything else is a false positive.
        num_gt: number of groundtruth annotations.
        k: number of top-scoring proposals to take.

    Returns:
        recall: recall evaluated on the top k by score detections.
    """
    # Concatenate the top-k labels from every image, then count true positives.
    top_k_labels = np.concatenate(
        [tp_fp[:min(k, tp_fp.shape[0])] for tp_fp in tp_fp_list])
    return np.sum(top_k_labels) / num_gt
|
class ObjectDetectionEvaluation():
    """Internal implementation of Pascal object detection metrics."""

    def __init__(self, num_gt_classes, matching_iou_threshold=0.5, nms_iou_threshold=1.0,
                 nms_max_output_boxes=10000, recall_lower_bound=0.0, recall_upper_bound=1.0,
                 use_weighted_mean_ap=False, label_id_offset=0, group_of_weight=0.0,
                 per_image_eval_class=PerImageEvaluation):
        """Constructor.

        Args:
            num_gt_classes: Number of ground-truth classes.
            matching_iou_threshold: IOU threshold for matching detections to groundtruth.
            nms_iou_threshold: IOU threshold used for non-maximum suppression.
            nms_max_output_boxes: Maximum number of boxes returned by NMS.
            recall_lower_bound: lower bound of recall operating area.
            recall_upper_bound: upper bound of recall operating area.
            use_weighted_mean_ap: (optional) if True, mean AP is computed directly
                from the pooled scores and tp_fp_labels of all classes.
            label_id_offset: The label id offset.
            group_of_weight: Weight of group-of boxes. If 0, detections of the
                correct class within a group-of box are ignored. If > 0, a single
                matching detection adds group_of_weight to true positives, and a
                missed group-of box adds group_of_weight to false negatives.
            per_image_eval_class: The class that computes per-image metrics.

        Raises:
            ValueError: if num_gt_classes is smaller than 1.
        """
        if num_gt_classes < 1:
            raise ValueError('Need at least 1 groundtruth class for evaluation.')
        self.per_image_eval = per_image_eval_class(
            num_gt_classes=num_gt_classes,
            matching_iou_threshold=matching_iou_threshold,
            nms_iou_threshold=nms_iou_threshold,
            nms_max_output_boxes=nms_max_output_boxes,
            group_of_weight=group_of_weight)
        self.recall_lower_bound = recall_lower_bound
        self.recall_upper_bound = recall_upper_bound
        self.group_of_weight = group_of_weight
        self.num_class = num_gt_classes
        self.use_weighted_mean_ap = use_weighted_mean_ap
        self.label_id_offset = label_id_offset
        # Per-image groundtruth state, keyed by image id.
        self.gt_boxes = {}
        self.gt_class_labels = {}
        self.gt_masks = {}
        self.gt_is_difficult_list = {}
        self.gt_is_group_of_list = {}
        self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=float)
        self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)
        self._initialize_detections()

    def _initialize_detections(self):
        """Initializes internal data structures for accumulated detections."""
        self.detection_keys = set()
        self.scores_per_class = [[] for _ in range(self.num_class)]
        self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
        self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
        # AP defaults to NaN for classes that are never evaluated.
        self.average_precision_per_class = np.empty(self.num_class, dtype=float)
        self.average_precision_per_class.fill(np.nan)
        self.precisions_per_class = [np.nan] * self.num_class
        self.recalls_per_class = [np.nan] * self.num_class
        self.sum_tp_class = [np.nan] * self.num_class
        self.corloc_per_class = np.ones(self.num_class, dtype=float)

    def clear_detections(self):
        """Resets accumulated detection state (groundtruth is kept)."""
        self._initialize_detections()

    def add_single_ground_truth_image_info(self, image_key, gt_boxes, gt_class_labels,
                                           gt_is_difficult_list=None, gt_is_group_of_list=None,
                                           gt_masks=None):
        """Adds groundtruth for a single image to be used for evaluation.

        Args:
            image_key: A unique string/integer identifier for the image.
            gt_boxes: float32 numpy array of shape [num_boxes, 4] of
                [ymin, xmin, ymax, xmax] boxes in absolute image coordinates.
            gt_class_labels: integer numpy array of shape [num_boxes] with
                0-indexed groundtruth classes for the boxes.
            gt_is_difficult_list: A length M numpy boolean array marking
                difficult instances. Defaults to None (no difficult boxes).
            gt_is_group_of_list: A length M numpy boolean array marking
                group-of boxes. Defaults to None (no group-of boxes).
            gt_masks: uint8 numpy array of shape [num_boxes, height, width]
                containing groundtruth masks with values in [0, 1].
        """
        if image_key in self.gt_boxes:
            logging.warning('image %s has already been added to the ground truth database.', image_key)
            return
        self.gt_boxes[image_key] = gt_boxes
        self.gt_class_labels[image_key] = gt_class_labels
        self.gt_masks[image_key] = gt_masks
        if gt_is_difficult_list is None:
            num_boxes = gt_boxes.shape[0]
            gt_is_difficult_list = np.zeros(num_boxes, dtype=bool)
        gt_is_difficult_list = gt_is_difficult_list.astype(dtype=bool)
        self.gt_is_difficult_list[image_key] = gt_is_difficult_list
        if gt_is_group_of_list is None:
            num_boxes = gt_boxes.shape[0]
            gt_is_group_of_list = np.zeros(num_boxes, dtype=bool)
        if gt_masks is None:
            num_boxes = gt_boxes.shape[0]
            mask_presence_indicator = np.zeros(num_boxes, dtype=bool)
        else:
            # True where the mask is empty (all zeros); such boxes are excluded
            # from the instance count like difficult ones.
            mask_presence_indicator = (np.sum(gt_masks, axis=(1, 2)) == 0).astype(dtype=bool)
        gt_is_group_of_list = gt_is_group_of_list.astype(dtype=bool)
        self.gt_is_group_of_list[image_key] = gt_is_group_of_list
        masked_gt_is_difficult_list = gt_is_difficult_list | mask_presence_indicator
        for class_index in range(self.num_class):
            # Plain instances count as 1 each; group-of instances contribute
            # group_of_weight each.
            num_gt_instances = np.sum(
                gt_class_labels[(~masked_gt_is_difficult_list) & (~gt_is_group_of_list)] == class_index)
            num_groupof_gt_instances = self.group_of_weight * np.sum(
                gt_class_labels[gt_is_group_of_list & (~masked_gt_is_difficult_list)] == class_index)
            self.num_gt_instances_per_class[class_index] += num_gt_instances + num_groupof_gt_instances
            if np.any(gt_class_labels == class_index):
                self.num_gt_imgs_per_class[class_index] += 1

    def add_single_detected_image_info(self, image_key, detected_boxes, detected_scores,
                                       detected_class_labels, detected_masks=None):
        """Adds detections for a single image to be used for evaluation.

        Args:
            image_key: A unique string/integer identifier for the image.
            detected_boxes: float32 numpy array of shape [num_boxes, 4] of
                [ymin, xmin, ymax, xmax] boxes in absolute image coordinates.
            detected_scores: float32 numpy array of shape [num_boxes] with
                detection scores.
            detected_class_labels: integer numpy array of shape [num_boxes] with
                0-indexed detection classes.
            detected_masks: np.uint8 numpy array of shape [num_boxes, height, width]
                containing detection masks with values in [0, 1].

        Raises:
            ValueError: if the number of boxes, scores and class labels differ in length.
        """
        if len(detected_boxes) != len(detected_scores) or len(detected_boxes) != len(detected_class_labels):
            # Bug fix: the original formatted three %d placeholders with a single
            # value (raising TypeError) and passed the other lengths as extra
            # ValueError arguments; format all three lengths into the message.
            raise ValueError(
                'detected_boxes, detected_scores and detected_class_labels should all have same lengths. Got[%d, %d, %d]'
                % (len(detected_boxes), len(detected_scores), len(detected_class_labels)))
        if image_key in self.detection_keys:
            logging.warning('image %s has already been added to the detection result database', image_key)
            return
        self.detection_keys.add(image_key)
        if image_key in self.gt_boxes:
            gt_boxes = self.gt_boxes[image_key]
            gt_class_labels = self.gt_class_labels[image_key]
            # Masks are popped (not read) so the potentially large arrays are
            # released once this image has been evaluated.
            gt_masks = self.gt_masks.pop(image_key)
            gt_is_difficult_list = self.gt_is_difficult_list[image_key]
            gt_is_group_of_list = self.gt_is_group_of_list[image_key]
        else:
            # No groundtruth for this image: evaluate against empty annotations.
            gt_boxes = np.empty(shape=[0, 4], dtype=float)
            gt_class_labels = np.array([], dtype=int)
            if detected_masks is None:
                gt_masks = None
            else:
                gt_masks = np.empty(shape=[0, 1, 1], dtype=float)
            gt_is_difficult_list = np.array([], dtype=bool)
            gt_is_group_of_list = np.array([], dtype=bool)
        scores, tp_fp_labels, is_class_correctly_detected_in_image = \
            self.per_image_eval.compute_object_detection_metrics(
                detected_boxes=detected_boxes,
                detected_scores=detected_scores,
                detected_class_labels=detected_class_labels,
                gt_boxes=gt_boxes,
                gt_class_labels=gt_class_labels,
                gt_is_difficult_list=gt_is_difficult_list,
                gt_is_group_of_list=gt_is_group_of_list,
                detected_masks=detected_masks,
                gt_masks=gt_masks)
        for i in range(self.num_class):
            if scores[i].shape[0] > 0:
                self.scores_per_class[i].append(scores[i])
                self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
        self.num_images_correctly_detected_per_class += is_class_correctly_detected_in_image

    def evaluate(self):
        """Compute evaluation result.

        Returns:
            A dict with the following fields -
                per_class_ap: float numpy array of average precision per class.
                mean_ap: mean average precision of all classes, float scalar.
                per_class_precision: List of per-class precision numpy arrays.
                per_class_recall: List of per-class recall numpy arrays.
                per_class_corlocs: numpy float array of CorLoc per class.
                mean_corloc: mean CorLoc score over classes, float scalar.
        """
        if (self.num_gt_instances_per_class == 0).any():
            logging.warning(
                'The following classes have no ground truth examples: %s',
                np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) + self.label_id_offset)
        if self.use_weighted_mean_ap:
            all_scores = np.array([], dtype=float)
            all_tp_fp_labels = np.array([], dtype=bool)
        for class_index in range(self.num_class):
            if self.num_gt_instances_per_class[class_index] == 0:
                continue
            if not self.scores_per_class[class_index]:
                scores = np.array([], dtype=float)
                tp_fp_labels = np.array([], dtype=float)
            else:
                scores = np.concatenate(self.scores_per_class[class_index])
                tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
            if self.use_weighted_mean_ap:
                all_scores = np.append(all_scores, scores)
                all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
            precision, recall = compute_precision_recall(
                scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
            # Restrict the curve to the configured recall operating area.
            recall_within_bound_indices = [
                index for index, value in enumerate(recall)
                if self.recall_lower_bound <= value <= self.recall_upper_bound]
            recall_within_bound = recall[recall_within_bound_indices]
            precision_within_bound = precision[recall_within_bound_indices]
            self.precisions_per_class[class_index] = precision_within_bound
            self.recalls_per_class[class_index] = recall_within_bound
            self.sum_tp_class[class_index] = tp_fp_labels.sum()
            average_precision = compute_average_precision(precision_within_bound, recall_within_bound)
            self.average_precision_per_class[class_index] = average_precision
            logging.debug('average_precision: %f', average_precision)
        self.corloc_per_class = compute_cor_loc(
            self.num_gt_imgs_per_class, self.num_images_correctly_detected_per_class)
        if self.use_weighted_mean_ap:
            # Pool all classes together and compute one AP over the pooled curve.
            num_gt_instances = np.sum(self.num_gt_instances_per_class)
            precision, recall = compute_precision_recall(all_scores, all_tp_fp_labels, num_gt_instances)
            recall_within_bound_indices = [
                index for index, value in enumerate(recall)
                if self.recall_lower_bound <= value <= self.recall_upper_bound]
            recall_within_bound = recall[recall_within_bound_indices]
            precision_within_bound = precision[recall_within_bound_indices]
            mean_ap = compute_average_precision(precision_within_bound, recall_within_bound)
        else:
            mean_ap = np.nanmean(self.average_precision_per_class)
        mean_corloc = np.nanmean(self.corloc_per_class)
        return dict(
            per_class_ap=self.average_precision_per_class,
            mean_ap=mean_ap,
            per_class_precision=self.precisions_per_class,
            per_class_recall=self.recalls_per_class,
            per_class_corlocs=self.corloc_per_class,
            mean_corloc=mean_corloc)
|
def create_model(model_name, bench_task='', num_classes=None, pretrained=False, checkpoint_path='', checkpoint_ema=False, **kwargs):
    """Create an EfficientDet model by name, delegating to create_model_from_config."""
    return create_model_from_config(
        get_efficientdet_config(model_name),
        bench_task=bench_task,
        num_classes=num_classes,
        pretrained=pretrained,
        checkpoint_path=checkpoint_path,
        checkpoint_ema=checkpoint_ema,
        **kwargs)
|
def create_model_from_config(config, bench_task='', num_classes=None, pretrained=False, checkpoint_path='', checkpoint_ema=False, **kwargs):
    """Build an EfficientDet from a config, load weights, and optionally wrap it in a bench."""
    pretrained_backbone = kwargs.pop('pretrained_backbone', True)
    if pretrained or checkpoint_path:
        # Full-model weights will be loaded, so backbone pretraining is redundant.
        pretrained_backbone = False
    # A fixed set of config fields may be overridden through kwargs.
    for field in ('redundant_bias', 'label_smoothing', 'legacy_focal', 'jit_loss',
                  'soft_nms', 'max_det_per_image', 'image_size'):
        override = kwargs.pop(field, None)
        if override is not None:
            setattr(config, field, override)
    use_labeler = kwargs.pop('bench_labeler', False)
    model = EfficientDet(config, pretrained_backbone=pretrained_backbone, **kwargs)
    if pretrained:
        load_pretrained(model, config.url)
    if num_classes is not None and num_classes != config.num_classes:
        model.reset_head(num_classes=num_classes)
    if checkpoint_path:
        load_checkpoint(model, checkpoint_path, use_ema=checkpoint_ema)
    if bench_task == 'train':
        model = DetBenchTrain(model, create_labeler=use_labeler)
    elif bench_task == 'predict':
        model = DetBenchPredict(model)
    return model
|
def load_pretrained(model, url, filter_fn=None, strict=True):
    """Load pretrained weights from `url` into `model`; no-op (with a warning) if url is empty."""
    if not url:
        logging.warning('Pretrained model URL is empty, using random initialization. Did you intend to use a `tf_` variant of the model?')
        return
    state_dict = load_state_dict_from_url(url, progress=False, map_location='cpu')
    # Optionally post-process the state dict (e.g. key remapping) before loading.
    model.load_state_dict(
        filter_fn(state_dict) if filter_fn is not None else state_dict,
        strict=strict)
|
def focal_loss_legacy(logits, targets, alpha: float, gamma: float, normalizer):
    """Legacy focal loss between `logits` and `targets`.

    Matches the loss used in the official Tensorflow EfficientDet impl for
    initial model releases (later replaced by the 'new' focal loss).

    Focal loss = -(1-pt)^gamma * log(pt), where pt is the probability of being
    classified to the true class.

    Args:
        logits: float32 tensor of size [batch, height_in, width_in, num_predictions].
        targets: float32 tensor of size [batch, height_in, width_in, num_predictions].
        alpha: scalar weighting positive examples by alpha and negatives by (1-alpha).
        gamma: scalar modulating loss from hard and easy examples.
        normalizer: scalar that normalizes the total loss.

    Returns:
        loss: normalized loss tensor.
    """
    bce = F.binary_cross_entropy_with_logits(logits, targets.to(logits.dtype), reduction='none')
    neg_logits = -1.0 * logits
    # (1 - pt)^gamma expressed in terms of logits for numerical stability.
    modulator = torch.exp(gamma * targets * neg_logits - gamma * torch.log1p(torch.exp(neg_logits)))
    focal = modulator * bce
    weighted = torch.where(targets == 1.0, alpha * focal, (1.0 - alpha) * focal)
    return weighted / normalizer
|
def new_focal_loss(logits, targets, alpha: float, gamma: float, normalizer, label_smoothing: float = 0.01):
    """Focal loss matching recent versions of the official Tensorflow EfficientDet impl.

    Supports label smoothing; somewhat slower and more memory hungry than the
    legacy variant, and does not jit-optimize as well.

    Focal loss = -(1-pt)^gamma * log(pt), where pt is the probability of being
    classified to the true class.

    Args:
        logits: float32 tensor of size [batch, height_in, width_in, num_predictions].
        targets: float32 tensor of size [batch, height_in, width_in, num_predictions].
        alpha: scalar weighting positive examples by alpha and negatives by (1-alpha).
        gamma: scalar modulating loss from hard and easy examples.
        normalizer: divide loss by this value.
        label_smoothing: float in [0, 1]; if > 0, smooth the labels.

    Returns:
        loss: normalized loss tensor.
    """
    targets = targets.to(logits.dtype)
    prob = logits.sigmoid()
    # pt: model's probability for the true class of each prediction.
    p_t = targets * prob + (1.0 - targets) * (1.0 - prob)
    alpha_factor = targets * alpha + (1.0 - targets) * (1.0 - alpha)
    modulating_factor = (1.0 - p_t) ** gamma
    # Smooth the labels only for the cross-entropy term, not for pt.
    if label_smoothing > 0.0:
        targets = targets * (1.0 - label_smoothing) + 0.5 * label_smoothing
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    return (1 / normalizer) * alpha_factor * modulating_factor * ce
|
def huber_loss(input, target, delta: float = 1.0, weights: Optional[torch.Tensor] = None, size_average: bool = True):
    """Elementwise Huber loss: quadratic below `delta`, linear above.

    Optional per-element `weights` scale the loss; the result is averaged when
    `size_average` is True, otherwise summed.
    """
    abs_err = (input - target).abs()
    quadratic = abs_err.clamp(max=delta)
    linear = abs_err - quadratic
    loss = 0.5 * quadratic.pow(2) + delta * linear
    if weights is not None:
        loss = loss.mul(weights)
    return loss.mean() if size_average else loss.sum()
|
def smooth_l1_loss(input, target, beta: float = 1.0 / 9, weights: Optional[torch.Tensor] = None, size_average: bool = True):
    """Smooth-L1 loss with a configurable `beta` transition point.

    Very similar to PyTorch's smooth_l1_loss, but with an extra `beta`
    parameter: quadratic for |err| < beta, linear beyond.
    """
    abs_err = torch.abs(input - target)
    if beta < 1e-05:
        # A vanishing beta makes the quadratic branch degenerate; use plain L1.
        loss = abs_err
    else:
        loss = torch.where(abs_err < beta, 0.5 * abs_err.pow(2) / beta, abs_err - 0.5 * beta)
    if weights is not None:
        loss = loss * weights
    return loss.mean() if size_average else loss.sum()
|
def _box_loss(box_outputs, box_targets, num_positives, delta: float = 0.1):
    """Huber box-regression loss, masked to non-zero targets and normalized by positive count."""
    # Four box coordinates per positive anchor.
    normalizer = num_positives * 4.0
    nonzero_mask = box_targets != 0.0
    total = huber_loss(box_outputs, box_targets, weights=nonzero_mask, delta=delta, size_average=False)
    return total / normalizer
|
def one_hot(x, num_classes: int):
    """One-hot encode integer labels `x`; negative labels yield an all-zero row."""
    valid = (x >= 0).unsqueeze(-1)
    encoded = torch.zeros(x.shape + (num_classes,), device=x.device, dtype=torch.float32)
    # Negative indices are clamped to 0 for the scatter (valid masks them to 0),
    # then the same mask zeroes out those rows entirely.
    return encoded.scatter(-1, x.unsqueeze(-1) * valid, 1) * valid
|
def loss_fn(cls_outputs: List[torch.Tensor], box_outputs: List[torch.Tensor], cls_targets: List[torch.Tensor], box_targets: List[torch.Tensor], num_positives: torch.Tensor, num_classes: int, alpha: float, gamma: float, delta: float, box_loss_weight: float, label_smoothing: float=0.0, legacy_focal: bool=False) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
    'Computes total detection loss.\n    Computes total detection loss including box and class loss from all levels.\n    Args:\n        cls_outputs: a List with values representing logits in [batch_size, height, width, num_anchors].\n            at each feature level (index)\n\n        box_outputs: a List with values representing box regression targets in\n            [batch_size, height, width, num_anchors * 4] at each feature level (index)\n\n        cls_targets: groundtruth class targets.\n\n        box_targets: groundtrusth box targets.\n\n        num_positives: num positive grountruth anchors\n\n    Returns:\n        total_loss: an integer tensor representing total loss reducing from class and box losses from all levels.\n\n        cls_loss: an integer tensor representing total class loss.\n\n        box_loss: an integer tensor representing total box regression loss.\n    '
    # +1 guards against division by zero for images with no positive anchors.
    num_positives_sum = (num_positives.sum() + 1.0).float()
    levels = len(cls_outputs)
    cls_losses = []
    box_losses = []
    for l in range(levels):
        cls_targets_at_level = cls_targets[l]
        box_targets_at_level = box_targets[l]
        # One-hot encode class ids; negative ids produce all-zero rows.
        cls_targets_at_level_oh = one_hot(cls_targets_at_level, num_classes)
        (bs, height, width, _, _) = cls_targets_at_level_oh.shape
        # Flatten the (anchors, classes) dims so targets line up with the raw head output.
        cls_targets_at_level_oh = cls_targets_at_level_oh.view(bs, height, width, (- 1))
        # Head outputs are NCHW; move channels last to match the targets.
        cls_outputs_at_level = cls_outputs[l].permute(0, 2, 3, 1).float()
        if legacy_focal:
            cls_loss = focal_loss_legacy(cls_outputs_at_level, cls_targets_at_level_oh, alpha=alpha, gamma=gamma, normalizer=num_positives_sum)
        else:
            cls_loss = new_focal_loss(cls_outputs_at_level, cls_targets_at_level_oh, alpha=alpha, gamma=gamma, normalizer=num_positives_sum, label_smoothing=label_smoothing)
        cls_loss = cls_loss.view(bs, height, width, (- 1), num_classes)
        # -2 marks ignored anchors (matcher convention in this module); zero out their contribution.
        cls_loss = (cls_loss * (cls_targets_at_level != (- 2)).unsqueeze((- 1)))
        cls_losses.append(cls_loss.sum())
        box_losses.append(_box_loss(box_outputs[l].permute(0, 2, 3, 1).float(), box_targets_at_level, num_positives_sum, delta=delta))
    # Sum the per-level losses across the feature pyramid.
    cls_loss = torch.sum(torch.stack(cls_losses, dim=(- 1)), dim=(- 1))
    box_loss = torch.sum(torch.stack(box_losses, dim=(- 1)), dim=(- 1))
    total_loss = (cls_loss + (box_loss_weight * box_loss))
    return (total_loss, cls_loss, box_loss)
|
class DetectionLoss(nn.Module):
    """Module wrapper around ``loss_fn``, configured from a model config.

    In eager mode it can dispatch to the TorchScript-compiled ``loss_jit``
    variant when ``config.jit_loss`` is set.
    """
    __constants__ = ['num_classes']

    def __init__(self, config):
        super(DetectionLoss, self).__init__()
        self.config = config
        # Cache config fields as attributes so forward() works under scripting.
        self.num_classes = config.num_classes
        self.alpha = config.alpha
        self.gamma = config.gamma
        self.delta = config.delta
        self.box_loss_weight = config.box_loss_weight
        self.label_smoothing = config.label_smoothing
        self.legacy_focal = config.legacy_focal
        self.use_jit = config.jit_loss

    def forward(self, cls_outputs: List[torch.Tensor], box_outputs: List[torch.Tensor], cls_targets: List[torch.Tensor], box_targets: List[torch.Tensor], num_positives: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
        # When this module itself is being scripted, always use the plain
        # python implementation; the pre-scripted fn is only for eager mode.
        loss_impl = loss_fn
        if self.use_jit and (not torch.jit.is_scripting()):
            loss_impl = loss_jit
        return loss_impl(
            cls_outputs, box_outputs, cls_targets, box_targets, num_positives,
            num_classes=self.num_classes, alpha=self.alpha, gamma=self.gamma,
            delta=self.delta, box_loss_weight=self.box_loss_weight,
            label_smoothing=self.label_smoothing, legacy_focal=self.legacy_focal)
|
def one_hot_bool(x, num_classes: int):
    """Boolean one-hot encoding of a 1-D index tensor ``x``.

    Returns a [len(x), num_classes] bool tensor with True at each row's index.
    """
    rows = x.size(0)
    indicator = torch.zeros(rows, num_classes, device=x.device, dtype=torch.bool)
    indicator.scatter_(1, x.unsqueeze(1), 1)
    return indicator
|
@torch.jit.script
class ArgMaxMatcher(object):
    'Matcher based on highest value.\n\n    This class computes matches from a similarity matrix. Each column is matched\n    to a single row.\n\n    To support object detection target assignment this class enables setting both\n    matched_threshold (upper threshold) and unmatched_threshold (lower thresholds)\n    defining three categories of similarity which define whether examples are\n    positive, negative, or ignored:\n    (1) similarity >= matched_threshold: Highest similarity. Matched/Positive!\n    (2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.\n        Depending on negatives_lower_than_unmatched, this is either\n        Unmatched/Negative OR Ignore.\n    (3) unmatched_threshold > similarity: Lowest similarity. Depending on flag\n        negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.\n    For ignored matches this class sets the values in the Match object to -2.\n    '

    def __init__(self, matched_threshold: float, unmatched_threshold: Optional[float]=None, negatives_lower_than_unmatched: bool=True, force_match_for_each_row: bool=False):
        'Construct ArgMaxMatcher.\n\n        Args:\n            matched_threshold: Threshold for positive matches. Positive if\n                sim >= matched_threshold, where sim is the maximum value of the\n                similarity matrix for a given column. Set to None for no threshold.\n            unmatched_threshold: Threshold for negative matches. Negative if\n                sim < unmatched_threshold. Defaults to matched_threshold\n                when set to None.\n            negatives_lower_than_unmatched: Boolean which defaults to True. If True\n                then negative matches are the ones below the unmatched_threshold,\n                whereas ignored matches are in between the matched and unmatched\n                threshold. If False, then negative matches are in between the matched\n                and unmatched threshold, and everything lower than unmatched is ignored.\n            force_match_for_each_row: If True, ensures that each row is matched to\n                at least one column (which is not guaranteed otherwise if the\n                matched_threshold is high). Defaults to False. See\n                argmax_matcher_test.testMatcherForceMatch() for an example.\n\n        Raises:\n            ValueError: if unmatched_threshold is set but matched_threshold is not set\n                or if unmatched_threshold > matched_threshold.\n        '
        # NOTE(review): matched_threshold is annotated non-optional float, so this
        # None-guard can only trip if callers bypass the annotation; the docstring's
        # "Set to None for no threshold" looks stale -- confirm.
        if ((matched_threshold is None) and (unmatched_threshold is not None)):
            raise ValueError('Need to also define matched_threshold when unmatched_threshold is defined')
        self._matched_threshold = matched_threshold
        # unmatched_threshold defaults to matched_threshold when not supplied.
        self._unmatched_threshold: float = 0.0
        if (unmatched_threshold is None):
            self._unmatched_threshold = matched_threshold
        else:
            if (unmatched_threshold > matched_threshold):
                raise ValueError('unmatched_threshold needs to be smaller or equal to matched_threshold')
            self._unmatched_threshold = unmatched_threshold
        if (not negatives_lower_than_unmatched):
            if (self._unmatched_threshold == self._matched_threshold):
                # NOTE(review): ValueError is given printf-style args here; they are
                # never interpolated into the message -- likely meant % formatting.
                raise ValueError('When negatives are in between matched and unmatched thresholds, these cannot be of equal value. matched: %s, unmatched: %s', self._matched_threshold, self._unmatched_threshold)
        self._force_match_for_each_row = force_match_for_each_row
        self._negatives_lower_than_unmatched = negatives_lower_than_unmatched

    def _match_when_rows_are_empty(self, similarity_matrix):
        "Performs matching when the rows of similarity matrix are empty.\n\n        When the rows are empty, all detections are false positives. So we return\n        a tensor of -1's to indicate that the columns do not match to any rows.\n\n        Returns:\n            matches: int32 tensor indicating the row each column matches to.\n        "
        # No groundtruth rows: every column (anchor) is unmatched (-1).
        return ((- 1) * torch.ones(similarity_matrix.shape[1], dtype=torch.long, device=similarity_matrix.device))

    def _match_when_rows_are_non_empty(self, similarity_matrix):
        'Performs matching when the rows of similarity matrix are non empty.\n\n        Returns:\n            matches: int32 tensor indicating the row each column matches to.\n        '
        # Best-matching row (and its similarity value) for every column.
        (matched_vals, matches) = torch.max(similarity_matrix, 0)
        if (self._matched_threshold is not None):
            below_unmatched_threshold = (self._unmatched_threshold > matched_vals)
            between_thresholds = ((matched_vals >= self._unmatched_threshold) & (self._matched_threshold > matched_vals))
            if self._negatives_lower_than_unmatched:
                # Below unmatched -> negative (-1); between thresholds -> ignored (-2).
                matches = self._set_values_using_indicator(matches, below_unmatched_threshold, (- 1))
                matches = self._set_values_using_indicator(matches, between_thresholds, (- 2))
            else:
                # Below unmatched -> ignored (-2); between thresholds -> negative (-1).
                matches = self._set_values_using_indicator(matches, below_unmatched_threshold, (- 2))
                matches = self._set_values_using_indicator(matches, between_thresholds, (- 1))
        if self._force_match_for_each_row:
            # For each row, force its best column to match it, overriding thresholds.
            force_match_column_ids = torch.argmax(similarity_matrix, 1)
            force_match_column_indicators = one_hot_bool(force_match_column_ids, similarity_matrix.shape[1])
            (force_match_column_mask, force_match_row_ids) = torch.max(force_match_column_indicators, 0)
            final_matches = torch.where(force_match_column_mask, force_match_row_ids, matches)
            return final_matches
        else:
            return matches

    def match(self, similarity_matrix):
        'Tries to match each column of the similarity matrix to a row.\n\n        Args:\n            similarity_matrix: tensor of shape [N, M] representing any similarity metric.\n\n        Returns:\n            Match object with corresponding matches for each of M columns.\n        '
        # NOTE(review): Match is defined later in this module; verify the
        # scripting order resolves it correctly.
        if (similarity_matrix.shape[0] == 0):
            return Match(self._match_when_rows_are_empty(similarity_matrix))
        else:
            return Match(self._match_when_rows_are_non_empty(similarity_matrix))

    def _set_values_using_indicator(self, x, indicator, val: int):
        'Set the indicated fields of x to val.\n\n        Args:\n            x: tensor.\n            indicator: boolean with same shape as x.\n            val: scalar with value to set.\n\n        Returns:\n            modified tensor.\n        '
        # Arithmetic masking (keeps the op TorchScript-friendly).
        indicator = indicator.to(dtype=x.dtype)
        return ((x * (1 - indicator)) + (val * indicator))
|
class FasterRcnnBoxCoder(object):
    """Faster RCNN box coder.

    Encodes boxes relative to anchors as [ty, tx, th, tw] offsets and decodes
    such offsets back to [ymin, xmin, ymax, xmax] corners.
    """

    def __init__(self, scale_factors: Optional[List[float]]=None, eps: float=EPS):
        """Constructor for FasterRcnnBoxCoder.

        Args:
            scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
                If set to None, does not perform scaling. For Faster RCNN,
                the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0].
            eps: small constant added to widths/heights to avoid log(0) and
                division by zero.
        """
        self._scale_factors = scale_factors
        if (scale_factors is not None):
            assert (len(scale_factors) == 4)
            for scalar in scale_factors:
                assert (scalar > 0)
        self.eps = eps

    def code_size(self):
        # Four values per box: [ty, tx, th, tw].
        return 4

    def encode(self, boxes: BoxList, anchors: BoxList):
        """Encode a box collection with respect to anchor collection.

        Args:
            boxes: BoxList holding N boxes to be encoded.
            anchors: BoxList of anchors.

        Returns:
            a tensor representing N anchor-encoded boxes of the format [ty, tx, th, tw].
        """
        (ycenter_a, xcenter_a, ha, wa) = anchors.get_center_coordinates_and_sizes()
        (ycenter, xcenter, h, w) = boxes.get_center_coordinates_and_sizes()
        # Avoid NaN in the divisions and logs below; out-of-place so no caller
        # state is touched.
        ha = ha + self.eps
        wa = wa + self.eps
        h = h + self.eps
        w = w + self.eps
        tx = ((xcenter - xcenter_a) / wa)
        ty = ((ycenter - ycenter_a) / ha)
        tw = torch.log((w / wa))
        th = torch.log((h / ha))
        if (self._scale_factors is not None):
            ty = ty * self._scale_factors[0]
            tx = tx * self._scale_factors[1]
            th = th * self._scale_factors[2]
            tw = tw * self._scale_factors[3]
        return torch.stack([ty, tx, th, tw]).t()

    def decode(self, rel_codes, anchors: BoxList):
        """Decode relative codes to boxes.

        Args:
            rel_codes: a tensor representing N anchor-encoded boxes.
            anchors: BoxList of anchors.

        Returns:
            boxes: BoxList holding N bounding boxes.
        """
        (ycenter_a, xcenter_a, ha, wa) = anchors.get_center_coordinates_and_sizes()
        (ty, tx, th, tw) = rel_codes.t().unbind()
        if (self._scale_factors is not None):
            # ty/tx/th/tw are views into the caller's rel_codes; the previous
            # in-place `/=` here silently mutated the input tensor (and, via
            # batch_decode, the whole encoded batch). Use out-of-place division.
            ty = ty / self._scale_factors[0]
            tx = tx / self._scale_factors[1]
            th = th / self._scale_factors[2]
            tw = tw / self._scale_factors[3]
        w = (torch.exp(tw) * wa)
        h = (torch.exp(th) * ha)
        ycenter = ((ty * ha) + ycenter_a)
        xcenter = ((tx * wa) + xcenter_a)
        ymin = (ycenter - (h / 2.0))
        xmin = (xcenter - (w / 2.0))
        ymax = (ycenter + (h / 2.0))
        xmax = (xcenter + (w / 2.0))
        return BoxList(torch.stack([ymin, xmin, ymax, xmax]).t())
|
def batch_decode(encoded_boxes, box_coder: FasterRcnnBoxCoder, anchors: BoxList):
    """Decode a batch of encoded boxes against a shared anchor set.

    Args:
        encoded_boxes: a float32 tensor of shape [batch_size, num_anchors, code_size]
            representing the location of the objects.
        box_coder: a BoxCoder object.
        anchors: a BoxList of anchors used to encode `encoded_boxes`.

    Returns:
        decoded_boxes: a float32 tensor of shape [batch_size, num_anchors, coder_size]
        representing the corners of the objects in the order of [y_min, x_min, y_max, x_max].

    Raises:
        ValueError: if the number of anchors inferred from encoded_boxes and anchors
            are inconsistent.
    """
    assert (len(encoded_boxes.shape) == 3)
    if (encoded_boxes.shape[1] != anchors.num_boxes()):
        raise ValueError(('The number of anchors inferred from encoded_boxes and anchors are inconsistent: shape[1] of encoded_boxes %s should be equal to the number of anchors: %s.' % (encoded_boxes.shape[1], anchors.num_boxes())))
    # BUG FIX: BoxList.boxes is a method, so the original `.boxes` (no call)
    # stacked bound methods instead of tensors. Call it to get the coordinates.
    per_image = [box_coder.decode(codes, anchors).boxes() for codes in encoded_boxes.unbind()]
    return torch.stack(per_image)
|
@torch.jit.script
class BoxList(object):
    'Box collection.'
    # Backing store: 'boxes' entry is mandatory ([N, 4] float32 corners); any
    # other entries hold per-box extras (labels, weights, keypoints, ...).
    data: Dict[(str, torch.Tensor)]

    def __init__(self, boxes):
        'Constructs box collection.\n\n        Args:\n            boxes: a tensor of shape [N, 4] representing box corners\n\n        Raises:\n            ValueError: if invalid dimensions for bbox data or if bbox data is not in float32 format.\n        '
        if ((len(boxes.shape) != 2) or (boxes.shape[(- 1)] != 4)):
            raise ValueError('Invalid dimensions for box data.')
        if (boxes.dtype != torch.float32):
            raise ValueError('Invalid tensor type: should be tf.float32')
        self.data = {'boxes': boxes}

    def num_boxes(self):
        'Returns number of boxes held in collection.\n\n        Returns:\n            a tensor representing the number of boxes held in the collection.\n        '
        # NOTE(review): this returns shape[0] (an int), not a tensor as the
        # docstring says.
        return self.data['boxes'].shape[0]

    def get_all_fields(self):
        'Returns all fields.'
        # Returns a keys view over the backing dict (includes 'boxes').
        return self.data.keys()

    def get_extra_fields(self):
        "Returns all non-box fields (i.e., everything not named 'boxes')."
        extra: List[str] = []
        for k in self.data.keys():
            if (k != 'boxes'):
                extra.append(k)
        return extra

    def add_field(self, field: str, field_data: torch.Tensor):
        'Add field to box list.\n\n        This method can be used to add related box data such as weights/labels, etc.\n\n        Args:\n            field: a string key to access the data via `get`\n            field_data: a tensor containing the data to store in the BoxList\n        '
        self.data[field] = field_data

    def has_field(self, field: str):
        # True if `field` has been stored (includes the mandatory 'boxes').
        return (field in self.data)

    def boxes(self):
        'Convenience function for accessing box coordinates.\n\n        Returns:\n            a tensor with shape [N, 4] representing box coordinates.\n        '
        return self.get_field('boxes')

    def set_boxes(self, boxes):
        'Convenience function for setting box coordinates.\n\n        Args:\n            boxes: a tensor of shape [N, 4] representing box corners\n\n        Raises:\n            ValueError: if invalid dimensions for bbox data\n        '
        # NOTE(review): unlike __init__, this setter does not enforce float32.
        if ((len(boxes.shape) != 2) or (boxes.shape[(- 1)] != 4)):
            raise ValueError('Invalid dimensions for box data.')
        self.data['boxes'] = boxes

    def get_field(self, field: str):
        'Accesses a box collection and associated fields.\n\n        This function returns specified field with object; if no field is specified,\n        it returns the box coordinates.\n\n        Args:\n            field: this optional string parameter can be used to specify a related field to be accessed.\n\n        Returns:\n            a tensor representing the box collection or an associated field.\n\n        Raises:\n            ValueError: if invalid field\n        '
        if (not self.has_field(field)):
            raise ValueError(f'field {field} does not exist')
        return self.data[field]

    def set_field(self, field: str, value: torch.Tensor):
        'Sets the value of a field.\n\n        Updates the field of a box_list with a given value.\n\n        Args:\n            field: (string) name of the field to set value.\n            value: the value to assign to the field.\n\n        Raises:\n            ValueError: if the box_list does not have specified field.\n        '
        # Unlike add_field, this refuses to create new fields.
        if (not self.has_field(field)):
            raise ValueError(f'field {field} does not exist')
        self.data[field] = value

    def get_center_coordinates_and_sizes(self):
        'Computes the center coordinates, height and width of the boxes.\n\n        Returns:\n            a list of 4 1-D tensors [ycenter, xcenter, height, width].\n        '
        box_corners = self.boxes()
        # Transpose to [4, N] then unbind into four [N] coordinate tensors.
        (ymin, xmin, ymax, xmax) = box_corners.t().unbind()
        width = (xmax - xmin)
        height = (ymax - ymin)
        ycenter = (ymin + (height / 2.0))
        xcenter = (xmin + (width / 2.0))
        return [ycenter, xcenter, height, width]

    def transpose_coordinates(self):
        'Transpose the coordinate representation in a boxlist.\n\n        '
        # Swaps y/x in place: [ymin, xmin, ymax, xmax] -> [xmin, ymin, xmax, ymax].
        (y_min, x_min, y_max, x_max) = self.boxes().chunk(4, dim=1)
        self.set_boxes(torch.cat([x_min, y_min, x_max, y_max], 1))

    def as_tensor_dict(self, fields: Optional[List[str]]=None):
        'Retrieves specified fields as a dictionary of tensors.\n\n        Args:\n            fields: (optional) list of fields to return in the dictionary.\n                If None (default), all fields are returned.\n\n        Returns:\n            tensor_dict: A dictionary of tensors specified by fields.\n\n        Raises:\n            ValueError: if specified field is not contained in boxlist.\n        '
        tensor_dict = {}
        if (fields is None):
            fields = self.get_all_fields()
        for field in fields:
            if (not self.has_field(field)):
                raise ValueError('boxlist must contain all specified fields')
            tensor_dict[field] = self.get_field(field)
        return tensor_dict

    def device(self):
        # Device of the mandatory 'boxes' tensor (all fields are expected to
        # live on the same device -- not enforced here).
        return self.data['boxes'].device
|
@torch.jit.script
class Match(object):
    'Class to store results from the matcher.\n\n    This class is used to store the results from the matcher. It provides\n    convenient methods to query the matching results.\n    '

    def __init__(self, match_results: torch.Tensor):
        'Constructs a Match object.\n\n        Args:\n            match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,\n                meaning that column i is matched with row match_results[i].\n                (2) match_results[i]=-1, meaning that column i is not matched.\n                (3) match_results[i]=-2, meaning that column i is ignored.\n\n        Raises:\n            ValueError: if match_results does not have rank 1 or is not an integer int32 scalar tensor\n        '
        if (len(match_results.shape) != 1):
            raise ValueError('match_results should have rank 1')
        if (match_results.dtype not in (torch.int32, torch.int64)):
            raise ValueError('match_results should be an int32 or int64 scalar tensor')
        self.match_results = match_results

    def matched_column_indices(self):
        'Returns column indices that match to some row.\n\n        The indices returned by this op are always sorted in increasing order.\n\n        Returns:\n            column_indices: int32 tensor of shape [K] with column indices.\n        '
        # `> -1` is equivalent to `>= 0`: any non-negative entry is a match.
        return torch.nonzero((self.match_results > (- 1))).flatten().long()

    def matched_column_indicator(self):
        'Returns column indices that are matched.\n\n        Returns:\n            column_indices: int32 tensor of shape [K] with column indices.\n        '
        return (self.match_results >= 0)

    def num_matched_columns(self):
        'Returns number (int32 scalar tensor) of matched columns.'
        return self.matched_column_indices().numel()

    def unmatched_column_indices(self):
        'Returns column indices that do not match any row.\n\n        The indices returned by this op are always sorted in increasing order.\n\n        Returns:\n            column_indices: int32 tensor of shape [K] with column indices.\n        '
        return torch.nonzero((self.match_results == (- 1))).flatten().long()

    def unmatched_column_indicator(self):
        'Returns column indices that are unmatched.\n\n        Returns:\n            column_indices: int32 tensor of shape [K] with column indices.\n        '
        return (self.match_results == (- 1))

    def num_unmatched_columns(self):
        'Returns number (int32 scalar tensor) of unmatched columns.'
        return self.unmatched_column_indices().numel()

    def ignored_column_indices(self):
        'Returns column indices that are ignored (neither Matched nor Unmatched).\n\n        The indices returned by this op are always sorted in increasing order.\n\n        Returns:\n            column_indices: int32 tensor of shape [K] with column indices.\n        '
        return torch.nonzero(self.ignored_column_indicator()).flatten().long()

    def ignored_column_indicator(self):
        'Returns boolean column indicator where True means the column is ignored.\n\n        Returns:\n            column_indicator: boolean vector which is True for all ignored column indices.\n        '
        return (self.match_results == (- 2))

    def num_ignored_columns(self):
        'Returns number (int32 scalar tensor) of ignored columns.'
        return self.ignored_column_indices().numel()

    def unmatched_or_ignored_column_indices(self):
        'Returns column indices that are unmatched or ignored.\n\n        The indices returned by this op are always sorted in increasing order.\n\n        Returns:\n            column_indices: int32 tensor of shape [K] with column indices.\n        '
        # Any negative value (-1 unmatched, -2 ignored) qualifies.
        return torch.nonzero((0 > self.match_results)).flatten().long()

    def matched_row_indices(self):
        'Returns row indices that match some column.\n\n        The indices returned by this op are ordered so as to be in correspondence with the output of\n        matched_column_indicator(). For example if self.matched_column_indicator() is [0,2],\n        and self.matched_row_indices() is [7, 3], then we know that column 0 was matched to row 7 and\n        column 2 was matched to row 3.\n\n        Returns:\n            row_indices: int32 tensor of shape [K] with row indices.\n        '
        return torch.gather(self.match_results, 0, self.matched_column_indices()).flatten().long()

    def gather_based_on_match(self, input_tensor, unmatched_value, ignored_value):
        'Gathers elements from `input_tensor` based on match results.\n\n        For columns that are matched to a row, gathered_tensor[col] is set to input_tensor[match_results[col]].\n        For columns that are unmatched, gathered_tensor[col] is set to unmatched_value. Finally, for columns that\n        are ignored gathered_tensor[col] is set to ignored_value.\n\n        Note that the input_tensor.shape[1:] must match with unmatched_value.shape\n        and ignored_value.shape\n\n        Args:\n            input_tensor: Tensor to gather values from.\n            unmatched_value: Constant tensor or python scalar value for unmatched columns.\n            ignored_value: Constant tensor or python scalar for ignored columns.\n\n        Returns:\n            gathered_tensor: A tensor containing values gathered from input_tensor.\n                The shape of the gathered tensor is [match_results.shape[0]] + input_tensor.shape[1:].\n        '
        # Prepend sentinel rows so shifting the match ids by +2 maps:
        # -2 (ignored) -> row 0, -1 (unmatched) -> row 1, k >= 0 -> row k + 2.
        if isinstance(ignored_value, torch.Tensor):
            input_tensor = torch.cat([ignored_value, unmatched_value, input_tensor], dim=0)
        else:
            input_tensor = torch.cat([torch.tensor([ignored_value, unmatched_value], dtype=input_tensor.dtype, device=input_tensor.device), input_tensor], dim=0)
        gather_indices = torch.clamp((self.match_results + 2), min=0)
        gathered_tensor = torch.index_select(input_tensor, 0, gather_indices)
        return gathered_tensor
|
def area(boxlist: BoxList):
    """Computes area of boxes.

    Args:
        boxlist: BoxList holding N boxes

    Returns:
        a tensor with shape [N] representing box areas.
    """
    ymin, xmin, ymax, xmax = boxlist.boxes().chunk(4, dim=1)
    return ((ymax - ymin) * (xmax - xmin)).squeeze(1)
|
def intersection(boxlist1: BoxList, boxlist2: BoxList):
    """Compute pairwise intersection areas between boxes.

    Args:
        boxlist1: BoxList holding N boxes
        boxlist2: BoxList holding M boxes

    Returns:
        a tensor with shape [N, M] representing pairwise intersections
    """
    ymin1, xmin1, ymax1, xmax1 = boxlist1.boxes().chunk(4, dim=1)
    ymin2, xmin2, ymax2, xmax2 = boxlist2.boxes().chunk(4, dim=1)
    # Broadcasting [N, 1] against [1, M] yields the full pairwise grid.
    overlap_h = (torch.min(ymax1, ymax2.t()) - torch.max(ymin1, ymin2.t())).clamp(min=0)
    overlap_w = (torch.min(xmax1, xmax2.t()) - torch.max(xmin1, xmin2.t())).clamp(min=0)
    return overlap_h * overlap_w
|
def iou(boxlist1: BoxList, boxlist2: BoxList):
    """Computes pairwise intersection-over-union between box collections.

    Args:
        boxlist1: BoxList holding N boxes
        boxlist2: BoxList holding M boxes

    Returns:
        a tensor with shape [N, M] representing pairwise iou scores.
    """
    inter = intersection(boxlist1, boxlist2)
    union = (area(boxlist1).unsqueeze(1) + area(boxlist2).unsqueeze(0)) - inter
    # Zero intersection short-circuits to 0 to dodge 0/0 when union is 0 too.
    return torch.where((inter == 0.0), torch.zeros_like(inter), (inter / union))
|
@torch.jit.script
class IouSimilarity(object):
    'Class to compute similarity based on Intersection over Union (IOU) metric.\n\n    This class computes pairwise similarity between two BoxLists based on IOU.\n    '

    def __init__(self):
        # Stateless; exists so TargetAssigner can hold a similarity-calculator object.
        pass

    def compare(self, boxlist1: BoxList, boxlist2: BoxList):
        'Computes matrix of pairwise similarity between BoxLists.\n\n        This op (to be overridden) computes a measure of pairwise similarity between\n        the boxes in the given BoxLists. Higher values indicate more similarity.\n\n        Note that this method simply measures similarity and does not explicitly\n        perform a matching.\n\n        Args:\n            boxlist1: BoxList holding N boxes.\n            boxlist2: BoxList holding M boxes.\n\n        Returns:\n            a (float32) tensor of shape [N, M] with pairwise similarity score.\n        '
        return iou(boxlist1, boxlist2)
|
class TargetAssigner(object):
'Target assigner to compute classification and regression targets.'
def __init__(self, similarity_calc: IouSimilarity, matcher: ArgMaxMatcher, box_coder: FasterRcnnBoxCoder, negative_class_weight: float=1.0, unmatched_cls_target: Optional[float]=None, keypoints_field_name: str=KEYPOINTS_FIELD_NAME):
'Construct Object Detection Target Assigner.\n\n Args:\n similarity_calc: a RegionSimilarityCalculator\n\n matcher: Matcher used to match groundtruth to anchors.\n\n box_coder: BoxCoder used to encode matching groundtruth boxes with respect to anchors.\n\n negative_class_weight: classification weight to be associated to negative\n anchors (default: 1.0). The weight must be in [0., 1.].\n\n unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]\n which is consistent with the classification target for each\n anchor (and can be empty for scalar targets). This shape must thus be\n compatible with the groundtruth labels that are passed to the "assign"\n function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).\n If set to None, unmatched_cls_target is set to be [0] for each anchor.\n\n Raises:\n ValueError: if similarity_calc is not a RegionSimilarityCalculator or\n if matcher is not a Matcher or if box_coder is not a BoxCoder\n '
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
if (unmatched_cls_target is not None):
self._unmatched_cls_target = unmatched_cls_target
else:
self._unmatched_cls_target = 0.0
self._keypoints_field_name = keypoints_field_name
def assign(self, anchors: BoxList, groundtruth_boxes: BoxList, groundtruth_labels=None, groundtruth_weights=None):
'Assign classification and regression targets to each anchor.\n\n For a given set of anchors and groundtruth detections, match anchors\n to groundtruth_boxes and assign classification and regression targets to\n each anchor as well as weights based on the resulting match (specifying,\n e.g., which anchors should not contribute to training loss).\n\n Anchors that are not matched to anything are given a classification target\n of self._unmatched_cls_target which can be specified via the constructor.\n\n Args:\n anchors: a BoxList representing N anchors\n\n groundtruth_boxes: a BoxList representing M groundtruth boxes\n\n groundtruth_labels: a tensor of shape [M, d_1, ... d_k]\n with labels for each of the ground_truth boxes. The subshape\n [d_1, ... d_k] can be empty (corresponding to scalar inputs). When set\n to None, groundtruth_labels assumes a binary problem where all\n ground_truth boxes get a positive label (of 1).\n\n groundtruth_weights: a float tensor of shape [M] indicating the weight to\n assign to all anchors match to a particular groundtruth box. The weights\n must be in [0., 1.]. If None, all weights are set to 1.\n\n **params: Additional keyword arguments for specific implementations of the Matcher.\n\n Returns:\n cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],\n where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels\n which has shape [num_gt_boxes, d_1, d_2, ... d_k].\n\n cls_weights: a float32 tensor with shape [num_anchors]\n\n reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]\n\n reg_weights: a float32 tensor with shape [num_anchors]\n\n match: a matcher.Match object encoding the match between anchors and groundtruth boxes,\n with rows corresponding to groundtruth boxes and columns corresponding to anchors.\n\n Raises:\n ValueError: if anchors or groundtruth_boxes are not of type box_list.BoxList\n '
if (not isinstance(anchors, box_list.BoxList)):
raise ValueError('anchors must be an BoxList')
if (not isinstance(groundtruth_boxes, box_list.BoxList)):
raise ValueError('groundtruth_boxes must be an BoxList')
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors)
match = self._matcher.match(match_quality_matrix)
reg_targets = self._create_regression_targets(anchors, groundtruth_boxes, match)
cls_targets = self._create_classification_targets(groundtruth_labels, match)
return (cls_targets, reg_targets, match)
def _create_regression_targets(self, anchors: BoxList, groundtruth_boxes: BoxList, match: Match):
'Returns a regression target for each anchor.\n\n Args:\n anchors: a BoxList representing N anchors\n\n groundtruth_boxes: a BoxList representing M groundtruth_boxes\n\n match: a matcher.Match object\n\n Returns:\n reg_targets: a float32 tensor with shape [N, box_code_dimension]\n '
device = anchors.device()
zero_box = torch.zeros((1, 4), device=device)
matched_gt_boxes = match.gather_based_on_match(groundtruth_boxes.boxes(), unmatched_value=zero_box, ignored_value=zero_box)
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(self._keypoints_field_name):
groundtruth_keypoints = groundtruth_boxes.get_field(self._keypoints_field_name)
zero_kp = torch.zeros(((1,) + groundtruth_keypoints.shape[1:]), device=device)
matched_keypoints = match.gather_based_on_match(groundtruth_keypoints, unmatched_value=zero_kp, ignored_value=zero_kp)
matched_gt_boxlist.add_field(self._keypoints_field_name, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
unmatched_ignored_reg_targets = self._default_regression_target(device).repeat(match.match_results.shape[0], 1)
matched_anchors_mask = match.matched_column_indicator()
reg_targets = torch.where(matched_anchors_mask.unsqueeze(1), matched_reg_targets, unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self, device: torch.device):
'Returns the default target for anchors to regress to.\n\n Default regression targets are set to zero (though in this implementation what\n these targets are set to should not matter as the regression weight of any box\n set to regress to the default target is zero).\n\n Returns:\n default_target: a float32 tensor with shape [1, box_code_dimension]\n '
return torch.zeros(1, self._box_coder.code_size(), device=device)
def _create_classification_targets(self, groundtruth_labels, match: Match):
'Create classification targets for each anchor.\n\n Assign a classification target of for each anchor to the matching\n groundtruth label that is provided by match. Anchors that are not matched\n to anything are given the target self._unmatched_cls_target\n\n Args:\n groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]\n with labels for each of the ground_truth boxes. The subshape\n [d_1, ... d_k] can be empty (corresponding to scalar labels).\n match: a matcher.Match object that provides a matching between anchors\n and groundtruth boxes.\n\n Returns:\n a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the\n subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has\n shape [num_gt_boxes, d_1, d_2, ... d_k].\n '
return match.gather_based_on_match(groundtruth_labels, unmatched_value=self._unmatched_cls_target, ignored_value=self._unmatched_cls_target)
def _create_regression_weights(self, match: Match, groundtruth_weights):
'Set regression weight for each anchor.\n\n Only positive anchors are set to contribute to the regression loss, so this\n method returns a weight of 1 for every positive anchor and 0 for every\n negative anchor.\n\n Args:\n match: a matcher.Match object that provides a matching between anchors and groundtruth boxes.\n groundtruth_weights: a float tensor of shape [M] indicating the weight to\n assign to all anchors match to a particular groundtruth box.\n\n Returns:\n a float32 tensor with shape [num_anchors] representing regression weights.\n '
return match.gather_based_on_match(groundtruth_weights, ignored_value=0.0, unmatched_value=0.0)
def _create_classification_weights(self, match: Match, groundtruth_weights):
    """Create the classification weight for each anchor.

    Positive anchors take the weight of their groundtruth box, unmatched
    anchors take self._negative_class_weight, and ignored anchors take 0 so
    they contribute nothing to the classification loss.

    Args:
        match: matcher.Match mapping anchors to groundtruth boxes.
        groundtruth_weights: float tensor [M] of per-groundtruth-box weights.

    Returns:
        float32 tensor [num_anchors] of classification weights.
    """
    return match.gather_based_on_match(
        groundtruth_weights,
        unmatched_value=self._negative_class_weight,
        ignored_value=0.0)
def box_coder(self):
    """Return the BoxCoder used by this target assigner."""
    coder = self._box_coder
    return coder
|
def _parse_args():
    """Parse CLI args, letting a YAML config file (--config) supply defaults.

    Returns:
        (args, args_text): the parsed Namespace and its YAML serialization
        (saved alongside checkpoints for reproducibility).
    """
    # First pass: extract only the --config option.
    args_config, remaining = config_parser.parse_known_args()
    if args_config.config:
        # Values from the YAML file become parser defaults; explicit CLI
        # flags in `remaining` still take precedence.
        with open(args_config.config, 'r') as cf:
            parser.set_defaults(**yaml.safe_load(cf))
    args = parser.parse_args(remaining)
    args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
    return args, args_text
|
def get_clip_parameters(model, exclude_head=False):
    """Return the parameters gradient clipping should apply to.

    With exclude_head=True (used e.g. for AGC), parameters whose qualified
    name contains 'predict' (the prediction head) are filtered out.
    """
    if not exclude_head:
        return model.parameters()
    return [param for name, param in model.named_parameters() if 'predict' not in name]
|
def main():
    """Entry point for (optionally distributed) detection training.

    Parses args, builds model / optimizer / EMA / loaders, then runs the
    train + validate loop with checkpointing and LR scheduling.
    NOTE(review): this file defines `main()` more than once further below;
    it looks like several scripts were concatenated — confirm which entry
    point is intended before relying on module-level behavior.
    """
    utils.setup_default_logging()
    (args, args_text) = _parse_args()
    # Positive flags derived from the "no-" CLI switches.
    args.pretrained_backbone = (not args.no_pretrained_backbone)
    args.prefetcher = (not args.no_prefetcher)
    args.distributed = False
    if ('WORLD_SIZE' in os.environ):
        # Launched via torch.distributed with more than one process.
        args.distributed = (int(os.environ['WORLD_SIZE']) > 1)
    args.device = 'cuda:0'
    args.world_size = 1
    args.rank = 0
    device = utils.init_distributed_device(args)
    assert (args.rank >= 0)
    if args.distributed:
        logging.info(('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.' % (args.rank, args.world_size)))
    else:
        logging.info('Training with a single process on 1 GPU.')
    # Resolve which AMP implementation to use: native torch.cuda.amp is
    # preferred, NVIDIA apex is the fallback, else float32.
    use_amp = None
    if args.amp:
        if has_native_amp:
            args.native_amp = True
        elif has_apex:
            args.apex_amp = True
        else:
            logging.warning('Neither APEX or native Torch AMP is available, using float32. Install NVIDA apex or upgrade to PyTorch 1.6.')
    if args.native_amp:
        if has_native_amp:
            use_amp = 'native'
        else:
            logging.warning('Native AMP not available, using float32. Upgrade to PyTorch 1.6.')
    elif args.apex_amp:
        if has_apex:
            use_amp = 'apex'
        else:
            logging.warning('APEX AMP not available, using float32. Install NVIDA apex')
    # Seed offset by rank so each process gets distinct augmentation streams.
    utils.random_seed(args.seed, args.rank)
    with set_layer_config(scriptable=args.torchscript):
        model = create_model(args.model, bench_task='train', num_classes=args.num_classes, pretrained=args.pretrained, pretrained_backbone=args.pretrained_backbone, redundant_bias=args.redundant_bias, label_smoothing=args.smoothing, legacy_focal=args.legacy_focal, jit_loss=args.jit_loss, soft_nms=args.soft_nms, bench_labeler=args.bench_labeler, checkpoint_path=args.initial_checkpoint)
    model_config = model.config
    if (args.local_rank == 0):
        logging.info(('Model %s created, param count: %d' % (args.model, sum([m.numel() for m in model.parameters()]))))
    model.cuda()
    if args.channels_last:
        model = model.to(memory_format=torch.channels_last)
    if (args.distributed and args.sync_bn):
        # Convert BN layers to synchronized BN before wrapping in DDP.
        if (has_apex and (use_amp == 'apex')):
            model = convert_syncbn_model(model)
        else:
            model = convert_sync_batchnorm(model)
        if (args.local_rank == 0):
            logging.info('Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
    if args.torchscript:
        # torch.jit.script is incompatible with compile/apex/sync-bn.
        assert (not args.torchcompile), 'Cannot use torch.compile() with torch.jit.script()'
        assert (not (use_amp == 'apex')), 'Cannot use APEX AMP with torchscripted model, force native amp with `--native-amp` flag'
        assert (not args.sync_bn), 'Cannot use SyncBatchNorm with torchscripted model. Use `--dist-bn reduce` instead of `--sync-bn`'
        model = torch.jit.script(model)
    optimizer = create_optimizer(args, model)
    amp_autocast = suppress
    loss_scaler = None
    if (use_amp == 'native'):
        amp_autocast = torch.cuda.amp.autocast
        loss_scaler = utils.NativeScaler()
        if (args.local_rank == 0):
            logging.info('Using native Torch AMP. Training in mixed precision.')
    elif (use_amp == 'apex'):
        (model, optimizer) = amp.initialize(model, optimizer, opt_level='O1')
        loss_scaler = utils.ApexScaler()
        if (args.local_rank == 0):
            logging.info('Using NVIDIA APEX AMP. Training in mixed precision.')
    elif (args.local_rank == 0):
        logging.info('AMP not enabled. Training in float32.')
    # Optionally resume optimizer/scaler state and the epoch counter.
    resume_epoch = None
    if args.resume:
        resume_epoch = resume_checkpoint(unwrap_bench(model), args.resume, optimizer=(None if args.no_resume_opt else optimizer), loss_scaler=(None if args.no_resume_opt else loss_scaler), log_info=(args.local_rank == 0))
    model_ema = None
    if args.model_ema:
        # EMA is created AFTER cuda()/AMP setup so its weights match.
        model_ema = utils.ModelEmaV2(model, decay=args.model_ema_decay)
        if args.resume:
            load_checkpoint(unwrap_bench(model_ema), args.resume, use_ema=True)
    if args.distributed:
        if (has_apex and (use_amp == 'apex')):
            if (args.local_rank == 0):
                logging.info('Using apex DistributedDataParallel.')
            model = ApexDDP(model, delay_allreduce=True)
        else:
            if (args.local_rank == 0):
                logging.info('Using torch DistributedDataParallel.')
            model = NativeDDP(model, device_ids=[args.device])
    # Re-sync EMA weights with the (possibly wrapped) model on a fresh run.
    if ((model_ema is not None) and (not args.resume)):
        model_ema.set(model)
    if args.torchcompile:
        model = torch.compile(model, backend=args.torchcompile)
    (lr_scheduler, num_epochs) = create_scheduler(args, optimizer)
    start_epoch = 0
    if (args.start_epoch is not None):
        # Explicit --start-epoch overrides the epoch restored from checkpoint.
        start_epoch = args.start_epoch
    elif (resume_epoch is not None):
        start_epoch = resume_epoch
    if ((lr_scheduler is not None) and (start_epoch > 0)):
        # Fast-forward the schedule to the resume point.
        lr_scheduler.step(start_epoch)
    if (args.local_rank == 0):
        logging.info('Scheduled epochs: {}'.format(num_epochs))
    (loader_train, loader_eval, evaluator) = create_datasets_and_loaders(args, model_config)
    # Sanity-check label space: a model with fewer classes than the dataset
    # labels is a hard error; more classes is merely wasteful.
    if (model_config.num_classes < loader_train.dataset.parser.max_label):
        logging.error(f'Model {model_config.num_classes} has fewer classes than dataset {loader_train.dataset.parser.max_label}.')
        exit(1)
    if (model_config.num_classes > loader_train.dataset.parser.max_label):
        logging.warning(f'Model {model_config.num_classes} has more classes than dataset {loader_train.dataset.parser.max_label}.')
    eval_metric = args.eval_metric
    best_metric = None
    best_epoch = None
    saver = None
    output_dir = ''
    if (args.local_rank == 0):
        # Only rank 0 writes checkpoints / summaries.
        output_base = (args.output if args.output else './output')
        exp_name = '-'.join([datetime.now().strftime('%Y%m%d-%H%M%S'), args.model])
        output_dir = utils.get_outdir(output_base, 'train', exp_name)
        decreasing = (True if (eval_metric == 'loss') else False)
        saver = utils.CheckpointSaver(model, optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler, checkpoint_dir=output_dir, decreasing=decreasing, unwrap_fn=unwrap_bench)
        with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
            f.write(args_text)
    try:
        for epoch in range(start_epoch, num_epochs):
            if args.distributed:
                # Reshuffle shards per epoch for the distributed sampler.
                loader_train.sampler.set_epoch(epoch)
            train_metrics = train_epoch(epoch, model, loader_train, optimizer, args, lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir, amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema)
            if (args.distributed and (args.dist_bn in ('broadcast', 'reduce'))):
                # Sync BN running stats across processes before eval.
                if (args.local_rank == 0):
                    logging.info('Distributing BatchNorm running means and vars')
                utils.distribute_bn(model, args.world_size, (args.dist_bn == 'reduce'))
            if (model_ema is not None):
                # Validate the EMA weights when EMA is enabled.
                if (args.distributed and (args.dist_bn in ('broadcast', 'reduce'))):
                    utils.distribute_bn(model_ema, args.world_size, (args.dist_bn == 'reduce'))
                eval_metrics = validate(model_ema.module, loader_eval, args, evaluator, log_suffix=' (EMA)')
            else:
                eval_metrics = validate(model, loader_eval, args, evaluator)
            if (lr_scheduler is not None):
                # Step with epoch+1: scheduler sets the LR for the NEXT epoch.
                lr_scheduler.step((epoch + 1), eval_metrics[eval_metric])
            if (saver is not None):
                utils.update_summary(epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'), write_header=(best_metric is None))
                (best_metric, best_epoch) = saver.save_checkpoint(epoch=epoch, metric=eval_metrics[eval_metric])
    except KeyboardInterrupt:
        # Allow clean Ctrl-C: fall through and report the best result so far.
        pass
    if (best_metric is not None):
        logging.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
|
def create_datasets_and_loaders(args, model_config, transform_train_fn=None, transform_eval_fn=None, collate_fn=None):
    """Setup datasets, transforms, loaders, evaluator.

    Args:
        args: Command line args / config for training
        model_config: Model specific configuration dict / struct
        transform_train_fn: Override default image + annotation transforms (see note in loaders.py)
        transform_eval_fn: Override default image + annotation transforms (see note in loaders.py)
        collate_fn: Override default fast collate function

    Returns:
        (train loader, validation loader, evaluator)
    """
    input_config = resolve_input_config(args, model_config=model_config)
    (dataset_train, dataset_eval) = create_dataset(args.dataset, args.root)
    # Anchor labeling is done either in the loader workers (here) or inside
    # the model bench; only build a labeler when the bench isn't doing it.
    labeler = None
    if (not args.bench_labeler):
        labeler = AnchorLabeler(Anchors.from_config(model_config), model_config.num_classes, match_threshold=0.5)
    loader_train = create_loader(dataset_train, input_size=input_config['input_size'], batch_size=args.batch_size, is_training=True, use_prefetcher=args.prefetcher, re_prob=args.reprob, re_mode=args.remode, re_count=args.recount, interpolation=(args.train_interpolation or input_config['interpolation']), fill_color=input_config['fill_color'], mean=input_config['mean'], std=input_config['std'], num_workers=args.workers, distributed=args.distributed, pin_mem=args.pin_mem, anchor_labeler=labeler, transform_fn=transform_train_fn, collate_fn=collate_fn)
    if (args.val_skip > 1):
        # Subsample the eval set (every Nth sample) to speed up validation.
        dataset_eval = SkipSubset(dataset_eval, args.val_skip)
    loader_eval = create_loader(dataset_eval, input_size=input_config['input_size'], batch_size=args.batch_size, is_training=False, use_prefetcher=args.prefetcher, interpolation=input_config['interpolation'], fill_color=input_config['fill_color'], mean=input_config['mean'], std=input_config['std'], num_workers=args.workers, distributed=args.distributed, pin_mem=args.pin_mem, anchor_labeler=labeler, transform_fn=transform_eval_fn, collate_fn=collate_fn)
    evaluator = create_evaluator(args.dataset, loader_eval.dataset, distributed=args.distributed, pred_yxyx=False)
    return (loader_train, loader_eval, evaluator)
|
def train_epoch(epoch, model, loader, optimizer, args, lr_scheduler=None, saver=None, output_dir='', amp_autocast=suppress, loss_scaler=None, model_ema=None):
    """Run one training epoch and return an OrderedDict of averaged metrics.

    The model is expected to be a train bench returning a dict with a 'loss'
    entry when called as model(input, target). Handles AMP (via amp_autocast /
    loss_scaler), gradient clipping, EMA updates, periodic logging, recovery
    checkpoints, and per-step LR scheduling.
    """
    batch_time_m = utils.AverageMeter()
    data_time_m = utils.AverageMeter()
    losses_m = utils.AverageMeter()
    model.train()
    # For AGC-style clipping the prediction head is excluded from clipping.
    clip_params = get_clip_parameters(model, exclude_head=('agc' in args.clip_mode))
    end = time.time()
    last_idx = (len(loader) - 1)
    num_updates = (epoch * len(loader))
    for (batch_idx, (input, target)) in enumerate(loader):
        last_batch = (batch_idx == last_idx)
        data_time_m.update((time.time() - end))
        if args.channels_last:
            input = input.contiguous(memory_format=torch.channels_last)
        with amp_autocast():
            output = model(input, target)
            loss = output['loss']
        if (not args.distributed):
            # In distributed mode the loss is reduced later, at log time.
            losses_m.update(loss.item(), input.size(0))
        optimizer.zero_grad()
        if (loss_scaler is not None):
            # AMP path: the scaler handles backward, clipping and step.
            loss_scaler(loss, optimizer, clip_grad=args.clip_grad, clip_mode=args.clip_mode, parameters=clip_params)
        else:
            loss.backward()
            if (args.clip_grad is not None):
                utils.dispatch_clip_grad(clip_params, value=args.clip_grad, mode=args.clip_mode)
            optimizer.step()
        torch.cuda.synchronize()
        if (model_ema is not None):
            model_ema.update(model)
        num_updates += 1
        batch_time_m.update((time.time() - end))
        if (last_batch or ((batch_idx % args.log_interval) == 0)):
            # Average LR across param groups for display.
            lrl = [param_group['lr'] for param_group in optimizer.param_groups]
            lr = (sum(lrl) / len(lrl))
            if args.distributed:
                # All-reduce the current loss so every rank logs the same value.
                reduced_loss = utils.reduce_tensor(loss.data, args.world_size)
                losses_m.update(reduced_loss.item(), input.size(0))
            if (args.local_rank == 0):
                global_batch_size = (input.size(0) * args.world_size)
                logging.info(f'Train: {epoch} [{batch_idx:>4d}/{len(loader)} ({((100 * batch_idx) / last_idx):>3.0f}%)] Loss: {losses_m.val:>9.6f} ({losses_m.avg:>6.4f}) Time: {batch_time_m.val:.3f}s, {(global_batch_size / batch_time_m.val):>7.2f}/s ({batch_time_m.avg:.3f}s, {(global_batch_size / batch_time_m.avg):>7.2f}/s) LR: {lr:.3e} Data: {data_time_m.val:.3f} ({data_time_m.avg:.3f})')
                if (args.save_images and output_dir):
                    torchvision.utils.save_image(input, os.path.join(output_dir, ('train-batch-%d.jpg' % batch_idx)), padding=0, normalize=True)
        if ((saver is not None) and args.recovery_interval and (last_batch or (((batch_idx + 1) % args.recovery_interval) == 0))):
            saver.save_recovery(epoch, batch_idx=batch_idx)
        if (lr_scheduler is not None):
            lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
        end = time.time()
    if hasattr(optimizer, 'sync_lookahead'):
        # Lookahead optimizers need slow weights synced at epoch end.
        optimizer.sync_lookahead()
    return OrderedDict([('loss', losses_m.avg)])
|
def validate(model, loader, args, evaluator=None, log_suffix=''):
    """Evaluate `model` on `loader`; return OrderedDict with 'loss' (and 'map').

    NOTE(review): a second, single-argument `validate(args)` is defined later
    in this file and shadows this one at import time — these appear to be
    concatenated scripts; confirm which definition callers actually get.
    """
    batch_time_m = utils.AverageMeter()
    losses_m = utils.AverageMeter()
    model.eval()
    end = time.time()
    last_idx = (len(loader) - 1)
    with torch.no_grad():
        for (batch_idx, (input, target)) in enumerate(loader):
            last_batch = (batch_idx == last_idx)
            output = model(input, target)
            loss = output['loss']
            if (evaluator is not None):
                evaluator.add_predictions(output['detections'], target)
            if args.distributed:
                # All-reduce so every rank accumulates the same loss average.
                reduced_loss = utils.reduce_tensor(loss.data, args.world_size)
            else:
                reduced_loss = loss.data
            torch.cuda.synchronize()
            losses_m.update(reduced_loss.item(), input.size(0))
            batch_time_m.update((time.time() - end))
            end = time.time()
            if ((args.local_rank == 0) and (last_batch or ((batch_idx % args.log_interval) == 0))):
                log_name = ('Test' + log_suffix)
                logging.info(f'{log_name}: [{batch_idx:>4d}/{last_idx}] Time: {batch_time_m.val:.3f} ({batch_time_m.avg:.3f}) Loss: {losses_m.val:>7.4f} ({losses_m.avg:>6.4f}) ')
    metrics = OrderedDict([('loss', losses_m.avg)])
    if (evaluator is not None):
        metrics['map'] = evaluator.evaluate()
    return metrics
|
def add_bool_arg(parser, name, default=False, help=''):
    """Register paired --name / --no-name boolean flags on `parser`.

    Both flags write to the same dest (dashes become underscores) and are
    mutually exclusive; `default` applies when neither flag is given.
    """
    dest = name.replace('-', '_')
    group = parser.add_mutually_exclusive_group(required=False)
    for prefix, action in (('--', 'store_true'), ('--no-', 'store_false')):
        group.add_argument(prefix + name, dest=dest, action=action, help=help)
    parser.set_defaults(**{dest: default})
|
def validate(args):
    """Standalone evaluation entry point: build a predict bench and score it.

    Returns the mean AP (0.0 when the dataset has no labels; in that case
    predictions are saved to args.results instead).
    NOTE(review): this redefines the earlier validate(model, loader, ...) in
    this file — the definitions come from different scripts; confirm intent.
    """
    setup_default_logging()
    if args.amp:
        # Prefer native AMP; fall back to apex when native is unavailable.
        if has_native_amp:
            args.native_amp = True
        elif has_apex:
            args.apex_amp = True
    assert ((not args.apex_amp) or (not args.native_amp)), 'Only one AMP mode should be set.'
    # Without a checkpoint, fall back to pretrained weights.
    args.pretrained = (args.pretrained or (not args.checkpoint))
    args.prefetcher = (not args.no_prefetcher)
    with set_layer_config(scriptable=args.torchscript):
        extra_args = {}
        if (args.img_size is not None):
            extra_args = dict(image_size=(args.img_size, args.img_size))
        bench = create_model(args.model, bench_task='predict', num_classes=args.num_classes, pretrained=args.pretrained, redundant_bias=args.redundant_bias, soft_nms=args.soft_nms, checkpoint_path=args.checkpoint, checkpoint_ema=args.use_ema, **extra_args)
    model_config = bench.config
    param_count = sum([m.numel() for m in bench.parameters()])
    print(('Model %s created, param count: %d' % (args.model, param_count)))
    bench = bench.cuda()
    if args.torchscript:
        assert (not args.apex_amp), 'Cannot use APEX AMP with torchscripted model, force native amp with `--native-amp` flag'
        bench = torch.jit.script(bench)
    elif args.torchcompile:
        bench = torch.compile(bench, backend=args.torchcompile)
    amp_autocast = suppress
    if args.apex_amp:
        bench = amp.initialize(bench, opt_level='O1')
        print('Using NVIDIA APEX AMP. Validating in mixed precision.')
    elif args.native_amp:
        amp_autocast = torch.cuda.amp.autocast
        print('Using native Torch AMP. Validating in mixed precision.')
    else:
        print('AMP not enabled. Validating in float32.')
    if (args.num_gpu > 1):
        bench = torch.nn.DataParallel(bench, device_ids=list(range(args.num_gpu)))
    dataset = create_dataset(args.dataset, args.root, args.split)
    input_config = resolve_input_config(args, model_config)
    loader = create_loader(dataset, input_size=input_config['input_size'], batch_size=args.batch_size, use_prefetcher=args.prefetcher, interpolation=input_config['interpolation'], fill_color=input_config['fill_color'], mean=input_config['mean'], std=input_config['std'], num_workers=args.workers, pin_mem=args.pin_mem)
    evaluator = create_evaluator(args.dataset, dataset, pred_yxyx=False)
    bench.eval()
    batch_time = AverageMeter()
    end = time.time()
    last_idx = (len(loader) - 1)
    with torch.no_grad():
        for (i, (input, target)) in enumerate(loader):
            with amp_autocast():
                # Predict bench takes img_info (scales/sizes) to map boxes
                # back to original image coordinates.
                output = bench(input, img_info=target)
            evaluator.add_predictions(output, target)
            batch_time.update((time.time() - end))
            end = time.time()
            if (((i % args.log_freq) == 0) or (i == last_idx)):
                print(f'Test: [{i:>4d}/{len(loader)}] Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {(input.size(0) / batch_time.avg):>7.2f}/s) ')
    mean_ap = 0.0
    if dataset.parser.has_labels:
        mean_ap = evaluator.evaluate(output_result_file=args.results)
    else:
        # No groundtruth: just dump predictions for offline scoring.
        evaluator.save(args.results)
    return mean_ap
|
def main():
    """Parse CLI args and run validation.

    NOTE(review): this file defines main() more than once (concatenated
    scripts); the last definition wins at import time.
    """
    validate(parser.parse_args())
|
def main():
    """Benchmark a Caffe2 model (init + predict protobufs) with random input.

    NOTE(review): another main() redefinition in this file — appears to come
    from a separate benchmarking script.
    """
    args = parser.parse_args()
    args.gpu_id = 0
    if args.c2_prefix:
        # A prefix expands to the conventional <prefix>.init.pb / .predict.pb pair.
        args.c2_init = (args.c2_prefix + '.init.pb')
        args.c2_predict = (args.c2_prefix + '.predict.pb')
    model = model_helper.ModelHelper(name='le_net', init_params=False)
    # Load the parameter-initialization net.
    init_net_proto = caffe2_pb2.NetDef()
    with open(args.c2_init, 'rb') as f:
        init_net_proto.ParseFromString(f.read())
    model.param_init_net = core.Net(init_net_proto)
    # Load the prediction net.
    predict_net_proto = caffe2_pb2.NetDef()
    with open(args.c2_predict, 'rb') as f:
        predict_net_proto.ParseFromString(f.read())
    model.net = core.Net(predict_net_proto)
    input_blob = model.net.external_inputs[0]
    # Feed random NCHW data so the net can run standalone.
    model.param_init_net.GaussianFill([], input_blob.GetUnscopedName(), shape=(args.batch_size, 3, args.img_size, args.img_size), mean=0.0, std=1.0)
    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net, overwrite=True)
    # 5 warmup iters, 20 timed iters, per-op timing enabled.
    workspace.BenchmarkNet(model.net.Proto().name, 5, 20, True)
|
def natural_key(string_):
    """Sort key for natural ordering ('img9' before 'img10').

    See http://www.codinghorror.com/blog/archives/001018.html
    """
    tokens = re.split(r'(\d+)', string_.lower())
    return [int(tok) if tok.isdigit() else tok for tok in tokens]
|
def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True):
    """Recursively collect image paths and integer class targets under `folder`.

    Args:
        folder: root directory to walk.
        types: iterable of allowed (lowercase) file extensions.
        class_to_idx: optional pre-built {label: index} mapping; when None a
            mapping is built from the directory labels found.
        leaf_name_only: use the leaf directory name as the label instead of
            the full relative path with separators replaced by '_'.
        sort: sort results in natural filename order.

    Returns:
        When class_to_idx was None: (images_and_targets, classes, class_to_idx).
        Otherwise just images_and_targets, a list of (path, target) tuples.
    """
    if class_to_idx is None:
        class_to_idx = dict()
        build_class_idx = True
    else:
        build_class_idx = False
    labels = []
    filenames = []
    for root, subdirs, files in os.walk(folder, topdown=False):
        rel_path = os.path.relpath(root, folder) if (root != folder) else ''
        label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_')
        if build_class_idx and not subdirs:
            # Only leaf directories define classes.
            class_to_idx[label] = None
        for f in files:
            base, ext = os.path.splitext(f)
            if ext.lower() in types:
                filenames.append(os.path.join(root, f))
                labels.append(label)
    if build_class_idx:
        classes = sorted(class_to_idx.keys(), key=natural_key)
        for idx, c in enumerate(classes):
            class_to_idx[c] = idx
    # FIX: materialize as a list so callers always get a sized, re-iterable
    # sequence. Previously a one-shot zip iterator was returned when
    # sort=False, which breaks callers that call len() on the result.
    images_and_targets = list(zip(filenames, [class_to_idx[l] for l in labels]))
    if sort:
        images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0]))
    if build_class_idx:
        return images_and_targets, classes, class_to_idx
    else:
        return images_and_targets
|
class Dataset(data.Dataset):
    """Flat image-folder dataset yielding (image, target) pairs.

    Image paths and integer targets are discovered by find_images_and_targets()
    from the directory structure under `root`.
    """

    def __init__(self, root, transform=None, load_bytes=False):
        (imgs, _, _) = find_images_and_targets(root)
        if (len(imgs) == 0):
            raise RuntimeError(((('Found 0 images in subfolders of: ' + root) + '\nSupported image extensions are: ') + ','.join(IMG_EXTENSIONS)))
        self.root = root
        self.imgs = imgs  # list of (path, target) tuples
        self.transform = transform
        self.load_bytes = load_bytes  # return raw file bytes instead of a decoded PIL image

    def __getitem__(self, index):
        (path, target) = self.imgs[index]
        if self.load_bytes:
            # FIX: context manager closes the handle deterministically
            # (previously left to garbage collection).
            with open(path, 'rb') as f:
                img = f.read()
        else:
            img = Image.open(path).convert('RGB')
        if (self.transform is not None):
            img = self.transform(img)
        if (target is None):
            # Unlabeled sample: dummy zero target keeps batch collation working.
            target = torch.zeros(1).long()
        return (img, target)

    def __len__(self):
        return len(self.imgs)

    def filenames(self, indices=None, basename=False):
        """Return image file paths, optionally for a subset / as basenames.

        FIX: `indices=None` replaces a mutable-default list; behavior is
        unchanged since [] and None are both falsy.
        """
        if indices:
            paths = [self.imgs[i][0] for i in indices]
        else:
            paths = [x[0] for x in self.imgs]
        if basename:
            return [os.path.basename(p) for p in paths]
        return paths
|
def fast_collate(batch):
    """Collate a list of (uint8 numpy image, int target) samples.

    Returns a stacked uint8 image tensor and an int64 target tensor; the
    uint8 images are normalized later on GPU (see PrefetchLoader).
    """
    targets = torch.tensor([sample[1] for sample in batch], dtype=torch.int64)
    n = len(targets)
    stacked = torch.zeros((n, *batch[0][0].shape), dtype=torch.uint8)
    for idx, (img, _) in enumerate(batch):
        stacked[idx] += torch.from_numpy(img)
    return (stacked, targets)
|
class PrefetchLoader():
    """Wraps a DataLoader to overlap host->GPU copy and normalization with compute.

    A side CUDA stream uploads and normalizes the *next* batch while the
    current one is consumed. Mean/std are scaled by 255 because inputs arrive
    as uint8 tensors (see fast_collate) and are normalized in-place on GPU.
    """

    def __init__(self, loader, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD):
        self.loader = loader
        # Shaped (1, 3, 1, 1) so they broadcast over NCHW batches.
        self.mean = torch.tensor([(x * 255) for x in mean]).cuda().view(1, 3, 1, 1)
        self.std = torch.tensor([(x * 255) for x in std]).cuda().view(1, 3, 1, 1)

    def __iter__(self):
        stream = torch.cuda.Stream()
        first = True
        for (next_input, next_target) in self.loader:
            with torch.cuda.stream(stream):
                # Async H2D copy + float conversion + normalize on the side stream.
                next_input = next_input.cuda(non_blocking=True)
                next_target = next_target.cuda(non_blocking=True)
                next_input = next_input.float().sub_(self.mean).div_(self.std)
            if (not first):
                # Yield the batch that was prefetched on the previous iteration.
                (yield (input, target))
            else:
                first = False
            # Ensure the default stream waits for the prefetch work before
            # the tensors are handed to the consumer.
            torch.cuda.current_stream().wait_stream(stream)
            input = next_input
            target = next_target
        # Flush the final prefetched batch.
        (yield (input, target))

    def __len__(self):
        return len(self.loader)

    @property
    def sampler(self):
        # Expose the wrapped loader's sampler (e.g. for set_epoch in DDP).
        return self.loader.sampler
|
def create_loader(dataset, input_size, batch_size, is_training=False, use_prefetcher=True, interpolation='bilinear', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_workers=1, crop_pct=None, tensorflow_preprocessing=False):
    """Build a DataLoader (no shuffling) with eval-style transforms.

    NOTE(review): the non-TF path always applies transforms_imagenet_eval,
    even when is_training=True — presumably this loader is validation-only;
    confirm before reusing for training.
    """
    img_size = input_size[-2:] if isinstance(input_size, tuple) else input_size
    if tensorflow_preprocessing and use_prefetcher:
        from data.tf_preprocessing import TfPreprocessTransform
        transform = TfPreprocessTransform(is_training=is_training, size=img_size, interpolation=interpolation)
    else:
        transform = transforms_imagenet_eval(img_size, interpolation=interpolation, use_prefetcher=use_prefetcher, mean=mean, std=std, crop_pct=crop_pct)
    dataset.transform = transform
    # Prefetcher expects uint8 numpy batches, hence the custom collate.
    collate = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, collate_fn=collate)
    if use_prefetcher:
        loader = PrefetchLoader(loader, mean=mean, std=std)
    return loader
|
def distorted_bounding_box_crop(image_bytes, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100, scope=None):
    """Generates cropped_image using one of the bboxes randomly distorted.

    See `tf.image.sample_distorted_bounding_box` for more documentation.

    Args:
        image_bytes: `Tensor` of binary image data.
        bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
            where each coordinate is [0, 1) and the coordinates are arranged
            as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the
            whole image.
        min_object_covered: An optional `float`. Defaults to `0.1`. The
            cropped area of the image must contain at least this fraction of
            any bounding box supplied.
        aspect_ratio_range: An optional list of `float`s. The cropped area of
            the image must have an aspect ratio = width / height within this range.
        area_range: An optional list of `float`s. The cropped area of the
            image must contain a fraction of the supplied image within this range.
        max_attempts: An optional `int`. Number of attempts at generating a
            cropped region of the image of the specified constraints. After
            `max_attempts` failures, return the entire image.
        scope: Optional `str` for name scope.

    Returns:
        cropped image `Tensor`
    """
    with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):
        # Read only the JPEG header to get dimensions (cheap, no full decode).
        shape = tf.image.extract_jpeg_shape(image_bytes)
        sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(shape, bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True)
        (bbox_begin, bbox_size, _) = sample_distorted_bounding_box
        # Convert the sampled box into a [y, x, h, w] crop window.
        (offset_y, offset_x, _) = tf.unstack(bbox_begin)
        (target_height, target_width, _) = tf.unstack(bbox_size)
        crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
        # Fused decode+crop avoids decoding pixels outside the window.
        image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
        return image
|
def _at_least_x_are_equal(a, b, x):
    """At least `x` of the elements of `a` and `b` Tensors are equal."""
    equal_mask = tf.cast(tf.equal(a, b), tf.int32)
    return tf.greater_equal(tf.reduce_sum(equal_mask), x)
|
def _decode_and_random_crop(image_bytes, image_size, resize_method):
    """Make a random crop of image_size.

    Falls back to a deterministic center crop when the sampled crop
    degenerates to (almost) the whole image after max_attempts.

    Args:
        image_bytes: encoded JPEG `Tensor`.
        image_size: output side length (square output).
        resize_method: tf.image.ResizeMethod used when resizing the crop.

    Returns:
        image `Tensor` of shape [image_size, image_size, 3].
    """
    bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
    image = distorted_bounding_box_crop(image_bytes, bbox, min_object_covered=0.1, aspect_ratio_range=((3.0 / 4), (4.0 / 3.0)), area_range=(0.08, 1.0), max_attempts=10, scope=None)
    original_shape = tf.image.extract_jpeg_shape(image_bytes)
    # If >=3 dims match the original image, the sampler effectively failed
    # and returned the whole image; use a center crop instead.
    bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
    # FIX: _decode_and_center_crop requires a resize_method argument; it was
    # previously called without it, raising TypeError when this branch was
    # traced.
    image = tf.cond(bad, (lambda : _decode_and_center_crop(image_bytes, image_size, resize_method)), (lambda : tf.image.resize([image], [image_size, image_size], resize_method)[0]))
    return image
|
def _decode_and_center_crop(image_bytes, image_size, resize_method):
    """Center-crop with a CROP_PADDING margin, then resize to image_size."""
    shape = tf.image.extract_jpeg_shape(image_bytes)
    height = shape[0]
    width = shape[1]
    # Square crop side scaled so the crop leaves an effective CROP_PADDING
    # margin at image_size resolution.
    crop_size = tf.cast(((image_size / (image_size + CROP_PADDING)) * tf.cast(tf.minimum(height, width), tf.float32)), tf.int32)
    top = (((height - crop_size) + 1) // 2)
    left = (((width - crop_size) + 1) // 2)
    window = tf.stack([top, left, crop_size, crop_size])
    cropped = tf.image.decode_and_crop_jpeg(image_bytes, window, channels=3)
    return tf.image.resize([cropped], [image_size, image_size], resize_method)[0]
|
def _flip(image):
    """Randomly mirror `image` horizontally."""
    return tf.image.random_flip_left_right(image)
|
def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
    """Preprocesses the given image for training.

    (The original docstring said "for evaluation" — copy/paste slip; this is
    the random-crop + random-flip *training* pipeline.)

    Args:
        image_bytes: `Tensor` representing an image binary of arbitrary size.
        use_bfloat16: `bool` for whether to use bfloat16.
        image_size: image size.
        interpolation: image interpolation method

    Returns:
        A preprocessed image `Tensor`.
    """
    resize_method = (tf.image.ResizeMethod.BICUBIC if (interpolation == 'bicubic') else tf.image.ResizeMethod.BILINEAR)
    image = _decode_and_random_crop(image_bytes, image_size, resize_method)
    image = _flip(image)
    # Give the tensor a static [H, W, 3] shape for downstream graph ops.
    image = tf.reshape(image, [image_size, image_size, 3])
    image = tf.image.convert_image_dtype(image, dtype=(tf.bfloat16 if use_bfloat16 else tf.float32))
    return image
|
def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
    """Deterministic eval preprocessing: center crop, resize, dtype convert.

    Args:
        image_bytes: `Tensor` representing an image binary of arbitrary size.
        use_bfloat16: `bool` for whether to use bfloat16.
        image_size: image size.
        interpolation: image interpolation method

    Returns:
        A preprocessed image `Tensor`.
    """
    method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR
    image = _decode_and_center_crop(image_bytes, image_size, method)
    # Static [H, W, 3] shape for downstream graph ops.
    image = tf.reshape(image, [image_size, image_size, 3])
    out_dtype = tf.bfloat16 if use_bfloat16 else tf.float32
    return tf.image.convert_image_dtype(image, dtype=out_dtype)
|
def preprocess_image(image_bytes, is_training=False, use_bfloat16=False, image_size=IMAGE_SIZE, interpolation='bicubic'):
    """Dispatch to the train or eval preprocessing pipeline.

    Args:
        image_bytes: `Tensor` representing an image binary of arbitrary size.
        is_training: `bool` for whether the preprocessing is for training.
        use_bfloat16: `bool` for whether to use bfloat16.
        image_size: image size.
        interpolation: image interpolation method

    Returns:
        A preprocessed image `Tensor` with value range of [0, 255].
    """
    pipeline = preprocess_for_train if is_training else preprocess_for_eval
    return pipeline(image_bytes, use_bfloat16, image_size, interpolation)
|
class TfPreprocessTransform():
    """Torchvision-style transform running TF1 preprocessing on raw JPEG bytes.

    The TF graph is built once at construction; the tf.Session is created
    lazily on first __call__ (i.e. after DataLoader worker fork). Output is a
    CHW uint8 numpy array suitable for fast_collate.
    """

    def __init__(self, is_training=False, size=224, interpolation='bicubic'):
        self.is_training = is_training
        # Only square outputs are supported; a tuple collapses to its first dim.
        self.size = (size[0] if isinstance(size, tuple) else size)
        self.interpolation = interpolation
        self._image_bytes = None  # tf.placeholder, set by _build_tf_graph
        self.process_image = self._build_tf_graph()
        self.sess = None  # created lazily in __call__

    def _build_tf_graph(self):
        # Pin preprocessing to CPU; the GPU is reserved for the model.
        with tf.device('/cpu:0'):
            self._image_bytes = tf.placeholder(shape=[], dtype=tf.string)
            img = preprocess_image(self._image_bytes, self.is_training, False, self.size, self.interpolation)
        return img

    def __call__(self, image_bytes):
        if (self.sess is None):
            self.sess = tf.Session()
        img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes})
        # Graph outputs float pixels; convert back to uint8.
        img = img.round().clip(0, 255).astype(np.uint8)
        if (img.ndim < 3):
            # Grayscale: add a trailing channel dim.
            img = np.expand_dims(img, axis=(- 1))
        # HWC -> CHW
        img = np.rollaxis(img, 2)
        return img
|
def resolve_data_config(model, args, default_cfg=None, verbose=True):
    """Resolve the input pipeline config from args, model defaults, fallbacks.

    Precedence per field: explicit CLI args > default_cfg (or the model's
    `default_cfg` attribute) > hard-coded ImageNet defaults.

    Args:
        model: model whose `default_cfg` attribute may supply defaults (may be None).
        args: Namespace with img_size / interpolation / mean / std / crop_pct.
        default_cfg: explicit defaults dict. FIX: default changed from a
            mutable `{}` to None (behavior unchanged — both are falsy); a
            pointless `default_cfg = default_cfg` self-assignment was removed.
        verbose: print the resolved configuration.

    Returns:
        dict with keys: input_size, interpolation, mean, std, crop_pct.
    """
    new_config = {}
    default_cfg = default_cfg or {}
    if (not default_cfg) and (model is not None) and hasattr(model, 'default_cfg'):
        default_cfg = model.default_cfg
    in_chans = 3
    # Input size: --img-size wins, then the model default, then 224.
    input_size = (in_chans, 224, 224)
    if args.img_size is not None:
        assert isinstance(args.img_size, int)
        input_size = (in_chans, args.img_size, args.img_size)
    elif 'input_size' in default_cfg:
        input_size = default_cfg['input_size']
    new_config['input_size'] = input_size
    # Interpolation method
    new_config['interpolation'] = 'bicubic'
    if args.interpolation:
        new_config['interpolation'] = args.interpolation
    elif 'interpolation' in default_cfg:
        new_config['interpolation'] = default_cfg['interpolation']
    # Normalization mean; a single value is broadcast across channels.
    new_config['mean'] = IMAGENET_DEFAULT_MEAN
    if args.mean is not None:
        mean = tuple(args.mean)
        if len(mean) == 1:
            mean = tuple(list(mean) * in_chans)
        else:
            assert len(mean) == in_chans
        new_config['mean'] = mean
    elif 'mean' in default_cfg:
        new_config['mean'] = default_cfg['mean']
    # Normalization std; same broadcasting rule as mean.
    new_config['std'] = IMAGENET_DEFAULT_STD
    if args.std is not None:
        std = tuple(args.std)
        if len(std) == 1:
            std = tuple(list(std) * in_chans)
        else:
            assert len(std) == in_chans
        new_config['std'] = std
    elif 'std' in default_cfg:
        new_config['std'] = default_cfg['std']
    # Center-crop fraction
    new_config['crop_pct'] = DEFAULT_CROP_PCT
    if args.crop_pct is not None:
        new_config['crop_pct'] = args.crop_pct
    elif 'crop_pct' in default_cfg:
        new_config['crop_pct'] = default_cfg['crop_pct']
    if verbose:
        print('Data processing configuration for current model + dataset:')
        for (n, v) in new_config.items():
            print(('\t%s: %s' % (n, str(v))))
    return new_config
|
class ToNumpy():
    """Convert a PIL image (or array-like) to a CHW uint8 numpy array."""

    def __call__(self, pil_img):
        arr = np.array(pil_img, dtype=np.uint8)
        if arr.ndim < 3:
            # Grayscale: add a trailing channel axis before transposing.
            arr = np.expand_dims(arr, axis=-1)
        # HWC -> CHW
        return np.rollaxis(arr, 2)
|
class ToTensor():
    """Convert a PIL image (or array-like) to a CHW torch tensor of `dtype`."""

    def __init__(self, dtype=torch.float32):
        self.dtype = dtype

    def __call__(self, pil_img):
        arr = np.array(pil_img, dtype=np.uint8)
        if arr.ndim < 3:
            # Grayscale: add a trailing channel axis before transposing.
            arr = np.expand_dims(arr, axis=-1)
        # HWC -> CHW, then cast to the requested dtype.
        chw = np.rollaxis(arr, 2)
        return torch.from_numpy(chw).to(dtype=self.dtype)
|
def _pil_interp(method):
    """Map an interpolation name to the PIL resampling constant.

    Unknown names fall back to bilinear, matching common loader defaults.
    """
    named = {
        'bicubic': Image.BICUBIC,
        'lanczos': Image.LANCZOS,
        'hamming': Image.HAMMING,
    }
    return named.get(method, Image.BILINEAR)
|
def transforms_imagenet_eval(img_size=224, crop_pct=None, interpolation='bilinear', use_prefetcher=False, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD):
    """Build the standard ImageNet eval pipeline: resize -> center crop -> tensor.

    Args:
        img_size: int or (h, w) target crop size.
        crop_pct: crop fraction used to derive the pre-crop resize size.
        interpolation: resize interpolation name (see `_pil_interp`).
        use_prefetcher: if True, emit raw uint8 CHW numpy (normalization is
            expected to happen downstream) instead of tensor + Normalize.
        mean, std: normalization statistics for the non-prefetcher path.
    """
    crop_pct = crop_pct or DEFAULT_CROP_PCT
    if isinstance(img_size, tuple):
        assert len(img_size) == 2
        if img_size[-1] == img_size[-2]:
            # square target: scale like the scalar branch
            scale_size = int(math.floor(img_size[0] / crop_pct))
        else:
            # non-square target: scale each dimension independently
            scale_size = tuple([int(x / crop_pct) for x in img_size])
    else:
        scale_size = int(math.floor(img_size / crop_pct))

    tfl = [
        transforms.Resize(scale_size, _pil_interp(interpolation)),
        transforms.CenterCrop(img_size),
    ]
    if use_prefetcher:
        tfl.append(ToNumpy())
    else:
        tfl.extend([
            transforms.ToTensor(),
            transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
        ])
    return transforms.Compose(tfl)
|
def add_override_act_fn(name, fn):
    """Register `fn` as the activation function returned for `name`."""
    # item assignment mutates the module-level registry; no `global` needed
    _OVERRIDE_FN[name] = fn
|
def update_override_act_fn(overrides):
    """Merge a dict of name -> fn entries into the activation fn override registry."""
    assert isinstance(overrides, dict)
    _OVERRIDE_FN.update(overrides)
|
def clear_override_act_fn():
    """Drop all registered activation fn overrides."""
    global _OVERRIDE_FN  # rebinds the module-level registry to a fresh dict
    _OVERRIDE_FN = {}
|
def add_override_act_layer(name, fn):
    """Register `fn` as the activation layer class returned for `name`."""
    _OVERRIDE_LAYER[name] = fn
|
def update_override_act_layer(overrides):
    """Merge a dict of name -> layer-class entries into the layer override registry."""
    assert isinstance(overrides, dict)
    _OVERRIDE_LAYER.update(overrides)
|
def clear_override_act_layer():
    """Drop all registered activation layer overrides."""
    global _OVERRIDE_LAYER  # rebinds the module-level registry to a fresh dict
    _OVERRIDE_LAYER = {}
|
def get_act_fn(name='relu'):
    """Activation Function Factory.

    Resolves an activation fn by name, honoring user overrides first and then
    the current export/scripting config, so export- or torchscript-friendly
    variants are returned dynamically when required.
    """
    if name in _OVERRIDE_FN:
        return _OVERRIDE_FN[name]
    restricted = config.is_exportable() or config.is_scriptable() or config.is_no_jit()
    if not restricted and name in _ACT_FN_ME:
        # memory-efficient versions, only usable without export/script/no-jit limits
        return _ACT_FN_ME[name]
    if config.is_exportable() and name in ('silu', 'swish'):
        # exporters need the plain python swish implementation
        return swish
    if not (config.is_exportable() or config.is_no_jit()) and name in _ACT_FN_JIT:
        return _ACT_FN_JIT[name]
    return _ACT_FN_DEFAULT[name]
|
def get_act_layer(name='relu'):
    """Activation Layer Factory.

    Resolves an activation layer (nn.Module subclass) by name, honoring user
    overrides first and then the current export/scripting config, so export-
    or torchscript-friendly variants are returned dynamically when required.
    """
    if name in _OVERRIDE_LAYER:
        return _OVERRIDE_LAYER[name]
    use_me = not (config.is_exportable() or config.is_scriptable() or config.is_no_jit())
    if use_me and name in _ACT_LAYER_ME:
        # memory-efficient versions, only usable without export/script/no-jit limits
        return _ACT_LAYER_ME[name]
    if config.is_exportable() and name in ('silu', 'swish'):
        # exporters need the plain python Swish module
        return Swish
    use_jit = not (config.is_exportable() or config.is_no_jit())
    # FIX: membership was checked against _ACT_FN_JIT while indexing
    # _ACT_LAYER_JIT — a latent KeyError if the fn/layer tables ever diverge.
    # Check and index the same (layer) table, mirroring get_act_fn.
    if use_jit and name in _ACT_LAYER_JIT:
        return _ACT_LAYER_JIT[name]
    return _ACT_LAYER_DEFAULT[name]
|
def swish(x, inplace: bool = False):
    """Swish / SiLU activation: x * sigmoid(x).

    Described originally as SiLU (https://arxiv.org/abs/1702.03118v3)
    and also as Swish (https://arxiv.org/abs/1710.05941).

    TODO Rename to SiLU with addition to PyTorch

    Args:
        x: input tensor.
        inplace: if True, mutate `x` in place and return it.
    """
    gate = x.sigmoid()
    return x.mul_(gate) if inplace else x.mul(gate)
|
class Swish(nn.Module):
    """Module wrapper around the `swish` activation function."""

    def __init__(self, inplace: bool = False):
        super(Swish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return swish(x, inplace=self.inplace)
|
def mish(x, inplace: bool = False):
    """Mish activation: x * tanh(softplus(x)).

    Mish: A Self Regularized Non-Monotonic Neural Activation Function
    https://arxiv.org/abs/1908.08681

    Note: `inplace` is accepted for API consistency but ignored; the
    computation is always out-of-place.
    """
    return x * F.softplus(x).tanh()
|
class Mish(nn.Module):
    """Module wrapper around the `mish` activation function."""

    def __init__(self, inplace: bool = False):
        super(Mish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return mish(x, inplace=self.inplace)
|
def sigmoid(x, inplace: bool = False):
    """Sigmoid activation; mutates `x` in place when `inplace` is True."""
    if inplace:
        return x.sigmoid_()
    return x.sigmoid()
|
class Sigmoid(nn.Module):
    """Sigmoid activation module; mutates the input in place when `inplace` is True."""

    def __init__(self, inplace: bool = False):
        super(Sigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        if self.inplace:
            return x.sigmoid_()
        return x.sigmoid()
|
def tanh(x, inplace: bool = False):
    """Tanh activation; mutates `x` in place when `inplace` is True."""
    if inplace:
        return x.tanh_()
    return x.tanh()
|
class Tanh(nn.Module):
    """Tanh activation module; mutates the input in place when `inplace` is True."""

    def __init__(self, inplace: bool = False):
        super(Tanh, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        if self.inplace:
            return x.tanh_()
        return x.tanh()
|
def hard_swish(x, inplace: bool = False):
    """Hard-swish: x * relu6(x + 3) / 6 (piecewise-linear swish, MobileNetV3)."""
    # relu6 returns a fresh tensor, so the in-place div_ never aliases `x`
    gate = F.relu6(x + 3.0).div_(6.0)
    return x.mul_(gate) if inplace else x.mul(gate)
|
class HardSwish(nn.Module):
    """Module wrapper around the `hard_swish` activation function."""

    def __init__(self, inplace: bool = False):
        super(HardSwish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return hard_swish(x, inplace=self.inplace)
|
def hard_sigmoid(x, inplace: bool = False):
    """Hard sigmoid: relu6(x + 3) / 6, the piecewise-linear sigmoid approximation."""
    if inplace:
        # equivalent chain of in-place ops mutating `x`
        return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
    return F.relu6(x + 3.0) / 6.0
|
class HardSigmoid(nn.Module):
    """Module wrapper around the `hard_sigmoid` activation function."""

    def __init__(self, inplace: bool = False):
        super(HardSigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return hard_sigmoid(x, inplace=self.inplace)
|
@torch.jit.script
def swish_jit(x, inplace: bool = False):
    """Swish / SiLU, jit-scripted: x * sigmoid(x).

    Described originally as SiLU (https://arxiv.org/abs/1702.03118v3)
    and also as Swish (https://arxiv.org/abs/1710.05941).

    TODO Rename to SiLU with addition to PyTorch

    Note: `inplace` is accepted for API parity but ignored.
    """
    return x * x.sigmoid()
|
@torch.jit.script
def mish_jit(x, _inplace: bool = False):
    """Mish, jit-scripted: x * tanh(softplus(x)).

    Mish: A Self Regularized Non-Monotonic Neural Activation Function
    https://arxiv.org/abs/1908.08681

    Note: `_inplace` is accepted for API parity but ignored.
    """
    return x * F.softplus(x).tanh()
|
class SwishJit(nn.Module):
    """Module wrapper for the jit-scripted swish.

    `inplace` is accepted for constructor parity with the other activation
    modules but is not stored or used.
    """

    def __init__(self, inplace: bool = False):
        super(SwishJit, self).__init__()

    def forward(self, x):
        return swish_jit(x)
|
# NOTE(review): the lines below were non-Python page residue ("Subsets and
# Splits ..."), apparently captured from a dataset-viewer UI; commented out
# so they cannot be mistaken for program text.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.