repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/maskiou_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import Conv2d, Linear, MaxPool2d
from mmcv.runner import BaseModule, force_fp32
from torch.nn.modules.utils import _pair

from mmdet.models.builder import HEADS, build_loss


@HEADS.register_module()
class MaskIoUHead(BaseModule):
    """Mask IoU Head.

    This head predicts the IoU of predicted masks and corresponding gt masks.
    """

    def __init__(self,
                 num_convs=4,
                 num_fcs=2,
                 roi_feat_size=14,
                 in_channels=256,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 num_classes=80,
                 loss_iou=dict(type='MSELoss', loss_weight=0.5),
                 init_cfg=[
                     dict(type='Kaiming', override=dict(name='convs')),
                     dict(type='Caffe2Xavier', override=dict(name='fcs')),
                     dict(
                         type='Normal',
                         std=0.01,
                         override=dict(name='fc_mask_iou'))
                 ]):
        super(MaskIoUHead, self).__init__(init_cfg)
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.num_classes = num_classes
        self.fp16_enabled = False

        self.convs = nn.ModuleList()
        for i in range(num_convs):
            if i == 0:
                # concatenation of mask feature and mask prediction
                in_channels = self.in_channels + 1
            else:
                in_channels = self.conv_out_channels
            # last conv downsamples by 2 so the fc input size stays small
            stride = 2 if i == num_convs - 1 else 1
            self.convs.append(
                Conv2d(
                    in_channels,
                    self.conv_out_channels,
                    3,
                    stride=stride,
                    padding=1))

        roi_feat_size = _pair(roi_feat_size)
        # spatial area after the stride-2 conv above
        pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2)
        self.fcs = nn.ModuleList()
        for i in range(num_fcs):
            in_channels = (
                self.conv_out_channels *
                pooled_area if i == 0 else self.fc_out_channels)
            self.fcs.append(Linear(in_channels, self.fc_out_channels))

        self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes)
        self.relu = nn.ReLU()
        self.max_pool = MaxPool2d(2, 2)
        self.loss_iou = build_loss(loss_iou)

    def forward(self, mask_feat, mask_pred):
        """Predict per-class mask IoU scores.

        Args:
            mask_feat (Tensor): RoI mask features, shape (num_rois, C, H, W).
            mask_pred (Tensor): mask logits, shape (num_rois, 2*H, 2*W)
                relative to the pooled prediction — it is max-pooled by 2
                below before being concatenated with ``mask_feat``.
                TODO confirm exact input resolution against the mask head.

        Returns:
            Tensor: predicted mask IoU, shape (num_rois, num_classes).
        """
        mask_pred = mask_pred.sigmoid()
        # pool the prediction so it matches mask_feat's spatial size, then
        # stack it as an extra input channel
        mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1))

        x = torch.cat((mask_feat, mask_pred_pooled), 1)

        for conv in self.convs:
            x = self.relu(conv(x))
        x = x.flatten(1)
        for fc in self.fcs:
            x = self.relu(fc(x))
        mask_iou = self.fc_mask_iou(x)
        return mask_iou

    @force_fp32(apply_to=('mask_iou_pred', ))
    def loss(self, mask_iou_pred, mask_iou_targets):
        """Regression loss on samples whose target IoU is positive.

        When there is no positive sample, a zero loss that still depends on
        the prediction is returned so gradients remain well defined.
        """
        pos_inds = mask_iou_targets > 0
        if pos_inds.sum() > 0:
            loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds],
                                          mask_iou_targets[pos_inds])
        else:
            loss_mask_iou = mask_iou_pred.sum() * 0
        return dict(loss_mask_iou=loss_mask_iou)

    @force_fp32(apply_to=('mask_pred', ))
    def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets,
                    rcnn_train_cfg):
        """Compute target of mask IoU.

        Mask IoU target is the IoU of the predicted mask (inside a bbox) and
        the gt mask of corresponding gt mask (the whole instance).
        The intersection area is computed inside the bbox, and the gt mask area
        is computed with two steps, firstly we compute the gt area inside the
        bbox, then divide it by the area ratio of gt area inside the bbox and
        the gt area of the whole instance.

        Args:
            sampling_results (list[:obj:`SamplingResult`]): sampling results.
            gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance)
                of each image, with the same shape of the input image.
            mask_pred (Tensor): Predicted masks of each positive proposal,
                shape (num_pos, h, w).
            mask_targets (Tensor): Gt mask of each positive proposal,
                binary map of the shape (num_pos, h, w).
            rcnn_train_cfg (dict): Training config for R-CNN part.

        Returns:
            Tensor: mask iou target (length == num positive).
        """
        pos_proposals = [res.pos_bboxes for res in sampling_results]
        pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]

        # compute the area ratio of gt areas inside the proposals and
        # the whole instance
        area_ratios = map(self._get_area_ratio, pos_proposals,
                          pos_assigned_gt_inds, gt_masks)
        area_ratios = torch.cat(list(area_ratios))
        assert mask_targets.size(0) == area_ratios.size(0)

        mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float()
        mask_pred_areas = mask_pred.sum((-1, -2))

        # mask_pred and mask_targets are binary maps
        overlap_areas = (mask_pred * mask_targets).sum((-1, -2))

        # compute the mask area of the whole instance
        # (1e-7 guards against division by zero for degenerate ratios)
        gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)

        mask_iou_targets = overlap_areas / (
            mask_pred_areas + gt_full_areas - overlap_areas)
        return mask_iou_targets

    def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
        """Compute area ratio of the gt mask inside the proposal and the gt
        mask of the corresponding instance."""
        num_pos = pos_proposals.size(0)
        if num_pos > 0:
            area_ratios = []
            proposals_np = pos_proposals.cpu().numpy()
            pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
            # compute mask areas of gt instances (batch processing for speedup)
            gt_instance_mask_area = gt_masks.areas
            for i in range(num_pos):
                gt_mask = gt_masks[pos_assigned_gt_inds[i]]

                # crop the gt mask inside the proposal
                bbox = proposals_np[i, :].astype(np.int32)
                gt_mask_in_proposal = gt_mask.crop(bbox)

                ratio = gt_mask_in_proposal.areas[0] / (
                    gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
                area_ratios.append(ratio)
            area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
                pos_proposals.device)
        else:
            # no positives: return an empty tensor on the right device
            area_ratios = pos_proposals.new_zeros((0, ))
        return area_ratios

    @force_fp32(apply_to=('mask_iou_pred', ))
    def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):
        """Get the mask scores.

        mask_score = bbox_score * mask_iou

        Returns:
            list[np.ndarray]: per-class arrays of mask scores, one entry per
                class in ``range(num_classes)``.
        """
        inds = range(det_labels.size(0))
        mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1]
        mask_scores = mask_scores.cpu().numpy()
        det_labels = det_labels.cpu().numpy()
        return [mask_scores[det_labels == i] for i in range(self.num_classes)]
7,382
39.125
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock

from .fused_semantic_head import FusedSemanticHead


@HEADS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
    """Semantic head for `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Identical to ``FusedSemanticHead`` except that, when ``conv_to_res`` is
    enabled, the stack of plain 3x3 convs is swapped for residual blocks.

    Args:
        conv_to_res (bool, optional): if True, change the conv layers to
            ``SimplifiedBasicBlock``.
    """

    def __init__(self, conv_to_res=True, **kwargs):
        super(SCNetSemanticHead, self).__init__(**kwargs)
        self.conv_to_res = conv_to_res
        if not self.conv_to_res:
            return
        # Each residual block replaces a pair of plain convs, so halve the
        # count and rebuild the conv stack.
        n_blocks = self.num_convs // 2
        self.convs = ResLayer(
            SimplifiedBasicBlock,
            self.in_channels,
            self.conv_out_channels,
            n_blocks,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)
        self.num_convs = n_blocks
998
33.448276
72
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/feature_relay_head.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn from mmcv.runner import BaseModule, auto_fp16 from mmdet.models.builder import HEADS @HEADS.register_module() class FeatureRelayHead(BaseModule): """Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_. Args: in_channels (int, optional): number of input channels. Default: 256. conv_out_channels (int, optional): number of output channels before classification layer. Default: 256. roi_feat_size (int, optional): roi feat size at box head. Default: 7. scale_factor (int, optional): scale factor to match roi feat size at mask head. Default: 2. init_cfg (dict or list[dict], optional): Initialization config dict. """ def __init__(self, in_channels=1024, out_conv_channels=256, roi_feat_size=7, scale_factor=2, init_cfg=dict(type='Kaiming', layer='Linear')): super(FeatureRelayHead, self).__init__(init_cfg) assert isinstance(roi_feat_size, int) self.in_channels = in_channels self.out_conv_channels = out_conv_channels self.roi_feat_size = roi_feat_size self.out_channels = (roi_feat_size**2) * out_conv_channels self.scale_factor = scale_factor self.fp16_enabled = False self.fc = nn.Linear(self.in_channels, self.out_channels) self.upsample = nn.Upsample( scale_factor=scale_factor, mode='bilinear', align_corners=True) @auto_fp16() def forward(self, x): """Forward function.""" N, in_C = x.shape if N > 0: out_C = self.out_conv_channels out_HW = self.roi_feat_size x = self.fc(x) x = x.reshape(N, out_C, out_HW, out_HW) x = self.upsample(x) return x return None
1,930
34.759259
78
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/global_context_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32

from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock


@HEADS.register_module()
class GlobalContextHead(BaseModule):
    """Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        num_convs (int, optional): number of convolutional layer in GlbCtxHead.
            Default: 4.
        in_channels (int, optional): number of input channels. Default: 256.
        conv_out_channels (int, optional): number of output channels before
            classification layer. Default: 256.
        num_classes (int, optional): number of classes. Default: 80.
        loss_weight (float, optional): global context loss weight. Default: 1.
        conv_cfg (dict, optional): config to init conv layer. Default: None.
        norm_cfg (dict, optional): config to init norm layer. Default: None.
        conv_to_res (bool, optional): if True, 2 convs will be grouped into
            1 `SimplifiedBasicBlock` using a skip connection.
            Default: False.
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 num_convs=4,
                 in_channels=256,
                 conv_out_channels=256,
                 num_classes=80,
                 loss_weight=1.0,
                 conv_cfg=None,
                 norm_cfg=None,
                 conv_to_res=False,
                 init_cfg=dict(
                     type='Normal', std=0.01, override=dict(name='fc'))):
        super(GlobalContextHead, self).__init__(init_cfg)
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.loss_weight = loss_weight
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.conv_to_res = conv_to_res
        self.fp16_enabled = False

        if self.conv_to_res:
            # pair up plain convs into residual blocks
            num_res_blocks = num_convs // 2
            self.convs = ResLayer(
                SimplifiedBasicBlock,
                in_channels,
                self.conv_out_channels,
                num_res_blocks,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)
            self.num_convs = num_res_blocks
        else:
            self.convs = nn.ModuleList()
            for i in range(self.num_convs):
                in_channels = self.in_channels if i == 0 else conv_out_channels
                self.convs.append(
                    ConvModule(
                        in_channels,
                        conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))

        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(conv_out_channels, num_classes)

        self.criterion = nn.BCEWithLogitsLoss()

    @auto_fp16()
    def forward(self, feats):
        """Forward function.

        Args:
            feats (tuple[Tensor]): multi-level features; only the last
                (coarsest) level is used.

        Returns:
            tuple: multi-class logits of shape (batch, num_classes) and the
                globally pooled feature of shape
                (batch, conv_out_channels, 1, 1).
        """
        x = feats[-1]
        for i in range(self.num_convs):
            x = self.convs[i](x)
        x = self.pool(x)

        # multi-class prediction
        mc_pred = x.reshape(x.size(0), -1)
        mc_pred = self.fc(mc_pred)

        return mc_pred, x

    @force_fp32(apply_to=('pred', ))
    def loss(self, pred, labels):
        """Loss function.

        Multi-label BCE: for each image the target is a binary vector that
        marks every class appearing in its gt labels.
        """
        labels = [lbl.unique() for lbl in labels]
        targets = pred.new_zeros(pred.size())
        for i, label in enumerate(labels):
            targets[i, label] = 1.0
        loss = self.loss_weight * self.criterion(pred, targets)
        return loss
3,774
36.009804
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .coarse_mask_head import CoarseMaskHead from .fcn_mask_head import FCNMaskHead from .feature_relay_head import FeatureRelayHead from .fused_semantic_head import FusedSemanticHead from .global_context_head import GlobalContextHead from .grid_head import GridHead from .htc_mask_head import HTCMaskHead from .mask_point_head import MaskPointHead from .maskiou_head import MaskIoUHead from .scnet_mask_head import SCNetMaskHead from .scnet_semantic_head import SCNetSemanticHead __all__ = [ 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', 'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead' ]
747
38.368421
70
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/htc_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule

from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead


@HEADS.register_module()
class HTCMaskHead(FCNMaskHead):
    """Mask head for Hybrid Task Cascade.

    Extends ``FCNMaskHead`` with an optional 1x1 residual conv that fuses
    the mask feature relayed from the previous cascade stage into the
    current stage's input.
    """

    def __init__(self, with_conv_res=True, *args, **kwargs):
        super(HTCMaskHead, self).__init__(*args, **kwargs)
        self.with_conv_res = with_conv_res
        if self.with_conv_res:
            # projects the previous stage's feature before it is added
            self.conv_res = ConvModule(
                self.conv_out_channels,
                self.conv_out_channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)

    def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
        """Run the head, optionally fusing ``res_feat`` from the last stage.

        Returns the mask logits, the shared conv feature, or a list of both,
        depending on ``return_logits`` / ``return_feat``.
        """
        if res_feat is not None:
            assert self.with_conv_res
            x = x + self.conv_res(res_feat)

        for conv in self.convs:
            x = conv(x)
        res_feat = x

        outs = []
        if return_logits:
            up = self.upsample(x)
            if self.upsample_method == 'deconv':
                up = self.relu(up)
            outs.append(self.conv_logits(up))
        if return_feat:
            outs.append(res_feat)
        return outs if len(outs) > 1 else outs[0]
1,282
31.075
78
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from warnings import warn

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer
from mmcv.ops.carafe import CARAFEPack
from mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32
from torch.nn.modules.utils import _pair

from mmdet.core import mask_target
from mmdet.models.builder import HEADS, build_loss

BYTES_PER_FLOAT = 4
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024**3  # 1 GB memory limit


@HEADS.register_module()
class FCNMaskHead(BaseModule):
    """FCN-style mask head: a stack of convs, an optional upsample layer and
    a 1x1 predictor that emits per-class (or class-agnostic) mask logits."""

    def __init__(self,
                 num_convs=4,
                 roi_feat_size=14,
                 in_channels=256,
                 conv_kernel_size=3,
                 conv_out_channels=256,
                 num_classes=80,
                 class_agnostic=False,
                 upsample_cfg=dict(type='deconv', scale_factor=2),
                 conv_cfg=None,
                 norm_cfg=None,
                 predictor_cfg=dict(type='Conv'),
                 loss_mask=dict(
                     type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
                 init_cfg=None):
        assert init_cfg is None, 'To prevent abnormal initialization ' \
            'behavior, init_cfg is not allowed to be set'
        super(FCNMaskHead, self).__init__(init_cfg)
        self.upsample_cfg = upsample_cfg.copy()
        if self.upsample_cfg['type'] not in [
                None, 'deconv', 'nearest', 'bilinear', 'carafe'
        ]:
            raise ValueError(
                f'Invalid upsample method {self.upsample_cfg["type"]}, '
                'accepted methods are "deconv", "nearest", "bilinear", '
                '"carafe"')
        self.num_convs = num_convs
        # WARN: roi_feat_size is reserved and not used
        self.roi_feat_size = _pair(roi_feat_size)
        self.in_channels = in_channels
        self.conv_kernel_size = conv_kernel_size
        self.conv_out_channels = conv_out_channels
        self.upsample_method = self.upsample_cfg.get('type')
        self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
        self.num_classes = num_classes
        self.class_agnostic = class_agnostic
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.predictor_cfg = predictor_cfg
        self.fp16_enabled = False
        self.loss_mask = build_loss(loss_mask)

        self.convs = ModuleList()
        for i in range(self.num_convs):
            in_channels = (
                self.in_channels if i == 0 else self.conv_out_channels)
            padding = (self.conv_kernel_size - 1) // 2
            self.convs.append(
                ConvModule(
                    in_channels,
                    self.conv_out_channels,
                    self.conv_kernel_size,
                    padding=padding,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg))
        upsample_in_channels = (
            self.conv_out_channels if self.num_convs > 0 else in_channels)
        upsample_cfg_ = self.upsample_cfg.copy()
        if self.upsample_method is None:
            self.upsample = None
        elif self.upsample_method == 'deconv':
            upsample_cfg_.update(
                in_channels=upsample_in_channels,
                out_channels=self.conv_out_channels,
                kernel_size=self.scale_factor,
                stride=self.scale_factor)
            self.upsample = build_upsample_layer(upsample_cfg_)
        elif self.upsample_method == 'carafe':
            upsample_cfg_.update(
                channels=upsample_in_channels, scale_factor=self.scale_factor)
            self.upsample = build_upsample_layer(upsample_cfg_)
        else:
            # suppress warnings
            align_corners = (None
                             if self.upsample_method == 'nearest' else False)
            upsample_cfg_.update(
                scale_factor=self.scale_factor,
                mode=self.upsample_method,
                align_corners=align_corners)
            self.upsample = build_upsample_layer(upsample_cfg_)

        out_channels = 1 if self.class_agnostic else self.num_classes
        logits_in_channel = (
            self.conv_out_channels
            if self.upsample_method == 'deconv' else upsample_in_channels)
        self.conv_logits = build_conv_layer(self.predictor_cfg,
                                            logits_in_channel, out_channels, 1)
        self.relu = nn.ReLU(inplace=True)
        self.debug_imgs = None

    def init_weights(self):
        """Init the upsample and predictor layers; other modules keep the
        base-class initialization."""
        super(FCNMaskHead, self).init_weights()
        for m in [self.upsample, self.conv_logits]:
            if m is None:
                continue
            elif isinstance(m, CARAFEPack):
                # CARAFE has its own init routine
                m.init_weights()
            else:
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
                nn.init.constant_(m.bias, 0)

    @auto_fp16()
    def forward(self, x):
        """Return mask logits of shape (n, out_channels, h', w')."""
        for conv in self.convs:
            x = conv(x)
        if self.upsample is not None:
            x = self.upsample(x)
            if self.upsample_method == 'deconv':
                x = self.relu(x)
        mask_pred = self.conv_logits(x)
        return mask_pred

    def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
        """Crop/resize gt masks to each positive proposal via mask_target."""
        pos_proposals = [res.pos_bboxes for res in sampling_results]
        pos_assigned_gt_inds = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]
        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
                                   gt_masks, rcnn_train_cfg)
        return mask_targets

    @force_fp32(apply_to=('mask_pred', ))
    def loss(self, mask_pred, mask_targets, labels):
        """
        Example:
            >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import *  # NOQA
            >>> N = 7  # N = number of extracted ROIs
            >>> C, H, W = 11, 32, 32
            >>> # Create example instance of FCN Mask Head.
            >>> # There are lots of variations depending on the configuration
            >>> self = FCNMaskHead(num_classes=C, num_convs=1)
            >>> inputs = torch.rand(N, self.in_channels, H, W)
            >>> mask_pred = self.forward(inputs)
            >>> sf = self.scale_factor
            >>> labels = torch.randint(0, C, size=(N,))
            >>> # With the default properties the mask targets should indicate
            >>> # a (potentially soft) single-class label
            >>> mask_targets = torch.rand(N, H * sf, W * sf)
            >>> loss = self.loss(mask_pred, mask_targets, labels)
            >>> print('loss = {!r}'.format(loss))
        """
        loss = dict()
        if mask_pred.size(0) == 0:
            # no positives: zero loss that still carries a gradient
            loss_mask = mask_pred.sum()
        else:
            if self.class_agnostic:
                loss_mask = self.loss_mask(mask_pred, mask_targets,
                                           torch.zeros_like(labels))
            else:
                loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
        loss['loss_mask'] = loss_mask
        return loss

    def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
                      ori_shape, scale_factor, rescale):
        """Get segmentation masks from mask_pred and bboxes.

        Args:
            mask_pred (Tensor or ndarray): shape (n, #class, h, w).
                For single-scale testing, mask_pred is the direct output of
                model, whose type is Tensor, while for multi-scale testing,
                it will be converted to numpy array outside of this method.
            det_bboxes (Tensor): shape (n, 4/5)
            det_labels (Tensor): shape (n, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape (Tuple): original image height and width, shape (2,)
            scale_factor(ndarray | Tensor): If ``rescale is True``, box
                coordinates are divided by this scale factor to fit
                ``ori_shape``.
            rescale (bool): If True, the resulting masks will be rescaled to
                ``ori_shape``.

        Returns:
            list[list]: encoded masks. The c-th item in the outer list
                corresponds to the c-th class. Given the c-th outer list, the
                i-th item in that inner list is the mask for the i-th box with
                class label c.

        Example:
            >>> import mmcv
            >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import *  # NOQA
            >>> N = 7  # N = number of extracted ROIs
            >>> C, H, W = 11, 32, 32
            >>> # Create example instance of FCN Mask Head.
            >>> self = FCNMaskHead(num_classes=C, num_convs=0)
            >>> inputs = torch.rand(N, self.in_channels, H, W)
            >>> mask_pred = self.forward(inputs)
            >>> # Each input is associated with some bounding box
            >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)
            >>> det_labels = torch.randint(0, C, size=(N,))
            >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })
            >>> ori_shape = (H * 4, W * 4)
            >>> scale_factor = torch.FloatTensor((1, 1))
            >>> rescale = False
            >>> # Encoded masks are a list for each category.
            >>> encoded_masks = self.get_seg_masks(
            >>>     mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape,
            >>>     scale_factor, rescale
            >>> )
            >>> assert len(encoded_masks) == C
            >>> assert sum(list(map(len, encoded_masks))) == N
        """
        if isinstance(mask_pred, torch.Tensor):
            mask_pred = mask_pred.sigmoid()
        else:
            # In AugTest, has been activated before
            mask_pred = det_bboxes.new_tensor(mask_pred)

        device = mask_pred.device
        cls_segms = [[] for _ in range(self.num_classes)
                     ]  # BG is not included in num_classes
        bboxes = det_bboxes[:, :4]
        labels = det_labels

        # In most cases, scale_factor should have been
        # converted to Tensor when rescale the bbox
        if not isinstance(scale_factor, torch.Tensor):
            if isinstance(scale_factor, float):
                scale_factor = np.array([scale_factor] * 4)
                warn('Scale_factor should be a Tensor or ndarray '
                     'with shape (4,), float would be deprecated. ')
            assert isinstance(scale_factor, np.ndarray)
            scale_factor = torch.Tensor(scale_factor)

        if rescale:
            img_h, img_w = ori_shape[:2]
            bboxes = bboxes / scale_factor
        else:
            w_scale, h_scale = scale_factor[0], scale_factor[1]
            img_h = np.round(ori_shape[0] * h_scale.item()).astype(np.int32)
            img_w = np.round(ori_shape[1] * w_scale.item()).astype(np.int32)

        N = len(mask_pred)
        # The actual implementation split the input into chunks,
        # and paste them chunk by chunk.
        if device.type == 'cpu':
            # CPU is most efficient when they are pasted one by one with
            # skip_empty=True, so that it performs minimal number of
            # operations.
            num_chunks = N
        else:
            # GPU benefits from parallelism for larger chunks,
            # but may have memory issue
            # the types of img_w and img_h are np.int32,
            # when the image resolution is large,
            # the calculation of num_chunks will overflow.
            # so we neet to change the types of img_w and img_h to int.
            # See https://github.com/open-mmlab/mmdetection/pull/5191
            num_chunks = int(
                np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT /
                        GPU_MEM_LIMIT))
            assert (num_chunks <=
                    N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
        chunks = torch.chunk(torch.arange(N, device=device), num_chunks)

        threshold = rcnn_test_cfg.mask_thr_binary
        im_mask = torch.zeros(
            N,
            img_h,
            img_w,
            device=device,
            dtype=torch.bool if threshold >= 0 else torch.uint8)

        if not self.class_agnostic:
            # pick each instance's own class channel
            mask_pred = mask_pred[range(N), labels][:, None]

        for inds in chunks:
            masks_chunk, spatial_inds = _do_paste_mask(
                mask_pred[inds],
                bboxes[inds],
                img_h,
                img_w,
                skip_empty=device.type == 'cpu')

            if threshold >= 0:
                masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
            else:
                # for visualization and debugging
                masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)

            im_mask[(inds, ) + spatial_inds] = masks_chunk

        for i in range(N):
            cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())
        return cls_segms

    def onnx_export(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
                    ori_shape, **kwargs):
        """Get segmentation masks from mask_pred and bboxes.

        Args:
            mask_pred (Tensor): shape (n, #class, h, w).
            det_bboxes (Tensor): shape (n, 4/5)
            det_labels (Tensor): shape (n, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape (Tuple): original image height and width, shape (2,)

        Returns:
            Tensor: a mask of shape (N, img_h, img_w).
        """
        mask_pred = mask_pred.sigmoid()
        bboxes = det_bboxes[:, :4]
        labels = det_labels
        # No need to consider rescale and scale_factor while exporting to ONNX
        img_h, img_w = ori_shape[:2]
        threshold = rcnn_test_cfg.mask_thr_binary
        if not self.class_agnostic:
            box_inds = torch.arange(mask_pred.shape[0])
            mask_pred = mask_pred[box_inds, labels][:, None]
        masks, _ = _do_paste_mask(
            mask_pred, bboxes, img_h, img_w, skip_empty=False)
        if threshold >= 0:
            # should convert to float to avoid problems in TRT
            masks = (masks >= threshold).to(dtype=torch.float)
        return masks


def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
    """Paste instance masks according to boxes.

    This implementation is modified from
    https://github.com/facebookresearch/detectron2/

    Args:
        masks (Tensor): N, 1, H, W
        boxes (Tensor): N, 4
        img_h (int): Height of the image to be pasted.
        img_w (int): Width of the image to be pasted.
        skip_empty (bool): Only paste masks within the region that
            tightly bound all boxes, and returns the results this region only.
            An important optimization for CPU.

    Returns:
        tuple: (Tensor, tuple). The first item is mask tensor, the second one
            is the slice object.
        If skip_empty == False, the whole image will be pasted. It will
            return a mask of shape (N, img_h, img_w) and an empty tuple.
        If skip_empty == True, only area around the mask will be pasted.
            A mask of shape (N, h', w') and its start and end coordinates
            in the original image will be returned.
    """
    # On GPU, paste all masks together (up to chunk size)
    # by using the entire image to sample the masks
    # Compared to pasting them one by one,
    # this has more operations but is faster on COCO-scale dataset.
    device = masks.device
    if skip_empty:
        x0_int, y0_int = torch.clamp(
            boxes.min(dim=0).values.floor()[:2] - 1,
            min=0).to(dtype=torch.int32)
        x1_int = torch.clamp(
            boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
        y1_int = torch.clamp(
            boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
    else:
        x0_int, y0_int = 0, 0
        x1_int, y1_int = img_w, img_h
    x0, y0, x1, y1 = torch.split(boxes, 1, dim=1)  # each is Nx1

    N = masks.shape[0]

    # pixel-center sample coordinates, normalized to [-1, 1] for grid_sample
    img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
    img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
    img_y = (img_y - y0) / (y1 - y0) * 2 - 1
    img_x = (img_x - x0) / (x1 - x0) * 2 - 1
    # img_x, img_y have shapes (N, w), (N, h)
    # IsInf op is not supported with ONNX<=1.7.0
    if not torch.onnx.is_in_onnx_export():
        # zero-size boxes produce inf after the division above; zero them out
        if torch.isinf(img_x).any():
            inds = torch.where(torch.isinf(img_x))
            img_x[inds] = 0
        if torch.isinf(img_y).any():
            inds = torch.where(torch.isinf(img_y))
            img_y[inds] = 0

    gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
    gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
    grid = torch.stack([gx, gy], dim=3)

    img_masks = F.grid_sample(
        masks.to(dtype=torch.float32), grid, align_corners=False)

    if skip_empty:
        return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
    else:
        return img_masks[:, 0], ()
17,394
41.118644
85
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32

from mmdet.models.builder import HEADS, build_loss


@HEADS.register_module()
class FusedSemanticHead(BaseModule):
    r"""Multi-level fused semantic segmentation head.

    .. code-block:: none

        in_1 -> 1x1 conv ---
                            |
        in_2 -> 1x1 conv -- |
                           ||
        in_3 -> 1x1 conv - ||
                          |||                  /-> 1x1 conv (mask prediction)
        in_4 -> 1x1 conv -----> 3x3 convs (*4)
                            |                  \-> 1x1 conv (feature)
        in_5 -> 1x1 conv ---
    """  # noqa: W605

    def __init__(self,
                 num_ins,
                 fusion_level,
                 num_convs=4,
                 in_channels=256,
                 conv_out_channels=256,
                 num_classes=183,
                 conv_cfg=None,
                 norm_cfg=None,
                 ignore_label=None,
                 loss_weight=None,
                 loss_seg=dict(
                     type='CrossEntropyLoss',
                     ignore_index=255,
                     loss_weight=0.2),
                 init_cfg=dict(
                     type='Kaiming', override=dict(name='conv_logits'))):
        super(FusedSemanticHead, self).__init__(init_cfg)
        self.num_ins = num_ins
        self.fusion_level = fusion_level
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.conv_out_channels = conv_out_channels
        self.num_classes = num_classes
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False

        self.lateral_convs = nn.ModuleList()
        for i in range(self.num_ins):
            self.lateral_convs.append(
                ConvModule(
                    self.in_channels,
                    self.in_channels,
                    1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    inplace=False))

        self.convs = nn.ModuleList()
        for i in range(self.num_convs):
            in_channels = self.in_channels if i == 0 else conv_out_channels
            self.convs.append(
                ConvModule(
                    in_channels,
                    conv_out_channels,
                    3,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        self.conv_embedding = ConvModule(
            conv_out_channels,
            conv_out_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)
        self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
        # legacy arguments are folded into loss_seg for backward compatibility
        if ignore_label:
            loss_seg['ignore_index'] = ignore_label
        if loss_weight:
            loss_seg['loss_weight'] = loss_weight
        if ignore_label or loss_weight:
            warnings.warn('``ignore_label`` and ``loss_weight`` would be '
                          'deprecated soon. Please set ``ingore_index`` and '
                          '``loss_weight`` in ``loss_seg`` instead.')
        self.criterion = build_loss(loss_seg)

    @auto_fp16()
    def forward(self, feats):
        """Fuse all levels at ``fusion_level`` resolution, then predict.

        Args:
            feats (tuple[Tensor]): multi-level feature maps, one per input.

        Returns:
            tuple: semantic logits of shape (batch, num_classes, h, w) and
                the embedded semantic feature used by downstream heads.
        """
        x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
        fused_size = tuple(x.shape[-2:])
        for i, feat in enumerate(feats):
            if i != self.fusion_level:
                feat = F.interpolate(
                    feat, size=fused_size, mode='bilinear', align_corners=True)
                x += self.lateral_convs[i](feat)

        for i in range(self.num_convs):
            x = self.convs[i](x)

        mask_pred = self.conv_logits(x)
        x = self.conv_embedding(x)
        return mask_pred, x

    @force_fp32(apply_to=('mask_pred', ))
    def loss(self, mask_pred, labels):
        """Per-pixel classification loss against the semantic label map."""
        labels = labels.squeeze(1).long()
        loss_semantic_seg = self.criterion(mask_pred, labels)
        return loss_semantic_seg
4,150
34.177966
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/mask_point_head.py
# Copyright (c) OpenMMLab. All rights reserved. # Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa import torch import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point from mmcv.runner import BaseModule from mmdet.models.builder import HEADS, build_loss @HEADS.register_module() class MaskPointHead(BaseModule): """A mask point head use in PointRend. ``MaskPointHead`` use shared multi-layer perceptron (equivalent to nn.Conv1d) to predict the logit of input points. The fine-grained feature and coarse feature will be concatenate together for predication. Args: num_fcs (int): Number of fc layers in the head. Default: 3. in_channels (int): Number of input channels. Default: 256. fc_channels (int): Number of fc channels. Default: 256. num_classes (int): Number of classes for logits. Default: 80. class_agnostic (bool): Whether use class agnostic classification. If so, the output channels of logits will be 1. Default: False. coarse_pred_each_layer (bool): Whether concatenate coarse feature with the output of each fc layer. Default: True. conv_cfg (dict | None): Dictionary to construct and config conv layer. Default: dict(type='Conv1d')) norm_cfg (dict | None): Dictionary to construct and config norm layer. Default: None. loss_point (dict): Dictionary to construct and config loss layer of point head. Default: dict(type='CrossEntropyLoss', use_mask=True, loss_weight=1.0). init_cfg (dict or list[dict], optional): Initialization config dict. 
""" def __init__(self, num_classes, num_fcs=3, in_channels=256, fc_channels=256, class_agnostic=False, coarse_pred_each_layer=True, conv_cfg=dict(type='Conv1d'), norm_cfg=None, act_cfg=dict(type='ReLU'), loss_point=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0), init_cfg=dict( type='Normal', std=0.001, override=dict(name='fc_logits'))): super().__init__(init_cfg) self.num_fcs = num_fcs self.in_channels = in_channels self.fc_channels = fc_channels self.num_classes = num_classes self.class_agnostic = class_agnostic self.coarse_pred_each_layer = coarse_pred_each_layer self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.loss_point = build_loss(loss_point) fc_in_channels = in_channels + num_classes self.fcs = nn.ModuleList() for _ in range(num_fcs): fc = ConvModule( fc_in_channels, fc_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.fcs.append(fc) fc_in_channels = fc_channels fc_in_channels += num_classes if self.coarse_pred_each_layer else 0 out_channels = 1 if self.class_agnostic else self.num_classes self.fc_logits = nn.Conv1d( fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0) def forward(self, fine_grained_feats, coarse_feats): """Classify each point base on fine grained and coarse feats. Args: fine_grained_feats (Tensor): Fine grained feature sampled from FPN, shape (num_rois, in_channels, num_points). coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead, shape (num_rois, num_classes, num_points). Returns: Tensor: Point classification results, shape (num_rois, num_class, num_points). """ x = torch.cat([fine_grained_feats, coarse_feats], dim=1) for fc in self.fcs: x = fc(x) if self.coarse_pred_each_layer: x = torch.cat((x, coarse_feats), dim=1) return self.fc_logits(x) def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks, cfg): """Get training targets of MaskPointHead for all images. Args: rois (Tensor): Region of Interest, shape (num_rois, 5). 
rel_roi_points: Points coordinates relative to RoI, shape (num_rois, num_points, 2). sampling_results (:obj:`SamplingResult`): Sampling result after sampling and assignment. gt_masks (Tensor) : Ground truth segmentation masks of corresponding boxes, shape (num_rois, height, width). cfg (dict): Training cfg. Returns: Tensor: Point target, shape (num_rois, num_points). """ num_imgs = len(sampling_results) rois_list = [] rel_roi_points_list = [] for batch_ind in range(num_imgs): inds = (rois[:, 0] == batch_ind) rois_list.append(rois[inds]) rel_roi_points_list.append(rel_roi_points[inds]) pos_assigned_gt_inds_list = [ res.pos_assigned_gt_inds for res in sampling_results ] cfg_list = [cfg for _ in range(num_imgs)] point_targets = map(self._get_target_single, rois_list, rel_roi_points_list, pos_assigned_gt_inds_list, gt_masks, cfg_list) point_targets = list(point_targets) if len(point_targets) > 0: point_targets = torch.cat(point_targets) return point_targets def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds, gt_masks, cfg): """Get training target of MaskPointHead for each image.""" num_pos = rois.size(0) num_points = cfg.num_points if num_pos > 0: gt_masks_th = ( gt_masks.to_tensor(rois.dtype, rois.device).index_select( 0, pos_assigned_gt_inds)) gt_masks_th = gt_masks_th.unsqueeze(1) rel_img_points = rel_roi_point_to_rel_img_point( rois, rel_roi_points, gt_masks_th) point_targets = point_sample(gt_masks_th, rel_img_points).squeeze(1) else: point_targets = rois.new_zeros((0, num_points)) return point_targets def loss(self, point_pred, point_targets, labels): """Calculate loss for MaskPointHead. Args: point_pred (Tensor): Point predication result, shape (num_rois, num_classes, num_points). point_targets (Tensor): Point targets, shape (num_roi, num_points). 
labels (Tensor): Class label of corresponding boxes, shape (num_rois, ) Returns: dict[str, Tensor]: a dictionary of point loss components """ loss = dict() if self.class_agnostic: loss_point = self.loss_point(point_pred, point_targets, torch.zeros_like(labels)) else: loss_point = self.loss_point(point_pred, point_targets, labels) loss['loss_point'] = loss_point return loss def _get_uncertainty(self, mask_pred, labels): """Estimate uncertainty based on pred logits. We estimate uncertainty as L1 distance between 0.0 and the logits prediction in 'mask_pred' for the foreground class in `classes`. Args: mask_pred (Tensor): mask predication logits, shape (num_rois, num_classes, mask_height, mask_width). labels (list[Tensor]): Either predicted or ground truth label for each predicted mask, of length num_rois. Returns: scores (Tensor): Uncertainty scores with the most uncertain locations having the highest uncertainty score, shape (num_rois, 1, mask_height, mask_width) """ if mask_pred.shape[1] == 1: gt_class_logits = mask_pred.clone() else: inds = torch.arange(mask_pred.shape[0], device=mask_pred.device) gt_class_logits = mask_pred[inds, labels].unsqueeze(1) return -torch.abs(gt_class_logits) def get_roi_rel_points_train(self, mask_pred, labels, cfg): """Get ``num_points`` most uncertain points with random points during train. Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties are calculated for each point using '_get_uncertainty()' function that takes point's logit prediction as input. Args: mask_pred (Tensor): A tensor of shape (num_rois, num_classes, mask_height, mask_width) for class-specific or class-agnostic prediction. labels (list): The ground truth class for each instance. cfg (dict): Training config of point head. Returns: point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) that contains the coordinates sampled points. 
""" num_points = cfg.num_points oversample_ratio = cfg.oversample_ratio importance_sample_ratio = cfg.importance_sample_ratio assert oversample_ratio >= 1 assert 0 <= importance_sample_ratio <= 1 batch_size = mask_pred.shape[0] num_sampled = int(num_points * oversample_ratio) point_coords = torch.rand( batch_size, num_sampled, 2, device=mask_pred.device) point_logits = point_sample(mask_pred, point_coords) # It is crucial to calculate uncertainty based on the sampled # prediction value for the points. Calculating uncertainties of the # coarse predictions first and sampling them for points leads to # incorrect results. To illustrate this: assume uncertainty func( # logits)=-abs(logits), a sampled point between two coarse # predictions with -1 and 1 logits has 0 logits, and therefore 0 # uncertainty value. However, if we calculate uncertainties for the # coarse predictions first, both will have -1 uncertainty, # and sampled point will get -1 uncertainty. point_uncertainties = self._get_uncertainty(point_logits, labels) num_uncertain_points = int(importance_sample_ratio * num_points) num_random_points = num_points - num_uncertain_points idx = torch.topk( point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] shift = num_sampled * torch.arange( batch_size, dtype=torch.long, device=mask_pred.device) idx += shift[:, None] point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( batch_size, num_uncertain_points, 2) if num_random_points > 0: rand_roi_coords = torch.rand( batch_size, num_random_points, 2, device=mask_pred.device) point_coords = torch.cat((point_coords, rand_roi_coords), dim=1) return point_coords def get_roi_rel_points_test(self, mask_pred, pred_label, cfg): """Get ``num_points`` most uncertain points during test. Args: mask_pred (Tensor): A tensor of shape (num_rois, num_classes, mask_height, mask_width) for class-specific or class-agnostic prediction. pred_label (list): The predication class for each instance. 
cfg (dict): Testing config of point head. Returns: point_indices (Tensor): A tensor of shape (num_rois, num_points) that contains indices from [0, mask_height x mask_width) of the most uncertain points. point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) that contains [0, 1] x [0, 1] normalized coordinates of the most uncertain points from the [mask_height, mask_width] grid . """ num_points = cfg.subdivision_num_points uncertainty_map = self._get_uncertainty(mask_pred, pred_label) num_rois, _, mask_height, mask_width = uncertainty_map.shape # During ONNX exporting, the type of each elements of 'shape' is # `Tensor(float)`, while it is `float` during PyTorch inference. if isinstance(mask_height, torch.Tensor): h_step = 1.0 / mask_height.float() w_step = 1.0 / mask_width.float() else: h_step = 1.0 / mask_height w_step = 1.0 / mask_width # cast to int to avoid dynamic K for TopK op in ONNX mask_size = int(mask_height * mask_width) uncertainty_map = uncertainty_map.view(num_rois, mask_size) num_points = min(mask_size, num_points) point_indices = uncertainty_map.topk(num_points, dim=1)[1] xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step point_coords = torch.stack([xs, ys], dim=2) return point_indices, point_coords
13,455
42.830619
126
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .fcn_mask_head import FCNMaskHead


@HEADS.register_module()
class SCNetMaskHead(FCNMaskHead):
    """Mask head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Identical to :class:`FCNMaskHead` except that the stack of plain conv
    layers may be replaced by residual ``SimplifiedBasicBlock`` layers.

    Args:
        conv_to_res (bool, optional): if True, change the conv layers to
            ``SimplifiedBasicBlock``.
    """

    def __init__(self, conv_to_res=True, **kwargs):
        super(SCNetMaskHead, self).__init__(**kwargs)
        self.conv_to_res = conv_to_res
        if not conv_to_res:
            # Keep the plain conv stack built by FCNMaskHead untouched.
            return
        # Residual conversion assumes 3x3 convs; each residual block
        # stands in for a pair of plain convs, hence the floor division.
        assert self.conv_kernel_size == 3
        self.num_res_blocks = self.num_convs // 2
        self.convs = ResLayer(
            SimplifiedBasicBlock,
            self.in_channels,
            self.conv_out_channels,
            self.num_res_blocks,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)
979
32.793103
72
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/ghm_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F

from ..builder import LOSSES
from .utils import weight_reduce_loss


def _expand_onehot_labels(labels, label_weights, label_channels):
    """Expand index labels of shape (N,) into one-hot form (N, C).

    Labels outside ``[0, label_channels)`` (e.g. the background index)
    produce an all-zero row; ``label_weights`` is broadcast so each class
    column shares the per-sample weight.
    """
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    inds = torch.nonzero(
        (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds]] = 1
    bin_label_weights = label_weights.view(-1, 1).expand(
        label_weights.size(0), label_channels)
    return bin_labels, bin_label_weights


# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module()
class GHMC(nn.Module):
    """GHM Classification Loss.

    Details of the theorem can be viewed in the paper
    `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.

    Args:
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        use_sigmoid (bool): Can only be true for BCE based loss now.
        loss_weight (float): The weight of the total GHM-C loss.
        reduction (str): Options are "none", "mean" and "sum".
            Defaults to "mean"
    """

    def __init__(self,
                 bins=10,
                 momentum=0,
                 use_sigmoid=True,
                 loss_weight=1.0,
                 reduction='mean'):
        super(GHMC, self).__init__()
        self.bins = bins
        self.momentum = momentum
        # Bin edges partition the gradient-length range [0, 1] uniformly.
        edges = torch.arange(bins + 1).float() / bins
        self.register_buffer('edges', edges)
        # Nudge the last edge above 1.0 so that g == 1 falls in the last bin
        # (the binning test below is half-open: edges[i] <= g < edges[i+1]).
        self.edges[-1] += 1e-6
        if momentum > 0:
            # EMA of per-bin sample counts; a buffer so it moves with the
            # module across devices and is saved in state_dict.
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.use_sigmoid = use_sigmoid
        if not self.use_sigmoid:
            # Only the BCE (sigmoid) formulation is implemented.
            raise NotImplementedError
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self,
                pred,
                target,
                label_weight,
                reduction_override=None,
                **kwargs):
        """Calculate the GHM-C loss.

        Args:
            pred (float tensor of size [batch_num, class_num]):
                The direct prediction of classification fc layer.
            target (float tensor of size [batch_num, class_num]):
                Binary class target for each sample.
            label_weight (float tensor of size [batch_num, class_num]):
                the value is 1 if the sample is valid and 0 if ignored.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        Returns:
            The gradient harmonized loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        # the target should be binary class label; expand index labels to
        # one-hot when caller passed class indices instead.
        if pred.dim() != target.dim():
            target, label_weight = _expand_onehot_labels(
                target, label_weight, pred.size(-1))
        target, label_weight = target.float(), label_weight.float()
        edges = self.edges
        mmt = self.momentum
        weights = torch.zeros_like(pred)

        # gradient length: |sigmoid(pred) - target| is the magnitude of the
        # BCE gradient w.r.t. pred; detached so binning is not differentiated.
        g = torch.abs(pred.sigmoid().detach() - target)

        valid = label_weight > 0
        # Total valid samples; floor of 1.0 guards division when none valid.
        tot = max(valid.float().sum().item(), 1.0)
        n = 0  # n valid bins
        for i in range(self.bins):
            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                if mmt > 0:
                    # EMA-smoothed bin population (in-place buffer update;
                    # stateful across forward calls).
                    self.acc_sum[i] = mmt * self.acc_sum[i] \
                        + (1 - mmt) * num_in_bin
                    weights[inds] = tot / self.acc_sum[i]
                else:
                    # Weight inversely proportional to bin density.
                    weights[inds] = tot / num_in_bin
                n += 1
        if n > 0:
            # Normalize by the number of non-empty bins.
            weights = weights / n

        loss = F.binary_cross_entropy_with_logits(
            pred, target, reduction='none')
        # avg_factor is the valid-sample count, not the weight sum.
        loss = weight_reduce_loss(
            loss, weights, reduction=reduction, avg_factor=tot)
        return loss * self.loss_weight


# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module()
class GHMR(nn.Module):
    """GHM Regression Loss.

    Details of the theorem can be viewed in the paper
    `Gradient Harmonized Single-stage Detector
    <https://arxiv.org/abs/1811.05181>`_.

    Args:
        mu (float): The parameter for the Authentic Smooth L1 loss.
        bins (int): Number of the unit regions for distribution calculation.
        momentum (float): The parameter for moving average.
        loss_weight (float): The weight of the total GHM-R loss.
        reduction (str): Options are "none", "mean" and "sum".
            Defaults to "mean"
    """

    def __init__(self,
                 mu=0.02,
                 bins=10,
                 momentum=0,
                 loss_weight=1.0,
                 reduction='mean'):
        super(GHMR, self).__init__()
        self.mu = mu
        self.bins = bins
        edges = torch.arange(bins + 1).float() / bins
        self.register_buffer('edges', edges)
        # Unlike GHMC, the ASL1 gradient length g below is < 1 but the edge
        # is set far above any reachable value so the last bin is unbounded.
        self.edges[-1] = 1e3
        self.momentum = momentum
        if momentum > 0:
            # EMA of per-bin sample counts (see GHMC).
            acc_sum = torch.zeros(bins)
            self.register_buffer('acc_sum', acc_sum)
        self.loss_weight = loss_weight
        self.reduction = reduction

    # TODO: support reduction parameter
    def forward(self,
                pred,
                target,
                label_weight,
                avg_factor=None,
                reduction_override=None):
        """Calculate the GHM-R loss.

        Args:
            pred (float tensor of size [batch_num, 4 (* class_num)]):
                The prediction of box regression layer. Channel number can be
                4 or 4 * class_num depending on whether it is class-agnostic.
            target (float tensor of size [batch_num, 4 (* class_num)]):
                The target regression values with the same size of pred.
            label_weight (float tensor of size [batch_num, 4 (* class_num)]):
                The weight of each sample, 0 if ignored.
            avg_factor (int, optional): Accepted for interface compatibility
                but currently unused; the valid-weight total is used as the
                averaging factor instead.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        Returns:
            The gradient harmonized loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        mu = self.mu
        edges = self.edges
        mmt = self.momentum
        # ASL1 loss: smooth sqrt(d^2 + mu^2) - mu, a differentiable
        # approximation of |d| with bounded gradient.
        diff = pred - target
        loss = torch.sqrt(diff * diff + mu * mu) - mu

        # gradient length: |d / sqrt(mu^2 + d^2)|, i.e. |dLoss/dd|, in [0, 1).
        g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
        weights = torch.zeros_like(g)

        valid = label_weight > 0
        tot = max(label_weight.float().sum().item(), 1.0)
        n = 0  # n: valid bins
        for i in range(self.bins):
            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                n += 1
                if mmt > 0:
                    # EMA-smoothed bin population (in-place buffer update).
                    self.acc_sum[i] = mmt * self.acc_sum[i] \
                        + (1 - mmt) * num_in_bin
                    weights[inds] = tot / self.acc_sum[i]
                else:
                    weights[inds] = tot / num_in_bin
        if n > 0:
            weights /= n

        loss = weight_reduce_loss(
            loss, weights, reduction=reduction, avg_factor=tot)
        return loss * self.loss_weight
7,923
36.028037
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/mse_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F

from ..builder import LOSSES
from .utils import weighted_loss


@weighted_loss
def mse_loss(pred, target):
    """Wrapper of mse loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Element-wise squared error with the same shape as
            ``pred``; weighting and reduction are applied by the
            ``weighted_loss`` decorator.
    """
    return F.mse_loss(pred, target, reduction='none')


@LOSSES.register_module()
class MSELoss(nn.Module):
    """MSELoss.

    Args:
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Weight of the loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss = self.loss_weight * mse_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return loss
1,905
31.862069
78
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/pisa_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from mmdet.core import bbox_overlaps


@mmcv.jit(derivate=True, coderize=True)
def isr_p(cls_score,
          bbox_pred,
          bbox_targets,
          rois,
          sampling_results,
          loss_cls,
          bbox_coder,
          k=2,
          bias=0,
          num_class=80):
    """Importance-based Sample Reweighting (ISR_P), positive part.

    Reweights the classification label weights of positive samples
    according to their IoU Hierarchical Local Rank (IoU-HLR), then
    renormalizes so the total classification loss value is unchanged.
    Note: ``label_weights`` (inside ``bbox_targets``) and ``ious`` are
    modified in place.

    Args:
        cls_score (Tensor): Predicted classification scores.
        bbox_pred (Tensor): Predicted bbox deltas.
        bbox_targets (tuple[Tensor]): A tuple of bbox targets; they are
            labels, label_weights, bbox_targets, bbox_weights, respectively.
        rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs
            (two_stage) in shape (n, 5).
        sampling_results (obj): Sampling results.
        loss_cls (func): Classification loss func of the head.
        bbox_coder (obj): BBox coder of the head.
        k (float): Power of the non-linear mapping.
        bias (float): Shift of the non-linear mapping.
        num_class (int): Number of classes, default: 80.

    Return:
        tuple([Tensor]): labels, imp_based_label_weights, bbox_targets,
            bbox_target_weights
    """
    labels, label_weights, bbox_targets, bbox_weights = bbox_targets
    # Foreground samples have labels in [0, num_class); num_class itself is
    # the background index.
    pos_label_inds = ((labels >= 0) &
                      (labels < num_class)).nonzero().reshape(-1)
    pos_labels = labels[pos_label_inds]

    # if no positive samples, return the original targets
    num_pos = float(pos_label_inds.size(0))
    if num_pos == 0:
        return labels, label_weights, bbox_targets, bbox_weights

    # merge pos_assigned_gt_inds of per image to a single tensor
    # (offsetting each image's gt indices so they are globally unique).
    gts = list()
    last_max_gt = 0
    for i in range(len(sampling_results)):
        gt_i = sampling_results[i].pos_assigned_gt_inds
        gts.append(gt_i + last_max_gt)
        if len(gt_i) != 0:
            last_max_gt = gt_i.max() + 1
    gts = torch.cat(gts)
    assert len(gts) == num_pos

    # The reweighting is based on current predictions only; no gradient
    # should flow through the ranking computation.
    cls_score = cls_score.detach()
    bbox_pred = bbox_pred.detach()

    # For single stage detectors, rois here indicate anchors, in shape (N, 4)
    # For two stage detectors, rois are in shape (N, 5)
    if rois.size(-1) == 5:
        pos_rois = rois[pos_label_inds][:, 1:]
    else:
        pos_rois = rois[pos_label_inds]

    # Class-specific regression predicts 4 values per class; pick the deltas
    # of each sample's own class.
    if bbox_pred.size(-1) > 4:
        bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
        pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4)
    else:
        pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4)

    # compute iou of the predicted bbox and the corresponding GT
    pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4)
    pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred)
    target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target)
    ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True)

    pos_imp_weights = label_weights[pos_label_inds]
    # Two steps to compute IoU-HLR. Samples are first sorted by IoU locally,
    # then sorted again within the same-rank group
    max_l_num = pos_labels.bincount().max()
    for label in pos_labels.unique():
        l_inds = (pos_labels == label).nonzero().view(-1)
        l_gts = gts[l_inds]
        for t in l_gts.unique():
            t_inds = l_inds[l_gts == t]
            t_ious = ious[t_inds]
            _, t_iou_rank_idx = t_ious.sort(descending=True)
            _, t_iou_rank = t_iou_rank_idx.sort()
            # Offset IoUs by per-gt local rank so the subsequent per-class
            # sort interleaves same-rank samples across gts (in-place).
            ious[t_inds] += max_l_num - t_iou_rank.float()
        l_ious = ious[l_inds]
        _, l_iou_rank_idx = l_ious.sort(descending=True)
        _, l_iou_rank = l_iou_rank_idx.sort()  # IoU-HLR
        # linearly map HLR to label weights
        pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num

    # Non-linear mapping: weights in [bias, 1] raised to power k.
    pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k)

    # normalize to make the new weighted loss value equal to the original loss
    pos_loss_cls = loss_cls(
        cls_score[pos_label_inds], pos_labels, reduction_override='none')
    if pos_loss_cls.dim() > 1:
        ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:,
                                                                        None]
        new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None]
    else:
        ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds]
        new_pos_loss_cls = pos_loss_cls * pos_imp_weights
    pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum()
    pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio
    # Write the reweighted importance back in place.
    label_weights[pos_label_inds] = pos_imp_weights

    bbox_targets = labels, label_weights, bbox_targets, bbox_weights
    return bbox_targets


@mmcv.jit(derivate=True, coderize=True)
def carl_loss(cls_score,
              labels,
              bbox_pred,
              bbox_targets,
              loss_bbox,
              k=1,
              bias=0.2,
              avg_factor=None,
              sigmoid=False,
              num_class=80):
    """Classification-Aware Regression Loss (CARL).

    Scales each positive sample's regression loss by a weight derived from
    its (detached-free) classification score, so regression and
    classification are jointly optimized.

    Args:
        cls_score (Tensor): Predicted classification scores.
        labels (Tensor): Targets of classification.
        bbox_pred (Tensor): Predicted bbox deltas.
        bbox_targets (Tensor): Target of bbox regression.
        loss_bbox (func): Regression loss func of the head.
        k (float): Power of the non-linear mapping.
        bias (float): Shift of the non-linear mapping.
        avg_factor (int): Average factor used in regression loss.
        sigmoid (bool): Activation of the classification score.
        num_class (int): Number of classes, default: 80.

    Return:
        dict: CARL loss dict.
    """
    pos_label_inds = ((labels >= 0) &
                      (labels < num_class)).nonzero().reshape(-1)
    if pos_label_inds.numel() == 0:
        # No positives: return a zero that still depends on cls_score so the
        # graph stays connected.
        return dict(loss_carl=cls_score.sum()[None] * 0.)
    pos_labels = labels[pos_label_inds]

    # multiply pos_cls_score with the corresponding bbox weight
    # and remain gradient
    if sigmoid:
        pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels]
    else:
        pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels]
    carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k)

    # normalize carl_loss_weight to make its sum equal to num positive
    num_pos = float(pos_cls_score.size(0))
    weight_ratio = num_pos / carl_loss_weights.sum()
    carl_loss_weights *= weight_ratio

    if avg_factor is None:
        avg_factor = bbox_targets.size(0)
    # if is class agnostic, bbox pred is in shape (N, 4)
    # otherwise, bbox pred is in shape (N, #classes, 4)
    if bbox_pred.size(-1) > 4:
        bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
        pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels]
    else:
        pos_bbox_preds = bbox_pred[pos_label_inds]
    ori_loss_reg = loss_bbox(
        pos_bbox_preds,
        bbox_targets[pos_label_inds],
        reduction_override='none') / avg_factor
    loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum()
    return dict(loss_carl=loss_carl[None])
7,216
38.010811
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/balanced_l1_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
import torch.nn as nn

from ..builder import LOSSES
from .utils import weighted_loss


@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred,
                     target,
                     beta=1.0,
                     alpha=0.5,
                     gamma=1.5,
                     reduction='mean'):
    """Calculate balanced L1 loss.

    Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_

    Args:
        pred (torch.Tensor): The prediction with shape (N, 4).
        target (torch.Tensor): The learning target of the prediction with
            shape (N, 4).
        beta (float): The loss is a piecewise function of prediction and
            target and ``beta`` serves as a threshold for the difference
            between the prediction and target. Defaults to 1.0.
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss.
            Defaults to 1.5.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".

    Returns:
        torch.Tensor: The calculated loss
    """
    assert beta > 0
    if target.numel() == 0:
        # Empty target: return a zero connected to pred's graph.
        return pred.sum() * 0

    assert pred.size() == target.size()

    abs_diff = torch.abs(pred - target)
    # b ties the two pieces of the loss together at |diff| == beta.
    b = np.e**(gamma / alpha) - 1
    # Log-promoted branch for inliers (|diff| < beta).
    inlier = (alpha / b * (b * abs_diff + 1) * torch.log(b * abs_diff / beta + 1)
              - alpha * abs_diff)
    # Linear branch for outliers.
    outlier = gamma * abs_diff + gamma / b - alpha * beta
    return torch.where(abs_diff < beta, inlier, outlier)


@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
    """Balanced L1 Loss.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Args:
        alpha (float): The denominator ``alpha`` in the balanced L1 loss.
            Defaults to 0.5.
        gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
        beta (float, optional): The loss is a piecewise function of prediction
            and target. ``beta`` serves as a threshold for the difference
            between the prediction and target. Defaults to 1.0.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0
    """

    def __init__(self,
                 alpha=0.5,
                 gamma=1.5,
                 beta=1.0,
                 reduction='mean',
                 loss_weight=1.0):
        super(BalancedL1Loss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction with shape (N, 4).
            target (torch.Tensor): The learning target of the prediction with
                shape (N, 4).
            weight (torch.Tensor, optional): Sample-wise loss weight with
                shape (N, ).
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        return self.loss_weight * balanced_l1_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
4,252
33.024
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/iou_loss.py
# Copyright (c) OpenMMLab. All rights reserved. import math import warnings import mmcv import torch import torch.nn as nn from mmdet.core import bbox_overlaps from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def iou_loss(pred, target, linear=False, mode='log', eps=1e-6): """IoU loss. Computing the IoU loss between a set of predicted bboxes and target bboxes. The loss is calculated as negative log of IoU. Args: pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). linear (bool, optional): If True, use linear scale of loss instead of log scale. Default: False. mode (str): Loss scaling mode, including "linear", "square", and "log". Default: 'log' eps (float): Eps to avoid log(0). Return: torch.Tensor: Loss tensor. """ assert mode in ['linear', 'square', 'log'] if linear: mode = 'linear' warnings.warn('DeprecationWarning: Setting "linear=True" in ' 'iou_loss is deprecated, please use "mode=`linear`" ' 'instead.') ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps) if mode == 'linear': loss = 1 - ious elif mode == 'square': loss = 1 - ious**2 elif mode == 'log': loss = -ious.log() else: raise NotImplementedError return loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3): """BIoULoss. This is an implementation of paper `Improving Object Localization with Fitness NMS and Bounded IoU Loss. <https://arxiv.org/abs/1711.00164>`_. Args: pred (torch.Tensor): Predicted bboxes. target (torch.Tensor): Target bboxes. beta (float): beta parameter in smoothl1. eps (float): eps to avoid NaN. 
""" pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5 pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5 pred_w = pred[:, 2] - pred[:, 0] pred_h = pred[:, 3] - pred[:, 1] with torch.no_grad(): target_ctrx = (target[:, 0] + target[:, 2]) * 0.5 target_ctry = (target[:, 1] + target[:, 3]) * 0.5 target_w = target[:, 2] - target[:, 0] target_h = target[:, 3] - target[:, 1] dx = target_ctrx - pred_ctrx dy = target_ctry - pred_ctry loss_dx = 1 - torch.max( (target_w - 2 * dx.abs()) / (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx)) loss_dy = 1 - torch.max( (target_h - 2 * dy.abs()) / (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy)) loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w / (target_w + eps)) loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h / (target_h + eps)) # view(..., -1) does not work for empty tensor loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], dim=-1).flatten(1) loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta, loss_comb - 0.5 * beta) return loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def giou_loss(pred, target, eps=1e-7): r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression <https://arxiv.org/abs/1902.09630>`_. Args: pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor. """ gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps) loss = 1 - gious return loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def diou_loss(pred, target, eps=1e-7): r"""`Implementation of Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_. Code is modified from https://github.com/Zzh-tju/DIoU. Args: pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). 
eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor. """ # overlap lt = torch.max(pred[:, :2], target[:, :2]) rb = torch.min(pred[:, 2:], target[:, 2:]) wh = (rb - lt).clamp(min=0) overlap = wh[:, 0] * wh[:, 1] # union ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) union = ap + ag - overlap + eps # IoU ious = overlap / union # enclose area enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) cw = enclose_wh[:, 0] ch = enclose_wh[:, 1] c2 = cw**2 + ch**2 + eps b1_x1, b1_y1 = pred[:, 0], pred[:, 1] b1_x2, b1_y2 = pred[:, 2], pred[:, 3] b2_x1, b2_y1 = target[:, 0], target[:, 1] b2_x2, b2_y2 = target[:, 2], target[:, 3] left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4 right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4 rho2 = left + right # DIoU dious = ious - rho2 / c2 loss = 1 - dious return loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def ciou_loss(pred, target, eps=1e-7): r"""`Implementation of paper `Enhancing Geometric Factors into Model Learning and Inference for Object Detection and Instance Segmentation <https://arxiv.org/abs/2005.03572>`_. Code is modified from https://github.com/Zzh-tju/CIoU. Args: pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor. 
""" # overlap lt = torch.max(pred[:, :2], target[:, :2]) rb = torch.min(pred[:, 2:], target[:, 2:]) wh = (rb - lt).clamp(min=0) overlap = wh[:, 0] * wh[:, 1] # union ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) union = ap + ag - overlap + eps # IoU ious = overlap / union # enclose area enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) cw = enclose_wh[:, 0] ch = enclose_wh[:, 1] c2 = cw**2 + ch**2 + eps b1_x1, b1_y1 = pred[:, 0], pred[:, 1] b1_x2, b1_y2 = pred[:, 2], pred[:, 3] b2_x1, b2_y1 = target[:, 0], target[:, 1] b2_x2, b2_y2 = target[:, 2], target[:, 3] w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4 right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4 rho2 = left + right factor = 4 / math.pi**2 v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) with torch.no_grad(): alpha = (ious > 0.5).float() * v / (1 - ious + v) # CIoU cious = ious - (rho2 / c2 + alpha * v) loss = 1 - cious.clamp(min=-1.0, max=1.0) return loss @LOSSES.register_module() class IoULoss(nn.Module): """IoULoss. Computing the IoU loss between a set of predicted bboxes and target bboxes. Args: linear (bool): If True, use linear scale of loss else determined by mode. Default: False. eps (float): Eps to avoid log(0). reduction (str): Options are "none", "mean" and "sum". loss_weight (float): Weight of loss. mode (str): Loss scaling mode, including "linear", "square", and "log". 
Default: 'log' """ def __init__(self, linear=False, eps=1e-6, reduction='mean', loss_weight=1.0, mode='log'): super(IoULoss, self).__init__() assert mode in ['linear', 'square', 'log'] if linear: mode = 'linear' warnings.warn('DeprecationWarning: Setting "linear=True" in ' 'IOULoss is deprecated, please use "mode=`linear`" ' 'instead.') self.mode = mode self.linear = linear self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. Options are "none", "mean" and "sum". 
""" assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if (weight is not None) and (not torch.any(weight > 0)) and ( reduction != 'none'): if pred.dim() == weight.dim() + 1: weight = weight.unsqueeze(1) return (pred * weight).sum() # 0 if weight is not None and weight.dim() > 1: # TODO: remove this in the future # reduce the weight of shape (n, 4) to (n,) to match the # iou_loss of shape (n,) assert weight.shape == pred.shape weight = weight.mean(-1) loss = self.loss_weight * iou_loss( pred, target, weight, mode=self.mode, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module() class BoundedIoULoss(nn.Module): def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0): super(BoundedIoULoss, self).__init__() self.beta = beta self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is not None and not torch.any(weight > 0): if pred.dim() == weight.dim() + 1: weight = weight.unsqueeze(1) return (pred * weight).sum() # 0 assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss = self.loss_weight * bounded_iou_loss( pred, target, weight, beta=self.beta, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module() class GIoULoss(nn.Module): def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): super(GIoULoss, self).__init__() self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is not None and not torch.any(weight > 0): if pred.dim() == weight.dim() + 1: weight = weight.unsqueeze(1) return (pred * weight).sum() # 0 assert reduction_override 
in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if weight is not None and weight.dim() > 1: # TODO: remove this in the future # reduce the weight of shape (n, 4) to (n,) to match the # giou_loss of shape (n,) assert weight.shape == pred.shape weight = weight.mean(-1) loss = self.loss_weight * giou_loss( pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module() class DIoULoss(nn.Module): def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): super(DIoULoss, self).__init__() self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is not None and not torch.any(weight > 0): if pred.dim() == weight.dim() + 1: weight = weight.unsqueeze(1) return (pred * weight).sum() # 0 assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if weight is not None and weight.dim() > 1: # TODO: remove this in the future # reduce the weight of shape (n, 4) to (n,) to match the # giou_loss of shape (n,) assert weight.shape == pred.shape weight = weight.mean(-1) loss = self.loss_weight * diou_loss( pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module() class CIoULoss(nn.Module): def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): super(CIoULoss, self).__init__() self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is not None and not torch.any(weight > 0): if pred.dim() == weight.dim() + 1: weight = weight.unsqueeze(1) return (pred * weight).sum() # 0 assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override 
if reduction_override else self.reduction) if weight is not None and weight.dim() > 1: # TODO: remove this in the future # reduce the weight of shape (n, 4) to (n,) to match the # giou_loss of shape (n,) assert weight.shape == pred.shape weight = weight.mean(-1) loss = self.loss_weight * ciou_loss( pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss
15,714
32.084211
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/smooth_l1_loss.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch import torch.nn as nn from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def smooth_l1_loss(pred, target, beta=1.0): """Smooth L1 loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. beta (float, optional): The threshold in the piecewise function. Defaults to 1.0. Returns: torch.Tensor: Calculated loss """ assert beta > 0 if target.numel() == 0: return pred.sum() * 0 assert pred.size() == target.size() diff = torch.abs(pred - target) loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta) return loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def l1_loss(pred, target): """L1 loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. Returns: torch.Tensor: Calculated loss """ if target.numel() == 0: return pred.sum() * 0 assert pred.size() == target.size() loss = torch.abs(pred - target) return loss @LOSSES.register_module() class SmoothL1Loss(nn.Module): """Smooth L1 loss. Args: beta (float, optional): The threshold in the piecewise function. Defaults to 1.0. reduction (str, optional): The method to reduce the loss. Options are "none", "mean" and "sum". Defaults to "mean". loss_weight (float, optional): The weight of loss. """ def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): super(SmoothL1Loss, self).__init__() self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. 
Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_bbox = self.loss_weight * smooth_l1_loss( pred, target, weight, beta=self.beta, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_bbox @LOSSES.register_module() class L1Loss(nn.Module): """L1 loss. Args: reduction (str, optional): The method to reduce the loss. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of loss. """ def __init__(self, reduction='mean', loss_weight=1.0): super(L1Loss, self).__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_bbox = self.loss_weight * l1_loss( pred, target, weight, reduction=reduction, avg_factor=avg_factor) return loss_bbox
4,635
30.537415
78
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/gfocal_loss.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def quality_focal_loss(pred, target, beta=2.0): r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: pred (torch.Tensor): Predicted joint representation of classification and quality (IoU) estimation with shape (N, C), C is the number of classes. target (tuple([torch.Tensor])): Target category label with shape (N,) and target quality label with shape (N,). beta (float): The beta parameter for calculating the modulating factor. Defaults to 2.0. Returns: torch.Tensor: Loss tensor with shape (N,). """ assert len(target) == 2, """target for QFL must be a tuple of two elements, including category label and quality label, respectively""" # label denotes the category id, score denotes the quality score label, score = target # negatives are supervised by 0 quality score pred_sigmoid = pred.sigmoid() scale_factor = pred_sigmoid zerolabel = scale_factor.new_zeros(pred.shape) loss = F.binary_cross_entropy_with_logits( pred, zerolabel, reduction='none') * scale_factor.pow(beta) # FG cat_id: [0, num_classes -1], BG cat_id: num_classes bg_class_ind = pred.size(1) pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) pos_label = label[pos].long() # positives are supervised by bbox quality (IoU) score scale_factor = score[pos] - pred_sigmoid[pos, pos_label] loss[pos, pos_label] = F.binary_cross_entropy_with_logits( pred[pos, pos_label], score[pos], reduction='none') * scale_factor.abs().pow(beta) loss = loss.sum(dim=1, keepdim=False) return loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss def distribution_focal_loss(pred, label): r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: 
Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: pred (torch.Tensor): Predicted general distribution of bounding boxes (before softmax) with shape (N, n+1), n is the max value of the integral set `{0, ..., n}` in paper. label (torch.Tensor): Target distance label for bounding boxes with shape (N,). Returns: torch.Tensor: Loss tensor with shape (N,). """ dis_left = label.long() dis_right = dis_left + 1 weight_left = dis_right.float() - label weight_right = label - dis_left.float() loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \ + F.cross_entropy(pred, dis_right, reduction='none') * weight_right return loss @LOSSES.register_module() class QualityFocalLoss(nn.Module): r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: use_sigmoid (bool): Whether sigmoid operation is conducted in QFL. Defaults to True. beta (float): The beta parameter for calculating the modulating factor. Defaults to 2.0. reduction (str): Options are "none", "mean" and "sum". loss_weight (float): Loss weight of current loss. """ def __init__(self, use_sigmoid=True, beta=2.0, reduction='mean', loss_weight=1.0): super(QualityFocalLoss, self).__init__() assert use_sigmoid is True, 'Only sigmoid in QFL supported now.' self.use_sigmoid = use_sigmoid self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): Predicted joint representation of classification and quality (IoU) estimation with shape (N, C), C is the number of classes. target (tuple([torch.Tensor])): Target category label with shape (N,) and target quality label with shape (N,). weight (torch.Tensor, optional): The weight of loss for each prediction. 
Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if self.use_sigmoid: loss_cls = self.loss_weight * quality_focal_loss( pred, target, weight, beta=self.beta, reduction=reduction, avg_factor=avg_factor) else: raise NotImplementedError return loss_cls @LOSSES.register_module() class DistributionFocalLoss(nn.Module): r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection <https://arxiv.org/abs/2006.04388>`_. Args: reduction (str): Options are `'none'`, `'mean'` and `'sum'`. loss_weight (float): Loss weight of current loss. """ def __init__(self, reduction='mean', loss_weight=1.0): super(DistributionFocalLoss, self).__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): Predicted general distribution of bounding boxes (before softmax) with shape (N, n+1), n is the max value of the integral set `{0, ..., n}` in paper. target (torch.Tensor): Target distance label for bounding boxes with shape (N,). weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. 
""" assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_cls = self.loss_weight * distribution_focal_loss( pred, target, weight, reduction=reduction, avg_factor=avg_factor) return loss_cls
7,458
38.257895
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/varifocal_loss.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES from .utils import weight_reduce_loss @mmcv.jit(derivate=True, coderize=True) def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', avg_factor=None): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning target of the iou-aware classification score with shape (N, C), C is the number of classes. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive example with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. 
""" # pred and target should be of the same size assert pred.size() == target.size() pred_sigmoid = pred.sigmoid() target = target.type_as(pred) if iou_weighted: focal_weight = target * (target > 0.0).float() + \ alpha * (pred_sigmoid - target).abs().pow(gamma) * \ (target <= 0.0).float() else: focal_weight = (target > 0.0).float() + \ alpha * (pred_sigmoid - target).abs().pow(gamma) * \ (target <= 0.0).float() loss = F.binary_cross_entropy_with_logits( pred, target, reduction='none') * focal_weight loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss @LOSSES.register_module() class VarifocalLoss(nn.Module): def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', loss_weight=1.0): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: use_sigmoid (bool, optional): Whether the prediction is used for sigmoid or softmax. Defaults to True. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive examples with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". loss_weight (float, optional): Weight of loss. Defaults to 1.0. """ super(VarifocalLoss, self).__init__() assert use_sigmoid is True, \ 'Only sigmoid varifocal loss supported now.' assert alpha >= 0.0 self.use_sigmoid = use_sigmoid self.alpha = alpha self.gamma = gamma self.iou_weighted = iou_weighted self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. 
weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if self.use_sigmoid: loss_cls = self.loss_weight * varifocal_loss( pred, target, weight, alpha=self.alpha, gamma=self.gamma, iou_weighted=self.iou_weighted, reduction=reduction, avg_factor=avg_factor) else: raise NotImplementedError return loss_cls
5,365
38.748148
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/utils.py
# Copyright (c) OpenMMLab. All rights reserved. import functools import mmcv import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) # none: 0, elementwise_mean:1, sum: 2 if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() @mmcv.jit(derivate=True, coderize=True) def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ # if weight is specified, apply element-wise weight if weight is not None: loss = loss * weight # if avg_factor is not specified, just reduce the loss if avg_factor is None: loss = reduce_loss(loss, reduction) else: # if reduction is mean, then average the loss by avg_factor if reduction == 'mean': loss = loss.sum() / avg_factor # if reduction is 'none', then do nothing, otherwise raise an error elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. 
:Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs): # get element-wise loss loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper
3,103
29.431373
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/seesaw_loss.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES from .accuracy import accuracy from .cross_entropy_loss import cross_entropy from .utils import weight_reduce_loss def seesaw_ce_loss(cls_score, labels, label_weights, cum_samples, num_classes, p, q, eps, reduction='mean', avg_factor=None): """Calculate the Seesaw CrossEntropy loss. Args: cls_score (torch.Tensor): The prediction with shape (N, C), C is the number of classes. labels (torch.Tensor): The learning label of the prediction. label_weights (torch.Tensor): Sample-wise loss weight. cum_samples (torch.Tensor): Cumulative samples for each category. num_classes (int): The number of classes. p (float): The ``p`` in the mitigation factor. q (float): The ``q`` in the compenstation factor. eps (float): The minimal value of divisor to smooth the computation of compensation factor reduction (str, optional): The method used to reduce the loss. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. 
Returns: torch.Tensor: The calculated loss """ assert cls_score.size(-1) == num_classes assert len(cum_samples) == num_classes onehot_labels = F.one_hot(labels, num_classes) seesaw_weights = cls_score.new_ones(onehot_labels.size()) # mitigation factor if p > 0: sample_ratio_matrix = cum_samples[None, :].clamp( min=1) / cum_samples[:, None].clamp(min=1) index = (sample_ratio_matrix < 1.0).float() sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index) mitigation_factor = sample_weights[labels.long(), :] seesaw_weights = seesaw_weights * mitigation_factor # compensation factor if q > 0: scores = F.softmax(cls_score.detach(), dim=1) self_scores = scores[ torch.arange(0, len(scores)).to(scores.device).long(), labels.long()] score_matrix = scores / self_scores[:, None].clamp(min=eps) index = (score_matrix > 1.0).float() compensation_factor = score_matrix.pow(q) * index + (1 - index) seesaw_weights = seesaw_weights * compensation_factor cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels)) loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none') if label_weights is not None: label_weights = label_weights.float() loss = weight_reduce_loss( loss, weight=label_weights, reduction=reduction, avg_factor=avg_factor) return loss @LOSSES.register_module() class SeesawLoss(nn.Module): """ Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021) arXiv: https://arxiv.org/abs/2008.10032 Args: use_sigmoid (bool, optional): Whether the prediction uses sigmoid of softmax. Only False is supported. p (float, optional): The ``p`` in the mitigation factor. Defaults to 0.8. q (float, optional): The ``q`` in the compenstation factor. Defaults to 2.0. num_classes (int, optional): The number of classes. Default to 1203 for LVIS v1 dataset. eps (float, optional): The minimal value of divisor to smooth the computation of compensation factor reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". 
loss_weight (float, optional): The weight of the loss. Defaults to 1.0 return_dict (bool, optional): Whether return the losses as a dict. Default to True. """ def __init__(self, use_sigmoid=False, p=0.8, q=2.0, num_classes=1203, eps=1e-2, reduction='mean', loss_weight=1.0, return_dict=True): super(SeesawLoss, self).__init__() assert not use_sigmoid self.use_sigmoid = False self.p = p self.q = q self.num_classes = num_classes self.eps = eps self.reduction = reduction self.loss_weight = loss_weight self.return_dict = return_dict # 0 for pos, 1 for neg self.cls_criterion = seesaw_ce_loss # cumulative samples for each category self.register_buffer( 'cum_samples', torch.zeros(self.num_classes + 1, dtype=torch.float)) # custom output channels of the classifier self.custom_cls_channels = True # custom activation of cls_score self.custom_activation = True # custom accuracy of the classsifier self.custom_accuracy = True def _split_cls_score(self, cls_score): # split cls_score to cls_score_classes and cls_score_objectness assert cls_score.size(-1) == self.num_classes + 2 cls_score_classes = cls_score[..., :-2] cls_score_objectness = cls_score[..., -2:] return cls_score_classes, cls_score_objectness def get_cls_channels(self, num_classes): """Get custom classification channels. Args: num_classes (int): The number of classes. Returns: int: The custom classification channels. """ assert num_classes == self.num_classes return num_classes + 2 def get_activation(self, cls_score): """Get custom activation of cls_score. Args: cls_score (torch.Tensor): The prediction with shape (N, C + 2). Returns: torch.Tensor: The custom activation of cls_score with shape (N, C + 1). 
""" cls_score_classes, cls_score_objectness = self._split_cls_score( cls_score) score_classes = F.softmax(cls_score_classes, dim=-1) score_objectness = F.softmax(cls_score_objectness, dim=-1) score_pos = score_objectness[..., [0]] score_neg = score_objectness[..., [1]] score_classes = score_classes * score_pos scores = torch.cat([score_classes, score_neg], dim=-1) return scores def get_accuracy(self, cls_score, labels): """Get custom accuracy w.r.t. cls_score and labels. Args: cls_score (torch.Tensor): The prediction with shape (N, C + 2). labels (torch.Tensor): The learning label of the prediction. Returns: Dict [str, torch.Tensor]: The accuracy for objectness and classes, respectively. """ pos_inds = labels < self.num_classes obj_labels = (labels == self.num_classes).long() cls_score_classes, cls_score_objectness = self._split_cls_score( cls_score) acc_objectness = accuracy(cls_score_objectness, obj_labels) acc_classes = accuracy(cls_score_classes[pos_inds], labels[pos_inds]) acc = dict() acc['acc_objectness'] = acc_objectness acc['acc_classes'] = acc_classes return acc def forward(self, cls_score, labels, label_weights=None, avg_factor=None, reduction_override=None): """Forward function. Args: cls_score (torch.Tensor): The prediction with shape (N, C + 2). labels (torch.Tensor): The learning label of the prediction. label_weights (torch.Tensor, optional): Sample-wise loss weight. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". Returns: torch.Tensor | Dict [str, torch.Tensor]: if return_dict == False: The calculated loss | if return_dict == True: The dict of calculated losses for objectness and classes, respectively. 
""" assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) assert cls_score.size(-1) == self.num_classes + 2 pos_inds = labels < self.num_classes # 0 for pos, 1 for neg obj_labels = (labels == self.num_classes).long() # accumulate the samples for each category unique_labels = labels.unique() for u_l in unique_labels: inds_ = labels == u_l.item() self.cum_samples[u_l] += inds_.sum() if label_weights is not None: label_weights = label_weights.float() else: label_weights = labels.new_ones(labels.size(), dtype=torch.float) cls_score_classes, cls_score_objectness = self._split_cls_score( cls_score) # calculate loss_cls_classes (only need pos samples) if pos_inds.sum() > 0: loss_cls_classes = self.loss_weight * self.cls_criterion( cls_score_classes[pos_inds], labels[pos_inds], label_weights[pos_inds], self.cum_samples[:self.num_classes], self.num_classes, self.p, self.q, self.eps, reduction, avg_factor) else: loss_cls_classes = cls_score_classes[pos_inds].sum() # calculate loss_cls_objectness loss_cls_objectness = self.loss_weight * cross_entropy( cls_score_objectness, obj_labels, label_weights, reduction, avg_factor) if self.return_dict: loss_cls = dict() loss_cls['loss_cls_objectness'] = loss_cls_objectness loss_cls['loss_cls_classes'] = loss_cls_classes else: loss_cls = loss_cls_classes + loss_cls_objectness return loss_cls
10,136
37.543726
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/ae_loss.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch import torch.nn as nn import torch.nn.functional as F from ..builder import LOSSES @mmcv.jit(derivate=True, coderize=True) def ae_loss_per_image(tl_preds, br_preds, match): """Associative Embedding Loss in one image. Associative Embedding Loss including two parts: pull loss and push loss. Pull loss makes embedding vectors from same object closer to each other. Push loss distinguish embedding vector from different objects, and makes the gap between them is large enough. During computing, usually there are 3 cases: - no object in image: both pull loss and push loss will be 0. - one object in image: push loss will be 0 and pull loss is computed by the two corner of the only object. - more than one objects in image: pull loss is computed by corner pairs from each object, push loss is computed by each object with all other objects. We use confusion matrix with 0 in diagonal to compute the push loss. Args: tl_preds (tensor): Embedding feature map of left-top corner. br_preds (tensor): Embedding feature map of bottim-right corner. match (list): Downsampled coordinates pair of each ground truth box. """ tl_list, br_list, me_list = [], [], [] if len(match) == 0: # no object in image pull_loss = tl_preds.sum() * 0. push_loss = tl_preds.sum() * 0. 
else: for m in match: [tl_y, tl_x], [br_y, br_x] = m tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1) br_e = br_preds[:, br_y, br_x].view(-1, 1) tl_list.append(tl_e) br_list.append(br_e) me_list.append((tl_e + br_e) / 2.0) tl_list = torch.cat(tl_list) br_list = torch.cat(br_list) me_list = torch.cat(me_list) assert tl_list.size() == br_list.size() # N is object number in image, M is dimension of embedding vector N, M = tl_list.size() pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2) pull_loss = pull_loss.sum() / N margin = 1 # exp setting of CornerNet, details in section 3.3 of paper # confusion matrix of push loss conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list conf_weight = 1 - torch.eye(N).type_as(me_list) conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs()) if N > 1: # more than one object in current image push_loss = F.relu(conf_mat).sum() / (N * (N - 1)) else: push_loss = tl_preds.sum() * 0. return pull_loss, push_loss @LOSSES.register_module() class AssociativeEmbeddingLoss(nn.Module): """Associative Embedding Loss. More details can be found in `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and `CornerNet <https://arxiv.org/abs/1808.01244>`_ . Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_ # noqa: E501 Args: pull_weight (float): Loss weight for corners from same object. push_weight (float): Loss weight for corners from different object. """ def __init__(self, pull_weight=0.25, push_weight=0.25): super(AssociativeEmbeddingLoss, self).__init__() self.pull_weight = pull_weight self.push_weight = push_weight def forward(self, pred, target, match): """Forward function.""" batch = pred.size(0) pull_all, push_all = 0.0, 0.0 for i in range(batch): pull, push = ae_loss_per_image(pred[i], target[i], match[i]) pull_all += self.pull_weight * pull push_all += self.push_weight * push return pull_all, push_all
3,857
36.096154
143
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/accuracy.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn


@mmcv.jit(coderize=True)
def accuracy(pred, target, topk=1, thresh=None):
    """Calculate accuracy according to the prediction and target.

    Args:
        pred (torch.Tensor): The model prediction, shape (N, num_class).
        target (torch.Tensor): The target of each prediction, shape (N, ).
        topk (int | tuple[int], optional): If the predictions in ``topk``
            matches the target, the predictions will be regarded as
            correct ones. Defaults to 1.
        thresh (float, optional): If not None, predictions with scores
            under this threshold are considered incorrect. Default to None.

    Returns:
        float | tuple[float]: A single accuracy value when ``topk`` is an
            integer; otherwise a tuple with one accuracy per requested k.
    """
    assert isinstance(topk, (int, tuple))
    return_single = isinstance(topk, int)
    if return_single:
        topk = (topk, )

    maxk = max(topk)
    if pred.size(0) == 0:
        # Empty batch: report zero accuracy for every requested k.
        zeros = [pred.new_tensor(0.) for _ in topk]
        return zeros[0] if return_single else zeros

    assert pred.ndim == 2 and target.ndim == 1
    assert pred.size(0) == target.size(0)
    assert maxk <= pred.size(1), \
        f'maxk {maxk} exceeds pred dimension {pred.size(1)}'

    top_values, top_labels = pred.topk(maxk, dim=1)
    top_labels = top_labels.t()  # transpose to shape (maxk, N)
    correct = top_labels.eq(target.view(1, -1).expand_as(top_labels))
    if thresh is not None:
        # Only prediction values larger than thresh count as correct.
        correct = correct & (top_values > thresh).t()

    batch_size = pred.size(0)
    res = [
        correct[:k].reshape(-1).float().sum(0, keepdim=True).mul_(
            100.0 / batch_size) for k in topk
    ]
    return res[0] if return_single else res


class Accuracy(nn.Module):
    """Module wrapper around :func:`accuracy`.

    Args:
        topk (tuple, optional): The criterion used to calculate the
            accuracy. Defaults to (1,).
        thresh (float, optional): If not None, predictions with scores
            under this threshold are considered incorrect. Default to
            None.
    """

    def __init__(self, topk=(1, ), thresh=None):
        super().__init__()
        self.topk = topk
        self.thresh = thresh

    def forward(self, pred, target):
        """Forward function to calculate accuracy.

        Args:
            pred (torch.Tensor): Prediction of models.
            target (torch.Tensor): Target for each prediction.

        Returns:
            tuple[float]: The accuracies under different topk criterions.
        """
        return accuracy(pred, target, self.topk, self.thresh)
2,990
36.3875
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/focal_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss

from ..builder import LOSSES
from .utils import weight_reduce_loss

# NOTE(review): removed a leftover module-level ``import ipdb`` -- a
# debugger dependency that was never used in this file and would break
# deployments without ipdb installed.


# This method is only for debugging
def py_sigmoid_focal_loss(pred,
                          target,
                          weight=None,
                          gamma=2.0,
                          alpha=0.25,
                          reduction='mean',
                          avg_factor=None):
    """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes
        target (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    # pt is the probability of the *wrong* outcome; the modulating
    # factor pt**gamma down-weights easy examples.
    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) *
                    (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    if weight is not None:
        if weight.shape != loss.shape:
            if weight.size(0) == loss.size(0):
                # For most cases, weight is of shape (num_priors, ),
                # which means it does not have the second axis num_class
                weight = weight.view(-1, 1)
            else:
                # Sometimes, weight per anchor per class is also needed.
                # e.g. in FSAF. But it may be flattened of shape
                # (num_priors x num_class, ), while loss is still of shape
                # (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.view(loss.size(0), -1)
        assert weight.ndim == loss.ndim
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


def sigmoid_focal_loss(pred,
                       target,
                       weight=None,
                       gamma=2.0,
                       alpha=0.25,
                       reduction='mean',
                       avg_factor=None):
    r"""A warpper of cuda version `Focal Loss
    <https://arxiv.org/abs/1708.02002>`_.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        target (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 0.25.
        reduction (str, optional): The method used to reduce the loss into
            a scalar. Defaults to 'mean'. Options are "none", "mean" and
            "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
    """
    # Function.apply does not accept keyword arguments, so the decorator
    # "weighted_loss" is not applicable
    loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma,
                               alpha, None, 'none')
    if weight is not None:
        if weight.shape != loss.shape:
            if weight.size(0) == loss.size(0):
                # For most cases, weight is of shape (num_priors, ),
                # which means it does not have the second axis num_class
                weight = weight.view(-1, 1)
            else:
                # Sometimes, weight per anchor per class is also needed.
                # e.g. in FSAF. But it may be flattened of shape
                # (num_priors x num_class, ), while loss is still of shape
                # (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.view(loss.size(0), -1)
        assert weight.ndim == loss.ndim
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


@LOSSES.register_module()
class FocalLoss(nn.Module):

    def __init__(self,
                 use_sigmoid=True,
                 gamma=2.0,
                 alpha=0.25,
                 reduction='mean',
                 loss_weight=1.0):
        """`Focal Loss <https://arxiv.org/abs/1708.02002>`_

        Args:
            use_sigmoid (bool, optional): Whether to the prediction is
                used for sigmoid or softmax. Defaults to True.
            gamma (float, optional): The gamma for calculating the
                modulating factor. Defaults to 2.0.
            alpha (float, optional): A balanced form for Focal Loss.
                Defaults to 0.25.
            reduction (str, optional): The method used to reduce the loss
                into a scalar. Defaults to 'mean'. Options are "none",
                "mean" and "sum".
            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
        """
        super(FocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
        self.use_sigmoid = use_sigmoid
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning label of the prediction.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used
                to override the original reduction method of the loss.
                Options are "none", "mean" and "sum".

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.use_sigmoid:
            if torch.cuda.is_available() and pred.is_cuda:
                calculate_loss_func = sigmoid_focal_loss
            else:
                # CPU fallback: the CUDA op consumes integer labels while
                # the Python version needs a one-hot (the extra background
                # column is sliced off).
                num_classes = pred.size(1)
                target = F.one_hot(target, num_classes=num_classes + 1)
                target = target[:, :num_classes]
                calculate_loss_func = py_sigmoid_focal_loss

            loss_cls = self.loss_weight * calculate_loss_func(
                pred,
                target,
                weight,
                gamma=self.gamma,
                alpha=self.alpha,
                reduction=reduction,
                avg_factor=avg_factor)

        else:
            raise NotImplementedError
        return loss_cls
7,589
40.47541
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/cross_entropy_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F

from ..builder import LOSSES
from .utils import weight_reduce_loss


def cross_entropy(pred,
                  label,
                  weight=None,
                  reduction='mean',
                  avg_factor=None,
                  class_weight=None,
                  ignore_index=-100):
    """Calculate the CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the
            number of classes.
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (int | None): The label index to be ignored.
            If None, it will be set to default value. Default: -100.

    Returns:
        torch.Tensor: The calculated loss
    """
    # The default value of ignore_index is the same as F.cross_entropy
    ignore_index = -100 if ignore_index is None else ignore_index
    # element-wise losses
    loss = F.cross_entropy(
        pred,
        label,
        weight=class_weight,
        reduction='none',
        ignore_index=ignore_index)

    # apply weights and do the reduction
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)

    return loss


def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
    """Expand onehot labels to match the size of prediction.

    Returns a ``(bin_labels, bin_label_weights)`` pair where both tensors
    have shape ``(num_labels, label_channels)``; positions whose label is
    negative or equals ``ignore_index`` get zero weight.
    """
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    valid_mask = (labels >= 0) & (labels != ignore_index)
    # Only in-range labels are scattered into the one-hot matrix.
    inds = torch.nonzero(
        valid_mask & (labels < label_channels), as_tuple=False)

    if inds.numel() > 0:
        bin_labels[inds, labels[inds]] = 1

    valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),
                                               label_channels).float()
    if label_weights is None:
        bin_label_weights = valid_mask
    else:
        bin_label_weights = label_weights.view(-1, 1).repeat(
            1, label_channels)
        # Zero out weights of invalid / ignored positions.
        bin_label_weights *= valid_mask

    return bin_labels, bin_label_weights


def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None,
                         class_weight=None,
                         ignore_index=-100):
    """Calculate the binary CrossEntropy loss.

    Args:
        pred (torch.Tensor): The prediction with shape (N, 1).
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (int | None): The label index to be ignored.
            If None, it will be set to default value. Default: -100.

    Returns:
        torch.Tensor: The calculated loss.
    """
    # The default value of ignore_index is the same as F.cross_entropy
    ignore_index = -100 if ignore_index is None else ignore_index
    if pred.dim() != label.dim():
        # Class-index labels: expand to one-hot so BCE can be applied.
        label, weight = _expand_onehot_labels(label, weight, pred.size(-1),
                                              ignore_index)

    # weighted element-wise losses
    if weight is not None:
        weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(
        pred, label.float(), pos_weight=class_weight, reduction='none')
    # do the reduction for the weighted loss
    loss = weight_reduce_loss(
        loss, weight, reduction=reduction, avg_factor=avg_factor)

    return loss


def mask_cross_entropy(pred,
                       target,
                       label,
                       reduction='mean',
                       avg_factor=None,
                       class_weight=None,
                       ignore_index=None):
    """Calculate the CrossEntropy loss for masks.

    Args:
        pred (torch.Tensor): The prediction with shape (N, C, *), C is the
            number of classes. The trailing * indicates arbitrary shape.
        target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the
            mask corresponding object. This will be used to select the
            mask in the of the class which the object belongs to when the
            mask prediction if not class-agnostic.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (None): Placeholder, to be consistent with other loss.
            Default: None.

    Returns:
        torch.Tensor: The calculated loss

    Example:
        >>> N, C = 3, 11
        >>> H, W = 2, 2
        >>> pred = torch.randn(N, C, H, W) * 1000
        >>> target = torch.rand(N, H, W)
        >>> label = torch.randint(0, C, size=(N,))
        >>> reduction = 'mean'
        >>> avg_factor = None
        >>> class_weights = None
        >>> loss = mask_cross_entropy(pred, target, label, reduction,
        >>>                           avg_factor, class_weights)
        >>> assert loss.shape == (1,)
    """
    assert ignore_index is None, 'BCE loss does not support ignore_index'
    # TODO: handle these two reserved arguments
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size()[0]
    # Select, for every ROI, the mask channel of its own class.
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(
        pred_slice, target, weight=class_weight, reduction='mean')[None]


@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):

    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 ignore_index=None,
                 loss_weight=1.0):
        """CrossEntropyLoss.

        Args:
            use_sigmoid (bool, optional): Whether the prediction uses
                sigmoid of softmax. Defaults to False.
            use_mask (bool, optional): Whether to use mask cross entropy
                loss. Defaults to False.
            reduction (str, optional): . Defaults to 'mean'.
                Options are "none", "mean" and "sum".
            class_weight (list[float], optional): Weight of each class.
                Defaults to None.
            ignore_index (int | None): The label index to be ignored.
                Defaults to None.
            loss_weight (float, optional): Weight of the loss.
                Defaults to 1.0.
        """
        super(CrossEntropyLoss, self).__init__()
        # sigmoid and mask modes are mutually exclusive.
        assert (use_sigmoid is False) or (use_mask is False)
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = class_weight
        self.ignore_index = ignore_index

        # Pick the concrete criterion once at construction time.
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                ignore_index=None,
                **kwargs):
        """Forward function.

        Args:
            cls_score (torch.Tensor): The prediction.
            label (torch.Tensor): The learning label of the prediction.
            weight (torch.Tensor, optional): Sample-wise loss weight.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The method used to reduce
                the loss. Options are "none", "mean" and "sum".
            ignore_index (int | None): The label index to be ignored.
                If not None, it will override the default value.
                Default: None.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if ignore_index is None:
            ignore_index = self.ignore_index

        if self.class_weight is not None:
            class_weight = cls_score.new_tensor(
                self.class_weight, device=cls_score.device)
        else:
            class_weight = None
        loss_cls = self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            weight,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            ignore_index=ignore_index,
            **kwargs)
        return loss_cls
9,696
37.480159
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .accuracy import Accuracy, accuracy from .ae_loss import AssociativeEmbeddingLoss from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, cross_entropy, mask_cross_entropy) from .focal_loss import FocalLoss, sigmoid_focal_loss from .gaussian_focal_loss import GaussianFocalLoss from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss from .ghm_loss import GHMC, GHMR from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss, bounded_iou_loss, iou_loss) from .kd_loss import KnowledgeDistillationKLDivLoss, FeatImitate_L2Loss from .mse_loss import MSELoss, mse_loss from .pisa_loss import carl_loss, isr_p from .seesaw_loss import SeesawLoss from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss from .utils import reduce_loss, weight_reduce_loss, weighted_loss from .varifocal_loss import VarifocalLoss from .semi_focal_loss import DiffFocalLoss, RobustFocalLoss __all__ = [ 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', 'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss', 'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss', 'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss', 'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss', 'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss', 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'FeatImitate_L2Loss', 'DiffFocalLoss', 'RobustFocalLoss' ]
1,820
52.558824
77
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/gaussian_focal_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn

from ..builder import LOSSES
from .utils import weighted_loss


@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in
    gaussian distribution.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the
            prediction in gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
    """
    eps = 1e-12  # keeps log() finite for predictions of exactly 0 or 1
    # Only the exact gaussian peaks count as positives; everything else
    # is a negative softened by its distance to a peak.
    is_peak = gaussian_target.eq(1)
    neg_weight = (1 - gaussian_target).pow(gamma)
    loss_pos = -(pred + eps).log() * (1 - pred).pow(alpha) * is_peak
    loss_neg = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weight
    return loss_pos + loss_neg


@LOSSES.register_module()
class GaussianFocalLoss(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Please notice that the target in GaussianFocalLoss is a gaussian
    heatmap, not 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean',
                 loss_weight=1.0):
        super(GaussianFocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction
                in gaussian distribution.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used
                to override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        effective_reduction = (
            reduction_override if reduction_override else self.reduction)
        return self.loss_weight * gaussian_focal_loss(
            pred,
            target,
            weight,
            alpha=self.alpha,
            gamma=self.gamma,
            reduction=effective_reduction,
            avg_factor=avg_factor)
3,312
34.623656
108
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/semi_focal_loss.py
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F

from mmdet.core import reduce_mean
from ..builder import LOSSES
from .utils import weighted_loss, weight_reduce_loss

# NOTE(review): removed a leftover module-level ``import ipdb`` -- a
# debugger dependency that was never used in this file.


def diff_focal_loss(pred,
                    target,
                    weight=None,
                    beta=2.0,
                    hard_filter=False,
                    reduction='mean',
                    avg_factor=None):
    """Difference Focal Loss between student and teacher predictions.

    Each location is re-weighted by the (clamped) gap between student
    and teacher scores, so the student focuses on locations where it
    lags behind the teacher.

    Args:
        pred (torch.Tensor): Student classification logits with shape
            (N, C).
        target (tuple[torch.Tensor]): ``(label, stu_score, tea_score)``
            -- category labels of shape (N,), plus student and teacher
            scores of shape (N, C).
        weight (torch.Tensor, optional): Sample-wise loss weight.
        beta (float, optional): Power applied to the score gap.
            Defaults to 2.0.
        hard_filter (bool, optional): If True, locations where the
            student already matches/beats the teacher get zero weight;
            otherwise they are assigned the smallest positive gap.
            Defaults to False.
        reduction (str, optional): 'none', 'mean' or 'sum'.
        avg_factor (int, optional): Average factor that is used to
            average the loss. Defaults to None.

    Returns:
        tuple: ``(loss, pre_filter_num, post_filter_num)`` where the two
        counters record how many positives existed before and after the
        teacher-better-than-student filtering.
    """
    assert len(target) == 3, """target for diff_focal_loss must be a
        tuple of three elements, including category label, student score
        and teacher score, respectively."""
    label, stu_score, tea_score = target

    # --- negatives: weight by how much the student over-predicts
    # relative to the teacher.
    if hard_filter:
        scale_factor = torch.clamp(stu_score - tea_score, min=0)
    else:
        scale_factor = stu_score - tea_score
        positive_gaps = scale_factor[scale_factor > 0]
        if positive_gaps.numel() > 0:
            # Replace negative gaps with the smallest positive gap so
            # every location keeps a non-zero weight.
            outlier_scale_factor = torch.min(positive_gaps.detach())
            scale_factor[scale_factor < 0] = outlier_scale_factor
        else:
            # Guard against torch.min on an empty tensor (would raise
            # when no location has a positive gap); fall back to
            # clamping, which makes the negative term vanish.
            scale_factor = torch.clamp(scale_factor, min=0)
    zerolabel = scale_factor.new_zeros(pred.shape)
    loss = F.binary_cross_entropy_with_logits(
        pred, zerolabel, reduction='none') * scale_factor.pow(beta)

    # --- positives: weight by how much the teacher outperforms the
    # student on the ground-truth category.
    bg_class_ind = pred.size(1)
    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
    if pos.shape[0] > 0:
        pos_label = label[pos].long()
        filter_flags = torch.clamp(
            tea_score[pos, pos_label] - stu_score[pos, pos_label], min=0)
        pre_filter_num = torch.tensor(
            pos.shape[0], device=pred.device, dtype=torch.float)
        post_filter_num = torch.sum(filter_flags > 0).float()
        if hard_filter:
            scale_factor = filter_flags
        else:
            scale_factor = tea_score[pos, pos_label] - \
                stu_score[pos, pos_label]
            if scale_factor[filter_flags > 0].shape[0] > 0:
                # Same outlier substitution as for negatives.
                outlier_scale_factor = torch.min(
                    scale_factor[filter_flags > 0].detach())
                scale_factor[filter_flags == 0] = outlier_scale_factor
        pos_pred = pred[pos, pos_label]
        onelabel = pos_pred.new_ones(pos_pred.shape)
        loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
            pos_pred, onelabel, reduction='none') * scale_factor.pow(beta)
    else:
        # Zero counters that keep the graph alive.
        pre_filter_num, post_filter_num = pred.sum() * 0, pred.sum() * 0

    loss = loss.sum(dim=1, keepdim=False)
    loss = weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
    return loss, pre_filter_num, post_filter_num


def robust_focal_loss(pred,
                      target,
                      weight=None,
                      gamma=2.0,
                      alpha=0.25,
                      reduction='mean',
                      avg_factor=None):
    """Teacher-guided (robust) variant of sigmoid focal loss.

    Positives use the teacher score instead of the student's own
    sigmoid as the probability inside the modulating factor.

    Args:
        pred (torch.Tensor): Student classification logits, shape (N, C).
        target (tuple[torch.Tensor]): ``(label, tea_score)`` -- category
            labels of shape (N,) and teacher scores of shape (N, C).
        weight (torch.Tensor, optional): Sample-wise loss weight.
        gamma (float, optional): Modulating-factor power. Defaults to 2.0.
        alpha (float, optional): Balancing factor. Defaults to 0.25.
        reduction (str, optional): 'none', 'mean' or 'sum'.
        avg_factor (int, optional): Average factor that is used to
            average the loss. Defaults to None.
    """
    assert len(target) == 2, """target for tea_guided_focal_loss must be a
        tuple of two elements, including category label and teacher score,
        respectively."""
    label, tea_score = target
    num_classes = pred.size(1)
    # One-hot with an extra background column, then slice it off.
    target = F.one_hot(label, num_classes=num_classes + 1)
    target = target[:, :num_classes].type_as(pred)

    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    # focal weight: teacher score drives the positive term.
    pt = tea_score * target + pred_sigmoid * (1 - target)
    # FIX(review): the negative balancing factor was hard-coded as 0.75,
    # which is only correct for the default alpha=0.25; use (1 - alpha)
    # so the ``alpha`` argument is honored (identical for the default).
    focal_weight = (alpha * target + (1 - alpha) *
                    (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    if weight is not None:
        if weight.shape != loss.shape:
            if weight.size(0) == loss.size(0):
                # For most cases, weight is of shape (num_priors, ),
                # which means it does not have the second axis num_class
                weight = weight.view(-1, 1)
            else:
                # Sometimes, weight per anchor per class is also needed.
                # e.g. in FSAF. But it may be flattened of shape
                # (num_priors x num_class, ), while loss is still of shape
                # (num_priors, num_class).
                assert weight.numel() == loss.numel()
                weight = weight.view(loss.size(0), -1)
        assert weight.ndim == loss.ndim
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


@LOSSES.register_module()
class DiffFocalLoss(nn.Module):
    """Module wrapper around :func:`diff_focal_loss`.

    Args:
        use_sigmoid (bool): Only sigmoid is supported. Defaults to True.
        beta (float): Power applied to the student/teacher score gap.
            Defaults to 2.0.
        hard_filter (bool): See :func:`diff_focal_loss`. Defaults to True.
        reduction (str): 'none', 'mean' or 'sum'. Defaults to 'mean'.
        loss_weight (float): Weight of this loss. Defaults to 1.0.
    """

    def __init__(self,
                 use_sigmoid=True,
                 beta=2.0,
                 hard_filter=True,
                 reduction='mean',
                 loss_weight=1.0):
        super(DiffFocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid in DFL supported now.'
        self.use_sigmoid = use_sigmoid
        self.beta = beta
        self.hard_filter = hard_filter
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): Predicted joint representation of
                classification and quality (IoU) estimation with shape
                (N, C), C is the number of classes.
            target (tuple([torch.Tensor])): Target category label with
                shape (N,) and target quality label with shape (N,).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used
                to override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.use_sigmoid:
            loss_cls, pre_filter_number, post_filter_number = \
                diff_focal_loss(
                    pred,
                    target,
                    weight,
                    beta=self.beta,
                    hard_filter=self.hard_filter,
                    reduction=reduction,
                    avg_factor=avg_factor)
            loss_cls *= self.loss_weight
        else:
            raise NotImplementedError
        return loss_cls, pre_filter_number, post_filter_number


@LOSSES.register_module()
class RobustFocalLoss(nn.Module):
    """Module wrapper around :func:`robust_focal_loss`.

    Args:
        use_sigmoid (bool): Only sigmoid is supported. Defaults to True.
        gamma (float): Modulating-factor power. Defaults to 2.0.
        alpha (float): Balancing factor. Defaults to 0.25.
        reduction (str): 'none', 'mean' or 'sum'. Defaults to 'mean'.
        loss_weight (float): Weight of this loss. Defaults to 1.0.
    """

    def __init__(self,
                 use_sigmoid=True,
                 gamma=2.0,
                 alpha=0.25,
                 reduction='mean',
                 loss_weight=1.0):
        super(RobustFocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid in DFL supported now.'
        self.use_sigmoid = use_sigmoid
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): Predicted joint representation of
                classification and quality (IoU) estimation with shape
                (N, C), C is the number of classes.
            target (tuple([torch.Tensor])): Target category label with
                shape (N,) and target quality label with shape (N,).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used
                to override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.use_sigmoid:
            loss_cls = robust_focal_loss(
                pred,
                target,
                weight,
                gamma=self.gamma,
                alpha=self.alpha,
                reduction=reduction,
                avg_factor=avg_factor)
            loss_cls *= self.loss_weight
        else:
            raise NotImplementedError
        return loss_cls
8,302
39.305825
97
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/losses/kd_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F

from ..builder import LOSSES
from .utils import weighted_loss

# NOTE(review): removed a leftover module-level ``import ipdb`` -- a
# debugger dependency that was never used in this file.


@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
                                       soft_label,
                                       T,
                                       detach_target=True):
    r"""Loss function for knowledge distilling using KL divergence.

    Args:
        pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
        T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic
            differentiation

    Returns:
        torch.Tensor: Loss tensor with shape (N,).
    """
    assert pred.size() == soft_label.size()
    target = F.softmax(soft_label / T, dim=1)
    if detach_target:
        target = target.detach()

    # T*T rescaling keeps gradient magnitudes comparable across
    # temperatures (Hinton et al., "Distilling the Knowledge in a
    # Neural Network").
    kd_loss = F.kl_div(
        F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
            T * T)

    return kd_loss


@LOSSES.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
    """Loss function for knowledge distilling using KL divergence.

    Args:
        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
        loss_weight (float): Loss weight of current loss.
        T (int): Temperature for distillation.
    """

    def __init__(self, reduction='mean', loss_weight=1.0, T=10):
        super(KnowledgeDistillationKLDivLoss, self).__init__()
        assert T >= 1
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.T = T

    def forward(self,
                pred,
                soft_label,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Forward function.

        Args:
            pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used
                to override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)

        loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
            pred,
            soft_label,
            weight,
            reduction=reduction,
            avg_factor=avg_factor,
            T=self.T)

        return loss_kd


@LOSSES.register_module()
class FeatImitate_L2Loss(nn.Module):
    """Loss function for feature imitation in knowledge distilling using
    L2 Loss.

    Args:
        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(FeatImitate_L2Loss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.relu = nn.ReLU()

    def forward(self,
                preds,
                targets,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        """Average the per-level L2 distance between normalized features.

        Args:
            preds (Tensor | list[Tensor]): Student feature map(s).
            targets (Tensor | list[Tensor]): Teacher feature map(s),
                matching ``preds`` level by level.
            weight, avg_factor: Unused placeholders kept for interface
                consistency with other losses.
            reduction_override (str, optional): Per-call reduction
                override.
        """
        # FIX(review): the override used to be written back to
        # ``self.reduction``, permanently mutating module state from a
        # single call; keep it local instead. (The reduction value is
        # currently not used by the computation below.)
        reduction = (
            reduction_override if reduction_override else self.reduction)
        total_loss = 0
        if not isinstance(preds, list):
            preds = [preds]
            targets = [targets]
        for pred, target in zip(preds, targets):
            pred = self.relu(pred)
            target = self.relu(target)
            pred = self.normalize_feature(pred)
            target = self.normalize_feature(target)
            # Squared L2 distance, averaged over the batch dimension.
            loss = torch.sum(torch.pow(torch.add(pred, target, alpha=-1),
                                       2)) / len(pred)
            total_loss += loss
        return self.loss_weight * (total_loss / len(preds))

    def normalize_feature(self, x, mult=1.0):
        """Flatten each sample and scale it to unit L2 norm * ``mult``."""
        x = x.reshape(x.size(0), -1)
        return x / x.norm(2, dim=1, keepdim=True) * mult
4,411
31.925373
89
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/hrnet.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch.nn as nn from mmcv.cnn import build_conv_layer, build_norm_layer from mmcv.runner import BaseModule, ModuleList, Sequential from torch.nn.modules.batchnorm import _BatchNorm from ..builder import BACKBONES from .resnet import BasicBlock, Bottleneck class HRModule(BaseModule): """High-Resolution Module for HRNet. In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange is in this module. """ def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), block_init_cfg=None, init_cfg=None): super(HRModule, self).__init__(init_cfg) self.block_init_cfg = block_init_cfg self._check_branches(num_branches, num_blocks, in_channels, num_channels) self.in_channels = in_channels self.num_branches = num_branches self.multiscale_output = multiscale_output self.norm_cfg = norm_cfg self.conv_cfg = conv_cfg self.with_cp = with_cp self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels) self.fuse_layers = self._make_fuse_layers() self.relu = nn.ReLU(inplace=False) def _check_branches(self, num_branches, num_blocks, in_channels, num_channels): if num_branches != len(num_blocks): error_msg = f'NUM_BRANCHES({num_branches}) ' \ f'!= NUM_BLOCKS({len(num_blocks)})' raise ValueError(error_msg) if num_branches != len(num_channels): error_msg = f'NUM_BRANCHES({num_branches}) ' \ f'!= NUM_CHANNELS({len(num_channels)})' raise ValueError(error_msg) if num_branches != len(in_channels): error_msg = f'NUM_BRANCHES({num_branches}) ' \ f'!= NUM_INCHANNELS({len(in_channels)})' raise ValueError(error_msg) def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): downsample = None if stride != 1 or \ self.in_channels[branch_index] != \ num_channels[branch_index] * block.expansion: downsample = nn.Sequential( build_conv_layer( self.conv_cfg, 
self.in_channels[branch_index], num_channels[branch_index] * block.expansion, kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, num_channels[branch_index] * block.expansion)[1]) layers = [] layers.append( block( self.in_channels[branch_index], num_channels[branch_index], stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=self.block_init_cfg)) self.in_channels[branch_index] = \ num_channels[branch_index] * block.expansion for i in range(1, num_blocks[branch_index]): layers.append( block( self.in_channels[branch_index], num_channels[branch_index], with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=self.block_init_cfg)) return Sequential(*layers) def _make_branches(self, num_branches, block, num_blocks, num_channels): branches = [] for i in range(num_branches): branches.append( self._make_one_branch(i, block, num_blocks, num_channels)) return ModuleList(branches) def _make_fuse_layers(self): if self.num_branches == 1: return None num_branches = self.num_branches in_channels = self.in_channels fuse_layers = [] num_out_branches = num_branches if self.multiscale_output else 1 for i in range(num_out_branches): fuse_layer = [] for j in range(num_branches): if j > i: fuse_layer.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels[j], in_channels[i], kernel_size=1, stride=1, padding=0, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], nn.Upsample( scale_factor=2**(j - i), mode='nearest'))) elif j == i: fuse_layer.append(None) else: conv_downsamples = [] for k in range(i - j): if k == i - j - 1: conv_downsamples.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels[j], in_channels[i], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1])) else: conv_downsamples.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels[j], in_channels[j], kernel_size=3, stride=2, 
padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[j])[1], nn.ReLU(inplace=False))) fuse_layer.append(nn.Sequential(*conv_downsamples)) fuse_layers.append(nn.ModuleList(fuse_layer)) return nn.ModuleList(fuse_layers) def forward(self, x): """Forward function.""" if self.num_branches == 1: return [self.branches[0](x[0])] for i in range(self.num_branches): x[i] = self.branches[i](x[i]) x_fuse = [] for i in range(len(self.fuse_layers)): y = 0 for j in range(self.num_branches): if i == j: y += x[j] else: y += self.fuse_layers[i][j](x[j]) x_fuse.append(self.relu(y)) return x_fuse @BACKBONES.register_module() class HRNet(BaseModule): """HRNet backbone. `High-Resolution Representations for Labeling Pixels and Regions arXiv: <https://arxiv.org/abs/1904.04514>`_. Args: extra (dict): Detailed configuration for each stage of HRNet. There must be 4 stages, the configuration for each stage must have 5 keys: - num_modules(int): The number of HRModule in this stage. - num_branches(int): The number of branches in the HRModule. - block(str): The type of convolution block. - num_blocks(tuple): The number of blocks in each branch. The length must be equal to num_branches. - num_channels(tuple): The number of channels in each branch. The length must be equal to num_branches. in_channels (int): Number of input image channels. Default: 3. conv_cfg (dict): Dictionary to construct and config conv layer. norm_cfg (dict): Dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. Default: True. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. zero_init_residual (bool): Whether to use zero init for last norm layer in resblocks to let them behave as identity. Default: False. 
multiscale_output (bool): Whether to output multi-level features produced by multiple branches. If False, only the first level feature will be output. Default: True. pretrained (str, optional): Model pretrained path. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None. Example: >>> from mmdet.models import HRNet >>> import torch >>> extra = dict( >>> stage1=dict( >>> num_modules=1, >>> num_branches=1, >>> block='BOTTLENECK', >>> num_blocks=(4, ), >>> num_channels=(64, )), >>> stage2=dict( >>> num_modules=1, >>> num_branches=2, >>> block='BASIC', >>> num_blocks=(4, 4), >>> num_channels=(32, 64)), >>> stage3=dict( >>> num_modules=4, >>> num_branches=3, >>> block='BASIC', >>> num_blocks=(4, 4, 4), >>> num_channels=(32, 64, 128)), >>> stage4=dict( >>> num_modules=3, >>> num_branches=4, >>> block='BASIC', >>> num_blocks=(4, 4, 4, 4), >>> num_channels=(32, 64, 128, 256))) >>> self = HRNet(extra, in_channels=1) >>> self.eval() >>> inputs = torch.rand(1, 1, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... 
print(tuple(level_out.shape)) (1, 32, 8, 8) (1, 64, 4, 4) (1, 128, 2, 2) (1, 256, 1, 1) """ blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} def __init__(self, extra, in_channels=3, conv_cfg=None, norm_cfg=dict(type='BN'), norm_eval=True, with_cp=False, zero_init_residual=False, multiscale_output=True, pretrained=None, init_cfg=None): super(HRNet, self).__init__(init_cfg) self.pretrained = pretrained assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: if init_cfg is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ] else: raise TypeError('pretrained must be a str or None') # Assert configurations of 4 stages are in extra assert 'stage1' in extra and 'stage2' in extra \ and 'stage3' in extra and 'stage4' in extra # Assert whether the length of `num_blocks` and `num_channels` are # equal to `num_branches` for i in range(4): cfg = extra[f'stage{i + 1}'] assert len(cfg['num_blocks']) == cfg['num_branches'] and \ len(cfg['num_channels']) == cfg['num_branches'] self.extra = extra self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.norm_eval = norm_eval self.with_cp = with_cp self.zero_init_residual = zero_init_residual # stem net self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) self.conv1 = build_conv_layer( self.conv_cfg, in_channels, 64, kernel_size=3, stride=2, padding=1, bias=False) self.add_module(self.norm1_name, norm1) self.conv2 = build_conv_layer( self.conv_cfg, 64, 64, kernel_size=3, stride=2, padding=1, bias=False) self.add_module(self.norm2_name, norm2) self.relu = nn.ReLU(inplace=True) # stage 1 self.stage1_cfg 
= self.extra['stage1'] num_channels = self.stage1_cfg['num_channels'][0] block_type = self.stage1_cfg['block'] num_blocks = self.stage1_cfg['num_blocks'][0] block = self.blocks_dict[block_type] stage1_out_channels = num_channels * block.expansion self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) # stage 2 self.stage2_cfg = self.extra['stage2'] num_channels = self.stage2_cfg['num_channels'] block_type = self.stage2_cfg['block'] block = self.blocks_dict[block_type] num_channels = [channel * block.expansion for channel in num_channels] self.transition1 = self._make_transition_layer([stage1_out_channels], num_channels) self.stage2, pre_stage_channels = self._make_stage( self.stage2_cfg, num_channels) # stage 3 self.stage3_cfg = self.extra['stage3'] num_channels = self.stage3_cfg['num_channels'] block_type = self.stage3_cfg['block'] block = self.blocks_dict[block_type] num_channels = [channel * block.expansion for channel in num_channels] self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage3, pre_stage_channels = self._make_stage( self.stage3_cfg, num_channels) # stage 4 self.stage4_cfg = self.extra['stage4'] num_channels = self.stage4_cfg['num_channels'] block_type = self.stage4_cfg['block'] block = self.blocks_dict[block_type] num_channels = [channel * block.expansion for channel in num_channels] self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage4, pre_stage_channels = self._make_stage( self.stage4_cfg, num_channels, multiscale_output=multiscale_output) @property def norm1(self): """nn.Module: the normalization layer named "norm1" """ return getattr(self, self.norm1_name) @property def norm2(self): """nn.Module: the normalization layer named "norm2" """ return getattr(self, self.norm2_name) def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): num_branches_cur = len(num_channels_cur_layer) num_branches_pre = len(num_channels_pre_layer) 
transition_layers = [] for i in range(num_branches_cur): if i < num_branches_pre: if num_channels_cur_layer[i] != num_channels_pre_layer[i]: transition_layers.append( nn.Sequential( build_conv_layer( self.conv_cfg, num_channels_pre_layer[i], num_channels_cur_layer[i], kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, num_channels_cur_layer[i])[1], nn.ReLU(inplace=True))) else: transition_layers.append(None) else: conv_downsamples = [] for j in range(i + 1 - num_branches_pre): in_channels = num_channels_pre_layer[-1] out_channels = num_channels_cur_layer[i] \ if j == i - num_branches_pre else in_channels conv_downsamples.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, out_channels)[1], nn.ReLU(inplace=True))) transition_layers.append(nn.Sequential(*conv_downsamples)) return nn.ModuleList(transition_layers) def _make_layer(self, block, inplanes, planes, blocks, stride=1): downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = nn.Sequential( build_conv_layer( self.conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, planes * block.expansion)[1]) layers = [] block_init_cfg = None if self.pretrained is None and not hasattr( self, 'init_cfg') and self.zero_init_residual: if block is BasicBlock: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm2')) elif block is Bottleneck: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm3')) layers.append( block( inplanes, planes, stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=block_init_cfg, )) inplanes = planes * block.expansion for i in range(1, blocks): layers.append( block( inplanes, planes, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, 
init_cfg=block_init_cfg)) return Sequential(*layers) def _make_stage(self, layer_config, in_channels, multiscale_output=True): num_modules = layer_config['num_modules'] num_branches = layer_config['num_branches'] num_blocks = layer_config['num_blocks'] num_channels = layer_config['num_channels'] block = self.blocks_dict[layer_config['block']] hr_modules = [] block_init_cfg = None if self.pretrained is None and not hasattr( self, 'init_cfg') and self.zero_init_residual: if block is BasicBlock: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm2')) elif block is Bottleneck: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm3')) for i in range(num_modules): # multi_scale_output is only used for the last module if not multiscale_output and i == num_modules - 1: reset_multiscale_output = False else: reset_multiscale_output = True hr_modules.append( HRModule( num_branches, block, num_blocks, in_channels, num_channels, reset_multiscale_output, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, block_init_cfg=block_init_cfg)) return Sequential(*hr_modules), in_channels def forward(self, x): """Forward function.""" x = self.conv1(x) x = self.norm1(x) x = self.relu(x) x = self.conv2(x) x = self.norm2(x) x = self.relu(x) x = self.layer1(x) x_list = [] for i in range(self.stage2_cfg['num_branches']): if self.transition1[i] is not None: x_list.append(self.transition1[i](x)) else: x_list.append(x) y_list = self.stage2(x_list) x_list = [] for i in range(self.stage3_cfg['num_branches']): if self.transition2[i] is not None: x_list.append(self.transition2[i](y_list[-1])) else: x_list.append(y_list[i]) y_list = self.stage3(x_list) x_list = [] for i in range(self.stage4_cfg['num_branches']): if self.transition3[i] is not None: x_list.append(self.transition3[i](y_list[-1])) else: x_list.append(y_list[i]) y_list = self.stage4(x_list) return y_list def train(self, mode=True): """Convert the model into training mode will keeping 
the normalization layer freezed.""" super(HRNet, self).train(mode) if mode and self.norm_eval: for m in self.modules(): # trick: eval have effect on BatchNorm only if isinstance(m, _BatchNorm): m.eval()
23,106
38.164407
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/regnet.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import numpy as np import torch.nn as nn from mmcv.cnn import build_conv_layer, build_norm_layer from ..builder import BACKBONES from .resnet import ResNet from .resnext import Bottleneck @BACKBONES.register_module() class RegNet(ResNet): """RegNet backbone. More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_ . Args: arch (dict): The parameter of RegNets. - w0 (int): initial width - wa (float): slope of width - wm (float): quantization parameter to quantize the width - depth (int): depth of the backbone - group_w (int): width of group - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck. strides (Sequence[int]): Strides of the first block of each stage. base_channels (int): Base channels after stem layer. in_channels (int): Number of input image channels. Default: 3. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. frozen_stages (int): Stages to be frozen (all param fixed). -1 means not freezing any parameters. norm_cfg (dict): dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): whether to use zero init for last norm layer in resblocks to let them behave as identity. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. 
Default: None Example: >>> from mmdet.models import RegNet >>> import torch >>> self = RegNet( arch=dict( w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0)) >>> self.eval() >>> inputs = torch.rand(1, 3, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... print(tuple(level_out.shape)) (1, 96, 8, 8) (1, 192, 4, 4) (1, 432, 2, 2) (1, 1008, 1, 1) """ arch_settings = { 'regnetx_400mf': dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), 'regnetx_800mf': dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), 'regnetx_1.6gf': dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), 'regnetx_3.2gf': dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), 'regnetx_4.0gf': dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), 'regnetx_6.4gf': dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), 'regnetx_8.0gf': dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), 'regnetx_12gf': dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0), } def __init__(self, arch, in_channels=3, stem_channels=32, base_channels=32, strides=(2, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=-1, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, with_cp=False, zero_init_residual=True, pretrained=None, init_cfg=None): super(ResNet, self).__init__(init_cfg) # Generate RegNet parameters first if isinstance(arch, str): assert arch in self.arch_settings, \ f'"arch": "{arch}" is not one of the' \ ' arch_settings' arch = self.arch_settings[arch] elif not isinstance(arch, dict): raise ValueError('Expect "arch" to be either a string ' f'or a dict, got {type(arch)}') widths, num_stages = self.generate_regnet( arch['w0'], arch['wa'], arch['wm'], arch['depth'], ) # Convert to per stage format 
stage_widths, stage_blocks = self.get_stages_from_blocks(widths) # Generate group widths and bot muls group_widths = [arch['group_w'] for _ in range(num_stages)] self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)] # Adjust the compatibility of stage_widths and group_widths stage_widths, group_widths = self.adjust_width_group( stage_widths, self.bottleneck_ratio, group_widths) # Group params by stage self.stage_widths = stage_widths self.group_widths = group_widths self.depth = sum(stage_blocks) self.stem_channels = stem_channels self.base_channels = base_channels self.num_stages = num_stages assert num_stages >= 1 and num_stages <= 4 self.strides = strides self.dilations = dilations assert len(strides) == len(dilations) == num_stages self.out_indices = out_indices assert max(out_indices) < num_stages self.style = style self.deep_stem = deep_stem self.avg_down = avg_down self.frozen_stages = frozen_stages self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.with_cp = with_cp self.norm_eval = norm_eval self.dcn = dcn self.stage_with_dcn = stage_with_dcn if dcn is not None: assert len(stage_with_dcn) == num_stages self.plugins = plugins self.zero_init_residual = zero_init_residual self.block = Bottleneck expansion_bak = self.block.expansion self.block.expansion = 1 self.stage_blocks = stage_blocks[:num_stages] self._make_stem_layer(in_channels, stem_channels) block_init_cfg = None assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: if init_cfg is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ] if self.zero_init_residual: block_init_cfg = dict( type='Constant', val=0, override=dict(name='norm3')) else: raise 
TypeError('pretrained must be a str or None') self.inplanes = stem_channels self.res_layers = [] for i, num_blocks in enumerate(self.stage_blocks): stride = self.strides[i] dilation = self.dilations[i] group_width = self.group_widths[i] width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i])) stage_groups = width // group_width dcn = self.dcn if self.stage_with_dcn[i] else None if self.plugins is not None: stage_plugins = self.make_stage_plugins(self.plugins, i) else: stage_plugins = None res_layer = self.make_res_layer( block=self.block, inplanes=self.inplanes, planes=self.stage_widths[i], num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, plugins=stage_plugins, groups=stage_groups, base_width=group_width, base_channels=self.stage_widths[i], init_cfg=block_init_cfg) self.inplanes = self.stage_widths[i] layer_name = f'layer{i + 1}' self.add_module(layer_name, res_layer) self.res_layers.append(layer_name) self._freeze_stages() self.feat_dim = stage_widths[-1] self.block.expansion = expansion_bak def _make_stem_layer(self, in_channels, base_channels): self.conv1 = build_conv_layer( self.conv_cfg, in_channels, base_channels, kernel_size=3, stride=2, padding=1, bias=False) self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, base_channels, postfix=1) self.add_module(self.norm1_name, norm1) self.relu = nn.ReLU(inplace=True) def generate_regnet(self, initial_width, width_slope, width_parameter, depth, divisor=8): """Generates per block width from RegNet parameters. Args: initial_width ([int]): Initial width of the backbone width_slope ([float]): Slope of the quantized linear function width_parameter ([int]): Parameter used to quantize the width. depth ([int]): Depth of the backbone. divisor (int, optional): The divisor of channels. Defaults to 8. 
Returns: list, int: return a list of widths of each stage and the number \ of stages """ assert width_slope >= 0 assert initial_width > 0 assert width_parameter > 1 assert initial_width % divisor == 0 widths_cont = np.arange(depth) * width_slope + initial_width ks = np.round( np.log(widths_cont / initial_width) / np.log(width_parameter)) widths = initial_width * np.power(width_parameter, ks) widths = np.round(np.divide(widths, divisor)) * divisor num_stages = len(np.unique(widths)) widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() return widths, num_stages @staticmethod def quantize_float(number, divisor): """Converts a float to closest non-zero int divisible by divisor. Args: number (int): Original number to be quantized. divisor (int): Divisor used to quantize the number. Returns: int: quantized number that is divisible by devisor. """ return int(round(number / divisor) * divisor) def adjust_width_group(self, widths, bottleneck_ratio, groups): """Adjusts the compatibility of widths and groups. Args: widths (list[int]): Width of each stage. bottleneck_ratio (float): Bottleneck ratio. groups (int): number of groups in each stage Returns: tuple(list): The adjusted widths and groups of each stage. """ bottleneck_width = [ int(w * b) for w, b in zip(widths, bottleneck_ratio) ] groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)] bottleneck_width = [ self.quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_width, groups) ] widths = [ int(w_bot / b) for w_bot, b in zip(bottleneck_width, bottleneck_ratio) ] return widths, groups def get_stages_from_blocks(self, widths): """Gets widths/stage_blocks of network at each stage. Args: widths (list[int]): Width in each stage. 
Returns: tuple(list): width and depth of each stage """ width_diff = [ width != width_prev for width, width_prev in zip(widths + [0], [0] + widths) ] stage_widths = [ width for width, diff in zip(widths, width_diff[:-1]) if diff ] stage_blocks = np.diff([ depth for depth, diff in zip(range(len(width_diff)), width_diff) if diff ]).tolist() return stage_widths, stage_blocks def forward(self, x): """Forward function.""" x = self.conv1(x) x = self.norm1(x) x = self.relu(x) outs = [] for i, layer_name in enumerate(self.res_layers): res_layer = getattr(self, layer_name) x = res_layer(x) if i in self.out_indices: outs.append(x) return tuple(outs)
13,605
37.112045
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/mobilenet_v2.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from torch.nn.modules.batchnorm import _BatchNorm from ..builder import BACKBONES from ..utils import InvertedResidual, make_divisible @BACKBONES.register_module() class MobileNetV2(BaseModule): """MobileNetV2 backbone. Args: widen_factor (float): Width multiplier, multiply number of channels in each layer by this amount. Default: 1.0. out_indices (Sequence[int], optional): Output from which stages. Default: (1, 2, 4, 7). frozen_stages (int): Stages to be frozen (all param fixed). Default: -1, which means not freezing any parameters. conv_cfg (dict, optional): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU6'). norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. Default: False. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None """ # Parameters to build layers. 4 parameters are needed to construct a # layer, from left to right: expand_ratio, channel, num_blocks, stride. 
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]] def __init__(self, widen_factor=1., out_indices=(1, 2, 4, 7), frozen_stages=-1, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU6'), norm_eval=False, with_cp=False, pretrained=None, init_cfg=None): super(MobileNetV2, self).__init__(init_cfg) self.pretrained = pretrained assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: if init_cfg is None: self.init_cfg = [ dict(type='Kaiming', layer='Conv2d'), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ] else: raise TypeError('pretrained must be a str or None') self.widen_factor = widen_factor self.out_indices = out_indices if not set(out_indices).issubset(set(range(0, 8))): raise ValueError('out_indices must be a subset of range' f'(0, 8). But received {out_indices}') if frozen_stages not in range(-1, 8): raise ValueError('frozen_stages must be in range(-1, 8). 
' f'But received {frozen_stages}') self.out_indices = out_indices self.frozen_stages = frozen_stages self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.norm_eval = norm_eval self.with_cp = with_cp self.in_channels = make_divisible(32 * widen_factor, 8) self.conv1 = ConvModule( in_channels=3, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) self.layers = [] for i, layer_cfg in enumerate(self.arch_settings): expand_ratio, channel, num_blocks, stride = layer_cfg out_channels = make_divisible(channel * widen_factor, 8) inverted_res_layer = self.make_layer( out_channels=out_channels, num_blocks=num_blocks, stride=stride, expand_ratio=expand_ratio) layer_name = f'layer{i + 1}' self.add_module(layer_name, inverted_res_layer) self.layers.append(layer_name) if widen_factor > 1.0: self.out_channel = int(1280 * widen_factor) else: self.out_channel = 1280 layer = ConvModule( in_channels=self.in_channels, out_channels=self.out_channel, kernel_size=1, stride=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) self.add_module('conv2', layer) self.layers.append('conv2') def make_layer(self, out_channels, num_blocks, stride, expand_ratio): """Stack InvertedResidual blocks to build a layer for MobileNetV2. Args: out_channels (int): out_channels of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 expand_ratio (int): Expand the number of channels of the hidden layer in InvertedResidual by this ratio. Default: 6. 
""" layers = [] for i in range(num_blocks): if i >= 1: stride = 1 layers.append( InvertedResidual( self.in_channels, out_channels, mid_channels=int(round(self.in_channels * expand_ratio)), stride=stride, with_expand_conv=expand_ratio != 1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp)) self.in_channels = out_channels return nn.Sequential(*layers) def _freeze_stages(self): if self.frozen_stages >= 0: for param in self.conv1.parameters(): param.requires_grad = False for i in range(1, self.frozen_stages + 1): layer = getattr(self, f'layer{i}') layer.eval() for param in layer.parameters(): param.requires_grad = False def forward(self, x): """Forward function.""" x = self.conv1(x) outs = [] for i, layer_name in enumerate(self.layers): layer = getattr(self, layer_name) x = layer(x) if i in self.out_indices: outs.append(x) return tuple(outs) def train(self, mode=True): """Convert the model into training mode while keep normalization layer frozen.""" super(MobileNetV2, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): # trick: eval have effect on BatchNorm only if isinstance(m, _BatchNorm): m.eval()
7,599
37.383838
78
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/swin.py
import warnings from collections import OrderedDict from copy import deepcopy import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.cnn import build_norm_layer, constant_init, trunc_normal_init from mmcv.cnn.bricks.transformer import FFN, build_dropout from mmcv.runner import BaseModule, ModuleList, _load_checkpoint from mmcv.utils import to_2tuple from ...utils import get_root_logger from ..builder import BACKBONES from ..utils.ckpt_convert import swin_converter from ..utils.transformer import PatchEmbed, PatchMerging class WindowMSA(BaseModule): """Window based multi-head self-attention (W-MSA) module with relative position bias. Args: embed_dims (int): Number of input channels. num_heads (int): Number of attention heads. window_size (tuple[int]): The height and width of the window. qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. Default: True. qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Default: None. attn_drop_rate (float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. init_cfg (dict | None, optional): The Config for initialization. Default: None. 
""" def __init__(self, embed_dims, num_heads, window_size, qkv_bias=True, qk_scale=None, attn_drop_rate=0., proj_drop_rate=0., init_cfg=None): super().__init__() self.embed_dims = embed_dims self.window_size = window_size # Wh, Ww self.num_heads = num_heads head_embed_dims = embed_dims // num_heads self.scale = qk_scale or head_embed_dims**-0.5 self.init_cfg = init_cfg # define a parameter table of relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH # About 2x faster than original impl Wh, Ww = self.window_size rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) rel_position_index = rel_index_coords + rel_index_coords.T rel_position_index = rel_position_index.flip(1).contiguous() self.register_buffer('relative_position_index', rel_position_index) self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop_rate) self.proj = nn.Linear(embed_dims, embed_dims) self.proj_drop = nn.Dropout(proj_drop_rate) self.softmax = nn.Softmax(dim=-1) def init_weights(self): trunc_normal_init(self.relative_position_bias_table, std=0.02) def forward(self, x, mask=None): """ Args: x (tensor): input features with shape of (num_windows*B, N, C) mask (tensor | None, Optional): mask with shape of (num_windows, Wh*Ww, Wh*Ww), value should be between (-inf, 0]. 
""" B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) # make torchscript happy (cannot use tensor as tuple) q, k, v = qkv[0], qkv[1], qkv[2] q = q * self.scale attn = (q @ k.transpose(-2, -1)) relative_position_bias = self.relative_position_bias_table[ self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute( 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] attn = attn.view(B // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x @staticmethod def double_step_seq(step1, len1, step2, len2): seq1 = torch.arange(0, step1 * len1, step1) seq2 = torch.arange(0, step2 * len2, step2) return (seq1[:, None] + seq2[None, :]).reshape(1, -1) class ShiftWindowMSA(BaseModule): """Shifted Window Multihead Self-Attention Module. Args: embed_dims (int): Number of input channels. num_heads (int): Number of attention heads. window_size (int): The height and width of the window. shift_size (int, optional): The shift step of each window towards right-bottom. If zero, act as regular window-msa. Defaults to 0. qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Defaults: None. attn_drop_rate (float, optional): Dropout ratio of attention weight. Defaults: 0. proj_drop_rate (float, optional): Dropout ratio of output. Defaults: 0. dropout_layer (dict, optional): The dropout_layer used before output. Defaults: dict(type='DropPath', drop_prob=0.). 
init_cfg (dict, optional): The extra config for initialization. Default: None. """ def __init__(self, embed_dims, num_heads, window_size, shift_size=0, qkv_bias=True, qk_scale=None, attn_drop_rate=0, proj_drop_rate=0, dropout_layer=dict(type='DropPath', drop_prob=0.), init_cfg=None): super().__init__(init_cfg) self.window_size = window_size self.shift_size = shift_size assert 0 <= self.shift_size < self.window_size self.w_msa = WindowMSA( embed_dims=embed_dims, num_heads=num_heads, window_size=to_2tuple(window_size), qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop_rate=attn_drop_rate, proj_drop_rate=proj_drop_rate, init_cfg=None) self.drop = build_dropout(dropout_layer) def forward(self, query, hw_shape): B, L, C = query.shape H, W = hw_shape assert L == H * W, 'input feature has wrong size' query = query.view(B, H, W, C) # pad feature maps to multiples of window size pad_r = (self.window_size - W % self.window_size) % self.window_size pad_b = (self.window_size - H % self.window_size) % self.window_size query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) H_pad, W_pad = query.shape[1], query.shape[2] # cyclic shift if self.shift_size > 0: shifted_query = torch.roll( query, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) # calculate attention mask for SW-MSA img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device) h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) cnt = 0 for h in h_slices: for w in w_slices: img_mask[:, h, w, :] = cnt cnt += 1 # nW, window_size, window_size, 1 mask_windows = self.window_partition(img_mask) mask_windows = mask_windows.view( -1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( attn_mask == 0, float(0.0)) else: 
shifted_query = query attn_mask = None # nW*B, window_size, window_size, C query_windows = self.window_partition(shifted_query) # nW*B, window_size*window_size, C query_windows = query_windows.view(-1, self.window_size**2, C) # W-MSA/SW-MSA (nW*B, window_size*window_size, C) attn_windows = self.w_msa(query_windows, mask=attn_mask) # merge windows attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) # B H' W' C shifted_x = self.window_reverse(attn_windows, H_pad, W_pad) # reverse cyclic shift if self.shift_size > 0: x = torch.roll( shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x if pad_r > 0 or pad_b: x = x[:, :H, :W, :].contiguous() x = x.view(B, H * W, C) x = self.drop(x) return x def window_reverse(self, windows, H, W): """ Args: windows: (num_windows*B, window_size, window_size, C) H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ window_size = self.window_size B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x def window_partition(self, x): """ Args: x: (B, H, W, C) Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape window_size = self.window_size x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() windows = windows.view(-1, window_size, window_size, C) return windows class SwinBlock(BaseModule): """" Args: embed_dims (int): The feature dimension. num_heads (int): Parallel attention heads. feedforward_channels (int): The hidden dimension for FFNs. window_size (int, optional): The local window scale. Default: 7. shift (bool, optional): whether to shift window or not. Default False. qkv_bias (bool, optional): enable bias for qkv if True. Default: True. 
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Default: None. drop_rate (float, optional): Dropout rate. Default: 0. attn_drop_rate (float, optional): Attention dropout rate. Default: 0. drop_path_rate (float, optional): Stochastic depth rate. Default: 0. act_cfg (dict, optional): The config dict of activation function. Default: dict(type='GELU'). norm_cfg (dict, optional): The config dict of normalization. Default: dict(type='LN'). with_cp (bool, optional): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. init_cfg (dict | list | None, optional): The init config. Default: None. """ def __init__(self, embed_dims, num_heads, feedforward_channels, window_size=7, shift=False, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN'), with_cp=False, init_cfg=None): super(SwinBlock, self).__init__() self.init_cfg = init_cfg self.with_cp = with_cp self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] self.attn = ShiftWindowMSA( embed_dims=embed_dims, num_heads=num_heads, window_size=window_size, shift_size=window_size // 2 if shift else 0, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop_rate=attn_drop_rate, proj_drop_rate=drop_rate, dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), init_cfg=None) self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] self.ffn = FFN( embed_dims=embed_dims, feedforward_channels=feedforward_channels, num_fcs=2, ffn_drop=drop_rate, dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), act_cfg=act_cfg, add_identity=True, init_cfg=None) def forward(self, x, hw_shape): def _inner_forward(x): identity = x x = self.norm1(x) x = self.attn(x, hw_shape) x = x + identity identity = x x = self.norm2(x) x = self.ffn(x, identity=identity) return x if self.with_cp and x.requires_grad: x = cp.checkpoint(_inner_forward, x) else: x = _inner_forward(x) 
return x class SwinBlockSequence(BaseModule): """Implements one stage in Swin Transformer. Args: embed_dims (int): The feature dimension. num_heads (int): Parallel attention heads. feedforward_channels (int): The hidden dimension for FFNs. depth (int): The number of blocks in this stage. window_size (int, optional): The local window scale. Default: 7. qkv_bias (bool, optional): enable bias for qkv if True. Default: True. qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Default: None. drop_rate (float, optional): Dropout rate. Default: 0. attn_drop_rate (float, optional): Attention dropout rate. Default: 0. drop_path_rate (float | list[float], optional): Stochastic depth rate. Default: 0. downsample (BaseModule | None, optional): The downsample operation module. Default: None. act_cfg (dict, optional): The config dict of activation function. Default: dict(type='GELU'). norm_cfg (dict, optional): The config dict of normalization. Default: dict(type='LN'). with_cp (bool, optional): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. init_cfg (dict | list | None, optional): The init config. Default: None. 
""" def __init__(self, embed_dims, num_heads, feedforward_channels, depth, window_size=7, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., downsample=None, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN'), with_cp=False, init_cfg=None): super().__init__(init_cfg=init_cfg) if isinstance(drop_path_rate, list): drop_path_rates = drop_path_rate assert len(drop_path_rates) == depth else: drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)] self.blocks = ModuleList() for i in range(depth): block = SwinBlock( embed_dims=embed_dims, num_heads=num_heads, feedforward_channels=feedforward_channels, window_size=window_size, shift=False if i % 2 == 0 else True, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rates[i], act_cfg=act_cfg, norm_cfg=norm_cfg, with_cp=with_cp, init_cfg=None) self.blocks.append(block) self.downsample = downsample def forward(self, x, hw_shape): for block in self.blocks: x = block(x, hw_shape) if self.downsample: x_down, down_hw_shape = self.downsample(x, hw_shape) return x_down, down_hw_shape, x, hw_shape else: return x, hw_shape, x, hw_shape @BACKBONES.register_module() class SwinTransformer(BaseModule): """ Swin Transformer A PyTorch implement of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/abs/2103.14030 Inspiration from https://github.com/microsoft/Swin-Transformer Args: pretrain_img_size (int | tuple[int]): The size of input image when pretrain. Defaults: 224. in_channels (int): The num of input channels. Defaults: 3. embed_dims (int): The feature dimension. Default: 96. patch_size (int | tuple[int]): Patch size. Default: 4. window_size (int): Window size. Default: 7. mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. Default: 4. depths (tuple[int]): Depths of each Swin Transformer stage. Default: (2, 2, 6, 2). 
num_heads (tuple[int]): Parallel attention heads of each Swin Transformer stage. Default: (3, 6, 12, 24). strides (tuple[int]): The patch merging or patch embedding stride of each Swin Transformer stage. (In swin, we set kernel size equal to stride.) Default: (4, 2, 2, 2). out_indices (tuple[int]): Output from which stages. Default: (0, 1, 2, 3). qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. Default: None. patch_norm (bool): If add a norm layer for patch embed and patch merging. Default: True. drop_rate (float): Dropout rate. Defaults: 0. attn_drop_rate (float): Attention dropout rate. Default: 0. drop_path_rate (float): Stochastic depth rate. Defaults: 0.1. use_abs_pos_embed (bool): If True, add absolute position embedding to the patch embedding. Defaults: False. act_cfg (dict): Config dict for activation layer. Default: dict(type='LN'). norm_cfg (dict): Config dict for normalization layer at output of backone. Defaults: dict(type='LN'). with_cp (bool, optional): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. pretrained (str, optional): model pretrained path. Default: None. convert_weights (bool): The flag indicates whether the pre-trained model is from the original repo. We may need to convert some keys to make it compatible. Default: False. frozen_stages (int): Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. init_cfg (dict, optional): The Config for initialization. Defaults to None. 
""" def __init__(self, pretrain_img_size=224, in_channels=3, embed_dims=96, patch_size=4, window_size=7, mlp_ratio=4, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), strides=(4, 2, 2, 2), out_indices=(0, 1, 2, 3), qkv_bias=True, qk_scale=None, patch_norm=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, use_abs_pos_embed=False, act_cfg=dict(type='GELU'), norm_cfg=dict(type='LN'), with_cp=False, pretrained=None, convert_weights=False, frozen_stages=-1, init_cfg=None): self.convert_weights = convert_weights self.frozen_stages = frozen_stages if isinstance(pretrain_img_size, int): pretrain_img_size = to_2tuple(pretrain_img_size) elif isinstance(pretrain_img_size, tuple): if len(pretrain_img_size) == 1: pretrain_img_size = to_2tuple(pretrain_img_size[0]) assert len(pretrain_img_size) == 2, \ f'The size of image should have length 1 or 2, ' \ f'but got {len(pretrain_img_size)}' assert not (init_cfg and pretrained), \ 'init_cfg and pretrained cannot be specified at the same time' if isinstance(pretrained, str): warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) elif pretrained is None: self.init_cfg = init_cfg else: raise TypeError('pretrained must be a str or None') super(SwinTransformer, self).__init__(init_cfg=init_cfg) num_layers = len(depths) self.out_indices = out_indices self.use_abs_pos_embed = use_abs_pos_embed assert strides[0] == patch_size, 'Use non-overlapping patch embed.' 
self.patch_embed = PatchEmbed( in_channels=in_channels, embed_dims=embed_dims, conv_type='Conv2d', kernel_size=patch_size, stride=strides[0], norm_cfg=norm_cfg if patch_norm else None, init_cfg=None) if self.use_abs_pos_embed: patch_row = pretrain_img_size[0] // patch_size patch_col = pretrain_img_size[1] // patch_size num_patches = patch_row * patch_col self.absolute_pos_embed = nn.Parameter( torch.zeros((1, num_patches, embed_dims))) self.drop_after_pos = nn.Dropout(p=drop_rate) # set stochastic depth decay rule total_depth = sum(depths) dpr = [ x.item() for x in torch.linspace(0, drop_path_rate, total_depth) ] self.stages = ModuleList() in_channels = embed_dims for i in range(num_layers): if i < num_layers - 1: downsample = PatchMerging( in_channels=in_channels, out_channels=2 * in_channels, stride=strides[i + 1], norm_cfg=norm_cfg if patch_norm else None, init_cfg=None) else: downsample = None stage = SwinBlockSequence( embed_dims=in_channels, num_heads=num_heads[i], feedforward_channels=mlp_ratio * in_channels, depth=depths[i], window_size=window_size, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])], downsample=downsample, act_cfg=act_cfg, norm_cfg=norm_cfg, with_cp=with_cp, init_cfg=None) self.stages.append(stage) if downsample: in_channels = downsample.out_channels self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)] # Add a norm layer for each output for i in out_indices: layer = build_norm_layer(norm_cfg, self.num_features[i])[1] layer_name = f'norm{i}' self.add_module(layer_name, layer) def train(self, mode=True): """Convert the model into training mode while keep layers freezed.""" super(SwinTransformer, self).train(mode) self._freeze_stages() def _freeze_stages(self): if self.frozen_stages >= 0: self.patch_embed.eval() for param in self.patch_embed.parameters(): param.requires_grad = False if self.use_abs_pos_embed: 
self.absolute_pos_embed.requires_grad = False self.drop_after_pos.eval() for i in range(1, self.frozen_stages + 1): if (i - 1) in self.out_indices: norm_layer = getattr(self, f'norm{i-1}') norm_layer.eval() for param in norm_layer.parameters(): param.requires_grad = False m = self.stages[i - 1] m.eval() for param in m.parameters(): param.requires_grad = False def init_weights(self): logger = get_root_logger() if self.init_cfg is None: logger.warn(f'No pre-trained weights for ' f'{self.__class__.__name__}, ' f'training start from scratch') if self.use_abs_pos_embed: trunc_normal_init(self.absolute_pos_embed, std=0.02) for m in self.modules(): if isinstance(m, nn.Linear): trunc_normal_init(m.weight, std=.02) if m.bias is not None: constant_init(m.bias, 0) elif isinstance(m, nn.LayerNorm): constant_init(m.bias, 0) constant_init(m.weight, 1.0) else: assert 'checkpoint' in self.init_cfg, f'Only support ' \ f'specify `Pretrained` in ' \ f'`init_cfg` in ' \ f'{self.__class__.__name__} ' ckpt = _load_checkpoint( self.init_cfg.checkpoint, logger=logger, map_location='cpu') if 'state_dict' in ckpt: _state_dict = ckpt['state_dict'] elif 'model' in ckpt: _state_dict = ckpt['model'] else: _state_dict = ckpt if self.convert_weights: # supported loading weight from original repo, _state_dict = swin_converter(_state_dict) state_dict = OrderedDict() for k, v in _state_dict.items(): if k.startswith('backbone.'): state_dict[k[9:]] = v # strip prefix of state_dict if list(state_dict.keys())[0].startswith('module.'): state_dict = {k[7:]: v for k, v in state_dict.items()} # reshape absolute position embedding if state_dict.get('absolute_pos_embed') is not None: absolute_pos_embed = state_dict['absolute_pos_embed'] N1, L, C1 = absolute_pos_embed.size() N2, C2, H, W = self.absolute_pos_embed.size() if N1 != N2 or C1 != C2 or L != H * W: logger.warning('Error in loading absolute_pos_embed, pass') else: state_dict['absolute_pos_embed'] = absolute_pos_embed.view( N2, H, W, C2).permute(0, 3, 
1, 2).contiguous() # interpolate position bias table if needed relative_position_bias_table_keys = [ k for k in state_dict.keys() if 'relative_position_bias_table' in k ] for table_key in relative_position_bias_table_keys: table_pretrained = state_dict[table_key] table_current = self.state_dict()[table_key] L1, nH1 = table_pretrained.size() L2, nH2 = table_current.size() if nH1 != nH2: logger.warning(f'Error in loading {table_key}, pass') elif L1 != L2: S1 = int(L1**0.5) S2 = int(L2**0.5) table_pretrained_resized = F.interpolate( table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1), size=(S2, S2), mode='bicubic') state_dict[table_key] = table_pretrained_resized.view( nH2, L2).permute(1, 0).contiguous() # load state_dict self.load_state_dict(state_dict, False) def forward(self, x): x, hw_shape = self.patch_embed(x) if self.use_abs_pos_embed: x = x + self.absolute_pos_embed x = self.drop_after_pos(x) outs = [] for i, stage in enumerate(self.stages): x, hw_shape, out, out_hw_shape = stage(x, hw_shape) if i in self.out_indices: norm_layer = getattr(self, f'norm{i}') out = norm_layer(out) out = out.view(-1, *out_hw_shape, self.num_features[i]).permute(0, 3, 1, 2).contiguous() outs.append(out) return outs
30,173
38.443137
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/trident_resnet.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.cnn import build_conv_layer, build_norm_layer from mmcv.runner import BaseModule from torch.nn.modules.utils import _pair from mmdet.models.backbones.resnet import Bottleneck, ResNet from mmdet.models.builder import BACKBONES class TridentConv(BaseModule): """Trident Convolution Module. Args: in_channels (int): Number of channels in input. out_channels (int): Number of channels in output. kernel_size (int): Size of convolution kernel. stride (int, optional): Convolution stride. Default: 1. trident_dilations (tuple[int, int, int], optional): Dilations of different trident branch. Default: (1, 2, 3). test_branch_idx (int, optional): In inference, all 3 branches will be used if `test_branch_idx==-1`, otherwise only branch with index `test_branch_idx` will be used. Default: 1. bias (bool, optional): Whether to use bias in convolution or not. Default: False. init_cfg (dict or list[dict], optional): Initialization config dict. 
Default: None """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, trident_dilations=(1, 2, 3), test_branch_idx=1, bias=False, init_cfg=None): super(TridentConv, self).__init__(init_cfg) self.num_branch = len(trident_dilations) self.with_bias = bias self.test_branch_idx = test_branch_idx self.stride = _pair(stride) self.kernel_size = _pair(kernel_size) self.paddings = _pair(trident_dilations) self.dilations = trident_dilations self.in_channels = in_channels self.out_channels = out_channels self.bias = bias self.weight = nn.Parameter( torch.Tensor(out_channels, in_channels, *self.kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(out_channels)) else: self.bias = None def extra_repr(self): tmpstr = f'in_channels={self.in_channels}' tmpstr += f', out_channels={self.out_channels}' tmpstr += f', kernel_size={self.kernel_size}' tmpstr += f', num_branch={self.num_branch}' tmpstr += f', test_branch_idx={self.test_branch_idx}' tmpstr += f', stride={self.stride}' tmpstr += f', paddings={self.paddings}' tmpstr += f', dilations={self.dilations}' tmpstr += f', bias={self.bias}' return tmpstr def forward(self, inputs): if self.training or self.test_branch_idx == -1: outputs = [ F.conv2d(input, self.weight, self.bias, self.stride, padding, dilation) for input, dilation, padding in zip( inputs, self.dilations, self.paddings) ] else: assert len(inputs) == 1 outputs = [ F.conv2d(inputs[0], self.weight, self.bias, self.stride, self.paddings[self.test_branch_idx], self.dilations[self.test_branch_idx]) ] return outputs # Since TridentNet is defined over ResNet50 and ResNet101, here we # only support TridentBottleneckBlock. class TridentBottleneck(Bottleneck): """BottleBlock for TridentResNet. Args: trident_dilations (tuple[int, int, int]): Dilations of different trident branch. test_branch_idx (int): In inference, all 3 branches will be used if `test_branch_idx==-1`, otherwise only branch with index `test_branch_idx` will be used. 
concat_output (bool): Whether to concat the output list to a Tensor. `True` only in the last Block. """ def __init__(self, trident_dilations, test_branch_idx, concat_output, **kwargs): super(TridentBottleneck, self).__init__(**kwargs) self.trident_dilations = trident_dilations self.num_branch = len(trident_dilations) self.concat_output = concat_output self.test_branch_idx = test_branch_idx self.conv2 = TridentConv( self.planes, self.planes, kernel_size=3, stride=self.conv2_stride, bias=False, trident_dilations=self.trident_dilations, test_branch_idx=test_branch_idx, init_cfg=dict( type='Kaiming', distribution='uniform', mode='fan_in', override=dict(name='conv2'))) def forward(self, x): def _inner_forward(x): num_branch = ( self.num_branch if self.training or self.test_branch_idx == -1 else 1) identity = x if not isinstance(x, list): x = (x, ) * num_branch identity = x if self.downsample is not None: identity = [self.downsample(b) for b in x] out = [self.conv1(b) for b in x] out = [self.norm1(b) for b in out] out = [self.relu(b) for b in out] if self.with_plugins: for k in range(len(out)): out[k] = self.forward_plugin(out[k], self.after_conv1_plugin_names) out = self.conv2(out) out = [self.norm2(b) for b in out] out = [self.relu(b) for b in out] if self.with_plugins: for k in range(len(out)): out[k] = self.forward_plugin(out[k], self.after_conv2_plugin_names) out = [self.conv3(b) for b in out] out = [self.norm3(b) for b in out] if self.with_plugins: for k in range(len(out)): out[k] = self.forward_plugin(out[k], self.after_conv3_plugin_names) out = [ out_b + identity_b for out_b, identity_b in zip(out, identity) ] return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = [self.relu(b) for b in out] if self.concat_output: out = torch.cat(out, dim=0) return out def make_trident_res_layer(block, inplanes, planes, num_blocks, stride=1, trident_dilations=(1, 2, 3), style='pytorch', with_cp=False, 
conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, test_branch_idx=-1): """Build Trident Res Layers.""" downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = [] conv_stride = stride downsample.extend([ build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=conv_stride, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1] ]) downsample = nn.Sequential(*downsample) layers = [] for i in range(num_blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=stride if i == 0 else 1, trident_dilations=trident_dilations, downsample=downsample if i == 0 else None, style=style, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, plugins=plugins, test_branch_idx=test_branch_idx, concat_output=True if i == num_blocks - 1 else False)) inplanes = planes * block.expansion return nn.Sequential(*layers) @BACKBONES.register_module() class TridentResNet(ResNet): """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to ResNet, while in stage 3, Trident BottleBlock is utilized to replace the normal BottleBlock to yield trident output. Different branch shares the convolution weight but uses different dilations to achieve multi-scale output. / stage3(b0) \ x - stem - stage1 - stage2 - stage3(b1) - output \ stage3(b2) / Args: depth (int): Depth of resnet, from {50, 101, 152}. num_branch (int): Number of branches in TridentNet. test_branch_idx (int): In inference, all 3 branches will be used if `test_branch_idx==-1`, otherwise only branch with index `test_branch_idx` will be used. trident_dilations (tuple[int]): Dilations of different trident branch. len(trident_dilations) should be equal to num_branch. 
""" # noqa def __init__(self, depth, num_branch, test_branch_idx, trident_dilations, **kwargs): assert num_branch == len(trident_dilations) assert depth in (50, 101, 152) super(TridentResNet, self).__init__(depth, **kwargs) assert self.num_stages == 3 self.test_branch_idx = test_branch_idx self.num_branch = num_branch last_stage_idx = self.num_stages - 1 stride = self.strides[last_stage_idx] dilation = trident_dilations dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None if self.plugins is not None: stage_plugins = self.make_stage_plugins(self.plugins, last_stage_idx) else: stage_plugins = None planes = self.base_channels * 2**last_stage_idx res_layer = make_trident_res_layer( TridentBottleneck, inplanes=(self.block.expansion * self.base_channels * 2**(last_stage_idx - 1)), planes=planes, num_blocks=self.stage_blocks[last_stage_idx], stride=stride, trident_dilations=dilation, style=self.style, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, plugins=stage_plugins, test_branch_idx=self.test_branch_idx) layer_name = f'layer{last_stage_idx + 1}' self.__setattr__(layer_name, res_layer) self.res_layers.pop(last_stage_idx) self.res_layers.insert(last_stage_idx, layer_name) self._freeze_stages()
11,129
36.22408
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/detectors_resnext.py
# Copyright (c) OpenMMLab. All rights reserved.
import math

from mmcv.cnn import build_conv_layer, build_norm_layer

from ..builder import BACKBONES
from .detectors_resnet import Bottleneck as _Bottleneck
from .detectors_resnet import DetectoRS_ResNet


class Bottleneck(_Bottleneck):
    """ResNeXt-style bottleneck for DetectoRS.

    Rebuilds conv1/conv2/conv3 (and their norm layers) of the DetectoRS
    bottleneck with grouped convolutions of width
    ``floor(planes * base_width / base_channels) * groups``.
    """

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 **kwargs):
        """Bottleneck block for ResNeXt.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        # Grouped width; groups == 1 degenerates to the plain ResNet width.
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)

        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)

        # Pick the conv cfg for conv2: SAC takes priority, then DCN (unless
        # it falls back on strided convs), otherwise the plain conv cfg.
        if self.with_sac:
            conv2_cfg = self.sac
        elif self.with_dcn and not fallback_on_stride:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            conv2_cfg = self.dcn
        else:
            conv2_cfg = self.conv_cfg
        self.conv2 = build_conv_layer(
            conv2_cfg,
            width,
            width,
            kernel_size=3,
            stride=self.conv2_stride,
            padding=self.dilation,
            dilation=self.dilation,
            groups=groups,
            bias=False)
        self.add_module(self.norm2_name, norm2)

        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)


@BACKBONES.register_module()
class DetectoRS_ResNeXt(DetectoRS_ResNet):
    """ResNeXt backbone for DetectoRS.

    Args:
        groups (int): The number of groups in ResNeXt.
        base_width (int): The base width of ResNeXt.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        self.groups = groups
        self.base_width = base_width
        super(DetectoRS_ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Build one stage, forwarding the ResNeXt group settings."""
        return super().make_res_layer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
3,920
30.620968
77
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/resnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm

from ..builder import BACKBONES
from ..utils import ResLayer


class BasicBlock(BaseModule):
    """Basic residual block (two 3x3 convs), used by ResNet-18/34."""

    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None,
                 init_cfg=None):
        super(BasicBlock, self).__init__(init_cfg)
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            out = self.conv2(out)
            out = self.norm2(out)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out


class Bottleneck(BaseModule):
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None,
                 init_cfg=None):
        """Bottleneck block for ResNet.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(init_cfg)
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert plugins is None or isinstance(plugins, list)
        if plugins is not None:
            allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
            assert all(p['position'] in allowed_position for p in plugins)

        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.plugins = plugins
        self.with_plugins = plugins is not None

        if self.with_plugins:
            # collect plugins for conv1/conv2/conv3
            self.after_conv1_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv1'
            ]
            self.after_conv2_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv2'
            ]
            self.after_conv3_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv3'
            ]

        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                dcn,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)

        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

        if self.with_plugins:
            self.after_conv1_plugin_names = self.make_block_plugins(
                planes, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(
                planes, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(
                planes * self.expansion, self.after_conv3_plugins)

    def make_block_plugins(self, in_channels, plugins):
        """make plugins for block.

        Args:
            in_channels (int): Input channels of plugin.
            plugins (list[dict]): List of plugins cfg to build.

        Returns:
            list[str]: List of the names of plugin.
        """
        assert isinstance(plugins, list)
        plugin_names = []
        for plugin in plugins:
            plugin = plugin.copy()
            name, layer = build_plugin_layer(
                plugin,
                in_channels=in_channels,
                postfix=plugin.pop('postfix', ''))
            assert not hasattr(self, name), f'duplicate plugin {name}'
            self.add_module(name, layer)
            plugin_names.append(name)
        return plugin_names

    def forward_plugin(self, x, plugin_names):
        """Apply the named plugin modules to ``x`` sequentially.

        Args:
            x (torch.Tensor): Input feature map.
            plugin_names (list[str]): Names of the plugin submodules to run.

        Returns:
            torch.Tensor: Output after chaining all plugins.
        """
        out = x
        for name in plugin_names:
            # Bugfix: chain each plugin on the running output ``out`` rather
            # than the original input ``x``; the previous code discarded the
            # result of every plugin except the last one in the list.
            out = getattr(self, name)(out)
        return out

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """nn.Module: normalization layer after the third convolution layer"""
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out


@BACKBONES.register_module()
class ResNet(BaseModule):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        stem_channels (int | None): Number of stem channels. If not specified,
            it will be the same as `base_channels`. Default: None.
        base_channels (int): Number of base channels of res layer. Default: 64.
        in_channels (int): Number of input image channels. Default: 3.
        num_stages (int): Resnet stages. Default: 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        plugins (list[dict]): List of plugins for stages, each dict contains:

            - cfg (dict, required): Cfg dict to build plugin.
            - position (str, required): Position inside block to insert
              plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for last norm layer
            in resblocks to let them behave as identity.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import ResNet
        >>> import torch
        >>> self = ResNet(depth=18)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 64, 8, 8)
        (1, 128, 4, 4)
        (1, 256, 2, 2)
        (1, 512, 1, 1)
    """

    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 depth,
                 in_channels=3,
                 stem_channels=None,
                 base_channels=64,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 deep_stem=False,
                 avg_down=False,
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 plugins=None,
                 with_cp=False,
                 zero_init_residual=True,
                 pretrained=None,
                 init_cfg=None):
        super(ResNet, self).__init__(init_cfg)
        self.zero_init_residual = zero_init_residual
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for resnet')

        block_init_cfg = None
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
            block = self.arch_settings[depth][0]
            if self.zero_init_residual:
                # Zero-init the last norm of each residual branch so the
                # block starts out as an identity mapping.
                if block is BasicBlock:
                    block_init_cfg = dict(
                        type='Constant', val=0, override=dict(name='norm2'))
                elif block is Bottleneck:
                    block_init_cfg = dict(
                        type='Constant', val=0, override=dict(name='norm3'))
        else:
            raise TypeError('pretrained must be a str or None')

        self.depth = depth
        if stem_channels is None:
            stem_channels = base_channels
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.plugins = plugins
        self.block, stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = stem_channels

        self._make_stem_layer(in_channels, stem_channels)

        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            dcn = self.dcn if self.stage_with_dcn[i] else None
            if plugins is not None:
                stage_plugins = self.make_stage_plugins(plugins, i)
            else:
                stage_plugins = None
            # Channels double every stage: 64, 128, 256, 512 by default.
            planes = base_channels * 2**i
            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=planes,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                plugins=stage_plugins,
                init_cfg=block_init_cfg)
            self.inplanes = planes * self.block.expansion
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()

        self.feat_dim = self.block.expansion * base_channels * 2**(
            len(self.stage_blocks) - 1)

    def make_stage_plugins(self, plugins, stage_idx):
        """Make plugins for ResNet ``stage_idx`` th stage.

        Currently we support to insert ``context_block``,
        ``empirical_attention_block``, ``nonlocal_block`` into the backbone
        like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3
        of Bottleneck.

        An example of plugins format could be:

        Examples:
            >>> plugins=[
            ...     dict(cfg=dict(type='xxx', arg1='xxx'),
            ...          stages=(False, True, True, True),
            ...          position='after_conv2'),
            ...     dict(cfg=dict(type='yyy'),
            ...          stages=(True, True, True, True),
            ...          position='after_conv3'),
            ...     dict(cfg=dict(type='zzz', postfix='1'),
            ...          stages=(True, True, True, True),
            ...          position='after_conv3'),
            ...     dict(cfg=dict(type='zzz', postfix='2'),
            ...          stages=(True, True, True, True),
            ...          position='after_conv3')
            ... ]
            >>> self = ResNet(depth=18)
            >>> stage_plugins = self.make_stage_plugins(plugins, 0)
            >>> assert len(stage_plugins) == 3

        Suppose ``stage_idx=0``, the structure of blocks in the stage would be:

        .. code-block:: none

            conv1-> conv2->conv3->yyy->zzz1->zzz2

        Suppose 'stage_idx=1', the structure of blocks in the stage would be:

        .. code-block:: none

            conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2

        If stages is missing, the plugin would be applied to all stages.

        Args:
            plugins (list[dict]): List of plugins cfg to build. The postfix is
                required if multiple same type plugins are inserted.
            stage_idx (int): Index of stage to build

        Returns:
            list[dict]: Plugins for current stage
        """
        stage_plugins = []
        for plugin in plugins:
            plugin = plugin.copy()
            stages = plugin.pop('stages', None)
            assert stages is None or len(stages) == self.num_stages
            # whether to insert plugin into current stage
            if stages is None or stages[stage_idx]:
                stage_plugins.append(plugin)

        return stage_plugins

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(**kwargs)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def _make_stem_layer(self, in_channels, stem_channels):
        """Build the stem: either three 3x3 convs (deep stem) or one 7x7."""
        if self.deep_stem:
            self.stem = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    in_channels,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels)[1],
                nn.ReLU(inplace=True))
        else:
            self.conv1 = build_conv_layer(
                self.conv_cfg,
                in_channels,
                stem_channels,
                kernel_size=7,
                stride=2,
                padding=3,
                bias=False)
            self.norm1_name, norm1 = build_norm_layer(
                self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def _freeze_stages(self):
        """Freeze the stem and the first ``frozen_stages`` res layers."""
        if self.frozen_stages >= 0:
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False

        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def forward(self, x):
        """Forward function."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keep normalization layer
        freezed."""
        super(ResNet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()


@BACKBONES.register_module()
class ResNetV1d(ResNet):
    r"""ResNetV1d variant described in `Bag of Tricks
    <https://arxiv.org/pdf/1812.01187.pdf>`_.

    Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in
    the input stem with three 3x3 convs. And in the downsampling block, a 2x2
    avg_pool with stride 2 is added before conv, whose stride is changed to 1.
    """

    def __init__(self, **kwargs):
        super(ResNetV1d, self).__init__(
            deep_stem=True, avg_down=True, **kwargs)
23,838
34.421991
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/detectors_resnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
                      kaiming_init)
from mmcv.runner import Sequential, load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.utils import get_root_logger
from ..builder import BACKBONES
from .resnet import BasicBlock
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet


class Bottleneck(_Bottleneck):
    r"""Bottleneck for the ResNet backbone in `DetectoRS
    <https://arxiv.org/pdf/2006.02334.pdf>`_.

    This bottleneck allows the users to specify whether to use
    SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).

    Args:
         inplanes (int): The number of input channels.
         planes (int): The number of output channels before expansion.
         rfp_inplanes (int, optional): The number of channels from RFP.
             Default: None. If specified, an additional conv layer will be
             added for ``rfp_feat``. Otherwise, the structure is the same as
             base class.
         sac (dict, optional): Dictionary to construct SAC. Default: None.
         init_cfg (dict or list[dict], optional): Initialization config dict.
             Default: None
    """
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 rfp_inplanes=None,
                 sac=None,
                 init_cfg=None,
                 **kwargs):
        super(Bottleneck, self).__init__(
            inplanes, planes, init_cfg=init_cfg, **kwargs)

        assert sac is None or isinstance(sac, dict)
        self.sac = sac
        self.with_sac = sac is not None
        if self.with_sac:
            # Replace the base class conv2 with a SAC conv of the same shape.
            self.conv2 = build_conv_layer(
                self.sac,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=self.dilation,
                dilation=self.dilation,
                bias=False)

        self.rfp_inplanes = rfp_inplanes
        if self.rfp_inplanes:
            # 1x1 projection that folds RFP features into the residual output.
            self.rfp_conv = build_conv_layer(
                None,
                self.rfp_inplanes,
                planes * self.expansion,
                1,
                stride=1,
                bias=True)
            if init_cfg is None:
                # Zero-init so RFP starts as a no-op contribution.
                self.init_cfg = dict(
                    type='Constant', val=0, override=dict(name='rfp_conv'))

    def rfp_forward(self, x, rfp_feat):
        """The forward function that also takes the RFP features as input."""

        def _residual(x):
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)

            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_residual, x)
        else:
            out = _residual(x)

        if self.rfp_inplanes:
            rfp_feat = self.rfp_conv(rfp_feat)
            out = out + rfp_feat

        return self.relu(out)


class ResLayer(Sequential):
    """ResLayer to build ResNet style backbone for RPF in detectoRS.

    The difference between this module and base class is that we pass
    ``rfp_inplanes`` to the first block.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        downsample_first (bool): Downsample at the first block or last block.
            False for Hourglass, True for ResNet. Default: True
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
    """

    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 avg_down=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 downsample_first=True,
                 rfp_inplanes=None,
                 **kwargs):
        self.block = block
        assert downsample_first, f'downsample_first={downsample_first} is ' \
            'not supported in DetectoRS'

        # Shortcut projection when the spatial size or channel count changes.
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = []
            ds_stride = stride
            if avg_down and stride != 1:
                ds_stride = 1
                downsample.append(
                    nn.AvgPool2d(
                        kernel_size=stride,
                        stride=stride,
                        ceil_mode=True,
                        count_include_pad=False))
            downsample.extend([
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=ds_stride,
                    bias=False),
                build_norm_layer(norm_cfg, planes * block.expansion)[1]
            ])
            downsample = nn.Sequential(*downsample)

        # Only the first block carries stride/downsample and RFP input.
        blocks = [
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                downsample=downsample,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                rfp_inplanes=rfp_inplanes,
                **kwargs)
        ]
        inplanes = planes * block.expansion
        blocks.extend(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                **kwargs) for _ in range(1, num_blocks))

        super(ResLayer, self).__init__(*blocks)


@BACKBONES.register_module()
class DetectoRS_ResNet(ResNet):
    """ResNet backbone for DetectoRS.

    Args:
        sac (dict, optional): Dictionary to construct SAC (Switchable Atrous
            Convolution). Default: None.
        stage_with_sac (list): Which stage to use sac. Default: (False, False,
            False, False).
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        output_img (bool): If ``True``, the input image will be inserted into
            the starting position of output. Default: False.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 sac=None,
                 stage_with_sac=(False, False, False, False),
                 rfp_inplanes=None,
                 output_img=False,
                 pretrained=None,
                 init_cfg=None,
                 **kwargs):
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        self.pretrained = pretrained
        if init_cfg is not None:
            assert isinstance(init_cfg, dict), \
                f'init_cfg must be a dict, but got {type(init_cfg)}'
            if 'type' in init_cfg:
                assert init_cfg.get('type') == 'Pretrained', \
                    'Only can initialize module by loading a pretrained model'
            else:
                raise KeyError('`init_cfg` must contain the key "type"')
            self.pretrained = init_cfg.get('checkpoint')
        self.sac = sac
        self.stage_with_sac = stage_with_sac
        self.rfp_inplanes = rfp_inplanes
        self.output_img = output_img
        super(DetectoRS_ResNet, self).__init__(**kwargs)

        # Rebuild the res layers so that SAC/RFP options take effect.
        self.inplanes = self.stem_channels
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            dcn = self.dcn if self.stage_with_dcn[i] else None
            sac = self.sac if self.stage_with_sac[i] else None
            if self.plugins is not None:
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None
            planes = self.base_channels * 2**i
            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=planes,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=self.with_cp,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                dcn=dcn,
                sac=sac,
                rfp_inplanes=rfp_inplanes if i > 0 else None,
                plugins=stage_plugins)
            self.inplanes = planes * self.block.expansion
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()

    # In order to be properly initialized by RFP
    def init_weights(self):
        # Calling this method will cause parameter initialization exception
        # super(DetectoRS_ResNet, self).init_weights()

        if isinstance(self.pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, self.pretrained, strict=False, logger=logger)
        elif self.pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m.conv2, 'conv_offset'):
                        constant_init(m.conv2.conv_offset, 0)

            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS."""
        return ResLayer(**kwargs)

    def forward(self, x):
        """Forward function."""
        outs = list(super(DetectoRS_ResNet, self).forward(x))
        if self.output_img:
            outs.insert(0, x)
        return tuple(outs)

    def rfp_forward(self, x, rfp_feats):
        """Forward function for RFP."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            # Stage 0 never receives RFP features (mirrors construction above).
            rfp_feat = rfp_feats[i] if i > 0 else None
            for layer in res_layer:
                x = layer.rfp_forward(x, rfp_feat)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
12,736
34.980226
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/ssd_vgg.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch.nn as nn
from mmcv.cnn import VGG
from mmcv.runner import BaseModule

from ..builder import BACKBONES
from ..necks import ssd_neck


@BACKBONES.register_module()
class SSDVGG(VGG, BaseModule):
    """VGG Backbone network for single-shot-detection.

    Args:
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_last_pool (bool): Whether to add a pooling layer at the last
            of the model
        ceil_mode (bool): When True, will use `ceil` instead of `floor`
            to compute the output shape.
        out_indices (Sequence[int]): Output from which stages.
        out_feature_indices (Sequence[int]): Output from which feature map.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
        input_size (int, optional): Deprecated argumment.
            Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional) : Deprecated argumment.
            L2 normalization layer init scale.

    Example:
        >>> self = SSDVGG(input_size=300, depth=11)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 300, 300)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 1024, 19, 19)
        (1, 512, 10, 10)
        (1, 256, 5, 5)
        (1, 256, 3, 3)
        (1, 256, 1, 1)
    """
    extra_setting = {
        300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
        512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
    }

    def __init__(self,
                 depth,
                 with_last_pool=False,
                 ceil_mode=True,
                 out_indices=(3, 4),
                 out_feature_indices=(22, 34),
                 pretrained=None,
                 init_cfg=None,
                 input_size=None,
                 l2_norm_scale=None):
        # TODO: in_channels for mmcv.VGG
        super(SSDVGG, self).__init__(
            depth,
            with_last_pool=with_last_pool,
            ceil_mode=ceil_mode,
            out_indices=out_indices)

        # Extend the VGG trunk with the SSD extra head (pool + dilated
        # fc6/fc7-style convs), appended under sequential integer names.
        extra_layers = (
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
            nn.ReLU(inplace=True),
            nn.Conv2d(1024, 1024, kernel_size=1),
            nn.ReLU(inplace=True),
        )
        for layer in extra_layers:
            self.features.add_module(str(len(self.features)), layer)
        self.out_feature_indices = out_feature_indices

        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'

        if init_cfg is not None:
            self.init_cfg = init_cfg
        elif isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            self.init_cfg = [
                dict(type='Kaiming', layer='Conv2d'),
                dict(type='Constant', val=1, layer='BatchNorm2d'),
                dict(type='Normal', std=0.01, layer='Linear'),
            ]
        else:
            raise TypeError('pretrained must be a str or None')

        if input_size is not None:
            warnings.warn('DeprecationWarning: input_size is deprecated')
        if l2_norm_scale is not None:
            warnings.warn('DeprecationWarning: l2_norm_scale in VGG is '
                          'deprecated, it has been moved to SSDNeck.')

    def init_weights(self, pretrained=None):
        super(VGG, self).init_weights()

    def forward(self, x):
        """Forward function."""
        outs = []
        for i, layer in enumerate(self.features):
            x = layer(x)
            if i in self.out_feature_indices:
                outs.append(x)

        # Single output is unwrapped for convenience; otherwise a tuple.
        return outs[0] if len(outs) == 1 else tuple(outs)


class L2Norm(ssd_neck.L2Norm):

    def __init__(self, **kwargs):
        super(L2Norm, self).__init__(**kwargs)
        warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py '
                      'is deprecated, please use L2Norm in '
                      'mmdet/models/necks/ssd_neck.py instead')
4,705
35.48062
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/resnext.py
# Copyright (c) OpenMMLab. All rights reserved.
import math

from mmcv.cnn import build_conv_layer, build_norm_layer

from ..builder import BACKBONES
from ..utils import ResLayer
from .resnet import Bottleneck as _Bottleneck
from .resnet import ResNet


class Bottleneck(_Bottleneck):
    """Grouped-convolution bottleneck used by ResNeXt stages."""

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 groups=1,
                 base_width=4,
                 base_channels=64,
                 **kwargs):
        """Bottleneck block for ResNeXt.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        # Grouped width; groups == 1 degenerates to the plain ResNet width.
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes *
                               (base_width / base_channels)) * groups

        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(
            self.norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            self.norm_cfg, self.planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)

        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)

        # Use DCN for conv2 unless it falls back on the strided conv.
        if self.with_dcn and not fallback_on_stride:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            conv2_cfg = self.dcn
        else:
            conv2_cfg = self.conv_cfg
        self.conv2 = build_conv_layer(
            conv2_cfg,
            width,
            width,
            kernel_size=3,
            stride=self.conv2_stride,
            padding=self.dilation,
            dilation=self.dilation,
            groups=groups,
            bias=False)
        self.add_module(self.norm2_name, norm2)

        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        if self.with_plugins:
            # The parent class built its plugins against the ResNet widths;
            # drop them and rebuild against the grouped widths.
            self._del_block_plugins(self.after_conv1_plugin_names +
                                    self.after_conv2_plugin_names +
                                    self.after_conv3_plugin_names)
            self.after_conv1_plugin_names = self.make_block_plugins(
                width, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(
                width, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(
                self.planes * self.expansion, self.after_conv3_plugins)

    def _del_block_plugins(self, plugin_names):
        """delete plugins for block if exist.

        Args:
            plugin_names (list[str]): List of plugins name to delete.
        """
        assert isinstance(plugin_names, list)
        for plugin_name in plugin_names:
            del self._modules[plugin_name]


@BACKBONES.register_module()
class ResNeXt(ResNet):
    """ResNeXt backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
        num_stages (int): Resnet stages. Default: 4.
        groups (int): Group of resnext.
        base_width (int): Base width of resnext.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm layer
            in resblocks to let them behave as identity.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        self.groups = groups
        self.base_width = base_width
        super(ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``"""
        return ResLayer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
5,712
35.858065
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/resnest.py
# Copyright (c) OpenMMLab. All rights reserved. import math import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from mmcv.cnn import build_conv_layer, build_norm_layer from mmcv.runner import BaseModule from ..builder import BACKBONES from ..utils import ResLayer from .resnet import Bottleneck as _Bottleneck from .resnet import ResNetV1d class RSoftmax(nn.Module): """Radix Softmax module in ``SplitAttentionConv2d``. Args: radix (int): Radix of input. groups (int): Groups of input. """ def __init__(self, radix, groups): super().__init__() self.radix = radix self.groups = groups def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplitAttentionConv2d(BaseModule): """Split-Attention Conv2d in ResNeSt. Args: in_channels (int): Number of channels in the input feature map. channels (int): Number of intermediate channels. kernel_size (int | tuple[int]): Size of the convolution kernel. stride (int | tuple[int]): Stride of the convolution. padding (int | tuple[int]): Zero-padding added to both sides of dilation (int | tuple[int]): Spacing between kernel elements. groups (int): Number of blocked connections from input channels to output channels. groups (int): Same as nn.Conv2d. radix (int): Radix of SpltAtConv2d. Default: 2 reduction_factor (int): Reduction factor of inter_channels. Default: 4. conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: None. dcn (dict): Config dict for DCN. Default: None. init_cfg (dict or list[dict], optional): Initialization config dict. 
Default: None """ def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, radix=2, reduction_factor=4, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, init_cfg=None): super(SplitAttentionConv2d, self).__init__(init_cfg) inter_channels = max(in_channels * radix // reduction_factor, 32) self.radix = radix self.groups = groups self.channels = channels self.with_dcn = dcn is not None self.dcn = dcn fallback_on_stride = False if self.with_dcn: fallback_on_stride = self.dcn.pop('fallback_on_stride', False) if self.with_dcn and not fallback_on_stride: assert conv_cfg is None, 'conv_cfg must be None for DCN' conv_cfg = dcn self.conv = build_conv_layer( conv_cfg, in_channels, channels * radix, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups * radix, bias=False) # To be consistent with original implementation, starting from 0 self.norm0_name, norm0 = build_norm_layer( norm_cfg, channels * radix, postfix=0) self.add_module(self.norm0_name, norm0) self.relu = nn.ReLU(inplace=True) self.fc1 = build_conv_layer( None, channels, inter_channels, 1, groups=self.groups) self.norm1_name, norm1 = build_norm_layer( norm_cfg, inter_channels, postfix=1) self.add_module(self.norm1_name, norm1) self.fc2 = build_conv_layer( None, inter_channels, channels * radix, 1, groups=self.groups) self.rsoftmax = RSoftmax(radix, groups) @property def norm0(self): """nn.Module: the normalization layer named "norm0" """ return getattr(self, self.norm0_name) @property def norm1(self): """nn.Module: the normalization layer named "norm1" """ return getattr(self, self.norm1_name) def forward(self, x): x = self.conv(x) x = self.norm0(x) x = self.relu(x) batch, rchannel = x.shape[:2] batch = x.size(0) if self.radix > 1: splits = x.view(batch, self.radix, -1, *x.shape[2:]) gap = splits.sum(dim=1) else: gap = x gap = F.adaptive_avg_pool2d(gap, 1) gap = self.fc1(gap) gap = self.norm1(gap) gap = self.relu(gap) atten = self.fc2(gap) atten = 
self.rsoftmax(atten).view(batch, -1, 1, 1) if self.radix > 1: attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) out = torch.sum(attens * splits, dim=1) else: out = atten * x return out.contiguous() class Bottleneck(_Bottleneck): """Bottleneck block for ResNeSt. Args: inplane (int): Input planes of this block. planes (int): Middle planes of this block. groups (int): Groups of conv2. base_width (int): Base of width in terms of base channels. Default: 4. base_channels (int): Base of channels for calculating width. Default: 64. radix (int): Radix of SpltAtConv2d. Default: 2 reduction_factor (int): Reduction factor of inter_channels in SplitAttentionConv2d. Default: 4. avg_down_stride (bool): Whether to use average pool for stride in Bottleneck. Default: True. kwargs (dict): Key word arguments for base class. """ expansion = 4 def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, radix=2, reduction_factor=4, avg_down_stride=True, **kwargs): """Bottleneck block for ResNeSt.""" super(Bottleneck, self).__init__(inplanes, planes, **kwargs) if groups == 1: width = self.planes else: width = math.floor(self.planes * (base_width / base_channels)) * groups self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, width, postfix=1) self.norm3_name, norm3 = build_norm_layer( self.norm_cfg, self.planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) self.with_modulated_dcn = False self.conv2 = SplitAttentionConv2d( width, width, kernel_size=3, stride=1 if self.avg_down_stride else self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, radix=radix, reduction_factor=reduction_factor, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=self.dcn) delattr(self, self.norm2_name) if self.avg_down_stride: self.avd_layer = 
nn.AvgPool2d(3, self.conv2_stride, padding=1) self.conv3 = build_conv_layer( self.conv_cfg, width, self.planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) def forward(self, x): def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv1_plugin_names) out = self.conv2(out) if self.avg_down_stride: out = self.avd_layer(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv2_plugin_names) out = self.conv3(out) out = self.norm3(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv3_plugin_names) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out @BACKBONES.register_module() class ResNeSt(ResNetV1d): """ResNeSt backbone. Args: groups (int): Number of groups of Bottleneck. Default: 1 base_width (int): Base width of Bottleneck. Default: 4 radix (int): Radix of SplitAttentionConv2d. Default: 2 reduction_factor (int): Reduction factor of inter_channels in SplitAttentionConv2d. Default: 4. avg_down_stride (bool): Whether to use average pool for stride in Bottleneck. Default: True. kwargs (dict): Keyword arguments for ResNet. 
""" arch_settings = { 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)), 200: (Bottleneck, (3, 24, 36, 3)) } def __init__(self, groups=1, base_width=4, radix=2, reduction_factor=4, avg_down_stride=True, **kwargs): self.groups = groups self.base_width = base_width self.radix = radix self.reduction_factor = reduction_factor self.avg_down_stride = avg_down_stride super(ResNeSt, self).__init__(**kwargs) def make_res_layer(self, **kwargs): """Pack all blocks in a stage into a ``ResLayer``.""" return ResLayer( groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, radix=self.radix, reduction_factor=self.reduction_factor, avg_down_stride=self.avg_down_stride, **kwargs)
10,579
31.755418
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/csp_darknet.py
# Copyright (c) OpenMMLab. All rights reserved. import math import torch import torch.nn as nn from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule from mmcv.runner import BaseModule from torch.nn.modules.batchnorm import _BatchNorm from ..builder import BACKBONES from ..utils import CSPLayer class Focus(nn.Module): """Focus width and height information into channel space. Args: in_channels (int): The input channels of this Module. out_channels (int): The output channels of this Module. kernel_size (int): The kernel size of the convolution. Default: 1 stride (int): The stride of the convolution. Default: 1 conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN', momentum=0.03, eps=0.001). act_cfg (dict): Config dict for activation layer. Default: dict(type='Swish'). """ def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish')): super().__init__() self.conv = ConvModule( in_channels * 4, out_channels, kernel_size, stride, padding=(kernel_size - 1) // 2, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) def forward(self, x): # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2) patch_top_left = x[..., ::2, ::2] patch_top_right = x[..., ::2, 1::2] patch_bot_left = x[..., 1::2, ::2] patch_bot_right = x[..., 1::2, 1::2] x = torch.cat( ( patch_top_left, patch_bot_left, patch_top_right, patch_bot_right, ), dim=1, ) return self.conv(x) class SPPBottleneck(BaseModule): """Spatial pyramid pooling layer used in YOLOv3-SPP. Args: in_channels (int): The input channels of this Module. out_channels (int): The output channels of this Module. kernel_sizes (tuple[int]): Sequential of kernel sizes of pooling layers. Default: (5, 9, 13). conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. 
norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='Swish'). init_cfg (dict or list[dict], optional): Initialization config dict. Default: None. """ def __init__(self, in_channels, out_channels, kernel_sizes=(5, 9, 13), conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), init_cfg=None): super().__init__(init_cfg) mid_channels = in_channels // 2 self.conv1 = ConvModule( in_channels, mid_channels, 1, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.poolings = nn.ModuleList([ nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) for ks in kernel_sizes ]) conv2_channels = mid_channels * (len(kernel_sizes) + 1) self.conv2 = ConvModule( conv2_channels, out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) def forward(self, x): x = self.conv1(x) x = torch.cat([x] + [pooling(x) for pooling in self.poolings], dim=1) x = self.conv2(x) return x @BACKBONES.register_module() class CSPDarknet(BaseModule): """CSP-Darknet backbone used in YOLOv5 and YOLOX. Args: arch (str): Architechture of CSP-Darknet, from {P5, P6}. Default: P5. deepen_factor (float): Depth multiplier, multiply number of channels in each layer by this amount. Default: 1.0. widen_factor (float): Width multiplier, multiply number of blocks in CSP layer by this amount. Default: 1.0. out_indices (Sequence[int]): Output from which stages. Default: (2, 3, 4). frozen_stages (int): Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. Default: -1. use_depthwise (bool): Whether to use depthwise separable convolution. Default: False. arch_ovewrite(list): Overwrite default arch settings. Default: None. spp_kernal_sizes: (tuple[int]): Sequential of kernel sizes of SPP layers. Default: (5, 9, 13). conv_cfg (dict): Config dict for convolution layer. Default: None. 
norm_cfg (dict): Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True). act_cfg (dict): Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1). norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None. Example: >>> from mmdet.models import CSPDarknet >>> import torch >>> self = CSPDarknet(depth=53) >>> self.eval() >>> inputs = torch.rand(1, 3, 416, 416) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... print(tuple(level_out.shape)) ... (1, 256, 52, 52) (1, 512, 26, 26) (1, 1024, 13, 13) """ # From left to right: # in_channels, out_channels, num_blocks, add_identity, use_spp arch_settings = { 'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False], [256, 512, 9, True, False], [512, 1024, 3, False, True]], 'P6': [[64, 128, 3, True, False], [128, 256, 9, True, False], [256, 512, 9, True, False], [512, 768, 3, True, False], [768, 1024, 3, False, True]] } def __init__(self, arch='P5', deepen_factor=1.0, widen_factor=1.0, out_indices=(2, 3, 4), frozen_stages=-1, use_depthwise=False, arch_ovewrite=None, spp_kernal_sizes=(5, 9, 13), conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), norm_eval=False, init_cfg=dict( type='Kaiming', layer='Conv2d', a=math.sqrt(5), distribution='uniform', mode='fan_in', nonlinearity='leaky_relu')): super().__init__(init_cfg) arch_setting = self.arch_settings[arch] if arch_ovewrite: arch_setting = arch_ovewrite assert set(out_indices).issubset( i for i in range(len(arch_setting) + 1)) if frozen_stages not in range(-1, len(arch_setting) + 1): raise ValueError('frozen_stages must be in range(-1, ' 'len(arch_setting) + 1). 
But received ' f'{frozen_stages}') self.out_indices = out_indices self.frozen_stages = frozen_stages self.use_depthwise = use_depthwise self.norm_eval = norm_eval conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule self.stem = Focus( 3, int(arch_setting[0][0] * widen_factor), kernel_size=3, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.layers = ['stem'] for i, (in_channels, out_channels, num_blocks, add_identity, use_spp) in enumerate(arch_setting): in_channels = int(in_channels * widen_factor) out_channels = int(out_channels * widen_factor) num_blocks = max(round(num_blocks * deepen_factor), 1) stage = [] conv_layer = conv( in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) stage.append(conv_layer) if use_spp: spp = SPPBottleneck( out_channels, out_channels, kernel_sizes=spp_kernal_sizes, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) stage.append(spp) csp_layer = CSPLayer( out_channels, out_channels, num_blocks=num_blocks, add_identity=add_identity, use_depthwise=use_depthwise, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) stage.append(csp_layer) self.add_module(f'stage{i + 1}', nn.Sequential(*stage)) self.layers.append(f'stage{i + 1}') def _freeze_stages(self): if self.frozen_stages >= 0: for i in range(self.frozen_stages + 1): m = getattr(self, self.layers[i]) m.eval() for param in m.parameters(): param.requires_grad = False def train(self, mode=True): super(CSPDarknet, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): if isinstance(m, _BatchNorm): m.eval() def forward(self, x): outs = [] for i, layer_name in enumerate(self.layers): layer = getattr(self, layer_name) x = layer(x) if i in self.out_indices: outs.append(x) return tuple(outs)
10,544
36
77
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .csp_darknet import CSPDarknet from .darknet import Darknet from .detectors_resnet import DetectoRS_ResNet from .detectors_resnext import DetectoRS_ResNeXt from .hourglass import HourglassNet from .hrnet import HRNet from .mobilenet_v2 import MobileNetV2 from .regnet import RegNet from .res2net import Res2Net from .resnest import ResNeSt from .resnet import ResNet, ResNetV1d from .resnext import ResNeXt from .ssd_vgg import SSDVGG from .swin import SwinTransformer from .trident_resnet import TridentResNet __all__ = [ 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet', 'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet', 'SwinTransformer' ]
812
32.875
77
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/hourglass.py
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from ..builder import BACKBONES from ..utils import ResLayer from .resnet import BasicBlock class HourglassModule(BaseModule): """Hourglass Module for HourglassNet backbone. Generate module recursively and use BasicBlock as the base unit. Args: depth (int): Depth of current HourglassModule. stage_channels (list[int]): Feature channels of sub-modules in current and follow-up HourglassModule. stage_blocks (list[int]): Number of sub-modules stacked in current and follow-up HourglassModule. norm_cfg (dict): Dictionary to construct and config norm layer. init_cfg (dict or list[dict], optional): Initialization config dict. Default: None upsample_cfg (dict, optional): Config dict for interpolate layer. Default: `dict(mode='nearest')` """ def __init__(self, depth, stage_channels, stage_blocks, norm_cfg=dict(type='BN', requires_grad=True), init_cfg=None, upsample_cfg=dict(mode='nearest')): super(HourglassModule, self).__init__(init_cfg) self.depth = depth cur_block = stage_blocks[0] next_block = stage_blocks[1] cur_channel = stage_channels[0] next_channel = stage_channels[1] self.up1 = ResLayer( BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg) self.low1 = ResLayer( BasicBlock, cur_channel, next_channel, cur_block, stride=2, norm_cfg=norm_cfg) if self.depth > 1: self.low2 = HourglassModule(depth - 1, stage_channels[1:], stage_blocks[1:]) else: self.low2 = ResLayer( BasicBlock, next_channel, next_channel, next_block, norm_cfg=norm_cfg) self.low3 = ResLayer( BasicBlock, next_channel, cur_channel, cur_block, norm_cfg=norm_cfg, downsample_first=False) self.up2 = F.interpolate self.upsample_cfg = upsample_cfg def forward(self, x): """Forward function.""" up1 = self.up1(x) low1 = self.low1(x) low2 = self.low2(low1) low3 = self.low3(low2) # Fixing `scale factor` (e.g. 
2) is common for upsampling, but # in some cases the spatial size is mismatched and error will arise. if 'scale_factor' in self.upsample_cfg: up2 = self.up2(low3, **self.upsample_cfg) else: shape = up1.shape[2:] up2 = self.up2(low3, size=shape, **self.upsample_cfg) return up1 + up2 @BACKBONES.register_module() class HourglassNet(BaseModule): """HourglassNet backbone. Stacked Hourglass Networks for Human Pose Estimation. More details can be found in the `paper <https://arxiv.org/abs/1603.06937>`_ . Args: downsample_times (int): Downsample times in a HourglassModule. num_stacks (int): Number of HourglassModule modules stacked, 1 for Hourglass-52, 2 for Hourglass-104. stage_channels (list[int]): Feature channel of each sub-module in a HourglassModule. stage_blocks (list[int]): Number of sub-modules stacked in a HourglassModule. feat_channel (int): Feature channel of conv after a HourglassModule. norm_cfg (dict): Dictionary to construct and config norm layer. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None Example: >>> from mmdet.models import HourglassNet >>> import torch >>> self = HourglassNet() >>> self.eval() >>> inputs = torch.rand(1, 3, 511, 511) >>> level_outputs = self.forward(inputs) >>> for level_output in level_outputs: ... 
print(tuple(level_output.shape)) (1, 256, 128, 128) (1, 256, 128, 128) """ def __init__(self, downsample_times=5, num_stacks=2, stage_channels=(256, 256, 384, 384, 384, 512), stage_blocks=(2, 2, 2, 2, 2, 4), feat_channel=256, norm_cfg=dict(type='BN', requires_grad=True), pretrained=None, init_cfg=None): assert init_cfg is None, 'To prevent abnormal initialization ' \ 'behavior, init_cfg is not allowed to be set' super(HourglassNet, self).__init__(init_cfg) self.num_stacks = num_stacks assert self.num_stacks >= 1 assert len(stage_channels) == len(stage_blocks) assert len(stage_channels) > downsample_times cur_channel = stage_channels[0] self.stem = nn.Sequential( ConvModule( 3, cur_channel // 2, 7, padding=3, stride=2, norm_cfg=norm_cfg), ResLayer( BasicBlock, cur_channel // 2, cur_channel, 1, stride=2, norm_cfg=norm_cfg)) self.hourglass_modules = nn.ModuleList([ HourglassModule(downsample_times, stage_channels, stage_blocks) for _ in range(num_stacks) ]) self.inters = ResLayer( BasicBlock, cur_channel, cur_channel, num_stacks - 1, norm_cfg=norm_cfg) self.conv1x1s = nn.ModuleList([ ConvModule( cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) for _ in range(num_stacks - 1) ]) self.out_convs = nn.ModuleList([ ConvModule( cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg) for _ in range(num_stacks) ]) self.remap_convs = nn.ModuleList([ ConvModule( feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) for _ in range(num_stacks - 1) ]) self.relu = nn.ReLU(inplace=True) def init_weights(self): """Init module weights.""" # Training Centripetal Model needs to reset parameters for Conv2d super(HourglassNet, self).init_weights() for m in self.modules(): if isinstance(m, nn.Conv2d): m.reset_parameters() def forward(self, x): """Forward function.""" inter_feat = self.stem(x) out_feats = [] for ind in range(self.num_stacks): single_hourglass = self.hourglass_modules[ind] out_conv = self.out_convs[ind] hourglass_feat = single_hourglass(inter_feat) 
out_feat = out_conv(hourglass_feat) out_feats.append(out_feat) if ind < self.num_stacks - 1: inter_feat = self.conv1x1s[ind]( inter_feat) + self.remap_convs[ind]( out_feat) inter_feat = self.inters[ind](self.relu(inter_feat)) return out_feats
7,494
32.609865
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/res2net.py
# Copyright (c) OpenMMLab. All rights reserved. import math import torch import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import build_conv_layer, build_norm_layer from mmcv.runner import Sequential from ..builder import BACKBONES from .resnet import Bottleneck as _Bottleneck from .resnet import ResNet class Bottle2neck(_Bottleneck): expansion = 4 def __init__(self, inplanes, planes, scales=4, base_width=26, base_channels=64, stage_type='normal', **kwargs): """Bottle2neck block for Res2Net. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer. """ super(Bottle2neck, self).__init__(inplanes, planes, **kwargs) assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.' width = int(math.floor(self.planes * (base_width / base_channels))) self.norm1_name, norm1 = build_norm_layer( self.norm_cfg, width * scales, postfix=1) self.norm3_name, norm3 = build_norm_layer( self.norm_cfg, self.planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( self.conv_cfg, self.inplanes, width * scales, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) if stage_type == 'stage' and self.conv2_stride != 1: self.pool = nn.AvgPool2d( kernel_size=3, stride=self.conv2_stride, padding=1) convs = [] bns = [] fallback_on_stride = False if self.with_dcn: fallback_on_stride = self.dcn.pop('fallback_on_stride', False) if not self.with_dcn or fallback_on_stride: for i in range(scales - 1): convs.append( build_conv_layer( self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)) bns.append( build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) else: assert self.conv_cfg is None, 'conv_cfg must be None for DCN' for i in range(scales - 1): convs.append( build_conv_layer( self.dcn, width, width, 
kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)) bns.append( build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) self.conv3 = build_conv_layer( self.conv_cfg, width * scales, self.planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) self.stage_type = stage_type self.scales = scales self.width = width delattr(self, 'conv2') delattr(self, self.norm2_name) def forward(self, x): """Forward function.""" def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv1_plugin_names) spx = torch.split(out, self.width, 1) sp = self.convs[0](spx[0].contiguous()) sp = self.relu(self.bns[0](sp)) out = sp for i in range(1, self.scales - 1): if self.stage_type == 'stage': sp = spx[i] else: sp = sp + spx[i] sp = self.convs[i](sp.contiguous()) sp = self.relu(self.bns[i](sp)) out = torch.cat((out, sp), 1) if self.stage_type == 'normal' or self.conv2_stride == 1: out = torch.cat((out, spx[self.scales - 1]), 1) elif self.stage_type == 'stage': out = torch.cat((out, self.pool(spx[self.scales - 1])), 1) if self.with_plugins: out = self.forward_plugin(out, self.after_conv2_plugin_names) out = self.conv3(out) out = self.norm3(out) if self.with_plugins: out = self.forward_plugin(out, self.after_conv3_plugin_names) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out class Res2Layer(Sequential): """Res2Layer to build Res2Net style backbone. Args: block (nn.Module): block used to build ResLayer. inplanes (int): inplanes of block. planes (int): planes of block. num_blocks (int): number of blocks. stride (int): stride of the first block. 
Default: 1 avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottle2neck. Default: False conv_cfg (dict): dictionary to construct and config conv layer. Default: None norm_cfg (dict): dictionary to construct and config norm layer. Default: dict(type='BN') scales (int): Scales used in Res2Net. Default: 4 base_width (int): Basic width of each scale. Default: 26 """ def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=True, conv_cfg=None, norm_cfg=dict(type='BN'), scales=4, base_width=26, **kwargs): self.block = block downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = nn.Sequential( nn.AvgPool2d( kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False), build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=1, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1], ) layers = [] layers.append( block( inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, scales=scales, base_width=base_width, stage_type='stage', **kwargs)) inplanes = planes * block.expansion for i in range(1, num_blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, scales=scales, base_width=base_width, **kwargs)) super(Res2Layer, self).__init__(*layers) @BACKBONES.register_module() class Res2Net(ResNet): """Res2Net backbone. Args: scales (int): Scales used in Res2Net. Default: 4 base_width (int): Basic width of each scale. Default: 26 depth (int): Depth of res2net, from {50, 101, 152}. in_channels (int): Number of input image channels. Default: 3. num_stages (int): Res2net stages. Default: 4. strides (Sequence[int]): Strides of the first block of each stage. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. 
If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv avg_down (bool): Use AvgPool instead of stride conv when downsampling in the bottle2neck. frozen_stages (int): Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. norm_cfg (dict): Dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. plugins (list[dict]): List of plugins for stages, each dict contains: - cfg (dict, required): Cfg dict to build plugin. - position (str, required): Position inside block to insert plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. - stages (tuple[bool], optional): Stages to apply plugin, length should be same as 'num_stages'. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): Whether to use zero init for last norm layer in resblocks to let them behave as identity. pretrained (str, optional): model pretrained path. Default: None init_cfg (dict or list[dict], optional): Initialization config dict. Default: None Example: >>> from mmdet.models import Res2Net >>> import torch >>> self = Res2Net(depth=50, scales=4, base_width=26) >>> self.eval() >>> inputs = torch.rand(1, 3, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... 
print(tuple(level_out.shape)) (1, 256, 8, 8) (1, 512, 4, 4) (1, 1024, 2, 2) (1, 2048, 1, 1) """ arch_settings = { 50: (Bottle2neck, (3, 4, 6, 3)), 101: (Bottle2neck, (3, 4, 23, 3)), 152: (Bottle2neck, (3, 8, 36, 3)) } def __init__(self, scales=4, base_width=26, style='pytorch', deep_stem=True, avg_down=True, pretrained=None, init_cfg=None, **kwargs): self.scales = scales self.base_width = base_width super(Res2Net, self).__init__( style='pytorch', deep_stem=True, avg_down=True, pretrained=pretrained, init_cfg=init_cfg, **kwargs) def make_res_layer(self, **kwargs): return Res2Layer( scales=self.scales, base_width=self.base_width, base_channels=self.base_channels, **kwargs)
11,659
34.54878
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/models/backbones/darknet.py
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import warnings

import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm

from ..builder import BACKBONES


class ResBlock(BaseModule):
    """The basic residual block used in Darknet. Each ResBlock consists of two
    ConvModules and the input is added to the final output. Each ConvModule is
    composed of Conv, BN, and LeakyReLU. In YoloV3 paper, the first convLayer
    has half of the number of the filters as much as the second convLayer. The
    first convLayer has filter size of 1x1 and the second one has the filter
    size of 3x3.

    Args:
        in_channels (int): The input channels. Must be even.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 init_cfg=None):
        super(ResBlock, self).__init__(init_cfg)
        assert in_channels % 2 == 0  # ensure the in_channels is even
        half_in_channels = in_channels // 2

        # shortcut
        # Shared conv/norm/act configuration forwarded to both ConvModules.
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        # 1x1 bottleneck halves the channels; 3x3 restores them so the
        # residual addition below is shape-compatible with the input.
        self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg)
        self.conv2 = ConvModule(
            half_in_channels, in_channels, 3, padding=1, **cfg)

    def forward(self, x):
        """Residual forward: conv1 -> conv2, then add the input back."""
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        out = out + residual

        return out


@BACKBONES.register_module()
class Darknet(BaseModule):
    """Darknet backbone.

    Args:
        depth (int): Depth of Darknet. Currently only support 53.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Default: -1.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import Darknet
        >>> import torch
        >>> self = Darknet(depth=53)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """

    # Dict(depth: (layers, channels))
    # For each stage: number of ResBlocks, and (in_channels, out_channels).
    arch_settings = {
        53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),
                               (512, 1024)))
    }

    def __init__(self,
                 depth=53,
                 out_indices=(3, 4, 5),
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 norm_eval=True,
                 pretrained=None,
                 init_cfg=None):
        super(Darknet, self).__init__(init_cfg)
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for darknet')

        self.depth = depth
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.layers, self.channels = self.arch_settings[depth]

        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        # Stem conv; registered first so frozen_stages can cover it too.
        self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)

        # Ordered names of all conv+res stages, used by forward() and
        # _freeze_stages() to iterate the backbone in order.
        self.cr_blocks = ['conv1']
        for i, n_layers in enumerate(self.layers):
            layer_name = f'conv_res_block{i + 1}'
            in_c, out_c = self.channels[i]
            self.add_module(
                layer_name,
                self.make_conv_res_block(in_c, out_c, n_layers, **cfg))
            self.cr_blocks.append(layer_name)

        self.norm_eval = norm_eval

        # `pretrained` is kept only for backward compatibility; it is
        # translated into an equivalent `init_cfg` below.
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                # Default init: Kaiming for convs, constant 1 for norms.
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run all stages, collecting outputs at ``out_indices``."""
        outs = []
        for i, layer_name in enumerate(self.cr_blocks):
            cr_block = getattr(self, layer_name)
            x = cr_block(x)
            if i in self.out_indices:
                outs.append(x)

        return tuple(outs)

    def _freeze_stages(self):
        # Freeze the first `frozen_stages` blocks: eval mode stops BN stat
        # updates; requires_grad=False stops weight updates.
        if self.frozen_stages >= 0:
            for i in range(self.frozen_stages):
                m = getattr(self, self.cr_blocks[i])
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        """Switch train/eval mode, re-applying stage freezing and
        (optionally) keeping all BN layers in eval mode."""
        super(Darknet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()

    @staticmethod
    def make_conv_res_block(in_channels,
                            out_channels,
                            res_repeat,
                            conv_cfg=None,
                            norm_cfg=dict(type='BN', requires_grad=True),
                            act_cfg=dict(type='LeakyReLU',
                                         negative_slope=0.1)):
        """In Darknet backbone, ConvLayer is usually followed by ResBlock.
        This function will make that. The Conv layers always have 3x3 filters
        with stride=2. The number of the filters in Conv layer is the same as
        the out channels of the ResBlock.

        Args:
            in_channels (int): The number of input channels.
            out_channels (int): The number of output channels.
            res_repeat (int): The number of ResBlocks.
            conv_cfg (dict): Config dict for convolution layer.
                Default: None.
            norm_cfg (dict): Dictionary to construct and config norm layer.
                Default: dict(type='BN', requires_grad=True)
            act_cfg (dict): Config dict for activation layer.
                Default: dict(type='LeakyReLU', negative_slope=0.1).
        """

        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        model = nn.Sequential()
        # Stride-2 conv downsamples and changes channel count ...
        model.add_module(
            'conv',
            ConvModule(
                in_channels, out_channels, 3, stride=2, padding=1, **cfg))
        # ... followed by `res_repeat` shape-preserving residual blocks.
        for idx in range(res_repeat):
            model.add_module('res{}'.format(idx),
                             ResBlock(out_channels, **cfg))
        return model
8,233
37.476636
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/custom.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
from collections import OrderedDict

import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from torch.utils.data import Dataset

from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .pipelines import Compose
import ipdb  # NOTE(review): debugging leftover, unused in this module


@DATASETS.register_module()
class CustomDataset(Dataset):
    """Custom dataset for detection.

    The annotation format is shown as follows. The `ann` field is optional for
    testing.

    .. code-block:: none

        [
            {
                'filename': 'a.jpg',
                'width': 1280,
                'height': 720,
                'ann': {
                    'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
                    'labels': <np.ndarray> (n, ),
                    'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
                    'labels_ignore': <np.ndarray> (k, 4) (optional field)
                }
            },
            ...
        ]

    Args:
        ann_file (str): Annotation file path.
        pipeline (list[dict]): Processing pipeline.
        classes (str | Sequence[str], optional): Specify classes to load.
            If is None, ``cls.CLASSES`` will be used. Default: None.
        data_root (str, optional): Data root for ``ann_file``,
            ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
        test_mode (bool, optional): If set True, annotation will not be
            loaded.
        filter_empty_gt (bool, optional): If set true, images without bounding
            boxes of the dataset's classes will be filtered out. This option
            only works when `test_mode=False`, i.e., we never filter images
            during tests.
    """

    CLASSES = None

    def __init__(self,
                 ann_file,
                 pipeline,
                 classes=None,
                 data_root=None,
                 img_prefix='',
                 seg_prefix=None,
                 proposal_file=None,
                 test_mode=False,
                 filter_empty_gt=True):
        self.ann_file = ann_file
        self.data_root = data_root
        self.img_prefix = img_prefix
        self.seg_prefix = seg_prefix
        self.proposal_file = proposal_file
        self.test_mode = test_mode
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = self.get_classes(classes)

        # join paths if data_root is specified
        if self.data_root is not None:
            if not osp.isabs(self.ann_file):
                self.ann_file = osp.join(self.data_root, self.ann_file)
            if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
                self.img_prefix = osp.join(self.data_root, self.img_prefix)
            if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
            if not (self.proposal_file is None
                    or osp.isabs(self.proposal_file)):
                self.proposal_file = osp.join(self.data_root,
                                              self.proposal_file)
        # load annotations (and proposals)
        self.data_infos = self.load_annotations(self.ann_file)

        if self.proposal_file is not None:
            self.proposals = self.load_proposals(self.proposal_file)
        else:
            self.proposals = None

        # filter images too small and containing no annotations
        if not test_mode:
            valid_inds = self._filter_imgs()
            self.data_infos = [self.data_infos[i] for i in valid_inds]
            if self.proposals is not None:
                self.proposals = [self.proposals[i] for i in valid_inds]
            # set group flag for the sampler
            self._set_group_flag()

        # processing pipeline
        self.pipeline = Compose(pipeline)

    def __len__(self):
        """Total number of samples of data."""
        return len(self.data_infos)

    def load_annotations(self, ann_file):
        """Load annotation from annotation file."""
        return mmcv.load(ann_file)

    def load_proposals(self, proposal_file):
        """Load proposal from proposal file."""
        return mmcv.load(proposal_file)

    def get_ann_info(self, idx):
        """Get annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """

        return self.data_infos[idx]['ann']

    def get_cat_ids(self, idx):
        """Get category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """

        # FIX: `np.int` was a deprecated alias for the builtin `int` and was
        # removed in NumPy 1.24; use the explicit `np.int64` dtype instead.
        return self.data_infos[idx]['ann']['labels'].astype(np.int64).tolist()

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['img_prefix'] = self.img_prefix
        results['seg_prefix'] = self.seg_prefix
        results['proposal_file'] = self.proposal_file
        results['bbox_fields'] = []
        results['mask_fields'] = []
        results['seg_fields'] = []

    def _filter_imgs(self, min_size=32):
        """Filter images too small."""
        if self.filter_empty_gt:
            warnings.warn(
                'CustomDataset does not support filtering empty gt images.')
        valid_inds = []
        for i, img_info in enumerate(self.data_infos):
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
        return valid_inds

    def _set_group_flag(self):
        """Set flag according to image aspect ratio.

        Images with aspect ratio greater than 1 will be set as group 1,
        otherwise group 0.
        """
        self.flag = np.zeros(len(self), dtype=np.uint8)
        for i in range(len(self)):
            img_info = self.data_infos[i]
            if img_info['width'] / img_info['height'] > 1:
                self.flag[i] = 1

    def _rand_another(self, idx):
        """Get another random index from the same group as the given index."""
        pool = np.where(self.flag == self.flag[idx])[0]
        return np.random.choice(pool)

    def __getitem__(self, idx):
        """Get training/test data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training/test data (with annotation if `test_mode` is set \
                True).
        """

        if self.test_mode:
            return self.prepare_test_img(idx)
        while True:
            # A pipeline may return None (e.g. all gt boxes cropped away);
            # retry with a random sample from the same aspect-ratio group.
            data = self.prepare_train_img(idx)
            if data is None:
                idx = self._rand_another(idx)
                continue
            return data

    def prepare_train_img(self, idx):
        """Get training data and annotations after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training data and annotation after pipeline with new keys \
                introduced by pipeline.
        """

        img_info = self.data_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        if self.proposals is not None:
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Get testing data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by \
                pipeline.
        """

        img_info = self.data_infos[idx]
        results = dict(img_info=img_info)
        if self.proposals is not None:
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    @classmethod
    def get_classes(cls, classes=None):
        """Get class names of current dataset.

        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.

        Returns:
            tuple[str] or list[str]: Names of categories of the dataset.
        """
        if classes is None:
            return cls.CLASSES

        if isinstance(classes, str):
            # take it as a file path
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')

        return class_names

    def format_results(self, results, **kwargs):
        """Place holder to format result to dataset specific output."""

    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 proposal_nums=(100, 300, 1000),
                 iou_thr=0.5,
                 scale_ranges=None):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. Default: 0.5.
            scale_ranges (list[tuple] | None): Scale ranges for evaluating
                mAP. Default: None.
        """

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
        if metric == 'mAP':
            assert isinstance(iou_thrs, list)
            mean_aps = []
            for iou_thr in iou_thrs:
                print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
                mean_ap, _ = eval_map(
                    results,
                    annotations,
                    scale_ranges=scale_ranges,
                    iou_thr=iou_thr,
                    dataset=self.CLASSES,
                    logger=logger)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(
                gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thrs):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results

    def __repr__(self):
        """Print the number of instance number."""
        dataset_type = 'Test' if self.test_mode else 'Train'
        result = (f'\n{self.__class__.__name__} {dataset_type} dataset '
                  f'with number of images {len(self)}, '
                  f'and instance counts: \n')
        if self.CLASSES is None:
            result += 'Category names are not provided. \n'
            return result
        instance_count = np.zeros(len(self.CLASSES) + 1).astype(int)
        # count the instance number in each image
        for idx in range(len(self)):
            label = self.get_ann_info(idx)['labels']
            unique, counts = np.unique(label, return_counts=True)
            if len(unique) > 0:
                # add the occurrence number to each class
                instance_count[unique] += counts
            else:
                # background is the last index
                instance_count[-1] += 1
        # create a table with category count; header repeated for the
        # 5-category-per-row layout built below
        table_data = [['category', 'count'] * 5]
        row_data = []
        for cls, count in enumerate(instance_count):
            if cls < len(self.CLASSES):
                row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}']
            else:
                # add the background number
                row_data += ['-1 background', f'{count}']
            if len(row_data) == 10:
                table_data.append(row_data)
                row_data = []
        if len(row_data) >= 2:
            if row_data[-1] == '0':
                row_data = row_data[:-2]
            if len(row_data) >= 2:
                table_data.append([])
                table_data.append(row_data)

        table = AsciiTable(table_data)
        result += table.table
        return result
13,457
35.570652
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/deepfashion.py
# Copyright (c) OpenMMLab. All rights reserved. from .builder import DATASETS from .coco import CocoDataset @DATASETS.register_module() class DeepFashionDataset(CocoDataset): CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', 'skin', 'face')
365
29.5
78
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/voc.py
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict

from mmcv.utils import print_log

from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset


@DATASETS.register_module()
class VOCDataset(XMLDataset):
    """PASCAL VOC dataset (XML-style annotations) for detection."""

    CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
               'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
               'tvmonitor')

    def __init__(self, **kwargs):
        super(VOCDataset, self).__init__(**kwargs)
        # The dataset year decides the AP protocol used in `evaluate` below;
        # it is inferred from the directory name (e.g. '.../VOC2007/...').
        if 'VOC2007' in self.img_prefix:
            self.year = 2007
        elif 'VOC2012' in self.img_prefix:
            self.year = 2012
        else:
            raise ValueError('Cannot infer dataset year from img_prefix')

    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 proposal_nums=(100, 300, 1000),
                 iou_thr=0.5,
                 scale_ranges=None):
        """Evaluate in VOC protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'mAP', 'recall'.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. Default: 0.5.
            scale_ranges (list[tuple], optional): Scale ranges for evaluating
                mAP. If not specified, all bounding boxes would be included in
                evaluation. Default: None.

        Returns:
            dict[str, float]: AP/recall metrics.
        """

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
        if metric == 'mAP':
            assert isinstance(iou_thrs, list)
            if self.year == 2007:
                # NOTE(review): 'voc07' presumably selects the 11-point AP
                # protocol inside eval_map; 2012 falls back to per-class
                # names — confirm against mmdet.core.eval_map.
                ds_name = 'voc07'
            else:
                ds_name = self.CLASSES
            mean_aps = []
            for iou_thr in iou_thrs:
                print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
                # Follow the official implementation,
                # http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar
                # we should use the legacy coordinate system in mmdet 1.x,
                # which means w, h should be computed as 'x2 - x1 + 1` and
                # `y2 - y1 + 1`
                mean_ap, _ = eval_map(
                    results,
                    annotations,
                    scale_ranges=None,
                    iou_thr=iou_thr,
                    dataset=ds_name,
                    logger=logger,
                    use_legacy_coordinate=True)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(
                gt_bboxes,
                results,
                proposal_nums,
                iou_thrs,
                logger=logger,
                use_legacy_coordinate=True)
            for i, num in enumerate(proposal_nums):
                for j, iou_thr in enumerate(iou_thrs):
                    eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
39.54717
90
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/cityscapes.py
# Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa
# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa

import glob
import os
import os.path as osp
import tempfile
from collections import OrderedDict

import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from mmcv.utils import print_log

from .builder import DATASETS
from .coco import CocoDataset


@DATASETS.register_module()
class CityscapesDataset(CocoDataset):
    """Cityscapes instance-segmentation dataset in COCO format."""

    CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train',
               'motorcycle', 'bicycle')

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # obtain images that contain annotation
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        # obtain images that contain annotations of the required categories
        ids_in_cat = set()
        for i, class_id in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        # merge the image id sets of the two conditions and use the merged set
        # to filter out images if self.filter_empty_gt=True
        ids_in_cat &= ids_with_ann

        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = img_info['id']
            ann_ids = self.coco.getAnnIds(imgIds=[img_id])
            ann_info = self.coco.loadAnns(ann_ids)
            # an image whose annotations are all crowd regions has no
            # usable instance ground truth
            all_iscrowd = all([_['iscrowd'] for _ in ann_info])
            if self.filter_empty_gt and (self.img_ids[i] not in ids_in_cat
                                         or all_iscrowd):
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            img_info (dict): Image info of an image.
            ann_info (list[dict]): Annotation info of an image.

        Returns:
            dict: A dict containing the following keys: bboxes, \
                bboxes_ignore, labels, masks, seg_map. \
                "masks" are already decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []

        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            if ann['category_id'] not in self.cat_ids:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            if ann.get('iscrowd', False):
                # crowd regions are only used to ignore overlapping
                # detections, not as positive ground truth
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann['segmentation'])

        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)

        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_masks_ann,
            seg_map=img_info['segm_file'])

        return ann

    def results2txt(self, results, outfile_prefix):
        """Dump the detection results to a txt file.

        Args:
            results (list[list | tuple]): Testing results of the
                dataset.
            outfile_prefix (str): The filename prefix of the json files.
                If the prefix is "somepath/xxx",
                the txt files will be named "somepath/xxx.txt".

        Returns:
            list[str]: Result txt files which contains corresponding \
                instance segmentation images.
        """
        try:
            import cityscapesscripts.helpers.labels as CSLabels
        except ImportError:
            raise ImportError('Please run "pip install citscapesscripts" to '
                              'install cityscapesscripts first.')
        result_files = []
        os.makedirs(outfile_prefix, exist_ok=True)
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            filename = self.data_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')

            bbox_result, segm_result = result
            bboxes = np.vstack(bbox_result)
            # segm results
            if isinstance(segm_result, tuple):
                # Some detectors use different scores for bbox and mask,
                # like Mask Scoring R-CNN. Score of segm will be used instead
                # of bbox score.
                segms = mmcv.concat_list(segm_result[0])
                mask_score = segm_result[1]
            else:
                # use bbox score for mask score
                segms = mmcv.concat_list(segm_result)
                mask_score = [bbox[-1] for bbox in bboxes]
            labels = [
                np.full(bbox.shape[0], i, dtype=np.int32)
                for i, bbox in enumerate(bbox_result)
            ]
            labels = np.concatenate(labels)

            assert len(bboxes) == len(segms) == len(labels)
            num_instances = len(bboxes)
            prog_bar.update()
            with open(pred_txt, 'w') as fout:
                for i in range(num_instances):
                    pred_class = labels[i]
                    classes = self.CLASSES[pred_class]
                    class_id = CSLabels.name2label[classes].id
                    score = mask_score[i]
                    mask = maskUtils.decode(segms[i]).astype(np.uint8)
                    png_filename = osp.join(outfile_prefix,
                                            basename + f'_{i}_{classes}.png')
                    mmcv.imwrite(mask, png_filename)
                    fout.write(f'{osp.basename(png_filename)} {class_id} '
                               f'{score}\n')
            result_files.append(pred_txt)

        return result_files

    def format_results(self, results, txtfile_prefix=None):
        """Format the results to txt (standard format for Cityscapes
        evaluation).

        Args:
            results (list): Testing results of the dataset.
            txtfile_prefix (str | None): The prefix of txt files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing \
                the json filepaths, tmp_dir is the temporal directory created \
                for saving txt/png files when txtfile_prefix is not specified.
        """
        # FIX: the original body repeated these two assertions verbatim a
        # second time; the duplicate block has been removed.
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        if txtfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            txtfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2txt(results, txtfile_prefix)

        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 outfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in Cityscapes/COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            outfile_prefix (str | None): The prefix of output file. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If results are evaluated with COCO protocol, it
                would be the prefix of output json file. For example, the
                metric is 'bbox' and 'segm', then json files would be
                "a/b/prefix.bbox.json" and "a/b/prefix.segm.json".
                If results are evaluated with cityscapes protocol, it would be
                the prefix of output txt/png files. The output files would be
                png images under folder "a/b/prefix/xxx/" and the file name of
                images would be written into a txt file
                "a/b/prefix/xxx_pred.txt", where "xxx" is the video name of
                cityscapes. If not specified, a temp file will be created.
                Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU threshold used for evaluating
                recalls. If set to a list, the average recall of all IoUs will
                also be computed. Default: 0.5.

        Returns:
            dict[str, float]: COCO style evaluation metric or cityscapes mAP \
                and AP@50.
        """
        eval_results = dict()

        metrics = metric.copy() if isinstance(metric, list) else [metric]

        if 'cityscapes' in metrics:
            eval_results.update(
                self._evaluate_cityscapes(results, outfile_prefix, logger))
            metrics.remove('cityscapes')

        # left metrics are all coco metric
        if len(metrics) > 0:
            # create CocoDataset with CityscapesDataset annotation
            self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,
                                    None, self.data_root, self.img_prefix,
                                    self.seg_prefix, self.proposal_file,
                                    self.test_mode, self.filter_empty_gt)
            # TODO: remove this in the future
            # reload annotations of correct class
            self_coco.CLASSES = self.CLASSES
            self_coco.data_infos = self_coco.load_annotations(self.ann_file)
            eval_results.update(
                self_coco.evaluate(results, metrics, logger, outfile_prefix,
                                   classwise, proposal_nums, iou_thrs))

        return eval_results

    def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
        """Evaluation in Cityscapes protocol.

        Args:
            results (list): Testing results of the dataset.
            txtfile_prefix (str | None): The prefix of output txt file
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]: Cityscapes evaluation results, contains 'mAP' \
                and 'AP@50'.
        """

        try:
            import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval  # noqa
        except ImportError:
            raise ImportError('Please run "pip install citscapesscripts" to '
                              'install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)

        result_files, tmp_dir = self.format_results(results, txtfile_prefix)

        if tmp_dir is None:
            result_dir = osp.join(txtfile_prefix, 'results')
        else:
            result_dir = osp.join(tmp_dir.name, 'results')

        eval_results = OrderedDict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)

        # set global states in cityscapes evaluation API
        # (the cityscapesscripts evaluator is configured via module-level
        # `args`, not function parameters)
        CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
        CSEval.args.predictionPath = os.path.abspath(result_dir)
        CSEval.args.predictionWalk = None
        CSEval.args.JSONOutput = False
        CSEval.args.colorized = False
        CSEval.args.gtInstancesFile = os.path.join(result_dir,
                                                   'gtInstances.json')
        CSEval.args.groundTruthSearch = os.path.join(
            self.img_prefix.replace('leftImg8bit', 'gtFine'),
            '*/*_gtFine_instanceIds.png')

        groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
        assert len(groundTruthImgList), 'Cannot find ground truth images' \
            f' in {CSEval.args.groundTruthSearch}.'
        predictionImgList = []
        for gt in groundTruthImgList:
            predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
        CSEval_results = CSEval.evaluateImgLists(predictionImgList,
                                                 groundTruthImgList,
                                                 CSEval.args)['averages']

        eval_results['mAP'] = CSEval_results['allAp']
        eval_results['AP@50'] = CSEval_results['allAp50%']
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
14,336
41.669643
135
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings

from mmcv.cnn import VGG
from mmcv.runner.hooks import HOOKS, Hook

from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import LoadAnnotations, LoadImageFromFile
from mmdet.models.dense_heads import GARPNHead, RPNHead
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead


def replace_ImageToTensor(pipelines):
    """Replace the ImageToTensor transform in a data pipeline to
    DefaultFormatBundle, which is normally useful in batch inference.

    Args:
        pipelines (list[dict]): Data pipeline configs.

    Returns:
        list: The new pipeline list with all ImageToTensor replaced by
            DefaultFormatBundle.

    Examples:
        >>> pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(
        ...        type='MultiScaleFlipAug',
        ...        img_scale=(1333, 800),
        ...        flip=False,
        ...        transforms=[
        ...            dict(type='Resize', keep_ratio=True),
        ...            dict(type='RandomFlip'),
        ...            dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
        ...            dict(type='Pad', size_divisor=32),
        ...            dict(type='ImageToTensor', keys=['img']),
        ...            dict(type='Collect', keys=['img']),
        ...        ])
        ...    ]
        >>> expected_pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(
        ...        type='MultiScaleFlipAug',
        ...        img_scale=(1333, 800),
        ...        flip=False,
        ...        transforms=[
        ...            dict(type='Resize', keep_ratio=True),
        ...            dict(type='RandomFlip'),
        ...            dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
        ...            dict(type='Pad', size_divisor=32),
        ...            dict(type='DefaultFormatBundle'),
        ...            dict(type='Collect', keys=['img']),
        ...        ])
        ...    ]
        >>> assert expected_pipelines == replace_ImageToTensor(pipelines)
    """
    # Deep-copy so the caller's config is never mutated in place.
    pipelines = copy.deepcopy(pipelines)
    for i, pipeline in enumerate(pipelines):
        if pipeline['type'] == 'MultiScaleFlipAug':
            assert 'transforms' in pipeline
            # Recurse into the wrapped transforms of test-time augmentation.
            pipeline['transforms'] = replace_ImageToTensor(
                pipeline['transforms'])
        elif pipeline['type'] == 'ImageToTensor':
            warnings.warn(
                '"ImageToTensor" pipeline is replaced by '
                '"DefaultFormatBundle" for batch inference. It is '
                'recommended to manually replace it in the test '
                'data pipeline in your config file.', UserWarning)
            pipelines[i] = {'type': 'DefaultFormatBundle'}
    return pipelines


def get_loading_pipeline(pipeline):
    """Only keep loading image and annotations related configuration.

    Args:
        pipeline (list[dict]): Data pipeline configs.

    Returns:
        list[dict]: The new pipeline list with only keep
            loading image and annotations related configuration.

    Examples:
        >>> pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(type='LoadAnnotations', with_bbox=True),
        ...    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        ...    dict(type='RandomFlip', flip_ratio=0.5),
        ...    dict(type='Normalize', **img_norm_cfg),
        ...    dict(type='Pad', size_divisor=32),
        ...    dict(type='DefaultFormatBundle'),
        ...    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
        ...    ]
        >>> expected_pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(type='LoadAnnotations', with_bbox=True)
        ...    ]
        >>> assert expected_pipelines ==\
        ...        get_loading_pipeline(pipelines)
    """
    loading_pipeline_cfg = []
    for cfg in pipeline:
        obj_cls = PIPELINES.get(cfg['type'])
        # TODO:use more elegant way to distinguish loading modules
        if obj_cls is not None and obj_cls in (LoadImageFromFile,
                                               LoadAnnotations):
            loading_pipeline_cfg.append(cfg)
    assert len(loading_pipeline_cfg) == 2, \
        'The data pipeline in your config file must include ' \
        'loading image and annotations related pipeline.'
    return loading_pipeline_cfg


@HOOKS.register_module()
class NumClassCheckHook(Hook):
    """Hook that checks head ``num_classes`` against dataset ``CLASSES``."""

    def _check_head(self, runner):
        """Check whether the `num_classes` in head matches the length of
        `CLASSES` in `dataset`.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        model = runner.model
        dataset = runner.data_loader.dataset
        if dataset.CLASSES is None:
            # FIX: the concatenated f-strings previously produced
            # '...andcheck...'; separator spaces added.
            runner.logger.warning(
                f'Please set `CLASSES` '
                f'in the {dataset.__class__.__name__} and '
                f'check if it is consistent with the `num_classes` '
                f'of head')
        else:
            # FIX: added the missing spaces between the concatenated
            # f-string fragments ('...}should...', '...str.Add...').
            assert type(dataset.CLASSES) is not str, \
                (f'`CLASSES` in {dataset.__class__.__name__} '
                 f'should be a tuple of str. '
                 f'Add comma if number of classes is 1 as '
                 f'CLASSES = ({dataset.CLASSES},)')
            for name, module in model.named_modules():
                # RPN-style and semantic heads have their own class-count
                # conventions, so they are excluded from the check.
                if hasattr(module, 'num_classes') and not isinstance(
                        module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)):
                    # FIX: grammar 'does not matches' -> 'does not match',
                    # and balanced the stray ')' around the class count.
                    assert module.num_classes == len(dataset.CLASSES), \
                        (f'The `num_classes` ({module.num_classes}) in '
                         f'{module.__class__.__name__} of '
                         f'{model.__class__.__name__} does not match '
                         f'the length of `CLASSES` '
                         f'({len(dataset.CLASSES)}) in '
                         f'{dataset.__class__.__name__}')

    def before_train_epoch(self, runner):
        """Check whether the training dataset is compatible with head.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        self._check_head(runner)

    def before_val_epoch(self, runner):
        """Check whether the dataset in val epoch is compatible with head.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        self._check_head(runner)
6,533
38.6
78
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/dataset_wrappers.py
# Copyright (c) OpenMMLab. All rights reserved. import bisect import collections import copy import math from collections import defaultdict import numpy as np from mmcv.utils import build_from_cfg, print_log from torch.utils.data.dataset import ConcatDataset as _ConcatDataset from .builder import DATASETS, PIPELINES from .coco import CocoDataset import ipdb @DATASETS.register_module() class ConcatDataset(_ConcatDataset): """A wrapper of concatenated dataset. Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but concat the group flag for image aspect ratio. Args: datasets (list[:obj:`Dataset`]): A list of datasets. separate_eval (bool): Whether to evaluate the results separately if it is used as validation dataset. Defaults to True. """ def __init__(self, datasets, separate_eval=True): super(ConcatDataset, self).__init__(datasets) self.CLASSES = datasets[0].CLASSES self.separate_eval = separate_eval if not separate_eval: if any([isinstance(ds, CocoDataset) for ds in datasets]): raise NotImplementedError( 'Evaluating concatenated CocoDataset as a whole is not' ' supported! Please set "separate_eval=True"') elif len(set([type(ds) for ds in datasets])) != 1: raise NotImplementedError( 'All the datasets should have same types') if hasattr(datasets[0], 'flag'): flags = [] for i in range(0, len(datasets)): flags.append(datasets[i].flag) self.flag = np.concatenate(flags) def get_cat_ids(self, idx): """Get category ids of concatenated dataset by index. Args: idx (int): Index of data. Returns: list[int]: All categories in the image of specified index. 
""" if idx < 0: if -idx > len(self): raise ValueError( 'absolute value of index should not exceed dataset length') idx = len(self) + idx dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) if dataset_idx == 0: sample_idx = idx else: sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] return self.datasets[dataset_idx].get_cat_ids(sample_idx) def evaluate(self, results, logger=None, **kwargs): """Evaluate the results. Args: results (list[list | tuple]): Testing results of the dataset. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. Returns: dict[str: float]: AP results of the total dataset or each separate dataset if `self.separate_eval=True`. """ assert len(results) == self.cumulative_sizes[-1], \ ('Dataset and results have different sizes: ' f'{self.cumulative_sizes[-1]} v.s. {len(results)}') # Check whether all the datasets support evaluation for dataset in self.datasets: assert hasattr(dataset, 'evaluate'), \ f'{type(dataset)} does not implement evaluate function' if self.separate_eval: dataset_idx = -1 total_eval_results = dict() for size, dataset in zip(self.cumulative_sizes, self.datasets): start_idx = 0 if dataset_idx == -1 else \ self.cumulative_sizes[dataset_idx] end_idx = self.cumulative_sizes[dataset_idx + 1] results_per_dataset = results[start_idx:end_idx] print_log( f'\nEvaluateing {dataset.ann_file} with ' f'{len(results_per_dataset)} images now', logger=logger) eval_results_per_dataset = dataset.evaluate( results_per_dataset, logger=logger, **kwargs) dataset_idx += 1 for k, v in eval_results_per_dataset.items(): total_eval_results.update({f'{dataset_idx}_{k}': v}) return total_eval_results elif any([isinstance(ds, CocoDataset) for ds in self.datasets]): raise NotImplementedError( 'Evaluating concatenated CocoDataset as a whole is not' ' supported! 
Please set "separate_eval=True"') elif len(set([type(ds) for ds in self.datasets])) != 1: raise NotImplementedError( 'All the datasets should have same types') else: original_data_infos = self.datasets[0].data_infos self.datasets[0].data_infos = sum( [dataset.data_infos for dataset in self.datasets], []) eval_results = self.datasets[0].evaluate( results, logger=logger, **kwargs) self.datasets[0].data_infos = original_data_infos return eval_results @DATASETS.register_module() class RepeatDataset: """A wrapper of repeated dataset. The length of repeated dataset will be `times` larger than the original dataset. This is useful when the data loading time is long but the dataset is small. Using RepeatDataset can reduce the data loading time between epochs. Args: dataset (:obj:`Dataset`): The dataset to be repeated. times (int): Repeat times. """ def __init__(self, dataset, times): self.dataset = dataset self.times = times self.CLASSES = dataset.CLASSES if hasattr(self.dataset, 'flag'): self.flag = np.tile(self.dataset.flag, times) self._ori_len = len(self.dataset) def __getitem__(self, idx): return self.dataset[idx % self._ori_len] def get_cat_ids(self, idx): """Get category ids of repeat dataset by index. Args: idx (int): Index of data. Returns: list[int]: All categories in the image of specified index. """ return self.dataset.get_cat_ids(idx % self._ori_len) def __len__(self): """Length after repetition.""" return self.times * self._ori_len # Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa @DATASETS.register_module() class ClassBalancedDataset: """A wrapper of repeated dataset with repeat factor. Suitable for training on class imbalanced datasets like LVIS. Following the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_, in each epoch, an image may appear multiple times based on its "repeat factor". 
The repeat factor for an image is a function of the frequency the rarest category labeled in that image. The "frequency of category c" in [0, 1] is defined by the fraction of images in the training set (without repeats) in which category c appears. The dataset needs to instantiate :func:`self.get_cat_ids` to support ClassBalancedDataset. The repeat factor is computed as followed. 1. For each category c, compute the fraction # of images that contain it: :math:`f(c)` 2. For each category c, compute the category-level repeat factor: :math:`r(c) = max(1, sqrt(t/f(c)))` 3. For each image I, compute the image-level repeat factor: :math:`r(I) = max_{c in I} r(c)` Args: dataset (:obj:`CustomDataset`): The dataset to be repeated. oversample_thr (float): frequency threshold below which data is repeated. For categories with ``f_c >= oversample_thr``, there is no oversampling. For categories with ``f_c < oversample_thr``, the degree of oversampling following the square-root inverse frequency heuristic above. filter_empty_gt (bool, optional): If set true, images without bounding boxes will not be oversampled. Otherwise, they will be categorized as the pure background class and involved into the oversampling. Default: True. 
""" def __init__(self, dataset, oversample_thr, filter_empty_gt=True): self.dataset = dataset self.oversample_thr = oversample_thr self.filter_empty_gt = filter_empty_gt self.CLASSES = dataset.CLASSES repeat_factors = self._get_repeat_factors(dataset, oversample_thr) repeat_indices = [] for dataset_idx, repeat_factor in enumerate(repeat_factors): repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor)) self.repeat_indices = repeat_indices flags = [] if hasattr(self.dataset, 'flag'): for flag, repeat_factor in zip(self.dataset.flag, repeat_factors): flags.extend([flag] * int(math.ceil(repeat_factor))) assert len(flags) == len(repeat_indices) self.flag = np.asarray(flags, dtype=np.uint8) def _get_repeat_factors(self, dataset, repeat_thr): """Get repeat factor for each images in the dataset. Args: dataset (:obj:`CustomDataset`): The dataset repeat_thr (float): The threshold of frequency. If an image contains the categories whose frequency below the threshold, it would be repeated. Returns: list[float]: The repeat factors for each images in the dataset. """ # 1. For each category c, compute the fraction # of images # that contain it: f(c) category_freq = defaultdict(int) num_images = len(dataset) for idx in range(num_images): cat_ids = set(self.dataset.get_cat_ids(idx)) if len(cat_ids) == 0 and not self.filter_empty_gt: cat_ids = set([len(self.CLASSES)]) for cat_id in cat_ids: category_freq[cat_id] += 1 for k, v in category_freq.items(): category_freq[k] = v / num_images # 2. For each category c, compute the category-level repeat factor: # r(c) = max(1, sqrt(t/f(c))) category_repeat = { cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq)) for cat_id, cat_freq in category_freq.items() } # 3. 
For each image I, compute the image-level repeat factor: # r(I) = max_{c in I} r(c) repeat_factors = [] for idx in range(num_images): cat_ids = set(self.dataset.get_cat_ids(idx)) if len(cat_ids) == 0 and not self.filter_empty_gt: cat_ids = set([len(self.CLASSES)]) repeat_factor = 1 if len(cat_ids) > 0: repeat_factor = max( {category_repeat[cat_id] for cat_id in cat_ids}) repeat_factors.append(repeat_factor) return repeat_factors def __getitem__(self, idx): ori_index = self.repeat_indices[idx] return self.dataset[ori_index] def __len__(self): """Length after repetition.""" return len(self.repeat_indices) @DATASETS.register_module() class MultiImageMixDataset: """A wrapper of multiple images mixed dataset. Suitable for training on multiple images mixed data augmentation like mosaic and mixup. For the augmentation pipeline of mixed image data, the `get_indexes` method needs to be provided to obtain the image indexes, and you can set `skip_flags` to change the pipeline running process. At the same time, we provide the `dynamic_scale` parameter to dynamically change the output image size. Args: dataset (:obj:`CustomDataset`): The dataset to be mixed. pipeline (Sequence[dict]): Sequence of transform object or config dict to be composed. dynamic_scale (tuple[int], optional): The image scale can be changed dynamically. Default to None. skip_type_keys (list[str], optional): Sequence of type string to be skip pipeline. Default to None. 
""" def __init__(self, dataset, pipeline, dynamic_scale=None, skip_type_keys=None): assert isinstance(pipeline, collections.abc.Sequence) if skip_type_keys is not None: assert all([ isinstance(skip_type_key, str) for skip_type_key in skip_type_keys ]) self._skip_type_keys = skip_type_keys self.pipeline = [] self.pipeline_types = [] for transform in pipeline: if isinstance(transform, dict): self.pipeline_types.append(transform['type']) transform = build_from_cfg(transform, PIPELINES) self.pipeline.append(transform) else: raise TypeError('pipeline must be a dict') self.dataset = dataset self.CLASSES = dataset.CLASSES if hasattr(self.dataset, 'flag'): self.flag = dataset.flag self.num_samples = len(dataset) if dynamic_scale is not None: assert isinstance(dynamic_scale, tuple) self._dynamic_scale = dynamic_scale def __len__(self): return self.num_samples def __getitem__(self, idx): results = copy.deepcopy(self.dataset[idx]) for (transform, transform_type) in zip(self.pipeline, self.pipeline_types): if self._skip_type_keys is not None and \ transform_type in self._skip_type_keys: continue if hasattr(transform, 'get_indexes'): indexes = transform.get_indexes(self.dataset) if not isinstance(indexes, collections.abc.Sequence): indexes = [indexes] mix_results = [ copy.deepcopy(self.dataset[index]) for index in indexes ] results['mix_results'] = mix_results if self._dynamic_scale is not None: # Used for subsequent pipeline to automatically change # the output image size. E.g MixUp, Resize. results['scale'] = self._dynamic_scale results = transform(results) if 'mix_results' in results: results.pop('mix_results') if 'img_scale' in results: results.pop('img_scale') return results def update_skip_type_keys(self, skip_type_keys): """Update skip_type_keys. It is called by an external hook. Args: skip_type_keys (list[str], optional): Sequence of type string to be skip pipeline. 
""" assert all([ isinstance(skip_type_key, str) for skip_type_key in skip_type_keys ]) self._skip_type_keys = skip_type_keys def update_dynamic_scale(self, dynamic_scale): """Update dynamic_scale. It is called by an external hook. Args: dynamic_scale (tuple[int]): The image scale can be changed dynamically. """ assert isinstance(dynamic_scale, tuple) self._dynamic_scale = dynamic_scale
15,324
37.602015
167
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/xml_style.py
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import xml.etree.ElementTree as ET import mmcv import numpy as np from PIL import Image from .builder import DATASETS from .custom import CustomDataset @DATASETS.register_module() class XMLDataset(CustomDataset): """XML dataset for detection. Args: min_size (int | float, optional): The minimum size of bounding boxes in the images. If the size of a bounding box is less than ``min_size``, it would be add to ignored field. img_subdir (str): Subdir where images are stored. Default: JPEGImages. ann_subdir (str): Subdir where annotations are. Default: Annotations. """ def __init__(self, min_size=None, img_subdir='JPEGImages', ann_subdir='Annotations', **kwargs): assert self.CLASSES or kwargs.get( 'classes', None), 'CLASSES in `XMLDataset` can not be None.' self.img_subdir = img_subdir self.ann_subdir = ann_subdir super(XMLDataset, self).__init__(**kwargs) self.cat2label = {cat: i for i, cat in enumerate(self.CLASSES)} self.min_size = min_size def load_annotations(self, ann_file): """Load annotation from XML style ann_file. Args: ann_file (str): Path of XML file. Returns: list[dict]: Annotation info from XML file. 
""" data_infos = [] img_ids = mmcv.list_from_file(ann_file) for img_id in img_ids: filename = osp.join(self.img_subdir, f'{img_id}.jpg') xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml') tree = ET.parse(xml_path) root = tree.getroot() size = root.find('size') if size is not None: width = int(size.find('width').text) height = int(size.find('height').text) else: img_path = osp.join(self.img_prefix, filename) img = Image.open(img_path) width, height = img.size data_infos.append( dict(id=img_id, filename=filename, width=width, height=height)) return data_infos def _filter_imgs(self, min_size=32): """Filter images too small or without annotation.""" valid_inds = [] for i, img_info in enumerate(self.data_infos): if min(img_info['width'], img_info['height']) < min_size: continue if self.filter_empty_gt: img_id = img_info['id'] xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml') tree = ET.parse(xml_path) root = tree.getroot() for obj in root.findall('object'): name = obj.find('name').text if name in self.CLASSES: valid_inds.append(i) break else: valid_inds.append(i) return valid_inds def get_ann_info(self, idx): """Get annotation from XML file by index. Args: idx (int): Index of data. Returns: dict: Annotation info of specified index. 
""" img_id = self.data_infos[idx]['id'] xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml') tree = ET.parse(xml_path) root = tree.getroot() bboxes = [] labels = [] bboxes_ignore = [] labels_ignore = [] for obj in root.findall('object'): name = obj.find('name').text if name not in self.CLASSES: continue label = self.cat2label[name] difficult = obj.find('difficult') difficult = 0 if difficult is None else int(difficult.text) bnd_box = obj.find('bndbox') # TODO: check whether it is necessary to use int # Coordinates may be float type bbox = [ int(float(bnd_box.find('xmin').text)), int(float(bnd_box.find('ymin').text)), int(float(bnd_box.find('xmax').text)), int(float(bnd_box.find('ymax').text)) ] ignore = False if self.min_size: assert not self.test_mode w = bbox[2] - bbox[0] h = bbox[3] - bbox[1] if w < self.min_size or h < self.min_size: ignore = True if difficult or ignore: bboxes_ignore.append(bbox) labels_ignore.append(label) else: bboxes.append(bbox) labels.append(label) if not bboxes: bboxes = np.zeros((0, 4)) labels = np.zeros((0, )) else: bboxes = np.array(bboxes, ndmin=2) - 1 labels = np.array(labels) if not bboxes_ignore: bboxes_ignore = np.zeros((0, 4)) labels_ignore = np.zeros((0, )) else: bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1 labels_ignore = np.array(labels_ignore) ann = dict( bboxes=bboxes.astype(np.float32), labels=labels.astype(np.int64), bboxes_ignore=bboxes_ignore.astype(np.float32), labels_ignore=labels_ignore.astype(np.int64)) return ann def get_cat_ids(self, idx): """Get category ids in XML file by index. Args: idx (int): Index of data. Returns: list[int]: All categories in the image of specified index. 
""" cat_ids = [] img_id = self.data_infos[idx]['id'] xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml') tree = ET.parse(xml_path) root = tree.getroot() for obj in root.findall('object'): name = obj.find('name').text if name not in self.CLASSES: continue label = self.cat2label[name] cat_ids.append(label) return cat_ids
6,243
33.882682
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset from .cityscapes import CityscapesDataset from .coco import CocoDataset from .coco_panoptic import CocoPanopticDataset from .custom import CustomDataset from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, MultiImageMixDataset, RepeatDataset) from .deepfashion import DeepFashionDataset from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler from .utils import (NumClassCheckHook, get_loading_pipeline, replace_ImageToTensor) from .voc import VOCDataset from .wider_face import WIDERFaceDataset from .xml_style import XMLDataset __all__ = [ 'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler', 'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', 'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES', 'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline', 'NumClassCheckHook', 'CocoPanopticDataset', 'MultiImageMixDataset' ]
1,320
47.925926
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/lvis.py
# Copyright (c) OpenMMLab. All rights reserved. import itertools import logging import os.path as osp import tempfile import warnings from collections import OrderedDict import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable from .builder import DATASETS from .coco import CocoDataset @DATASETS.register_module() class LVISV05Dataset(CocoDataset): CLASSES = ( 'acorn', 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna', 'apple', 'apple_juice', 'applesauce', 'apricot', 'apron', 'aquarium', 'armband', 'armchair', 'armoire', 'armor', 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award', 'awning', 'ax', 'baby_buggy', 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap', 'baseball_glove', 'basket', 'basketball_hoop', 'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead', 'beaker', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed', 'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath', 'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card', 'biscuit_(bread)', 'pirate_flag', 'black_sheep', 'blackboard', 'blanket', 'blazer', 'blender', 'blimp', 'blinker', 'blueberry', 'boar', 'gameboard', 'boat', 'bobbin', 'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'book_bag', 'bookcase', 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle', 'bottle_opener', 'bouquet', 
'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'bowling_pin', 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere', 'bread-bin', 'breechcloth', 'bridal_gown', 'briefcase', 'bristle_brush', 'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn', 'corned_beef', 'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butcher_knife', 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf', 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', 'can', 'can_opener', 'candelabrum', 'candle', 'candle_holder', 'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'cannon', 'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower', 'caviar', 'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue', 'champagne', 'chandelier', 'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard', 'chest_of_drawers_(furniture)', 'chicken_(animal)', 'chicken_wire', 'chickpea', 'Chihuahua', 'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker', 'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent', 'clementine', 'clip', 'clipboard', 'clock', 'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 
'coat', 'coat_hanger', 'coatrack', 'cock', 'coconut', 'coffee_filter', 'coffee_maker', 'coffee_table', 'coffeepot', 'coil', 'coin', 'colander', 'coleslaw', 'coloring_material', 'combination_lock', 'pacifier', 'comic_book', 'computer_keyboard', 'concrete_mixer', 'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cookie', 'cookie_jar', 'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset', 'romaine_lettuce', 'costume', 'cougar', 'coverall', 'cowbell', 'cowboy_hat', 'crab_(animal)', 'cracker', 'crape', 'crate', 'crayon', 'cream_pitcher', 'credit_card', 'crescent_roll', 'crib', 'crock_pot', 'crossbar', 'crouton', 'crow', 'crown', 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupcake', 'hair_curler', 'curling_iron', 'curtain', 'cushion', 'custard', 'cutting_tool', 'cylinder', 'cymbal', 'dachshund', 'dagger', 'dartboard', 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk', 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux', 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher', 'dishwasher_detergent', 'diskette', 'dispenser', 'Dixie_cup', 'dog', 'dog_collar', 'doll', 'dollar', 'dolphin', 'domestic_ass', 'eye_mask', 'doorbell', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly', 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit', 'dresser', 'drill', 'drinking_fountain', 'drone', 'dropper', 'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan', 'Dutch_oven', 'eagle', 'earphone', 'earplug', 'earring', 'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater', 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk', 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan', 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 
'fig_(fruit)', 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm', 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace', 'fireplug', 'fish', 'fish_(food)', 'fishbowl', 'fishing_boat', 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal', 'folding_chair', 'food_processor', 'football_(American)', 'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car', 'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice', 'fruit_salad', 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage', 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic', 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture', 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles', 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose', 'gorilla', 'gourd', 'surgical_gown', 'grape', 'grasshopper', 'grater', 'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle', 'grillroom', 'grinder_(tool)', 'grits', 'grizzly', 'grocery_bag', 'guacamole', 'guitar', 'gull', 'gun', 'hair_spray', 'hairbrush', 'hairnet', 'hairpin', 'ham', 'hamburger', 'hammer', 'hammock', 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel', 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw', 'hardback_book', 'harmonium', 'hat', 'hatbox', 'hatch', 'veil', 'headband', 'headboard', 'headlight', 'headscarf', 'headset', 'headstall_(for_horses)', 'hearing_aid', 'heart', 'heater', 'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce', 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear', 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate', 'ice_tea', 'igniter', 'incense', 'inhaler', 'iPod', 
'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jean', 'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewelry', 'joystick', 'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit', 'knee_pad', 'knife', 'knight_(chess_piece)', 'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)', 'Lego', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine', 'linen_paper', 'lion', 'lip_balm', 'lipstick', 'liquor', 'lizard', 'Loafer_(type_of_shoe)', 'log', 'lollipop', 'lotion', 'speaker_(stereo_equipment)', 'loveseat', 'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallet', 'mammoth', 'mandarin_orange', 'manger', 'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato', 'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone', 'milk', 'minivan', 'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money', 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor', 'motor_scooter', 'motor_vehicle', 'motorboat', 'motorcycle', 'mound_(baseball)', 'mouse_(animal_rodent)', 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom', 'music_stool', 'musical_instrument', 'nailfile', 'nameplate', 'napkin', 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newsstand', 'nightshirt', 'nosebag_(for_animals)', 'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 
'oregano', 'ostrich', 'ottoman', 'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle', 'padlock', 'paintbox', 'paintbrush', 'painting', 'pajamas', 'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose', 'papaya', 'paperclip', 'paper_plate', 'paper_towel', 'paperback_book', 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parchment', 'parka', 'parking_meter', 'parrot', 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport', 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter', 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper', 'pepper_mill', 'perfume', 'persimmon', 'baby', 'pet', 'petfood', 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano', 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow', 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball', 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)', 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat', 'plate', 'platter', 'playing_card', 'playpen', 'pliers', 'plow_(farm_equipment)', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)', 'pole', 'police_van', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)', 'portrait', 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato', 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune', 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish', 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat', 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt', 'recliner', 'record_player', 'red_cabbage', 'reflector', 'remote_control', 'rhinoceros', 'rib_(food)', 
'rifle', 'ring', 'river_boat', 'road_map', 'robe', 'rocking_chair', 'roller_skate', 'Rollerblade', 'rolling_pin', 'root_beer', 'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)', 'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin', 'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse', 'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard', 'scrambled_eggs', 'scraper', 'scratcher', 'screwdriver', 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane', 'seashell', 'seedling', 'serving_dish', 'sewing_machine', 'shaker', 'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass', 'shoulder_bag', 'shovel', 'shower_head', 'shower_curtain', 'shredder_(for_paper)', 'sieve', 'signboard', 'silo', 'sink', 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole', 'skirt', 'sled', 'sleeping_bag', 'sling_(bandage)', 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman', 'snowmobile', 'soap', 'soccer_ball', 'sock', 'soda_fountain', 'carbonated_water', 'sofa', 'softball', 'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon', 'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)', 'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'sponge', 'spoon', 'sportswear', 'spotlight', 'squirrel', 'stapler_(stapling_machine)', 'starfish', 'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steamer_(kitchen_appliance)', 'steering_wheel', 'stencil', 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer', 'stirrup', 'stockings_(leg_wear)', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer', 'strap', 
'straw_(for_drinking)', 'strawberry', 'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses', 'sunhat', 'sunscreen', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)', 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure', 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup', 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth', 'telephone_pole', 'telephoto_lens', 'television_camera', 'television_set', 'tennis_ball', 'tennis_racket', 'tequila', 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread', 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil', 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven', 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush', 'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel', 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light', 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', 'tray', 'tree_house', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat', 'turban', 'turkey_(bird)', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn', 'vacuum_cleaner', 'valve', 'vase', 'vending_machine', 'vent', 'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture', 'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe', 'wasabi', 'automatic_washer', 'watch', 'water_bottle', 'water_cooler', 'water_faucet', 'water_filter', 
'water_heater', 'water_jug', 'water_gun', 'water_scooter', 'water_ski', 'water_tower', 'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake', 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream', 'whiskey', 'whistle', 'wick', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)', 'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket', 'wineglass', 'wing_chair', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yak', 'yogurt', 'yoke_(animal_equipment)', 'zebra', 'zucchini') def load_annotations(self, ann_file): """Load annotation from lvis style annotation file. Args: ann_file (str): Path of annotation file. Returns: list[dict]: Annotation info from LVIS api. """ try: import lvis if getattr(lvis, '__version__', '0') >= '10.5.3': warnings.warn( 'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501 UserWarning) from lvis import LVIS except ImportError: raise ImportError( 'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501 ) self.coco = LVIS(ann_file) self.cat_ids = self.coco.get_cat_ids() self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} self.img_ids = self.coco.get_img_ids() data_infos = [] for i in self.img_ids: info = self.coco.load_imgs([i])[0] if info['file_name'].startswith('COCO'): # Convert form the COCO 2014 file naming convention of # COCO_[train/val/test]2014_000000000000.jpg to the 2017 # naming convention of 000000000000.jpg # (LVIS v1 will fix this naming issue) info['filename'] = info['file_name'][-16:] else: info['filename'] = info['file_name'] data_infos.append(info) return data_infos def evaluate(self, results, metric='bbox', logger=None, jsonfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)): """Evaluation in LVIS protocol. 
Args: results (list[list | tuple]): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. Options are 'bbox', 'segm', 'proposal', 'proposal_fast'. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. jsonfile_prefix (str | None): classwise (bool): Whether to evaluating the AP for each class. proposal_nums (Sequence[int]): Proposal number used for evaluating recalls, such as recall@100, recall@1000. Default: (100, 300, 1000). iou_thrs (Sequence[float]): IoU threshold used for evaluating recalls. If set to a list, the average recall of all IoUs will also be computed. Default: 0.5. Returns: dict[str, float]: LVIS style metrics. """ try: import lvis if getattr(lvis, '__version__', '0') >= '10.5.3': warnings.warn( 'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501 UserWarning) from lvis import LVISResults, LVISEval except ImportError: raise ImportError( 'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501 ) assert isinstance(results, list), 'results must be a list' assert len(results) == len(self), ( 'The length of results is not equal to the dataset len: {} != {}'. 
format(len(results), len(self))) metrics = metric if isinstance(metric, list) else [metric] allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] for metric in metrics: if metric not in allowed_metrics: raise KeyError('metric {} is not supported'.format(metric)) if jsonfile_prefix is None: tmp_dir = tempfile.TemporaryDirectory() jsonfile_prefix = osp.join(tmp_dir.name, 'results') else: tmp_dir = None result_files = self.results2json(results, jsonfile_prefix) eval_results = OrderedDict() # get original api lvis_gt = self.coco for metric in metrics: msg = 'Evaluating {}...'.format(metric) if logger is None: msg = '\n' + msg print_log(msg, logger=logger) if metric == 'proposal_fast': ar = self.fast_eval_recall( results, proposal_nums, iou_thrs, logger='silent') log_msg = [] for i, num in enumerate(proposal_nums): eval_results['AR@{}'.format(num)] = ar[i] log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i])) log_msg = ''.join(log_msg) print_log(log_msg, logger=logger) continue if metric not in result_files: raise KeyError('{} is not in results'.format(metric)) try: lvis_dt = LVISResults(lvis_gt, result_files[metric]) except IndexError: print_log( 'The testing results of the whole dataset is empty.', logger=logger, level=logging.ERROR) break iou_type = 'bbox' if metric == 'proposal' else metric lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type) lvis_eval.params.imgIds = self.img_ids if metric == 'proposal': lvis_eval.params.useCats = 0 lvis_eval.params.maxDets = list(proposal_nums) lvis_eval.evaluate() lvis_eval.accumulate() lvis_eval.summarize() for k, v in lvis_eval.get_results().items(): if k.startswith('AR'): val = float('{:.3f}'.format(float(v))) eval_results[k] = val else: lvis_eval.evaluate() lvis_eval.accumulate() lvis_eval.summarize() lvis_results = lvis_eval.get_results() if classwise: # Compute per-category AP # Compute per-category AP # from https://github.com/facebookresearch/detectron2/ precisions = lvis_eval.eval['precision'] # precision: (iou, 
recall, cls, area range, max dets) assert len(self.cat_ids) == precisions.shape[2] results_per_category = [] for idx, catId in enumerate(self.cat_ids): # area range index 0: all area ranges # max dets index -1: typically 100 per image nm = self.coco.load_cats(catId)[0] precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] if precision.size: ap = np.mean(precision) else: ap = float('nan') results_per_category.append( (f'{nm["name"]}', f'{float(ap):0.3f}')) num_columns = min(6, len(results_per_category) * 2) results_flatten = list( itertools.chain(*results_per_category)) headers = ['category', 'AP'] * (num_columns // 2) results_2d = itertools.zip_longest(*[ results_flatten[i::num_columns] for i in range(num_columns) ]) table_data = [headers] table_data += [result for result in results_2d] table = AsciiTable(table_data) print_log('\n' + table.table, logger=logger) for k, v in lvis_results.items(): if k.startswith('AP'): key = '{}_{}'.format(metric, k) val = float('{:.3f}'.format(float(v))) eval_results[key] = val ap_summary = ' '.join([ '{}:{:.3f}'.format(k, float(v)) for k, v in lvis_results.items() if k.startswith('AP') ]) eval_results['{}_mAP_copypaste'.format(metric)] = ap_summary lvis_eval.print_results() if tmp_dir is not None: tmp_dir.cleanup() return eval_results LVISDataset = LVISV05Dataset DATASETS.register_module(name='LVISDataset', module=LVISDataset) @DATASETS.register_module() class LVISV1Dataset(LVISDataset): CLASSES = ( 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna', 'apple', 'applesauce', 'apricot', 'apron', 'aquarium', 'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor', 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy', 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 
'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap', 'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor', 'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath', 'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card', 'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket', 'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry', 'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase', 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle', 'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box', 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere', 'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase', 'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn', 'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf', 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', 'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino', 
'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower', 'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier', 'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard', 'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker', 'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine', 'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock', 'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach', 'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table', 'coffeepot', 'coil', 'coin', 'colander', 'coleslaw', 'coloring_material', 'combination_lock', 'pacifier', 'comic_book', 'compass', 'computer_keyboard', 'condiment', 'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie', 'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall', 'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker', 'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib', 'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown', 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain', 'cushion', 'cylinder', 'cymbal', 'dagger', 
'dalmatian', 'dartboard', 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk', 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux', 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher', 'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup', 'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin', 'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly', 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit', 'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring', 'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater', 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk', 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan', 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)', 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm', 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace', 'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl', 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap', 'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal', 'folding_chair', 'food_processor', 'football_(American)', 'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car', 'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice', 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage', 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic', 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator', 'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture', 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles', 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose', 'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat', 'green_bean', 
'green_onion', 'griddle', 'grill', 'grits', 'grizzly', 'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet', 'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock', 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel', 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw', 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband', 'headboard', 'headlight', 'headscarf', 'headset', 'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah', 'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce', 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear', 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate', 'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit', 'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor', 'lizard', 'log', 'lollipop', 'speaker_(stereo_equipment)', 'loveseat', 'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger', 'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato', 'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup', 
'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone', 'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money', 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor', 'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)', 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom', 'music_stool', 'musical_instrument', 'nailfile', 'napkin', 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper', 'newsstand', 'nightshirt', 'nosebag_(for_animals)', 'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich', 'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas', 'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose', 'papaya', 'paper_plate', 'paper_towel', 'paperback_book', 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol', 'parchment', 'parka', 'parking_meter', 'parrot', 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport', 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter', 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg', 'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet', 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano', 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow', 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball', 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)', 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat', 'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)', 'plume', 
'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)', 'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)', 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato', 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel', 'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune', 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish', 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat', 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt', 'recliner', 'record_player', 'reflector', 'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map', 'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade', 'rolling_pin', 'root_beer', 'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)', 'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin', 'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse', 'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver', 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane', 'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass', 'shoulder_bag', 'shovel', 'shower_head', 'shower_cap', 'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink', 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole', 'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)', 'slipper_(footwear)', 'smoothie', 
'snake', 'snowboard', 'snowman', 'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball', 'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon', 'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)', 'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish', 'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)', 'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish', 'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel', 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer', 'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses', 'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)', 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure', 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup', 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth', 'telephone_pole', 'telephoto_lens', 'television_camera', 'television_set', 'tennis_ball', 'tennis_racket', 'tequila', 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread', 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil', 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven', 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush', 'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel', 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light', 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', 'tray', 'trench_coat', 
'triangle_(musical_instrument)', 'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat', 'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn', 'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest', 'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture', 'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe', 'washbasin', 'automatic_washer', 'watch', 'water_bottle', 'water_cooler', 'water_faucet', 'water_heater', 'water_jug', 'water_gun', 'water_scooter', 'water_ski', 'water_tower', 'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake', 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream', 'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)', 'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket', 'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt', 'yoke_(animal_equipment)', 'zebra', 'zucchini') def load_annotations(self, ann_file): try: import lvis if getattr(lvis, '__version__', '0') >= '10.5.3': warnings.warn( 'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501 UserWarning) from lvis import LVIS except ImportError: raise ImportError( 'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501 ) self.coco = LVIS(ann_file) self.cat_ids = self.coco.get_cat_ids() self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} self.img_ids = self.coco.get_img_ids() data_infos = [] for i in self.img_ids: info = self.coco.load_imgs([i])[0] # coco_url is used in LVISv1 instead of file_name # e.g. 
http://images.cocodataset.org/train2017/000000391895.jpg # train/val split in specified in url info['filename'] = info['coco_url'].replace( 'http://images.cocodataset.org/', '') data_infos.append(info) return data_infos
46,184
61.496617
157
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/builder.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import platform import random from functools import partial import numpy as np from mmcv.parallel import collate from mmcv.runner import get_dist_info from mmcv.utils import Registry, build_from_cfg from torch.utils.data import DataLoader from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler if platform.system() != 'Windows': # https://github.com/pytorch/pytorch/issues/973 import resource rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) base_soft_limit = rlimit[0] hard_limit = rlimit[1] soft_limit = min(max(4096, base_soft_limit), hard_limit) resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) DATASETS = Registry('dataset') PIPELINES = Registry('pipeline') def _concat_dataset(cfg, default_args=None): from .dataset_wrappers import ConcatDataset ann_files = cfg['ann_file'] img_prefixes = cfg.get('img_prefix', None) seg_prefixes = cfg.get('seg_prefix', None) proposal_files = cfg.get('proposal_file', None) separate_eval = cfg.get('separate_eval', True) datasets = [] num_dset = len(ann_files) for i in range(num_dset): data_cfg = copy.deepcopy(cfg) # pop 'separate_eval' since it is not a valid key for common datasets. 
if 'separate_eval' in data_cfg: data_cfg.pop('separate_eval') data_cfg['ann_file'] = ann_files[i] if isinstance(img_prefixes, (list, tuple)): data_cfg['img_prefix'] = img_prefixes[i] if isinstance(seg_prefixes, (list, tuple)): data_cfg['seg_prefix'] = seg_prefixes[i] if isinstance(proposal_files, (list, tuple)): data_cfg['proposal_file'] = proposal_files[i] datasets.append(build_dataset(data_cfg, default_args)) return ConcatDataset(datasets, separate_eval) def build_dataset(cfg, default_args=None): from .dataset_wrappers import (ConcatDataset, RepeatDataset, ClassBalancedDataset, MultiImageMixDataset) if isinstance(cfg, (list, tuple)): dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) elif cfg['type'] == 'ConcatDataset': dataset = ConcatDataset( [build_dataset(c, default_args) for c in cfg['datasets']], cfg.get('separate_eval', True)) elif cfg['type'] == 'RepeatDataset': dataset = RepeatDataset( build_dataset(cfg['dataset'], default_args), cfg['times']) elif cfg['type'] == 'ClassBalancedDataset': dataset = ClassBalancedDataset( build_dataset(cfg['dataset'], default_args), cfg['oversample_thr']) elif cfg['type'] == 'MultiImageMixDataset': cp_cfg = copy.deepcopy(cfg) cp_cfg['dataset'] = build_dataset(cp_cfg['dataset']) cp_cfg.pop('type') dataset = MultiImageMixDataset(**cp_cfg) elif isinstance(cfg.get('ann_file'), (list, tuple)): dataset = _concat_dataset(cfg, default_args) else: dataset = build_from_cfg(cfg, DATASETS, default_args) return dataset def build_dataloader(dataset, samples_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, **kwargs): """Build PyTorch DataLoader. In distributed training, each GPU/process has a dataloader. In non-distributed training, there is only one dataloader for all GPUs. Args: dataset (Dataset): A PyTorch dataset. samples_per_gpu (int): Number of training samples on each GPU, i.e., batch size of each GPU. workers_per_gpu (int): How many subprocesses to use for data loading for each GPU. 
num_gpus (int): Number of GPUs. Only used in non-distributed training. dist (bool): Distributed training/test or not. Default: True. shuffle (bool): Whether to shuffle the data at every epoch. Default: True. kwargs: any keyword argument to be used to initialize DataLoader Returns: DataLoader: A PyTorch dataloader. """ rank, world_size = get_dist_info() if dist: # DistributedGroupSampler will definitely shuffle the data to satisfy # that images on each GPU are in the same group if shuffle: sampler = DistributedGroupSampler( dataset, samples_per_gpu, world_size, rank, seed=seed) else: sampler = DistributedSampler( dataset, world_size, rank, shuffle=False, seed=seed) batch_size = samples_per_gpu num_workers = workers_per_gpu else: sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None batch_size = num_gpus * samples_per_gpu num_workers = num_gpus * workers_per_gpu init_fn = partial( worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if seed is not None else None data_loader = DataLoader( dataset, batch_size=batch_size, sampler=sampler, num_workers=num_workers, collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), pin_memory=False, worker_init_fn=init_fn, **kwargs) return data_loader def worker_init_fn(worker_id, num_workers, rank, seed): # The seed of each worker equals to # num_worker * rank + worker_id + user_seed worker_seed = num_workers * rank + worker_id + seed np.random.seed(worker_seed) random.seed(worker_seed)
5,629
36.284768
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/coco_panoptic.py
# Copyright (c) OpenMMLab. All rights reserved. import itertools import os from collections import defaultdict import mmcv import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable from .api_wrappers import COCO from .builder import DATASETS from .coco import CocoDataset try: import panopticapi from panopticapi.evaluation import pq_compute_multi_core, VOID from panopticapi.utils import id2rgb except ImportError: panopticapi = None pq_compute_multi_core = None id2rgb = None VOID = None __all__ = ['CocoPanopticDataset'] # A custom value to distinguish instance ID and category ID; need to # be greater than the number of categories. # For a pixel in the panoptic result map: # pan_id = ins_id * INSTANCE_OFFSET + cat_id INSTANCE_OFFSET = 1000 class COCOPanoptic(COCO): """This wrapper is for loading the panoptic style annotation file. The format is shown in the CocoPanopticDataset class. Args: annotation_file (str): Path of annotation file. """ def __init__(self, annotation_file=None): if panopticapi is None: raise RuntimeError( 'panopticapi is not installed, please install it by: ' 'pip install git+https://github.com/cocodataset/' 'panopticapi.git.') super(COCOPanoptic, self).__init__(annotation_file) def createIndex(self): # create index print('creating index...') # anns stores 'segment_id -> annotation' anns, cats, imgs = {}, {}, {} img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list) if 'annotations' in self.dataset: for ann, img_info in zip(self.dataset['annotations'], self.dataset['images']): img_info['segm_file'] = ann['file_name'] for seg_ann in ann['segments_info']: # to match with instance.json seg_ann['image_id'] = ann['image_id'] seg_ann['height'] = img_info['height'] seg_ann['width'] = img_info['width'] img_to_anns[ann['image_id']].append(seg_ann) # segment_id is not unique in coco dataset orz... 
if seg_ann['id'] in anns.keys(): anns[seg_ann['id']].append(seg_ann) else: anns[seg_ann['id']] = [seg_ann] if 'images' in self.dataset: for img in self.dataset['images']: imgs[img['id']] = img if 'categories' in self.dataset: for cat in self.dataset['categories']: cats[cat['id']] = cat if 'annotations' in self.dataset and 'categories' in self.dataset: for ann in self.dataset['annotations']: for seg_ann in ann['segments_info']: cat_to_imgs[seg_ann['category_id']].append(ann['image_id']) print('index created!') self.anns = anns self.imgToAnns = img_to_anns self.catToImgs = cat_to_imgs self.imgs = imgs self.cats = cats def load_anns(self, ids=[]): """Load anns with the specified ids. self.anns is a list of annotation lists instead of a list of annotations. Args: ids (int array): integer ids specifying anns Returns: anns (object array): loaded ann objects """ anns = [] if hasattr(ids, '__iter__') and hasattr(ids, '__len__'): # self.anns is a list of annotation lists instead of # a list of annotations for id in ids: anns += self.anns[id] return anns elif type(ids) == int: return self.anns[ids] @DATASETS.register_module() class CocoPanopticDataset(CocoDataset): """Coco dataset for Panoptic segmentation. The annotation format is shown as follows. The `ann` field is optional for testing. .. code-block:: none [ { 'filename': f'{image_id:012}.png', 'image_id':9 'segments_info': { [ { 'id': 8345037, (segment_id in panoptic png, convert from rgb) 'category_id': 51, 'iscrowd': 0, 'bbox': (x1, y1, w, h), 'area': 24315, 'segmentation': list,(coded mask) }, ... } } }, ... 
] """ CLASSES = [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', ' truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other', 'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged', 'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged', 'food-other-merged', 'building-other-merged', 'rock-merged', 'wall-other-merged', 'rug-merged' ] THING_CLASSES = [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush' ] STUFF_CLASSES = [ 'banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other', 'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged', 'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged', 'food-other-merged', 'building-other-merged', 'rock-merged', 'wall-other-merged', 'rug-merged' ] def load_annotations(self, ann_file): """Load annotation from COCO Panoptic style annotation file. Args: ann_file (str): Path of annotation file. Returns: list[dict]: Annotation info from COCO api. """ self.coco = COCOPanoptic(ann_file) self.cat_ids = self.coco.get_cat_ids() self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} self.categories = self.coco.cats self.img_ids = self.coco.get_img_ids() data_infos = [] for i in self.img_ids: info = self.coco.load_imgs([i])[0] info['filename'] = info['file_name'] info['segm_file'] = info['filename'].replace('jpg', 'png') data_infos.append(info) return data_infos def get_ann_info(self, idx): """Get COCO annotation by index. Args: idx (int): Index of data. Returns: dict: Annotation info of specified index. 
""" img_id = self.data_infos[idx]['id'] ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) ann_info = self.coco.load_anns(ann_ids) # filter out unmatched images ann_info = [i for i in ann_info if i['image_id'] == img_id] return self._parse_ann_info(self.data_infos[idx], ann_info) def _parse_ann_info(self, img_info, ann_info): """Parse annotations and load panoptic ground truths. Args: img_info (int): Image info of an image. ann_info (list[dict]): Annotation info of an image. Returns: dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, seg_map. """ gt_bboxes = [] gt_labels = [] gt_bboxes_ignore = [] gt_mask_infos = [] for i, ann in enumerate(ann_info): x1, y1, w, h = ann['bbox'] if ann['area'] <= 0 or w < 1 or h < 1: continue bbox = [x1, y1, x1 + w, y1 + h] category_id = ann['category_id'] contiguous_cat_id = self.cat2label[category_id] is_thing = self.coco.load_cats(ids=category_id)[0]['isthing'] if is_thing: is_crowd = ann.get('iscrowd', False) if not is_crowd: gt_bboxes.append(bbox) gt_labels.append(contiguous_cat_id) else: gt_bboxes_ignore.append(bbox) is_thing = False mask_info = { 'id': ann['id'], 'category': contiguous_cat_id, 'is_thing': is_thing } gt_mask_infos.append(mask_info) if gt_bboxes: gt_bboxes = np.array(gt_bboxes, dtype=np.float32) gt_labels = np.array(gt_labels, dtype=np.int64) else: gt_bboxes = np.zeros((0, 4), dtype=np.float32) gt_labels = np.array([], dtype=np.int64) if gt_bboxes_ignore: gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) else: gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) ann = dict( bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_mask_infos, seg_map=img_info['segm_file']) return ann def _filter_imgs(self, min_size=32): """Filter images too small or without ground truths.""" ids_with_ann = [] # check whether images have legal thing annotations. 
for lists in self.coco.anns.values(): for item in lists: category_id = item['category_id'] is_thing = self.coco.load_cats(ids=category_id)[0]['isthing'] if not is_thing: continue ids_with_ann.append(item['image_id']) ids_with_ann = set(ids_with_ann) valid_inds = [] valid_img_ids = [] for i, img_info in enumerate(self.data_infos): img_id = self.img_ids[i] if self.filter_empty_gt and img_id not in ids_with_ann: continue if min(img_info['width'], img_info['height']) >= min_size: valid_inds.append(i) valid_img_ids.append(img_id) self.img_ids = valid_img_ids return valid_inds def _pan2json(self, results, outfile_prefix): """Convert panoptic results to COCO panoptic json style.""" label2cat = dict((v, k) for (k, v) in self.cat2label.items()) pred_annotations = [] outdir = os.path.join(os.path.dirname(outfile_prefix), 'panoptic') for idx in range(len(self)): img_id = self.img_ids[idx] segm_file = self.data_infos[idx]['segm_file'] pan = results[idx] pan_labels = np.unique(pan) segm_info = [] for pan_label in pan_labels: sem_label = pan_label % INSTANCE_OFFSET # We reserve the length of self.CLASSES for VOID label if sem_label == len(self.CLASSES): continue # convert sem_label to json label cat_id = label2cat[sem_label] is_thing = self.categories[cat_id]['isthing'] mask = pan == pan_label area = mask.sum() segm_info.append({ 'id': int(pan_label), 'category_id': cat_id, 'isthing': is_thing, 'area': int(area) }) # evaluation script uses 0 for VOID label. pan[pan % INSTANCE_OFFSET == len(self.CLASSES)] = VOID pan = id2rgb(pan).astype(np.uint8) mmcv.imwrite(pan[:, :, ::-1], os.path.join(outdir, segm_file)) record = { 'image_id': img_id, 'segments_info': segm_info, 'file_name': segm_file } pred_annotations.append(record) pan_json_results = dict(annotations=pred_annotations) return pan_json_results def results2json(self, results, outfile_prefix): """Dump the panoptic results to a COCO panoptic style json file. Args: results (dict): Testing results of the dataset. 
outfile_prefix (str): The filename prefix of the json files. If the prefix is "somepath/xxx", the json files will be named "somepath/xxx.panoptic.json" Returns: dict[str: str]: The key is 'panoptic' and the value is corresponding filename. """ result_files = dict() pan_results = [result['pan_results'] for result in results] pan_json_results = self._pan2json(pan_results, outfile_prefix) result_files['panoptic'] = f'{outfile_prefix}.panoptic.json' mmcv.dump(pan_json_results, result_files['panoptic']) return result_files def evaluate_pan_json(self, result_files, outfile_prefix, logger=None, classwise=False): """Evaluate PQ according to the panoptic results json file.""" imgs = self.coco.imgs gt_json = self.coco.img_ann_map # image to annotations gt_json = [{ 'image_id': k, 'segments_info': v, 'file_name': imgs[k]['segm_file'] } for k, v in gt_json.items()] pred_json = mmcv.load(result_files['panoptic']) pred_json = dict( (el['image_id'], el) for el in pred_json['annotations']) # match the gt_anns and pred_anns in the same image matched_annotations_list = [] for gt_ann in gt_json: img_id = gt_ann['image_id'] if img_id not in pred_json.keys(): raise Exception('no prediction for the image' ' with id: {}'.format(img_id)) matched_annotations_list.append((gt_ann, pred_json[img_id])) gt_folder = self.seg_prefix pred_folder = os.path.join(os.path.dirname(outfile_prefix), 'panoptic') pq_stat = pq_compute_multi_core(matched_annotations_list, gt_folder, pred_folder, self.categories) metrics = [('All', None), ('Things', True), ('Stuff', False)] pq_results = {} for name, isthing in metrics: pq_results[name], classwise_results = pq_stat.pq_average( self.categories, isthing=isthing) if name == 'All': pq_results['classwise'] = classwise_results classwise_results = None if classwise: classwise_results = { k: v for k, v in zip(self.CLASSES, pq_results['classwise'].values()) } print_panoptic_table(pq_results, classwise_results, logger=logger) return parse_pq_results(pq_results) def 
evaluate(self, results, metric='PQ', logger=None, jsonfile_prefix=None, classwise=False, **kwargs): """Evaluation in COCO Panoptic protocol. Args: results (list[dict]): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. Only support 'PQ' at present. 'pq' will be regarded as 'PQ. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. classwise (bool): Whether to print classwise evaluation results. Default: False. Returns: dict[str, float]: COCO Panoptic style evaluation metric. """ metrics = metric if isinstance(metric, list) else [metric] # Compatible with lowercase 'pq' metrics = ['PQ' if metric == 'pq' else metric for metric in metrics] allowed_metrics = ['PQ'] # todo: support other metrics like 'bbox' for metric in metrics: if metric not in allowed_metrics: raise KeyError(f'metric {metric} is not supported') result_files, tmp_dir = self.format_results(results, jsonfile_prefix) eval_results = {} outfile_prefix = os.path.join(tmp_dir.name, 'results') \ if tmp_dir is not None else jsonfile_prefix if 'PQ' in metrics: eval_pan_results = self.evaluate_pan_json(result_files, outfile_prefix, logger, classwise) eval_results.update(eval_pan_results) if tmp_dir is not None: tmp_dir.cleanup() return eval_results def parse_pq_results(pq_results): """Parse the Panoptic Quality results.""" result = dict() result['PQ'] = 100 * pq_results['All']['pq'] result['SQ'] = 100 * pq_results['All']['sq'] result['RQ'] = 100 * pq_results['All']['rq'] result['PQ_th'] = 100 * pq_results['Things']['pq'] result['SQ_th'] = 100 * pq_results['Things']['sq'] result['RQ_th'] = 100 * pq_results['Things']['rq'] result['PQ_st'] = 100 * pq_results['Stuff']['pq'] result['SQ_st'] = 100 * pq_results['Stuff']['sq'] 
result['RQ_st'] = 100 * pq_results['Stuff']['rq'] return result def print_panoptic_table(pq_results, classwise_results=None, logger=None): """Print the panoptic evaluation results table. Args: pq_results(dict): The Panoptic Quality results. classwise_results(dict | None): The classwise Panoptic Quality results. The keys are class names and the values are metrics. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. """ headers = ['', 'PQ', 'SQ', 'RQ', 'categories'] data = [headers] for name in ['All', 'Things', 'Stuff']: numbers = [ f'{(pq_results[name][k] * 100):0.3f}' for k in ['pq', 'sq', 'rq'] ] row = [name] + numbers + [pq_results[name]['n']] data.append(row) table = AsciiTable(data) print_log('Panoptic Evaluation Results:\n' + table.table, logger=logger) if classwise_results is not None: class_metrics = [(name, ) + tuple(f'{(metrics[k] * 100):0.3f}' for k in ['pq', 'sq', 'rq']) for name, metrics in classwise_results.items()] num_columns = min(8, len(class_metrics) * 4) results_flatten = list(itertools.chain(*class_metrics)) headers = ['category', 'PQ', 'SQ', 'RQ'] * (num_columns // 4) results_2d = itertools.zip_longest( *[results_flatten[i::num_columns] for i in range(num_columns)]) data = [headers] data += [result for result in results_2d] table = AsciiTable(data) print_log( 'Classwise Panoptic Evaluation Results:\n' + table.table, logger=logger)
21,817
39.033028
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/coco.py
# Copyright (c) OpenMMLab. All rights reserved. import itertools import logging import os.path as osp import tempfile import warnings from collections import OrderedDict import mmcv import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable from mmdet.core import eval_recalls from .api_wrappers import COCO, COCOeval from .builder import DATASETS from .custom import CustomDataset import ipdb @DATASETS.register_module() class CocoDataset(CustomDataset): CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') def load_annotations(self, ann_file): """Load annotation from COCO style annotation file. Args: ann_file (str): Path of annotation file. Returns: list[dict]: Annotation info from COCO api. 
""" self.coco = COCO(ann_file) # The order of returned `cat_ids` will not # change with the order of the CLASSES self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} self.img_ids = self.coco.get_img_ids() data_infos = [] total_ann_ids = [] for i in self.img_ids: info = self.coco.load_imgs([i])[0] info['filename'] = info['file_name'] data_infos.append(info) ann_ids = self.coco.get_ann_ids(img_ids=[i]) total_ann_ids.extend(ann_ids) assert len(set(total_ann_ids)) == len( total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!" return data_infos def get_ann_info(self, idx): """Get COCO annotation by index. Args: idx (int): Index of data. Returns: dict: Annotation info of specified index. """ img_id = self.data_infos[idx]['id'] ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) ann_info = self.coco.load_anns(ann_ids) return self._parse_ann_info(self.data_infos[idx], ann_info) def get_cat_ids(self, idx): """Get COCO category ids by index. Args: idx (int): Index of data. Returns: list[int]: All categories in the image of specified index. 
""" img_id = self.data_infos[idx]['id'] ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) ann_info = self.coco.load_anns(ann_ids) return [ann['category_id'] for ann in ann_info] def _filter_imgs(self, min_size=32): """Filter images too small or without ground truths.""" valid_inds = [] # obtain images that contain annotation ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) # obtain images that contain annotations of the required categories ids_in_cat = set() for i, class_id in enumerate(self.cat_ids): ids_in_cat |= set(self.coco.cat_img_map[class_id]) # merge the image id sets of the two conditions and use the merged set # to filter out images if self.filter_empty_gt=True ids_in_cat &= ids_with_ann valid_img_ids = [] for i, img_info in enumerate(self.data_infos): img_id = self.img_ids[i] if self.filter_empty_gt and img_id not in ids_in_cat: continue if min(img_info['width'], img_info['height']) >= min_size: valid_inds.append(i) valid_img_ids.append(img_id) self.img_ids = valid_img_ids return valid_inds def _parse_ann_info(self, img_info, ann_info): """Parse bbox and mask annotation. Args: ann_info (list[dict]): Annotation info of an image. with_mask (bool): Whether to parse mask annotations. Returns: dict: A dict containing the following keys: bboxes, bboxes_ignore,\ labels, masks, seg_map. "masks" are raw annotations and not \ decoded into binary masks. 
""" gt_bboxes = [] gt_labels = [] gt_bboxes_ignore = [] gt_masks_ann = [] for i, ann in enumerate(ann_info): if ann.get('ignore', False): continue x1, y1, w, h = ann['bbox'] inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0)) inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0)) if inter_w * inter_h == 0: continue if ann['area'] <= 0 or w < 1 or h < 1: continue if ann['category_id'] not in self.cat_ids: continue bbox = [x1, y1, x1 + w, y1 + h] if ann.get('iscrowd', False): gt_bboxes_ignore.append(bbox) else: gt_bboxes.append(bbox) gt_labels.append(self.cat2label[ann['category_id']]) gt_masks_ann.append(ann.get('segmentation', None)) if gt_bboxes: gt_bboxes = np.array(gt_bboxes, dtype=np.float32) gt_labels = np.array(gt_labels, dtype=np.int64) else: gt_bboxes = np.zeros((0, 4), dtype=np.float32) gt_labels = np.array([], dtype=np.int64) if gt_bboxes_ignore: gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) else: gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) seg_map = img_info['filename'].replace('jpg', 'png') ann = dict( bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=seg_map) return ann def xyxy2xywh(self, bbox): """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO evaluation. Args: bbox (numpy.ndarray): The bounding boxes, shape (4, ), in ``xyxy`` order. Returns: list[float]: The converted bounding boxes, in ``xywh`` order. 
""" _bbox = bbox.tolist() return [ _bbox[0], _bbox[1], _bbox[2] - _bbox[0], _bbox[3] - _bbox[1], ] def _proposal2json(self, results): """Convert proposal results to COCO json style.""" json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] bboxes = results[idx] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = 1 json_results.append(data) return json_results def _det2json(self, results): """Convert detection results to COCO json style.""" json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] result = results[idx] for label in range(len(result)): bboxes = result[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = self.cat_ids[label] json_results.append(data) return json_results def _segm2json(self, results): """Convert instance segmentation results to COCO json style.""" bbox_json_results = [] segm_json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] det, seg = results[idx] for label in range(len(det)): # bbox results bboxes = det[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = self.cat_ids[label] bbox_json_results.append(data) # segm results # some detectors use different scores for bbox and mask if isinstance(seg, tuple): segms = seg[0][label] mask_score = seg[1][label] else: segms = seg[label] mask_score = [bbox[4] for bbox in bboxes] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(mask_score[i]) data['category_id'] = self.cat_ids[label] if isinstance(segms[i]['counts'], bytes): segms[i]['counts'] = segms[i]['counts'].decode() 
data['segmentation'] = segms[i] segm_json_results.append(data) return bbox_json_results, segm_json_results def results2json(self, results, outfile_prefix): """Dump the detection results to a COCO style json file. There are 3 types of results: proposals, bbox predictions, mask predictions, and they have different data types. This method will automatically recognize the type, and dump them to json files. Args: results (list[list | tuple | ndarray]): Testing results of the dataset. outfile_prefix (str): The filename prefix of the json files. If the prefix is "somepath/xxx", the json files will be named "somepath/xxx.bbox.json", "somepath/xxx.segm.json", "somepath/xxx.proposal.json". Returns: dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \ values are corresponding filenames. """ result_files = dict() if isinstance(results[0], list): json_results = self._det2json(results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' mmcv.dump(json_results, result_files['bbox']) elif isinstance(results[0], tuple): json_results = self._segm2json(results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' result_files['segm'] = f'{outfile_prefix}.segm.json' mmcv.dump(json_results[0], result_files['bbox']) mmcv.dump(json_results[1], result_files['segm']) elif isinstance(results[0], np.ndarray): json_results = self._proposal2json(results) result_files['proposal'] = f'{outfile_prefix}.proposal.json' mmcv.dump(json_results, result_files['proposal']) else: raise TypeError('invalid type of results') return result_files def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None): gt_bboxes = [] for i in range(len(self.img_ids)): ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i]) ann_info = self.coco.load_anns(ann_ids) if len(ann_info) == 0: gt_bboxes.append(np.zeros((0, 4))) continue bboxes = [] for ann in ann_info: if 
ann.get('ignore', False) or ann['iscrowd']: continue x1, y1, w, h = ann['bbox'] bboxes.append([x1, y1, x1 + w, y1 + h]) bboxes = np.array(bboxes, dtype=np.float32) if bboxes.shape[0] == 0: bboxes = np.zeros((0, 4)) gt_bboxes.append(bboxes) recalls = eval_recalls( gt_bboxes, results, proposal_nums, iou_thrs, logger=logger) ar = recalls.mean(axis=1) return ar def format_results(self, results, jsonfile_prefix=None, **kwargs): """Format the results to json (standard format for COCO evaluation). Args: results (list[tuple | numpy.ndarray]): Testing results of the dataset. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. Returns: tuple: (result_files, tmp_dir), result_files is a dict containing \ the json filepaths, tmp_dir is the temporal directory created \ for saving json files when jsonfile_prefix is not specified. """ assert isinstance(results, list), 'results must be a list' assert len(results) == len(self), ( 'The length of results is not equal to the dataset len: {} != {}'. format(len(results), len(self))) if jsonfile_prefix is None: tmp_dir = tempfile.TemporaryDirectory() jsonfile_prefix = osp.join(tmp_dir.name, 'results') else: tmp_dir = None result_files = self.results2json(results, jsonfile_prefix) return result_files, tmp_dir def evaluate(self, results, metric='bbox', logger=None, jsonfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=None, metric_items=None): """Evaluation in COCO protocol. Args: results (list[list | tuple]): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. Options are 'bbox', 'segm', 'proposal', 'proposal_fast'. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. jsonfile_prefix (str | None): The prefix of json files. 
It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. classwise (bool): Whether to evaluating the AP for each class. proposal_nums (Sequence[int]): Proposal number used for evaluating recalls, such as recall@100, recall@1000. Default: (100, 300, 1000). iou_thrs (Sequence[float], optional): IoU threshold used for evaluating recalls/mAPs. If set to a list, the average of all IoUs will also be computed. If not specified, [0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. Default: None. metric_items (list[str] | str, optional): Metric items that will be returned. If not specified, ``['AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when ``metric=='bbox' or metric=='segm'``. Returns: dict[str, float]: COCO style evaluation metric. """ metrics = metric if isinstance(metric, list) else [metric] allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] for metric in metrics: if metric not in allowed_metrics: raise KeyError(f'metric {metric} is not supported') if iou_thrs is None: iou_thrs = np.linspace( .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) if metric_items is not None: if not isinstance(metric_items, list): metric_items = [metric_items] result_files, tmp_dir = self.format_results(results, jsonfile_prefix) eval_results = OrderedDict() cocoGt = self.coco for metric in metrics: msg = f'Evaluating {metric}...' 
if logger is None: msg = '\n' + msg print_log(msg, logger=logger) if metric == 'proposal_fast': ar = self.fast_eval_recall( results, proposal_nums, iou_thrs, logger='silent') log_msg = [] for i, num in enumerate(proposal_nums): eval_results[f'AR@{num}'] = ar[i] log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}') log_msg = ''.join(log_msg) print_log(log_msg, logger=logger) continue iou_type = 'bbox' if metric == 'proposal' else metric if metric not in result_files: raise KeyError(f'{metric} is not in results') try: predictions = mmcv.load(result_files[metric]) if iou_type == 'segm': # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa # When evaluating mask AP, if the results contain bbox, # cocoapi will use the box area instead of the mask area # for calculating the instance area. Though the overall AP # is not affected, this leads to different # small/medium/large mask AP results. for x in predictions: x.pop('bbox') warnings.simplefilter('once') warnings.warn( 'The key "bbox" is deleted for more accurate mask AP ' 'of small/medium/large instances since v2.12.0. 
This ' 'does not change the overall mAP calculation.', UserWarning) cocoDt = cocoGt.loadRes(predictions) except IndexError: print_log( 'The testing results of the whole dataset is empty.', logger=logger, level=logging.ERROR) break cocoEval = COCOeval(cocoGt, cocoDt, iou_type) cocoEval.params.catIds = self.cat_ids cocoEval.params.imgIds = self.img_ids cocoEval.params.maxDets = list(proposal_nums) cocoEval.params.iouThrs = iou_thrs # mapping of cocoEval.stats coco_metric_names = { 'mAP': 0, 'mAP_50': 1, 'mAP_75': 2, 'mAP_s': 3, 'mAP_m': 4, 'mAP_l': 5, 'AR@100': 6, 'AR@300': 7, 'AR@1000': 8, 'AR_s@1000': 9, 'AR_m@1000': 10, 'AR_l@1000': 11 } if metric_items is not None: for metric_item in metric_items: if metric_item not in coco_metric_names: raise KeyError( f'metric item {metric_item} is not supported') if metric == 'proposal': cocoEval.params.useCats = 0 cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() if metric_items is None: metric_items = [ 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ] for item in metric_items: val = float( f'{cocoEval.stats[coco_metric_names[item]]:.3f}') eval_results[item] = val else: cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() if classwise: # Compute per-category AP # Compute per-category AP # from https://github.com/facebookresearch/detectron2/ precisions = cocoEval.eval['precision'] # precision: (iou, recall, cls, area range, max dets) assert len(self.cat_ids) == precisions.shape[2] results_per_category = [] for idx, catId in enumerate(self.cat_ids): # area range index 0: all area ranges # max dets index -1: typically 100 per image nm = self.coco.loadCats(catId)[0] precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] if precision.size: ap = np.mean(precision) else: ap = float('nan') results_per_category.append( (f'{nm["name"]}', f'{float(ap):0.3f}')) num_columns = min(6, len(results_per_category) * 2) results_flatten = list( 
itertools.chain(*results_per_category)) headers = ['category', 'AP'] * (num_columns // 2) results_2d = itertools.zip_longest(*[ results_flatten[i::num_columns] for i in range(num_columns) ]) table_data = [headers] table_data += [result for result in results_2d] table = AsciiTable(table_data) print_log('\n' + table.table, logger=logger) if metric_items is None: metric_items = [ 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l' ] for metric_item in metric_items: key = f'{metric}_{metric_item}' val = float( f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}' ) eval_results[key] = val ap = cocoEval.stats[:6] eval_results[f'{metric}_mAP_copypaste'] = ( f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} ' f'{ap[4]:.3f} {ap[5]:.3f}') if tmp_dir is not None: tmp_dir.cleanup() return eval_results
23,532
40.948307
124
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/wider_face.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET

import mmcv

from .builder import DATASETS
from .xml_style import XMLDataset


@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
    """WIDER Face dataset reader (PASCAL VOC style XML annotations).

    The XML annotations are produced by the conversion scripts at
    https://github.com/sovrasov/wider-face-pascal-voc-annotations
    """

    CLASSES = ('face', )

    def __init__(self, **kwargs):
        super(WIDERFaceDataset, self).__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Load annotation from WIDERFace XML style annotation file.

        Args:
            ann_file (str): Path of a file listing one image id per line.

        Returns:
            list[dict]: Per-image meta info (id, filename, width, height).
        """
        data_infos = []
        for img_id in mmcv.list_from_file(ann_file):
            # one VOC-style XML per image under <img_prefix>/Annotations/
            xml_path = osp.join(self.img_prefix, 'Annotations',
                                f'{img_id}.xml')
            root = ET.parse(xml_path).getroot()
            size_node = root.find('size')
            img_w = int(size_node.find('width').text)
            img_h = int(size_node.find('height').text)
            # the <folder> element names the WIDER event subdirectory
            event_dir = root.find('folder').text
            data_infos.append(
                dict(
                    id=img_id,
                    filename=osp.join(event_dir, f'{img_id}.jpg'),
                    width=img_w,
                    height=img_h))
        return data_infos
1,549
28.245283
68
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/api_wrappers/coco_api.py
# Copyright (c) OpenMMLab. All rights reserved.
# This file add snake case alias for coco api

import warnings

import pycocotools
from pycocotools.coco import COCO as _COCO
from pycocotools.cocoeval import COCOeval as _COCOeval

# for analysis use
# import thirdparty.pycocotools as pycocotools
# from thirdparty.pycocotools.coco import COCO as _COCO
# from thirdparty.pycocotools.cocoeval import COCOeval as _COCOeval


def _version_tuple(version_str):
    """Best-effort parse of a dotted version string into an int tuple.

    Non-numeric components terminate the parse, so ``'12.0.2'`` becomes
    ``(12, 0, 2)`` and ``'2.0'`` becomes ``(2, 0)``.  Comparing these tuples
    avoids the lexicographic string-comparison trap where
    ``'2.0.4' >= '12.0.2'`` is True.

    Args:
        version_str (str): A dotted version string such as ``'2.0.4'``.

    Returns:
        tuple[int, ...]: Leading numeric components of the version.
    """
    parts = []
    for piece in version_str.split('.'):
        if not piece.isdigit():
            break
        parts.append(int(piece))
    return tuple(parts)


class COCO(_COCO):
    """This class is almost the same as official pycocotools package.

    It implements some snake case function aliases. So that the COCO class
    has the same interface as LVIS class.
    """

    def __init__(self, annotation_file=None):
        # mmpycocotools shipped with version 12.x while the official
        # pycocotools is 2.x.  Compare numerically: with plain string
        # comparison '2.0.4' >= '12.0.2' is True, so the deprecation
        # warning used to fire even for the official package this
        # message recommends installing.
        if _version_tuple(getattr(pycocotools, '__version__',
                                  '0')) >= (12, 0, 2):
            warnings.warn(
                'mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"',  # noqa: E501
                UserWarning)
        super().__init__(annotation_file=annotation_file)
        # snake-case views of the parent's camelCase lookup tables
        self.img_ann_map = self.imgToAnns
        self.cat_img_map = self.catToImgs

    def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
        """Snake-case alias of :meth:`getAnnIds`."""
        return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)

    def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
        """Snake-case alias of :meth:`getCatIds`."""
        return self.getCatIds(cat_names, sup_names, cat_ids)

    def get_img_ids(self, img_ids=[], cat_ids=[]):
        """Snake-case alias of :meth:`getImgIds`."""
        return self.getImgIds(img_ids, cat_ids)

    def load_anns(self, ids):
        """Snake-case alias of :meth:`loadAnns`."""
        return self.loadAnns(ids)

    def load_cats(self, ids):
        """Snake-case alias of :meth:`loadCats`."""
        return self.loadCats(ids)

    def load_imgs(self, ids):
        """Snake-case alias of :meth:`loadImgs`."""
        return self.loadImgs(ids)


# just for the ease of import
COCOeval = _COCOeval
1,697
31.037736
126
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/api_wrappers/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .coco_api import COCO, COCOeval __all__ = ['COCO', 'COCOeval']
117
22.6
47
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/samplers/group_sampler.py
# Copyright (c) OpenMMLab. All rights reserved.
import math

import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data import Sampler


class GroupSampler(Sampler):
    """Single-process sampler that keeps same-``flag`` samples together.

    Each group (samples sharing the same integer ``dataset.flag`` value,
    e.g. same aspect-ratio bucket in mmdet) is shuffled and padded to a
    multiple of ``samples_per_gpu``, then chunks of ``samples_per_gpu``
    consecutive indices are emitted in random order, so one batch never
    mixes groups.
    """

    def __init__(self, dataset, samples_per_gpu=1):
        # every sample must carry a group id via the ``flag`` array
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        # per-group sample counts, indexed by flag value
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        # length counts each group padded up to a samples_per_gpu multiple
        for i, size in enumerate(self.group_sizes):
            self.num_samples += int(np.ceil(
                size / self.samples_per_gpu)) * self.samples_per_gpu

    def __iter__(self):
        indices = []
        for i, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            # all dataset indices belonging to group i, shuffled in place
            indice = np.where(self.flag == i)[0]
            assert len(indice) == size
            np.random.shuffle(indice)
            # pad with random repeats so the group splits evenly into
            # samples_per_gpu-sized chunks
            num_extra = int(np.ceil(size / self.samples_per_gpu)
                            ) * self.samples_per_gpu - len(indice)
            indice = np.concatenate(
                [indice, np.random.choice(indice, num_extra)])
            indices.append(indice)
        indices = np.concatenate(indices)
        # shuffle at chunk granularity; each chunk stays within one group
        indices = [
            indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
            for i in np.random.permutation(
                range(len(indices) // self.samples_per_gpu))
        ]
        indices = np.concatenate(indices)
        indices = indices.astype(np.int64).tolist()
        assert len(indices) == self.num_samples
        return iter(indices)

    def __len__(self):
        return self.num_samples


class DistributedGroupSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        seed (int, optional): random seed used to shuffle the sampler if
            ``shuffle=True``. This number should be identical across all
            processes in the distributed group. Default: 0.
    """

    def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None,
                 seed=0):
        # fall back to the launcher-provided rank/world_size when not given
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.seed = seed if seed is not None else 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)

        self.num_samples = 0
        # per-replica length: each group padded to a multiple of
        # samples_per_gpu * num_replicas, divided across replicas
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch + self.seed)

        indices = []
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                indice = np.where(self.flag == i)[0]
                assert len(indice) == size
                # add .numpy() to avoid bug when selecting indice in parrots.
                # TODO: check whether torch.randperm() can be replaced by
                # numpy.random.permutation().
                indice = indice[list(
                    torch.randperm(int(size), generator=g).numpy())].tolist()
                # pad the group by whole-list repeats plus a head slice so
                # it divides evenly across gpus and replicas
                extra = int(
                    math.ceil(
                        size * 1.0 / self.samples_per_gpu / self.num_replicas)
                ) * self.samples_per_gpu * self.num_replicas - len(indice)
                # pad indice
                tmp = indice.copy()
                for _ in range(extra // size):
                    indice.extend(tmp)
                indice.extend(tmp[:extra % size])
                indices.extend(indice)

        assert len(indices) == self.total_size

        # shuffle whole samples_per_gpu chunks; a chunk never crosses groups
        indices = [
            indices[j] for i in list(
                torch.randperm(
                    len(indices) // self.samples_per_gpu, generator=g))
            for j in range(i * self.samples_per_gpu, (i + 1) *
                           self.samples_per_gpu)
        ]

        # subsample: contiguous block per rank (chunks stay intact)
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # called by the runner each epoch so shuffling differs per epoch
        self.epoch = epoch
35.14094
78
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/samplers/distributed_sampler.py
# Copyright (c) OpenMMLab. All rights reserved.
import math

import torch
from torch.utils.data import DistributedSampler as _DistributedSampler


class DistributedSampler(_DistributedSampler):
    """Distributed sampler with a controllable shuffle seed.

    Identical to ``torch.utils.data.DistributedSampler`` except that the
    shuffling RNG is seeded with ``seed + epoch`` (same permutation on
    every process) and short index lists are tiled by whole copies up to
    ``total_size`` before the per-rank strided subsampling.
    """

    def __init__(self,
                 dataset,
                 num_replicas=None,
                 rank=None,
                 shuffle=True,
                 seed=0):
        super().__init__(
            dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        # for the compatibility from PyTorch 1.3+
        self.seed = 0 if seed is None else seed

    def __iter__(self):
        """Yield this rank's indices, deterministic w.r.t. ``epoch``."""
        total = len(self.dataset)
        if not self.shuffle:
            order = list(range(total))
        else:
            # deterministically shuffle based on epoch
            rng = torch.Generator()
            rng.manual_seed(self.epoch + self.seed)
            order = torch.randperm(total, generator=rng).tolist()

        # Tile whole copies of the order until it reaches total_size, then
        # trim.  Simple tail-padding would not cover the case where the
        # dataset is shorter than half of total_size.
        repeats = math.ceil(self.total_size / len(order))
        order = (order * repeats)[:self.total_size]
        assert len(order) == self.total_size

        # every rank takes a strided slice of the shared order
        order = order[self.rank:self.total_size:self.num_replicas]
        assert len(order) == self.num_samples
        return iter(order)
1,358
32.146341
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/samplers/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .distributed_sampler import DistributedSampler from .group_sampler import DistributedGroupSampler, GroupSampler __all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler']
242
39.5
75
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/pipelines/loading.py
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import mmcv import numpy as np import pycocotools.mask as maskUtils from mmdet.core import BitmapMasks, PolygonMasks from ..builder import PIPELINES try: from panopticapi.utils import rgb2id except ImportError: rgb2id = None @PIPELINES.register_module() class LoadImageFromFile: """Load an image from file. Required keys are "img_prefix" and "img_info" (a dict that must contain the key "filename"). Added or updated keys are "filename", "img", "img_shape", "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). Args: to_float32 (bool): Whether to convert the loaded image to a float32 numpy array. If set to False, the loaded image is an uint8 array. Defaults to False. color_type (str): The flag argument for :func:`mmcv.imfrombytes`. Defaults to 'color'. file_client_args (dict): Arguments to instantiate a FileClient. See :class:`mmcv.fileio.FileClient` for details. Defaults to ``dict(backend='disk')``. """ def __init__(self, to_float32=False, color_type='color', file_client_args=dict(backend='disk')): self.to_float32 = to_float32 self.color_type = color_type self.file_client_args = file_client_args.copy() self.file_client = None def __call__(self, results): """Call functions to load image and get image meta information. Args: results (dict): Result dict from :obj:`mmdet.CustomDataset`. Returns: dict: The dict contains loaded image and meta information. 
""" if self.file_client is None: self.file_client = mmcv.FileClient(**self.file_client_args) if results['img_prefix'] is not None: filename = osp.join(results['img_prefix'], results['img_info']['filename']) else: filename = results['img_info']['filename'] img_bytes = self.file_client.get(filename) img = mmcv.imfrombytes(img_bytes, flag=self.color_type) if self.to_float32: img = img.astype(np.float32) results['filename'] = filename results['ori_filename'] = results['img_info']['filename'] results['img'] = img results['img_shape'] = img.shape results['ori_shape'] = img.shape results['img_fields'] = ['img'] return results def __repr__(self): repr_str = (f'{self.__class__.__name__}(' f'to_float32={self.to_float32}, ' f"color_type='{self.color_type}', " f'file_client_args={self.file_client_args})') return repr_str @PIPELINES.register_module() class LoadImageFromWebcam(LoadImageFromFile): """Load an image from webcam. Similar with :obj:`LoadImageFromFile`, but the image read from webcam is in ``results['img']``. """ def __call__(self, results): """Call functions to add image meta information. Args: results (dict): Result dict with Webcam read image in ``results['img']``. Returns: dict: The dict contains loaded image and meta information. """ img = results['img'] if self.to_float32: img = img.astype(np.float32) results['filename'] = None results['ori_filename'] = None results['img'] = img results['img_shape'] = img.shape results['ori_shape'] = img.shape results['img_fields'] = ['img'] return results @PIPELINES.register_module() class LoadMultiChannelImageFromFiles: """Load multi-channel images from a list of separate channel files. Required keys are "img_prefix" and "img_info" (a dict that must contain the key "filename", which is expected to be a list of filenames). Added or updated keys are "filename", "img", "img_shape", "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). 
Args: to_float32 (bool): Whether to convert the loaded image to a float32 numpy array. If set to False, the loaded image is an uint8 array. Defaults to False. color_type (str): The flag argument for :func:`mmcv.imfrombytes`. Defaults to 'color'. file_client_args (dict): Arguments to instantiate a FileClient. See :class:`mmcv.fileio.FileClient` for details. Defaults to ``dict(backend='disk')``. """ def __init__(self, to_float32=False, color_type='unchanged', file_client_args=dict(backend='disk')): self.to_float32 = to_float32 self.color_type = color_type self.file_client_args = file_client_args.copy() self.file_client = None def __call__(self, results): """Call functions to load multiple images and get images meta information. Args: results (dict): Result dict from :obj:`mmdet.CustomDataset`. Returns: dict: The dict contains loaded images and meta information. """ if self.file_client is None: self.file_client = mmcv.FileClient(**self.file_client_args) if results['img_prefix'] is not None: filename = [ osp.join(results['img_prefix'], fname) for fname in results['img_info']['filename'] ] else: filename = results['img_info']['filename'] img = [] for name in filename: img_bytes = self.file_client.get(name) img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type)) img = np.stack(img, axis=-1) if self.to_float32: img = img.astype(np.float32) results['filename'] = filename results['ori_filename'] = results['img_info']['filename'] results['img'] = img results['img_shape'] = img.shape results['ori_shape'] = img.shape # Set initial values for default meta_keys results['pad_shape'] = img.shape results['scale_factor'] = 1.0 num_channels = 1 if len(img.shape) < 3 else img.shape[2] results['img_norm_cfg'] = dict( mean=np.zeros(num_channels, dtype=np.float32), std=np.ones(num_channels, dtype=np.float32), to_rgb=False) return results def __repr__(self): repr_str = (f'{self.__class__.__name__}(' f'to_float32={self.to_float32}, ' f"color_type='{self.color_type}', " 
f'file_client_args={self.file_client_args})') return repr_str @PIPELINES.register_module() class LoadAnnotations: """Load multiple types of annotations. Args: with_bbox (bool): Whether to parse and load the bbox annotation. Default: True. with_label (bool): Whether to parse and load the label annotation. Default: True. with_mask (bool): Whether to parse and load the mask annotation. Default: False. with_seg (bool): Whether to parse and load the semantic segmentation annotation. Default: False. poly2mask (bool): Whether to convert the instance masks from polygons to bitmaps. Default: True. file_client_args (dict): Arguments to instantiate a FileClient. See :class:`mmcv.fileio.FileClient` for details. Defaults to ``dict(backend='disk')``. """ def __init__(self, with_bbox=True, with_label=True, with_mask=False, with_seg=False, poly2mask=True, file_client_args=dict(backend='disk')): self.with_bbox = with_bbox self.with_label = with_label self.with_mask = with_mask self.with_seg = with_seg self.poly2mask = poly2mask self.file_client_args = file_client_args.copy() self.file_client = None def _load_bboxes(self, results): """Private function to load bounding box annotations. Args: results (dict): Result dict from :obj:`mmdet.CustomDataset`. Returns: dict: The dict contains loaded bounding box annotations. """ ann_info = results['ann_info'] results['gt_bboxes'] = ann_info['bboxes'].copy() gt_bboxes_ignore = ann_info.get('bboxes_ignore', None) if gt_bboxes_ignore is not None: results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy() results['bbox_fields'].append('gt_bboxes_ignore') results['bbox_fields'].append('gt_bboxes') return results def _load_labels(self, results): """Private function to load label annotations. Args: results (dict): Result dict from :obj:`mmdet.CustomDataset`. Returns: dict: The dict contains loaded label annotations. 
""" results['gt_labels'] = results['ann_info']['labels'].copy() return results def _poly2mask(self, mask_ann, img_h, img_w): """Private function to convert masks represented with polygon to bitmaps. Args: mask_ann (list | dict): Polygon mask annotation input. img_h (int): The height of output mask. img_w (int): The width of output mask. Returns: numpy.ndarray: The decode bitmap mask of shape (img_h, img_w). """ if isinstance(mask_ann, list): # polygon -- a single object might consist of multiple parts # we merge all parts into one mask rle code rles = maskUtils.frPyObjects(mask_ann, img_h, img_w) rle = maskUtils.merge(rles) elif isinstance(mask_ann['counts'], list): # uncompressed RLE rle = maskUtils.frPyObjects(mask_ann, img_h, img_w) else: # rle rle = mask_ann mask = maskUtils.decode(rle) return mask def process_polygons(self, polygons): """Convert polygons to list of ndarray and filter invalid polygons. Args: polygons (list[list]): Polygons of one instance. Returns: list[numpy.ndarray]: Processed polygons. """ polygons = [np.array(p) for p in polygons] valid_polygons = [] for polygon in polygons: if len(polygon) % 2 == 0 and len(polygon) >= 6: valid_polygons.append(polygon) return valid_polygons def _load_masks(self, results): """Private function to load mask annotations. Args: results (dict): Result dict from :obj:`mmdet.CustomDataset`. Returns: dict: The dict contains loaded mask annotations. If ``self.poly2mask`` is set ``True``, `gt_mask` will contain :obj:`PolygonMasks`. Otherwise, :obj:`BitmapMasks` is used. 
""" h, w = results['img_info']['height'], results['img_info']['width'] gt_masks = results['ann_info']['masks'] if self.poly2mask: gt_masks = BitmapMasks( [self._poly2mask(mask, h, w) for mask in gt_masks], h, w) else: gt_masks = PolygonMasks( [self.process_polygons(polygons) for polygons in gt_masks], h, w) results['gt_masks'] = gt_masks results['mask_fields'].append('gt_masks') return results def _load_semantic_seg(self, results): """Private function to load semantic segmentation annotations. Args: results (dict): Result dict from :obj:`dataset`. Returns: dict: The dict contains loaded semantic segmentation annotations. """ if self.file_client is None: self.file_client = mmcv.FileClient(**self.file_client_args) filename = osp.join(results['seg_prefix'], results['ann_info']['seg_map']) img_bytes = self.file_client.get(filename) results['gt_semantic_seg'] = mmcv.imfrombytes( img_bytes, flag='unchanged').squeeze() results['seg_fields'].append('gt_semantic_seg') return results def __call__(self, results): """Call function to load multiple types annotations. Args: results (dict): Result dict from :obj:`mmdet.CustomDataset`. Returns: dict: The dict contains loaded bounding box, label, mask and semantic segmentation annotations. """ if self.with_bbox: results = self._load_bboxes(results) if results is None: return None if self.with_label: results = self._load_labels(results) if self.with_mask: results = self._load_masks(results) if self.with_seg: results = self._load_semantic_seg(results) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(with_bbox={self.with_bbox}, ' repr_str += f'with_label={self.with_label}, ' repr_str += f'with_mask={self.with_mask}, ' repr_str += f'with_seg={self.with_seg}, ' repr_str += f'poly2mask={self.poly2mask}, ' repr_str += f'poly2mask={self.file_client_args})' return repr_str @PIPELINES.register_module() class LoadPanopticAnnotations(LoadAnnotations): """Load multiple types of panoptic annotations. 
Args: with_bbox (bool): Whether to parse and load the bbox annotation. Default: True. with_label (bool): Whether to parse and load the label annotation. Default: True. with_mask (bool): Whether to parse and load the mask annotation. Default: True. with_seg (bool): Whether to parse and load the semantic segmentation annotation. Default: True. file_client_args (dict): Arguments to instantiate a FileClient. See :class:`mmcv.fileio.FileClient` for details. Defaults to ``dict(backend='disk')``. """ def __init__(self, with_bbox=True, with_label=True, with_mask=True, with_seg=True, file_client_args=dict(backend='disk')): if rgb2id is None: raise RuntimeError( 'panopticapi is not installed, please install it by: ' 'pip install git+https://github.com/cocodataset/' 'panopticapi.git.') super(LoadPanopticAnnotations, self).__init__(with_bbox, with_label, with_mask, with_seg, True, file_client_args) def _load_masks_and_semantic_segs(self, results): """Private function to load mask and semantic segmentation annotations. Args: results (dict): Result dict from :obj:`mmdet.CustomDataset`. Returns: dict: The dict contains loaded mask and semantic segmentation annotations. `BitmapMasks` is used for mask annotations. 
""" if self.file_client is None: self.file_client = mmcv.FileClient(**self.file_client_args) filename = osp.join(results['seg_prefix'], results['ann_info']['seg_map']) img_bytes = self.file_client.get(filename) pan_png = mmcv.imfrombytes( img_bytes, flag='color', channel_order='rgb').squeeze() pan_png = rgb2id(pan_png) gt_masks = [] gt_seg = np.zeros_like(pan_png) # 0 as ignore for mask_info in results['ann_info']['masks']: mask = (pan_png == mask_info['id']) gt_seg = np.where(mask, mask_info['category'] + 1, gt_seg) # The legal thing masks if mask_info.get('is_thing'): gt_masks.append(mask.astype(np.uint8)) if self.with_mask: h, w = results['img_info']['height'], results['img_info']['width'] gt_masks = BitmapMasks(gt_masks, h, w) results['gt_masks'] = gt_masks results['mask_fields'].append('gt_masks') if self.with_seg: results['gt_semantic_seg'] = gt_seg results['seg_fields'].append('gt_semantic_seg') return results def __call__(self, results): """Call function to load multiple types panoptic annotations. Args: results (dict): Result dict from :obj:`mmdet.CustomDataset`. Returns: dict: The dict contains loaded bounding box, label, mask and semantic segmentation annotations. """ if self.with_bbox: results = self._load_bboxes(results) if results is None: return None if self.with_label: results = self._load_labels(results) if self.with_mask or self.with_seg: # The tasks completed by '_load_masks' and '_load_semantic_segs' # in LoadAnnotations are merged to one function. results = self._load_masks_and_semantic_segs(results) return results @PIPELINES.register_module() class LoadProposals: """Load proposal pipeline. Required key is "proposals". Updated keys are "proposals", "bbox_fields". Args: num_max_proposals (int, optional): Maximum number of proposals to load. If not specified, all proposals will be loaded. 
""" def __init__(self, num_max_proposals=None): self.num_max_proposals = num_max_proposals def __call__(self, results): """Call function to load proposals from file. Args: results (dict): Result dict from :obj:`mmdet.CustomDataset`. Returns: dict: The dict contains loaded proposal annotations. """ proposals = results['proposals'] if proposals.shape[1] not in (4, 5): raise AssertionError( 'proposals should have shapes (n, 4) or (n, 5), ' f'but found {proposals.shape}') proposals = proposals[:, :4] if self.num_max_proposals is not None: proposals = proposals[:self.num_max_proposals] if len(proposals) == 0: proposals = np.array([[0, 0, 0, 0]], dtype=np.float32) results['proposals'] = proposals results['bbox_fields'].append('proposals') return results def __repr__(self): return self.__class__.__name__ + \ f'(num_max_proposals={self.num_max_proposals})' @PIPELINES.register_module() class FilterAnnotations: """Filter invalid annotations. Args: min_gt_bbox_wh (tuple[int]): Minimum width and height of ground truth boxes. """ def __init__(self, min_gt_bbox_wh): # TODO: add more filter options self.min_gt_bbox_wh = min_gt_bbox_wh def __call__(self, results): assert 'gt_bboxes' in results gt_bboxes = results['gt_bboxes'] w = gt_bboxes[:, 2] - gt_bboxes[:, 0] h = gt_bboxes[:, 3] - gt_bboxes[:, 1] keep = (w > self.min_gt_bbox_wh[0]) & (h > self.min_gt_bbox_wh[1]) if not keep.any(): return None else: keys = ('gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg') for key in keys: if key in results: results[key] = results[key][keep] return results
19,800
33.922399
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/pipelines/instaboost.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np

from ..builder import PIPELINES


@PIPELINES.register_module()
class InstaBoost:
    r"""Data augmentation method in `InstaBoost: Boosting Instance
    Segmentation Via Probability Map Guided Copy-Pasting
    <https://arxiv.org/abs/1908.07801>`_.

    Refer to https://github.com/GothicAi/Instaboost for implementation details.

    Args:
        action_candidate (tuple): Action candidates. "normal", "horizontal", \
            "vertical", "skip" are supported. Default: ('normal', \
            'horizontal', 'skip').
        action_prob (tuple): Corresponding action probabilities. Should be \
            the same length as action_candidate. Default: (1, 0, 0).
        scale (tuple): (min scale, max scale). Default: (0.8, 1.2).
        dx (int): The maximum x-axis shift will be (instance width) / dx.
            Default 15.
        dy (int): The maximum y-axis shift will be (instance height) / dy.
            Default 15.
        theta (tuple): (min rotation degree, max rotation degree). \
            Default: (-1, 1).
        color_prob (float): Probability of images for color augmentation.
            Default 0.5.
        hflag (bool): Whether to use heatmap guided. Default False.
        aug_ratio (float): Probability of applying this transformation. \
            Default 0.5.
    """

    def __init__(self,
                 action_candidate=('normal', 'horizontal', 'skip'),
                 action_prob=(1, 0, 0),
                 scale=(0.8, 1.2),
                 dx=15,
                 dy=15,
                 theta=(-1, 1),
                 color_prob=0.5,
                 hflag=False,
                 aug_ratio=0.5):
        # import lazily so mmdet works without the optional dependency
        try:
            import instaboostfast as instaboost
        except ImportError:
            raise ImportError(
                'Please run "pip install instaboostfast" '
                'to install instaboostfast first for instaboost augmentation.')
        self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
                                               scale, dx, dy, theta,
                                               color_prob, hflag)
        self.aug_ratio = aug_ratio

    def _load_anns(self, results):
        """Convert mmdet ``ann_info`` into COCO-style dicts for instaboost.

        Boxes are converted from (x1, y1, x2, y2) to (x, y, w, h).
        """
        labels = results['ann_info']['labels']
        masks = results['ann_info']['masks']
        bboxes = results['ann_info']['bboxes']
        n = len(labels)
        anns = []
        for i in range(n):
            label = labels[i]
            bbox = bboxes[i]
            mask = masks[i]
            x1, y1, x2, y2 = bbox
            # assert (x2 - x1) >= 1 and (y2 - y1) >= 1
            bbox = [x1, y1, x2 - x1, y2 - y1]
            anns.append({
                'category_id': label,
                'segmentation': mask,
                'bbox': bbox
            })

        return anns

    def _parse_anns(self, results, anns, img):
        """Write instaboost's output back into ``results['ann_info']``.

        Boxes come back as (x, y, w, h) and are converted to
        (x1, y1, x2, y2); degenerate boxes (w or h <= 0) are dropped.
        """
        gt_bboxes = []
        gt_labels = []
        gt_masks_ann = []
        for ann in anns:
            x1, y1, w, h = ann['bbox']
            # TODO: more essential bug need to be fixed in instaboost
            if w <= 0 or h <= 0:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            gt_bboxes.append(bbox)
            gt_labels.append(ann['category_id'])
            gt_masks_ann.append(ann['segmentation'])
        gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        gt_labels = np.array(gt_labels, dtype=np.int64)
        results['ann_info']['labels'] = gt_labels
        results['ann_info']['bboxes'] = gt_bboxes
        results['ann_info']['masks'] = gt_masks_ann
        results['img'] = img
        return results

    def __call__(self, results):
        img = results['img']
        orig_type = img.dtype
        anns = self._load_anns(results)
        # apply the augmentation with probability aug_ratio
        if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
            try:
                import instaboostfast as instaboost
            except ImportError:
                raise ImportError('Please run "pip install instaboostfast" '
                                  'to install instaboostfast first.')
            # instaboost works on uint8 images; restore the dtype below
            anns, img = instaboost.get_new_data(
                anns, img.astype(np.uint8), self.cfg, background=None)

        results = self._parse_anns(results, anns, img.astype(orig_type))
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
        return repr_str
4,510
36.907563
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/pipelines/compose.py
# Copyright (c) OpenMMLab. All rights reserved. import collections from mmcv.utils import build_from_cfg from ..builder import PIPELINES import ipdb @PIPELINES.register_module() class Compose: """Compose multiple transforms sequentially. Args: transforms (Sequence[dict | callable]): Sequence of transform object or config dict to be composed. """ def __init__(self, transforms): assert isinstance(transforms, collections.abc.Sequence) self.transforms = [] for transform in transforms: if isinstance(transform, dict): transform = build_from_cfg(transform, PIPELINES) self.transforms.append(transform) elif callable(transform): self.transforms.append(transform) else: raise TypeError('transform must be callable or a dict') def __call__(self, data, datatet=None): """Call function to apply transforms sequentially. Args: data (dict): A result dict contains the data to transform. Returns: dict: Transformed data. """ for t in self.transforms: data = t(data) if data is None: return None return data def __repr__(self): format_string = self.__class__.__name__ + '(' for t in self.transforms: format_string += '\n' format_string += f' {t}' format_string += '\n)' return format_string
1,528
28.403846
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/pipelines/auto_augment.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import cv2 import mmcv import numpy as np from ..builder import PIPELINES from .compose import Compose _MAX_LEVEL = 10 def level_to_value(level, max_value): """Map from level to values based on max_value.""" return (level / _MAX_LEVEL) * max_value def enhance_level_to_value(level, a=1.8, b=0.1): """Map from level to values.""" return (level / _MAX_LEVEL) * a + b def random_negative(value, random_negative_prob): """Randomly negate value based on random_negative_prob.""" return -value if np.random.rand() < random_negative_prob else value def bbox2fields(): """The key correspondence from bboxes to labels, masks and segmentations.""" bbox2label = { 'gt_bboxes': 'gt_labels', 'gt_bboxes_ignore': 'gt_labels_ignore' } bbox2mask = { 'gt_bboxes': 'gt_masks', 'gt_bboxes_ignore': 'gt_masks_ignore' } bbox2seg = { 'gt_bboxes': 'gt_semantic_seg', } return bbox2label, bbox2mask, bbox2seg @PIPELINES.register_module() class AutoAugment: """Auto augmentation. This data augmentation is proposed in `Learning Data Augmentation Strategies for Object Detection <https://arxiv.org/pdf/1906.11172>`_. TODO: Implement 'Shear', 'Sharpness' and 'Rotate' transforms Args: policies (list[list[dict]]): The policies of auto augmentation. Each policy in ``policies`` is a specific augmentation policy, and is composed by several augmentations (dict). When AutoAugment is called, a random policy in ``policies`` will be selected to augment images. 
Examples: >>> replace = (104, 116, 124) >>> policies = [ >>> [ >>> dict(type='Sharpness', prob=0.0, level=8), >>> dict( >>> type='Shear', >>> prob=0.4, >>> level=0, >>> replace=replace, >>> axis='x') >>> ], >>> [ >>> dict( >>> type='Rotate', >>> prob=0.6, >>> level=10, >>> replace=replace), >>> dict(type='Color', prob=1.0, level=6) >>> ] >>> ] >>> augmentation = AutoAugment(policies) >>> img = np.ones(100, 100, 3) >>> gt_bboxes = np.ones(10, 4) >>> results = dict(img=img, gt_bboxes=gt_bboxes) >>> results = augmentation(results) """ def __init__(self, policies): assert isinstance(policies, list) and len(policies) > 0, \ 'Policies must be a non-empty list.' for policy in policies: assert isinstance(policy, list) and len(policy) > 0, \ 'Each policy in policies must be a non-empty list.' for augment in policy: assert isinstance(augment, dict) and 'type' in augment, \ 'Each specific augmentation must be a dict with key' \ ' "type".' self.policies = copy.deepcopy(policies) self.transforms = [Compose(policy) for policy in self.policies] def __call__(self, results): transform = np.random.choice(self.transforms) return transform(results) def __repr__(self): return f'{self.__class__.__name__}(policies={self.policies})' @PIPELINES.register_module() class Shear: """Apply Shear Transformation to image (and its corresponding bbox, mask, segmentation). Args: level (int | float): The level should be in range [0,_MAX_LEVEL]. img_fill_val (int | float | tuple): The filled values for image border. If float, the same fill value will be used for all the three channels of image. If tuple, the should be 3 elements. seg_ignore_label (int): The fill value used for segmentation map. Note this value must equals ``ignore_label`` in ``semantic_head`` of the corresponding config. Default 255. prob (float): The probability for performing Shear and should be in range [0, 1]. direction (str): The direction for shear, either "horizontal" or "vertical". 
max_shear_magnitude (float): The maximum magnitude for Shear transformation. random_negative_prob (float): The probability that turns the offset negative. Should be in range [0,1] interpolation (str): Same as in :func:`mmcv.imshear`. """ def __init__(self, level, img_fill_val=128, seg_ignore_label=255, prob=0.5, direction='horizontal', max_shear_magnitude=0.3, random_negative_prob=0.5, interpolation='bilinear'): assert isinstance(level, (int, float)), 'The level must be type ' \ f'int or float, got {type(level)}.' assert 0 <= level <= _MAX_LEVEL, 'The level should be in range ' \ f'[0,{_MAX_LEVEL}], got {level}.' if isinstance(img_fill_val, (float, int)): img_fill_val = tuple([float(img_fill_val)] * 3) elif isinstance(img_fill_val, tuple): assert len(img_fill_val) == 3, 'img_fill_val as tuple must ' \ f'have 3 elements. got {len(img_fill_val)}.' img_fill_val = tuple([float(val) for val in img_fill_val]) else: raise ValueError( 'img_fill_val must be float or tuple with 3 elements.') assert np.all([0 <= val <= 255 for val in img_fill_val]), 'all ' \ 'elements of img_fill_val should between range [0,255].' \ f'got {img_fill_val}.' assert 0 <= prob <= 1.0, 'The probability of shear should be in ' \ f'range [0,1]. got {prob}.' assert direction in ('horizontal', 'vertical'), 'direction must ' \ f'in be either "horizontal" or "vertical". got {direction}.' assert isinstance(max_shear_magnitude, float), 'max_shear_magnitude ' \ f'should be type float. got {type(max_shear_magnitude)}.' assert 0. <= max_shear_magnitude <= 1., 'Defaultly ' \ 'max_shear_magnitude should be in range [0,1]. ' \ f'got {max_shear_magnitude}.' 
self.level = level self.magnitude = level_to_value(level, max_shear_magnitude) self.img_fill_val = img_fill_val self.seg_ignore_label = seg_ignore_label self.prob = prob self.direction = direction self.max_shear_magnitude = max_shear_magnitude self.random_negative_prob = random_negative_prob self.interpolation = interpolation def _shear_img(self, results, magnitude, direction='horizontal', interpolation='bilinear'): """Shear the image. Args: results (dict): Result dict from loading pipeline. magnitude (int | float): The magnitude used for shear. direction (str): The direction for shear, either "horizontal" or "vertical". interpolation (str): Same as in :func:`mmcv.imshear`. """ for key in results.get('img_fields', ['img']): img = results[key] img_sheared = mmcv.imshear( img, magnitude, direction, border_value=self.img_fill_val, interpolation=interpolation) results[key] = img_sheared.astype(img.dtype) def _shear_bboxes(self, results, magnitude): """Shear the bboxes.""" h, w, c = results['img_shape'] if self.direction == 'horizontal': shear_matrix = np.stack([[1, magnitude], [0, 1]]).astype(np.float32) # [2, 2] else: shear_matrix = np.stack([[1, 0], [magnitude, 1]]).astype(np.float32) for key in results.get('bbox_fields', []): min_x, min_y, max_x, max_y = np.split( results[key], results[key].shape[-1], axis=-1) coordinates = np.stack([[min_x, min_y], [max_x, min_y], [min_x, max_y], [max_x, max_y]]) # [4, 2, nb_box, 1] coordinates = coordinates[..., 0].transpose( (2, 1, 0)).astype(np.float32) # [nb_box, 2, 4] new_coords = np.matmul(shear_matrix[None, :, :], coordinates) # [nb_box, 2, 4] min_x = np.min(new_coords[:, 0, :], axis=-1) min_y = np.min(new_coords[:, 1, :], axis=-1) max_x = np.max(new_coords[:, 0, :], axis=-1) max_y = np.max(new_coords[:, 1, :], axis=-1) min_x = np.clip(min_x, a_min=0, a_max=w) min_y = np.clip(min_y, a_min=0, a_max=h) max_x = np.clip(max_x, a_min=min_x, a_max=w) max_y = np.clip(max_y, a_min=min_y, a_max=h) results[key] = np.stack([min_x, 
min_y, max_x, max_y], axis=-1).astype(results[key].dtype) def _shear_masks(self, results, magnitude, direction='horizontal', fill_val=0, interpolation='bilinear'): """Shear the masks.""" h, w, c = results['img_shape'] for key in results.get('mask_fields', []): masks = results[key] results[key] = masks.shear((h, w), magnitude, direction, border_value=fill_val, interpolation=interpolation) def _shear_seg(self, results, magnitude, direction='horizontal', fill_val=255, interpolation='bilinear'): """Shear the segmentation maps.""" for key in results.get('seg_fields', []): seg = results[key] results[key] = mmcv.imshear( seg, magnitude, direction, border_value=fill_val, interpolation=interpolation).astype(seg.dtype) def _filter_invalid(self, results, min_bbox_size=0): """Filter bboxes and corresponding masks too small after shear augmentation.""" bbox2label, bbox2mask, _ = bbox2fields() for key in results.get('bbox_fields', []): bbox_w = results[key][:, 2] - results[key][:, 0] bbox_h = results[key][:, 3] - results[key][:, 1] valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size) valid_inds = np.nonzero(valid_inds)[0] results[key] = results[key][valid_inds] # label fields. e.g. gt_labels and gt_labels_ignore label_key = bbox2label.get(key) if label_key in results: results[label_key] = results[label_key][valid_inds] # mask fields, e.g. gt_masks and gt_masks_ignore mask_key = bbox2mask.get(key) if mask_key in results: results[mask_key] = results[mask_key][valid_inds] def __call__(self, results): """Call function to shear images, bounding boxes, masks and semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: dict: Sheared results. """ if np.random.rand() > self.prob: return results magnitude = random_negative(self.magnitude, self.random_negative_prob) self._shear_img(results, magnitude, self.direction, self.interpolation) self._shear_bboxes(results, magnitude) # fill_val set to 0 for background of mask. 
self._shear_masks( results, magnitude, self.direction, fill_val=0, interpolation=self.interpolation) self._shear_seg( results, magnitude, self.direction, fill_val=self.seg_ignore_label, interpolation=self.interpolation) self._filter_invalid(results) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(level={self.level}, ' repr_str += f'img_fill_val={self.img_fill_val}, ' repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' repr_str += f'prob={self.prob}, ' repr_str += f'direction={self.direction}, ' repr_str += f'max_shear_magnitude={self.max_shear_magnitude}, ' repr_str += f'random_negative_prob={self.random_negative_prob}, ' repr_str += f'interpolation={self.interpolation})' return repr_str @PIPELINES.register_module() class Rotate: """Apply Rotate Transformation to image (and its corresponding bbox, mask, segmentation). Args: level (int | float): The level should be in range (0,_MAX_LEVEL]. scale (int | float): Isotropic scale factor. Same in ``mmcv.imrotate``. center (int | float | tuple[float]): Center point (w, h) of the rotation in the source image. If None, the center of the image will be used. Same in ``mmcv.imrotate``. img_fill_val (int | float | tuple): The fill value for image border. If float, the same value will be used for all the three channels of image. If tuple, the should be 3 elements (e.g. equals the number of channels for image). seg_ignore_label (int): The fill value used for segmentation map. Note this value must equals ``ignore_label`` in ``semantic_head`` of the corresponding config. Default 255. prob (float): The probability for perform transformation and should be in range 0 to 1. max_rotate_angle (int | float): The maximum angles for rotate transformation. random_negative_prob (float): The probability that turns the offset negative. 
""" def __init__(self, level, scale=1, center=None, img_fill_val=128, seg_ignore_label=255, prob=0.5, max_rotate_angle=30, random_negative_prob=0.5): assert isinstance(level, (int, float)), \ f'The level must be type int or float. got {type(level)}.' assert 0 <= level <= _MAX_LEVEL, \ f'The level should be in range (0,{_MAX_LEVEL}]. got {level}.' assert isinstance(scale, (int, float)), \ f'The scale must be type int or float. got type {type(scale)}.' if isinstance(center, (int, float)): center = (center, center) elif isinstance(center, tuple): assert len(center) == 2, 'center with type tuple must have '\ f'2 elements. got {len(center)} elements.' else: assert center is None, 'center must be None or type int, '\ f'float or tuple, got type {type(center)}.' if isinstance(img_fill_val, (float, int)): img_fill_val = tuple([float(img_fill_val)] * 3) elif isinstance(img_fill_val, tuple): assert len(img_fill_val) == 3, 'img_fill_val as tuple must '\ f'have 3 elements. got {len(img_fill_val)}.' img_fill_val = tuple([float(val) for val in img_fill_val]) else: raise ValueError( 'img_fill_val must be float or tuple with 3 elements.') assert np.all([0 <= val <= 255 for val in img_fill_val]), \ 'all elements of img_fill_val should between range [0,255]. '\ f'got {img_fill_val}.' assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\ 'got {prob}.' assert isinstance(max_rotate_angle, (int, float)), 'max_rotate_angle '\ f'should be type int or float. got type {type(max_rotate_angle)}.' self.level = level self.scale = scale # Rotation angle in degrees. Positive values mean # clockwise rotation. self.angle = level_to_value(level, max_rotate_angle) self.center = center self.img_fill_val = img_fill_val self.seg_ignore_label = seg_ignore_label self.prob = prob self.max_rotate_angle = max_rotate_angle self.random_negative_prob = random_negative_prob def _rotate_img(self, results, angle, center=None, scale=1.0): """Rotate the image. 
Args: results (dict): Result dict from loading pipeline. angle (float): Rotation angle in degrees, positive values mean clockwise rotation. Same in ``mmcv.imrotate``. center (tuple[float], optional): Center point (w, h) of the rotation. Same in ``mmcv.imrotate``. scale (int | float): Isotropic scale factor. Same in ``mmcv.imrotate``. """ for key in results.get('img_fields', ['img']): img = results[key].copy() img_rotated = mmcv.imrotate( img, angle, center, scale, border_value=self.img_fill_val) results[key] = img_rotated.astype(img.dtype) def _rotate_bboxes(self, results, rotate_matrix): """Rotate the bboxes.""" h, w, c = results['img_shape'] for key in results.get('bbox_fields', []): min_x, min_y, max_x, max_y = np.split( results[key], results[key].shape[-1], axis=-1) coordinates = np.stack([[min_x, min_y], [max_x, min_y], [min_x, max_y], [max_x, max_y]]) # [4, 2, nb_bbox, 1] # pad 1 to convert from format [x, y] to homogeneous # coordinates format [x, y, 1] coordinates = np.concatenate( (coordinates, np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype)), axis=1) # [4, 3, nb_bbox, 1] coordinates = coordinates.transpose( (2, 0, 1, 3)) # [nb_bbox, 4, 3, 1] rotated_coords = np.matmul(rotate_matrix, coordinates) # [nb_bbox, 4, 2, 1] rotated_coords = rotated_coords[..., 0] # [nb_bbox, 4, 2] min_x, min_y = np.min( rotated_coords[:, :, 0], axis=1), np.min( rotated_coords[:, :, 1], axis=1) max_x, max_y = np.max( rotated_coords[:, :, 0], axis=1), np.max( rotated_coords[:, :, 1], axis=1) min_x, min_y = np.clip( min_x, a_min=0, a_max=w), np.clip( min_y, a_min=0, a_max=h) max_x, max_y = np.clip( max_x, a_min=min_x, a_max=w), np.clip( max_y, a_min=min_y, a_max=h) results[key] = np.stack([min_x, min_y, max_x, max_y], axis=-1).astype(results[key].dtype) def _rotate_masks(self, results, angle, center=None, scale=1.0, fill_val=0): """Rotate the masks.""" h, w, c = results['img_shape'] for key in results.get('mask_fields', []): masks = results[key] results[key] = 
masks.rotate((h, w), angle, center, scale, fill_val) def _rotate_seg(self, results, angle, center=None, scale=1.0, fill_val=255): """Rotate the segmentation map.""" for key in results.get('seg_fields', []): seg = results[key].copy() results[key] = mmcv.imrotate( seg, angle, center, scale, border_value=fill_val).astype(seg.dtype) def _filter_invalid(self, results, min_bbox_size=0): """Filter bboxes and corresponding masks too small after rotate augmentation.""" bbox2label, bbox2mask, _ = bbox2fields() for key in results.get('bbox_fields', []): bbox_w = results[key][:, 2] - results[key][:, 0] bbox_h = results[key][:, 3] - results[key][:, 1] valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size) valid_inds = np.nonzero(valid_inds)[0] results[key] = results[key][valid_inds] # label fields. e.g. gt_labels and gt_labels_ignore label_key = bbox2label.get(key) if label_key in results: results[label_key] = results[label_key][valid_inds] # mask fields, e.g. gt_masks and gt_masks_ignore mask_key = bbox2mask.get(key) if mask_key in results: results[mask_key] = results[mask_key][valid_inds] def __call__(self, results): """Call function to rotate images, bounding boxes, masks and semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: dict: Rotated results. 
""" if np.random.rand() > self.prob: return results h, w = results['img'].shape[:2] center = self.center if center is None: center = ((w - 1) * 0.5, (h - 1) * 0.5) angle = random_negative(self.angle, self.random_negative_prob) self._rotate_img(results, angle, center, self.scale) rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale) self._rotate_bboxes(results, rotate_matrix) self._rotate_masks(results, angle, center, self.scale, fill_val=0) self._rotate_seg( results, angle, center, self.scale, fill_val=self.seg_ignore_label) self._filter_invalid(results) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(level={self.level}, ' repr_str += f'scale={self.scale}, ' repr_str += f'center={self.center}, ' repr_str += f'img_fill_val={self.img_fill_val}, ' repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' repr_str += f'prob={self.prob}, ' repr_str += f'max_rotate_angle={self.max_rotate_angle}, ' repr_str += f'random_negative_prob={self.random_negative_prob})' return repr_str @PIPELINES.register_module() class Translate: """Translate the images, bboxes, masks and segmentation maps horizontally or vertically. Args: level (int | float): The level for Translate and should be in range [0,_MAX_LEVEL]. prob (float): The probability for performing translation and should be in range [0, 1]. img_fill_val (int | float | tuple): The filled value for image border. If float, the same fill value will be used for all the three channels of image. If tuple, the should be 3 elements (e.g. equals the number of channels for image). seg_ignore_label (int): The fill value used for segmentation map. Note this value must equals ``ignore_label`` in ``semantic_head`` of the corresponding config. Default 255. direction (str): The translate direction, either "horizontal" or "vertical". max_translate_offset (int | float): The maximum pixel's offset for Translate. random_negative_prob (float): The probability that turns the offset negative. 
min_size (int | float): The minimum pixel for filtering invalid bboxes after the translation. """ def __init__(self, level, prob=0.5, img_fill_val=128, seg_ignore_label=255, direction='horizontal', max_translate_offset=250., random_negative_prob=0.5, min_size=0): assert isinstance(level, (int, float)), \ 'The level must be type int or float.' assert 0 <= level <= _MAX_LEVEL, \ 'The level used for calculating Translate\'s offset should be ' \ 'in range [0,_MAX_LEVEL]' assert 0 <= prob <= 1.0, \ 'The probability of translation should be in range [0, 1].' if isinstance(img_fill_val, (float, int)): img_fill_val = tuple([float(img_fill_val)] * 3) elif isinstance(img_fill_val, tuple): assert len(img_fill_val) == 3, \ 'img_fill_val as tuple must have 3 elements.' img_fill_val = tuple([float(val) for val in img_fill_val]) else: raise ValueError('img_fill_val must be type float or tuple.') assert np.all([0 <= val <= 255 for val in img_fill_val]), \ 'all elements of img_fill_val should between range [0,255].' assert direction in ('horizontal', 'vertical'), \ 'direction should be "horizontal" or "vertical".' assert isinstance(max_translate_offset, (int, float)), \ 'The max_translate_offset must be type int or float.' # the offset used for translation self.offset = int(level_to_value(level, max_translate_offset)) self.level = level self.prob = prob self.img_fill_val = img_fill_val self.seg_ignore_label = seg_ignore_label self.direction = direction self.max_translate_offset = max_translate_offset self.random_negative_prob = random_negative_prob self.min_size = min_size def _translate_img(self, results, offset, direction='horizontal'): """Translate the image. Args: results (dict): Result dict from loading pipeline. offset (int | float): The offset for translate. direction (str): The translate direction, either "horizontal" or "vertical". 
""" for key in results.get('img_fields', ['img']): img = results[key].copy() results[key] = mmcv.imtranslate( img, offset, direction, self.img_fill_val).astype(img.dtype) def _translate_bboxes(self, results, offset): """Shift bboxes horizontally or vertically, according to offset.""" h, w, c = results['img_shape'] for key in results.get('bbox_fields', []): min_x, min_y, max_x, max_y = np.split( results[key], results[key].shape[-1], axis=-1) if self.direction == 'horizontal': min_x = np.maximum(0, min_x + offset) max_x = np.minimum(w, max_x + offset) elif self.direction == 'vertical': min_y = np.maximum(0, min_y + offset) max_y = np.minimum(h, max_y + offset) # the boxes translated outside of image will be filtered along with # the corresponding masks, by invoking ``_filter_invalid``. results[key] = np.concatenate([min_x, min_y, max_x, max_y], axis=-1) def _translate_masks(self, results, offset, direction='horizontal', fill_val=0): """Translate masks horizontally or vertically.""" h, w, c = results['img_shape'] for key in results.get('mask_fields', []): masks = results[key] results[key] = masks.translate((h, w), offset, direction, fill_val) def _translate_seg(self, results, offset, direction='horizontal', fill_val=255): """Translate segmentation maps horizontally or vertically.""" for key in results.get('seg_fields', []): seg = results[key].copy() results[key] = mmcv.imtranslate(seg, offset, direction, fill_val).astype(seg.dtype) def _filter_invalid(self, results, min_size=0): """Filter bboxes and masks too small or translated out of image.""" bbox2label, bbox2mask, _ = bbox2fields() for key in results.get('bbox_fields', []): bbox_w = results[key][:, 2] - results[key][:, 0] bbox_h = results[key][:, 3] - results[key][:, 1] valid_inds = (bbox_w > min_size) & (bbox_h > min_size) valid_inds = np.nonzero(valid_inds)[0] results[key] = results[key][valid_inds] # label fields. e.g. 
gt_labels and gt_labels_ignore label_key = bbox2label.get(key) if label_key in results: results[label_key] = results[label_key][valid_inds] # mask fields, e.g. gt_masks and gt_masks_ignore mask_key = bbox2mask.get(key) if mask_key in results: results[mask_key] = results[mask_key][valid_inds] return results def __call__(self, results): """Call function to translate images, bounding boxes, masks and semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: dict: Translated results. """ if np.random.rand() > self.prob: return results offset = random_negative(self.offset, self.random_negative_prob) self._translate_img(results, offset, self.direction) self._translate_bboxes(results, offset) # fill_val defaultly 0 for BitmapMasks and None for PolygonMasks. self._translate_masks(results, offset, self.direction) # fill_val set to ``seg_ignore_label`` for the ignored value # of segmentation map. self._translate_seg( results, offset, self.direction, fill_val=self.seg_ignore_label) self._filter_invalid(results, min_size=self.min_size) return results @PIPELINES.register_module() class ColorTransform: """Apply Color transformation to image. The bboxes, masks, and segmentations are not modified. Args: level (int | float): Should be in range [0,_MAX_LEVEL]. prob (float): The probability for performing Color transformation. """ def __init__(self, level, prob=0.5): assert isinstance(level, (int, float)), \ 'The level must be type int or float.' assert 0 <= level <= _MAX_LEVEL, \ 'The level should be in range [0,_MAX_LEVEL].' assert 0 <= prob <= 1.0, \ 'The probability should be in range [0,1].' 
self.level = level self.prob = prob self.factor = enhance_level_to_value(level) def _adjust_color_img(self, results, factor=1.0): """Apply Color transformation to image.""" for key in results.get('img_fields', ['img']): # NOTE defaultly the image should be BGR format img = results[key] results[key] = mmcv.adjust_color(img, factor).astype(img.dtype) def __call__(self, results): """Call function for Color transformation. Args: results (dict): Result dict from loading pipeline. Returns: dict: Colored results. """ if np.random.rand() > self.prob: return results self._adjust_color_img(results, self.factor) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(level={self.level}, ' repr_str += f'prob={self.prob})' return repr_str @PIPELINES.register_module() class EqualizeTransform: """Apply Equalize transformation to image. The bboxes, masks and segmentations are not modified. Args: prob (float): The probability for performing Equalize transformation. """ def __init__(self, prob=0.5): assert 0 <= prob <= 1.0, \ 'The probability should be in range [0,1].' self.prob = prob def _imequalize(self, results): """Equalizes the histogram of one image.""" for key in results.get('img_fields', ['img']): img = results[key] results[key] = mmcv.imequalize(img).astype(img.dtype) def __call__(self, results): """Call function for Equalize transformation. Args: results (dict): Results dict from loading pipeline. Returns: dict: Results after the transformation. """ if np.random.rand() > self.prob: return results self._imequalize(results) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(prob={self.prob})' @PIPELINES.register_module() class BrightnessTransform: """Apply Brightness transformation to image. The bboxes, masks and segmentations are not modified. Args: level (int | float): Should be in range [0,_MAX_LEVEL]. prob (float): The probability for performing Brightness transformation. 
""" def __init__(self, level, prob=0.5): assert isinstance(level, (int, float)), \ 'The level must be type int or float.' assert 0 <= level <= _MAX_LEVEL, \ 'The level should be in range [0,_MAX_LEVEL].' assert 0 <= prob <= 1.0, \ 'The probability should be in range [0,1].' self.level = level self.prob = prob self.factor = enhance_level_to_value(level) def _adjust_brightness_img(self, results, factor=1.0): """Adjust the brightness of image.""" for key in results.get('img_fields', ['img']): img = results[key] results[key] = mmcv.adjust_brightness(img, factor).astype(img.dtype) def __call__(self, results): """Call function for Brightness transformation. Args: results (dict): Results dict from loading pipeline. Returns: dict: Results after the transformation. """ if np.random.rand() > self.prob: return results self._adjust_brightness_img(results, self.factor) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(level={self.level}, ' repr_str += f'prob={self.prob})' return repr_str @PIPELINES.register_module() class ContrastTransform: """Apply Contrast transformation to image. The bboxes, masks and segmentations are not modified. Args: level (int | float): Should be in range [0,_MAX_LEVEL]. prob (float): The probability for performing Contrast transformation. """ def __init__(self, level, prob=0.5): assert isinstance(level, (int, float)), \ 'The level must be type int or float.' assert 0 <= level <= _MAX_LEVEL, \ 'The level should be in range [0,_MAX_LEVEL].' assert 0 <= prob <= 1.0, \ 'The probability should be in range [0,1].' self.level = level self.prob = prob self.factor = enhance_level_to_value(level) def _adjust_contrast_img(self, results, factor=1.0): """Adjust the image contrast.""" for key in results.get('img_fields', ['img']): img = results[key] results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype) def __call__(self, results): """Call function for Contrast transformation. 
Args: results (dict): Results dict from loading pipeline. Returns: dict: Results after the transformation. """ if np.random.rand() > self.prob: return results self._adjust_contrast_img(results, self.factor) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(level={self.level}, ' repr_str += f'prob={self.prob})' return repr_str
36,375
39.780269
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/pipelines/formating.py
# Copyright (c) OpenMMLab. All rights reserved. from collections.abc import Sequence import mmcv import numpy as np import torch from mmcv.parallel import DataContainer as DC from ..builder import PIPELINES def to_tensor(data): """Convert objects of various python types to :obj:`torch.Tensor`. Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`Sequence`, :class:`int` and :class:`float`. Args: data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to be converted. """ if isinstance(data, torch.Tensor): return data elif isinstance(data, np.ndarray): return torch.from_numpy(data) elif isinstance(data, Sequence) and not mmcv.is_str(data): return torch.tensor(data) elif isinstance(data, int): return torch.LongTensor([data]) elif isinstance(data, float): return torch.FloatTensor([data]) else: raise TypeError(f'type {type(data)} cannot be converted to tensor.') @PIPELINES.register_module() class ToTensor: """Convert some results to :obj:`torch.Tensor` by given keys. Args: keys (Sequence[str]): Keys that need to be converted to Tensor. """ def __init__(self, keys): self.keys = keys def __call__(self, results): """Call function to convert data in results to :obj:`torch.Tensor`. Args: results (dict): Result dict contains the data to convert. Returns: dict: The result dict contains the data converted to :obj:`torch.Tensor`. """ for key in self.keys: results[key] = to_tensor(results[key]) return results def __repr__(self): return self.__class__.__name__ + f'(keys={self.keys})' @PIPELINES.register_module() class ImageToTensor: """Convert image to :obj:`torch.Tensor` by given keys. The dimension order of input image is (H, W, C). The pipeline will convert it to (C, H, W). If only 2 dimension (H, W) is given, the output would be (1, H, W). Args: keys (Sequence[str]): Key of images to be converted to Tensor. 
""" def __init__(self, keys): self.keys = keys def __call__(self, results): """Call function to convert image in results to :obj:`torch.Tensor` and transpose the channel order. Args: results (dict): Result dict contains the image data to convert. Returns: dict: The result dict contains the image converted to :obj:`torch.Tensor` and transposed to (C, H, W) order. """ for key in self.keys: img = results[key] if len(img.shape) < 3: img = np.expand_dims(img, -1) results[key] = (to_tensor(img.transpose(2, 0, 1))).contiguous() return results def __repr__(self): return self.__class__.__name__ + f'(keys={self.keys})' @PIPELINES.register_module() class Transpose: """Transpose some results by given keys. Args: keys (Sequence[str]): Keys of results to be transposed. order (Sequence[int]): Order of transpose. """ def __init__(self, keys, order): self.keys = keys self.order = order def __call__(self, results): """Call function to transpose the channel order of data in results. Args: results (dict): Result dict contains the data to transpose. Returns: dict: The result dict contains the data transposed to \ ``self.order``. """ for key in self.keys: results[key] = results[key].transpose(self.order) return results def __repr__(self): return self.__class__.__name__ + \ f'(keys={self.keys}, order={self.order})' @PIPELINES.register_module() class ToDataContainer: """Convert results to :obj:`mmcv.DataContainer` by given fields. Args: fields (Sequence[dict]): Each field is a dict like ``dict(key='xxx', **kwargs)``. The ``key`` in result will be converted to :obj:`mmcv.DataContainer` with ``**kwargs``. Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'), dict(key='gt_labels'))``. """ def __init__(self, fields=(dict(key='img', stack=True), dict(key='gt_bboxes'), dict(key='gt_labels'))): self.fields = fields def __call__(self, results): """Call function to convert data in results to :obj:`mmcv.DataContainer`. Args: results (dict): Result dict contains the data to convert. 
Returns: dict: The result dict contains the data converted to \ :obj:`mmcv.DataContainer`. """ for field in self.fields: field = field.copy() key = field.pop('key') results[key] = DC(results[key], **field) return results def __repr__(self): return self.__class__.__name__ + f'(fields={self.fields})' @PIPELINES.register_module() class DefaultFormatBundle: """Default formatting bundle. It simplifies the pipeline of formatting common fields, including "img", "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg". These fields are formatted as follows. - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) - proposals: (1)to tensor, (2)to DataContainer - gt_bboxes: (1)to tensor, (2)to DataContainer - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer - gt_labels: (1)to tensor, (2)to DataContainer - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True) - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \ (3)to DataContainer (stack=True) """ def __call__(self, results): """Call function to transform and format common fields in results. Args: results (dict): Result dict contains the data to convert. Returns: dict: The result dict contains the data that is formatted with \ default bundle. """ if 'img' in results: img = results['img'] # add default meta keys results = self._add_default_meta_keys(results) if len(img.shape) < 3: img = np.expand_dims(img, -1) img = np.ascontiguousarray(img.transpose(2, 0, 1)) results['img'] = DC(to_tensor(img), stack=True) for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']: if key not in results: continue results[key] = DC(to_tensor(results[key])) if 'gt_masks' in results: results['gt_masks'] = DC(results['gt_masks'], cpu_only=True) if 'gt_semantic_seg' in results: results['gt_semantic_seg'] = DC( to_tensor(results['gt_semantic_seg'][None, ...]), stack=True) return results def _add_default_meta_keys(self, results): """Add default meta keys. 
We set default meta keys including `pad_shape`, `scale_factor` and `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and `Pad` are implemented during the whole pipeline. Args: results (dict): Result dict contains the data to convert. Returns: results (dict): Updated result dict contains the data to convert. """ img = results['img'] results.setdefault('pad_shape', img.shape) results.setdefault('scale_factor', 1.0) num_channels = 1 if len(img.shape) < 3 else img.shape[2] results.setdefault( 'img_norm_cfg', dict( mean=np.zeros(num_channels, dtype=np.float32), std=np.ones(num_channels, dtype=np.float32), to_rgb=False)) return results def __repr__(self): return self.__class__.__name__ @PIPELINES.register_module() class Collect: """Collect data from the loader relevant to the specific task. This is usually the last stage of the data loader pipeline. Typically keys is set to some subset of "img", "proposals", "gt_bboxes", "gt_bboxes_ignore", "gt_labels", and/or "gt_masks". The "img_meta" item is always populated. The contents of the "img_meta" dictionary depends on "meta_keys". By default this includes: - "img_shape": shape of the image input to the network as a tuple \ (h, w, c). Note that images may be zero padded on the \ bottom/right if the batch tensor is larger than this shape. - "scale_factor": a float indicating the preprocessing scale - "flip": a boolean indicating if image flip transform was used - "filename": path to the image file - "ori_shape": original shape of the image as a tuple (h, w, c) - "pad_shape": image shape after padding - "img_norm_cfg": a dict of normalization information: - mean - per channel mean subtraction - std - per channel std divisor - to_rgb - bool indicating if bgr was converted to rgb Args: keys (Sequence[str]): Keys of results to be collected in ``data``. meta_keys (Sequence[str], optional): Meta keys to be converted to ``mmcv.DataContainer`` and collected in ``data[img_metas]``. 
Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg')`` """ def __init__(self, keys, meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg')): self.keys = keys self.meta_keys = meta_keys def __call__(self, results): """Call function to collect keys in results. The keys in ``meta_keys`` will be converted to :obj:mmcv.DataContainer. Args: results (dict): Result dict contains the data to collect. Returns: dict: The result dict contains the following keys - keys in``self.keys`` - ``img_metas`` """ data = {} img_meta = {} for key in self.meta_keys: img_meta[key] = results[key] data['img_metas'] = DC(img_meta, cpu_only=True) for key in self.keys: data[key] = results[key] return data def __repr__(self): return self.__class__.__name__ + \ f'(keys={self.keys}, meta_keys={self.meta_keys})' @PIPELINES.register_module() class WrapFieldsToLists: """Wrap fields of the data dictionary into lists for evaluation. This class can be used as a last step of a test or validation pipeline for single image evaluation or inference. Example: >>> test_pipeline = [ >>> dict(type='LoadImageFromFile'), >>> dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True), >>> dict(type='Pad', size_divisor=32), >>> dict(type='ImageToTensor', keys=['img']), >>> dict(type='Collect', keys=['img']), >>> dict(type='WrapFieldsToLists') >>> ] """ def __call__(self, results): """Call function to wrap fields into lists. Args: results (dict): Result dict contains the data to wrap. Returns: dict: The result dict where value of ``self.keys`` are wrapped \ into list. """ # Wrap dict fields into lists for key, val in results.items(): results[key] = [val] return results def __repr__(self): return f'{self.__class__.__name__}()'
12,044
31.909836
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/pipelines/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform, ContrastTransform, EqualizeTransform, Rotate, Shear, Translate) from .compose import Compose from .formating import (Collect, DefaultFormatBundle, ImageToTensor, ToDataContainer, ToTensor, Transpose, to_tensor) from .instaboost import InstaBoost from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam, LoadMultiChannelImageFromFiles, LoadProposals) from .test_time_aug import MultiScaleFlipAug from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion, RandomAffine, RandomCenterCropPad, RandomCrop, RandomFlip, RandomShift, Resize, SegRescale) __all__ = [ 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', 'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations', 'LoadImageFromFile', 'LoadImageFromWebcam', 'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear', 'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform', 'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp', 'RandomAffine' ]
1,598
54.137931
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/pipelines/transforms.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import inspect import math import cv2 import mmcv import numpy as np from numpy import random from mmdet.core import PolygonMasks from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps from ..builder import PIPELINES try: from imagecorruptions import corrupt except ImportError: corrupt = None try: import albumentations from albumentations import Compose except ImportError: albumentations = None Compose = None import ipdb @PIPELINES.register_module() class Resize: """Resize images & bbox & mask. This transform resizes the input image to some scale. Bboxes and masks are then resized with the same scale factor. If the input dict contains the key "scale", then the scale in the input dict is used, otherwise the specified scale in the init method is used. If the input dict contains the key "scale_factor" (if MultiScaleFlipAug does not give img_scale but scale_factor), the actual scale will be computed by image shape and scale_factor. `img_scale` can either be a tuple (single-scale) or a list of tuple (multi-scale). There are 3 multiscale modes: - ``ratio_range is not None``: randomly sample a ratio from the ratio \ range and multiply it with the image scale. - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \ sample a scale from the multiscale range. - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \ sample a scale from multiple scales. Args: img_scale (tuple or list[tuple]): Images scales for resizing. multiscale_mode (str): Either "range" or "value". ratio_range (tuple[float]): (min_ratio, max_ratio) keep_ratio (bool): Whether to keep the aspect ratio when resizing the image. bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. backend (str): Image resize backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. 
override (bool, optional): Whether to override `scale` and `scale_factor` so as to call resize twice. Default False. If True, after the first resizing, the existed `scale` and `scale_factor` will be ignored so the second resizing can be allowed. This option is a work-around for multiple times of resize in DETR. Defaults to False. """ def __init__(self, img_scale=None, multiscale_mode='range', ratio_range=None, keep_ratio=True, bbox_clip_border=True, backend='cv2', override=False): if img_scale is None: self.img_scale = None else: if isinstance(img_scale, list): self.img_scale = img_scale else: self.img_scale = [img_scale] assert mmcv.is_list_of(self.img_scale, tuple) if ratio_range is not None: # mode 1: given a scale and a range of image ratio # assert len(self.img_scale) == 1 # obtain img_scale from results['img'] pass else: # mode 2: given multiple scales or a range of scales assert multiscale_mode in ['value', 'range'] self.backend = backend self.multiscale_mode = multiscale_mode self.ratio_range = ratio_range self.keep_ratio = keep_ratio # TODO: refactor the override option in Resize self.override = override self.bbox_clip_border = bbox_clip_border @staticmethod def random_select(img_scales): """Randomly select an img_scale from given candidates. Args: img_scales (list[tuple]): Images scales for selection. Returns: (tuple, int): Returns a tuple ``(img_scale, scale_dix)``, \ where ``img_scale`` is the selected image scale and \ ``scale_idx`` is the selected index in the given candidates. """ assert mmcv.is_list_of(img_scales, tuple) scale_idx = np.random.randint(len(img_scales)) img_scale = img_scales[scale_idx] return img_scale, scale_idx @staticmethod def random_sample(img_scales): """Randomly sample an img_scale when ``multiscale_mode=='range'``. Args: img_scales (list[tuple]): Images scale range for sampling. There must be two tuples in img_scales, which specify the lower and upper bound of image scales. 
Returns: (tuple, None): Returns a tuple ``(img_scale, None)``, where \ ``img_scale`` is sampled scale and None is just a placeholder \ to be consistent with :func:`random_select`. """ assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2 img_scale_long = [max(s) for s in img_scales] img_scale_short = [min(s) for s in img_scales] long_edge = np.random.randint( min(img_scale_long), max(img_scale_long) + 1) short_edge = np.random.randint( min(img_scale_short), max(img_scale_short) + 1) img_scale = (long_edge, short_edge) return img_scale, None @staticmethod def random_sample_ratio(img_scale, ratio_range): """Randomly sample an img_scale when ``ratio_range`` is specified. A ratio will be randomly sampled from the range specified by ``ratio_range``. Then it would be multiplied with ``img_scale`` to generate sampled scale. Args: img_scale (tuple): Images scale base to multiply with ratio. ratio_range (tuple[float]): The minimum and maximum ratio to scale the ``img_scale``. Returns: (tuple, None): Returns a tuple ``(scale, None)``, where \ ``scale`` is sampled ratio multiplied with ``img_scale`` and \ None is just a placeholder to be consistent with \ :func:`random_select`. """ assert isinstance(img_scale, tuple) and len(img_scale) == 2 min_ratio, max_ratio = ratio_range assert min_ratio <= max_ratio ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio) return scale, None def _random_scale(self, results): """Randomly sample an img_scale according to ``ratio_range`` and ``multiscale_mode``. If ``ratio_range`` is specified, a ratio will be sampled and be multiplied with ``img_scale``. If multiple scales are specified by ``img_scale``, a scale will be sampled according to ``multiscale_mode``. Otherwise, single scale will be used. Args: results (dict): Result dict from :obj:`dataset`. 
Returns: dict: Two new keys 'scale` and 'scale_idx` are added into \ ``results``, which would be used by subsequent pipelines. """ if self.ratio_range is not None: scale, scale_idx = self.random_sample_ratio( self.img_scale[0], self.ratio_range) elif len(self.img_scale) == 1: scale, scale_idx = self.img_scale[0], 0 elif self.multiscale_mode == 'range': scale, scale_idx = self.random_sample(self.img_scale) elif self.multiscale_mode == 'value': scale, scale_idx = self.random_select(self.img_scale) else: raise NotImplementedError results['scale'] = scale results['scale_idx'] = scale_idx def _resize_img(self, results): """Resize images with ``results['scale']``.""" for key in results.get('img_fields', ['img']): if self.keep_ratio: img, scale_factor = mmcv.imrescale( results[key], results['scale'], return_scale=True, backend=self.backend) # the w_scale and h_scale has minor difference # a real fix should be done in the mmcv.imrescale in the future new_h, new_w = img.shape[:2] h, w = results[key].shape[:2] w_scale = new_w / w h_scale = new_h / h else: img, w_scale, h_scale = mmcv.imresize( results[key], results['scale'], return_scale=True, backend=self.backend) results[key] = img scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], dtype=np.float32) results['img_shape'] = img.shape # in case that there is no padding results['pad_shape'] = img.shape results['scale_factor'] = scale_factor results['keep_ratio'] = self.keep_ratio def _resize_bboxes(self, results): """Resize bounding boxes with ``results['scale_factor']``.""" for key in results.get('bbox_fields', []): bboxes = results[key] * results['scale_factor'] if self.bbox_clip_border: img_shape = results['img_shape'] bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) results[key] = bboxes def _resize_masks(self, results): """Resize masks with ``results['scale']``""" for key in results.get('mask_fields', []): if results[key] is None: continue 
if self.keep_ratio: results[key] = results[key].rescale(results['scale']) else: results[key] = results[key].resize(results['img_shape'][:2]) def _resize_seg(self, results): """Resize semantic segmentation map with ``results['scale']``.""" for key in results.get('seg_fields', []): if self.keep_ratio: gt_seg = mmcv.imrescale( results[key], results['scale'], interpolation='nearest', backend=self.backend) else: gt_seg = mmcv.imresize( results[key], results['scale'], interpolation='nearest', backend=self.backend) results['gt_semantic_seg'] = gt_seg def __call__(self, results): """Call function to resize images, bounding boxes, masks, semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \ 'keep_ratio' keys are added into result dict. """ if self.img_scale is None: self.img_scale = [tuple(results['img'].shape[:2][::-1])] if 'scale' not in results: if 'scale_factor' in results: img_shape = results['img'].shape[:2] scale_factor = results['scale_factor'] assert isinstance(scale_factor, float) results['scale'] = tuple( [int(x * scale_factor) for x in img_shape][::-1]) else: self._random_scale(results) else: if not self.override: assert 'scale_factor' not in results, ( 'scale and scale_factor cannot be both set.') else: results.pop('scale') if 'scale_factor' in results: results.pop('scale_factor') self._random_scale(results) self._resize_img(results) self._resize_bboxes(results) self._resize_masks(results) self._resize_seg(results) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(img_scale={self.img_scale}, ' repr_str += f'multiscale_mode={self.multiscale_mode}, ' repr_str += f'ratio_range={self.ratio_range}, ' repr_str += f'keep_ratio={self.keep_ratio}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @PIPELINES.register_module() class RandomFlip: """Flip the image & bbox & mask. 
If the input dict contains the key "flip", then the flag will be used, otherwise it will be randomly decided by a ratio specified in the init method. When random flip is enabled, ``flip_ratio``/``direction`` can either be a float/string or tuple of float/string. There are 3 flip modes: - ``flip_ratio`` is float, ``direction`` is string: the image will be ``direction``ly flipped with probability of ``flip_ratio`` . E.g., ``flip_ratio=0.5``, ``direction='horizontal'``, then image will be horizontally flipped with probability of 0.5. - ``flip_ratio`` is float, ``direction`` is list of string: the image wil be ``direction[i]``ly flipped with probability of ``flip_ratio/len(direction)``. E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``, then image will be horizontally flipped with probability of 0.25, vertically with probability of 0.25. - ``flip_ratio`` is list of float, ``direction`` is list of string: given ``len(flip_ratio) == len(direction)``, the image wil be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``. E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal', 'vertical']``, then image will be horizontally flipped with probability of 0.3, vertically with probability of 0.5. Args: flip_ratio (float | list[float], optional): The flipping probability. Default: None. direction(str | list[str], optional): The flipping direction. Options are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'. If input is a list, the length must equal ``flip_ratio``. Each element in ``flip_ratio`` indicates the flip probability of corresponding direction. 
""" def __init__(self, flip_ratio=None, direction='horizontal'): if isinstance(flip_ratio, list): assert mmcv.is_list_of(flip_ratio, float) assert 0 <= sum(flip_ratio) <= 1 elif isinstance(flip_ratio, float): assert 0 <= flip_ratio <= 1 elif flip_ratio is None: pass else: raise ValueError('flip_ratios must be None, float, ' 'or list of float') self.flip_ratio = flip_ratio valid_directions = ['horizontal', 'vertical', 'diagonal'] if isinstance(direction, str): assert direction in valid_directions elif isinstance(direction, list): assert mmcv.is_list_of(direction, str) assert set(direction).issubset(set(valid_directions)) else: raise ValueError('direction must be either str or list of str') self.direction = direction if isinstance(flip_ratio, list): assert len(self.flip_ratio) == len(self.direction) def bbox_flip(self, bboxes, img_shape, direction): """Flip bboxes horizontally. Args: bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k) img_shape (tuple[int]): Image shape (height, width) direction (str): Flip direction. Options are 'horizontal', 'vertical'. Returns: numpy.ndarray: Flipped bounding boxes. """ assert bboxes.shape[-1] % 4 == 0 flipped = bboxes.copy() if direction == 'horizontal': w = img_shape[1] flipped[..., 0::4] = w - bboxes[..., 2::4] flipped[..., 2::4] = w - bboxes[..., 0::4] elif direction == 'vertical': h = img_shape[0] flipped[..., 1::4] = h - bboxes[..., 3::4] flipped[..., 3::4] = h - bboxes[..., 1::4] elif direction == 'diagonal': w = img_shape[1] h = img_shape[0] flipped[..., 0::4] = w - bboxes[..., 2::4] flipped[..., 1::4] = h - bboxes[..., 3::4] flipped[..., 2::4] = w - bboxes[..., 0::4] flipped[..., 3::4] = h - bboxes[..., 1::4] else: raise ValueError(f"Invalid flipping direction '{direction}'") return flipped def __call__(self, results): """Call function to flip bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. 
Returns: dict: Flipped results, 'flip', 'flip_direction' keys are added \ into result dict. """ if 'flip' not in results: if isinstance(self.direction, list): # None means non-flip direction_list = self.direction + [None] else: # None means non-flip direction_list = [self.direction, None] if isinstance(self.flip_ratio, list): non_flip_ratio = 1 - sum(self.flip_ratio) flip_ratio_list = self.flip_ratio + [non_flip_ratio] else: non_flip_ratio = 1 - self.flip_ratio # exclude non-flip single_ratio = self.flip_ratio / (len(direction_list) - 1) flip_ratio_list = [single_ratio] * (len(direction_list) - 1) + [non_flip_ratio] cur_dir = np.random.choice(direction_list, p=flip_ratio_list) results['flip'] = cur_dir is not None if 'flip_direction' not in results: results['flip_direction'] = cur_dir if results['flip']: # flip image for key in results.get('img_fields', ['img']): results[key] = mmcv.imflip( results[key], direction=results['flip_direction']) # flip bboxes for key in results.get('bbox_fields', []): results[key] = self.bbox_flip(results[key], results['img_shape'], results['flip_direction']) # flip masks for key in results.get('mask_fields', []): results[key] = results[key].flip(results['flip_direction']) # flip segs for key in results.get('seg_fields', []): results[key] = mmcv.imflip( results[key], direction=results['flip_direction']) return results def __repr__(self): return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})' @PIPELINES.register_module() class RandomShift: """Shift the image and box given shift pixels and probability. Args: shift_ratio (float): Probability of shifts. Default 0.5. max_shift_px (int): The max pixels for shifting. Default 32. filter_thr_px (int): The width and height threshold for filtering. The bbox and the rest of the targets below the width and height threshold will be filtered. Default 1. 
""" def __init__(self, shift_ratio=0.5, max_shift_px=32, filter_thr_px=1): assert 0 <= shift_ratio <= 1 assert max_shift_px >= 0 self.shift_ratio = shift_ratio self.max_shift_px = max_shift_px self.filter_thr_px = int(filter_thr_px) # The key correspondence from bboxes to labels. self.bbox2label = { 'gt_bboxes': 'gt_labels', 'gt_bboxes_ignore': 'gt_labels_ignore' } def __call__(self, results): """Call function to random shift images, bounding boxes. Args: results (dict): Result dict from loading pipeline. Returns: dict: Shift results. """ if random.random() < self.shift_ratio: img_shape = results['img'].shape[:2] random_shift_x = random.randint(-self.max_shift_px, self.max_shift_px) random_shift_y = random.randint(-self.max_shift_px, self.max_shift_px) new_x = max(0, random_shift_x) orig_x = max(0, -random_shift_x) new_y = max(0, random_shift_y) orig_y = max(0, -random_shift_y) # TODO: support mask and semantic segmentation maps. for key in results.get('bbox_fields', []): bboxes = results[key].copy() bboxes[..., 0::2] += random_shift_x bboxes[..., 1::2] += random_shift_y # clip border bboxes[..., 0::2] = np.clip(bboxes[..., 0::2], 0, img_shape[1]) bboxes[..., 1::2] = np.clip(bboxes[..., 1::2], 0, img_shape[0]) # remove invalid bboxes bbox_w = bboxes[..., 2] - bboxes[..., 0] bbox_h = bboxes[..., 3] - bboxes[..., 1] valid_inds = (bbox_w > self.filter_thr_px) & ( bbox_h > self.filter_thr_px) # If the shift does not contain any gt-bbox area, skip this # image. if key == 'gt_bboxes' and not valid_inds.any(): return results bboxes = bboxes[valid_inds] results[key] = bboxes # label fields. e.g. 
gt_labels and gt_labels_ignore label_key = self.bbox2label.get(key) if label_key in results: results[label_key] = results[label_key][valid_inds] for key in results.get('img_fields', ['img']): img = results[key] new_img = np.zeros_like(img) img_h, img_w = img.shape[:2] new_h = img_h - np.abs(random_shift_y) new_w = img_w - np.abs(random_shift_x) new_img[new_y:new_y + new_h, new_x:new_x + new_w] \ = img[orig_y:orig_y + new_h, orig_x:orig_x + new_w] results[key] = new_img return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(max_shift_px={self.max_shift_px}, ' return repr_str @PIPELINES.register_module() class Pad: """Pad the image & mask. There are two padding modes: (1) pad to a fixed size and (2) pad to the minimum size that is divisible by some number. Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor", Args: size (tuple, optional): Fixed padding size. size_divisor (int, optional): The divisor of padded size. pad_to_square (bool): Whether to pad the image into a square. Currently only used for YOLOX. Default: False. pad_val (float, optional): Padding value, 0 by default. 
""" def __init__(self, size=None, size_divisor=None, pad_to_square=False, pad_val=0): self.size = size self.size_divisor = size_divisor self.pad_val = pad_val self.pad_to_square = pad_to_square if pad_to_square: assert size is None and size_divisor is None, \ 'The size and size_divisor must be None ' \ 'when pad2square is True' else: assert size is not None or size_divisor is not None, \ 'only one of size and size_divisor should be valid' assert size is None or size_divisor is None def _pad_img(self, results): """Pad images according to ``self.size``.""" for key in results.get('img_fields', ['img']): if self.pad_to_square: max_size = max(results[key].shape[:2]) self.size = (max_size, max_size) if self.size is not None: padded_img = mmcv.impad( results[key], shape=self.size, pad_val=self.pad_val) elif self.size_divisor is not None: padded_img = mmcv.impad_to_multiple( results[key], self.size_divisor, pad_val=self.pad_val) results[key] = padded_img results['pad_shape'] = padded_img.shape results['pad_fixed_size'] = self.size results['pad_size_divisor'] = self.size_divisor def _pad_masks(self, results): """Pad masks according to ``results['pad_shape']``.""" pad_shape = results['pad_shape'][:2] for key in results.get('mask_fields', []): results[key] = results[key].pad(pad_shape, pad_val=self.pad_val) def _pad_seg(self, results): """Pad semantic segmentation map according to ``results['pad_shape']``.""" for key in results.get('seg_fields', []): results[key] = mmcv.impad( results[key], shape=results['pad_shape'][:2]) def __call__(self, results): """Call function to pad images, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: dict: Updated result dict. 
""" self._pad_img(results) self._pad_masks(results) self._pad_seg(results) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(size={self.size}, ' repr_str += f'size_divisor={self.size_divisor}, ' repr_str += f'pad_to_square={self.pad_to_square}, ' repr_str += f'pad_val={self.pad_val})' return repr_str @PIPELINES.register_module() class Normalize: """Normalize the image. Added key is "img_norm_cfg". Args: mean (sequence): Mean values of 3 channels. std (sequence): Std values of 3 channels. to_rgb (bool): Whether to convert the image from BGR to RGB, default is true. """ def __init__(self, mean, std, to_rgb=True): self.mean = np.array(mean, dtype=np.float32) self.std = np.array(std, dtype=np.float32) self.to_rgb = to_rgb def __call__(self, results): """Call function to normalize images. Args: results (dict): Result dict from loading pipeline. Returns: dict: Normalized results, 'img_norm_cfg' key is added into result dict. """ for key in results.get('img_fields', ['img']): results[key] = mmcv.imnormalize(results[key], self.mean, self.std, self.to_rgb) results['img_norm_cfg'] = dict( mean=self.mean, std=self.std, to_rgb=self.to_rgb) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})' return repr_str @PIPELINES.register_module() class RandomCrop: """Random crop the image & bboxes & masks. The absolute `crop_size` is sampled based on `crop_type` and `image_size`, then the cropped results are generated. Args: crop_size (tuple): The relative ratio or absolute pixels of height and width. crop_type (str, optional): one of "relative_range", "relative", "absolute", "absolute_range". "relative" randomly crops (h * crop_size[0], w * crop_size[1]) part from an input of size (h, w). "relative_range" uniformly samples relative crop size from range [crop_size[0], 1] and [crop_size[1], 1] for height and width respectively. 
"absolute" crops from an input with absolute size (crop_size[0], crop_size[1]). "absolute_range" uniformly samples crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w in range [crop_size[0], min(w, crop_size[1])]. Default "absolute". allow_negative_crop (bool, optional): Whether to allow a crop that does not contain any bbox area. Default False. bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. Note: - If the image is smaller than the absolute crop size, return the original image. - The keys for bboxes, labels and masks must be aligned. That is, `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and `gt_masks_ignore`. - If the crop does not contain any gt-bbox region and `allow_negative_crop` is set to False, skip this image. """ def __init__(self, crop_size, crop_type='absolute', allow_negative_crop=False, bbox_clip_border=True): if crop_type not in [ 'relative_range', 'relative', 'absolute', 'absolute_range' ]: raise ValueError(f'Invalid crop_type {crop_type}.') if crop_type in ['absolute', 'absolute_range']: assert crop_size[0] > 0 and crop_size[1] > 0 assert isinstance(crop_size[0], int) and isinstance( crop_size[1], int) else: assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 self.crop_size = crop_size self.crop_type = crop_type self.allow_negative_crop = allow_negative_crop self.bbox_clip_border = bbox_clip_border # The key correspondence from bboxes to labels and masks. self.bbox2label = { 'gt_bboxes': 'gt_labels', 'gt_bboxes_ignore': 'gt_labels_ignore' } self.bbox2mask = { 'gt_bboxes': 'gt_masks', 'gt_bboxes_ignore': 'gt_masks_ignore' } def _crop_data(self, results, crop_size, allow_negative_crop): """Function to randomly crop images, bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. crop_size (tuple): Expected absolute size after cropping, (h, w). 
allow_negative_crop (bool): Whether to allow a crop that does not contain any bbox area. Default to False. Returns: dict: Randomly cropped results, 'img_shape' key in result dict is updated according to crop size. """ assert crop_size[0] > 0 and crop_size[1] > 0 for key in results.get('img_fields', ['img']): img = results[key] margin_h = max(img.shape[0] - crop_size[0], 0) margin_w = max(img.shape[1] - crop_size[1], 0) offset_h = np.random.randint(0, margin_h + 1) offset_w = np.random.randint(0, margin_w + 1) crop_y1, crop_y2 = offset_h, offset_h + crop_size[0] crop_x1, crop_x2 = offset_w, offset_w + crop_size[1] # crop the image img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] img_shape = img.shape results[key] = img results['img_shape'] = img_shape # crop bboxes accordingly and clip to the image boundary for key in results.get('bbox_fields', []): # e.g. gt_bboxes and gt_bboxes_ignore bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h], dtype=np.float32) bboxes = results[key] - bbox_offset if self.bbox_clip_border: bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & ( bboxes[:, 3] > bboxes[:, 1]) # If the crop does not contain any gt-bbox area and # allow_negative_crop is False, skip this image. if (key == 'gt_bboxes' and not valid_inds.any() and not allow_negative_crop): return None results[key] = bboxes[valid_inds, :] # label fields. e.g. gt_labels and gt_labels_ignore label_key = self.bbox2label.get(key) if label_key in results: results[label_key] = results[label_key][valid_inds] # mask fields, e.g. 
gt_masks and gt_masks_ignore mask_key = self.bbox2mask.get(key) if mask_key in results: results[mask_key] = results[mask_key][ valid_inds.nonzero()[0]].crop( np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])) # crop semantic seg for key in results.get('seg_fields', []): results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2] return results def _get_crop_size(self, image_size): """Randomly generates the absolute crop size based on `crop_type` and `image_size`. Args: image_size (tuple): (h, w). Returns: crop_size (tuple): (crop_h, crop_w) in absolute pixels. """ h, w = image_size if self.crop_type == 'absolute': return (min(self.crop_size[0], h), min(self.crop_size[1], w)) elif self.crop_type == 'absolute_range': assert self.crop_size[0] <= self.crop_size[1] crop_h = np.random.randint( min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1) crop_w = np.random.randint( min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1) return crop_h, crop_w elif self.crop_type == 'relative': crop_h, crop_w = self.crop_size return int(h * crop_h + 0.5), int(w * crop_w + 0.5) elif self.crop_type == 'relative_range': crop_size = np.asarray(self.crop_size, dtype=np.float32) crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size) return int(h * crop_h + 0.5), int(w * crop_w + 0.5) def __call__(self, results): """Call function to randomly crop images, bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: dict: Randomly cropped results, 'img_shape' key in result dict is updated according to crop size. 
""" image_size = results['img'].shape[:2] crop_size = self._get_crop_size(image_size) results = self._crop_data(results, crop_size, self.allow_negative_crop) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(crop_size={self.crop_size}, ' repr_str += f'crop_type={self.crop_type}, ' repr_str += f'allow_negative_crop={self.allow_negative_crop}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @PIPELINES.register_module() class SegRescale: """Rescale semantic segmentation maps. Args: scale_factor (float): The scale factor of the final output. backend (str): Image rescale backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. """ def __init__(self, scale_factor=1, backend='cv2'): self.scale_factor = scale_factor self.backend = backend def __call__(self, results): """Call function to scale the semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with semantic segmentation map scaled. """ for key in results.get('seg_fields', []): if self.scale_factor != 1: results[key] = mmcv.imrescale( results[key], self.scale_factor, interpolation='nearest', backend=self.backend) return results def __repr__(self): return self.__class__.__name__ + f'(scale_factor={self.scale_factor})' @PIPELINES.register_module() class PhotoMetricDistortion: """Apply photometric distortion to image sequentially, every transformation is applied with a probability of 0.5. The position of random contrast is in second or second to last. 1. random brightness 2. random contrast (mode 0) 3. convert color from BGR to HSV 4. random saturation 5. random hue 6. convert color from HSV to BGR 7. random contrast (mode 1) 8. randomly swap channels Args: brightness_delta (int): delta of brightness. contrast_range (tuple): range of contrast. saturation_range (tuple): range of saturation. hue_delta (int): delta of hue. 
""" def __init__(self, brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18): self.brightness_delta = brightness_delta self.contrast_lower, self.contrast_upper = contrast_range self.saturation_lower, self.saturation_upper = saturation_range self.hue_delta = hue_delta def __call__(self, results): """Call function to perform photometric distortion on images. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images distorted. """ if 'img_fields' in results: assert results['img_fields'] == ['img'], \ 'Only single img_fields is allowed' img = results['img'] assert img.dtype == np.float32, \ 'PhotoMetricDistortion needs the input image of dtype ' \ 'np.float32, please set "to_float32=True" in ' \ '"LoadImageFromFile" pipeline' # random brightness if random.randint(2): delta = random.uniform(-self.brightness_delta, self.brightness_delta) img += delta # mode == 0 --> do random contrast first # mode == 1 --> do random contrast last mode = random.randint(2) if mode == 1: if random.randint(2): alpha = random.uniform(self.contrast_lower, self.contrast_upper) img *= alpha # convert color from BGR to HSV img = mmcv.bgr2hsv(img) # random saturation if random.randint(2): img[..., 1] *= random.uniform(self.saturation_lower, self.saturation_upper) # random hue if random.randint(2): img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta) img[..., 0][img[..., 0] > 360] -= 360 img[..., 0][img[..., 0] < 0] += 360 # convert color from HSV to BGR img = mmcv.hsv2bgr(img) # random contrast if mode == 0: if random.randint(2): alpha = random.uniform(self.contrast_lower, self.contrast_upper) img *= alpha # randomly swap channels if random.randint(2): img = img[..., random.permutation(3)] results['img'] = img return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(\nbrightness_delta={self.brightness_delta},\n' repr_str += 'contrast_range=' repr_str += f'{(self.contrast_lower, 
self.contrast_upper)},\n' repr_str += 'saturation_range=' repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n' repr_str += f'hue_delta={self.hue_delta})' return repr_str @PIPELINES.register_module() class Expand: """Random expand the image & bboxes. Randomly place the original image on a canvas of 'ratio' x original image size filled with mean values. The ratio is in the range of ratio_range. Args: mean (tuple): mean value of dataset. to_rgb (bool): if need to convert the order of mean to align with RGB. ratio_range (tuple): range of expand ratio. prob (float): probability of applying this transformation """ def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4), seg_ignore_label=None, prob=0.5): self.to_rgb = to_rgb self.ratio_range = ratio_range if to_rgb: self.mean = mean[::-1] else: self.mean = mean self.min_ratio, self.max_ratio = ratio_range self.seg_ignore_label = seg_ignore_label self.prob = prob def __call__(self, results): """Call function to expand images, bounding boxes. Args: results (dict): Result dict from loading pipeline. 
        Returns:
            dict: Result dict with images, bounding boxes expanded
        """

        if random.uniform(0, 1) > self.prob:
            return results

        if 'img_fields' in results:
            assert results['img_fields'] == ['img'], \
                'Only single img_fields is allowed'
        img = results['img']

        h, w, c = img.shape
        ratio = random.uniform(self.min_ratio, self.max_ratio)
        # speedup expand when meets large image: a scalar fill
        # (empty + fill) is cheaper than np.full with a per-channel value
        if np.all(self.mean == self.mean[0]):
            expand_img = np.empty((int(h * ratio), int(w * ratio), c),
                                  img.dtype)
            expand_img.fill(self.mean[0])
        else:
            expand_img = np.full((int(h * ratio), int(w * ratio), c),
                                 self.mean,
                                 dtype=img.dtype)
        # random top-left placement of the original image on the canvas
        left = int(random.uniform(0, w * ratio - w))
        top = int(random.uniform(0, h * ratio - h))
        expand_img[top:top + h, left:left + w] = img

        results['img'] = expand_img
        # expand bboxes: shift every box by the placement offset
        for key in results.get('bbox_fields', []):
            results[key] = results[key] + np.tile(
                (left, top), 2).astype(results[key].dtype)

        # expand masks
        for key in results.get('mask_fields', []):
            results[key] = results[key].expand(
                int(h * ratio), int(w * ratio), top, left)

        # expand segs, filling new area with ``seg_ignore_label``
        for key in results.get('seg_fields', []):
            gt_seg = results[key]
            expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
                                    self.seg_ignore_label,
                                    dtype=gt_seg.dtype)
            expand_gt_seg[top:top + h, left:left + w] = gt_seg
            results[key] = expand_gt_seg
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '
        repr_str += f'ratio_range={self.ratio_range}, '
        repr_str += f'seg_ignore_label={self.seg_ignore_label})'
        return repr_str


@PIPELINES.register_module()
class MinIoURandomCrop:
    """Random crop the image & bboxes, the cropped patches have minimum IoU
    requirement with original image & bboxes, the IoU threshold is randomly
    selected from min_ious.

    Args:
        min_ious (tuple): minimum IoU threshold for all intersections with
            bounding boxes
        min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
            where a >= min_crop_size).
        bbox_clip_border (bool, optional): Whether clip the objects outside
            the border of the image. Defaults to True.

    Note:
        The keys for bboxes, labels and masks should be paired. That is, \
        `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \
        `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.
    """

    def __init__(self,
                 min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
                 min_crop_size=0.3,
                 bbox_clip_border=True):
        # mode 1: return ori img unchanged; mode 0: no IoU constraint
        self.min_ious = min_ious
        self.sample_mode = (1, *min_ious, 0)
        self.min_crop_size = min_crop_size
        self.bbox_clip_border = bbox_clip_border
        # paired annotation keys kept in sync with each bbox field
        self.bbox2label = {
            'gt_bboxes': 'gt_labels',
            'gt_bboxes_ignore': 'gt_labels_ignore'
        }
        self.bbox2mask = {
            'gt_bboxes': 'gt_masks',
            'gt_bboxes_ignore': 'gt_masks_ignore'
        }

    def __call__(self, results):
        """Call function to crop images and bounding boxes with minimum IoU
        constraint.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images and bounding boxes cropped, \
                'img_shape' key is updated.
""" if 'img_fields' in results: assert results['img_fields'] == ['img'], \ 'Only single img_fields is allowed' img = results['img'] assert 'bbox_fields' in results boxes = [results[key] for key in results['bbox_fields']] boxes = np.concatenate(boxes, 0) h, w, c = img.shape while True: mode = random.choice(self.sample_mode) self.mode = mode if mode == 1: return results min_iou = mode for i in range(50): new_w = random.uniform(self.min_crop_size * w, w) new_h = random.uniform(self.min_crop_size * h, h) # h / w in [0.5, 2] if new_h / new_w < 0.5 or new_h / new_w > 2: continue left = random.uniform(w - new_w) top = random.uniform(h - new_h) patch = np.array( (int(left), int(top), int(left + new_w), int(top + new_h))) # Line or point crop is not allowed if patch[2] == patch[0] or patch[3] == patch[1]: continue overlaps = bbox_overlaps( patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1) if len(overlaps) > 0 and overlaps.min() < min_iou: continue # center of boxes should inside the crop img # only adjust boxes and instance masks when the gt is not empty if len(overlaps) > 0: # adjust boxes def is_center_of_bboxes_in_patch(boxes, patch): center = (boxes[:, :2] + boxes[:, 2:]) / 2 mask = ((center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (center[:, 0] < patch[2]) * (center[:, 1] < patch[3])) return mask mask = is_center_of_bboxes_in_patch(boxes, patch) if not mask.any(): continue for key in results.get('bbox_fields', []): boxes = results[key].copy() mask = is_center_of_bboxes_in_patch(boxes, patch) boxes = boxes[mask] if self.bbox_clip_border: boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:]) boxes[:, :2] = boxes[:, :2].clip(min=patch[:2]) boxes -= np.tile(patch[:2], 2) results[key] = boxes # labels label_key = self.bbox2label.get(key) if label_key in results: results[label_key] = results[label_key][mask] # mask fields mask_key = self.bbox2mask.get(key) if mask_key in results: results[mask_key] = results[mask_key][ mask.nonzero()[0]].crop(patch) # adjust the img no 
matter whether the gt is empty before crop img = img[patch[1]:patch[3], patch[0]:patch[2]] results['img'] = img results['img_shape'] = img.shape # seg fields for key in results.get('seg_fields', []): results[key] = results[key][patch[1]:patch[3], patch[0]:patch[2]] return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(min_ious={self.min_ious}, ' repr_str += f'min_crop_size={self.min_crop_size}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @PIPELINES.register_module() class Corrupt: """Corruption augmentation. Corruption transforms implemented based on `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_. Args: corruption (str): Corruption name. severity (int, optional): The severity of corruption. Default: 1. """ def __init__(self, corruption, severity=1): self.corruption = corruption self.severity = severity def __call__(self, results): """Call function to corrupt image. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images corrupted. """ if corrupt is None: raise RuntimeError('imagecorruptions is not installed') if 'img_fields' in results: assert results['img_fields'] == ['img'], \ 'Only single img_fields is allowed' results['img'] = corrupt( results['img'].astype(np.uint8), corruption_name=self.corruption, severity=self.severity) return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(corruption={self.corruption}, ' repr_str += f'severity={self.severity})' return repr_str @PIPELINES.register_module() class Albu: """Albumentation augmentation. Adds custom transformations from Albumentations library. Please, visit `https://albumentations.readthedocs.io` to get more information. An example of ``transforms`` is as followed: .. 
    code-block::

        [
            dict(
                type='ShiftScaleRotate',
                shift_limit=0.0625,
                scale_limit=0.0,
                rotate_limit=0,
                interpolation=1,
                p=0.5),
            dict(
                type='RandomBrightnessContrast',
                brightness_limit=[0.1, 0.3],
                contrast_limit=[0.1, 0.3],
                p=0.2),
            dict(type='ChannelShuffle', p=0.1),
            dict(
                type='OneOf',
                transforms=[
                    dict(type='Blur', blur_limit=3, p=1.0),
                    dict(type='MedianBlur', blur_limit=3, p=1.0)
                ],
                p=0.1),
        ]

    Args:
        transforms (list[dict]): A list of albu transformations
        bbox_params (dict): Bbox_params for albumentation `Compose`
        keymap (dict): Contains {'input key':'albumentation-style key'}
        skip_img_without_anno (bool): Whether to skip the image if no ann left
            after aug
    """

    def __init__(self,
                 transforms,
                 bbox_params=None,
                 keymap=None,
                 update_pad_shape=False,
                 skip_img_without_anno=False):
        if Compose is None:
            raise RuntimeError('albumentations is not installed')

        # Args will be modified later, copying it will be safer
        transforms = copy.deepcopy(transforms)
        if bbox_params is not None:
            bbox_params = copy.deepcopy(bbox_params)
        if keymap is not None:
            keymap = copy.deepcopy(keymap)
        self.transforms = transforms
        self.filter_lost_elements = False
        self.update_pad_shape = update_pad_shape
        self.skip_img_without_anno = skip_img_without_anno

        # A simple workaround to remove masks without boxes:
        # track box indices through albu via a pseudo label field.
        if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
                and 'filter_lost_elements' in bbox_params):
            self.filter_lost_elements = True
            self.origin_label_fields = bbox_params['label_fields']
            bbox_params['label_fields'] = ['idx_mapper']
            del bbox_params['filter_lost_elements']

        self.bbox_params = (
            self.albu_builder(bbox_params) if bbox_params else None)
        self.aug = Compose([self.albu_builder(t) for t in self.transforms],
                           bbox_params=self.bbox_params)

        if not keymap:
            self.keymap_to_albu = {
                'img': 'image',
                'gt_masks': 'masks',
                'gt_bboxes': 'bboxes'
            }
        else:
            self.keymap_to_albu = keymap
        self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}

    def albu_builder(self, cfg):
        """Import a module from albumentations.

        It inherits some of :func:`build_from_cfg` logic.

        Args:
            cfg (dict): Config dict. It should at least contain the key "type".

        Returns:
            obj: The constructed object.
        """

        assert isinstance(cfg, dict) and 'type' in cfg
        args = cfg.copy()

        obj_type = args.pop('type')
        if mmcv.is_str(obj_type):
            if albumentations is None:
                raise RuntimeError('albumentations is not installed')
            obj_cls = getattr(albumentations, obj_type)
        elif inspect.isclass(obj_type):
            obj_cls = obj_type
        else:
            raise TypeError(
                f'type must be a str or valid type, but got {type(obj_type)}')

        if 'transforms' in args:
            # recursively build nested transforms (e.g. OneOf)
            args['transforms'] = [
                self.albu_builder(transform)
                for transform in args['transforms']
            ]

        return obj_cls(**args)

    @staticmethod
    def mapper(d, keymap):
        """Dictionary mapper. Renames keys according to keymap provided.

        Args:
            d (dict): old dict
            keymap (dict): {'old_key':'new_key'}
        Returns:
            dict: new dict.
        """

        updated_dict = {}
        for k, v in zip(d.keys(), d.values()):
            new_k = keymap.get(k, k)
            updated_dict[new_k] = d[k]
        return updated_dict

    def __call__(self, results):
        # May return ``None`` when all annotations are lost and
        # ``skip_img_without_anno`` is set.
        # dict to albumentations format
        results = self.mapper(results, self.keymap_to_albu)
        # TODO: add bbox_fields
        if 'bboxes' in results:
            # to list of boxes
            if isinstance(results['bboxes'], np.ndarray):
                results['bboxes'] = [x for x in results['bboxes']]
            # add pseudo-field for filtration
            if self.filter_lost_elements:
                results['idx_mapper'] = np.arange(len(results['bboxes']))

        # TODO: Support mask structure in albu
        if 'masks' in results:
            if isinstance(results['masks'], PolygonMasks):
                raise NotImplementedError(
                    'Albu only supports BitMap masks now')
            ori_masks = results['masks']
            # NOTE(review): lexicographic string compare of versions; works
            # for the '0.x'/'1.x' releases in practice but is not a proper
            # version comparison.
            if albumentations.__version__ < '0.5':
                results['masks'] = results['masks'].masks
            else:
                results['masks'] = [mask for mask in results['masks'].masks]

        results = self.aug(**results)

        if 'bboxes' in results:
            if isinstance(results['bboxes'], list):
                results['bboxes'] = np.array(
                    results['bboxes'], dtype=np.float32)
            results['bboxes'] = results['bboxes'].reshape(-1, 4)

            # filter label_fields
            if self.filter_lost_elements:

                for label in self.origin_label_fields:
                    results[label] = np.array(
                        [results[label][i] for i in results['idx_mapper']])
                if 'masks' in results:
                    results['masks'] = np.array(
                        [results['masks'][i] for i in results['idx_mapper']])
                    results['masks'] = ori_masks.__class__(
                        results['masks'], results['image'].shape[0],
                        results['image'].shape[1])

                if (not len(results['idx_mapper'])
                        and self.skip_img_without_anno):
                    return None

        if 'gt_labels' in results:
            if isinstance(results['gt_labels'], list):
                results['gt_labels'] = np.array(results['gt_labels'])
            results['gt_labels'] = results['gt_labels'].astype(np.int64)

        # back to the original format
        results = self.mapper(results, self.keymap_back)

        # update final shape
        if self.update_pad_shape:
            results['pad_shape'] = results['img'].shape

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'
        return repr_str


@PIPELINES.register_module()
class RandomCenterCropPad:
    """Random center crop and random around padding for CornerNet.

    This operation generates randomly cropped image from the original image
    and pads it simultaneously. Different from :class:`RandomCrop`, the output
    shape may not equal to ``crop_size`` strictly. We choose a random value
    from ``ratios`` and the output shape could be larger or smaller than
    ``crop_size``. The padding operation is also different from :class:`Pad`,
    here we use around padding instead of right-bottom padding.

    The relation between output image (padding image) and original image:

    .. code:: text

                        output image
               +----------------------------+
               |          padded area       |
        +------|----------------------------|----------+
        |      |         cropped area       |          |
        |      |  +---------------+         |          |
        |      |  |    . center   |         |          |
        |      |  |      range    |         |          |
        |      |  +---------------+         |          |
        |          original image                      |
        +------|----------------------------|----------+
               |          padded area       |
               +----------------------------+

    There are 5 main areas in the figure:

    - output image: output image of this operation, also called
      padding image in following instruction.
    - original image: input image of this operation.
    - padded area: non-intersect area of output image and original image.
    - cropped area: the overlap of output image and original image.
    - center range: a smaller area where random center chosen from.
      center range is computed by ``border`` and original image's shape
      to avoid our random center is too close to original image's border.

    Also this operation act differently in train and test mode, the summary
    pipeline is listed below.

    Train pipeline:

    1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image
       will be ``random_ratio * crop_size``.
    2. Choose a ``random_center`` in center range.
    3. Generate padding image with center matches the ``random_center``.
    4. Initialize the padding image with pixel value equals to ``mean``.
    5. Copy the cropped area to padding image.
    6. Refine annotations.

    Test pipeline:

    1. Compute output shape according to ``test_pad_mode``.
    2. Generate padding image with center matches the original image center.
    3. Initialize the padding image with pixel value equals to ``mean``.
    4. Copy the ``cropped area`` to padding image.

    Args:
        crop_size (tuple | None): expected size after crop, final size will
            computed according to ratio. Requires (h, w) in train mode, and
            None in test mode.
        ratios (tuple): random select a ratio from tuple and crop image to
            (crop_size[0] * ratio) * (crop_size[1] * ratio). Only available in
            train mode.
        border (int): max distance from center select area to image border.
            Only available in train mode.
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB.
        test_mode (bool): whether involve random variables in transform.
            In train mode, crop_size is fixed, center coords and ratio is
            random selected from predefined lists. In test mode, crop_size
            is image's original shape, center coords and ratio is fixed.
        test_pad_mode (tuple): padding method and padding shape value, only
            available in test mode. Default is using 'logical_or' with
            127 as padding shape value.

            - 'logical_or': final_shape = input_shape | padding_shape_value
            - 'size_divisor': final_shape = int(
              ceil(input_shape / padding_shape_value) * padding_shape_value)
        test_pad_add_pix (int): Extra padding pixel in test mode. Default 0.
        bbox_clip_border (bool, optional): Whether clip the objects outside
            the border of the image. Defaults to True.
    """

    def __init__(self,
                 crop_size=None,
                 ratios=(0.9, 1.0, 1.1),
                 border=128,
                 mean=None,
                 std=None,
                 to_rgb=None,
                 test_mode=False,
                 test_pad_mode=('logical_or', 127),
                 test_pad_add_pix=0,
                 bbox_clip_border=True):
        if test_mode:
            assert crop_size is None, 'crop_size must be None in test mode'
            assert ratios is None, 'ratios must be None in test mode'
            assert border is None, 'border must be None in test mode'
            assert isinstance(test_pad_mode, (list, tuple))
            assert test_pad_mode[0] in ['logical_or', 'size_divisor']
        else:
            assert isinstance(crop_size, (list, tuple))
            assert crop_size[0] > 0 and crop_size[1] > 0, (
                'crop_size must > 0 in train mode')
            assert isinstance(ratios, (list, tuple))
            assert test_pad_mode is None, (
                'test_pad_mode must be None in train mode')

        self.crop_size = crop_size
        self.ratios = ratios
        self.border = border
        # We do not set default value to mean, std and to_rgb because these
        # hyper-parameters are easy to forget but could affect the
        # performance. Please use the same setting as Normalize for
        # performance assurance.
        assert mean is not None and std is not None and to_rgb is not None
        self.to_rgb = to_rgb
        self.input_mean = mean
        self.input_std = std
        if to_rgb:
            # mean/std are assumed BGR-ordered; reverse for RGB images
            self.mean = mean[::-1]
            self.std = std[::-1]
        else:
            self.mean = mean
            self.std = std
        self.test_mode = test_mode
        self.test_pad_mode = test_pad_mode
        self.test_pad_add_pix = test_pad_add_pix
        self.bbox_clip_border = bbox_clip_border

    def _get_border(self, border, size):
        """Get final border for the target size.

        This function generates a ``final_border`` according to image's shape.
        The area between ``final_border`` and ``size - final_border`` is the
        ``center range``. We randomly choose center from the ``center range``
        to avoid our random center is too close to original image's border.
        Also ``center range`` should be larger than 0.

        Args:
            border (int): The initial border, default is 128.
            size (int): The width or height of original image.

        Returns:
            int: The final border.
        """
        # halve ``border`` (power-of-two division) until the resulting
        # center range [border, size - border] is non-empty
        k = 2 * border / size
        i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))
        return border // i

    def _filter_boxes(self, patch, boxes):
        """Check whether the center of each box is in the patch.

        Args:
            patch (list[int]): The cropped area, [left, top, right, bottom].
            boxes (numpy array, (N x 4)): Ground truth boxes.

        Returns:
            mask (numpy array, (N,)): Each box is inside or outside the patch.
        """
        center = (boxes[:, :2] + boxes[:, 2:]) / 2
        mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
            center[:, 0] < patch[2]) * (
                center[:, 1] < patch[3])
        return mask

    def _crop_image_and_paste(self, image, center, size):
        """Crop image with a given center and size, then paste the cropped
        image to a blank image with two centers align.

        This function is equivalent to generating a blank image with ``size``
        as its shape. Then cover it on the original image with two centers (
        the center of blank image and the random center of original image)
        aligned. The overlap area is paste from the original image and the
        outside area is filled with ``mean pixel``.

        Args:
            image (np array, H x W x C): Original image.
            center (list[int]): Target crop center coord.
            size (list[int]): Target crop size. [target_h, target_w]

        Returns:
            cropped_img (np array, target_h x target_w x C): Cropped image.
            border (np array, 4): The distance of four border of
                ``cropped_img`` to the original image area, [top, bottom,
                left, right]
            patch (list[int]): The cropped area, [left, top, right, bottom].
        """
        center_y, center_x = center
        target_h, target_w = size
        img_h, img_w, img_c = image.shape

        # overlap of the target window with the original image
        x0 = max(0, center_x - target_w // 2)
        x1 = min(center_x + target_w // 2, img_w)
        y0 = max(0, center_y - target_h // 2)
        y1 = min(center_y + target_h // 2, img_h)
        patch = np.array((int(x0), int(y0), int(x1), int(y1)))

        left, right = center_x - x0, x1 - center_x
        top, bottom = center_y - y0, y1 - center_y

        # fill the canvas with the per-channel mean, then paste the overlap
        cropped_center_y, cropped_center_x = target_h // 2, target_w // 2
        cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)
        for i in range(img_c):
            cropped_img[:, :, i] += self.mean[i]
        y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)
        x_slice = slice(cropped_center_x - left, cropped_center_x + right)
        cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]

        border = np.array([
            cropped_center_y - top, cropped_center_y + bottom,
            cropped_center_x - left, cropped_center_x + right
        ],
                          dtype=np.float32)

        return cropped_img, border, patch

    def _train_aug(self, results):
        """Random crop and around padding the original image.

        Args:
            results (dict): Image infomations in the augment pipeline.

        Returns:
            results (dict): The updated dict.
""" img = results['img'] h, w, c = img.shape boxes = results['gt_bboxes'] while True: scale = random.choice(self.ratios) new_h = int(self.crop_size[0] * scale) new_w = int(self.crop_size[1] * scale) h_border = self._get_border(self.border, h) w_border = self._get_border(self.border, w) for i in range(50): center_x = random.randint(low=w_border, high=w - w_border) center_y = random.randint(low=h_border, high=h - h_border) cropped_img, border, patch = self._crop_image_and_paste( img, [center_y, center_x], [new_h, new_w]) mask = self._filter_boxes(patch, boxes) # if image do not have valid bbox, any crop patch is valid. if not mask.any() and len(boxes) > 0: continue results['img'] = cropped_img results['img_shape'] = cropped_img.shape results['pad_shape'] = cropped_img.shape x0, y0, x1, y1 = patch left_w, top_h = center_x - x0, center_y - y0 cropped_center_x, cropped_center_y = new_w // 2, new_h // 2 # crop bboxes accordingly and clip to the image boundary for key in results.get('bbox_fields', []): mask = self._filter_boxes(patch, results[key]) bboxes = results[key][mask] bboxes[:, 0:4:2] += cropped_center_x - left_w - x0 bboxes[:, 1:4:2] += cropped_center_y - top_h - y0 if self.bbox_clip_border: bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w) bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h) keep = (bboxes[:, 2] > bboxes[:, 0]) & ( bboxes[:, 3] > bboxes[:, 1]) bboxes = bboxes[keep] results[key] = bboxes if key in ['gt_bboxes']: if 'gt_labels' in results: labels = results['gt_labels'][mask] labels = labels[keep] results['gt_labels'] = labels if 'gt_masks' in results: raise NotImplementedError( 'RandomCenterCropPad only supports bbox.') # crop semantic seg for key in results.get('seg_fields', []): raise NotImplementedError( 'RandomCenterCropPad only supports bbox.') return results def _test_aug(self, results): """Around padding the original image without cropping. The padding mode and value are from ``test_pad_mode``. 
Args: results (dict): Image infomations in the augment pipeline. Returns: results (dict): The updated dict. """ img = results['img'] h, w, c = img.shape results['img_shape'] = img.shape if self.test_pad_mode[0] in ['logical_or']: # self.test_pad_add_pix is only used for centernet target_h = (h | self.test_pad_mode[1]) + self.test_pad_add_pix target_w = (w | self.test_pad_mode[1]) + self.test_pad_add_pix elif self.test_pad_mode[0] in ['size_divisor']: divisor = self.test_pad_mode[1] target_h = int(np.ceil(h / divisor)) * divisor target_w = int(np.ceil(w / divisor)) * divisor else: raise NotImplementedError( 'RandomCenterCropPad only support two testing pad mode:' 'logical-or and size_divisor.') cropped_img, border, _ = self._crop_image_and_paste( img, [h // 2, w // 2], [target_h, target_w]) results['img'] = cropped_img results['pad_shape'] = cropped_img.shape results['border'] = border return results def __call__(self, results): img = results['img'] assert img.dtype == np.float32, ( 'RandomCenterCropPad needs the input image of dtype np.float32,' ' please set "to_float32=True" in "LoadImageFromFile" pipeline') h, w, c = img.shape assert c == len(self.mean) if self.test_mode: return self._test_aug(results) else: return self._train_aug(results) def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(crop_size={self.crop_size}, ' repr_str += f'ratios={self.ratios}, ' repr_str += f'border={self.border}, ' repr_str += f'mean={self.input_mean}, ' repr_str += f'std={self.input_std}, ' repr_str += f'to_rgb={self.to_rgb}, ' repr_str += f'test_mode={self.test_mode}, ' repr_str += f'test_pad_mode={self.test_pad_mode}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @PIPELINES.register_module() class CutOut: """CutOut operation. Randomly drop some regions of image used in `Cutout <https://arxiv.org/abs/1708.04552>`_. Args: n_holes (int | tuple[int, int]): Number of regions to be dropped. 
            If it is given as a list, number of holes will be randomly
            selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
        cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
            shape of dropped regions. It can be `tuple[int, int]` to use a
            fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
            shape from the list.
        cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The
            candidate ratio of dropped regions. It can be `tuple[float, float]`
            to use a fixed ratio or `list[tuple[float, float]]` to randomly
            choose ratio from the list. Please note that `cutout_shape`
            and `cutout_ratio` cannot be both given at the same time.
        fill_in (tuple[float, float, float] | tuple[int, int, int]): The value
            of pixel to fill in the dropped regions. Default: (0, 0, 0).
    """

    def __init__(self,
                 n_holes,
                 cutout_shape=None,
                 cutout_ratio=None,
                 fill_in=(0, 0, 0)):
        # exactly one of cutout_shape / cutout_ratio must be given
        assert (cutout_shape is None) ^ (cutout_ratio is None), \
            'Either cutout_shape or cutout_ratio should be specified.'
        assert (isinstance(cutout_shape, (list, tuple))
                or isinstance(cutout_ratio, (list, tuple)))
        if isinstance(n_holes, tuple):
            assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
        else:
            # normalize scalar to a degenerate range
            n_holes = (n_holes, n_holes)
        self.n_holes = n_holes
        self.fill_in = fill_in
        self.with_ratio = cutout_ratio is not None
        self.candidates = cutout_ratio if self.with_ratio else cutout_shape
        if not isinstance(self.candidates, list):
            self.candidates = [self.candidates]

    def __call__(self, results):
        """Call function to drop some regions of image."""
        h, w, c = results['img'].shape
        n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
        for _ in range(n_holes):
            x1 = np.random.randint(0, w)
            y1 = np.random.randint(0, h)
            index = np.random.randint(0, len(self.candidates))
            if not self.with_ratio:
                cutout_w, cutout_h = self.candidates[index]
            else:
                cutout_w = int(self.candidates[index][0] * w)
                cutout_h = int(self.candidates[index][1] * h)

            # clip so the hole never extends past the image border
            x2 = np.clip(x1 + cutout_w, 0, w)
            y2 = np.clip(y1 + cutout_h, 0, h)
            results['img'][y1:y2, x1:x2, :] = self.fill_in

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(n_holes={self.n_holes}, '
        repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
                     else f'cutout_shape={self.candidates}, ')
        repr_str += f'fill_in={self.fill_in})'
        return repr_str


@PIPELINES.register_module()
class Mosaic:
    """Mosaic augmentation.

    Given 4 images, mosaic transform combines them into
    one output image. The output image is composed of the parts from each sub-
    image.

    .. code:: text

                        mosaic transform
                           center_x
                +------------------------------+
                |       pad        |  pad      |
                |      +-----------+           |
                |      |           |           |
                |      |  image1   |--------+  |
                |      |           |        |  |
                |      |           | image2 |  |
     center_y   |----+-------------+-----------|
                |    |   cropped   |           |
                |pad |   image3    |  image4   |
                |    |             |           |
                +----|-------------+-----------+
                     |             |
                     +-------------+

    The mosaic transform steps are as follows:

        1. Choose the mosaic center as the intersections of 4 images
        2. Get the left top image according to the index, and randomly
           sample another 3 images from the custom dataset.
        3. Sub image will be cropped if image is larger than mosaic patch

    Args:
        img_scale (Sequence[int]): Image size after mosaic pipeline of single
           image. Default to (640, 640).
        center_ratio_range (Sequence[float]): Center ratio range of mosaic
           output. Default to (0.5, 1.5).
        min_bbox_size (int | float): The minimum pixel for filtering
            invalid bboxes after the mosaic pipeline. Default to 0.
        pad_val (int): Pad value. Default to 114.
    """

    def __init__(self,
                 img_scale=(640, 640),
                 center_ratio_range=(0.5, 1.5),
                 min_bbox_size=0,
                 pad_val=114):
        assert isinstance(img_scale, tuple)
        self.img_scale = img_scale
        self.center_ratio_range = center_ratio_range
        self.min_bbox_size = min_bbox_size
        self.pad_val = pad_val

    def __call__(self, results):
        """Call function to make a mosaic of image.

        Args:
            results (dict): Result dict.

        Returns:
            dict: Result dict with mosaic transformed.
""" results = self._mosaic_transform(results) return results def get_indexes(self, dataset): """Call function to collect indexes. Args: dataset (:obj:`MultiImageMixDataset`): The dataset. Returns: list: indexes. """ indexs = [random.randint(0, len(dataset)) for _ in range(3)] return indexs def _mosaic_transform(self, results): """Mosaic transform function. Args: results (dict): Result dict. Returns: dict: Updated result dict. """ assert 'mix_results' in results mosaic_labels = [] mosaic_bboxes = [] if len(results['img'].shape) == 3: mosaic_img = np.full( (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2), 3), self.pad_val, dtype=results['img'].dtype) else: mosaic_img = np.full( (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)), self.pad_val, dtype=results['img'].dtype) # mosaic center x, y center_x = int( random.uniform(*self.center_ratio_range) * self.img_scale[1]) center_y = int( random.uniform(*self.center_ratio_range) * self.img_scale[0]) center_position = (center_x, center_y) loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right') for i, loc in enumerate(loc_strs): if loc == 'top_left': results_patch = copy.deepcopy(results) else: results_patch = copy.deepcopy(results['mix_results'][i - 1]) img_i = results_patch['img'] h_i, w_i = img_i.shape[:2] # keep_ratio resize scale_ratio_i = min(self.img_scale[0] / h_i, self.img_scale[1] / w_i) img_i = mmcv.imresize( img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i))) # compute the combine parameters paste_coord, crop_coord = self._mosaic_combine( loc, center_position, img_i.shape[:2][::-1]) x1_p, y1_p, x2_p, y2_p = paste_coord x1_c, y1_c, x2_c, y2_c = crop_coord # crop and paste image mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c] # adjust coordinate gt_bboxes_i = results_patch['gt_bboxes'] gt_labels_i = results_patch['gt_labels'] if gt_bboxes_i.shape[0] > 0: padw = x1_p - x1_c padh = y1_p - y1_c gt_bboxes_i[:, 0::2] = \ scale_ratio_i * gt_bboxes_i[:, 0::2] + padw 
gt_bboxes_i[:, 1::2] = \ scale_ratio_i * gt_bboxes_i[:, 1::2] + padh mosaic_bboxes.append(gt_bboxes_i) mosaic_labels.append(gt_labels_i) if len(mosaic_labels) > 0: mosaic_bboxes = np.concatenate(mosaic_bboxes, 0) mosaic_bboxes[:, 0::2] = np.clip(mosaic_bboxes[:, 0::2], 0, 2 * self.img_scale[1]) mosaic_bboxes[:, 1::2] = np.clip(mosaic_bboxes[:, 1::2], 0, 2 * self.img_scale[0]) mosaic_labels = np.concatenate(mosaic_labels, 0) mosaic_bboxes, mosaic_labels = \ self._filter_box_candidates(mosaic_bboxes, mosaic_labels) results['img'] = mosaic_img results['img_shape'] = mosaic_img.shape results['ori_shape'] = mosaic_img.shape results['gt_bboxes'] = mosaic_bboxes results['gt_labels'] = mosaic_labels return results def _mosaic_combine(self, loc, center_position_xy, img_shape_wh): """Calculate global coordinate of mosaic image and local coordinate of cropped sub-image. Args: loc (str): Index for the sub-image, loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right'). center_position_xy (Sequence[float]): Mixing center for 4 images, (x, y). img_shape_wh (Sequence[int]): Width and height of sub-image Returns: tuple[tuple[float]]: Corresponding coordinate of pasting and cropping - paste_coord (tuple): paste corner coordinate in mosaic image. - crop_coord (tuple): crop corner coordinate in mosaic image. 
""" assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') if loc == 'top_left': # index0 to top left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ max(center_position_xy[1] - img_shape_wh[1], 0), \ center_position_xy[0], \ center_position_xy[1] crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( y2 - y1), img_shape_wh[0], img_shape_wh[1] elif loc == 'top_right': # index1 to top right part of image x1, y1, x2, y2 = center_position_xy[0], \ max(center_position_xy[1] - img_shape_wh[1], 0), \ min(center_position_xy[0] + img_shape_wh[0], self.img_scale[1] * 2), \ center_position_xy[1] crop_coord = 0, img_shape_wh[1] - (y2 - y1), min( img_shape_wh[0], x2 - x1), img_shape_wh[1] elif loc == 'bottom_left': # index2 to bottom left part of image x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ center_position_xy[1], \ center_position_xy[0], \ min(self.img_scale[0] * 2, center_position_xy[1] + img_shape_wh[1]) crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min( y2 - y1, img_shape_wh[1]) else: # index3 to bottom right part of image x1, y1, x2, y2 = center_position_xy[0], \ center_position_xy[1], \ min(center_position_xy[0] + img_shape_wh[0], self.img_scale[1] * 2), \ min(self.img_scale[0] * 2, center_position_xy[1] + img_shape_wh[1]) crop_coord = 0, 0, min(img_shape_wh[0], x2 - x1), min(y2 - y1, img_shape_wh[1]) paste_coord = x1, y1, x2, y2 return paste_coord, crop_coord def _filter_box_candidates(self, bboxes, labels): """Filter out bboxes too small after Mosaic.""" bbox_w = bboxes[:, 2] - bboxes[:, 0] bbox_h = bboxes[:, 3] - bboxes[:, 1] valid_inds = (bbox_w > self.min_bbox_size) & \ (bbox_h > self.min_bbox_size) valid_inds = np.nonzero(valid_inds)[0] return bboxes[valid_inds], labels[valid_inds] def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'img_scale={self.img_scale}, ' repr_str += f'center_ratio_range={self.center_ratio_range})' repr_str += 
f'pad_val={self.pad_val})' return repr_str @PIPELINES.register_module() class MixUp: """MixUp data augmentation. .. code:: text mixup transform +------------------------------+ | mixup image | | | +--------|--------+ | | | | | | |---------------+ | | | | | | | | image | | | | | | | | | | | |-----------------+ | | pad | +------------------------------+ The mixup transform steps are as follows:: 1. Another random image is picked by dataset and embedded in the top left patch(after padding and resizing) 2. The target of mixup transform is the weighted average of mixup image and origin image. Args: img_scale (Sequence[int]): Image output size after mixup pipeline. Default: (640, 640). ratio_range (Sequence[float]): Scale ratio of mixup image. Default: (0.5, 1.5). flip_ratio (float): Horizontal flip ratio of mixup image. Default: 0.5. pad_val (int): Pad value. Default: 114. max_iters (int): The maximum number of iterations. If the number of iterations is greater than `max_iters`, but gt_bbox is still empty, then the iteration is terminated. Default: 15. min_bbox_size (float): Width and height threshold to filter bboxes. If the height or width of a box is smaller than this value, it will be removed. Default: 5. min_area_ratio (float): Threshold of area ratio between original bboxes and wrapped bboxes. If smaller than this value, the box will be removed. Default: 0.2. max_aspect_ratio (float): Aspect ratio of width and height threshold to filter bboxes. If max(h/w, w/h) larger than this value, the box will be removed. Default: 20. 
@PIPELINES.register_module()
class MixUp:
    """MixUp data augmentation.

    .. code:: text

                        mixup transform
               +------------------------------+
               | mixup image   |              |
               |      +--------|--------+     |
               |      |        |        |     |
               |---------------+        |     |
               |      |                 |     |
               |      |      image      |     |
               |      |                 |     |
               |      |                 |     |
               |      |-----------------+     |
               |             pad              |
               +------------------------------+

    The mixup transform steps are as follows::

       1. Another random image is picked by dataset and embedded in
          the top left patch(after padding and resizing)
       2. The target of mixup transform is the weighted average of mixup
          image and origin image.

    Args:
        img_scale (Sequence[int]): Image output size after mixup pipeline.
            Default: (640, 640).
        ratio_range (Sequence[float]): Scale ratio of mixup image.
            Default: (0.5, 1.5).
        flip_ratio (float): Horizontal flip ratio of mixup image.
            Default: 0.5.
        pad_val (int): Pad value. Default: 114.
        max_iters (int): The maximum number of iterations. If the number of
            iterations is greater than `max_iters`, but gt_bbox is still
            empty, then the iteration is terminated. Default: 15.
        min_bbox_size (float): Width and height threshold to filter bboxes.
            If the height or width of a box is smaller than this value, it
            will be removed. Default: 5.
        min_area_ratio (float): Threshold of area ratio between
            original bboxes and wrapped bboxes. If smaller than this value,
            the box will be removed. Default: 0.2.
        max_aspect_ratio (float): Aspect ratio of width and height
            threshold to filter bboxes. If max(h/w, w/h) larger than this
            value, the box will be removed. Default: 20.
    """

    def __init__(self,
                 img_scale=(640, 640),
                 ratio_range=(0.5, 1.5),
                 flip_ratio=0.5,
                 pad_val=114,
                 max_iters=15,
                 min_bbox_size=5,
                 min_area_ratio=0.2,
                 max_aspect_ratio=20):
        assert isinstance(img_scale, tuple)
        self.dynamic_scale = img_scale
        self.ratio_range = ratio_range
        self.flip_ratio = flip_ratio
        self.pad_val = pad_val
        self.max_iters = max_iters
        self.min_bbox_size = min_bbox_size
        self.min_area_ratio = min_area_ratio
        self.max_aspect_ratio = max_aspect_ratio

    def __call__(self, results):
        """Call function to make a mixup of image.

        Args:
            results (dict): Result dict.

        Returns:
            dict: Result dict with mixup transformed.
        """
        results = self._mixup_transform(results)
        return results

    def get_indexes(self, dataset):
        """Call function to collect indexes.

        Retries up to ``max_iters`` times until a sample with at least one
        gt bbox is drawn; the last drawn index is returned either way.

        Args:
            dataset (:obj:`MultiImageMixDataset`): The dataset.

        Returns:
            list: indexes.
        """
        # NOTE(review): assumes `random` is `numpy.random` (high end
        # exclusive) — confirm the file's `random` import.
        for i in range(self.max_iters):
            index = random.randint(0, len(dataset))
            gt_bboxes_i = dataset.get_ann_info(index)['bboxes']
            if len(gt_bboxes_i) != 0:
                break

        return index

    def _mixup_transform(self, results):
        """MixUp transform function.

        Args:
            results (dict): Result dict.

        Returns:
            dict: Updated result dict.
        """

        assert 'mix_results' in results
        assert len(
            results['mix_results']) == 1, 'MixUp only support 2 images now !'

        if results['mix_results'][0]['gt_bboxes'].shape[0] == 0:
            # empty bbox
            return results

        if 'scale' in results:
            self.dynamic_scale = results['scale']

        retrieve_results = results['mix_results'][0]
        retrieve_img = retrieve_results['img']

        jit_factor = random.uniform(*self.ratio_range)
        is_filp = random.uniform(0, 1) > self.flip_ratio

        # Canvas at dynamic_scale, pre-filled with pad_val.
        if len(retrieve_img.shape) == 3:
            out_img = np.ones(
                (self.dynamic_scale[0], self.dynamic_scale[1], 3),
                dtype=retrieve_img.dtype) * self.pad_val
        else:
            out_img = np.ones(
                self.dynamic_scale, dtype=retrieve_img.dtype) * self.pad_val

        # 1. keep_ratio resize
        scale_ratio = min(self.dynamic_scale[0] / retrieve_img.shape[0],
                          self.dynamic_scale[1] / retrieve_img.shape[1])
        retrieve_img = mmcv.imresize(
            retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),
                           int(retrieve_img.shape[0] * scale_ratio)))

        # 2. paste
        out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img

        # 3. scale jit
        scale_ratio *= jit_factor
        out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),
                                          int(out_img.shape[0] * jit_factor)))

        # 4. flip
        if is_filp:
            out_img = out_img[:, ::-1, :]

        # 5. random crop to the size of the original image.
        ori_img = results['img']
        origin_h, origin_w = out_img.shape[:2]
        target_h, target_w = ori_img.shape[:2]
        padded_img = np.zeros(
            (max(origin_h, target_h), max(origin_w,
                                          target_w), 3)).astype(np.uint8)
        padded_img[:origin_h, :origin_w] = out_img

        x_offset, y_offset = 0, 0
        if padded_img.shape[0] > target_h:
            y_offset = random.randint(0, padded_img.shape[0] - target_h)
        if padded_img.shape[1] > target_w:
            x_offset = random.randint(0, padded_img.shape[1] - target_w)
        padded_cropped_img = padded_img[y_offset:y_offset + target_h,
                                        x_offset:x_offset + target_w]

        # 6. adjust bbox to the jittered/flipped mixup image coordinates.
        retrieve_gt_bboxes = retrieve_results['gt_bboxes']
        retrieve_gt_bboxes[:, 0::2] = np.clip(
            retrieve_gt_bboxes[:, 0::2] * scale_ratio, 0, origin_w)
        retrieve_gt_bboxes[:, 1::2] = np.clip(
            retrieve_gt_bboxes[:, 1::2] * scale_ratio, 0, origin_h)

        if is_filp:
            retrieve_gt_bboxes[:, 0::2] = (
                origin_w - retrieve_gt_bboxes[:, 0::2][:, ::-1])

        # 7. filter: compare pre-crop vs post-crop boxes.
        cp_retrieve_gt_bboxes = retrieve_gt_bboxes.copy()
        cp_retrieve_gt_bboxes[:, 0::2] = np.clip(
            cp_retrieve_gt_bboxes[:, 0::2] - x_offset, 0, target_w)
        cp_retrieve_gt_bboxes[:, 1::2] = np.clip(
            cp_retrieve_gt_bboxes[:, 1::2] - y_offset, 0, target_h)
        keep_list = self._filter_box_candidates(retrieve_gt_bboxes.T,
                                                cp_retrieve_gt_bboxes.T)

        # 8. mix up: equal-weight average of the two images.
        if keep_list.sum() >= 1.0:
            ori_img = ori_img.astype(np.float32)
            mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(
                np.float32)

            retrieve_gt_labels = retrieve_results['gt_labels'][keep_list]
            retrieve_gt_bboxes = cp_retrieve_gt_bboxes[keep_list]
            mixup_gt_bboxes = np.concatenate(
                (results['gt_bboxes'], retrieve_gt_bboxes), axis=0)
            mixup_gt_labels = np.concatenate(
                (results['gt_labels'], retrieve_gt_labels), axis=0)

            results['img'] = mixup_img
            results['img_shape'] = mixup_img.shape
            results['gt_bboxes'] = mixup_gt_bboxes
            results['gt_labels'] = mixup_gt_labels

        return results

    def _filter_box_candidates(self, bbox1, bbox2):
        """Compute candidate boxes which include following 5 things:

        bbox1 before augment, bbox2 after augment, min_bbox_size (pixels),
        min_area_ratio, max_aspect_ratio.
        """
        # bbox1/bbox2 are transposed (4, N) arrays.
        w1, h1 = bbox1[2] - bbox1[0], bbox1[3] - bbox1[1]
        w2, h2 = bbox2[2] - bbox2[0], bbox2[3] - bbox2[1]
        ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16))
        return ((w2 > self.min_bbox_size)
                & (h2 > self.min_bbox_size)
                & (w2 * h2 / (w1 * h1 + 1e-16) > self.min_area_ratio)
                & (ar < self.max_aspect_ratio))

    def __repr__(self):
        # Fixed: every field was terminated with ')' and the opening
        # parenthesis was missing, producing a malformed repr.
        repr_str = self.__class__.__name__
        repr_str += f'(dynamic_scale={self.dynamic_scale}, '
        repr_str += f'ratio_range={self.ratio_range}, '
        repr_str += f'flip_ratio={self.flip_ratio}, '
        repr_str += f'pad_val={self.pad_val}, '
        repr_str += f'max_iters={self.max_iters}, '
        repr_str += f'min_bbox_size={self.min_bbox_size}, '
        repr_str += f'min_area_ratio={self.min_area_ratio}, '
        repr_str += f'max_aspect_ratio={self.max_aspect_ratio})'
        return repr_str
@PIPELINES.register_module()
class RandomAffine:
    """Random affine transform data augmentation.

    This operation randomly generates affine transform matrix which including
    rotation, translation, shear and scaling transforms.

    Args:
        max_rotate_degree (float): Maximum degrees of rotation transform.
            Default: 10.
        max_translate_ratio (float): Maximum ratio of translation.
            Default: 0.1.
        scaling_ratio_range (tuple[float]): Min and max ratio of
            scaling transform. Default: (0.5, 1.5).
        max_shear_degree (float): Maximum degrees of shear
            transform. Default: 2.
        border (tuple[int]): Distance from height and width sides of input
            image to adjust output shape. Only used in mosaic dataset.
            Default: (0, 0).
        border_val (tuple[int]): Border padding values of 3 channels.
            Default: (114, 114, 114).
        min_bbox_size (float): Width and height threshold to filter bboxes.
            If the height or width of a box is smaller than this value, it
            will be removed. Default: 2.
        min_area_ratio (float): Threshold of area ratio between
            original bboxes and wrapped bboxes. If smaller than this value,
            the box will be removed. Default: 0.2.
        max_aspect_ratio (float): Aspect ratio of width and height
            threshold to filter bboxes. If max(h/w, w/h) larger than this
            value, the box will be removed.
    """

    def __init__(self,
                 max_rotate_degree=10.0,
                 max_translate_ratio=0.1,
                 scaling_ratio_range=(0.5, 1.5),
                 max_shear_degree=2.0,
                 border=(0, 0),
                 border_val=(114, 114, 114),
                 min_bbox_size=2,
                 min_area_ratio=0.2,
                 max_aspect_ratio=20):
        assert 0 <= max_translate_ratio <= 1
        assert scaling_ratio_range[0] <= scaling_ratio_range[1]
        assert scaling_ratio_range[0] > 0
        self.max_rotate_degree = max_rotate_degree
        self.max_translate_ratio = max_translate_ratio
        self.scaling_ratio_range = scaling_ratio_range
        self.max_shear_degree = max_shear_degree
        self.border = border
        self.border_val = border_val
        self.min_bbox_size = min_bbox_size
        self.min_area_ratio = min_area_ratio
        self.max_aspect_ratio = max_aspect_ratio

    def __call__(self, results):
        """Apply a random affine warp to the image and all bbox fields.

        Args:
            results (dict): Result dict with 'img' and optional
                'bbox_fields'.

        Returns:
            dict: Updated result dict.

        Raises:
            NotImplementedError: if 'gt_masks' is present — only bboxes
                are supported.
        """
        img = results['img']
        # Output canvas grows by 2x the border on each axis (mosaic use).
        height = img.shape[0] + self.border[0] * 2
        width = img.shape[1] + self.border[1] * 2

        # Center: move the image center to the origin so rotation/scale/
        # shear happen about the center, not the top-left corner.
        center_matrix = np.eye(3, dtype=np.float32)
        center_matrix[0, 2] = -img.shape[1] / 2  # x translation (pixels)
        center_matrix[1, 2] = -img.shape[0] / 2  # y translation (pixels)

        # Rotation
        rotation_degree = random.uniform(-self.max_rotate_degree,
                                         self.max_rotate_degree)
        rotation_matrix = self._get_rotation_matrix(rotation_degree)

        # Scaling
        scaling_ratio = random.uniform(self.scaling_ratio_range[0],
                                       self.scaling_ratio_range[1])
        scaling_matrix = self._get_scaling_matrix(scaling_ratio)

        # Shear
        x_degree = random.uniform(-self.max_shear_degree,
                                  self.max_shear_degree)
        y_degree = random.uniform(-self.max_shear_degree,
                                  self.max_shear_degree)
        shear_matrix = self._get_shear_matrix(x_degree, y_degree)

        # Translation: recenters around (0.5, 0.5) of the output canvas,
        # jittered by max_translate_ratio.
        trans_x = random.uniform(0.5 - self.max_translate_ratio,
                                 0.5 + self.max_translate_ratio) * width
        trans_y = random.uniform(0.5 - self.max_translate_ratio,
                                 0.5 + self.max_translate_ratio) * height
        translate_matrix = self._get_translation_matrix(trans_x, trans_y)

        # Composition order matters: center -> scale -> rotate -> shear ->
        # translate (applied right-to-left).
        warp_matrix = (
            translate_matrix @ shear_matrix @ rotation_matrix
            @ scaling_matrix @ center_matrix)

        img = cv2.warpPerspective(
            img,
            warp_matrix,
            dsize=(width, height),
            borderValue=self.border_val)
        results['img'] = img
        results['img_shape'] = img.shape

        for key in results.get('bbox_fields', []):
            bboxes = results[key]
            num_bboxes = len(bboxes)
            if num_bboxes:
                # homogeneous coordinates: all 4 corners of every box.
                xs = bboxes[:, [0, 2, 2, 0]].reshape(num_bboxes * 4)
                ys = bboxes[:, [1, 3, 3, 1]].reshape(num_bboxes * 4)
                ones = np.ones_like(xs)
                points = np.vstack([xs, ys, ones])

                warp_points = warp_matrix @ points
                # Perspective divide back to 2D.
                warp_points = warp_points[:2] / warp_points[2]
                xs = warp_points[0].reshape(num_bboxes, 4)
                ys = warp_points[1].reshape(num_bboxes, 4)

                # Axis-aligned hull of the 4 warped corners.
                warp_bboxes = np.vstack(
                    (xs.min(1), ys.min(1), xs.max(1), ys.max(1))).T

                warp_bboxes[:, [0, 2]] = warp_bboxes[:, [0, 2]].clip(0, width)
                warp_bboxes[:, [1, 3]] = warp_bboxes[:, [1, 3]].clip(
                    0, height)

                # filter bboxes (compare at a common scale).
                valid_index = self.filter_gt_bboxes(bboxes * scaling_ratio,
                                                    warp_bboxes)
                results[key] = warp_bboxes[valid_index]
                if key in ['gt_bboxes']:
                    if 'gt_labels' in results:
                        results['gt_labels'] = results['gt_labels'][
                            valid_index]

                if 'gt_masks' in results:
                    raise NotImplementedError(
                        'RandomAffine only supports bbox.')
        return results

    def filter_gt_bboxes(self, origin_bboxes, wrapped_bboxes):
        """Keep boxes that remain large enough, retain enough area, and do
        not become overly elongated after warping.

        Args:
            origin_bboxes (ndarray): (N, 4) boxes before warping (already
                multiplied by the scaling ratio by the caller).
            wrapped_bboxes (ndarray): (N, 4) boxes after warping.

        Returns:
            ndarray: Boolean mask of boxes to keep.
        """
        origin_w = origin_bboxes[:, 2] - origin_bboxes[:, 0]
        origin_h = origin_bboxes[:, 3] - origin_bboxes[:, 1]
        wrapped_w = wrapped_bboxes[:, 2] - wrapped_bboxes[:, 0]
        wrapped_h = wrapped_bboxes[:, 3] - wrapped_bboxes[:, 1]
        aspect_ratio = np.maximum(wrapped_w / (wrapped_h + 1e-16),
                                  wrapped_h / (wrapped_w + 1e-16))

        wh_valid_idx = (wrapped_w > self.min_bbox_size) & \
                       (wrapped_h > self.min_bbox_size)
        area_valid_idx = wrapped_w * wrapped_h / (origin_w * origin_h +
                                                  1e-16) > self.min_area_ratio
        aspect_ratio_valid_idx = aspect_ratio < self.max_aspect_ratio
        return wh_valid_idx & area_valid_idx & aspect_ratio_valid_idx

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(max_rotate_degree={self.max_rotate_degree}, '
        repr_str += f'max_translate_ratio={self.max_translate_ratio}, '
        repr_str += f'scaling_ratio={self.scaling_ratio_range}, '
        repr_str += f'max_shear_degree={self.max_shear_degree}, '
        repr_str += f'border={self.border}, '
        repr_str += f'border_val={self.border_val}, '
        repr_str += f'min_bbox_size={self.min_bbox_size}, '
        repr_str += f'min_area_ratio={self.min_area_ratio}, '
        repr_str += f'max_aspect_ratio={self.max_aspect_ratio})'
        return repr_str

    @staticmethod
    def _get_rotation_matrix(rotate_degrees):
        # 3x3 homogeneous rotation about the origin.
        radian = math.radians(rotate_degrees)
        rotation_matrix = np.array(
            [[np.cos(radian), -np.sin(radian), 0.],
             [np.sin(radian), np.cos(radian), 0.], [0., 0., 1.]],
            dtype=np.float32)
        return rotation_matrix

    @staticmethod
    def _get_scaling_matrix(scale_ratio):
        # 3x3 homogeneous uniform scaling.
        scaling_matrix = np.array(
            [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]],
            dtype=np.float32)
        return scaling_matrix

    @staticmethod
    def _get_share_matrix(scale_ratio):
        # NOTE(review): this is byte-identical to _get_scaling_matrix and is
        # never called in this class — presumably a leftover typo for
        # "shear"; confirm before removing.
        scaling_matrix = np.array(
            [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]],
            dtype=np.float32)
        return scaling_matrix

    @staticmethod
    def _get_shear_matrix(x_shear_degrees, y_shear_degrees):
        # 3x3 homogeneous shear along x and y.
        x_radian = math.radians(x_shear_degrees)
        y_radian = math.radians(y_shear_degrees)
        shear_matrix = np.array([[1, np.tan(x_radian), 0.],
                                 [np.tan(y_radian), 1, 0.], [0., 0., 1.]],
                                dtype=np.float32)
        return shear_matrix

    @staticmethod
    def _get_translation_matrix(x, y):
        # 3x3 homogeneous translation by (x, y) pixels.
        translation_matrix = np.array([[1, 0., x], [0., 1, y], [0., 0., 1.]],
                                      dtype=np.float32)
        return translation_matrix
103,716
38.632021
86
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/datasets/pipelines/test_time_aug.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import mmcv

from ..builder import PIPELINES
from .compose import Compose


@PIPELINES.register_module()
class MultiScaleFlipAug:
    """Test-time augmentation with multiple scales and flipping.

    An example configuration is as followed:

    .. code-block::

        img_scale=[(1333, 400), (1333, 800)],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ]

    After MultiScaleFLipAug with above configuration, the results are wrapped
    into lists of the same length as followed:

    .. code-block::

        dict(
            img=[...],
            img_shape=[...],
            scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
            flip=[False, True, False, True]
            ...
        )

    Args:
        transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (tuple | list[tuple] | None): Images scales for resizing.
        scale_factor (float | list[float] | None): Scale factors for
            resizing.
        flip (bool): Whether apply flip augmentation. Default: False.
        flip_direction (str | list[str]): Flip augmentation directions,
            options are "horizontal", "vertical" and "diagonal". If
            flip_direction is a list, multiple flip augmentations will be
            applied. It has no effect when flip == False. Default:
            "horizontal".
    """

    def __init__(self,
                 transforms,
                 img_scale=None,
                 scale_factor=None,
                 flip=False,
                 flip_direction='horizontal'):
        self.transforms = Compose(transforms)
        # Exactly one of img_scale / scale_factor must be provided.
        assert (img_scale is None) ^ (scale_factor is None), (
            'Must have but only one variable can be setted')
        if img_scale is not None:
            self.img_scale = img_scale if isinstance(img_scale,
                                                     list) else [img_scale]
            self.scale_key = 'scale'
            assert mmcv.is_list_of(self.img_scale, tuple)
        else:
            # Scale factors are stored in self.img_scale too so __call__
            # can iterate one list; scale_key decides which results key
            # they populate.
            self.img_scale = scale_factor if isinstance(
                scale_factor, list) else [scale_factor]
            self.scale_key = 'scale_factor'

        self.flip = flip
        self.flip_direction = flip_direction if isinstance(
            flip_direction, list) else [flip_direction]
        assert mmcv.is_list_of(self.flip_direction, str)
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn(
                'flip_direction has no effect when flip is set to False')
        if (self.flip
                and not any([t['type'] == 'RandomFlip' for t in transforms])):
            warnings.warn(
                'flip has no effect when RandomFlip is not in transforms')

    def __call__(self, results):
        """Call function to apply test time augment transforms on results.

        Args:
            results (dict): Result dict contains the data to transform.

        Returns:
           dict[str: list]: The augmented data, where each value is wrapped
               into a list.
        """

        aug_data = []
        # Always include the un-flipped variant; add one entry per flip
        # direction when flipping is enabled.
        flip_args = [(False, None)]
        if self.flip:
            flip_args += [(True, direction)
                          for direction in self.flip_direction]
        for scale in self.img_scale:
            for flip, direction in flip_args:
                # Shallow copy: each augmentation variant gets its own
                # scale/flip keys but shares the underlying data.
                _results = results.copy()
                _results[self.scale_key] = scale
                _results['flip'] = flip
                _results['flip_direction'] = direction
                data = self.transforms(_results)
                aug_data.append(data)
        # list of dict to dict of list
        aug_data_dict = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for key, val in data.items():
                aug_data_dict[key].append(val)
        return aug_data_dict

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(transforms={self.transforms}, '
        repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
        repr_str += f'flip_direction={self.flip_direction})'
        return repr_str
4,469
35.639344
78
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/utils/contextmanagers.py
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
import contextlib
import logging
import os
import time
from typing import List

import torch

logger = logging.getLogger(__name__)

# When set in the environment, CUDA events are created with timing enabled
# and per-stream elapsed times are logged.
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))


@contextlib.asynccontextmanager
async def completed(trace_name='',
                    name='',
                    sleep_interval=0.05,
                    streams: List[torch.cuda.Stream] = None):
    """Async context manager that waits for work to complete on given CUDA
    streams.

    On exit, records an event on each stream and polls (yielding to the
    event loop between polls) until all events report completion. On a
    CUDA-less host this is a no-op.

    Args:
        trace_name (str): Label used in log messages.
        name (str): Secondary label used in log messages.
        sleep_interval (float): Seconds to await between event polls.
        streams (List[torch.cuda.Stream] | None): Streams to wait on;
            falsy entries (and a falsy list) default to the current stream.
    """
    if not torch.cuda.is_available():
        yield
        return

    stream_before_context_switch = torch.cuda.current_stream()
    if not streams:
        streams = [stream_before_context_switch]
    else:
        streams = [s if s else stream_before_context_switch for s in streams]

    end_events = [
        torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
    ]

    if DEBUG_COMPLETED_TIME:
        start = torch.cuda.Event(enable_timing=True)
        stream_before_context_switch.record_event(start)

        cpu_start = time.monotonic()
    logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
    grad_enabled_before = torch.is_grad_enabled()
    try:
        yield
    finally:
        # The wrapped body must not have switched the current stream.
        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch

        if DEBUG_COMPLETED_TIME:
            cpu_end = time.monotonic()
        # Mark the end of the submitted work on every watched stream.
        for i, stream in enumerate(streams):
            event = end_events[i]
            stream.record_event(event)

        grad_enabled_after = torch.is_grad_enabled()

        # observed change of torch.is_grad_enabled() during concurrent run of
        # async_test_bboxes code
        assert (grad_enabled_before == grad_enabled_after
                ), 'Unexpected is_grad_enabled() value change'

        are_done = [e.query() for e in end_events]
        logger.debug('%s %s completed: %s streams: %s', trace_name, name,
                     are_done, streams)
        # Poll the events cooperatively: sleep yields to the event loop so
        # other coroutines can run while the GPU finishes.
        with torch.cuda.stream(stream_before_context_switch):
            while not all(are_done):
                await asyncio.sleep(sleep_interval)
                are_done = [e.query() for e in end_events]
                logger.debug(
                    '%s %s completed: %s streams: %s',
                    trace_name,
                    name,
                    are_done,
                    streams,
                )

        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch

        if DEBUG_COMPLETED_TIME:
            cpu_time = (cpu_end - cpu_start) * 1000
            stream_times_ms = ''
            for i, stream in enumerate(streams):
                elapsed_time = start.elapsed_time(end_events[i])
                stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
            logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
                        stream_times_ms)


@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
                     trace_name='concurrent',
                     name='stream'):
    """Run code concurrently in different streams.

    :param streamqueue: asyncio.Queue instance.

    Queue tasks define the pool of streams used for concurrent execution.
    """
    if not torch.cuda.is_available():
        yield
        return

    initial_stream = torch.cuda.current_stream()

    with torch.cuda.stream(initial_stream):
        # Borrow a stream from the pool; returned in the finally block so
        # the pool size stays constant even if the body raises.
        stream = await streamqueue.get()
        assert isinstance(stream, torch.cuda.Stream)

        try:
            with torch.cuda.stream(stream):
                logger.debug('%s %s is starting, stream: %s', trace_name,
                             name, stream)
                yield
                current = torch.cuda.current_stream()
                assert current == stream
                logger.debug('%s %s has finished, stream: %s', trace_name,
                             name, stream)
        finally:
            streamqueue.task_done()
            streamqueue.put_nowait(stream)
4,125
32.544715
79
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/utils/util_mixins.py
# Copyright (c) OpenMMLab. All rights reserved.
"""Mixin providing ``__repr__``/``__str__`` from a single ``__nice__`` hook.

Define ``__nice__`` on a subclass and both string conversions come for
free. If the subclass has a ``__len__``, a default ``__nice__`` based on
the length is provided. Copied from the ubelt library:
https://github.com/Erotemic/ubelt

Example:
    >>> class Student(NiceRepr):
    ...    def __init__(self, name):
    ...       self.name = name
    ...    def __nice__(self):
    ...       return self.name
    >>> s1 = Student('Alice')
    >>> s2 = Student('Bob')
    >>> print(f's1 = {s1}')
    >>> print(f's2 = {s2}')
    s1 = <Student(Alice)>
    s2 = <Student(Bob)>

Example:
    >>> class Group(NiceRepr):
    ...    def __init__(self, data):
    ...        self.data = data
    ...    def __len__(self):
    ...        return len(self.data)
    >>> g = Group([1, 2, 3])
    >>> print(f'g = {g}')
    g = <Group(3)>
"""
import warnings


class NiceRepr:
    """Defines ``__str__``/``__repr__`` in terms of a ``__nice__`` method.

    Subclasses should override ``__nice__``; sized subclasses (those with a
    ``__len__``) inherit a default that reports their length. When
    ``__nice__`` is missing entirely, a ``RuntimeWarning`` is emitted and
    the plain ``object`` repr is used instead.

    Example:
        >>> class Foo(NiceRepr):
        ...    def __nice__(self):
        ...        return 'info'
        >>> foo = Foo()
        >>> assert str(foo) == '<Foo(info)>'
        >>> assert repr(foo).startswith('<Foo(info) at ')

    Example:
        >>> class Baz(NiceRepr):
        ...    def __len__(self):
        ...        return 5
        >>> baz = Baz()
        >>> assert str(baz) == '<Baz(5)>'
    """

    def __nice__(self):
        """str: a "nice" summary string describing this module"""
        if not hasattr(self, '__len__'):
            # Without __len__ there is no sensible default; the subclass
            # must provide its own __nice__.
            raise NotImplementedError(
                f'Define the __nice__ method for {self.__class__!r}')
        # Sized objects default to reporting their length.
        return str(len(self))

    def __repr__(self):
        """str: the string of the module"""
        try:
            return (f'<{self.__class__.__name__}'
                    f'({self.__nice__()}) at {hex(id(self))}>')
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)

    def __str__(self):
        """str: the string of the module"""
        try:
            return f'<{self.__class__.__name__}({self.__nice__()})>'
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)
3,712
34.028302
78
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/utils/profiling.py
# Copyright (c) OpenMMLab. All rights reserved. import contextlib import sys import time import torch if sys.version_info >= (3, 7): @contextlib.contextmanager def profile_time(trace_name, name, enabled=True, stream=None, end_stream=None): """Print time spent by CPU and GPU. Useful as a temporary context manager to find sweet spots of code suitable for async implementation. """ if (not enabled) or not torch.cuda.is_available(): yield return stream = stream if stream else torch.cuda.current_stream() end_stream = end_stream if end_stream else stream start = torch.cuda.Event(enable_timing=True) end = torch.cuda.Event(enable_timing=True) stream.record_event(start) try: cpu_start = time.monotonic() yield finally: cpu_end = time.monotonic() end_stream.record_event(end) end.synchronize() cpu_time = (cpu_end - cpu_start) * 1000 gpu_time = start.elapsed_time(end) msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms ' msg += f'gpu_time {gpu_time:.2f} ms stream {stream}' print(msg, end_stream)
1,336
31.609756
73
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/utils/util_random.py
# Copyright (c) OpenMMLab. All rights reserved.
"""Helpers for random number generators."""
import numpy as np


def ensure_rng(rng=None):
    """Coerces input into a random number generator.

    If the input is None, then a global random state is returned.

    If the input is a numeric value, then that is used as a seed to construct
    a random state. Otherwise the input is returned as-is.

    Adapted from [1]_.

    Args:
        rng (int | numpy.random.RandomState | None):
            if None, then defaults to the global rng. Otherwise this can
            be an integer or a RandomState class

    Returns:
        (numpy.random.RandomState) : rng -
            a numpy random number generator

    References:
        .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270  # noqa: E501
    """
    if rng is None:
        # Share numpy's module-level global random state.
        return np.random.mtrand._rand
    if isinstance(rng, int):
        # Ints are treated as seeds for a fresh, reproducible generator.
        return np.random.RandomState(rng)
    # Already a generator: pass it through unchanged. (The original had a
    # redundant `rng = rng` branch here.)
    return rng
1,025
28.314286
119
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/utils/logger.py
# Copyright (c) OpenMMLab. All rights reserved.
import logging

from mmcv.utils import get_logger


def get_root_logger(log_file=None, log_level=logging.INFO):
    """Get root logger.

    Args:
        log_file (str, optional): File path of log. Defaults to None.
        log_level (int, optional): The level of logger.
            Defaults to logging.INFO.

    Returns:
        :obj:`logging.Logger`: The obtained logger
    """
    # Delegate to mmcv; the fixed 'mmdet' name keeps every logger in this
    # project under a single namespace.
    return get_logger(name='mmdet', log_file=log_file, log_level=log_level)
529
24.238095
77
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/utils/collect_env.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash

import mmdet


def collect_env():
    """Collect the information of the running environments."""
    # Start from mmcv's generic environment report, then append the mmdet
    # version tagged with a short git hash of the working tree.
    env = collect_base_env()
    short_hash = get_git_hash()[:7]
    env['MMDetection'] = mmdet.__version__ + '+' + short_hash
    return env


if __name__ == '__main__':
    for name, val in collect_env().items():
        print(f'{name}: {val}')
471
25.222222
74
py
PseCo
PseCo-master/thirdparty/mmdetection/mmdet/utils/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .collect_env import collect_env from .logger import get_root_logger __all__ = ['get_root_logger', 'collect_env']
167
27
47
py
PseCo
PseCo-master/scripts/check_ceph.py
"""Smoke-test ceph (petrel-oss) connectivity by fetching one COCO image."""
from petrel_client.client import Client

# NOTE: the unused `import os` and `import ipdb` debug leftovers were
# removed; the whole script is visible and neither name is referenced.

# Path of the petrel-oss client configuration file.
petrel_conf = "~/petreloss.conf"
client = Client(petrel_conf)

# One known COCO val2017 image on the cluster's s3 bucket.
filename = "1984:s3://openmmlab/datasets/detection/coco/val2017/000000252219.jpg"
# Renamed from `bytes`, which shadowed the builtin type.
img_bytes = client.Get(filename)
236
28.625
81
py
PseCo
PseCo-master/scripts/convert_json.py
import json import ipdb val_anno = "../data/annotations/instances_val2017.json" new_anno = "../data/annotations/val_mini.json" new_anno_dict = {} with open(val_anno, "r") as f: annos = json.load(f) new_anno_dict["info"] = annos["info"] new_anno_dict["licenses"] = annos["licenses"] new_anno_dict["images"] = annos["images"][:100] new_anno_dict["annotations"] = annos["annotations"] new_anno_dict["categories"] = annos["categories"] with open(new_anno, "w") as g: json.dump(new_anno_dict, g) g.close()
539
26
55
py
PseCo
PseCo-master/demo/image_demo.py
# Copyright (c) OpenMMLab. All rights reserved. # Modified from thirdparty/mmdetection/demo/image_demo.py import asyncio import glob import os from argparse import ArgumentParser from mmcv import Config from mmdet.apis import async_inference_detector, inference_detector, show_result_pyplot from ssod.apis.inference import init_detector, save_result from ssod.utils import patch_config def parse_args(): parser = ArgumentParser() parser.add_argument("img", help="Image file") parser.add_argument("config", help="Config file") parser.add_argument("checkpoint", help="Checkpoint file") parser.add_argument("--device", default="cuda:0", help="Device used for inference") parser.add_argument( "--score-thr", type=float, default=0.3, help="bbox score threshold" ) parser.add_argument( "--async-test", action="store_true", help="whether to set async options for async inference.", ) parser.add_argument( "--output", type=str, default=None, help="specify the directory to save visualization results.", ) args = parser.parse_args() return args def main(args): cfg = Config.fromfile(args.config) # Not affect anything, just avoid index error cfg.work_dir = "./work_dirs" cfg = patch_config(cfg) # build the model from a config file and a checkpoint file model = init_detector(cfg, args.checkpoint, device=args.device) imgs = glob.glob(args.img) for img in imgs: # test a single image result = inference_detector(model, img) # show the results if args.output is None: show_result_pyplot(model, img, result, score_thr=args.score_thr) else: out_file_path = os.path.join(args.output, os.path.basename(img)) print(f"Save results to {out_file_path}") save_result( model, img, result, score_thr=args.score_thr, out_file=out_file_path ) async def async_main(args): cfg = Config.fromfile(args.config) # Not affect anything, just avoid index error cfg.work_dir = "./work_dirs" cfg = patch_config(cfg) # build the model from a config file and a checkpoint file model = init_detector(cfg, args.checkpoint, 
device=args.device) # test a single image args.img = glob.glob(args.img) tasks = asyncio.create_task(async_inference_detector(model, args.img)) result = await asyncio.gather(tasks) # show the results for img, pred in zip(args.img, result): if args.output is None: show_result_pyplot(model, img, pred, score_thr=args.score_thr) else: out_file_path = os.path.join(args.output, os.path.basename(img)) print(f"Save results to {out_file_path}") save_result( model, img, pred, score_thr=args.score_thr, out_file=out_file_path ) if __name__ == "__main__": args = parse_args() if args.async_test: asyncio.run(async_main(args)) else: main(args)
3,063
33.044444
87
py
PseCo
PseCo-master/configs/supervised_baseline/base.py
mmdet_base = "../../thirdparty/mmdetection/configs/_base_" _base_ = [ f"{mmdet_base}/models/faster_rcnn_r50_fpn.py", f"{mmdet_base}/datasets/coco_detection.py", f"{mmdet_base}/schedules/schedule_1x.py", f"{mmdet_base}/default_runtime.py", ] model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=True, style="caffe", init_cfg=dict( type="Pretrained", checkpoint="open-mmlab://detectron2/resnet50_caffe" ), ) ) img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type="LoadImageFromFile"), dict(type="LoadAnnotations", with_bbox=True), dict( type="Sequential", transforms=[ dict( type="RandResize", img_scale=[(1333, 400), (1333, 1200)], multiscale_mode="range", keep_ratio=True, ), dict(type="RandFlip", flip_ratio=0.5), dict( type="OneOf", transforms=[ dict(type=k) for k in [ "Identity", "AutoContrast", "RandEqualize", "RandSolarize", "RandColor", "RandContrast", "RandBrightness", "RandSharpness", "RandPosterize", ] ], ), ], ), dict(type="Pad", size_divisor=32), dict(type="Normalize", **img_norm_cfg), dict(type="ExtraAttrs", tag="sup"), dict(type="DefaultFormatBundle"), dict( type="Collect", keys=["img", "gt_bboxes", "gt_labels"], meta_keys=( "filename", "ori_shape", "img_shape", "img_norm_cfg", "pad_shape", "scale_factor", "tag", ), ), ] test_pipeline = [ dict(type="LoadImageFromFile"), dict( type="MultiScaleFlipAug", img_scale=(1333, 800), flip=False, transforms=[ dict(type="Resize", keep_ratio=True), dict(type="RandomFlip"), dict(type="Normalize", **img_norm_cfg), dict(type="Pad", size_divisor=32), dict(type="ImageToTensor", keys=["img"]), dict(type="Collect", keys=["img"]), ], ), ] data = dict( samples_per_gpu=1, workers_per_gpu=1, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline), ) custom_hooks = [ dict(type="WeightSummary"), ] optimizer = dict(type="SGD", lr=0.01, momentum=0.9, weight_decay=0.0001) lr_config = dict(step=[120000, 
160000]) runner = dict(_delete_=True, type="IterBasedRunner", max_iters=180000) checkpoint_config = dict(by_epoch=False, interval=10000, max_keep_ckpts=5, create_symlink=False) evaluation = dict(interval=10000) # fp16 = dict(loss_scale="dynamic") log_config = dict( interval=50, hooks=[ dict(type="TextLoggerHook", by_epoch=False), ], )
3,220
26.767241
96
py
PseCo
PseCo-master/configs/supervised_baseline/faster_rcnn_r50_caffe_fpn_coco_full_180k_p3p7.py
_base_ = "base.py" data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( ann_file="data/annotations/instances_train2017.json", img_prefix="data/train2017/", ), ) model = dict( neck=dict( start_level=1, add_extra_convs='on_input' ), rpn_head=dict( anchor_generator=dict( type='AnchorGenerator', scales=[4], ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), ), roi_head=dict( bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[8, 16, 32, 64]), ), ) optimizer = dict(lr=0.02) lr_config = dict(step=[120000, 160000]) runner = dict(_delete_=True, type="IterBasedRunner", max_iters=180000)
888
23.027027
77
py
PseCo
PseCo-master/configs/supervised_baseline/faster_rcnn_r50_caffe_fpn_coco_partial_180k.py
_base_ = "base.py" fold = 1 percent = 1 data = dict( samples_per_gpu=1, workers_per_gpu=1, train=dict( ann_file="../data/annotations/semi_supervised/instances_train2017.${fold}@${percent}.json", img_prefix="../data/train2017/", ), ) log_config = dict( interval=50, hooks=[ dict(type="TextLoggerHook"), ], )
360
18
99
py
PseCo
PseCo-master/configs/supervised_baseline/faster_rcnn_r50_caffe_fpn_coco_full_720k.py
_base_ = "base.py" data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( ann_file="data/annotations/instances_train2017.json", img_prefix="data/train2017/", ), ) optimizer = dict(lr=0.02) lr_config = dict(step=[120000 * 4, 160000 * 4]) runner = dict(_delete_=True, type="IterBasedRunner", max_iters=180000 * 4)
354
22.666667
74
py
PseCo
PseCo-master/configs/PseCo/base.py
mmdet_base = "../../thirdparty/mmdetection/configs/_base_" _base_ = [ f"{mmdet_base}/models/faster_rcnn_r50_fpn.py", f"{mmdet_base}/datasets/coco_detection.py", f"{mmdet_base}/schedules/schedule_1x.py", f"{mmdet_base}/default_runtime.py", ] model = dict( backbone=dict( norm_cfg=dict(requires_grad=False), norm_eval=True, style="caffe", init_cfg=dict( type="Pretrained", checkpoint="open-mmlab://detectron2/resnet50_caffe" ), ) ) img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type="LoadImageFromFile", file_client_args=dict(backend="${backend}")), dict(type="LoadAnnotations", with_bbox=True), dict( type="Sequential", transforms=[ dict( type="RandResize", img_scale=[(1333, 400), (1333, 1200)], multiscale_mode="range", keep_ratio=True, ), dict(type="RandFlip", flip_ratio=0.5), dict( type="OneOf", transforms=[ dict(type=k) for k in [ "Identity", "AutoContrast", "RandEqualize", "RandSolarize", "RandColor", "RandContrast", "RandBrightness", "RandSharpness", "RandPosterize", ] ], ), ], record=True, ), dict(type="Pad", size_divisor=32), dict(type="Normalize", **img_norm_cfg), dict(type="ExtraAttrs", tag="sup"), dict(type="DefaultFormatBundle"), dict( type="Collect", keys=["img", "gt_bboxes", "gt_labels"], meta_keys=( "filename", "ori_shape", "img_shape", "img_norm_cfg", "pad_shape", "scale_factor", "tag", ), ), ] strong_pipeline = [ dict( type="Sequential", transforms=[ dict( type="RandResize", img_scale=[(1333, 400), (1333, 1200)], multiscale_mode="range", keep_ratio=True, ), dict(type="RandFlip", flip_ratio=0.5), dict( type="ShuffledSequential", transforms=[ dict( type="OneOf", transforms=[ dict(type=k) for k in [ "Identity", "AutoContrast", "RandEqualize", "RandSolarize", "RandColor", "RandContrast", "RandBrightness", "RandSharpness", "RandPosterize", ] ], ), dict( type="OneOf", transforms=[ dict(type="RandTranslate", x=(-0.1, 0.1)), dict(type="RandTranslate", y=(-0.1, 0.1)), dict(type="RandRotate", 
angle=(-30, 30)), [ dict(type="RandShear", x=(-30, 30)), dict(type="RandShear", y=(-30, 30)), ], ], ), ], ), dict( type="RandErase", n_iterations=(1, 5), size=[0, 0.2], squared=True, ), ], record=True, ), dict(type="Pad", size_divisor=32), dict(type="Normalize", **img_norm_cfg), dict(type="ExtraAttrs", tag="unsup_student"), dict(type="DefaultFormatBundle"), dict( type="Collect", keys=["img", "gt_bboxes", "gt_labels"], meta_keys=( "filename", "ori_shape", "img_shape", "img_norm_cfg", "pad_shape", "scale_factor", "tag", "transform_matrix", ), ), ] weak_pipeline = [ dict( type="Sequential", transforms=[ dict( type="RandResize", img_scale=[(1333, 400), (1333, 1200)], multiscale_mode="range", keep_ratio=True, ), dict(type="RandFlip", flip_ratio=0.5), ], record=True, ), dict(type="Pad", size_divisor=32), dict(type="Normalize", **img_norm_cfg), dict(type="ExtraAttrs", tag="unsup_teacher"), dict(type="DefaultFormatBundle"), dict( type="Collect", keys=["img", "gt_bboxes", "gt_labels"], meta_keys=( "filename", "ori_shape", "img_shape", "img_norm_cfg", "pad_shape", "scale_factor", "tag", "transform_matrix", ), ), ] unsup_pipeline = [ dict(type="LoadImageFromFile"), # dict(type="LoadAnnotations", with_bbox=True), # generate fake labels for data format compatibility dict(type="PseudoSamples", with_bbox=True), dict( type="MultiBranch", unsup_student=strong_pipeline, unsup_teacher=weak_pipeline ), ] test_pipeline = [ dict(type="LoadImageFromFile", file_client_args=dict(backend="${backend}")), dict( type="MultiScaleFlipAug", img_scale=(1333, 800), flip=False, transforms=[ dict(type="Resize", keep_ratio=True), dict(type="RandomFlip"), dict(type="Normalize", **img_norm_cfg), dict(type="Pad", size_divisor=32), dict(type="ImageToTensor", keys=["img"]), dict(type="Collect", keys=["img"]), ], ), ] data = dict( samples_per_gpu=None, workers_per_gpu=None, train=dict( _delete_=True, type="SemiDataset", sup=dict( type="CocoDataset", ann_file=None, img_prefix=None, pipeline=train_pipeline, 
), unsup=dict( type="CocoDataset", ann_file=None, img_prefix=None, pipeline=unsup_pipeline, filter_empty_gt=False, ), ), val=dict( ann_file="../data/annotations/instances_val2017.json", img_prefix="../data/val2017/", pipeline=test_pipeline), test=dict( ann_file="../data/annotations/instances_val2017.json", img_prefix="../data/val2017/", pipeline=test_pipeline), sampler=dict( train=dict( type="SemiBalanceSampler", sample_ratio=[1, 4], by_prob=True, # at_least_one=True, epoch_length=7330, ) ), ) thres=0.9 refresh=False custom_hooks = [ dict(type="NumClassCheckHook"), dict(type="WeightSummary"), dict(type="MeanTeacher", momentum=0.999, warm_up=0), ] evaluation = dict(type="SubModulesDistEvalHook", interval=10000, start=20000) optimizer = dict(type="SGD", lr=0.01, momentum=0.9, weight_decay=0.0001) lr_config = dict(step=[120000]) runner = dict(_delete_=True, type="IterBasedRunner", max_iters=180000) checkpoint_config = dict(by_epoch=False, interval=5000, max_keep_ckpts=10, create_symlink=False) fp16 = dict(loss_scale="dynamic") log_config = dict( interval=50, hooks=[ dict(type="TextLoggerHook", by_epoch=False), ], )
7,897
28.580524
96
py
PseCo
PseCo-master/configs/PseCo/PseCo_faster_rcnn_celoss_r50_caffe_fpn_coco_180k.py
_base_ = "base.py" model = dict( neck=dict( num_outs=6, add_extra_convs='on_input' ), rpn_head=dict( anchor_generator=dict( type='AnchorGenerator', scales=[4], ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), ), roi_head=dict( bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[8, 16, 32, 64]), ), train_cfg=dict( rcnn=dict( sampler=dict( add_gt_as_proposals=False ), ), ), ) img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) strong_pipeline = [ dict( type="Sequential", transforms=[ dict(type="RandFlip", flip_ratio=0.5), dict( type="RandResize", img_scale=[(1333, 400), (1333, 1200)], multiscale_mode="range", keep_ratio=True), dict( type="ShuffledSequential", transforms=[ dict( type="OneOf", transforms=[ dict(type=k) for k in [ "Identity", "AutoContrast", "RandEqualize", "RandSolarize", "RandColor", "RandContrast", "RandBrightness", "RandSharpness", "RandPosterize", ] ], ), dict( type="OneOf", transforms=[ dict(type="RandTranslate", x=(-0.1, 0.1)), dict(type="RandTranslate", y=(-0.1, 0.1)), dict(type="RandRotate", angle=(-30, 30)), [ dict(type="RandShear", x=(-30, 30)), dict(type="RandShear", y=(-30, 30)), ], ], ), ], ), dict( type="RandErase", n_iterations=(1, 5), size=[0, 0.2], squared=True, ), ], record=True, ), dict(type="Pad", size_divisor=32), dict(type="Normalize", **img_norm_cfg), dict(type="ExtraAttrs", tag="unsup_student"), dict(type="DefaultFormatBundle"), dict( type="Collect", keys=["img", "gt_bboxes", "gt_labels"], meta_keys=( "filename", "ori_shape", "img_shape", "img_norm_cfg", "pad_shape", "scale_factor", "tag", "transform_matrix", "flip", "flip_direction" ), ), ] weak_pipeline = [ dict(type="Sequential", transforms=[ dict(type="RandFlip", flip_ratio=0.5), dict( type="RandResize", img_scale=[(1333, 400), (1333, 1200)], multiscale_mode="range", keep_ratio=True, )], record=True, ), dict(type="Pad", 
size_divisor=32), dict(type="Normalize", **img_norm_cfg), dict(type="ExtraAttrs", tag="unsup_teacher"), dict(type="DefaultFormatBundle"), dict( type="Collect", keys=["img", "gt_bboxes", "gt_labels"], meta_keys=( "filename", "ori_shape", "img_shape", "img_norm_cfg", "pad_shape", "scale_factor", "tag", "transform_matrix", "flip", "flip_direction" ), ), ] unsup_pipeline = [ dict(type="LoadImageFromFile"), # dict(type="LoadAnnotations", with_bbox=True), # generate fake labels for data format compatibility dict(type="PseudoSamples", with_bbox=True), dict( type="MultiBranch", unsup_student=strong_pipeline, unsup_teacher=weak_pipeline ), ] data = dict( samples_per_gpu=5, workers_per_gpu=2, train=dict( sup=dict( type="CocoDataset", ann_file="../data/annotations/semi_supervised/instances_train2017.${fold}@${percent}.json", img_prefix="../data/train2017/", ), unsup=dict( type="CocoDataset", ann_file="../data/annotations/semi_supervised/instances_train2017.${fold}@${percent}-unlabeled.json", img_prefix="../data/train2017/", pipeline=unsup_pipeline, ), ), sampler=dict( train=dict( sample_ratio=[1, 4], ) ), ) semi_wrapper = dict( type="PseCo_FRCNN", model="${model}", train_cfg=dict( pseudo_label_initial_score_thr=0.3, rpn_pseudo_threshold=0.6, cls_pseudo_threshold=0.6, min_pseduo_box_size=0, unsup_weight=2.0, use_teacher_proposal=True, use_MSL=True, # ------ PLA config ------- # PLA_iou_thres=0.4, PLA_candidate_topk=12, ), test_cfg=dict( inference_on="teacher" ), ) fold = 1 percent = 1 custom_hooks = [ dict(type="NumClassCheckHook"), dict(type="WeightSummary"), dict(type="MeanTeacher", momentum=0.999, warm_up=0), dict(type="GetCurrentIter") ]
5,732
27.241379
113
py
PseCo
PseCo-master/configs/PseCo/PseCo_faster_rcnn_r50_caffe_fpn_coco_180k.py
_base_ = "base.py" model = dict( neck=dict( num_outs=6, add_extra_convs='on_input' ), rpn_head=dict( anchor_generator=dict( type='AnchorGenerator', scales=[4], ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), ), roi_head=dict( bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=256, featmap_strides=[8, 16, 32, 64]), bbox_head=dict( loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=10.0), ), ), train_cfg=dict( rcnn=dict( sampler=dict( add_gt_as_proposals=False ), ), ), ) img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) strong_pipeline = [ dict( type="Sequential", transforms=[ dict( type="RandResize", img_scale=[(1333, 400), (1333, 1200)], multiscale_mode="range", keep_ratio=True, ), dict(type="RandFlip", flip_ratio=0.5), dict( type="ShuffledSequential", transforms=[ dict( type="OneOf", transforms=[ dict(type=k) for k in [ "Identity", "AutoContrast", "RandEqualize", "RandSolarize", "RandColor", "RandContrast", "RandBrightness", "RandSharpness", "RandPosterize", ] ], ), dict( type="OneOf", transforms=[ dict(type="RandTranslate", x=(-0.1, 0.1)), dict(type="RandTranslate", y=(-0.1, 0.1)), dict(type="RandRotate", angle=(-30, 30)), [ dict(type="RandShear", x=(-30, 30)), dict(type="RandShear", y=(-30, 30)), ], ], ), ], ), dict( type="RandErase", n_iterations=(1, 5), size=[0, 0.2], squared=True, ), ], record=True, ), dict(type="Pad", size_divisor=32), dict(type="Normalize", **img_norm_cfg), dict(type="ExtraAttrs", tag="unsup_student"), dict(type="DefaultFormatBundle"), dict( type="Collect", keys=["img", "gt_bboxes", "gt_labels"], meta_keys=( "filename", "ori_shape", "img_shape", "img_norm_cfg", "pad_shape", "scale_factor", "tag", "transform_matrix", "flip", "flip_direction" ), ), ] weak_pipeline = [ dict(type="Sequential", transforms=[ dict( type="RandResize", img_scale=[(1333, 400), (1333, 1200)], 
multiscale_mode="range", keep_ratio=True, ), dict(type="RandFlip", flip_ratio=0.5)], record=True, ), dict(type="Pad", size_divisor=32), dict(type="Normalize", **img_norm_cfg), dict(type="ExtraAttrs", tag="unsup_teacher"), dict(type="DefaultFormatBundle"), dict( type="Collect", keys=["img", "gt_bboxes", "gt_labels"], meta_keys=( "filename", "ori_shape", "img_shape", "img_norm_cfg", "pad_shape", "scale_factor", "tag", "transform_matrix", "flip", "flip_direction" ), ), ] unsup_pipeline = [ dict(type="LoadImageFromFile", file_client_args=dict(backend="${backend}")), # dict(type="LoadAnnotations", with_bbox=True), # generate fake labels for data format compatibility dict(type="PseudoSamples", with_bbox=True), dict( type="MultiBranch", unsup_student=strong_pipeline, unsup_teacher=weak_pipeline ), ] data = dict( samples_per_gpu=5, workers_per_gpu=2, train=dict( sup=dict( type="CocoDataset", ann_file="../data/annotations/semi_supervised/instances_train2017.${fold}@${percent}.json", img_prefix="../data/train2017/", ), unsup=dict( type="CocoDataset", ann_file="../data/annotations/semi_supervised/instances_train2017.${fold}@${percent}-unlabeled.json", img_prefix="../data/train2017/", pipeline=unsup_pipeline, ), ), sampler=dict( train=dict( sample_ratio=[1, 4], ) ), ) semi_wrapper = dict( type="PseCo_FRCNN", model="${model}", train_cfg=dict( pseudo_label_initial_score_thr=0.3, rpn_pseudo_threshold=0.5, cls_pseudo_threshold=0.5, min_pseduo_box_size=0, unsup_weight=2.0, use_teacher_proposal=True, use_MSL=True, # ------ PLA config ------- # PLA_iou_thres=0.4, PLA_candidate_topk=12, ), test_cfg=dict( inference_on="teacher" ), ) fold = 1 percent = 1 custom_hooks = [ dict(type="NumClassCheckHook"), dict(type="WeightSummary"), dict(type="MeanTeacher", momentum=0.999, warm_up=0), dict(type="GetCurrentIter") ] auto_resume=True find_unused_parameters=True backend="disk"
6,089
27.325581
113
py
pybo
pybo-master/setup.py
""" Setup script for pybo. """ import os import setuptools def read(fname): """Construct the name and descriptions from README.md.""" text = open(os.path.join(os.path.dirname(__file__), fname)).read() text = text.split('\n\n') name = text[0].lstrip('#').strip() description = text[1].strip('.') long_description = text[2] return name, description, long_description def main(): """Run the setup.""" NAME, DESCRIPTION, LONG_DESCRIPTION = read('README.md') setuptools.setup( name=NAME, version='0.2', author='Matthew W. Hoffman', author_email='mwh30@cam.ac.uk', url='http://github.com/mwhoffman/' + NAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, license='Simplified BSD', packages=setuptools.find_packages(), install_requires=['numpy', 'scipy', 'reggie', 'ezplot']) if __name__ == '__main__': main()
947
24.621622
70
py
pybo
pybo-master/pybo/bayesopt.py
""" Solver method for GP-based optimization which uses an inner-loop optimizer to maximize some acquisition function, generally given as a simple function of the posterior sufficient statistics. """ from __future__ import division from __future__ import absolute_import from __future__ import print_function import numpy as np import inspect import functools import os.path import cPickle as pickle import collections import reggie # each method/class defined exported by these modules will be exposed as a # string to the solve_bayesopt method so that we can swap in/out different # components for the "meta" solver. from . import inits from . import solvers from . import policies from . import recommenders from .utils import rstate # exported symbols __all__ = ['solve_bayesopt', 'init_model'] # DUMP/LOAD HELPERS ########################################################### Info = collections.namedtuple('Info', ['x', 'y', 'xbest']) def safe_dump(model, info, filename=None): """Safely dump the object to `filename` unless the name is None.""" if filename is not None: with open(filename, 'w') as fp: pickle.dump((model, info), fp) def safe_load(filename=None): """ Safely load checkpoint data; if it doesn't exist return properly formatted, but empty data. """ if filename is not None and os.path.exists(filename): with open(filename, 'r') as fp: return pickle.load(fp) else: return None, Info([], [], []) # MODEL INITIALIZATION ######################################################## def init_model(f, bounds, ninit=None, design='latin', log=None, rng=None): """ Initialize model and its hyperpriors using initial data. Arguments: f: function handle bounds: list of doubles (xmin, xmax) for each dimension. ninit: int, number of design points to initialize model with. design: string, corresponding to a function in `pybo.inits`, with 'init_' stripped. log: string, path to file where the model is dumped. rng: int or random state. Returns: Initialized model. 
""" rng = rstate(rng) bounds = np.array(bounds, dtype=float, ndmin=2) ninit = ninit if (ninit is not None) else 3*len(bounds) model, info = safe_load(log) if model is not None: # if we've already constructed a model return it right away return model elif len(info.x) == 0: # otherwise get the initial design design = getattr(inits, 'init_' + design) info.x.extend(design(bounds, ninit, rng)) info.y.extend(np.nan for _ in xrange(ninit)) # sample the initial data for i, x in enumerate(info.x): if np.isnan(info.y[i]): info.y[i] = f(x) # save progress safe_dump(None, info, filename=log) # define initial setting of hyper parameters sn2 = 1e-6 rho = max(info.y) - min(info.y) if (len(info.y) > 1) else 1. rho = 1. if (rho < 1e-1) else rho ell = 0.25 * (bounds[:, 1] - bounds[:, 0]) bias = np.mean(info.y) if (len(info.y) > 0) else 0. # initialize the base model model = reggie.make_gp(sn2, rho, ell, bias) # define priors model.params['like.sn2'].set_prior('horseshoe', 0.1) model.params['kern.rho'].set_prior('lognormal', np.log(rho), 1.) model.params['kern.ell'].set_prior('uniform', ell / 100, ell * 10) model.params['mean.bias'].set_prior('normal', bias, rho) # initialize the MCMC inference meta-model and add data model.add_data(info.x, info.y) model = reggie.MCMC(model, n=10, burn=100, rng=rng) # save model safe_dump(model, info, filename=log) return model # HELPER FOR CONSTRUCTING COMPONENTS ########################################## def get_component(value, module, rng, lstrip=''): """ Construct the model component if the given value is either a function or a string identifying a function in the given module (after stripping extraneous text). The value can also be passed as a 2-tuple where the second element includes kwargs. Partially apply any kwargs and the rng before returning the function. 
""" if isinstance(value, (list, tuple)): try: value, kwargs = value kwargs = dict(kwargs) except (ValueError, TypeError): raise ValueError('invalid component: {:r}'.format(value)) else: kwargs = {} if hasattr(value, '__call__'): func = value else: for fname in module.__all__: func = getattr(module, fname) if fname.startswith(lstrip): fname = fname[len(lstrip):] fname = fname.lower() if fname == value: break else: raise ValueError('invalid component: {:s}'.format(value)) # get the argspec argspec = inspect.getargspec(func) # from the argspec determine the valid kwargs; these should correspond # to any kwargs of the function except for rng. if argspec.defaults is not None: valid = set(argspec.args[-len(argspec.defaults):]) valid.discard('rng') else: valid = set() if not valid.issuperset(kwargs.keys()): raise ValueError("unknown arguments for {:s}: {:s}" .format(func.__name__, ', '.join(kwargs.keys()))) if 'rng' in argspec.args: kwargs['rng'] = rng if len(kwargs) > 0: func = functools.partial(func, **kwargs) return func # FORMATTING HELPERS FOR VERBOSITY IN SOLVE_BAYESOPT ########################## # simple format functions int2str = '{:03d}'.format float2str = '{: .3f}'.format def array2str(a): """Formatting helper for arrays.""" return np.array2string(a, formatter=dict(float=float2str, int=int2str)) # THE BAYESOPT META SOLVER #################################################### def solve_bayesopt(objective, bounds, model=None, niter=100, policy='ei', solver='lbfgs', recommender='latent', ninit=None, verbose=False, log=None, rng=None): """ Maximize the given function using Bayesian Optimization. Args: objective: function handle representing the objective function. bounds: bounds of the search space as a (d,2)-array. model: the Bayesian model instantiation. niter: horizon for optimization. init: the initialization component. policy: the acquisition component. solver: the inner-loop solver component. recommender: the recommendation component. 
rng: either an RandomState object or an integer used to seed the state; this will be fed to each component that requests randomness. callback: a function to call on each iteration for visualization. Note that the modular way in which this function has been written allows one to also pass parameters directly to some of the components. This works for the `init`, `policy`, `solver`, and `recommender` inputs. These components can be passed as either a string, a function, or a 2-tuple where the first item is a string/function and the second is a dictionary of additional arguments to pass to the component. Returns: A numpy record array containing a trace of the optimization process. The fields of this array are `x`, `y`, and `xbest` corresponding to the query locations, outputs, and recommendations at each iteration. If ground-truth is known an additional field `fbest` will be included. """ rng = rstate(rng) bounds = np.array(bounds, dtype=float, ndmin=2) # get modular components. policy = get_component(policy, policies, rng) solver = get_component(solver, solvers, rng, lstrip='solve_') recommender = get_component(recommender, recommenders, rng, lstrip='best_') # load/initialize model model_, info = safe_load(log) if (model is None) and (model_ is None): model = init_model(objective, bounds, ninit, log=log, rng=rng) else: # copy the model to avoid changing it in the outer scope model = model_ if (model_ is not None) else model.copy() # if we're given a model then initialize the model with a single point in # the middle. if len(info.x) == 0: x = inits.init_middle(bounds)[0] y = objective(x) info.x.append(x) info.y.append(y) model.add_data(x, y) safe_dump(model, info, filename=log) # Bayesian optimization loop for i in xrange(len(info.xbest), niter): # get the next point to evaluate. index = policy(model, bounds, info.x) x, _ = solver(index, bounds) # make an observation and record it. 
y = objective(x) model.add_data(x, y) xbest = recommender(model, bounds, info.x) # save progress info.x.append(x) info.y.append(y) info.xbest.append(xbest) safe_dump(model, info, filename=log) # print out the progress if requested. if verbose: print('i={:s}, x={:s}, y={:s}, xbest={:s}' .format(int2str(i), array2str(x), float2str(y), array2str(xbest))) # make sure the returned info contains arrays info = Info(*[np.array(_) for _ in info]) return xbest, model, info
9,562
32.204861
79
py
pybo
pybo-master/pybo/utils.py
""" Various utility functions. """ from __future__ import division from __future__ import absolute_import from __future__ import print_function import numpy as np import re import subprocess __all__ = ['rstate', 'SubprocessQuery', 'InteractiveQuery'] def rstate(rng=None): """ Return a RandomState object. This is just a simple wrapper such that if rng is already an instance of RandomState it will be passed through, otherwise it will create a RandomState object using rng as its seed. """ if not isinstance(rng, np.random.RandomState): rng = np.random.RandomState(rng) return rng class SubprocessQuery(object): """ Class for black-boxes that should be run from the shell. Simply pass the shell command with variables replaced with `{}` with python string formatting specs inside, then call the object with inputs to replace the `{}` in the same order as in the provided string. """ def __init__(self, command): self.command = command def __call__(self, x): out = subprocess.check_output(self.command.format(*x), shell=True) out = out.splitlines()[-1] # keep last line out = re.compile(r'\x1b[^m]*m').sub('', out) # strip color codes out = out.split('=')[-1] # strip left hand side return np.float(out) class InteractiveQuery(object): """ Wrapper for queries which interactively query the user. """ def __init__(self, prompt='Enter value at design x = {}\ny = '): self.prompt = prompt def __call__(self, x): y = input(self.prompt.format(x)) if not isinstance(y, (np.int, np.long, np.float)): # FIXME: this should probably just re-query the user rather than # raising an exception. raise ValueError('output must be a number') return y
1,897
31.169492
81
py
pybo
pybo-master/pybo/__init__.py
""" Objects which global optimization solvers. """ # pylint: disable=wildcard-import from .bayesopt import * from . import bayesopt __all__ = [] __all__ += bayesopt.__all__
176
13.75
42
py
pybo
pybo-master/pybo/recommenders.py
""" Recommendations. """ from __future__ import division from __future__ import absolute_import from __future__ import print_function from . import solvers __all__ = ['best_latent', 'best_incumbent'] def best_latent(model, bounds, X): """ Given a model return the best recommendation, corresponding to the point with maximum posterior mean. """ def mu(X, grad=False): """Posterior mean objective function.""" if grad: return model.predict(X, True)[::2] else: return model.predict(X)[0] xbest, _ = solvers.solve_lbfgs(mu, bounds, xgrid=X) return xbest def best_incumbent(model, _, X): """ Return a recommendation given by the best latent function value evaluated at points seen so far. """ f, _ = model.predict(X) return X[f.argmax()]
841
22.388889
77
py
pybo
pybo-master/pybo/policies/simple.py
""" Acquisition functions based on the probability or expected value of improvement. """ from __future__ import division from __future__ import absolute_import from __future__ import print_function import numpy as np __all__ = ['EI', 'PI', 'UCB', 'Thompson'] def EI(model, _, X, xi=0.0): """ Expected improvement policy with an exploration parameter of `xi`. """ model = model.copy() target = model.predict(X)[0].max() + xi def index(X, grad=False): """EI policy instance.""" return model.get_improvement(target, X, grad) return index def PI(model, _, X, xi=0.05): """ Probability of improvement policy with an exploration parameter of `xi`. """ model = model.copy() target = model.predict(X)[0].max() + xi def index(X, grad=False): """PI policy instance.""" return model.get_tail(target, X, grad) return index def Thompson(model, _, __, n=100, rng=None): """ Thompson sampling policy. """ return model.sample_f(n, rng).get def UCB(model, _, X, delta=0.1, xi=0.2): """ The (GP)UCB acquisition function where `delta` is the probability that the upper bound holds and `xi` is a multiplicative modification of the exploration factor. """ model = model.copy() d = len(X) a = xi * 2 * np.log(np.pi**2 / 3 / delta) b = xi * (4 + d) def index(X, grad=False): """UCB policy instance.""" posterior = model.predict(X, grad=grad) mu, s2 = posterior[:2] beta = a + b * np.log(d+1) if grad: dmu, ds2 = posterior[2:] return (mu + np.sqrt(beta * s2), dmu + 0.5 * np.sqrt(beta / s2[:, None]) * ds2) else: return mu + np.sqrt(beta * s2) return index
1,803
23.053333
78
py
pybo
pybo-master/pybo/policies/__init__.py
""" Acquisition functions. """ # pylint: disable=wildcard-import from .simple import * from . import simple __all__ = [] __all__ += simple.__all__
150
11.583333
33
py
pybo
pybo-master/pybo/inits/sobol.py
import math from numpy import * def i4_bit_hi1 ( n ): #*****************************************************************************80 # ## I4_BIT_HI1 returns the position of the high 1 bit base 2 in an integer. # # Example: # # N Binary BIT # ---- -------- ---- # 0 0 0 # 1 1 1 # 2 10 2 # 3 11 2 # 4 100 3 # 5 101 3 # 6 110 3 # 7 111 3 # 8 1000 4 # 9 1001 4 # 10 1010 4 # 11 1011 4 # 12 1100 4 # 13 1101 4 # 14 1110 4 # 15 1111 4 # 16 10000 5 # 17 10001 5 # 1023 1111111111 10 # 1024 10000000000 11 # 1025 10000000001 11 # # Licensing: # # This code is distributed under the GNU LGPL license. # # Modified: # # 26 Nov 2011 # # Author: # # Original MATLAB version by John Burkardt. # PYTHON version by Corrado Chisari # Modified by Jasper Snoek to scale to 1111 dimensions # # Parameters: # # Input, integer N, the integer to be measured. # N should be nonnegative. If N is nonpositive, the value will always be 0. # # Output, integer BIT, the number of bits base 2. # i = math.floor ( n ) bit = 0 while ( 1 ): if ( i <= 0 ): break bit += 1 i = math.floor ( i / 2. ) return bit def i4_bit_lo0 ( n ): #*****************************************************************************80 # ## I4_BIT_LO0 returns the position of the low 0 bit base 2 in an integer. # # Example: # # N Binary BIT # ---- -------- ---- # 0 0 1 # 1 1 2 # 2 10 1 # 3 11 3 # 4 100 1 # 5 101 2 # 6 110 1 # 7 111 4 # 8 1000 1 # 9 1001 2 # 10 1010 1 # 11 1011 3 # 12 1100 1 # 13 1101 2 # 14 1110 1 # 15 1111 5 # 16 10000 1 # 17 10001 2 # 1023 1111111111 1 # 1024 10000000000 1 # 1025 10000000001 1 # # Licensing: # # This code is distributed under the GNU LGPL license. # # Modified: # # 22 February 2011 # # Author: # # Original MATLAB version by John Burkardt. # PYTHON version by Corrado Chisari # # Parameters: # # Input, integer N, the integer to be measured. # N should be nonnegative. # # Output, integer BIT, the position of the low 1 bit. # bit = 0 i = math.floor ( n ) while ( 1 ): bit = bit + 1 i2 = math.floor ( i / 2. 
) if ( i == 2 * i2 ): break i = i2 return bit def i4_sobol_generate ( m, n, skip ): #*****************************************************************************80 # ## I4_SOBOL_GENERATE generates a Sobol dataset. # # Licensing: # # This code is distributed under the GNU LGPL license. # # Modified: # # 22 February 2011 # # Author: # # Original MATLAB version by John Burkardt. # PYTHON version by Corrado Chisari # # Parameters: # # Input, integer M, the spatial dimension. # # Input, integer N, the number of points to generate. # # Input, integer SKIP, the number of initial points to skip. # # Output, real R(M,N), the points. # r=zeros((m,n)) for j in xrange (1, n+1): seed = skip + j - 2 [ r[0:m,j-1], seed ] = i4_sobol ( m, seed ) return r def i4_sobol ( dim_num, seed ): #*****************************************************************************80 # ## I4_SOBOL generates a new quasirandom Sobol vector with each call. # # Discussion: # # The routine adapts the ideas of Antonov and Saleev. # # Licensing: # # This code is distributed under the GNU LGPL license. # # Modified: # # 26 February 2013 # # Author: # # Original FORTRAN77 version by Bennett Fox. # MATLAB version by John Burkardt. # PYTHON version by Corrado Chisari # PYTHON version modified by Jasper Snoek to scale (Joe & Kuo) # # Reference: # # Antonov, Saleev, # USSR Computational Mathematics and Mathematical Physics, # Volume 19, 1980, pages 252 - 256. # # Paul Bratley, Bennett Fox, # Algorithm 659: # Implementing Sobol's Quasirandom Sequence Generator, # ACM Transactions on Mathematical Software, # Volume 14, Number 1, pages 88-100, 1988. # # Bennett Fox, # Algorithm 647: # Implementation and Relative Efficiency of Quasirandom # Sequence Generators, # ACM Transactions on Mathematical Software, # Volume 12, Number 4, pages 362-376, 1986. # # Ilya Sobol, # USSR Computational Mathematics and Mathematical Physics, # Volume 16, pages 236-242, 1977. 
# # Ilya Sobol, Levitan, # The Production of Points Uniformly Distributed in a Multidimensional # Cube (in Russian), # Preprint IPM Akad. Nauk SSSR, # Number 40, Moscow 1976. # # Stephen Joe, Frances Kuo, # Remark on Algorithm 659: Implementing Sobol's Quasirandom Sequence Generator, # ACM Transactions on Mathematical Software, # Volume 29, Number 1, March 2003, pages 49-57. # # Parameters: # # Input, integer DIM_NUM, the number of spatial dimensions. # DIM_NUM must satisfy 1 <= DIM_NUM <= 1111. # # Input/output, integer SEED, the "seed" for the sequence. # This is essentially the index in the sequence of the quasirandom # value to be generated. On output, SEED has been set to the # appropriate next value, usually simply SEED+1. # If SEED is less than 0 on input, it is treated as though it were 0. # An input value of 0 requests the first (0-th) element of the sequence. # # Output, real QUASI(DIM_NUM), the next quasirandom vector. # global atmost global dim_max global dim_num_save global initialized global lastq global log_max global maxcol global poly global recipd global seed_save global v if ( not 'initialized' in globals().keys() ): initialized = 0 dim_num_save = -1 if ( not initialized or dim_num != dim_num_save ): initialized = 1 dim_max = 1111 dim_num_save = -1 log_max = 30 seed_save = -1 # # Initialize (part of) V. 
# v = zeros((dim_max,log_max)) v[0,0] = 1 v[1,0] = 1 v[2,0] = 1 v[3,0] = 1 v[4,0] = 1 v[5,0] = 1 v[6,0] = 1 v[7,0] = 1 v[8,0] = 1 v[9,0] = 1 v[10,0] = 1 v[11,0] = 1 v[12,0] = 1 v[13,0] = 1 v[14,0] = 1 v[15,0] = 1 v[16,0] = 1 v[17,0] = 1 v[18,0] = 1 v[19,0] = 1 v[20,0] = 1 v[21,0] = 1 v[22,0] = 1 v[23,0] = 1 v[24,0] = 1 v[25,0] = 1 v[26,0] = 1 v[27,0] = 1 v[28,0] = 1 v[29,0] = 1 v[30,0] = 1 v[31,0] = 1 v[32,0] = 1 v[33,0] = 1 v[34,0] = 1 v[35,0] = 1 v[36,0] = 1 v[37,0] = 1 v[38,0] = 1 v[39,0] = 1 v[40,0] = 1 v[41,0] = 1 v[42,0] = 1 v[43,0] = 1 v[44,0] = 1 v[45,0] = 1 v[46,0] = 1 v[47,0] = 1 v[48,0] = 1 v[49,0] = 1 v[50,0] = 1 v[51,0] = 1 v[52,0] = 1 v[53,0] = 1 v[54,0] = 1 v[55,0] = 1 v[56,0] = 1 v[57,0] = 1 v[58,0] = 1 v[59,0] = 1 v[60,0] = 1 v[61,0] = 1 v[62,0] = 1 v[63,0] = 1 v[64,0] = 1 v[65,0] = 1 v[66,0] = 1 v[67,0] = 1 v[68,0] = 1 v[69,0] = 1 v[70,0] = 1 v[71,0] = 1 v[72,0] = 1 v[73,0] = 1 v[74,0] = 1 v[75,0] = 1 v[76,0] = 1 v[77,0] = 1 v[78,0] = 1 v[79,0] = 1 v[80,0] = 1 v[81,0] = 1 v[82,0] = 1 v[83,0] = 1 v[84,0] = 1 v[85,0] = 1 v[86,0] = 1 v[87,0] = 1 v[88,0] = 1 v[89,0] = 1 v[90,0] = 1 v[91,0] = 1 v[92,0] = 1 v[93,0] = 1 v[94,0] = 1 v[95,0] = 1 v[96,0] = 1 v[97,0] = 1 v[98,0] = 1 v[99,0] = 1 v[100,0] = 1 v[101,0] = 1 v[102,0] = 1 v[103,0] = 1 v[104,0] = 1 v[105,0] = 1 v[106,0] = 1 v[107,0] = 1 v[108,0] = 1 v[109,0] = 1 v[110,0] = 1 v[111,0] = 1 v[112,0] = 1 v[113,0] = 1 v[114,0] = 1 v[115,0] = 1 v[116,0] = 1 v[117,0] = 1 v[118,0] = 1 v[119,0] = 1 v[120,0] = 1 v[121,0] = 1 v[122,0] = 1 v[123,0] = 1 v[124,0] = 1 v[125,0] = 1 v[126,0] = 1 v[127,0] = 1 v[128,0] = 1 v[129,0] = 1 v[130,0] = 1 v[131,0] = 1 v[132,0] = 1 v[133,0] = 1 v[134,0] = 1 v[135,0] = 1 v[136,0] = 1 v[137,0] = 1 v[138,0] = 1 v[139,0] = 1 v[140,0] = 1 v[141,0] = 1 v[142,0] = 1 v[143,0] = 1 v[144,0] = 1 v[145,0] = 1 v[146,0] = 1 v[147,0] = 1 v[148,0] = 1 v[149,0] = 1 v[150,0] = 1 v[151,0] = 1 v[152,0] = 1 v[153,0] = 1 v[154,0] = 1 v[155,0] = 1 v[156,0] = 1 v[157,0] = 1 v[158,0] = 1 v[159,0] = 
1 v[160,0] = 1 v[161,0] = 1 v[162,0] = 1 v[163,0] = 1 v[164,0] = 1 v[165,0] = 1 v[166,0] = 1 v[167,0] = 1 v[168,0] = 1 v[169,0] = 1 v[170,0] = 1 v[171,0] = 1 v[172,0] = 1 v[173,0] = 1 v[174,0] = 1 v[175,0] = 1 v[176,0] = 1 v[177,0] = 1 v[178,0] = 1 v[179,0] = 1 v[180,0] = 1 v[181,0] = 1 v[182,0] = 1 v[183,0] = 1 v[184,0] = 1 v[185,0] = 1 v[186,0] = 1 v[187,0] = 1 v[188,0] = 1 v[189,0] = 1 v[190,0] = 1 v[191,0] = 1 v[192,0] = 1 v[193,0] = 1 v[194,0] = 1 v[195,0] = 1 v[196,0] = 1 v[197,0] = 1 v[198,0] = 1 v[199,0] = 1 v[200,0] = 1 v[201,0] = 1 v[202,0] = 1 v[203,0] = 1 v[204,0] = 1 v[205,0] = 1 v[206,0] = 1 v[207,0] = 1 v[208,0] = 1 v[209,0] = 1 v[210,0] = 1 v[211,0] = 1 v[212,0] = 1 v[213,0] = 1 v[214,0] = 1 v[215,0] = 1 v[216,0] = 1 v[217,0] = 1 v[218,0] = 1 v[219,0] = 1 v[220,0] = 1 v[221,0] = 1 v[222,0] = 1 v[223,0] = 1 v[224,0] = 1 v[225,0] = 1 v[226,0] = 1 v[227,0] = 1 v[228,0] = 1 v[229,0] = 1 v[230,0] = 1 v[231,0] = 1 v[232,0] = 1 v[233,0] = 1 v[234,0] = 1 v[235,0] = 1 v[236,0] = 1 v[237,0] = 1 v[238,0] = 1 v[239,0] = 1 v[240,0] = 1 v[241,0] = 1 v[242,0] = 1 v[243,0] = 1 v[244,0] = 1 v[245,0] = 1 v[246,0] = 1 v[247,0] = 1 v[248,0] = 1 v[249,0] = 1 v[250,0] = 1 v[251,0] = 1 v[252,0] = 1 v[253,0] = 1 v[254,0] = 1 v[255,0] = 1 v[256,0] = 1 v[257,0] = 1 v[258,0] = 1 v[259,0] = 1 v[260,0] = 1 v[261,0] = 1 v[262,0] = 1 v[263,0] = 1 v[264,0] = 1 v[265,0] = 1 v[266,0] = 1 v[267,0] = 1 v[268,0] = 1 v[269,0] = 1 v[270,0] = 1 v[271,0] = 1 v[272,0] = 1 v[273,0] = 1 v[274,0] = 1 v[275,0] = 1 v[276,0] = 1 v[277,0] = 1 v[278,0] = 1 v[279,0] = 1 v[280,0] = 1 v[281,0] = 1 v[282,0] = 1 v[283,0] = 1 v[284,0] = 1 v[285,0] = 1 v[286,0] = 1 v[287,0] = 1 v[288,0] = 1 v[289,0] = 1 v[290,0] = 1 v[291,0] = 1 v[292,0] = 1 v[293,0] = 1 v[294,0] = 1 v[295,0] = 1 v[296,0] = 1 v[297,0] = 1 v[298,0] = 1 v[299,0] = 1 v[300,0] = 1 v[301,0] = 1 v[302,0] = 1 v[303,0] = 1 v[304,0] = 1 v[305,0] = 1 v[306,0] = 1 v[307,0] = 1 v[308,0] = 1 v[309,0] = 1 v[310,0] = 1 v[311,0] = 1 v[312,0] = 1 v[313,0] 
= 1 v[314,0] = 1 v[315,0] = 1 v[316,0] = 1 v[317,0] = 1 v[318,0] = 1 v[319,0] = 1 v[320,0] = 1 v[321,0] = 1 v[322,0] = 1 v[323,0] = 1 v[324,0] = 1 v[325,0] = 1 v[326,0] = 1 v[327,0] = 1 v[328,0] = 1 v[329,0] = 1 v[330,0] = 1 v[331,0] = 1 v[332,0] = 1 v[333,0] = 1 v[334,0] = 1 v[335,0] = 1 v[336,0] = 1 v[337,0] = 1 v[338,0] = 1 v[339,0] = 1 v[340,0] = 1 v[341,0] = 1 v[342,0] = 1 v[343,0] = 1 v[344,0] = 1 v[345,0] = 1 v[346,0] = 1 v[347,0] = 1 v[348,0] = 1 v[349,0] = 1 v[350,0] = 1 v[351,0] = 1 v[352,0] = 1 v[353,0] = 1 v[354,0] = 1 v[355,0] = 1 v[356,0] = 1 v[357,0] = 1 v[358,0] = 1 v[359,0] = 1 v[360,0] = 1 v[361,0] = 1 v[362,0] = 1 v[363,0] = 1 v[364,0] = 1 v[365,0] = 1 v[366,0] = 1 v[367,0] = 1 v[368,0] = 1 v[369,0] = 1 v[370,0] = 1 v[371,0] = 1 v[372,0] = 1 v[373,0] = 1 v[374,0] = 1 v[375,0] = 1 v[376,0] = 1 v[377,0] = 1 v[378,0] = 1 v[379,0] = 1 v[380,0] = 1 v[381,0] = 1 v[382,0] = 1 v[383,0] = 1 v[384,0] = 1 v[385,0] = 1 v[386,0] = 1 v[387,0] = 1 v[388,0] = 1 v[389,0] = 1 v[390,0] = 1 v[391,0] = 1 v[392,0] = 1 v[393,0] = 1 v[394,0] = 1 v[395,0] = 1 v[396,0] = 1 v[397,0] = 1 v[398,0] = 1 v[399,0] = 1 v[400,0] = 1 v[401,0] = 1 v[402,0] = 1 v[403,0] = 1 v[404,0] = 1 v[405,0] = 1 v[406,0] = 1 v[407,0] = 1 v[408,0] = 1 v[409,0] = 1 v[410,0] = 1 v[411,0] = 1 v[412,0] = 1 v[413,0] = 1 v[414,0] = 1 v[415,0] = 1 v[416,0] = 1 v[417,0] = 1 v[418,0] = 1 v[419,0] = 1 v[420,0] = 1 v[421,0] = 1 v[422,0] = 1 v[423,0] = 1 v[424,0] = 1 v[425,0] = 1 v[426,0] = 1 v[427,0] = 1 v[428,0] = 1 v[429,0] = 1 v[430,0] = 1 v[431,0] = 1 v[432,0] = 1 v[433,0] = 1 v[434,0] = 1 v[435,0] = 1 v[436,0] = 1 v[437,0] = 1 v[438,0] = 1 v[439,0] = 1 v[440,0] = 1 v[441,0] = 1 v[442,0] = 1 v[443,0] = 1 v[444,0] = 1 v[445,0] = 1 v[446,0] = 1 v[447,0] = 1 v[448,0] = 1 v[449,0] = 1 v[450,0] = 1 v[451,0] = 1 v[452,0] = 1 v[453,0] = 1 v[454,0] = 1 v[455,0] = 1 v[456,0] = 1 v[457,0] = 1 v[458,0] = 1 v[459,0] = 1 v[460,0] = 1 v[461,0] = 1 v[462,0] = 1 v[463,0] = 1 v[464,0] = 1 v[465,0] = 1 v[466,0] = 1 
v[467,0] = 1 v[468,0] = 1 v[469,0] = 1 v[470,0] = 1 v[471,0] = 1 v[472,0] = 1 v[473,0] = 1 v[474,0] = 1 v[475,0] = 1 v[476,0] = 1 v[477,0] = 1 v[478,0] = 1 v[479,0] = 1 v[480,0] = 1 v[481,0] = 1 v[482,0] = 1 v[483,0] = 1 v[484,0] = 1 v[485,0] = 1 v[486,0] = 1 v[487,0] = 1 v[488,0] = 1 v[489,0] = 1 v[490,0] = 1 v[491,0] = 1 v[492,0] = 1 v[493,0] = 1 v[494,0] = 1 v[495,0] = 1 v[496,0] = 1 v[497,0] = 1 v[498,0] = 1 v[499,0] = 1 v[500,0] = 1 v[501,0] = 1 v[502,0] = 1 v[503,0] = 1 v[504,0] = 1 v[505,0] = 1 v[506,0] = 1 v[507,0] = 1 v[508,0] = 1 v[509,0] = 1 v[510,0] = 1 v[511,0] = 1 v[512,0] = 1 v[513,0] = 1 v[514,0] = 1 v[515,0] = 1 v[516,0] = 1 v[517,0] = 1 v[518,0] = 1 v[519,0] = 1 v[520,0] = 1 v[521,0] = 1 v[522,0] = 1 v[523,0] = 1 v[524,0] = 1 v[525,0] = 1 v[526,0] = 1 v[527,0] = 1 v[528,0] = 1 v[529,0] = 1 v[530,0] = 1 v[531,0] = 1 v[532,0] = 1 v[533,0] = 1 v[534,0] = 1 v[535,0] = 1 v[536,0] = 1 v[537,0] = 1 v[538,0] = 1 v[539,0] = 1 v[540,0] = 1 v[541,0] = 1 v[542,0] = 1 v[543,0] = 1 v[544,0] = 1 v[545,0] = 1 v[546,0] = 1 v[547,0] = 1 v[548,0] = 1 v[549,0] = 1 v[550,0] = 1 v[551,0] = 1 v[552,0] = 1 v[553,0] = 1 v[554,0] = 1 v[555,0] = 1 v[556,0] = 1 v[557,0] = 1 v[558,0] = 1 v[559,0] = 1 v[560,0] = 1 v[561,0] = 1 v[562,0] = 1 v[563,0] = 1 v[564,0] = 1 v[565,0] = 1 v[566,0] = 1 v[567,0] = 1 v[568,0] = 1 v[569,0] = 1 v[570,0] = 1 v[571,0] = 1 v[572,0] = 1 v[573,0] = 1 v[574,0] = 1 v[575,0] = 1 v[576,0] = 1 v[577,0] = 1 v[578,0] = 1 v[579,0] = 1 v[580,0] = 1 v[581,0] = 1 v[582,0] = 1 v[583,0] = 1 v[584,0] = 1 v[585,0] = 1 v[586,0] = 1 v[587,0] = 1 v[588,0] = 1 v[589,0] = 1 v[590,0] = 1 v[591,0] = 1 v[592,0] = 1 v[593,0] = 1 v[594,0] = 1 v[595,0] = 1 v[596,0] = 1 v[597,0] = 1 v[598,0] = 1 v[599,0] = 1 v[600,0] = 1 v[601,0] = 1 v[602,0] = 1 v[603,0] = 1 v[604,0] = 1 v[605,0] = 1 v[606,0] = 1 v[607,0] = 1 v[608,0] = 1 v[609,0] = 1 v[610,0] = 1 v[611,0] = 1 v[612,0] = 1 v[613,0] = 1 v[614,0] = 1 v[615,0] = 1 v[616,0] = 1 v[617,0] = 1 v[618,0] = 1 v[619,0] = 1 v[620,0] = 
1 v[621,0] = 1 v[622,0] = 1 v[623,0] = 1 v[624,0] = 1 v[625,0] = 1 v[626,0] = 1 v[627,0] = 1 v[628,0] = 1 v[629,0] = 1 v[630,0] = 1 v[631,0] = 1 v[632,0] = 1 v[633,0] = 1 v[634,0] = 1 v[635,0] = 1 v[636,0] = 1 v[637,0] = 1 v[638,0] = 1 v[639,0] = 1 v[640,0] = 1 v[641,0] = 1 v[642,0] = 1 v[643,0] = 1 v[644,0] = 1 v[645,0] = 1 v[646,0] = 1 v[647,0] = 1 v[648,0] = 1 v[649,0] = 1 v[650,0] = 1 v[651,0] = 1 v[652,0] = 1 v[653,0] = 1 v[654,0] = 1 v[655,0] = 1 v[656,0] = 1 v[657,0] = 1 v[658,0] = 1 v[659,0] = 1 v[660,0] = 1 v[661,0] = 1 v[662,0] = 1 v[663,0] = 1 v[664,0] = 1 v[665,0] = 1 v[666,0] = 1 v[667,0] = 1 v[668,0] = 1 v[669,0] = 1 v[670,0] = 1 v[671,0] = 1 v[672,0] = 1 v[673,0] = 1 v[674,0] = 1 v[675,0] = 1 v[676,0] = 1 v[677,0] = 1 v[678,0] = 1 v[679,0] = 1 v[680,0] = 1 v[681,0] = 1 v[682,0] = 1 v[683,0] = 1 v[684,0] = 1 v[685,0] = 1 v[686,0] = 1 v[687,0] = 1 v[688,0] = 1 v[689,0] = 1 v[690,0] = 1 v[691,0] = 1 v[692,0] = 1 v[693,0] = 1 v[694,0] = 1 v[695,0] = 1 v[696,0] = 1 v[697,0] = 1 v[698,0] = 1 v[699,0] = 1 v[700,0] = 1 v[701,0] = 1 v[702,0] = 1 v[703,0] = 1 v[704,0] = 1 v[705,0] = 1 v[706,0] = 1 v[707,0] = 1 v[708,0] = 1 v[709,0] = 1 v[710,0] = 1 v[711,0] = 1 v[712,0] = 1 v[713,0] = 1 v[714,0] = 1 v[715,0] = 1 v[716,0] = 1 v[717,0] = 1 v[718,0] = 1 v[719,0] = 1 v[720,0] = 1 v[721,0] = 1 v[722,0] = 1 v[723,0] = 1 v[724,0] = 1 v[725,0] = 1 v[726,0] = 1 v[727,0] = 1 v[728,0] = 1 v[729,0] = 1 v[730,0] = 1 v[731,0] = 1 v[732,0] = 1 v[733,0] = 1 v[734,0] = 1 v[735,0] = 1 v[736,0] = 1 v[737,0] = 1 v[738,0] = 1 v[739,0] = 1 v[740,0] = 1 v[741,0] = 1 v[742,0] = 1 v[743,0] = 1 v[744,0] = 1 v[745,0] = 1 v[746,0] = 1 v[747,0] = 1 v[748,0] = 1 v[749,0] = 1 v[750,0] = 1 v[751,0] = 1 v[752,0] = 1 v[753,0] = 1 v[754,0] = 1 v[755,0] = 1 v[756,0] = 1 v[757,0] = 1 v[758,0] = 1 v[759,0] = 1 v[760,0] = 1 v[761,0] = 1 v[762,0] = 1 v[763,0] = 1 v[764,0] = 1 v[765,0] = 1 v[766,0] = 1 v[767,0] = 1 v[768,0] = 1 v[769,0] = 1 v[770,0] = 1 v[771,0] = 1 v[772,0] = 1 v[773,0] = 1 v[774,0] 
= 1 v[775,0] = 1 v[776,0] = 1 v[777,0] = 1 v[778,0] = 1 v[779,0] = 1 v[780,0] = 1 v[781,0] = 1 v[782,0] = 1 v[783,0] = 1 v[784,0] = 1 v[785,0] = 1 v[786,0] = 1 v[787,0] = 1 v[788,0] = 1 v[789,0] = 1 v[790,0] = 1 v[791,0] = 1 v[792,0] = 1 v[793,0] = 1 v[794,0] = 1 v[795,0] = 1 v[796,0] = 1 v[797,0] = 1 v[798,0] = 1 v[799,0] = 1 v[800,0] = 1 v[801,0] = 1 v[802,0] = 1 v[803,0] = 1 v[804,0] = 1 v[805,0] = 1 v[806,0] = 1 v[807,0] = 1 v[808,0] = 1 v[809,0] = 1 v[810,0] = 1 v[811,0] = 1 v[812,0] = 1 v[813,0] = 1 v[814,0] = 1 v[815,0] = 1 v[816,0] = 1 v[817,0] = 1 v[818,0] = 1 v[819,0] = 1 v[820,0] = 1 v[821,0] = 1 v[822,0] = 1 v[823,0] = 1 v[824,0] = 1 v[825,0] = 1 v[826,0] = 1 v[827,0] = 1 v[828,0] = 1 v[829,0] = 1 v[830,0] = 1 v[831,0] = 1 v[832,0] = 1 v[833,0] = 1 v[834,0] = 1 v[835,0] = 1 v[836,0] = 1 v[837,0] = 1 v[838,0] = 1 v[839,0] = 1 v[840,0] = 1 v[841,0] = 1 v[842,0] = 1 v[843,0] = 1 v[844,0] = 1 v[845,0] = 1 v[846,0] = 1 v[847,0] = 1 v[848,0] = 1 v[849,0] = 1 v[850,0] = 1 v[851,0] = 1 v[852,0] = 1 v[853,0] = 1 v[854,0] = 1 v[855,0] = 1 v[856,0] = 1 v[857,0] = 1 v[858,0] = 1 v[859,0] = 1 v[860,0] = 1 v[861,0] = 1 v[862,0] = 1 v[863,0] = 1 v[864,0] = 1 v[865,0] = 1 v[866,0] = 1 v[867,0] = 1 v[868,0] = 1 v[869,0] = 1 v[870,0] = 1 v[871,0] = 1 v[872,0] = 1 v[873,0] = 1 v[874,0] = 1 v[875,0] = 1 v[876,0] = 1 v[877,0] = 1 v[878,0] = 1 v[879,0] = 1 v[880,0] = 1 v[881,0] = 1 v[882,0] = 1 v[883,0] = 1 v[884,0] = 1 v[885,0] = 1 v[886,0] = 1 v[887,0] = 1 v[888,0] = 1 v[889,0] = 1 v[890,0] = 1 v[891,0] = 1 v[892,0] = 1 v[893,0] = 1 v[894,0] = 1 v[895,0] = 1 v[896,0] = 1 v[897,0] = 1 v[898,0] = 1 v[899,0] = 1 v[900,0] = 1 v[901,0] = 1 v[902,0] = 1 v[903,0] = 1 v[904,0] = 1 v[905,0] = 1 v[906,0] = 1 v[907,0] = 1 v[908,0] = 1 v[909,0] = 1 v[910,0] = 1 v[911,0] = 1 v[912,0] = 1 v[913,0] = 1 v[914,0] = 1 v[915,0] = 1 v[916,0] = 1 v[917,0] = 1 v[918,0] = 1 v[919,0] = 1 v[920,0] = 1 v[921,0] = 1 v[922,0] = 1 v[923,0] = 1 v[924,0] = 1 v[925,0] = 1 v[926,0] = 1 v[927,0] = 1 
v[928,0] = 1 v[929,0] = 1 v[930,0] = 1 v[931,0] = 1 v[932,0] = 1 v[933,0] = 1 v[934,0] = 1 v[935,0] = 1 v[936,0] = 1 v[937,0] = 1 v[938,0] = 1 v[939,0] = 1 v[940,0] = 1 v[941,0] = 1 v[942,0] = 1 v[943,0] = 1 v[944,0] = 1 v[945,0] = 1 v[946,0] = 1 v[947,0] = 1 v[948,0] = 1 v[949,0] = 1 v[950,0] = 1 v[951,0] = 1 v[952,0] = 1 v[953,0] = 1 v[954,0] = 1 v[955,0] = 1 v[956,0] = 1 v[957,0] = 1 v[958,0] = 1 v[959,0] = 1 v[960,0] = 1 v[961,0] = 1 v[962,0] = 1 v[963,0] = 1 v[964,0] = 1 v[965,0] = 1 v[966,0] = 1 v[967,0] = 1 v[968,0] = 1 v[969,0] = 1 v[970,0] = 1 v[971,0] = 1 v[972,0] = 1 v[973,0] = 1 v[974,0] = 1 v[975,0] = 1 v[976,0] = 1 v[977,0] = 1 v[978,0] = 1 v[979,0] = 1 v[980,0] = 1 v[981,0] = 1 v[982,0] = 1 v[983,0] = 1 v[984,0] = 1 v[985,0] = 1 v[986,0] = 1 v[987,0] = 1 v[988,0] = 1 v[989,0] = 1 v[990,0] = 1 v[991,0] = 1 v[992,0] = 1 v[993,0] = 1 v[994,0] = 1 v[995,0] = 1 v[996,0] = 1 v[997,0] = 1 v[998,0] = 1 v[999,0] = 1 v[1000,0] = 1 v[1001,0] = 1 v[1002,0] = 1 v[1003,0] = 1 v[1004,0] = 1 v[1005,0] = 1 v[1006,0] = 1 v[1007,0] = 1 v[1008,0] = 1 v[1009,0] = 1 v[1010,0] = 1 v[1011,0] = 1 v[1012,0] = 1 v[1013,0] = 1 v[1014,0] = 1 v[1015,0] = 1 v[1016,0] = 1 v[1017,0] = 1 v[1018,0] = 1 v[1019,0] = 1 v[1020,0] = 1 v[1021,0] = 1 v[1022,0] = 1 v[1023,0] = 1 v[1024,0] = 1 v[1025,0] = 1 v[1026,0] = 1 v[1027,0] = 1 v[1028,0] = 1 v[1029,0] = 1 v[1030,0] = 1 v[1031,0] = 1 v[1032,0] = 1 v[1033,0] = 1 v[1034,0] = 1 v[1035,0] = 1 v[1036,0] = 1 v[1037,0] = 1 v[1038,0] = 1 v[1039,0] = 1 v[1040,0] = 1 v[1041,0] = 1 v[1042,0] = 1 v[1043,0] = 1 v[1044,0] = 1 v[1045,0] = 1 v[1046,0] = 1 v[1047,0] = 1 v[1048,0] = 1 v[1049,0] = 1 v[1050,0] = 1 v[1051,0] = 1 v[1052,0] = 1 v[1053,0] = 1 v[1054,0] = 1 v[1055,0] = 1 v[1056,0] = 1 v[1057,0] = 1 v[1058,0] = 1 v[1059,0] = 1 v[1060,0] = 1 v[1061,0] = 1 v[1062,0] = 1 v[1063,0] = 1 v[1064,0] = 1 v[1065,0] = 1 v[1066,0] = 1 v[1067,0] = 1 v[1068,0] = 1 v[1069,0] = 1 v[1070,0] = 1 v[1071,0] = 1 v[1072,0] = 1 v[1073,0] = 1 v[1074,0] = 1 v[1075,0] = 1 
v[1076,0] = 1 v[1077,0] = 1 v[1078,0] = 1 v[1079,0] = 1 v[1080,0] = 1 v[1081,0] = 1 v[1082,0] = 1 v[1083,0] = 1 v[1084,0] = 1 v[1085,0] = 1 v[1086,0] = 1 v[1087,0] = 1 v[1088,0] = 1 v[1089,0] = 1 v[1090,0] = 1 v[1091,0] = 1 v[1092,0] = 1 v[1093,0] = 1 v[1094,0] = 1 v[1095,0] = 1 v[1096,0] = 1 v[1097,0] = 1 v[1098,0] = 1 v[1099,0] = 1 v[1100,0] = 1 v[1101,0] = 1 v[1102,0] = 1 v[1103,0] = 1 v[1104,0] = 1 v[1105,0] = 1 v[1106,0] = 1 v[1107,0] = 1 v[1108,0] = 1 v[1109,0] = 1 v[1110,0] = 1 v[2,1] = 1 v[3,1] = 3 v[4,1] = 1 v[5,1] = 3 v[6,1] = 1 v[7,1] = 3 v[8,1] = 3 v[9,1] = 1 v[10,1] = 3 v[11,1] = 1 v[12,1] = 3 v[13,1] = 1 v[14,1] = 3 v[15,1] = 1 v[16,1] = 1 v[17,1] = 3 v[18,1] = 1 v[19,1] = 3 v[20,1] = 1 v[21,1] = 3 v[22,1] = 1 v[23,1] = 3 v[24,1] = 3 v[25,1] = 1 v[26,1] = 1 v[27,1] = 1 v[28,1] = 3 v[29,1] = 1 v[30,1] = 3 v[31,1] = 1 v[32,1] = 3 v[33,1] = 3 v[34,1] = 1 v[35,1] = 3 v[36,1] = 1 v[37,1] = 1 v[38,1] = 1 v[39,1] = 3 v[40,1] = 1 v[41,1] = 3 v[42,1] = 1 v[43,1] = 1 v[44,1] = 1 v[45,1] = 3 v[46,1] = 3 v[47,1] = 1 v[48,1] = 3 v[49,1] = 3 v[50,1] = 1 v[51,1] = 1 v[52,1] = 3 v[53,1] = 3 v[54,1] = 1 v[55,1] = 3 v[56,1] = 3 v[57,1] = 3 v[58,1] = 1 v[59,1] = 3 v[60,1] = 1 v[61,1] = 3 v[62,1] = 1 v[63,1] = 1 v[64,1] = 3 v[65,1] = 3 v[66,1] = 1 v[67,1] = 1 v[68,1] = 1 v[69,1] = 1 v[70,1] = 3 v[71,1] = 1 v[72,1] = 1 v[73,1] = 3 v[74,1] = 1 v[75,1] = 1 v[76,1] = 1 v[77,1] = 3 v[78,1] = 3 v[79,1] = 1 v[80,1] = 3 v[81,1] = 3 v[82,1] = 1 v[83,1] = 3 v[84,1] = 3 v[85,1] = 3 v[86,1] = 1 v[87,1] = 3 v[88,1] = 3 v[89,1] = 3 v[90,1] = 1 v[91,1] = 3 v[92,1] = 3 v[93,1] = 1 v[94,1] = 3 v[95,1] = 3 v[96,1] = 3 v[97,1] = 1 v[98,1] = 3 v[99,1] = 1 v[100,1] = 3 v[101,1] = 1 v[102,1] = 1 v[103,1] = 3 v[104,1] = 3 v[105,1] = 1 v[106,1] = 3 v[107,1] = 3 v[108,1] = 1 v[109,1] = 1 v[110,1] = 1 v[111,1] = 3 v[112,1] = 3 v[113,1] = 1 v[114,1] = 3 v[115,1] = 3 v[116,1] = 1 v[117,1] = 3 v[118,1] = 1 v[119,1] = 1 v[120,1] = 3 v[121,1] = 3 v[122,1] = 3 v[123,1] = 1 v[124,1] = 1 v[125,1] = 1 
v[126,1] = 3 v[127,1] = 1 v[128,1] = 1 v[129,1] = 3 v[130,1] = 1 v[131,1] = 1 v[132,1] = 3 v[133,1] = 3 v[134,1] = 1 v[135,1] = 3 v[136,1] = 1 v[137,1] = 3 v[138,1] = 3 v[139,1] = 3 v[140,1] = 3 v[141,1] = 1 v[142,1] = 1 v[143,1] = 1 v[144,1] = 3 v[145,1] = 3 v[146,1] = 1 v[147,1] = 1 v[148,1] = 3 v[149,1] = 1 v[150,1] = 1 v[151,1] = 1 v[152,1] = 1 v[153,1] = 1 v[154,1] = 1 v[155,1] = 3 v[156,1] = 1 v[157,1] = 3 v[158,1] = 1 v[159,1] = 1 v[160,1] = 1 v[161,1] = 3 v[162,1] = 1 v[163,1] = 3 v[164,1] = 1 v[165,1] = 3 v[166,1] = 3 v[167,1] = 3 v[168,1] = 1 v[169,1] = 1 v[170,1] = 3 v[171,1] = 3 v[172,1] = 1 v[173,1] = 3 v[174,1] = 1 v[175,1] = 3 v[176,1] = 1 v[177,1] = 1 v[178,1] = 3 v[179,1] = 1 v[180,1] = 3 v[181,1] = 1 v[182,1] = 3 v[183,1] = 1 v[184,1] = 3 v[185,1] = 1 v[186,1] = 1 v[187,1] = 1 v[188,1] = 3 v[189,1] = 3 v[190,1] = 1 v[191,1] = 3 v[192,1] = 3 v[193,1] = 1 v[194,1] = 3 v[195,1] = 1 v[196,1] = 1 v[197,1] = 1 v[198,1] = 3 v[199,1] = 1 v[200,1] = 3 v[201,1] = 1 v[202,1] = 1 v[203,1] = 3 v[204,1] = 1 v[205,1] = 1 v[206,1] = 3 v[207,1] = 3 v[208,1] = 1 v[209,1] = 1 v[210,1] = 3 v[211,1] = 3 v[212,1] = 3 v[213,1] = 1 v[214,1] = 3 v[215,1] = 3 v[216,1] = 3 v[217,1] = 1 v[218,1] = 3 v[219,1] = 1 v[220,1] = 3 v[221,1] = 1 v[222,1] = 1 v[223,1] = 1 v[224,1] = 3 v[225,1] = 1 v[226,1] = 1 v[227,1] = 1 v[228,1] = 3 v[229,1] = 1 v[230,1] = 1 v[231,1] = 1 v[232,1] = 1 v[233,1] = 1 v[234,1] = 3 v[235,1] = 3 v[236,1] = 3 v[237,1] = 1 v[238,1] = 1 v[239,1] = 1 v[240,1] = 1 v[241,1] = 3 v[242,1] = 3 v[243,1] = 3 v[244,1] = 1 v[245,1] = 3 v[246,1] = 3 v[247,1] = 1 v[248,1] = 1 v[249,1] = 1 v[250,1] = 1 v[251,1] = 3 v[252,1] = 1 v[253,1] = 1 v[254,1] = 3 v[255,1] = 1 v[256,1] = 3 v[257,1] = 3 v[258,1] = 1 v[259,1] = 1 v[260,1] = 3 v[261,1] = 3 v[262,1] = 1 v[263,1] = 1 v[264,1] = 1 v[265,1] = 1 v[266,1] = 3 v[267,1] = 1 v[268,1] = 3 v[269,1] = 3 v[270,1] = 1 v[271,1] = 3 v[272,1] = 3 v[273,1] = 1 v[274,1] = 1 v[275,1] = 1 v[276,1] = 3 v[277,1] = 3 v[278,1] = 3 v[279,1] = 
1 v[280,1] = 3 v[281,1] = 3 v[282,1] = 1 v[283,1] = 3 v[284,1] = 3 v[285,1] = 1 v[286,1] = 3 v[287,1] = 1 v[288,1] = 3 v[289,1] = 3 v[290,1] = 3 v[291,1] = 1 v[292,1] = 3 v[293,1] = 1 v[294,1] = 1 v[295,1] = 3 v[296,1] = 1 v[297,1] = 3 v[298,1] = 1 v[299,1] = 1 v[300,1] = 1 v[301,1] = 3 v[302,1] = 3 v[303,1] = 3 v[304,1] = 1 v[305,1] = 1 v[306,1] = 3 v[307,1] = 1 v[308,1] = 3 v[309,1] = 1 v[310,1] = 1 v[311,1] = 1 v[312,1] = 1 v[313,1] = 1 v[314,1] = 1 v[315,1] = 3 v[316,1] = 1 v[317,1] = 1 v[318,1] = 3 v[319,1] = 1 v[320,1] = 3 v[321,1] = 3 v[322,1] = 1 v[323,1] = 1 v[324,1] = 1 v[325,1] = 1 v[326,1] = 3 v[327,1] = 1 v[328,1] = 3 v[329,1] = 1 v[330,1] = 3 v[331,1] = 1 v[332,1] = 1 v[333,1] = 1 v[334,1] = 1 v[335,1] = 3 v[336,1] = 3 v[337,1] = 1 v[338,1] = 1 v[339,1] = 1 v[340,1] = 1 v[341,1] = 1 v[342,1] = 3 v[343,1] = 3 v[344,1] = 3 v[345,1] = 1 v[346,1] = 1 v[347,1] = 3 v[348,1] = 3 v[349,1] = 3 v[350,1] = 3 v[351,1] = 3 v[352,1] = 1 v[353,1] = 3 v[354,1] = 3 v[355,1] = 1 v[356,1] = 3 v[357,1] = 3 v[358,1] = 3 v[359,1] = 3 v[360,1] = 1 v[361,1] = 1 v[362,1] = 1 v[363,1] = 1 v[364,1] = 1 v[365,1] = 1 v[366,1] = 3 v[367,1] = 1 v[368,1] = 1 v[369,1] = 3 v[370,1] = 1 v[371,1] = 1 v[372,1] = 1 v[373,1] = 3 v[374,1] = 1 v[375,1] = 1 v[376,1] = 1 v[377,1] = 3 v[378,1] = 3 v[379,1] = 3 v[380,1] = 1 v[381,1] = 3 v[382,1] = 1 v[383,1] = 1 v[384,1] = 3 v[385,1] = 3 v[386,1] = 3 v[387,1] = 1 v[388,1] = 3 v[389,1] = 3 v[390,1] = 1 v[391,1] = 3 v[392,1] = 1 v[393,1] = 3 v[394,1] = 3 v[395,1] = 1 v[396,1] = 3 v[397,1] = 3 v[398,1] = 3 v[399,1] = 1 v[400,1] = 1 v[401,1] = 3 v[402,1] = 3 v[403,1] = 1 v[404,1] = 3 v[405,1] = 1 v[406,1] = 3 v[407,1] = 1 v[408,1] = 1 v[409,1] = 1 v[410,1] = 3 v[411,1] = 3 v[412,1] = 3 v[413,1] = 3 v[414,1] = 1 v[415,1] = 3 v[416,1] = 1 v[417,1] = 1 v[418,1] = 3 v[419,1] = 1 v[420,1] = 3 v[421,1] = 1 v[422,1] = 1 v[423,1] = 1 v[424,1] = 3 v[425,1] = 1 v[426,1] = 3 v[427,1] = 1 v[428,1] = 3 v[429,1] = 1 v[430,1] = 3 v[431,1] = 3 v[432,1] = 3 v[433,1] 
= 3 v[434,1] = 3 v[435,1] = 3 v[436,1] = 3 v[437,1] = 3 v[438,1] = 1 v[439,1] = 3 v[440,1] = 3 v[441,1] = 3 v[442,1] = 3 v[443,1] = 3 v[444,1] = 1 v[445,1] = 3 v[446,1] = 1 v[447,1] = 3 v[448,1] = 3 v[449,1] = 3 v[450,1] = 1 v[451,1] = 3 v[452,1] = 1 v[453,1] = 3 v[454,1] = 1 v[455,1] = 3 v[456,1] = 3 v[457,1] = 1 v[458,1] = 3 v[459,1] = 3 v[460,1] = 3 v[461,1] = 3 v[462,1] = 3 v[463,1] = 3 v[464,1] = 3 v[465,1] = 3 v[466,1] = 3 v[467,1] = 1 v[468,1] = 1 v[469,1] = 1 v[470,1] = 1 v[471,1] = 1 v[472,1] = 1 v[473,1] = 3 v[474,1] = 3 v[475,1] = 1 v[476,1] = 1 v[477,1] = 3 v[478,1] = 3 v[479,1] = 1 v[480,1] = 1 v[481,1] = 1 v[482,1] = 3 v[483,1] = 3 v[484,1] = 1 v[485,1] = 1 v[486,1] = 3 v[487,1] = 3 v[488,1] = 3 v[489,1] = 3 v[490,1] = 1 v[491,1] = 1 v[492,1] = 3 v[493,1] = 1 v[494,1] = 3 v[495,1] = 3 v[496,1] = 1 v[497,1] = 3 v[498,1] = 3 v[499,1] = 1 v[500,1] = 1 v[501,1] = 1 v[502,1] = 3 v[503,1] = 3 v[504,1] = 3 v[505,1] = 1 v[506,1] = 1 v[507,1] = 3 v[508,1] = 3 v[509,1] = 3 v[510,1] = 3 v[511,1] = 3 v[512,1] = 1 v[513,1] = 1 v[514,1] = 1 v[515,1] = 3 v[516,1] = 1 v[517,1] = 3 v[518,1] = 3 v[519,1] = 1 v[520,1] = 3 v[521,1] = 3 v[522,1] = 3 v[523,1] = 3 v[524,1] = 1 v[525,1] = 1 v[526,1] = 3 v[527,1] = 1 v[528,1] = 1 v[529,1] = 3 v[530,1] = 1 v[531,1] = 3 v[532,1] = 1 v[533,1] = 3 v[534,1] = 1 v[535,1] = 3 v[536,1] = 3 v[537,1] = 1 v[538,1] = 1 v[539,1] = 3 v[540,1] = 3 v[541,1] = 1 v[542,1] = 3 v[543,1] = 3 v[544,1] = 1 v[545,1] = 3 v[546,1] = 3 v[547,1] = 1 v[548,1] = 1 v[549,1] = 3 v[550,1] = 1 v[551,1] = 3 v[552,1] = 3 v[553,1] = 1 v[554,1] = 1 v[555,1] = 3 v[556,1] = 1 v[557,1] = 3 v[558,1] = 1 v[559,1] = 3 v[560,1] = 1 v[561,1] = 1 v[562,1] = 3 v[563,1] = 3 v[564,1] = 1 v[565,1] = 1 v[566,1] = 1 v[567,1] = 3 v[568,1] = 3 v[569,1] = 1 v[570,1] = 3 v[571,1] = 1 v[572,1] = 1 v[573,1] = 3 v[574,1] = 3 v[575,1] = 1 v[576,1] = 1 v[577,1] = 3 v[578,1] = 1 v[579,1] = 3 v[580,1] = 1 v[581,1] = 1 v[582,1] = 1 v[583,1] = 1 v[584,1] = 1 v[585,1] = 3 v[586,1] = 1 
v[587,1] = 1 v[588,1] = 1 v[589,1] = 1 v[590,1] = 3 v[591,1] = 1 v[592,1] = 3 v[593,1] = 1 v[594,1] = 1 v[595,1] = 3 v[596,1] = 3 v[597,1] = 1 v[598,1] = 1 v[599,1] = 3 v[600,1] = 1 v[601,1] = 3 v[602,1] = 1 v[603,1] = 3 v[604,1] = 3 v[605,1] = 3 v[606,1] = 1 v[607,1] = 3 v[608,1] = 3 v[609,1] = 3 v[610,1] = 1 v[611,1] = 1 v[612,1] = 3 v[613,1] = 3 v[614,1] = 3 v[615,1] = 1 v[616,1] = 1 v[617,1] = 1 v[618,1] = 1 v[619,1] = 3 v[620,1] = 1 v[621,1] = 3 v[622,1] = 1 v[623,1] = 3 v[624,1] = 1 v[625,1] = 1 v[626,1] = 3 v[627,1] = 3 v[628,1] = 1 v[629,1] = 1 v[630,1] = 1 v[631,1] = 3 v[632,1] = 3 v[633,1] = 1 v[634,1] = 3 v[635,1] = 1 v[636,1] = 3 v[637,1] = 1 v[638,1] = 1 v[639,1] = 1 v[640,1] = 1 v[641,1] = 1 v[642,1] = 1 v[643,1] = 3 v[644,1] = 1 v[645,1] = 3 v[646,1] = 3 v[647,1] = 1 v[648,1] = 3 v[649,1] = 3 v[650,1] = 3 v[651,1] = 1 v[652,1] = 3 v[653,1] = 1 v[654,1] = 1 v[655,1] = 3 v[656,1] = 3 v[657,1] = 1 v[658,1] = 1 v[659,1] = 3 v[660,1] = 3 v[661,1] = 1 v[662,1] = 1 v[663,1] = 1 v[664,1] = 3 v[665,1] = 1 v[666,1] = 3 v[667,1] = 3 v[668,1] = 1 v[669,1] = 1 v[670,1] = 3 v[671,1] = 1 v[672,1] = 1 v[673,1] = 3 v[674,1] = 1 v[675,1] = 3 v[676,1] = 1 v[677,1] = 1 v[678,1] = 1 v[679,1] = 3 v[680,1] = 3 v[681,1] = 3 v[682,1] = 3 v[683,1] = 1 v[684,1] = 1 v[685,1] = 3 v[686,1] = 3 v[687,1] = 1 v[688,1] = 1 v[689,1] = 1 v[690,1] = 1 v[691,1] = 3 v[692,1] = 1 v[693,1] = 1 v[694,1] = 3 v[695,1] = 3 v[696,1] = 3 v[697,1] = 1 v[698,1] = 1 v[699,1] = 3 v[700,1] = 3 v[701,1] = 1 v[702,1] = 3 v[703,1] = 3 v[704,1] = 1 v[705,1] = 1 v[706,1] = 3 v[707,1] = 3 v[708,1] = 3 v[709,1] = 3 v[710,1] = 3 v[711,1] = 3 v[712,1] = 3 v[713,1] = 1 v[714,1] = 3 v[715,1] = 3 v[716,1] = 1 v[717,1] = 3 v[718,1] = 1 v[719,1] = 3 v[720,1] = 1 v[721,1] = 1 v[722,1] = 3 v[723,1] = 3 v[724,1] = 1 v[725,1] = 1 v[726,1] = 1 v[727,1] = 3 v[728,1] = 1 v[729,1] = 3 v[730,1] = 3 v[731,1] = 1 v[732,1] = 3 v[733,1] = 3 v[734,1] = 1 v[735,1] = 3 v[736,1] = 1 v[737,1] = 1 v[738,1] = 3 v[739,1] = 3 v[740,1] = 
3 v[741,1] = 1 v[742,1] = 1 v[743,1] = 1 v[744,1] = 3 v[745,1] = 1 v[746,1] = 1 v[747,1] = 1 v[748,1] = 3 v[749,1] = 3 v[750,1] = 3 v[751,1] = 1 v[752,1] = 3 v[753,1] = 3 v[754,1] = 1 v[755,1] = 3 v[756,1] = 1 v[757,1] = 1 v[758,1] = 3 v[759,1] = 3 v[760,1] = 3 v[761,1] = 1 v[762,1] = 3 v[763,1] = 3 v[764,1] = 1 v[765,1] = 1 v[766,1] = 1 v[767,1] = 3 v[768,1] = 1 v[769,1] = 3 v[770,1] = 3 v[771,1] = 3 v[772,1] = 3 v[773,1] = 3 v[774,1] = 3 v[775,1] = 3 v[776,1] = 3 v[777,1] = 1 v[778,1] = 3 v[779,1] = 3 v[780,1] = 1 v[781,1] = 3 v[782,1] = 1 v[783,1] = 1 v[784,1] = 3 v[785,1] = 3 v[786,1] = 3 v[787,1] = 1 v[788,1] = 3 v[789,1] = 3 v[790,1] = 3 v[791,1] = 3 v[792,1] = 3 v[793,1] = 1 v[794,1] = 3 v[795,1] = 3 v[796,1] = 3 v[797,1] = 1 v[798,1] = 1 v[799,1] = 1 v[800,1] = 3 v[801,1] = 3 v[802,1] = 1 v[803,1] = 3 v[804,1] = 3 v[805,1] = 1 v[806,1] = 3 v[807,1] = 1 v[808,1] = 3 v[809,1] = 1 v[810,1] = 3 v[811,1] = 1 v[812,1] = 3 v[813,1] = 3 v[814,1] = 3 v[815,1] = 3 v[816,1] = 3 v[817,1] = 3 v[818,1] = 1 v[819,1] = 1 v[820,1] = 3 v[821,1] = 1 v[822,1] = 3 v[823,1] = 1 v[824,1] = 1 v[825,1] = 1 v[826,1] = 1 v[827,1] = 1 v[828,1] = 3 v[829,1] = 1 v[830,1] = 1 v[831,1] = 1 v[832,1] = 3 v[833,1] = 1 v[834,1] = 3 v[835,1] = 1 v[836,1] = 1 v[837,1] = 3 v[838,1] = 3 v[839,1] = 3 v[840,1] = 1 v[841,1] = 3 v[842,1] = 1 v[843,1] = 3 v[844,1] = 1 v[845,1] = 1 v[846,1] = 3 v[847,1] = 1 v[848,1] = 3 v[849,1] = 3 v[850,1] = 1 v[851,1] = 3 v[852,1] = 1 v[853,1] = 3 v[854,1] = 3 v[855,1] = 1 v[856,1] = 3 v[857,1] = 3 v[858,1] = 1 v[859,1] = 3 v[860,1] = 3 v[861,1] = 3 v[862,1] = 3 v[863,1] = 3 v[864,1] = 3 v[865,1] = 1 v[866,1] = 3 v[867,1] = 1 v[868,1] = 1 v[869,1] = 3 v[870,1] = 3 v[871,1] = 3 v[872,1] = 1 v[873,1] = 1 v[874,1] = 3 v[875,1] = 3 v[876,1] = 3 v[877,1] = 3 v[878,1] = 3 v[879,1] = 3 v[880,1] = 3 v[881,1] = 1 v[882,1] = 3 v[883,1] = 3 v[884,1] = 3 v[885,1] = 3 v[886,1] = 1 v[887,1] = 3 v[888,1] = 1 v[889,1] = 3 v[890,1] = 3 v[891,1] = 3 v[892,1] = 1 v[893,1] = 3 v[894,1] 
= 1 v[895,1] = 3 v[896,1] = 1 v[897,1] = 1 v[898,1] = 1 v[899,1] = 3 v[900,1] = 3 v[901,1] = 1 v[902,1] = 3 v[903,1] = 1 v[904,1] = 1 v[905,1] = 3 v[906,1] = 3 v[907,1] = 1 v[908,1] = 3 v[909,1] = 1 v[910,1] = 1 v[911,1] = 1 v[912,1] = 1 v[913,1] = 3 v[914,1] = 1 v[915,1] = 3 v[916,1] = 1 v[917,1] = 1 v[918,1] = 3 v[919,1] = 1 v[920,1] = 3 v[921,1] = 1 v[922,1] = 3 v[923,1] = 3 v[924,1] = 3 v[925,1] = 3 v[926,1] = 3 v[927,1] = 3 v[928,1] = 1 v[929,1] = 3 v[930,1] = 3 v[931,1] = 3 v[932,1] = 3 v[933,1] = 1 v[934,1] = 3 v[935,1] = 3 v[936,1] = 1 v[937,1] = 3 v[938,1] = 3 v[939,1] = 3 v[940,1] = 3 v[941,1] = 3 v[942,1] = 1 v[943,1] = 1 v[944,1] = 1 v[945,1] = 1 v[946,1] = 3 v[947,1] = 3 v[948,1] = 3 v[949,1] = 1 v[950,1] = 3 v[951,1] = 3 v[952,1] = 1 v[953,1] = 1 v[954,1] = 3 v[955,1] = 3 v[956,1] = 1 v[957,1] = 1 v[958,1] = 3 v[959,1] = 3 v[960,1] = 1 v[961,1] = 3 v[962,1] = 1 v[963,1] = 1 v[964,1] = 3 v[965,1] = 1 v[966,1] = 3 v[967,1] = 3 v[968,1] = 3 v[969,1] = 3 v[970,1] = 3 v[971,1] = 1 v[972,1] = 3 v[973,1] = 1 v[974,1] = 1 v[975,1] = 3 v[976,1] = 3 v[977,1] = 3 v[978,1] = 3 v[979,1] = 1 v[980,1] = 3 v[981,1] = 1 v[982,1] = 1 v[983,1] = 3 v[984,1] = 3 v[985,1] = 3 v[986,1] = 3 v[987,1] = 3 v[988,1] = 3 v[989,1] = 1 v[990,1] = 1 v[991,1] = 3 v[992,1] = 1 v[993,1] = 3 v[994,1] = 1 v[995,1] = 1 v[996,1] = 3 v[997,1] = 1 v[998,1] = 1 v[999,1] = 1 v[1000,1] = 1 v[1001,1] = 3 v[1002,1] = 3 v[1003,1] = 1 v[1004,1] = 1 v[1005,1] = 3 v[1006,1] = 1 v[1007,1] = 1 v[1008,1] = 1 v[1009,1] = 3 v[1010,1] = 1 v[1011,1] = 3 v[1012,1] = 1 v[1013,1] = 1 v[1014,1] = 3 v[1015,1] = 3 v[1016,1] = 1 v[1017,1] = 3 v[1018,1] = 1 v[1019,1] = 1 v[1020,1] = 3 v[1021,1] = 3 v[1022,1] = 3 v[1023,1] = 3 v[1024,1] = 3 v[1025,1] = 1 v[1026,1] = 3 v[1027,1] = 1 v[1028,1] = 1 v[1029,1] = 1 v[1030,1] = 3 v[1031,1] = 1 v[1032,1] = 1 v[1033,1] = 1 v[1034,1] = 3 v[1035,1] = 1 v[1036,1] = 1 v[1037,1] = 3 v[1038,1] = 1 v[1039,1] = 3 v[1040,1] = 3 v[1041,1] = 3 v[1042,1] = 3 v[1043,1] = 3 v[1044,1] = 1 
v[1045,1] = 1 v[1046,1] = 1 v[1047,1] = 3 v[1048,1] = 3 v[1049,1] = 3 v[1050,1] = 3 v[1051,1] = 1 v[1052,1] = 3 v[1053,1] = 3 v[1054,1] = 3 v[1055,1] = 3 v[1056,1] = 1 v[1057,1] = 1 v[1058,1] = 3 v[1059,1] = 3 v[1060,1] = 3 v[1061,1] = 1 v[1062,1] = 3 v[1063,1] = 1 v[1064,1] = 1 v[1065,1] = 3 v[1066,1] = 3 v[1067,1] = 1 v[1068,1] = 3 v[1069,1] = 3 v[1070,1] = 1 v[1071,1] = 1 v[1072,1] = 1 v[1073,1] = 1 v[1074,1] = 1 v[1075,1] = 3 v[1076,1] = 1 v[1077,1] = 1 v[1078,1] = 3 v[1079,1] = 3 v[1080,1] = 1 v[1081,1] = 1 v[1082,1] = 1 v[1083,1] = 3 v[1084,1] = 1 v[1085,1] = 1 v[1086,1] = 3 v[1087,1] = 3 v[1088,1] = 1 v[1089,1] = 3 v[1090,1] = 3 v[1091,1] = 3 v[1092,1] = 3 v[1093,1] = 3 v[1094,1] = 3 v[1095,1] = 3 v[1096,1] = 3 v[1097,1] = 1 v[1098,1] = 1 v[1099,1] = 3 v[1100,1] = 3 v[1101,1] = 1 v[1102,1] = 1 v[1103,1] = 3 v[1104,1] = 1 v[1105,1] = 3 v[1106,1] = 3 v[1107,1] = 3 v[1108,1] = 3 v[1109,1] = 3 v[1110,1] = 1 v[3,2] = 7 v[4,2] = 5 v[5,2] = 1 v[6,2] = 3 v[7,2] = 3 v[8,2] = 7 v[9,2] = 5 v[10,2] = 5 v[11,2] = 7 v[12,2] = 7 v[13,2] = 1 v[14,2] = 3 v[15,2] = 3 v[16,2] = 7 v[17,2] = 5 v[18,2] = 1 v[19,2] = 1 v[20,2] = 5 v[21,2] = 3 v[22,2] = 7 v[23,2] = 1 v[24,2] = 7 v[25,2] = 5 v[26,2] = 1 v[27,2] = 3 v[28,2] = 7 v[29,2] = 7 v[30,2] = 1 v[31,2] = 1 v[32,2] = 1 v[33,2] = 5 v[34,2] = 7 v[35,2] = 7 v[36,2] = 5 v[37,2] = 1 v[38,2] = 3 v[39,2] = 3 v[40,2] = 7 v[41,2] = 5 v[42,2] = 5 v[43,2] = 5 v[44,2] = 3 v[45,2] = 3 v[46,2] = 3 v[47,2] = 1 v[48,2] = 1 v[49,2] = 5 v[50,2] = 1 v[51,2] = 1 v[52,2] = 5 v[53,2] = 3 v[54,2] = 3 v[55,2] = 3 v[56,2] = 3 v[57,2] = 1 v[58,2] = 3 v[59,2] = 7 v[60,2] = 5 v[61,2] = 7 v[62,2] = 3 v[63,2] = 7 v[64,2] = 1 v[65,2] = 3 v[66,2] = 3 v[67,2] = 5 v[68,2] = 1 v[69,2] = 3 v[70,2] = 5 v[71,2] = 5 v[72,2] = 7 v[73,2] = 7 v[74,2] = 7 v[75,2] = 1 v[76,2] = 1 v[77,2] = 3 v[78,2] = 3 v[79,2] = 1 v[80,2] = 1 v[81,2] = 5 v[82,2] = 1 v[83,2] = 5 v[84,2] = 7 v[85,2] = 5 v[86,2] = 1 v[87,2] = 7 v[88,2] = 5 v[89,2] = 3 v[90,2] = 3 v[91,2] = 1 v[92,2] = 5 
v[93,2] = 7 v[94,2] = 1 v[95,2] = 7 v[96,2] = 5 v[97,2] = 1 v[98,2] = 7 v[99,2] = 3 v[100,2] = 1 v[101,2] = 7 v[102,2] = 1 v[103,2] = 7 v[104,2] = 3 v[105,2] = 3 v[106,2] = 5 v[107,2] = 7 v[108,2] = 3 v[109,2] = 3 v[110,2] = 5 v[111,2] = 1 v[112,2] = 3 v[113,2] = 3 v[114,2] = 1 v[115,2] = 3 v[116,2] = 5 v[117,2] = 1 v[118,2] = 3 v[119,2] = 3 v[120,2] = 3 v[121,2] = 7 v[122,2] = 1 v[123,2] = 1 v[124,2] = 7 v[125,2] = 3 v[126,2] = 1 v[127,2] = 3 v[128,2] = 7 v[129,2] = 5 v[130,2] = 5 v[131,2] = 7 v[132,2] = 5 v[133,2] = 5 v[134,2] = 3 v[135,2] = 1 v[136,2] = 3 v[137,2] = 3 v[138,2] = 3 v[139,2] = 1 v[140,2] = 3 v[141,2] = 3 v[142,2] = 7 v[143,2] = 3 v[144,2] = 3 v[145,2] = 1 v[146,2] = 7 v[147,2] = 5 v[148,2] = 1 v[149,2] = 7 v[150,2] = 7 v[151,2] = 5 v[152,2] = 7 v[153,2] = 5 v[154,2] = 1 v[155,2] = 3 v[156,2] = 1 v[157,2] = 7 v[158,2] = 3 v[159,2] = 7 v[160,2] = 3 v[161,2] = 5 v[162,2] = 7 v[163,2] = 3 v[164,2] = 1 v[165,2] = 3 v[166,2] = 3 v[167,2] = 3 v[168,2] = 1 v[169,2] = 5 v[170,2] = 7 v[171,2] = 3 v[172,2] = 3 v[173,2] = 7 v[174,2] = 7 v[175,2] = 7 v[176,2] = 5 v[177,2] = 3 v[178,2] = 1 v[179,2] = 7 v[180,2] = 1 v[181,2] = 3 v[182,2] = 7 v[183,2] = 5 v[184,2] = 3 v[185,2] = 3 v[186,2] = 3 v[187,2] = 7 v[188,2] = 1 v[189,2] = 1 v[190,2] = 3 v[191,2] = 1 v[192,2] = 5 v[193,2] = 7 v[194,2] = 1 v[195,2] = 3 v[196,2] = 5 v[197,2] = 3 v[198,2] = 5 v[199,2] = 3 v[200,2] = 3 v[201,2] = 7 v[202,2] = 5 v[203,2] = 5 v[204,2] = 3 v[205,2] = 3 v[206,2] = 1 v[207,2] = 3 v[208,2] = 7 v[209,2] = 7 v[210,2] = 7 v[211,2] = 1 v[212,2] = 5 v[213,2] = 7 v[214,2] = 1 v[215,2] = 3 v[216,2] = 1 v[217,2] = 1 v[218,2] = 7 v[219,2] = 1 v[220,2] = 3 v[221,2] = 1 v[222,2] = 7 v[223,2] = 1 v[224,2] = 5 v[225,2] = 3 v[226,2] = 5 v[227,2] = 3 v[228,2] = 1 v[229,2] = 1 v[230,2] = 5 v[231,2] = 5 v[232,2] = 3 v[233,2] = 3 v[234,2] = 5 v[235,2] = 7 v[236,2] = 1 v[237,2] = 5 v[238,2] = 3 v[239,2] = 7 v[240,2] = 7 v[241,2] = 3 v[242,2] = 5 v[243,2] = 3 v[244,2] = 3 v[245,2] = 1 v[246,2] = 7 
v[247,2] = 3 v[248,2] = 1 v[249,2] = 3 v[250,2] = 5 v[251,2] = 7 v[252,2] = 1 v[253,2] = 3 v[254,2] = 7 v[255,2] = 1 v[256,2] = 5 v[257,2] = 1 v[258,2] = 3 v[259,2] = 1 v[260,2] = 5 v[261,2] = 3 v[262,2] = 1 v[263,2] = 7 v[264,2] = 1 v[265,2] = 5 v[266,2] = 5 v[267,2] = 5 v[268,2] = 3 v[269,2] = 7 v[270,2] = 1 v[271,2] = 1 v[272,2] = 7 v[273,2] = 3 v[274,2] = 1 v[275,2] = 1 v[276,2] = 7 v[277,2] = 5 v[278,2] = 7 v[279,2] = 5 v[280,2] = 7 v[281,2] = 7 v[282,2] = 3 v[283,2] = 7 v[284,2] = 1 v[285,2] = 3 v[286,2] = 7 v[287,2] = 7 v[288,2] = 3 v[289,2] = 5 v[290,2] = 1 v[291,2] = 1 v[292,2] = 7 v[293,2] = 1 v[294,2] = 5 v[295,2] = 5 v[296,2] = 5 v[297,2] = 1 v[298,2] = 5 v[299,2] = 1 v[300,2] = 7 v[301,2] = 5 v[302,2] = 5 v[303,2] = 7 v[304,2] = 1 v[305,2] = 1 v[306,2] = 7 v[307,2] = 1 v[308,2] = 7 v[309,2] = 7 v[310,2] = 1 v[311,2] = 1 v[312,2] = 3 v[313,2] = 3 v[314,2] = 3 v[315,2] = 7 v[316,2] = 7 v[317,2] = 5 v[318,2] = 3 v[319,2] = 7 v[320,2] = 3 v[321,2] = 1 v[322,2] = 3 v[323,2] = 7 v[324,2] = 5 v[325,2] = 3 v[326,2] = 3 v[327,2] = 5 v[328,2] = 7 v[329,2] = 1 v[330,2] = 1 v[331,2] = 5 v[332,2] = 5 v[333,2] = 7 v[334,2] = 7 v[335,2] = 1 v[336,2] = 1 v[337,2] = 1 v[338,2] = 1 v[339,2] = 5 v[340,2] = 5 v[341,2] = 5 v[342,2] = 7 v[343,2] = 5 v[344,2] = 7 v[345,2] = 1 v[346,2] = 1 v[347,2] = 3 v[348,2] = 5 v[349,2] = 1 v[350,2] = 3 v[351,2] = 3 v[352,2] = 7 v[353,2] = 3 v[354,2] = 7 v[355,2] = 5 v[356,2] = 3 v[357,2] = 5 v[358,2] = 3 v[359,2] = 1 v[360,2] = 7 v[361,2] = 1 v[362,2] = 7 v[363,2] = 7 v[364,2] = 1 v[365,2] = 1 v[366,2] = 7 v[367,2] = 7 v[368,2] = 7 v[369,2] = 5 v[370,2] = 5 v[371,2] = 1 v[372,2] = 1 v[373,2] = 7 v[374,2] = 5 v[375,2] = 5 v[376,2] = 7 v[377,2] = 5 v[378,2] = 1 v[379,2] = 1 v[380,2] = 5 v[381,2] = 5 v[382,2] = 5 v[383,2] = 5 v[384,2] = 5 v[385,2] = 5 v[386,2] = 1 v[387,2] = 3 v[388,2] = 1 v[389,2] = 5 v[390,2] = 7 v[391,2] = 3 v[392,2] = 3 v[393,2] = 5 v[394,2] = 7 v[395,2] = 3 v[396,2] = 7 v[397,2] = 1 v[398,2] = 7 v[399,2] = 7 v[400,2] = 
1 v[401,2] = 3 v[402,2] = 5 v[403,2] = 1 v[404,2] = 5 v[405,2] = 5 v[406,2] = 3 v[407,2] = 7 v[408,2] = 3 v[409,2] = 7 v[410,2] = 7 v[411,2] = 5 v[412,2] = 7 v[413,2] = 5 v[414,2] = 7 v[415,2] = 1 v[416,2] = 1 v[417,2] = 5 v[418,2] = 3 v[419,2] = 5 v[420,2] = 1 v[421,2] = 5 v[422,2] = 3 v[423,2] = 7 v[424,2] = 1 v[425,2] = 5 v[426,2] = 7 v[427,2] = 7 v[428,2] = 3 v[429,2] = 5 v[430,2] = 1 v[431,2] = 3 v[432,2] = 5 v[433,2] = 1 v[434,2] = 5 v[435,2] = 3 v[436,2] = 3 v[437,2] = 3 v[438,2] = 7 v[439,2] = 3 v[440,2] = 5 v[441,2] = 1 v[442,2] = 3 v[443,2] = 7 v[444,2] = 7 v[445,2] = 3 v[446,2] = 7 v[447,2] = 5 v[448,2] = 3 v[449,2] = 3 v[450,2] = 1 v[451,2] = 7 v[452,2] = 5 v[453,2] = 1 v[454,2] = 1 v[455,2] = 3 v[456,2] = 7 v[457,2] = 1 v[458,2] = 7 v[459,2] = 1 v[460,2] = 7 v[461,2] = 3 v[462,2] = 7 v[463,2] = 3 v[464,2] = 5 v[465,2] = 7 v[466,2] = 3 v[467,2] = 5 v[468,2] = 3 v[469,2] = 1 v[470,2] = 1 v[471,2] = 1 v[472,2] = 5 v[473,2] = 7 v[474,2] = 7 v[475,2] = 3 v[476,2] = 3 v[477,2] = 1 v[478,2] = 1 v[479,2] = 1 v[480,2] = 5 v[481,2] = 5 v[482,2] = 7 v[483,2] = 3 v[484,2] = 1 v[485,2] = 1 v[486,2] = 3 v[487,2] = 3 v[488,2] = 7 v[489,2] = 3 v[490,2] = 3 v[491,2] = 5 v[492,2] = 1 v[493,2] = 3 v[494,2] = 7 v[495,2] = 3 v[496,2] = 3 v[497,2] = 7 v[498,2] = 3 v[499,2] = 5 v[500,2] = 7 v[501,2] = 5 v[502,2] = 7 v[503,2] = 7 v[504,2] = 3 v[505,2] = 3 v[506,2] = 5 v[507,2] = 1 v[508,2] = 3 v[509,2] = 5 v[510,2] = 3 v[511,2] = 1 v[512,2] = 3 v[513,2] = 5 v[514,2] = 1 v[515,2] = 1 v[516,2] = 3 v[517,2] = 7 v[518,2] = 7 v[519,2] = 1 v[520,2] = 5 v[521,2] = 1 v[522,2] = 3 v[523,2] = 7 v[524,2] = 3 v[525,2] = 7 v[526,2] = 3 v[527,2] = 5 v[528,2] = 1 v[529,2] = 7 v[530,2] = 1 v[531,2] = 1 v[532,2] = 3 v[533,2] = 5 v[534,2] = 3 v[535,2] = 7 v[536,2] = 1 v[537,2] = 5 v[538,2] = 5 v[539,2] = 1 v[540,2] = 1 v[541,2] = 3 v[542,2] = 1 v[543,2] = 3 v[544,2] = 3 v[545,2] = 7 v[546,2] = 1 v[547,2] = 7 v[548,2] = 3 v[549,2] = 1 v[550,2] = 7 v[551,2] = 3 v[552,2] = 1 v[553,2] = 7 v[554,2] 
= 3 v[555,2] = 5 v[556,2] = 3 v[557,2] = 5 v[558,2] = 7 v[559,2] = 3 v[560,2] = 3 v[561,2] = 3 v[562,2] = 5 v[563,2] = 1 v[564,2] = 7 v[565,2] = 7 v[566,2] = 1 v[567,2] = 3 v[568,2] = 1 v[569,2] = 3 v[570,2] = 7 v[571,2] = 7 v[572,2] = 1 v[573,2] = 3 v[574,2] = 7 v[575,2] = 3 v[576,2] = 1 v[577,2] = 5 v[578,2] = 3 v[579,2] = 1 v[580,2] = 1 v[581,2] = 1 v[582,2] = 5 v[583,2] = 3 v[584,2] = 3 v[585,2] = 7 v[586,2] = 1 v[587,2] = 5 v[588,2] = 3 v[589,2] = 5 v[590,2] = 1 v[591,2] = 3 v[592,2] = 1 v[593,2] = 3 v[594,2] = 1 v[595,2] = 5 v[596,2] = 7 v[597,2] = 7 v[598,2] = 1 v[599,2] = 1 v[600,2] = 5 v[601,2] = 3 v[602,2] = 1 v[603,2] = 5 v[604,2] = 1 v[605,2] = 1 v[606,2] = 7 v[607,2] = 7 v[608,2] = 3 v[609,2] = 5 v[610,2] = 5 v[611,2] = 1 v[612,2] = 7 v[613,2] = 1 v[614,2] = 5 v[615,2] = 1 v[616,2] = 1 v[617,2] = 3 v[618,2] = 1 v[619,2] = 5 v[620,2] = 7 v[621,2] = 5 v[622,2] = 7 v[623,2] = 7 v[624,2] = 1 v[625,2] = 5 v[626,2] = 1 v[627,2] = 1 v[628,2] = 3 v[629,2] = 5 v[630,2] = 1 v[631,2] = 5 v[632,2] = 5 v[633,2] = 3 v[634,2] = 1 v[635,2] = 3 v[636,2] = 1 v[637,2] = 5 v[638,2] = 5 v[639,2] = 3 v[640,2] = 3 v[641,2] = 3 v[642,2] = 3 v[643,2] = 1 v[644,2] = 1 v[645,2] = 3 v[646,2] = 1 v[647,2] = 3 v[648,2] = 5 v[649,2] = 5 v[650,2] = 7 v[651,2] = 5 v[652,2] = 5 v[653,2] = 7 v[654,2] = 5 v[655,2] = 7 v[656,2] = 1 v[657,2] = 3 v[658,2] = 7 v[659,2] = 7 v[660,2] = 3 v[661,2] = 5 v[662,2] = 5 v[663,2] = 7 v[664,2] = 5 v[665,2] = 5 v[666,2] = 3 v[667,2] = 3 v[668,2] = 3 v[669,2] = 1 v[670,2] = 7 v[671,2] = 1 v[672,2] = 5 v[673,2] = 5 v[674,2] = 5 v[675,2] = 3 v[676,2] = 3 v[677,2] = 5 v[678,2] = 1 v[679,2] = 3 v[680,2] = 1 v[681,2] = 3 v[682,2] = 3 v[683,2] = 3 v[684,2] = 7 v[685,2] = 1 v[686,2] = 7 v[687,2] = 7 v[688,2] = 3 v[689,2] = 7 v[690,2] = 1 v[691,2] = 1 v[692,2] = 5 v[693,2] = 7 v[694,2] = 1 v[695,2] = 7 v[696,2] = 1 v[697,2] = 7 v[698,2] = 7 v[699,2] = 1 v[700,2] = 3 v[701,2] = 7 v[702,2] = 5 v[703,2] = 1 v[704,2] = 3 v[705,2] = 5 v[706,2] = 5 v[707,2] = 5 
v[708,2] = 1 v[709,2] = 1 v[710,2] = 7 v[711,2] = 1 v[712,2] = 7 v[713,2] = 1 v[714,2] = 7 v[715,2] = 7 v[716,2] = 3 v[717,2] = 1 v[718,2] = 1 v[719,2] = 5 v[720,2] = 1 v[721,2] = 5 v[722,2] = 1 v[723,2] = 5 v[724,2] = 3 v[725,2] = 5 v[726,2] = 5 v[727,2] = 5 v[728,2] = 5 v[729,2] = 5 v[730,2] = 3 v[731,2] = 3 v[732,2] = 7 v[733,2] = 3 v[734,2] = 3 v[735,2] = 5 v[736,2] = 5 v[737,2] = 3 v[738,2] = 7 v[739,2] = 1 v[740,2] = 5 v[741,2] = 7 v[742,2] = 5 v[743,2] = 1 v[744,2] = 5 v[745,2] = 5 v[746,2] = 3 v[747,2] = 5 v[748,2] = 5 v[749,2] = 7 v[750,2] = 5 v[751,2] = 3 v[752,2] = 5 v[753,2] = 5 v[754,2] = 5 v[755,2] = 1 v[756,2] = 5 v[757,2] = 5 v[758,2] = 5 v[759,2] = 5 v[760,2] = 1 v[761,2] = 3 v[762,2] = 5 v[763,2] = 3 v[764,2] = 1 v[765,2] = 7 v[766,2] = 5 v[767,2] = 5 v[768,2] = 7 v[769,2] = 1 v[770,2] = 5 v[771,2] = 3 v[772,2] = 3 v[773,2] = 1 v[774,2] = 5 v[775,2] = 3 v[776,2] = 7 v[777,2] = 1 v[778,2] = 7 v[779,2] = 5 v[780,2] = 1 v[781,2] = 1 v[782,2] = 3 v[783,2] = 1 v[784,2] = 1 v[785,2] = 7 v[786,2] = 1 v[787,2] = 5 v[788,2] = 5 v[789,2] = 3 v[790,2] = 7 v[791,2] = 3 v[792,2] = 7 v[793,2] = 5 v[794,2] = 3 v[795,2] = 1 v[796,2] = 1 v[797,2] = 3 v[798,2] = 1 v[799,2] = 3 v[800,2] = 5 v[801,2] = 5 v[802,2] = 7 v[803,2] = 5 v[804,2] = 3 v[805,2] = 7 v[806,2] = 7 v[807,2] = 7 v[808,2] = 3 v[809,2] = 7 v[810,2] = 3 v[811,2] = 7 v[812,2] = 1 v[813,2] = 3 v[814,2] = 1 v[815,2] = 7 v[816,2] = 7 v[817,2] = 1 v[818,2] = 7 v[819,2] = 3 v[820,2] = 7 v[821,2] = 3 v[822,2] = 7 v[823,2] = 3 v[824,2] = 7 v[825,2] = 3 v[826,2] = 5 v[827,2] = 1 v[828,2] = 1 v[829,2] = 7 v[830,2] = 3 v[831,2] = 1 v[832,2] = 5 v[833,2] = 5 v[834,2] = 7 v[835,2] = 1 v[836,2] = 5 v[837,2] = 5 v[838,2] = 5 v[839,2] = 7 v[840,2] = 1 v[841,2] = 5 v[842,2] = 5 v[843,2] = 1 v[844,2] = 5 v[845,2] = 5 v[846,2] = 3 v[847,2] = 1 v[848,2] = 3 v[849,2] = 1 v[850,2] = 7 v[851,2] = 3 v[852,2] = 1 v[853,2] = 3 v[854,2] = 5 v[855,2] = 7 v[856,2] = 7 v[857,2] = 7 v[858,2] = 1 v[859,2] = 1 v[860,2] = 7 v[861,2] = 
3 v[862,2] = 1 v[863,2] = 5 v[864,2] = 5 v[865,2] = 5 v[866,2] = 1 v[867,2] = 1 v[868,2] = 1 v[869,2] = 1 v[870,2] = 1 v[871,2] = 5 v[872,2] = 3 v[873,2] = 5 v[874,2] = 1 v[875,2] = 3 v[876,2] = 5 v[877,2] = 3 v[878,2] = 1 v[879,2] = 1 v[880,2] = 1 v[881,2] = 1 v[882,2] = 3 v[883,2] = 7 v[884,2] = 3 v[885,2] = 7 v[886,2] = 5 v[887,2] = 7 v[888,2] = 1 v[889,2] = 5 v[890,2] = 5 v[891,2] = 7 v[892,2] = 5 v[893,2] = 3 v[894,2] = 3 v[895,2] = 7 v[896,2] = 5 v[897,2] = 3 v[898,2] = 1 v[899,2] = 1 v[900,2] = 3 v[901,2] = 1 v[902,2] = 3 v[903,2] = 1 v[904,2] = 1 v[905,2] = 3 v[906,2] = 7 v[907,2] = 1 v[908,2] = 7 v[909,2] = 1 v[910,2] = 1 v[911,2] = 5 v[912,2] = 1 v[913,2] = 7 v[914,2] = 5 v[915,2] = 3 v[916,2] = 7 v[917,2] = 3 v[918,2] = 5 v[919,2] = 3 v[920,2] = 1 v[921,2] = 1 v[922,2] = 5 v[923,2] = 5 v[924,2] = 1 v[925,2] = 7 v[926,2] = 7 v[927,2] = 3 v[928,2] = 7 v[929,2] = 3 v[930,2] = 7 v[931,2] = 1 v[932,2] = 5 v[933,2] = 1 v[934,2] = 5 v[935,2] = 3 v[936,2] = 7 v[937,2] = 3 v[938,2] = 5 v[939,2] = 7 v[940,2] = 7 v[941,2] = 7 v[942,2] = 3 v[943,2] = 3 v[944,2] = 1 v[945,2] = 1 v[946,2] = 5 v[947,2] = 5 v[948,2] = 3 v[949,2] = 7 v[950,2] = 1 v[951,2] = 1 v[952,2] = 1 v[953,2] = 3 v[954,2] = 5 v[955,2] = 3 v[956,2] = 1 v[957,2] = 1 v[958,2] = 3 v[959,2] = 3 v[960,2] = 7 v[961,2] = 5 v[962,2] = 1 v[963,2] = 1 v[964,2] = 3 v[965,2] = 7 v[966,2] = 1 v[967,2] = 5 v[968,2] = 7 v[969,2] = 3 v[970,2] = 7 v[971,2] = 5 v[972,2] = 5 v[973,2] = 7 v[974,2] = 3 v[975,2] = 5 v[976,2] = 3 v[977,2] = 1 v[978,2] = 5 v[979,2] = 3 v[980,2] = 1 v[981,2] = 1 v[982,2] = 7 v[983,2] = 5 v[984,2] = 1 v[985,2] = 7 v[986,2] = 3 v[987,2] = 7 v[988,2] = 5 v[989,2] = 1 v[990,2] = 7 v[991,2] = 1 v[992,2] = 7 v[993,2] = 7 v[994,2] = 1 v[995,2] = 1 v[996,2] = 7 v[997,2] = 1 v[998,2] = 5 v[999,2] = 5 v[1000,2] = 1 v[1001,2] = 1 v[1002,2] = 7 v[1003,2] = 5 v[1004,2] = 7 v[1005,2] = 1 v[1006,2] = 5 v[1007,2] = 3 v[1008,2] = 5 v[1009,2] = 3 v[1010,2] = 3 v[1011,2] = 7 v[1012,2] = 1 v[1013,2] = 5 
v[1014,2] = 1 v[1015,2] = 1 v[1016,2] = 5 v[1017,2] = 5 v[1018,2] = 3 v[1019,2] = 3 v[1020,2] = 7 v[1021,2] = 5 v[1022,2] = 5 v[1023,2] = 1 v[1024,2] = 1 v[1025,2] = 1 v[1026,2] = 3 v[1027,2] = 1 v[1028,2] = 5 v[1029,2] = 7 v[1030,2] = 7 v[1031,2] = 1 v[1032,2] = 7 v[1033,2] = 5 v[1034,2] = 7 v[1035,2] = 3 v[1036,2] = 7 v[1037,2] = 3 v[1038,2] = 1 v[1039,2] = 3 v[1040,2] = 7 v[1041,2] = 3 v[1042,2] = 1 v[1043,2] = 5 v[1044,2] = 5 v[1045,2] = 3 v[1046,2] = 5 v[1047,2] = 1 v[1048,2] = 3 v[1049,2] = 5 v[1050,2] = 5 v[1051,2] = 5 v[1052,2] = 1 v[1053,2] = 1 v[1054,2] = 7 v[1055,2] = 7 v[1056,2] = 1 v[1057,2] = 5 v[1058,2] = 5 v[1059,2] = 1 v[1060,2] = 3 v[1061,2] = 5 v[1062,2] = 1 v[1063,2] = 5 v[1064,2] = 3 v[1065,2] = 5 v[1066,2] = 3 v[1067,2] = 3 v[1068,2] = 7 v[1069,2] = 5 v[1070,2] = 7 v[1071,2] = 3 v[1072,2] = 7 v[1073,2] = 3 v[1074,2] = 1 v[1075,2] = 3 v[1076,2] = 7 v[1077,2] = 7 v[1078,2] = 3 v[1079,2] = 3 v[1080,2] = 1 v[1081,2] = 1 v[1082,2] = 3 v[1083,2] = 3 v[1084,2] = 3 v[1085,2] = 3 v[1086,2] = 3 v[1087,2] = 5 v[1088,2] = 5 v[1089,2] = 3 v[1090,2] = 3 v[1091,2] = 3 v[1092,2] = 1 v[1093,2] = 3 v[1094,2] = 5 v[1095,2] = 7 v[1096,2] = 7 v[1097,2] = 1 v[1098,2] = 5 v[1099,2] = 7 v[1100,2] = 3 v[1101,2] = 7 v[1102,2] = 1 v[1103,2] = 1 v[1104,2] = 3 v[1105,2] = 5 v[1106,2] = 7 v[1107,2] = 5 v[1108,2] = 3 v[1109,2] = 3 v[1110,2] = 3 v[5,3] = 1 v[6,3] = 7 v[7,3] = 9 v[8,3] = 13 v[9,3] = 11 v[10,3] = 1 v[11,3] = 3 v[12,3] = 7 v[13,3] = 9 v[14,3] = 5 v[15,3] = 13 v[16,3] = 13 v[17,3] = 11 v[18,3] = 3 v[19,3] = 15 v[20,3] = 5 v[21,3] = 3 v[22,3] = 15 v[23,3] = 7 v[24,3] = 9 v[25,3] = 13 v[26,3] = 9 v[27,3] = 1 v[28,3] = 11 v[29,3] = 7 v[30,3] = 5 v[31,3] = 15 v[32,3] = 1 v[33,3] = 15 v[34,3] = 11 v[35,3] = 5 v[36,3] = 11 v[37,3] = 1 v[38,3] = 7 v[39,3] = 9 v[40,3] = 7 v[41,3] = 7 v[42,3] = 1 v[43,3] = 15 v[44,3] = 15 v[45,3] = 15 v[46,3] = 13 v[47,3] = 3 v[48,3] = 3 v[49,3] = 15 v[50,3] = 5 v[51,3] = 9 v[52,3] = 7 v[53,3] = 13 v[54,3] = 3 v[55,3] = 7 v[56,3] = 5 
v[57,3] = 11 v[58,3] = 9 v[59,3] = 1 v[60,3] = 9 v[61,3] = 1 v[62,3] = 5 v[63,3] = 7 v[64,3] = 13 v[65,3] = 9 v[66,3] = 9 v[67,3] = 1 v[68,3] = 7 v[69,3] = 3 v[70,3] = 5 v[71,3] = 1 v[72,3] = 11 v[73,3] = 11 v[74,3] = 13 v[75,3] = 7 v[76,3] = 7 v[77,3] = 9 v[78,3] = 9 v[79,3] = 1 v[80,3] = 1 v[81,3] = 3 v[82,3] = 9 v[83,3] = 15 v[84,3] = 1 v[85,3] = 5 v[86,3] = 13 v[87,3] = 1 v[88,3] = 9 v[89,3] = 9 v[90,3] = 9 v[91,3] = 9 v[92,3] = 9 v[93,3] = 13 v[94,3] = 11 v[95,3] = 3 v[96,3] = 5 v[97,3] = 11 v[98,3] = 11 v[99,3] = 13 v[100,3] = 5 v[101,3] = 3 v[102,3] = 15 v[103,3] = 1 v[104,3] = 11 v[105,3] = 11 v[106,3] = 7 v[107,3] = 13 v[108,3] = 15 v[109,3] = 11 v[110,3] = 13 v[111,3] = 9 v[112,3] = 11 v[113,3] = 15 v[114,3] = 15 v[115,3] = 13 v[116,3] = 3 v[117,3] = 15 v[118,3] = 7 v[119,3] = 9 v[120,3] = 11 v[121,3] = 13 v[122,3] = 11 v[123,3] = 9 v[124,3] = 9 v[125,3] = 5 v[126,3] = 13 v[127,3] = 9 v[128,3] = 1 v[129,3] = 13 v[130,3] = 7 v[131,3] = 7 v[132,3] = 7 v[133,3] = 7 v[134,3] = 7 v[135,3] = 5 v[136,3] = 9 v[137,3] = 7 v[138,3] = 13 v[139,3] = 11 v[140,3] = 9 v[141,3] = 11 v[142,3] = 15 v[143,3] = 3 v[144,3] = 13 v[145,3] = 11 v[146,3] = 1 v[147,3] = 11 v[148,3] = 3 v[149,3] = 3 v[150,3] = 9 v[151,3] = 11 v[152,3] = 1 v[153,3] = 7 v[154,3] = 1 v[155,3] = 15 v[156,3] = 15 v[157,3] = 3 v[158,3] = 1 v[159,3] = 9 v[160,3] = 1 v[161,3] = 7 v[162,3] = 13 v[163,3] = 11 v[164,3] = 3 v[165,3] = 13 v[166,3] = 11 v[167,3] = 7 v[168,3] = 3 v[169,3] = 3 v[170,3] = 5 v[171,3] = 13 v[172,3] = 11 v[173,3] = 5 v[174,3] = 11 v[175,3] = 1 v[176,3] = 3 v[177,3] = 9 v[178,3] = 7 v[179,3] = 15 v[180,3] = 7 v[181,3] = 5 v[182,3] = 13 v[183,3] = 7 v[184,3] = 9 v[185,3] = 13 v[186,3] = 15 v[187,3] = 13 v[188,3] = 9 v[189,3] = 7 v[190,3] = 15 v[191,3] = 7 v[192,3] = 9 v[193,3] = 5 v[194,3] = 11 v[195,3] = 11 v[196,3] = 13 v[197,3] = 13 v[198,3] = 9 v[199,3] = 3 v[200,3] = 5 v[201,3] = 13 v[202,3] = 9 v[203,3] = 11 v[204,3] = 15 v[205,3] = 11 v[206,3] = 7 v[207,3] = 1 v[208,3] = 7 
v[209,3] = 13 v[210,3] = 3 v[211,3] = 13 v[212,3] = 3 v[213,3] = 13 v[214,3] = 9 v[215,3] = 15 v[216,3] = 7 v[217,3] = 13 v[218,3] = 13 v[219,3] = 3 v[220,3] = 13 v[221,3] = 15 v[222,3] = 15 v[223,3] = 11 v[224,3] = 9 v[225,3] = 13 v[226,3] = 9 v[227,3] = 15 v[228,3] = 1 v[229,3] = 1 v[230,3] = 15 v[231,3] = 11 v[232,3] = 11 v[233,3] = 7 v[234,3] = 1 v[235,3] = 11 v[236,3] = 13 v[237,3] = 9 v[238,3] = 13 v[239,3] = 3 v[240,3] = 5 v[241,3] = 11 v[242,3] = 13 v[243,3] = 9 v[244,3] = 9 v[245,3] = 13 v[246,3] = 1 v[247,3] = 11 v[248,3] = 15 v[249,3] = 13 v[250,3] = 3 v[251,3] = 13 v[252,3] = 7 v[253,3] = 15 v[254,3] = 1 v[255,3] = 15 v[256,3] = 3 v[257,3] = 3 v[258,3] = 11 v[259,3] = 7 v[260,3] = 13 v[261,3] = 7 v[262,3] = 7 v[263,3] = 9 v[264,3] = 7 v[265,3] = 5 v[266,3] = 15 v[267,3] = 9 v[268,3] = 5 v[269,3] = 5 v[270,3] = 7 v[271,3] = 15 v[272,3] = 13 v[273,3] = 15 v[274,3] = 5 v[275,3] = 15 v[276,3] = 5 v[277,3] = 3 v[278,3] = 1 v[279,3] = 11 v[280,3] = 7 v[281,3] = 1 v[282,3] = 5 v[283,3] = 7 v[284,3] = 9 v[285,3] = 3 v[286,3] = 11 v[287,3] = 1 v[288,3] = 15 v[289,3] = 1 v[290,3] = 3 v[291,3] = 15 v[292,3] = 11 v[293,3] = 13 v[294,3] = 5 v[295,3] = 13 v[296,3] = 1 v[297,3] = 7 v[298,3] = 1 v[299,3] = 15 v[300,3] = 7 v[301,3] = 5 v[302,3] = 1 v[303,3] = 1 v[304,3] = 15 v[305,3] = 13 v[306,3] = 11 v[307,3] = 11 v[308,3] = 13 v[309,3] = 5 v[310,3] = 11 v[311,3] = 7 v[312,3] = 9 v[313,3] = 7 v[314,3] = 1 v[315,3] = 5 v[316,3] = 3 v[317,3] = 9 v[318,3] = 5 v[319,3] = 5 v[320,3] = 11 v[321,3] = 5 v[322,3] = 1 v[323,3] = 7 v[324,3] = 1 v[325,3] = 11 v[326,3] = 7 v[327,3] = 9 v[328,3] = 13 v[329,3] = 15 v[330,3] = 13 v[331,3] = 3 v[332,3] = 1 v[333,3] = 11 v[334,3] = 13 v[335,3] = 15 v[336,3] = 1 v[337,3] = 1 v[338,3] = 11 v[339,3] = 9 v[340,3] = 13 v[341,3] = 3 v[342,3] = 13 v[343,3] = 11 v[344,3] = 15 v[345,3] = 13 v[346,3] = 9 v[347,3] = 9 v[348,3] = 9 v[349,3] = 5 v[350,3] = 5 v[351,3] = 5 v[352,3] = 5 v[353,3] = 1 v[354,3] = 15 v[355,3] = 5 v[356,3] = 9 v[357,3] = 
11 v[358,3] = 7 v[359,3] = 15 v[360,3] = 5 v[361,3] = 3 v[362,3] = 13 v[363,3] = 5 v[364,3] = 3 v[365,3] = 11 v[366,3] = 5 v[367,3] = 1 v[368,3] = 11 v[369,3] = 13 v[370,3] = 9 v[371,3] = 11 v[372,3] = 3 v[373,3] = 7 v[374,3] = 13 v[375,3] = 15 v[376,3] = 1 v[377,3] = 7 v[378,3] = 11 v[379,3] = 1 v[380,3] = 13 v[381,3] = 1 v[382,3] = 15 v[383,3] = 1 v[384,3] = 9 v[385,3] = 7 v[386,3] = 3 v[387,3] = 9 v[388,3] = 11 v[389,3] = 1 v[390,3] = 9 v[391,3] = 13 v[392,3] = 13 v[393,3] = 3 v[394,3] = 11 v[395,3] = 7 v[396,3] = 9 v[397,3] = 1 v[398,3] = 7 v[399,3] = 15 v[400,3] = 9 v[401,3] = 1 v[402,3] = 5 v[403,3] = 13 v[404,3] = 5 v[405,3] = 11 v[406,3] = 3 v[407,3] = 9 v[408,3] = 15 v[409,3] = 11 v[410,3] = 13 v[411,3] = 5 v[412,3] = 1 v[413,3] = 7 v[414,3] = 7 v[415,3] = 5 v[416,3] = 13 v[417,3] = 7 v[418,3] = 7 v[419,3] = 9 v[420,3] = 5 v[421,3] = 11 v[422,3] = 11 v[423,3] = 1 v[424,3] = 1 v[425,3] = 15 v[426,3] = 3 v[427,3] = 13 v[428,3] = 9 v[429,3] = 13 v[430,3] = 9 v[431,3] = 9 v[432,3] = 11 v[433,3] = 5 v[434,3] = 5 v[435,3] = 13 v[436,3] = 15 v[437,3] = 3 v[438,3] = 9 v[439,3] = 15 v[440,3] = 3 v[441,3] = 11 v[442,3] = 11 v[443,3] = 15 v[444,3] = 15 v[445,3] = 3 v[446,3] = 11 v[447,3] = 15 v[448,3] = 15 v[449,3] = 3 v[450,3] = 1 v[451,3] = 3 v[452,3] = 1 v[453,3] = 3 v[454,3] = 3 v[455,3] = 1 v[456,3] = 3 v[457,3] = 13 v[458,3] = 1 v[459,3] = 11 v[460,3] = 5 v[461,3] = 15 v[462,3] = 7 v[463,3] = 15 v[464,3] = 9 v[465,3] = 1 v[466,3] = 7 v[467,3] = 1 v[468,3] = 9 v[469,3] = 11 v[470,3] = 15 v[471,3] = 1 v[472,3] = 13 v[473,3] = 9 v[474,3] = 13 v[475,3] = 11 v[476,3] = 7 v[477,3] = 3 v[478,3] = 7 v[479,3] = 3 v[480,3] = 13 v[481,3] = 7 v[482,3] = 9 v[483,3] = 7 v[484,3] = 7 v[485,3] = 3 v[486,3] = 3 v[487,3] = 9 v[488,3] = 9 v[489,3] = 7 v[490,3] = 5 v[491,3] = 11 v[492,3] = 13 v[493,3] = 13 v[494,3] = 7 v[495,3] = 7 v[496,3] = 15 v[497,3] = 9 v[498,3] = 5 v[499,3] = 5 v[500,3] = 3 v[501,3] = 3 v[502,3] = 13 v[503,3] = 3 v[504,3] = 9 v[505,3] = 3 v[506,3] = 1 
v[507,3] = 11 v[508,3] = 1 v[509,3] = 3 v[510,3] = 11 v[511,3] = 15 v[512,3] = 11 v[513,3] = 11 v[514,3] = 11 v[515,3] = 9 v[516,3] = 13 v[517,3] = 7 v[518,3] = 9 v[519,3] = 15 v[520,3] = 9 v[521,3] = 11 v[522,3] = 1 v[523,3] = 3 v[524,3] = 3 v[525,3] = 9 v[526,3] = 7 v[527,3] = 15 v[528,3] = 13 v[529,3] = 13 v[530,3] = 7 v[531,3] = 15 v[532,3] = 9 v[533,3] = 13 v[534,3] = 9 v[535,3] = 15 v[536,3] = 13 v[537,3] = 15 v[538,3] = 9 v[539,3] = 13 v[540,3] = 1 v[541,3] = 11 v[542,3] = 7 v[543,3] = 11 v[544,3] = 3 v[545,3] = 13 v[546,3] = 5 v[547,3] = 1 v[548,3] = 7 v[549,3] = 15 v[550,3] = 3 v[551,3] = 13 v[552,3] = 7 v[553,3] = 13 v[554,3] = 13 v[555,3] = 11 v[556,3] = 3 v[557,3] = 5 v[558,3] = 3 v[559,3] = 13 v[560,3] = 11 v[561,3] = 9 v[562,3] = 9 v[563,3] = 3 v[564,3] = 11 v[565,3] = 11 v[566,3] = 7 v[567,3] = 9 v[568,3] = 13 v[569,3] = 11 v[570,3] = 7 v[571,3] = 15 v[572,3] = 13 v[573,3] = 7 v[574,3] = 5 v[575,3] = 3 v[576,3] = 1 v[577,3] = 5 v[578,3] = 15 v[579,3] = 15 v[580,3] = 3 v[581,3] = 11 v[582,3] = 1 v[583,3] = 7 v[584,3] = 3 v[585,3] = 15 v[586,3] = 11 v[587,3] = 5 v[588,3] = 5 v[589,3] = 3 v[590,3] = 5 v[591,3] = 5 v[592,3] = 1 v[593,3] = 15 v[594,3] = 5 v[595,3] = 1 v[596,3] = 5 v[597,3] = 3 v[598,3] = 7 v[599,3] = 5 v[600,3] = 11 v[601,3] = 3 v[602,3] = 13 v[603,3] = 9 v[604,3] = 13 v[605,3] = 15 v[606,3] = 5 v[607,3] = 3 v[608,3] = 5 v[609,3] = 9 v[610,3] = 5 v[611,3] = 3 v[612,3] = 11 v[613,3] = 1 v[614,3] = 13 v[615,3] = 9 v[616,3] = 15 v[617,3] = 3 v[618,3] = 5 v[619,3] = 11 v[620,3] = 9 v[621,3] = 1 v[622,3] = 3 v[623,3] = 15 v[624,3] = 9 v[625,3] = 9 v[626,3] = 9 v[627,3] = 11 v[628,3] = 7 v[629,3] = 5 v[630,3] = 13 v[631,3] = 1 v[632,3] = 15 v[633,3] = 3 v[634,3] = 13 v[635,3] = 9 v[636,3] = 13 v[637,3] = 5 v[638,3] = 1 v[639,3] = 5 v[640,3] = 1 v[641,3] = 13 v[642,3] = 13 v[643,3] = 7 v[644,3] = 7 v[645,3] = 1 v[646,3] = 9 v[647,3] = 5 v[648,3] = 11 v[649,3] = 9 v[650,3] = 11 v[651,3] = 13 v[652,3] = 3 v[653,3] = 15 v[654,3] = 15 v[655,3] = 13 
v[656,3] = 15 v[657,3] = 7 v[658,3] = 5 v[659,3] = 7 v[660,3] = 9 v[661,3] = 7 v[662,3] = 9 v[663,3] = 9 v[664,3] = 9 v[665,3] = 11 v[666,3] = 9 v[667,3] = 3 v[668,3] = 11 v[669,3] = 15 v[670,3] = 13 v[671,3] = 13 v[672,3] = 5 v[673,3] = 9 v[674,3] = 15 v[675,3] = 1 v[676,3] = 1 v[677,3] = 9 v[678,3] = 5 v[679,3] = 13 v[680,3] = 3 v[681,3] = 13 v[682,3] = 15 v[683,3] = 3 v[684,3] = 1 v[685,3] = 3 v[686,3] = 11 v[687,3] = 13 v[688,3] = 1 v[689,3] = 15 v[690,3] = 9 v[691,3] = 9 v[692,3] = 3 v[693,3] = 1 v[694,3] = 9 v[695,3] = 1 v[696,3] = 9 v[697,3] = 1 v[698,3] = 13 v[699,3] = 11 v[700,3] = 15 v[701,3] = 7 v[702,3] = 11 v[703,3] = 15 v[704,3] = 13 v[705,3] = 15 v[706,3] = 1 v[707,3] = 9 v[708,3] = 9 v[709,3] = 7 v[710,3] = 3 v[711,3] = 5 v[712,3] = 11 v[713,3] = 7 v[714,3] = 3 v[715,3] = 9 v[716,3] = 5 v[717,3] = 15 v[718,3] = 7 v[719,3] = 5 v[720,3] = 3 v[721,3] = 13 v[722,3] = 7 v[723,3] = 1 v[724,3] = 1 v[725,3] = 9 v[726,3] = 15 v[727,3] = 15 v[728,3] = 15 v[729,3] = 11 v[730,3] = 3 v[731,3] = 5 v[732,3] = 15 v[733,3] = 13 v[734,3] = 7 v[735,3] = 15 v[736,3] = 15 v[737,3] = 11 v[738,3] = 11 v[739,3] = 9 v[740,3] = 5 v[741,3] = 15 v[742,3] = 9 v[743,3] = 7 v[744,3] = 3 v[745,3] = 13 v[746,3] = 1 v[747,3] = 1 v[748,3] = 5 v[749,3] = 1 v[750,3] = 3 v[751,3] = 1 v[752,3] = 7 v[753,3] = 1 v[754,3] = 1 v[755,3] = 5 v[756,3] = 1 v[757,3] = 11 v[758,3] = 11 v[759,3] = 9 v[760,3] = 9 v[761,3] = 5 v[762,3] = 13 v[763,3] = 7 v[764,3] = 7 v[765,3] = 7 v[766,3] = 1 v[767,3] = 1 v[768,3] = 9 v[769,3] = 9 v[770,3] = 11 v[771,3] = 11 v[772,3] = 15 v[773,3] = 7 v[774,3] = 5 v[775,3] = 5 v[776,3] = 3 v[777,3] = 11 v[778,3] = 1 v[779,3] = 3 v[780,3] = 7 v[781,3] = 13 v[782,3] = 7 v[783,3] = 7 v[784,3] = 7 v[785,3] = 3 v[786,3] = 15 v[787,3] = 15 v[788,3] = 11 v[789,3] = 9 v[790,3] = 3 v[791,3] = 9 v[792,3] = 3 v[793,3] = 15 v[794,3] = 13 v[795,3] = 5 v[796,3] = 3 v[797,3] = 3 v[798,3] = 3 v[799,3] = 5 v[800,3] = 9 v[801,3] = 15 v[802,3] = 9 v[803,3] = 9 v[804,3] = 1 v[805,3] = 5 
v[806,3] = 9 v[807,3] = 9 v[808,3] = 15 v[809,3] = 5 v[810,3] = 15 v[811,3] = 7 v[812,3] = 9 v[813,3] = 1 v[814,3] = 9 v[815,3] = 9 v[816,3] = 5 v[817,3] = 11 v[818,3] = 5 v[819,3] = 15 v[820,3] = 15 v[821,3] = 11 v[822,3] = 7 v[823,3] = 7 v[824,3] = 7 v[825,3] = 1 v[826,3] = 1 v[827,3] = 11 v[828,3] = 11 v[829,3] = 13 v[830,3] = 15 v[831,3] = 3 v[832,3] = 13 v[833,3] = 5 v[834,3] = 1 v[835,3] = 7 v[836,3] = 1 v[837,3] = 11 v[838,3] = 3 v[839,3] = 13 v[840,3] = 15 v[841,3] = 3 v[842,3] = 5 v[843,3] = 3 v[844,3] = 5 v[845,3] = 7 v[846,3] = 3 v[847,3] = 9 v[848,3] = 9 v[849,3] = 5 v[850,3] = 1 v[851,3] = 7 v[852,3] = 11 v[853,3] = 9 v[854,3] = 3 v[855,3] = 5 v[856,3] = 11 v[857,3] = 13 v[858,3] = 13 v[859,3] = 13 v[860,3] = 9 v[861,3] = 15 v[862,3] = 5 v[863,3] = 7 v[864,3] = 1 v[865,3] = 15 v[866,3] = 11 v[867,3] = 9 v[868,3] = 15 v[869,3] = 15 v[870,3] = 13 v[871,3] = 13 v[872,3] = 13 v[873,3] = 1 v[874,3] = 11 v[875,3] = 9 v[876,3] = 15 v[877,3] = 9 v[878,3] = 5 v[879,3] = 15 v[880,3] = 5 v[881,3] = 7 v[882,3] = 3 v[883,3] = 11 v[884,3] = 3 v[885,3] = 15 v[886,3] = 7 v[887,3] = 13 v[888,3] = 11 v[889,3] = 7 v[890,3] = 3 v[891,3] = 7 v[892,3] = 13 v[893,3] = 5 v[894,3] = 13 v[895,3] = 15 v[896,3] = 5 v[897,3] = 13 v[898,3] = 9 v[899,3] = 1 v[900,3] = 15 v[901,3] = 11 v[902,3] = 5 v[903,3] = 5 v[904,3] = 1 v[905,3] = 11 v[906,3] = 3 v[907,3] = 3 v[908,3] = 7 v[909,3] = 1 v[910,3] = 9 v[911,3] = 7 v[912,3] = 15 v[913,3] = 9 v[914,3] = 9 v[915,3] = 3 v[916,3] = 11 v[917,3] = 15 v[918,3] = 7 v[919,3] = 1 v[920,3] = 3 v[921,3] = 1 v[922,3] = 1 v[923,3] = 1 v[924,3] = 9 v[925,3] = 1 v[926,3] = 5 v[927,3] = 15 v[928,3] = 15 v[929,3] = 7 v[930,3] = 5 v[931,3] = 5 v[932,3] = 7 v[933,3] = 9 v[934,3] = 7 v[935,3] = 15 v[936,3] = 13 v[937,3] = 13 v[938,3] = 11 v[939,3] = 1 v[940,3] = 9 v[941,3] = 11 v[942,3] = 1 v[943,3] = 13 v[944,3] = 1 v[945,3] = 7 v[946,3] = 15 v[947,3] = 15 v[948,3] = 5 v[949,3] = 5 v[950,3] = 1 v[951,3] = 11 v[952,3] = 3 v[953,3] = 9 v[954,3] = 11 
v[955,3] = 9 v[956,3] = 9 v[957,3] = 9 v[958,3] = 1 v[959,3] = 9 v[960,3] = 3 v[961,3] = 5 v[962,3] = 15 v[963,3] = 1 v[964,3] = 1 v[965,3] = 9 v[966,3] = 7 v[967,3] = 3 v[968,3] = 3 v[969,3] = 1 v[970,3] = 9 v[971,3] = 9 v[972,3] = 11 v[973,3] = 9 v[974,3] = 9 v[975,3] = 13 v[976,3] = 13 v[977,3] = 3 v[978,3] = 13 v[979,3] = 11 v[980,3] = 13 v[981,3] = 5 v[982,3] = 1 v[983,3] = 5 v[984,3] = 5 v[985,3] = 9 v[986,3] = 9 v[987,3] = 3 v[988,3] = 13 v[989,3] = 13 v[990,3] = 9 v[991,3] = 15 v[992,3] = 9 v[993,3] = 11 v[994,3] = 7 v[995,3] = 11 v[996,3] = 9 v[997,3] = 13 v[998,3] = 9 v[999,3] = 1 v[1000,3] = 15 v[1001,3] = 9 v[1002,3] = 7 v[1003,3] = 7 v[1004,3] = 1 v[1005,3] = 7 v[1006,3] = 9 v[1007,3] = 9 v[1008,3] = 15 v[1009,3] = 1 v[1010,3] = 11 v[1011,3] = 1 v[1012,3] = 13 v[1013,3] = 13 v[1014,3] = 15 v[1015,3] = 9 v[1016,3] = 13 v[1017,3] = 7 v[1018,3] = 15 v[1019,3] = 3 v[1020,3] = 9 v[1021,3] = 3 v[1022,3] = 1 v[1023,3] = 13 v[1024,3] = 7 v[1025,3] = 5 v[1026,3] = 9 v[1027,3] = 3 v[1028,3] = 1 v[1029,3] = 7 v[1030,3] = 1 v[1031,3] = 1 v[1032,3] = 13 v[1033,3] = 3 v[1034,3] = 3 v[1035,3] = 11 v[1036,3] = 1 v[1037,3] = 7 v[1038,3] = 13 v[1039,3] = 15 v[1040,3] = 15 v[1041,3] = 5 v[1042,3] = 7 v[1043,3] = 13 v[1044,3] = 13 v[1045,3] = 15 v[1046,3] = 11 v[1047,3] = 13 v[1048,3] = 1 v[1049,3] = 13 v[1050,3] = 13 v[1051,3] = 3 v[1052,3] = 9 v[1053,3] = 15 v[1054,3] = 15 v[1055,3] = 11 v[1056,3] = 15 v[1057,3] = 9 v[1058,3] = 15 v[1059,3] = 1 v[1060,3] = 13 v[1061,3] = 15 v[1062,3] = 1 v[1063,3] = 1 v[1064,3] = 5 v[1065,3] = 11 v[1066,3] = 5 v[1067,3] = 1 v[1068,3] = 11 v[1069,3] = 11 v[1070,3] = 5 v[1071,3] = 3 v[1072,3] = 9 v[1073,3] = 1 v[1074,3] = 3 v[1075,3] = 5 v[1076,3] = 13 v[1077,3] = 9 v[1078,3] = 7 v[1079,3] = 7 v[1080,3] = 1 v[1081,3] = 9 v[1082,3] = 9 v[1083,3] = 15 v[1084,3] = 7 v[1085,3] = 5 v[1086,3] = 5 v[1087,3] = 15 v[1088,3] = 13 v[1089,3] = 9 v[1090,3] = 7 v[1091,3] = 13 v[1092,3] = 3 v[1093,3] = 13 v[1094,3] = 11 v[1095,3] = 13 v[1096,3] = 7 
v[1097,3] = 9 v[1098,3] = 13 v[1099,3] = 13 v[1100,3] = 13 v[1101,3] = 15 v[1102,3] = 9 v[1103,3] = 5 v[1104,3] = 5 v[1105,3] = 3 v[1106,3] = 3 v[1107,3] = 3 v[1108,3] = 1 v[1109,3] = 3 v[1110,3] = 15 v[7,4] = 9 v[8,4] = 3 v[9,4] = 27 v[10,4] = 15 v[11,4] = 29 v[12,4] = 21 v[13,4] = 23 v[14,4] = 19 v[15,4] = 11 v[16,4] = 25 v[17,4] = 7 v[18,4] = 13 v[19,4] = 17 v[20,4] = 1 v[21,4] = 25 v[22,4] = 29 v[23,4] = 3 v[24,4] = 31 v[25,4] = 11 v[26,4] = 5 v[27,4] = 23 v[28,4] = 27 v[29,4] = 19 v[30,4] = 21 v[31,4] = 5 v[32,4] = 1 v[33,4] = 17 v[34,4] = 13 v[35,4] = 7 v[36,4] = 15 v[37,4] = 9 v[38,4] = 31 v[39,4] = 25 v[40,4] = 3 v[41,4] = 5 v[42,4] = 23 v[43,4] = 7 v[44,4] = 3 v[45,4] = 17 v[46,4] = 23 v[47,4] = 3 v[48,4] = 3 v[49,4] = 21 v[50,4] = 25 v[51,4] = 25 v[52,4] = 23 v[53,4] = 11 v[54,4] = 19 v[55,4] = 3 v[56,4] = 11 v[57,4] = 31 v[58,4] = 7 v[59,4] = 9 v[60,4] = 5 v[61,4] = 17 v[62,4] = 23 v[63,4] = 17 v[64,4] = 17 v[65,4] = 25 v[66,4] = 13 v[67,4] = 11 v[68,4] = 31 v[69,4] = 27 v[70,4] = 19 v[71,4] = 17 v[72,4] = 23 v[73,4] = 7 v[74,4] = 5 v[75,4] = 11 v[76,4] = 19 v[77,4] = 19 v[78,4] = 7 v[79,4] = 13 v[80,4] = 21 v[81,4] = 21 v[82,4] = 7 v[83,4] = 9 v[84,4] = 11 v[85,4] = 1 v[86,4] = 5 v[87,4] = 21 v[88,4] = 11 v[89,4] = 13 v[90,4] = 25 v[91,4] = 9 v[92,4] = 7 v[93,4] = 7 v[94,4] = 27 v[95,4] = 15 v[96,4] = 25 v[97,4] = 15 v[98,4] = 21 v[99,4] = 17 v[100,4] = 19 v[101,4] = 19 v[102,4] = 21 v[103,4] = 5 v[104,4] = 11 v[105,4] = 3 v[106,4] = 5 v[107,4] = 29 v[108,4] = 31 v[109,4] = 29 v[110,4] = 5 v[111,4] = 5 v[112,4] = 1 v[113,4] = 31 v[114,4] = 27 v[115,4] = 11 v[116,4] = 13 v[117,4] = 1 v[118,4] = 3 v[119,4] = 7 v[120,4] = 11 v[121,4] = 7 v[122,4] = 3 v[123,4] = 23 v[124,4] = 13 v[125,4] = 31 v[126,4] = 17 v[127,4] = 1 v[128,4] = 27 v[129,4] = 11 v[130,4] = 25 v[131,4] = 1 v[132,4] = 23 v[133,4] = 29 v[134,4] = 17 v[135,4] = 25 v[136,4] = 7 v[137,4] = 25 v[138,4] = 27 v[139,4] = 17 v[140,4] = 13 v[141,4] = 17 v[142,4] = 23 v[143,4] = 5 v[144,4] = 17 
v[145,4] = 5 v[146,4] = 13 v[147,4] = 11 v[148,4] = 21 v[149,4] = 5 v[150,4] = 11 v[151,4] = 5 v[152,4] = 9 v[153,4] = 31 v[154,4] = 19 v[155,4] = 17 v[156,4] = 9 v[157,4] = 9 v[158,4] = 27 v[159,4] = 21 v[160,4] = 15 v[161,4] = 15 v[162,4] = 1 v[163,4] = 1 v[164,4] = 29 v[165,4] = 5 v[166,4] = 31 v[167,4] = 11 v[168,4] = 17 v[169,4] = 23 v[170,4] = 19 v[171,4] = 21 v[172,4] = 25 v[173,4] = 15 v[174,4] = 11 v[175,4] = 5 v[176,4] = 5 v[177,4] = 1 v[178,4] = 19 v[179,4] = 19 v[180,4] = 19 v[181,4] = 7 v[182,4] = 13 v[183,4] = 21 v[184,4] = 17 v[185,4] = 17 v[186,4] = 25 v[187,4] = 23 v[188,4] = 19 v[189,4] = 23 v[190,4] = 15 v[191,4] = 13 v[192,4] = 5 v[193,4] = 19 v[194,4] = 25 v[195,4] = 9 v[196,4] = 7 v[197,4] = 3 v[198,4] = 21 v[199,4] = 17 v[200,4] = 25 v[201,4] = 1 v[202,4] = 27 v[203,4] = 25 v[204,4] = 27 v[205,4] = 25 v[206,4] = 9 v[207,4] = 13 v[208,4] = 3 v[209,4] = 17 v[210,4] = 25 v[211,4] = 23 v[212,4] = 9 v[213,4] = 25 v[214,4] = 9 v[215,4] = 13 v[216,4] = 17 v[217,4] = 17 v[218,4] = 3 v[219,4] = 15 v[220,4] = 7 v[221,4] = 7 v[222,4] = 29 v[223,4] = 3 v[224,4] = 19 v[225,4] = 29 v[226,4] = 29 v[227,4] = 19 v[228,4] = 29 v[229,4] = 13 v[230,4] = 15 v[231,4] = 25 v[232,4] = 27 v[233,4] = 1 v[234,4] = 3 v[235,4] = 9 v[236,4] = 9 v[237,4] = 13 v[238,4] = 31 v[239,4] = 29 v[240,4] = 31 v[241,4] = 5 v[242,4] = 15 v[243,4] = 29 v[244,4] = 1 v[245,4] = 19 v[246,4] = 5 v[247,4] = 9 v[248,4] = 19 v[249,4] = 5 v[250,4] = 15 v[251,4] = 3 v[252,4] = 5 v[253,4] = 7 v[254,4] = 15 v[255,4] = 17 v[256,4] = 17 v[257,4] = 23 v[258,4] = 11 v[259,4] = 9 v[260,4] = 23 v[261,4] = 19 v[262,4] = 3 v[263,4] = 17 v[264,4] = 1 v[265,4] = 27 v[266,4] = 9 v[267,4] = 9 v[268,4] = 17 v[269,4] = 13 v[270,4] = 25 v[271,4] = 29 v[272,4] = 23 v[273,4] = 29 v[274,4] = 11 v[275,4] = 31 v[276,4] = 25 v[277,4] = 21 v[278,4] = 29 v[279,4] = 19 v[280,4] = 27 v[281,4] = 31 v[282,4] = 3 v[283,4] = 5 v[284,4] = 3 v[285,4] = 3 v[286,4] = 13 v[287,4] = 21 v[288,4] = 9 v[289,4] = 29 v[290,4] = 3 
v[291,4] = 17 v[292,4] = 11 v[293,4] = 11 v[294,4] = 9 v[295,4] = 21 v[296,4] = 19 v[297,4] = 7 v[298,4] = 17 v[299,4] = 31 v[300,4] = 25 v[301,4] = 1 v[302,4] = 27 v[303,4] = 5 v[304,4] = 15 v[305,4] = 27 v[306,4] = 29 v[307,4] = 29 v[308,4] = 29 v[309,4] = 25 v[310,4] = 27 v[311,4] = 25 v[312,4] = 3 v[313,4] = 21 v[314,4] = 17 v[315,4] = 25 v[316,4] = 13 v[317,4] = 15 v[318,4] = 17 v[319,4] = 13 v[320,4] = 23 v[321,4] = 9 v[322,4] = 3 v[323,4] = 11 v[324,4] = 7 v[325,4] = 9 v[326,4] = 9 v[327,4] = 7 v[328,4] = 17 v[329,4] = 7 v[330,4] = 1 v[331,4] = 27 v[332,4] = 1 v[333,4] = 9 v[334,4] = 5 v[335,4] = 31 v[336,4] = 21 v[337,4] = 25 v[338,4] = 25 v[339,4] = 21 v[340,4] = 11 v[341,4] = 1 v[342,4] = 23 v[343,4] = 19 v[344,4] = 27 v[345,4] = 15 v[346,4] = 3 v[347,4] = 5 v[348,4] = 23 v[349,4] = 9 v[350,4] = 25 v[351,4] = 7 v[352,4] = 29 v[353,4] = 11 v[354,4] = 9 v[355,4] = 13 v[356,4] = 5 v[357,4] = 11 v[358,4] = 1 v[359,4] = 3 v[360,4] = 31 v[361,4] = 27 v[362,4] = 3 v[363,4] = 17 v[364,4] = 27 v[365,4] = 11 v[366,4] = 13 v[367,4] = 15 v[368,4] = 29 v[369,4] = 15 v[370,4] = 1 v[371,4] = 15 v[372,4] = 23 v[373,4] = 25 v[374,4] = 13 v[375,4] = 21 v[376,4] = 15 v[377,4] = 3 v[378,4] = 29 v[379,4] = 29 v[380,4] = 5 v[381,4] = 25 v[382,4] = 17 v[383,4] = 11 v[384,4] = 7 v[385,4] = 15 v[386,4] = 5 v[387,4] = 21 v[388,4] = 7 v[389,4] = 31 v[390,4] = 13 v[391,4] = 11 v[392,4] = 23 v[393,4] = 5 v[394,4] = 7 v[395,4] = 23 v[396,4] = 27 v[397,4] = 21 v[398,4] = 29 v[399,4] = 15 v[400,4] = 7 v[401,4] = 27 v[402,4] = 27 v[403,4] = 19 v[404,4] = 7 v[405,4] = 15 v[406,4] = 27 v[407,4] = 27 v[408,4] = 19 v[409,4] = 19 v[410,4] = 9 v[411,4] = 15 v[412,4] = 1 v[413,4] = 3 v[414,4] = 29 v[415,4] = 29 v[416,4] = 5 v[417,4] = 27 v[418,4] = 31 v[419,4] = 9 v[420,4] = 1 v[421,4] = 7 v[422,4] = 3 v[423,4] = 19 v[424,4] = 19 v[425,4] = 29 v[426,4] = 9 v[427,4] = 3 v[428,4] = 21 v[429,4] = 31 v[430,4] = 29 v[431,4] = 25 v[432,4] = 1 v[433,4] = 3 v[434,4] = 9 v[435,4] = 27 v[436,4] = 5 
v[437,4] = 27 v[438,4] = 25 v[439,4] = 21 v[440,4] = 11 v[441,4] = 29 v[442,4] = 31 v[443,4] = 27 v[444,4] = 21 v[445,4] = 29 v[446,4] = 17 v[447,4] = 9 v[448,4] = 17 v[449,4] = 13 v[450,4] = 11 v[451,4] = 25 v[452,4] = 15 v[453,4] = 21 v[454,4] = 11 v[455,4] = 19 v[456,4] = 31 v[457,4] = 3 v[458,4] = 19 v[459,4] = 5 v[460,4] = 3 v[461,4] = 3 v[462,4] = 9 v[463,4] = 13 v[464,4] = 13 v[465,4] = 3 v[466,4] = 29 v[467,4] = 7 v[468,4] = 5 v[469,4] = 9 v[470,4] = 23 v[471,4] = 13 v[472,4] = 21 v[473,4] = 23 v[474,4] = 21 v[475,4] = 31 v[476,4] = 11 v[477,4] = 7 v[478,4] = 7 v[479,4] = 3 v[480,4] = 23 v[481,4] = 1 v[482,4] = 23 v[483,4] = 5 v[484,4] = 9 v[485,4] = 17 v[486,4] = 21 v[487,4] = 1 v[488,4] = 17 v[489,4] = 29 v[490,4] = 7 v[491,4] = 5 v[492,4] = 17 v[493,4] = 13 v[494,4] = 25 v[495,4] = 17 v[496,4] = 9 v[497,4] = 19 v[498,4] = 9 v[499,4] = 5 v[500,4] = 7 v[501,4] = 21 v[502,4] = 19 v[503,4] = 13 v[504,4] = 9 v[505,4] = 7 v[506,4] = 3 v[507,4] = 9 v[508,4] = 3 v[509,4] = 15 v[510,4] = 31 v[511,4] = 29 v[512,4] = 29 v[513,4] = 25 v[514,4] = 13 v[515,4] = 9 v[516,4] = 21 v[517,4] = 9 v[518,4] = 31 v[519,4] = 7 v[520,4] = 15 v[521,4] = 5 v[522,4] = 31 v[523,4] = 7 v[524,4] = 15 v[525,4] = 27 v[526,4] = 25 v[527,4] = 19 v[528,4] = 9 v[529,4] = 9 v[530,4] = 25 v[531,4] = 25 v[532,4] = 23 v[533,4] = 1 v[534,4] = 9 v[535,4] = 7 v[536,4] = 11 v[537,4] = 15 v[538,4] = 19 v[539,4] = 15 v[540,4] = 27 v[541,4] = 17 v[542,4] = 11 v[543,4] = 11 v[544,4] = 31 v[545,4] = 13 v[546,4] = 25 v[547,4] = 25 v[548,4] = 9 v[549,4] = 7 v[550,4] = 13 v[551,4] = 29 v[552,4] = 19 v[553,4] = 5 v[554,4] = 19 v[555,4] = 31 v[556,4] = 25 v[557,4] = 13 v[558,4] = 25 v[559,4] = 15 v[560,4] = 5 v[561,4] = 9 v[562,4] = 29 v[563,4] = 31 v[564,4] = 9 v[565,4] = 29 v[566,4] = 27 v[567,4] = 25 v[568,4] = 27 v[569,4] = 11 v[570,4] = 17 v[571,4] = 5 v[572,4] = 17 v[573,4] = 3 v[574,4] = 23 v[575,4] = 15 v[576,4] = 9 v[577,4] = 9 v[578,4] = 17 v[579,4] = 17 v[580,4] = 31 v[581,4] = 11 v[582,4] = 19 
v[583,4] = 25 v[584,4] = 13 v[585,4] = 23 v[586,4] = 15 v[587,4] = 25 v[588,4] = 21 v[589,4] = 31 v[590,4] = 19 v[591,4] = 3 v[592,4] = 11 v[593,4] = 25 v[594,4] = 7 v[595,4] = 15 v[596,4] = 19 v[597,4] = 7 v[598,4] = 5 v[599,4] = 3 v[600,4] = 13 v[601,4] = 13 v[602,4] = 1 v[603,4] = 23 v[604,4] = 5 v[605,4] = 25 v[606,4] = 11 v[607,4] = 25 v[608,4] = 15 v[609,4] = 13 v[610,4] = 21 v[611,4] = 11 v[612,4] = 23 v[613,4] = 29 v[614,4] = 5 v[615,4] = 17 v[616,4] = 27 v[617,4] = 9 v[618,4] = 19 v[619,4] = 15 v[620,4] = 5 v[621,4] = 29 v[622,4] = 23 v[623,4] = 19 v[624,4] = 1 v[625,4] = 27 v[626,4] = 3 v[627,4] = 23 v[628,4] = 21 v[629,4] = 19 v[630,4] = 27 v[631,4] = 11 v[632,4] = 17 v[633,4] = 13 v[634,4] = 27 v[635,4] = 11 v[636,4] = 31 v[637,4] = 23 v[638,4] = 5 v[639,4] = 9 v[640,4] = 21 v[641,4] = 31 v[642,4] = 29 v[643,4] = 11 v[644,4] = 21 v[645,4] = 17 v[646,4] = 15 v[647,4] = 7 v[648,4] = 15 v[649,4] = 7 v[650,4] = 9 v[651,4] = 21 v[652,4] = 27 v[653,4] = 25 v[654,4] = 29 v[655,4] = 11 v[656,4] = 3 v[657,4] = 21 v[658,4] = 13 v[659,4] = 23 v[660,4] = 19 v[661,4] = 27 v[662,4] = 17 v[663,4] = 29 v[664,4] = 25 v[665,4] = 17 v[666,4] = 9 v[667,4] = 1 v[668,4] = 19 v[669,4] = 23 v[670,4] = 5 v[671,4] = 23 v[672,4] = 1 v[673,4] = 17 v[674,4] = 17 v[675,4] = 13 v[676,4] = 27 v[677,4] = 23 v[678,4] = 7 v[679,4] = 7 v[680,4] = 11 v[681,4] = 13 v[682,4] = 17 v[683,4] = 13 v[684,4] = 11 v[685,4] = 21 v[686,4] = 13 v[687,4] = 23 v[688,4] = 1 v[689,4] = 27 v[690,4] = 13 v[691,4] = 9 v[692,4] = 7 v[693,4] = 1 v[694,4] = 27 v[695,4] = 29 v[696,4] = 5 v[697,4] = 13 v[698,4] = 25 v[699,4] = 21 v[700,4] = 3 v[701,4] = 31 v[702,4] = 15 v[703,4] = 13 v[704,4] = 3 v[705,4] = 19 v[706,4] = 13 v[707,4] = 1 v[708,4] = 27 v[709,4] = 15 v[710,4] = 17 v[711,4] = 1 v[712,4] = 3 v[713,4] = 13 v[714,4] = 13 v[715,4] = 13 v[716,4] = 31 v[717,4] = 29 v[718,4] = 27 v[719,4] = 7 v[720,4] = 7 v[721,4] = 21 v[722,4] = 29 v[723,4] = 15 v[724,4] = 17 v[725,4] = 17 v[726,4] = 21 v[727,4] = 19 
v[728,4] = 17 v[729,4] = 3 v[730,4] = 15 v[731,4] = 5 v[732,4] = 27 v[733,4] = 27 v[734,4] = 3 v[735,4] = 31 v[736,4] = 31 v[737,4] = 7 v[738,4] = 21 v[739,4] = 3 v[740,4] = 13 v[741,4] = 11 v[742,4] = 17 v[743,4] = 27 v[744,4] = 25 v[745,4] = 1 v[746,4] = 9 v[747,4] = 7 v[748,4] = 29 v[749,4] = 27 v[750,4] = 21 v[751,4] = 23 v[752,4] = 13 v[753,4] = 25 v[754,4] = 29 v[755,4] = 15 v[756,4] = 17 v[757,4] = 29 v[758,4] = 9 v[759,4] = 15 v[760,4] = 3 v[761,4] = 21 v[762,4] = 15 v[763,4] = 17 v[764,4] = 17 v[765,4] = 31 v[766,4] = 9 v[767,4] = 9 v[768,4] = 23 v[769,4] = 19 v[770,4] = 25 v[771,4] = 3 v[772,4] = 1 v[773,4] = 11 v[774,4] = 27 v[775,4] = 29 v[776,4] = 1 v[777,4] = 31 v[778,4] = 29 v[779,4] = 25 v[780,4] = 29 v[781,4] = 1 v[782,4] = 23 v[783,4] = 29 v[784,4] = 25 v[785,4] = 13 v[786,4] = 3 v[787,4] = 31 v[788,4] = 25 v[789,4] = 5 v[790,4] = 5 v[791,4] = 11 v[792,4] = 3 v[793,4] = 21 v[794,4] = 9 v[795,4] = 23 v[796,4] = 7 v[797,4] = 11 v[798,4] = 23 v[799,4] = 11 v[800,4] = 1 v[801,4] = 1 v[802,4] = 3 v[803,4] = 23 v[804,4] = 25 v[805,4] = 23 v[806,4] = 1 v[807,4] = 23 v[808,4] = 3 v[809,4] = 27 v[810,4] = 9 v[811,4] = 27 v[812,4] = 3 v[813,4] = 23 v[814,4] = 25 v[815,4] = 19 v[816,4] = 29 v[817,4] = 29 v[818,4] = 13 v[819,4] = 27 v[820,4] = 5 v[821,4] = 9 v[822,4] = 29 v[823,4] = 29 v[824,4] = 13 v[825,4] = 17 v[826,4] = 3 v[827,4] = 23 v[828,4] = 19 v[829,4] = 7 v[830,4] = 13 v[831,4] = 3 v[832,4] = 19 v[833,4] = 23 v[834,4] = 5 v[835,4] = 29 v[836,4] = 29 v[837,4] = 13 v[838,4] = 13 v[839,4] = 5 v[840,4] = 19 v[841,4] = 5 v[842,4] = 17 v[843,4] = 9 v[844,4] = 11 v[845,4] = 11 v[846,4] = 29 v[847,4] = 27 v[848,4] = 23 v[849,4] = 19 v[850,4] = 17 v[851,4] = 25 v[852,4] = 13 v[853,4] = 1 v[854,4] = 13 v[855,4] = 3 v[856,4] = 11 v[857,4] = 1 v[858,4] = 17 v[859,4] = 29 v[860,4] = 1 v[861,4] = 13 v[862,4] = 17 v[863,4] = 9 v[864,4] = 17 v[865,4] = 21 v[866,4] = 1 v[867,4] = 11 v[868,4] = 1 v[869,4] = 1 v[870,4] = 25 v[871,4] = 5 v[872,4] = 7 v[873,4] = 29 
v[874,4] = 29 v[875,4] = 19 v[876,4] = 19 v[877,4] = 1 v[878,4] = 29 v[879,4] = 13 v[880,4] = 3 v[881,4] = 1 v[882,4] = 31 v[883,4] = 15 v[884,4] = 13 v[885,4] = 3 v[886,4] = 1 v[887,4] = 11 v[888,4] = 19 v[889,4] = 5 v[890,4] = 29 v[891,4] = 13 v[892,4] = 29 v[893,4] = 23 v[894,4] = 3 v[895,4] = 1 v[896,4] = 31 v[897,4] = 13 v[898,4] = 19 v[899,4] = 17 v[900,4] = 5 v[901,4] = 5 v[902,4] = 1 v[903,4] = 29 v[904,4] = 23 v[905,4] = 3 v[906,4] = 19 v[907,4] = 25 v[908,4] = 19 v[909,4] = 27 v[910,4] = 9 v[911,4] = 27 v[912,4] = 13 v[913,4] = 15 v[914,4] = 29 v[915,4] = 23 v[916,4] = 13 v[917,4] = 25 v[918,4] = 25 v[919,4] = 17 v[920,4] = 19 v[921,4] = 17 v[922,4] = 15 v[923,4] = 27 v[924,4] = 3 v[925,4] = 25 v[926,4] = 17 v[927,4] = 27 v[928,4] = 3 v[929,4] = 27 v[930,4] = 31 v[931,4] = 23 v[932,4] = 13 v[933,4] = 31 v[934,4] = 11 v[935,4] = 15 v[936,4] = 7 v[937,4] = 21 v[938,4] = 19 v[939,4] = 27 v[940,4] = 19 v[941,4] = 21 v[942,4] = 29 v[943,4] = 7 v[944,4] = 31 v[945,4] = 13 v[946,4] = 9 v[947,4] = 9 v[948,4] = 7 v[949,4] = 21 v[950,4] = 13 v[951,4] = 11 v[952,4] = 9 v[953,4] = 11 v[954,4] = 29 v[955,4] = 19 v[956,4] = 11 v[957,4] = 19 v[958,4] = 21 v[959,4] = 5 v[960,4] = 29 v[961,4] = 13 v[962,4] = 7 v[963,4] = 19 v[964,4] = 19 v[965,4] = 27 v[966,4] = 23 v[967,4] = 31 v[968,4] = 1 v[969,4] = 27 v[970,4] = 21 v[971,4] = 7 v[972,4] = 3 v[973,4] = 7 v[974,4] = 11 v[975,4] = 23 v[976,4] = 13 v[977,4] = 29 v[978,4] = 11 v[979,4] = 31 v[980,4] = 19 v[981,4] = 1 v[982,4] = 5 v[983,4] = 5 v[984,4] = 11 v[985,4] = 5 v[986,4] = 3 v[987,4] = 27 v[988,4] = 5 v[989,4] = 7 v[990,4] = 11 v[991,4] = 31 v[992,4] = 1 v[993,4] = 27 v[994,4] = 31 v[995,4] = 31 v[996,4] = 23 v[997,4] = 5 v[998,4] = 21 v[999,4] = 27 v[1000,4] = 9 v[1001,4] = 25 v[1002,4] = 3 v[1003,4] = 15 v[1004,4] = 19 v[1005,4] = 1 v[1006,4] = 19 v[1007,4] = 9 v[1008,4] = 5 v[1009,4] = 25 v[1010,4] = 21 v[1011,4] = 15 v[1012,4] = 25 v[1013,4] = 29 v[1014,4] = 15 v[1015,4] = 21 v[1016,4] = 11 v[1017,4] = 19 
v[1018,4] = 15 v[1019,4] = 3 v[1020,4] = 7 v[1021,4] = 13 v[1022,4] = 11 v[1023,4] = 25 v[1024,4] = 17 v[1025,4] = 1 v[1026,4] = 5 v[1027,4] = 31 v[1028,4] = 13 v[1029,4] = 29 v[1030,4] = 23 v[1031,4] = 9 v[1032,4] = 5 v[1033,4] = 29 v[1034,4] = 7 v[1035,4] = 17 v[1036,4] = 27 v[1037,4] = 7 v[1038,4] = 17 v[1039,4] = 31 v[1040,4] = 9 v[1041,4] = 31 v[1042,4] = 9 v[1043,4] = 9 v[1044,4] = 7 v[1045,4] = 21 v[1046,4] = 3 v[1047,4] = 3 v[1048,4] = 3 v[1049,4] = 9 v[1050,4] = 11 v[1051,4] = 21 v[1052,4] = 11 v[1053,4] = 31 v[1054,4] = 9 v[1055,4] = 25 v[1056,4] = 5 v[1057,4] = 1 v[1058,4] = 31 v[1059,4] = 13 v[1060,4] = 29 v[1061,4] = 9 v[1062,4] = 29 v[1063,4] = 1 v[1064,4] = 11 v[1065,4] = 19 v[1066,4] = 7 v[1067,4] = 27 v[1068,4] = 13 v[1069,4] = 31 v[1070,4] = 7 v[1071,4] = 31 v[1072,4] = 7 v[1073,4] = 25 v[1074,4] = 23 v[1075,4] = 21 v[1076,4] = 29 v[1077,4] = 11 v[1078,4] = 11 v[1079,4] = 13 v[1080,4] = 11 v[1081,4] = 27 v[1082,4] = 1 v[1083,4] = 23 v[1084,4] = 31 v[1085,4] = 21 v[1086,4] = 23 v[1087,4] = 21 v[1088,4] = 19 v[1089,4] = 31 v[1090,4] = 5 v[1091,4] = 31 v[1092,4] = 25 v[1093,4] = 25 v[1094,4] = 19 v[1095,4] = 17 v[1096,4] = 11 v[1097,4] = 25 v[1098,4] = 7 v[1099,4] = 13 v[1100,4] = 1 v[1101,4] = 29 v[1102,4] = 17 v[1103,4] = 23 v[1104,4] = 15 v[1105,4] = 7 v[1106,4] = 29 v[1107,4] = 17 v[1108,4] = 13 v[1109,4] = 3 v[1110,4] = 17 v[13,5] = 37 v[14,5] = 33 v[15,5] = 7 v[16,5] = 5 v[17,5] = 11 v[18,5] = 39 v[19,5] = 63 v[20,5] = 59 v[21,5] = 17 v[22,5] = 15 v[23,5] = 23 v[24,5] = 29 v[25,5] = 3 v[26,5] = 21 v[27,5] = 13 v[28,5] = 31 v[29,5] = 25 v[30,5] = 9 v[31,5] = 49 v[32,5] = 33 v[33,5] = 19 v[34,5] = 29 v[35,5] = 11 v[36,5] = 19 v[37,5] = 27 v[38,5] = 15 v[39,5] = 25 v[40,5] = 63 v[41,5] = 55 v[42,5] = 17 v[43,5] = 63 v[44,5] = 49 v[45,5] = 19 v[46,5] = 41 v[47,5] = 59 v[48,5] = 3 v[49,5] = 57 v[50,5] = 33 v[51,5] = 49 v[52,5] = 53 v[53,5] = 57 v[54,5] = 57 v[55,5] = 39 v[56,5] = 21 v[57,5] = 7 v[58,5] = 53 v[59,5] = 9 v[60,5] = 55 v[61,5] = 15 
v[62,5] = 59 v[63,5] = 19 v[64,5] = 49 v[65,5] = 31 v[66,5] = 3 v[67,5] = 39 v[68,5] = 5 v[69,5] = 5 v[70,5] = 41 v[71,5] = 9 v[72,5] = 19 v[73,5] = 9 v[74,5] = 57 v[75,5] = 25 v[76,5] = 1 v[77,5] = 15 v[78,5] = 51 v[79,5] = 11 v[80,5] = 19 v[81,5] = 61 v[82,5] = 53 v[83,5] = 29 v[84,5] = 19 v[85,5] = 11 v[86,5] = 9 v[87,5] = 21 v[88,5] = 19 v[89,5] = 43 v[90,5] = 13 v[91,5] = 13 v[92,5] = 41 v[93,5] = 25 v[94,5] = 31 v[95,5] = 9 v[96,5] = 11 v[97,5] = 19 v[98,5] = 5 v[99,5] = 53 v[100,5] = 37 v[101,5] = 7 v[102,5] = 51 v[103,5] = 45 v[104,5] = 7 v[105,5] = 7 v[106,5] = 61 v[107,5] = 23 v[108,5] = 45 v[109,5] = 7 v[110,5] = 59 v[111,5] = 41 v[112,5] = 1 v[113,5] = 29 v[114,5] = 61 v[115,5] = 37 v[116,5] = 27 v[117,5] = 47 v[118,5] = 15 v[119,5] = 31 v[120,5] = 35 v[121,5] = 31 v[122,5] = 17 v[123,5] = 51 v[124,5] = 13 v[125,5] = 25 v[126,5] = 45 v[127,5] = 5 v[128,5] = 5 v[129,5] = 33 v[130,5] = 39 v[131,5] = 5 v[132,5] = 47 v[133,5] = 29 v[134,5] = 35 v[135,5] = 47 v[136,5] = 63 v[137,5] = 45 v[138,5] = 37 v[139,5] = 47 v[140,5] = 59 v[141,5] = 21 v[142,5] = 59 v[143,5] = 33 v[144,5] = 51 v[145,5] = 9 v[146,5] = 27 v[147,5] = 13 v[148,5] = 25 v[149,5] = 43 v[150,5] = 3 v[151,5] = 17 v[152,5] = 21 v[153,5] = 59 v[154,5] = 61 v[155,5] = 27 v[156,5] = 47 v[157,5] = 57 v[158,5] = 11 v[159,5] = 17 v[160,5] = 39 v[161,5] = 1 v[162,5] = 63 v[163,5] = 21 v[164,5] = 59 v[165,5] = 17 v[166,5] = 13 v[167,5] = 31 v[168,5] = 3 v[169,5] = 31 v[170,5] = 7 v[171,5] = 9 v[172,5] = 27 v[173,5] = 37 v[174,5] = 23 v[175,5] = 31 v[176,5] = 9 v[177,5] = 45 v[178,5] = 43 v[179,5] = 31 v[180,5] = 63 v[181,5] = 21 v[182,5] = 39 v[183,5] = 51 v[184,5] = 27 v[185,5] = 7 v[186,5] = 53 v[187,5] = 11 v[188,5] = 1 v[189,5] = 59 v[190,5] = 39 v[191,5] = 23 v[192,5] = 49 v[193,5] = 23 v[194,5] = 7 v[195,5] = 55 v[196,5] = 59 v[197,5] = 3 v[198,5] = 19 v[199,5] = 35 v[200,5] = 13 v[201,5] = 9 v[202,5] = 13 v[203,5] = 15 v[204,5] = 23 v[205,5] = 9 v[206,5] = 7 v[207,5] = 43 v[208,5] = 55 v[209,5] = 
3 v[210,5] = 19 v[211,5] = 9 v[212,5] = 27 v[213,5] = 33 v[214,5] = 27 v[215,5] = 49 v[216,5] = 23 v[217,5] = 47 v[218,5] = 19 v[219,5] = 7 v[220,5] = 11 v[221,5] = 55 v[222,5] = 27 v[223,5] = 35 v[224,5] = 5 v[225,5] = 5 v[226,5] = 55 v[227,5] = 35 v[228,5] = 37 v[229,5] = 9 v[230,5] = 33 v[231,5] = 29 v[232,5] = 47 v[233,5] = 25 v[234,5] = 11 v[235,5] = 47 v[236,5] = 53 v[237,5] = 61 v[238,5] = 59 v[239,5] = 3 v[240,5] = 53 v[241,5] = 47 v[242,5] = 5 v[243,5] = 19 v[244,5] = 59 v[245,5] = 5 v[246,5] = 47 v[247,5] = 23 v[248,5] = 45 v[249,5] = 53 v[250,5] = 3 v[251,5] = 49 v[252,5] = 61 v[253,5] = 47 v[254,5] = 39 v[255,5] = 29 v[256,5] = 17 v[257,5] = 57 v[258,5] = 5 v[259,5] = 17 v[260,5] = 31 v[261,5] = 23 v[262,5] = 41 v[263,5] = 39 v[264,5] = 5 v[265,5] = 27 v[266,5] = 7 v[267,5] = 29 v[268,5] = 29 v[269,5] = 33 v[270,5] = 31 v[271,5] = 41 v[272,5] = 31 v[273,5] = 29 v[274,5] = 17 v[275,5] = 29 v[276,5] = 29 v[277,5] = 9 v[278,5] = 9 v[279,5] = 31 v[280,5] = 27 v[281,5] = 53 v[282,5] = 35 v[283,5] = 5 v[284,5] = 61 v[285,5] = 1 v[286,5] = 49 v[287,5] = 13 v[288,5] = 57 v[289,5] = 29 v[290,5] = 5 v[291,5] = 21 v[292,5] = 43 v[293,5] = 25 v[294,5] = 57 v[295,5] = 49 v[296,5] = 37 v[297,5] = 27 v[298,5] = 11 v[299,5] = 61 v[300,5] = 37 v[301,5] = 49 v[302,5] = 5 v[303,5] = 63 v[304,5] = 63 v[305,5] = 3 v[306,5] = 45 v[307,5] = 37 v[308,5] = 63 v[309,5] = 21 v[310,5] = 21 v[311,5] = 19 v[312,5] = 27 v[313,5] = 59 v[314,5] = 21 v[315,5] = 45 v[316,5] = 23 v[317,5] = 13 v[318,5] = 15 v[319,5] = 3 v[320,5] = 43 v[321,5] = 63 v[322,5] = 39 v[323,5] = 19 v[324,5] = 63 v[325,5] = 31 v[326,5] = 41 v[327,5] = 41 v[328,5] = 15 v[329,5] = 43 v[330,5] = 63 v[331,5] = 53 v[332,5] = 1 v[333,5] = 63 v[334,5] = 31 v[335,5] = 7 v[336,5] = 17 v[337,5] = 11 v[338,5] = 61 v[339,5] = 31 v[340,5] = 51 v[341,5] = 37 v[342,5] = 29 v[343,5] = 59 v[344,5] = 25 v[345,5] = 63 v[346,5] = 59 v[347,5] = 47 v[348,5] = 15 v[349,5] = 27 v[350,5] = 19 v[351,5] = 29 v[352,5] = 45 v[353,5] = 35 
v[354,5] = 55 v[355,5] = 39 v[356,5] = 19 v[357,5] = 43 v[358,5] = 21 v[359,5] = 19 v[360,5] = 13 v[361,5] = 17 v[362,5] = 51 v[363,5] = 37 v[364,5] = 5 v[365,5] = 33 v[366,5] = 35 v[367,5] = 49 v[368,5] = 25 v[369,5] = 45 v[370,5] = 1 v[371,5] = 63 v[372,5] = 47 v[373,5] = 9 v[374,5] = 63 v[375,5] = 15 v[376,5] = 25 v[377,5] = 25 v[378,5] = 15 v[379,5] = 41 v[380,5] = 13 v[381,5] = 3 v[382,5] = 19 v[383,5] = 51 v[384,5] = 49 v[385,5] = 37 v[386,5] = 25 v[387,5] = 49 v[388,5] = 13 v[389,5] = 53 v[390,5] = 47 v[391,5] = 23 v[392,5] = 35 v[393,5] = 29 v[394,5] = 33 v[395,5] = 21 v[396,5] = 35 v[397,5] = 23 v[398,5] = 3 v[399,5] = 43 v[400,5] = 31 v[401,5] = 63 v[402,5] = 9 v[403,5] = 1 v[404,5] = 61 v[405,5] = 43 v[406,5] = 3 v[407,5] = 11 v[408,5] = 55 v[409,5] = 11 v[410,5] = 35 v[411,5] = 1 v[412,5] = 63 v[413,5] = 35 v[414,5] = 49 v[415,5] = 19 v[416,5] = 45 v[417,5] = 9 v[418,5] = 57 v[419,5] = 51 v[420,5] = 1 v[421,5] = 47 v[422,5] = 41 v[423,5] = 9 v[424,5] = 11 v[425,5] = 37 v[426,5] = 19 v[427,5] = 55 v[428,5] = 23 v[429,5] = 55 v[430,5] = 55 v[431,5] = 13 v[432,5] = 7 v[433,5] = 47 v[434,5] = 37 v[435,5] = 11 v[436,5] = 43 v[437,5] = 17 v[438,5] = 3 v[439,5] = 25 v[440,5] = 19 v[441,5] = 55 v[442,5] = 59 v[443,5] = 37 v[444,5] = 33 v[445,5] = 43 v[446,5] = 1 v[447,5] = 5 v[448,5] = 21 v[449,5] = 5 v[450,5] = 63 v[451,5] = 49 v[452,5] = 61 v[453,5] = 21 v[454,5] = 51 v[455,5] = 15 v[456,5] = 19 v[457,5] = 43 v[458,5] = 47 v[459,5] = 17 v[460,5] = 9 v[461,5] = 53 v[462,5] = 45 v[463,5] = 11 v[464,5] = 51 v[465,5] = 25 v[466,5] = 11 v[467,5] = 25 v[468,5] = 47 v[469,5] = 47 v[470,5] = 1 v[471,5] = 43 v[472,5] = 29 v[473,5] = 17 v[474,5] = 31 v[475,5] = 15 v[476,5] = 59 v[477,5] = 27 v[478,5] = 63 v[479,5] = 11 v[480,5] = 41 v[481,5] = 51 v[482,5] = 29 v[483,5] = 7 v[484,5] = 27 v[485,5] = 63 v[486,5] = 31 v[487,5] = 43 v[488,5] = 3 v[489,5] = 29 v[490,5] = 39 v[491,5] = 3 v[492,5] = 59 v[493,5] = 59 v[494,5] = 1 v[495,5] = 53 v[496,5] = 63 v[497,5] = 23 
v[498,5] = 63 v[499,5] = 47 v[500,5] = 51 v[501,5] = 23 v[502,5] = 61 v[503,5] = 39 v[504,5] = 47 v[505,5] = 21 v[506,5] = 39 v[507,5] = 15 v[508,5] = 3 v[509,5] = 9 v[510,5] = 57 v[511,5] = 61 v[512,5] = 39 v[513,5] = 37 v[514,5] = 21 v[515,5] = 51 v[516,5] = 1 v[517,5] = 23 v[518,5] = 43 v[519,5] = 27 v[520,5] = 25 v[521,5] = 11 v[522,5] = 13 v[523,5] = 21 v[524,5] = 43 v[525,5] = 7 v[526,5] = 11 v[527,5] = 33 v[528,5] = 55 v[529,5] = 1 v[530,5] = 37 v[531,5] = 35 v[532,5] = 27 v[533,5] = 61 v[534,5] = 39 v[535,5] = 5 v[536,5] = 19 v[537,5] = 61 v[538,5] = 61 v[539,5] = 57 v[540,5] = 59 v[541,5] = 21 v[542,5] = 59 v[543,5] = 61 v[544,5] = 57 v[545,5] = 25 v[546,5] = 55 v[547,5] = 27 v[548,5] = 31 v[549,5] = 41 v[550,5] = 33 v[551,5] = 63 v[552,5] = 19 v[553,5] = 57 v[554,5] = 35 v[555,5] = 13 v[556,5] = 63 v[557,5] = 35 v[558,5] = 17 v[559,5] = 11 v[560,5] = 11 v[561,5] = 49 v[562,5] = 41 v[563,5] = 55 v[564,5] = 5 v[565,5] = 45 v[566,5] = 17 v[567,5] = 35 v[568,5] = 5 v[569,5] = 31 v[570,5] = 31 v[571,5] = 37 v[572,5] = 17 v[573,5] = 45 v[574,5] = 51 v[575,5] = 1 v[576,5] = 39 v[577,5] = 49 v[578,5] = 55 v[579,5] = 19 v[580,5] = 41 v[581,5] = 13 v[582,5] = 5 v[583,5] = 51 v[584,5] = 5 v[585,5] = 49 v[586,5] = 1 v[587,5] = 21 v[588,5] = 13 v[589,5] = 17 v[590,5] = 59 v[591,5] = 51 v[592,5] = 11 v[593,5] = 3 v[594,5] = 61 v[595,5] = 1 v[596,5] = 33 v[597,5] = 37 v[598,5] = 33 v[599,5] = 61 v[600,5] = 25 v[601,5] = 27 v[602,5] = 59 v[603,5] = 7 v[604,5] = 49 v[605,5] = 13 v[606,5] = 63 v[607,5] = 3 v[608,5] = 33 v[609,5] = 3 v[610,5] = 15 v[611,5] = 9 v[612,5] = 13 v[613,5] = 35 v[614,5] = 39 v[615,5] = 11 v[616,5] = 59 v[617,5] = 59 v[618,5] = 1 v[619,5] = 57 v[620,5] = 11 v[621,5] = 5 v[622,5] = 57 v[623,5] = 13 v[624,5] = 31 v[625,5] = 13 v[626,5] = 11 v[627,5] = 55 v[628,5] = 45 v[629,5] = 9 v[630,5] = 55 v[631,5] = 55 v[632,5] = 19 v[633,5] = 25 v[634,5] = 41 v[635,5] = 23 v[636,5] = 45 v[637,5] = 29 v[638,5] = 63 v[639,5] = 59 v[640,5] = 27 v[641,5] = 39 
v[642,5] = 21 v[643,5] = 37 v[644,5] = 7 v[645,5] = 61 v[646,5] = 49 v[647,5] = 35 v[648,5] = 39 v[649,5] = 9 v[650,5] = 29 v[651,5] = 7 v[652,5] = 25 v[653,5] = 23 v[654,5] = 57 v[655,5] = 5 v[656,5] = 19 v[657,5] = 15 v[658,5] = 33 v[659,5] = 49 v[660,5] = 37 v[661,5] = 25 v[662,5] = 17 v[663,5] = 45 v[664,5] = 29 v[665,5] = 15 v[666,5] = 25 v[667,5] = 3 v[668,5] = 3 v[669,5] = 49 v[670,5] = 11 v[671,5] = 39 v[672,5] = 15 v[673,5] = 19 v[674,5] = 57 v[675,5] = 39 v[676,5] = 15 v[677,5] = 11 v[678,5] = 3 v[679,5] = 57 v[680,5] = 31 v[681,5] = 55 v[682,5] = 61 v[683,5] = 19 v[684,5] = 5 v[685,5] = 41 v[686,5] = 35 v[687,5] = 59 v[688,5] = 61 v[689,5] = 39 v[690,5] = 41 v[691,5] = 53 v[692,5] = 53 v[693,5] = 63 v[694,5] = 31 v[695,5] = 9 v[696,5] = 59 v[697,5] = 13 v[698,5] = 35 v[699,5] = 55 v[700,5] = 41 v[701,5] = 49 v[702,5] = 5 v[703,5] = 41 v[704,5] = 25 v[705,5] = 27 v[706,5] = 43 v[707,5] = 5 v[708,5] = 5 v[709,5] = 43 v[710,5] = 5 v[711,5] = 5 v[712,5] = 17 v[713,5] = 5 v[714,5] = 15 v[715,5] = 27 v[716,5] = 29 v[717,5] = 17 v[718,5] = 9 v[719,5] = 3 v[720,5] = 55 v[721,5] = 31 v[722,5] = 1 v[723,5] = 45 v[724,5] = 45 v[725,5] = 13 v[726,5] = 57 v[727,5] = 17 v[728,5] = 3 v[729,5] = 61 v[730,5] = 15 v[731,5] = 49 v[732,5] = 15 v[733,5] = 47 v[734,5] = 9 v[735,5] = 37 v[736,5] = 45 v[737,5] = 9 v[738,5] = 51 v[739,5] = 61 v[740,5] = 21 v[741,5] = 33 v[742,5] = 11 v[743,5] = 21 v[744,5] = 63 v[745,5] = 63 v[746,5] = 47 v[747,5] = 57 v[748,5] = 61 v[749,5] = 49 v[750,5] = 9 v[751,5] = 59 v[752,5] = 19 v[753,5] = 29 v[754,5] = 21 v[755,5] = 23 v[756,5] = 55 v[757,5] = 23 v[758,5] = 43 v[759,5] = 41 v[760,5] = 57 v[761,5] = 9 v[762,5] = 39 v[763,5] = 27 v[764,5] = 41 v[765,5] = 35 v[766,5] = 61 v[767,5] = 29 v[768,5] = 57 v[769,5] = 63 v[770,5] = 21 v[771,5] = 31 v[772,5] = 59 v[773,5] = 35 v[774,5] = 49 v[775,5] = 3 v[776,5] = 49 v[777,5] = 47 v[778,5] = 49 v[779,5] = 33 v[780,5] = 21 v[781,5] = 19 v[782,5] = 21 v[783,5] = 35 v[784,5] = 11 v[785,5] = 17 
v[786,5] = 37 v[787,5] = 23 v[788,5] = 59 v[789,5] = 13 v[790,5] = 37 v[791,5] = 35 v[792,5] = 55 v[793,5] = 57 v[794,5] = 1 v[795,5] = 29 v[796,5] = 45 v[797,5] = 11 v[798,5] = 1 v[799,5] = 15 v[800,5] = 9 v[801,5] = 33 v[802,5] = 19 v[803,5] = 53 v[804,5] = 43 v[805,5] = 39 v[806,5] = 23 v[807,5] = 7 v[808,5] = 13 v[809,5] = 13 v[810,5] = 1 v[811,5] = 19 v[812,5] = 41 v[813,5] = 55 v[814,5] = 1 v[815,5] = 13 v[816,5] = 15 v[817,5] = 59 v[818,5] = 55 v[819,5] = 15 v[820,5] = 3 v[821,5] = 57 v[822,5] = 37 v[823,5] = 31 v[824,5] = 17 v[825,5] = 1 v[826,5] = 3 v[827,5] = 21 v[828,5] = 29 v[829,5] = 25 v[830,5] = 55 v[831,5] = 9 v[832,5] = 37 v[833,5] = 33 v[834,5] = 53 v[835,5] = 41 v[836,5] = 51 v[837,5] = 19 v[838,5] = 57 v[839,5] = 13 v[840,5] = 63 v[841,5] = 43 v[842,5] = 19 v[843,5] = 7 v[844,5] = 13 v[845,5] = 37 v[846,5] = 33 v[847,5] = 19 v[848,5] = 15 v[849,5] = 63 v[850,5] = 51 v[851,5] = 11 v[852,5] = 49 v[853,5] = 23 v[854,5] = 57 v[855,5] = 47 v[856,5] = 51 v[857,5] = 15 v[858,5] = 53 v[859,5] = 41 v[860,5] = 1 v[861,5] = 15 v[862,5] = 37 v[863,5] = 61 v[864,5] = 11 v[865,5] = 35 v[866,5] = 29 v[867,5] = 33 v[868,5] = 23 v[869,5] = 55 v[870,5] = 11 v[871,5] = 59 v[872,5] = 19 v[873,5] = 61 v[874,5] = 61 v[875,5] = 45 v[876,5] = 13 v[877,5] = 49 v[878,5] = 13 v[879,5] = 63 v[880,5] = 5 v[881,5] = 61 v[882,5] = 5 v[883,5] = 31 v[884,5] = 17 v[885,5] = 61 v[886,5] = 63 v[887,5] = 13 v[888,5] = 27 v[889,5] = 57 v[890,5] = 1 v[891,5] = 21 v[892,5] = 5 v[893,5] = 11 v[894,5] = 39 v[895,5] = 57 v[896,5] = 51 v[897,5] = 53 v[898,5] = 39 v[899,5] = 25 v[900,5] = 41 v[901,5] = 39 v[902,5] = 37 v[903,5] = 23 v[904,5] = 31 v[905,5] = 25 v[906,5] = 33 v[907,5] = 17 v[908,5] = 57 v[909,5] = 29 v[910,5] = 27 v[911,5] = 23 v[912,5] = 47 v[913,5] = 41 v[914,5] = 29 v[915,5] = 19 v[916,5] = 47 v[917,5] = 41 v[918,5] = 25 v[919,5] = 5 v[920,5] = 51 v[921,5] = 43 v[922,5] = 39 v[923,5] = 29 v[924,5] = 7 v[925,5] = 31 v[926,5] = 45 v[927,5] = 51 v[928,5] = 49 v[929,5] = 55 
v[930,5] = 17 v[931,5] = 43 v[932,5] = 49 v[933,5] = 45 v[934,5] = 9 v[935,5] = 29 v[936,5] = 3 v[937,5] = 5 v[938,5] = 47 v[939,5] = 9 v[940,5] = 15 v[941,5] = 19 v[942,5] = 51 v[943,5] = 45 v[944,5] = 57 v[945,5] = 63 v[946,5] = 9 v[947,5] = 21 v[948,5] = 59 v[949,5] = 3 v[950,5] = 9 v[951,5] = 13 v[952,5] = 45 v[953,5] = 23 v[954,5] = 15 v[955,5] = 31 v[956,5] = 21 v[957,5] = 15 v[958,5] = 51 v[959,5] = 35 v[960,5] = 9 v[961,5] = 11 v[962,5] = 61 v[963,5] = 23 v[964,5] = 53 v[965,5] = 29 v[966,5] = 51 v[967,5] = 45 v[968,5] = 31 v[969,5] = 29 v[970,5] = 5 v[971,5] = 35 v[972,5] = 29 v[973,5] = 53 v[974,5] = 35 v[975,5] = 17 v[976,5] = 59 v[977,5] = 55 v[978,5] = 27 v[979,5] = 51 v[980,5] = 59 v[981,5] = 27 v[982,5] = 47 v[983,5] = 15 v[984,5] = 29 v[985,5] = 37 v[986,5] = 7 v[987,5] = 49 v[988,5] = 55 v[989,5] = 5 v[990,5] = 19 v[991,5] = 45 v[992,5] = 29 v[993,5] = 19 v[994,5] = 57 v[995,5] = 33 v[996,5] = 53 v[997,5] = 45 v[998,5] = 21 v[999,5] = 9 v[1000,5] = 3 v[1001,5] = 35 v[1002,5] = 29 v[1003,5] = 43 v[1004,5] = 31 v[1005,5] = 39 v[1006,5] = 3 v[1007,5] = 45 v[1008,5] = 1 v[1009,5] = 41 v[1010,5] = 29 v[1011,5] = 5 v[1012,5] = 59 v[1013,5] = 41 v[1014,5] = 33 v[1015,5] = 35 v[1016,5] = 27 v[1017,5] = 19 v[1018,5] = 13 v[1019,5] = 25 v[1020,5] = 27 v[1021,5] = 43 v[1022,5] = 33 v[1023,5] = 35 v[1024,5] = 17 v[1025,5] = 17 v[1026,5] = 23 v[1027,5] = 7 v[1028,5] = 35 v[1029,5] = 15 v[1030,5] = 61 v[1031,5] = 61 v[1032,5] = 53 v[1033,5] = 5 v[1034,5] = 15 v[1035,5] = 23 v[1036,5] = 11 v[1037,5] = 13 v[1038,5] = 43 v[1039,5] = 55 v[1040,5] = 47 v[1041,5] = 25 v[1042,5] = 43 v[1043,5] = 15 v[1044,5] = 57 v[1045,5] = 45 v[1046,5] = 1 v[1047,5] = 49 v[1048,5] = 63 v[1049,5] = 57 v[1050,5] = 15 v[1051,5] = 31 v[1052,5] = 31 v[1053,5] = 7 v[1054,5] = 53 v[1055,5] = 27 v[1056,5] = 15 v[1057,5] = 47 v[1058,5] = 23 v[1059,5] = 7 v[1060,5] = 29 v[1061,5] = 53 v[1062,5] = 47 v[1063,5] = 9 v[1064,5] = 53 v[1065,5] = 3 v[1066,5] = 25 v[1067,5] = 55 v[1068,5] = 45 
v[1069,5] = 63 v[1070,5] = 21 v[1071,5] = 17 v[1072,5] = 23 v[1073,5] = 31 v[1074,5] = 27 v[1075,5] = 27 v[1076,5] = 43 v[1077,5] = 63 v[1078,5] = 55 v[1079,5] = 63 v[1080,5] = 45 v[1081,5] = 51 v[1082,5] = 15 v[1083,5] = 27 v[1084,5] = 5 v[1085,5] = 37 v[1086,5] = 43 v[1087,5] = 11 v[1088,5] = 27 v[1089,5] = 5 v[1090,5] = 27 v[1091,5] = 59 v[1092,5] = 21 v[1093,5] = 7 v[1094,5] = 39 v[1095,5] = 27 v[1096,5] = 63 v[1097,5] = 35 v[1098,5] = 47 v[1099,5] = 55 v[1100,5] = 17 v[1101,5] = 17 v[1102,5] = 17 v[1103,5] = 3 v[1104,5] = 19 v[1105,5] = 21 v[1106,5] = 13 v[1107,5] = 49 v[1108,5] = 61 v[1109,5] = 39 v[1110,5] = 15 v[19,6] = 13 v[20,6] = 33 v[21,6] = 115 v[22,6] = 41 v[23,6] = 79 v[24,6] = 17 v[25,6] = 29 v[26,6] = 119 v[27,6] = 75 v[28,6] = 73 v[29,6] = 105 v[30,6] = 7 v[31,6] = 59 v[32,6] = 65 v[33,6] = 21 v[34,6] = 3 v[35,6] = 113 v[36,6] = 61 v[37,6] = 89 v[38,6] = 45 v[39,6] = 107 v[40,6] = 21 v[41,6] = 71 v[42,6] = 79 v[43,6] = 19 v[44,6] = 71 v[45,6] = 61 v[46,6] = 41 v[47,6] = 57 v[48,6] = 121 v[49,6] = 87 v[50,6] = 119 v[51,6] = 55 v[52,6] = 85 v[53,6] = 121 v[54,6] = 119 v[55,6] = 11 v[56,6] = 23 v[57,6] = 61 v[58,6] = 11 v[59,6] = 35 v[60,6] = 33 v[61,6] = 43 v[62,6] = 107 v[63,6] = 113 v[64,6] = 101 v[65,6] = 29 v[66,6] = 87 v[67,6] = 119 v[68,6] = 97 v[69,6] = 29 v[70,6] = 17 v[71,6] = 89 v[72,6] = 5 v[73,6] = 127 v[74,6] = 89 v[75,6] = 119 v[76,6] = 117 v[77,6] = 103 v[78,6] = 105 v[79,6] = 41 v[80,6] = 83 v[81,6] = 25 v[82,6] = 41 v[83,6] = 55 v[84,6] = 69 v[85,6] = 117 v[86,6] = 49 v[87,6] = 127 v[88,6] = 29 v[89,6] = 1 v[90,6] = 99 v[91,6] = 53 v[92,6] = 83 v[93,6] = 15 v[94,6] = 31 v[95,6] = 73 v[96,6] = 115 v[97,6] = 35 v[98,6] = 21 v[99,6] = 89 v[100,6] = 5 v[101,6] = 1 v[102,6] = 91 v[103,6] = 53 v[104,6] = 35 v[105,6] = 95 v[106,6] = 83 v[107,6] = 19 v[108,6] = 85 v[109,6] = 55 v[110,6] = 51 v[111,6] = 101 v[112,6] = 33 v[113,6] = 41 v[114,6] = 55 v[115,6] = 45 v[116,6] = 95 v[117,6] = 61 v[118,6] = 27 v[119,6] = 37 v[120,6] = 89 v[121,6] = 
75 v[122,6] = 57 v[123,6] = 61 v[124,6] = 15 v[125,6] = 117 v[126,6] = 15 v[127,6] = 21 v[128,6] = 27 v[129,6] = 25 v[130,6] = 27 v[131,6] = 123 v[132,6] = 39 v[133,6] = 109 v[134,6] = 93 v[135,6] = 51 v[136,6] = 21 v[137,6] = 91 v[138,6] = 109 v[139,6] = 107 v[140,6] = 45 v[141,6] = 15 v[142,6] = 93 v[143,6] = 127 v[144,6] = 3 v[145,6] = 53 v[146,6] = 81 v[147,6] = 79 v[148,6] = 107 v[149,6] = 79 v[150,6] = 87 v[151,6] = 35 v[152,6] = 109 v[153,6] = 73 v[154,6] = 35 v[155,6] = 83 v[156,6] = 107 v[157,6] = 1 v[158,6] = 51 v[159,6] = 7 v[160,6] = 59 v[161,6] = 33 v[162,6] = 115 v[163,6] = 43 v[164,6] = 111 v[165,6] = 45 v[166,6] = 121 v[167,6] = 105 v[168,6] = 125 v[169,6] = 87 v[170,6] = 101 v[171,6] = 41 v[172,6] = 95 v[173,6] = 75 v[174,6] = 1 v[175,6] = 57 v[176,6] = 117 v[177,6] = 21 v[178,6] = 27 v[179,6] = 67 v[180,6] = 29 v[181,6] = 53 v[182,6] = 117 v[183,6] = 63 v[184,6] = 1 v[185,6] = 77 v[186,6] = 89 v[187,6] = 115 v[188,6] = 49 v[189,6] = 127 v[190,6] = 15 v[191,6] = 79 v[192,6] = 81 v[193,6] = 29 v[194,6] = 65 v[195,6] = 103 v[196,6] = 33 v[197,6] = 73 v[198,6] = 79 v[199,6] = 29 v[200,6] = 21 v[201,6] = 113 v[202,6] = 31 v[203,6] = 33 v[204,6] = 107 v[205,6] = 95 v[206,6] = 111 v[207,6] = 59 v[208,6] = 99 v[209,6] = 117 v[210,6] = 63 v[211,6] = 63 v[212,6] = 99 v[213,6] = 39 v[214,6] = 9 v[215,6] = 35 v[216,6] = 63 v[217,6] = 125 v[218,6] = 99 v[219,6] = 45 v[220,6] = 93 v[221,6] = 33 v[222,6] = 93 v[223,6] = 9 v[224,6] = 105 v[225,6] = 75 v[226,6] = 51 v[227,6] = 115 v[228,6] = 11 v[229,6] = 37 v[230,6] = 17 v[231,6] = 41 v[232,6] = 21 v[233,6] = 43 v[234,6] = 73 v[235,6] = 19 v[236,6] = 93 v[237,6] = 7 v[238,6] = 95 v[239,6] = 81 v[240,6] = 93 v[241,6] = 79 v[242,6] = 81 v[243,6] = 55 v[244,6] = 9 v[245,6] = 51 v[246,6] = 63 v[247,6] = 45 v[248,6] = 89 v[249,6] = 73 v[250,6] = 19 v[251,6] = 115 v[252,6] = 39 v[253,6] = 47 v[254,6] = 81 v[255,6] = 39 v[256,6] = 5 v[257,6] = 5 v[258,6] = 45 v[259,6] = 53 v[260,6] = 65 v[261,6] = 49 v[262,6] = 17 
v[263,6] = 105 v[264,6] = 13 v[265,6] = 107 v[266,6] = 5 v[267,6] = 5 v[268,6] = 19 v[269,6] = 73 v[270,6] = 59 v[271,6] = 43 v[272,6] = 83 v[273,6] = 97 v[274,6] = 115 v[275,6] = 27 v[276,6] = 1 v[277,6] = 69 v[278,6] = 103 v[279,6] = 3 v[280,6] = 99 v[281,6] = 103 v[282,6] = 63 v[283,6] = 67 v[284,6] = 25 v[285,6] = 121 v[286,6] = 97 v[287,6] = 77 v[288,6] = 13 v[289,6] = 83 v[290,6] = 103 v[291,6] = 41 v[292,6] = 11 v[293,6] = 27 v[294,6] = 81 v[295,6] = 37 v[296,6] = 33 v[297,6] = 125 v[298,6] = 71 v[299,6] = 41 v[300,6] = 41 v[301,6] = 59 v[302,6] = 41 v[303,6] = 87 v[304,6] = 123 v[305,6] = 43 v[306,6] = 101 v[307,6] = 63 v[308,6] = 45 v[309,6] = 39 v[310,6] = 21 v[311,6] = 97 v[312,6] = 15 v[313,6] = 97 v[314,6] = 111 v[315,6] = 21 v[316,6] = 49 v[317,6] = 13 v[318,6] = 17 v[319,6] = 79 v[320,6] = 91 v[321,6] = 65 v[322,6] = 105 v[323,6] = 75 v[324,6] = 1 v[325,6] = 45 v[326,6] = 67 v[327,6] = 83 v[328,6] = 107 v[329,6] = 125 v[330,6] = 87 v[331,6] = 15 v[332,6] = 81 v[333,6] = 95 v[334,6] = 105 v[335,6] = 65 v[336,6] = 45 v[337,6] = 59 v[338,6] = 103 v[339,6] = 23 v[340,6] = 103 v[341,6] = 99 v[342,6] = 67 v[343,6] = 99 v[344,6] = 47 v[345,6] = 117 v[346,6] = 71 v[347,6] = 89 v[348,6] = 35 v[349,6] = 53 v[350,6] = 73 v[351,6] = 9 v[352,6] = 115 v[353,6] = 49 v[354,6] = 37 v[355,6] = 1 v[356,6] = 35 v[357,6] = 9 v[358,6] = 45 v[359,6] = 81 v[360,6] = 19 v[361,6] = 127 v[362,6] = 17 v[363,6] = 17 v[364,6] = 105 v[365,6] = 89 v[366,6] = 49 v[367,6] = 101 v[368,6] = 7 v[369,6] = 37 v[370,6] = 33 v[371,6] = 11 v[372,6] = 95 v[373,6] = 95 v[374,6] = 17 v[375,6] = 111 v[376,6] = 105 v[377,6] = 41 v[378,6] = 115 v[379,6] = 5 v[380,6] = 69 v[381,6] = 101 v[382,6] = 27 v[383,6] = 27 v[384,6] = 101 v[385,6] = 103 v[386,6] = 53 v[387,6] = 9 v[388,6] = 21 v[389,6] = 43 v[390,6] = 79 v[391,6] = 91 v[392,6] = 65 v[393,6] = 117 v[394,6] = 87 v[395,6] = 125 v[396,6] = 55 v[397,6] = 45 v[398,6] = 63 v[399,6] = 85 v[400,6] = 83 v[401,6] = 97 v[402,6] = 45 v[403,6] = 83 
v[404,6] = 87 v[405,6] = 113 v[406,6] = 93 v[407,6] = 95 v[408,6] = 5 v[409,6] = 17 v[410,6] = 77 v[411,6] = 77 v[412,6] = 127 v[413,6] = 123 v[414,6] = 45 v[415,6] = 81 v[416,6] = 85 v[417,6] = 121 v[418,6] = 119 v[419,6] = 27 v[420,6] = 85 v[421,6] = 41 v[422,6] = 49 v[423,6] = 15 v[424,6] = 107 v[425,6] = 21 v[426,6] = 51 v[427,6] = 119 v[428,6] = 11 v[429,6] = 87 v[430,6] = 101 v[431,6] = 115 v[432,6] = 63 v[433,6] = 63 v[434,6] = 37 v[435,6] = 121 v[436,6] = 109 v[437,6] = 7 v[438,6] = 43 v[439,6] = 69 v[440,6] = 19 v[441,6] = 77 v[442,6] = 49 v[443,6] = 71 v[444,6] = 59 v[445,6] = 35 v[446,6] = 7 v[447,6] = 13 v[448,6] = 55 v[449,6] = 101 v[450,6] = 127 v[451,6] = 103 v[452,6] = 85 v[453,6] = 109 v[454,6] = 29 v[455,6] = 61 v[456,6] = 67 v[457,6] = 21 v[458,6] = 111 v[459,6] = 67 v[460,6] = 23 v[461,6] = 57 v[462,6] = 75 v[463,6] = 71 v[464,6] = 101 v[465,6] = 123 v[466,6] = 41 v[467,6] = 107 v[468,6] = 101 v[469,6] = 107 v[470,6] = 125 v[471,6] = 27 v[472,6] = 47 v[473,6] = 119 v[474,6] = 41 v[475,6] = 19 v[476,6] = 127 v[477,6] = 33 v[478,6] = 31 v[479,6] = 109 v[480,6] = 7 v[481,6] = 91 v[482,6] = 91 v[483,6] = 39 v[484,6] = 125 v[485,6] = 105 v[486,6] = 47 v[487,6] = 125 v[488,6] = 123 v[489,6] = 91 v[490,6] = 9 v[491,6] = 103 v[492,6] = 45 v[493,6] = 23 v[494,6] = 117 v[495,6] = 9 v[496,6] = 125 v[497,6] = 73 v[498,6] = 11 v[499,6] = 37 v[500,6] = 61 v[501,6] = 79 v[502,6] = 21 v[503,6] = 5 v[504,6] = 47 v[505,6] = 117 v[506,6] = 67 v[507,6] = 53 v[508,6] = 85 v[509,6] = 33 v[510,6] = 81 v[511,6] = 121 v[512,6] = 47 v[513,6] = 61 v[514,6] = 51 v[515,6] = 127 v[516,6] = 29 v[517,6] = 65 v[518,6] = 45 v[519,6] = 41 v[520,6] = 95 v[521,6] = 57 v[522,6] = 73 v[523,6] = 33 v[524,6] = 117 v[525,6] = 61 v[526,6] = 111 v[527,6] = 59 v[528,6] = 123 v[529,6] = 65 v[530,6] = 47 v[531,6] = 105 v[532,6] = 23 v[533,6] = 29 v[534,6] = 107 v[535,6] = 37 v[536,6] = 81 v[537,6] = 67 v[538,6] = 29 v[539,6] = 115 v[540,6] = 119 v[541,6] = 75 v[542,6] = 73 v[543,6] = 99 
v[544,6] = 103 v[545,6] = 7 v[546,6] = 57 v[547,6] = 45 v[548,6] = 61 v[549,6] = 95 v[550,6] = 49 v[551,6] = 101 v[552,6] = 101 v[553,6] = 35 v[554,6] = 47 v[555,6] = 119 v[556,6] = 39 v[557,6] = 67 v[558,6] = 31 v[559,6] = 103 v[560,6] = 7 v[561,6] = 61 v[562,6] = 127 v[563,6] = 87 v[564,6] = 3 v[565,6] = 35 v[566,6] = 29 v[567,6] = 73 v[568,6] = 95 v[569,6] = 103 v[570,6] = 71 v[571,6] = 75 v[572,6] = 51 v[573,6] = 87 v[574,6] = 57 v[575,6] = 97 v[576,6] = 11 v[577,6] = 105 v[578,6] = 87 v[579,6] = 41 v[580,6] = 73 v[581,6] = 109 v[582,6] = 69 v[583,6] = 35 v[584,6] = 121 v[585,6] = 39 v[586,6] = 111 v[587,6] = 1 v[588,6] = 77 v[589,6] = 39 v[590,6] = 47 v[591,6] = 53 v[592,6] = 91 v[593,6] = 3 v[594,6] = 17 v[595,6] = 51 v[596,6] = 83 v[597,6] = 39 v[598,6] = 125 v[599,6] = 85 v[600,6] = 111 v[601,6] = 21 v[602,6] = 69 v[603,6] = 85 v[604,6] = 29 v[605,6] = 55 v[606,6] = 11 v[607,6] = 117 v[608,6] = 1 v[609,6] = 47 v[610,6] = 17 v[611,6] = 65 v[612,6] = 63 v[613,6] = 47 v[614,6] = 117 v[615,6] = 17 v[616,6] = 115 v[617,6] = 51 v[618,6] = 25 v[619,6] = 33 v[620,6] = 123 v[621,6] = 123 v[622,6] = 83 v[623,6] = 51 v[624,6] = 113 v[625,6] = 95 v[626,6] = 121 v[627,6] = 51 v[628,6] = 91 v[629,6] = 109 v[630,6] = 43 v[631,6] = 55 v[632,6] = 35 v[633,6] = 55 v[634,6] = 87 v[635,6] = 33 v[636,6] = 37 v[637,6] = 5 v[638,6] = 3 v[639,6] = 45 v[640,6] = 21 v[641,6] = 105 v[642,6] = 127 v[643,6] = 35 v[644,6] = 17 v[645,6] = 35 v[646,6] = 37 v[647,6] = 97 v[648,6] = 97 v[649,6] = 21 v[650,6] = 77 v[651,6] = 123 v[652,6] = 17 v[653,6] = 89 v[654,6] = 53 v[655,6] = 105 v[656,6] = 75 v[657,6] = 25 v[658,6] = 125 v[659,6] = 13 v[660,6] = 47 v[661,6] = 21 v[662,6] = 125 v[663,6] = 23 v[664,6] = 55 v[665,6] = 63 v[666,6] = 61 v[667,6] = 5 v[668,6] = 17 v[669,6] = 93 v[670,6] = 57 v[671,6] = 121 v[672,6] = 69 v[673,6] = 73 v[674,6] = 93 v[675,6] = 121 v[676,6] = 105 v[677,6] = 75 v[678,6] = 91 v[679,6] = 67 v[680,6] = 95 v[681,6] = 75 v[682,6] = 9 v[683,6] = 69 v[684,6] = 97 
v[685,6] = 99 v[686,6] = 93 v[687,6] = 11 v[688,6] = 53 v[689,6] = 19 v[690,6] = 73 v[691,6] = 5 v[692,6] = 33 v[693,6] = 79 v[694,6] = 107 v[695,6] = 65 v[696,6] = 69 v[697,6] = 79 v[698,6] = 125 v[699,6] = 25 v[700,6] = 93 v[701,6] = 55 v[702,6] = 61 v[703,6] = 17 v[704,6] = 117 v[705,6] = 69 v[706,6] = 97 v[707,6] = 87 v[708,6] = 111 v[709,6] = 37 v[710,6] = 93 v[711,6] = 59 v[712,6] = 79 v[713,6] = 95 v[714,6] = 53 v[715,6] = 115 v[716,6] = 53 v[717,6] = 85 v[718,6] = 85 v[719,6] = 65 v[720,6] = 59 v[721,6] = 23 v[722,6] = 75 v[723,6] = 21 v[724,6] = 67 v[725,6] = 27 v[726,6] = 99 v[727,6] = 79 v[728,6] = 27 v[729,6] = 3 v[730,6] = 95 v[731,6] = 27 v[732,6] = 69 v[733,6] = 19 v[734,6] = 75 v[735,6] = 47 v[736,6] = 59 v[737,6] = 41 v[738,6] = 85 v[739,6] = 77 v[740,6] = 99 v[741,6] = 55 v[742,6] = 49 v[743,6] = 93 v[744,6] = 93 v[745,6] = 119 v[746,6] = 51 v[747,6] = 125 v[748,6] = 63 v[749,6] = 13 v[750,6] = 15 v[751,6] = 45 v[752,6] = 61 v[753,6] = 19 v[754,6] = 105 v[755,6] = 115 v[756,6] = 17 v[757,6] = 83 v[758,6] = 7 v[759,6] = 7 v[760,6] = 11 v[761,6] = 61 v[762,6] = 37 v[763,6] = 63 v[764,6] = 89 v[765,6] = 95 v[766,6] = 119 v[767,6] = 113 v[768,6] = 67 v[769,6] = 123 v[770,6] = 91 v[771,6] = 33 v[772,6] = 37 v[773,6] = 99 v[774,6] = 43 v[775,6] = 11 v[776,6] = 33 v[777,6] = 65 v[778,6] = 81 v[779,6] = 79 v[780,6] = 81 v[781,6] = 107 v[782,6] = 63 v[783,6] = 63 v[784,6] = 55 v[785,6] = 89 v[786,6] = 91 v[787,6] = 25 v[788,6] = 93 v[789,6] = 101 v[790,6] = 27 v[791,6] = 55 v[792,6] = 75 v[793,6] = 121 v[794,6] = 79 v[795,6] = 43 v[796,6] = 125 v[797,6] = 73 v[798,6] = 27 v[799,6] = 109 v[800,6] = 35 v[801,6] = 21 v[802,6] = 71 v[803,6] = 113 v[804,6] = 89 v[805,6] = 59 v[806,6] = 95 v[807,6] = 41 v[808,6] = 45 v[809,6] = 113 v[810,6] = 119 v[811,6] = 113 v[812,6] = 39 v[813,6] = 59 v[814,6] = 73 v[815,6] = 15 v[816,6] = 13 v[817,6] = 59 v[818,6] = 67 v[819,6] = 121 v[820,6] = 27 v[821,6] = 7 v[822,6] = 105 v[823,6] = 15 v[824,6] = 59 v[825,6] = 59 
v[826,6] = 35 v[827,6] = 91 v[828,6] = 89 v[829,6] = 23 v[830,6] = 125 v[831,6] = 97 v[832,6] = 53 v[833,6] = 41 v[834,6] = 91 v[835,6] = 111 v[836,6] = 29 v[837,6] = 31 v[838,6] = 3 v[839,6] = 103 v[840,6] = 61 v[841,6] = 71 v[842,6] = 35 v[843,6] = 7 v[844,6] = 119 v[845,6] = 29 v[846,6] = 45 v[847,6] = 49 v[848,6] = 111 v[849,6] = 41 v[850,6] = 109 v[851,6] = 59 v[852,6] = 125 v[853,6] = 13 v[854,6] = 27 v[855,6] = 19 v[856,6] = 79 v[857,6] = 9 v[858,6] = 75 v[859,6] = 83 v[860,6] = 81 v[861,6] = 33 v[862,6] = 91 v[863,6] = 109 v[864,6] = 33 v[865,6] = 29 v[866,6] = 107 v[867,6] = 111 v[868,6] = 101 v[869,6] = 107 v[870,6] = 109 v[871,6] = 65 v[872,6] = 59 v[873,6] = 43 v[874,6] = 37 v[875,6] = 1 v[876,6] = 9 v[877,6] = 15 v[878,6] = 109 v[879,6] = 37 v[880,6] = 111 v[881,6] = 113 v[882,6] = 119 v[883,6] = 79 v[884,6] = 73 v[885,6] = 65 v[886,6] = 71 v[887,6] = 93 v[888,6] = 17 v[889,6] = 101 v[890,6] = 87 v[891,6] = 97 v[892,6] = 43 v[893,6] = 23 v[894,6] = 75 v[895,6] = 109 v[896,6] = 41 v[897,6] = 49 v[898,6] = 53 v[899,6] = 31 v[900,6] = 97 v[901,6] = 105 v[902,6] = 109 v[903,6] = 119 v[904,6] = 51 v[905,6] = 9 v[906,6] = 53 v[907,6] = 113 v[908,6] = 97 v[909,6] = 73 v[910,6] = 89 v[911,6] = 79 v[912,6] = 49 v[913,6] = 61 v[914,6] = 105 v[915,6] = 13 v[916,6] = 99 v[917,6] = 53 v[918,6] = 71 v[919,6] = 7 v[920,6] = 87 v[921,6] = 21 v[922,6] = 101 v[923,6] = 5 v[924,6] = 71 v[925,6] = 31 v[926,6] = 123 v[927,6] = 121 v[928,6] = 121 v[929,6] = 73 v[930,6] = 79 v[931,6] = 115 v[932,6] = 13 v[933,6] = 39 v[934,6] = 101 v[935,6] = 19 v[936,6] = 37 v[937,6] = 51 v[938,6] = 83 v[939,6] = 97 v[940,6] = 55 v[941,6] = 81 v[942,6] = 91 v[943,6] = 127 v[944,6] = 105 v[945,6] = 89 v[946,6] = 63 v[947,6] = 47 v[948,6] = 49 v[949,6] = 75 v[950,6] = 37 v[951,6] = 77 v[952,6] = 15 v[953,6] = 49 v[954,6] = 107 v[955,6] = 23 v[956,6] = 23 v[957,6] = 35 v[958,6] = 19 v[959,6] = 69 v[960,6] = 17 v[961,6] = 59 v[962,6] = 63 v[963,6] = 73 v[964,6] = 29 v[965,6] = 125 v[966,6] = 61 
v[967,6] = 65 v[968,6] = 95 v[969,6] = 101 v[970,6] = 81 v[971,6] = 57 v[972,6] = 69 v[973,6] = 83 v[974,6] = 37 v[975,6] = 11 v[976,6] = 37 v[977,6] = 95 v[978,6] = 1 v[979,6] = 73 v[980,6] = 27 v[981,6] = 29 v[982,6] = 57 v[983,6] = 7 v[984,6] = 65 v[985,6] = 83 v[986,6] = 99 v[987,6] = 69 v[988,6] = 19 v[989,6] = 103 v[990,6] = 43 v[991,6] = 95 v[992,6] = 25 v[993,6] = 19 v[994,6] = 103 v[995,6] = 41 v[996,6] = 125 v[997,6] = 97 v[998,6] = 71 v[999,6] = 105 v[1000,6] = 83 v[1001,6] = 83 v[1002,6] = 61 v[1003,6] = 39 v[1004,6] = 9 v[1005,6] = 45 v[1006,6] = 117 v[1007,6] = 63 v[1008,6] = 31 v[1009,6] = 5 v[1010,6] = 117 v[1011,6] = 67 v[1012,6] = 125 v[1013,6] = 41 v[1014,6] = 117 v[1015,6] = 43 v[1016,6] = 77 v[1017,6] = 97 v[1018,6] = 15 v[1019,6] = 29 v[1020,6] = 5 v[1021,6] = 59 v[1022,6] = 25 v[1023,6] = 63 v[1024,6] = 87 v[1025,6] = 39 v[1026,6] = 39 v[1027,6] = 77 v[1028,6] = 85 v[1029,6] = 37 v[1030,6] = 81 v[1031,6] = 73 v[1032,6] = 89 v[1033,6] = 29 v[1034,6] = 125 v[1035,6] = 109 v[1036,6] = 21 v[1037,6] = 23 v[1038,6] = 119 v[1039,6] = 105 v[1040,6] = 43 v[1041,6] = 93 v[1042,6] = 97 v[1043,6] = 15 v[1044,6] = 125 v[1045,6] = 29 v[1046,6] = 51 v[1047,6] = 69 v[1048,6] = 37 v[1049,6] = 45 v[1050,6] = 31 v[1051,6] = 75 v[1052,6] = 109 v[1053,6] = 119 v[1054,6] = 53 v[1055,6] = 5 v[1056,6] = 101 v[1057,6] = 125 v[1058,6] = 121 v[1059,6] = 35 v[1060,6] = 29 v[1061,6] = 7 v[1062,6] = 63 v[1063,6] = 17 v[1064,6] = 63 v[1065,6] = 13 v[1066,6] = 69 v[1067,6] = 15 v[1068,6] = 105 v[1069,6] = 51 v[1070,6] = 127 v[1071,6] = 105 v[1072,6] = 9 v[1073,6] = 57 v[1074,6] = 95 v[1075,6] = 59 v[1076,6] = 109 v[1077,6] = 35 v[1078,6] = 49 v[1079,6] = 23 v[1080,6] = 33 v[1081,6] = 107 v[1082,6] = 55 v[1083,6] = 33 v[1084,6] = 57 v[1085,6] = 79 v[1086,6] = 73 v[1087,6] = 69 v[1088,6] = 59 v[1089,6] = 107 v[1090,6] = 55 v[1091,6] = 11 v[1092,6] = 63 v[1093,6] = 95 v[1094,6] = 103 v[1095,6] = 23 v[1096,6] = 125 v[1097,6] = 91 v[1098,6] = 31 v[1099,6] = 91 v[1100,6] = 51 
v[1101,6] = 65 v[1102,6] = 61 v[1103,6] = 75 v[1104,6] = 69 v[1105,6] = 107 v[1106,6] = 65 v[1107,6] = 101 v[1108,6] = 59 v[1109,6] = 35 v[1110,6] = 15 v[37,7] = 7 v[38,7] = 23 v[39,7] = 39 v[40,7] = 217 v[41,7] = 141 v[42,7] = 27 v[43,7] = 53 v[44,7] = 181 v[45,7] = 169 v[46,7] = 35 v[47,7] = 15 v[48,7] = 207 v[49,7] = 45 v[50,7] = 247 v[51,7] = 185 v[52,7] = 117 v[53,7] = 41 v[54,7] = 81 v[55,7] = 223 v[56,7] = 151 v[57,7] = 81 v[58,7] = 189 v[59,7] = 61 v[60,7] = 95 v[61,7] = 185 v[62,7] = 23 v[63,7] = 73 v[64,7] = 113 v[65,7] = 239 v[66,7] = 85 v[67,7] = 9 v[68,7] = 201 v[69,7] = 83 v[70,7] = 53 v[71,7] = 183 v[72,7] = 203 v[73,7] = 91 v[74,7] = 149 v[75,7] = 101 v[76,7] = 13 v[77,7] = 111 v[78,7] = 239 v[79,7] = 3 v[80,7] = 205 v[81,7] = 253 v[82,7] = 247 v[83,7] = 121 v[84,7] = 189 v[85,7] = 169 v[86,7] = 179 v[87,7] = 197 v[88,7] = 175 v[89,7] = 217 v[90,7] = 249 v[91,7] = 195 v[92,7] = 95 v[93,7] = 63 v[94,7] = 19 v[95,7] = 7 v[96,7] = 5 v[97,7] = 75 v[98,7] = 217 v[99,7] = 245 v[100,7] = 111 v[101,7] = 189 v[102,7] = 165 v[103,7] = 169 v[104,7] = 141 v[105,7] = 221 v[106,7] = 249 v[107,7] = 159 v[108,7] = 253 v[109,7] = 207 v[110,7] = 249 v[111,7] = 219 v[112,7] = 23 v[113,7] = 49 v[114,7] = 127 v[115,7] = 237 v[116,7] = 5 v[117,7] = 25 v[118,7] = 177 v[119,7] = 37 v[120,7] = 103 v[121,7] = 65 v[122,7] = 167 v[123,7] = 81 v[124,7] = 87 v[125,7] = 119 v[126,7] = 45 v[127,7] = 79 v[128,7] = 143 v[129,7] = 57 v[130,7] = 79 v[131,7] = 187 v[132,7] = 143 v[133,7] = 183 v[134,7] = 75 v[135,7] = 97 v[136,7] = 211 v[137,7] = 149 v[138,7] = 175 v[139,7] = 37 v[140,7] = 135 v[141,7] = 189 v[142,7] = 225 v[143,7] = 241 v[144,7] = 63 v[145,7] = 33 v[146,7] = 43 v[147,7] = 13 v[148,7] = 73 v[149,7] = 213 v[150,7] = 57 v[151,7] = 239 v[152,7] = 183 v[153,7] = 117 v[154,7] = 21 v[155,7] = 29 v[156,7] = 115 v[157,7] = 43 v[158,7] = 205 v[159,7] = 223 v[160,7] = 15 v[161,7] = 3 v[162,7] = 159 v[163,7] = 51 v[164,7] = 101 v[165,7] = 127 v[166,7] = 99 v[167,7] = 239 v[168,7] 
= 171 v[169,7] = 113 v[170,7] = 171 v[171,7] = 119 v[172,7] = 189 v[173,7] = 245 v[174,7] = 201 v[175,7] = 27 v[176,7] = 185 v[177,7] = 229 v[178,7] = 105 v[179,7] = 153 v[180,7] = 189 v[181,7] = 33 v[182,7] = 35 v[183,7] = 137 v[184,7] = 77 v[185,7] = 97 v[186,7] = 17 v[187,7] = 181 v[188,7] = 55 v[189,7] = 197 v[190,7] = 201 v[191,7] = 155 v[192,7] = 37 v[193,7] = 197 v[194,7] = 137 v[195,7] = 223 v[196,7] = 25 v[197,7] = 179 v[198,7] = 91 v[199,7] = 23 v[200,7] = 235 v[201,7] = 53 v[202,7] = 253 v[203,7] = 49 v[204,7] = 181 v[205,7] = 249 v[206,7] = 53 v[207,7] = 173 v[208,7] = 97 v[209,7] = 247 v[210,7] = 67 v[211,7] = 115 v[212,7] = 103 v[213,7] = 159 v[214,7] = 239 v[215,7] = 69 v[216,7] = 173 v[217,7] = 217 v[218,7] = 95 v[219,7] = 221 v[220,7] = 247 v[221,7] = 97 v[222,7] = 91 v[223,7] = 123 v[224,7] = 223 v[225,7] = 213 v[226,7] = 129 v[227,7] = 181 v[228,7] = 87 v[229,7] = 239 v[230,7] = 85 v[231,7] = 89 v[232,7] = 249 v[233,7] = 141 v[234,7] = 39 v[235,7] = 57 v[236,7] = 249 v[237,7] = 71 v[238,7] = 101 v[239,7] = 159 v[240,7] = 33 v[241,7] = 137 v[242,7] = 189 v[243,7] = 71 v[244,7] = 253 v[245,7] = 205 v[246,7] = 171 v[247,7] = 13 v[248,7] = 249 v[249,7] = 109 v[250,7] = 131 v[251,7] = 199 v[252,7] = 189 v[253,7] = 179 v[254,7] = 31 v[255,7] = 99 v[256,7] = 113 v[257,7] = 41 v[258,7] = 173 v[259,7] = 23 v[260,7] = 189 v[261,7] = 197 v[262,7] = 3 v[263,7] = 135 v[264,7] = 9 v[265,7] = 95 v[266,7] = 195 v[267,7] = 27 v[268,7] = 183 v[269,7] = 1 v[270,7] = 123 v[271,7] = 73 v[272,7] = 53 v[273,7] = 99 v[274,7] = 197 v[275,7] = 59 v[276,7] = 27 v[277,7] = 101 v[278,7] = 55 v[279,7] = 193 v[280,7] = 31 v[281,7] = 61 v[282,7] = 119 v[283,7] = 11 v[284,7] = 7 v[285,7] = 255 v[286,7] = 233 v[287,7] = 53 v[288,7] = 157 v[289,7] = 193 v[290,7] = 97 v[291,7] = 83 v[292,7] = 65 v[293,7] = 81 v[294,7] = 239 v[295,7] = 167 v[296,7] = 69 v[297,7] = 71 v[298,7] = 109 v[299,7] = 97 v[300,7] = 137 v[301,7] = 71 v[302,7] = 193 v[303,7] = 189 v[304,7] = 115 v[305,7] = 79 
v[306,7] = 205 v[307,7] = 37 v[308,7] = 227 v[309,7] = 53 v[310,7] = 33 v[311,7] = 91 v[312,7] = 229 v[313,7] = 245 v[314,7] = 105 v[315,7] = 77 v[316,7] = 229 v[317,7] = 161 v[318,7] = 103 v[319,7] = 93 v[320,7] = 13 v[321,7] = 161 v[322,7] = 229 v[323,7] = 223 v[324,7] = 69 v[325,7] = 15 v[326,7] = 25 v[327,7] = 23 v[328,7] = 233 v[329,7] = 93 v[330,7] = 25 v[331,7] = 217 v[332,7] = 247 v[333,7] = 61 v[334,7] = 75 v[335,7] = 27 v[336,7] = 9 v[337,7] = 223 v[338,7] = 213 v[339,7] = 55 v[340,7] = 197 v[341,7] = 145 v[342,7] = 89 v[343,7] = 199 v[344,7] = 41 v[345,7] = 201 v[346,7] = 5 v[347,7] = 149 v[348,7] = 35 v[349,7] = 119 v[350,7] = 183 v[351,7] = 53 v[352,7] = 11 v[353,7] = 13 v[354,7] = 3 v[355,7] = 179 v[356,7] = 229 v[357,7] = 43 v[358,7] = 55 v[359,7] = 187 v[360,7] = 233 v[361,7] = 47 v[362,7] = 133 v[363,7] = 91 v[364,7] = 47 v[365,7] = 71 v[366,7] = 93 v[367,7] = 105 v[368,7] = 145 v[369,7] = 45 v[370,7] = 255 v[371,7] = 221 v[372,7] = 115 v[373,7] = 175 v[374,7] = 19 v[375,7] = 129 v[376,7] = 5 v[377,7] = 209 v[378,7] = 197 v[379,7] = 57 v[380,7] = 177 v[381,7] = 115 v[382,7] = 187 v[383,7] = 119 v[384,7] = 77 v[385,7] = 211 v[386,7] = 111 v[387,7] = 33 v[388,7] = 113 v[389,7] = 23 v[390,7] = 87 v[391,7] = 137 v[392,7] = 41 v[393,7] = 7 v[394,7] = 83 v[395,7] = 43 v[396,7] = 121 v[397,7] = 145 v[398,7] = 5 v[399,7] = 219 v[400,7] = 27 v[401,7] = 11 v[402,7] = 111 v[403,7] = 207 v[404,7] = 55 v[405,7] = 97 v[406,7] = 63 v[407,7] = 229 v[408,7] = 53 v[409,7] = 33 v[410,7] = 149 v[411,7] = 23 v[412,7] = 187 v[413,7] = 153 v[414,7] = 91 v[415,7] = 193 v[416,7] = 183 v[417,7] = 59 v[418,7] = 211 v[419,7] = 93 v[420,7] = 139 v[421,7] = 59 v[422,7] = 179 v[423,7] = 163 v[424,7] = 209 v[425,7] = 77 v[426,7] = 39 v[427,7] = 111 v[428,7] = 79 v[429,7] = 229 v[430,7] = 85 v[431,7] = 237 v[432,7] = 199 v[433,7] = 137 v[434,7] = 147 v[435,7] = 25 v[436,7] = 73 v[437,7] = 121 v[438,7] = 129 v[439,7] = 83 v[440,7] = 87 v[441,7] = 93 v[442,7] = 205 v[443,7] = 167 
v[444,7] = 53 v[445,7] = 107 v[446,7] = 229 v[447,7] = 213 v[448,7] = 95 v[449,7] = 219 v[450,7] = 109 v[451,7] = 175 v[452,7] = 13 v[453,7] = 209 v[454,7] = 97 v[455,7] = 61 v[456,7] = 147 v[457,7] = 19 v[458,7] = 13 v[459,7] = 123 v[460,7] = 73 v[461,7] = 35 v[462,7] = 141 v[463,7] = 81 v[464,7] = 19 v[465,7] = 171 v[466,7] = 255 v[467,7] = 111 v[468,7] = 107 v[469,7] = 233 v[470,7] = 113 v[471,7] = 133 v[472,7] = 89 v[473,7] = 9 v[474,7] = 231 v[475,7] = 95 v[476,7] = 69 v[477,7] = 33 v[478,7] = 1 v[479,7] = 253 v[480,7] = 219 v[481,7] = 253 v[482,7] = 247 v[483,7] = 129 v[484,7] = 11 v[485,7] = 251 v[486,7] = 221 v[487,7] = 153 v[488,7] = 35 v[489,7] = 103 v[490,7] = 239 v[491,7] = 7 v[492,7] = 27 v[493,7] = 235 v[494,7] = 181 v[495,7] = 5 v[496,7] = 207 v[497,7] = 53 v[498,7] = 149 v[499,7] = 155 v[500,7] = 225 v[501,7] = 165 v[502,7] = 137 v[503,7] = 155 v[504,7] = 201 v[505,7] = 97 v[506,7] = 245 v[507,7] = 203 v[508,7] = 47 v[509,7] = 39 v[510,7] = 35 v[511,7] = 105 v[512,7] = 239 v[513,7] = 49 v[514,7] = 15 v[515,7] = 253 v[516,7] = 7 v[517,7] = 237 v[518,7] = 213 v[519,7] = 55 v[520,7] = 87 v[521,7] = 199 v[522,7] = 27 v[523,7] = 175 v[524,7] = 49 v[525,7] = 41 v[526,7] = 229 v[527,7] = 85 v[528,7] = 3 v[529,7] = 149 v[530,7] = 179 v[531,7] = 129 v[532,7] = 185 v[533,7] = 249 v[534,7] = 197 v[535,7] = 15 v[536,7] = 97 v[537,7] = 197 v[538,7] = 139 v[539,7] = 203 v[540,7] = 63 v[541,7] = 33 v[542,7] = 251 v[543,7] = 217 v[544,7] = 199 v[545,7] = 199 v[546,7] = 99 v[547,7] = 249 v[548,7] = 33 v[549,7] = 229 v[550,7] = 177 v[551,7] = 13 v[552,7] = 209 v[553,7] = 147 v[554,7] = 97 v[555,7] = 31 v[556,7] = 125 v[557,7] = 177 v[558,7] = 137 v[559,7] = 187 v[560,7] = 11 v[561,7] = 91 v[562,7] = 223 v[563,7] = 29 v[564,7] = 169 v[565,7] = 231 v[566,7] = 59 v[567,7] = 31 v[568,7] = 163 v[569,7] = 41 v[570,7] = 57 v[571,7] = 87 v[572,7] = 247 v[573,7] = 25 v[574,7] = 127 v[575,7] = 101 v[576,7] = 207 v[577,7] = 187 v[578,7] = 73 v[579,7] = 61 v[580,7] = 105 
v[581,7] = 27 v[582,7] = 91 v[583,7] = 171 v[584,7] = 243 v[585,7] = 33 v[586,7] = 3 v[587,7] = 1 v[588,7] = 21 v[589,7] = 229 v[590,7] = 93 v[591,7] = 71 v[592,7] = 61 v[593,7] = 37 v[594,7] = 183 v[595,7] = 65 v[596,7] = 211 v[597,7] = 53 v[598,7] = 11 v[599,7] = 151 v[600,7] = 165 v[601,7] = 47 v[602,7] = 5 v[603,7] = 129 v[604,7] = 79 v[605,7] = 101 v[606,7] = 147 v[607,7] = 169 v[608,7] = 181 v[609,7] = 19 v[610,7] = 95 v[611,7] = 77 v[612,7] = 139 v[613,7] = 197 v[614,7] = 219 v[615,7] = 97 v[616,7] = 239 v[617,7] = 183 v[618,7] = 143 v[619,7] = 9 v[620,7] = 13 v[621,7] = 209 v[622,7] = 23 v[623,7] = 215 v[624,7] = 53 v[625,7] = 137 v[626,7] = 203 v[627,7] = 19 v[628,7] = 151 v[629,7] = 171 v[630,7] = 133 v[631,7] = 219 v[632,7] = 231 v[633,7] = 3 v[634,7] = 15 v[635,7] = 253 v[636,7] = 225 v[637,7] = 33 v[638,7] = 111 v[639,7] = 183 v[640,7] = 213 v[641,7] = 169 v[642,7] = 119 v[643,7] = 111 v[644,7] = 15 v[645,7] = 201 v[646,7] = 123 v[647,7] = 121 v[648,7] = 225 v[649,7] = 113 v[650,7] = 113 v[651,7] = 225 v[652,7] = 161 v[653,7] = 165 v[654,7] = 1 v[655,7] = 139 v[656,7] = 55 v[657,7] = 3 v[658,7] = 93 v[659,7] = 217 v[660,7] = 193 v[661,7] = 97 v[662,7] = 29 v[663,7] = 69 v[664,7] = 231 v[665,7] = 161 v[666,7] = 93 v[667,7] = 69 v[668,7] = 143 v[669,7] = 137 v[670,7] = 9 v[671,7] = 87 v[672,7] = 183 v[673,7] = 113 v[674,7] = 183 v[675,7] = 73 v[676,7] = 215 v[677,7] = 137 v[678,7] = 89 v[679,7] = 251 v[680,7] = 163 v[681,7] = 41 v[682,7] = 227 v[683,7] = 145 v[684,7] = 57 v[685,7] = 81 v[686,7] = 57 v[687,7] = 11 v[688,7] = 135 v[689,7] = 145 v[690,7] = 161 v[691,7] = 175 v[692,7] = 159 v[693,7] = 25 v[694,7] = 55 v[695,7] = 167 v[696,7] = 157 v[697,7] = 211 v[698,7] = 97 v[699,7] = 247 v[700,7] = 249 v[701,7] = 23 v[702,7] = 129 v[703,7] = 159 v[704,7] = 71 v[705,7] = 197 v[706,7] = 127 v[707,7] = 141 v[708,7] = 219 v[709,7] = 5 v[710,7] = 233 v[711,7] = 131 v[712,7] = 217 v[713,7] = 101 v[714,7] = 131 v[715,7] = 33 v[716,7] = 157 v[717,7] = 173 
v[718,7] = 69 v[719,7] = 207 v[720,7] = 239 v[721,7] = 81 v[722,7] = 205 v[723,7] = 11 v[724,7] = 41 v[725,7] = 169 v[726,7] = 65 v[727,7] = 193 v[728,7] = 77 v[729,7] = 201 v[730,7] = 173 v[731,7] = 1 v[732,7] = 221 v[733,7] = 157 v[734,7] = 1 v[735,7] = 15 v[736,7] = 113 v[737,7] = 147 v[738,7] = 137 v[739,7] = 205 v[740,7] = 225 v[741,7] = 73 v[742,7] = 45 v[743,7] = 49 v[744,7] = 149 v[745,7] = 113 v[746,7] = 253 v[747,7] = 99 v[748,7] = 17 v[749,7] = 119 v[750,7] = 105 v[751,7] = 117 v[752,7] = 129 v[753,7] = 243 v[754,7] = 75 v[755,7] = 203 v[756,7] = 53 v[757,7] = 29 v[758,7] = 247 v[759,7] = 35 v[760,7] = 247 v[761,7] = 171 v[762,7] = 31 v[763,7] = 199 v[764,7] = 213 v[765,7] = 29 v[766,7] = 251 v[767,7] = 7 v[768,7] = 251 v[769,7] = 187 v[770,7] = 91 v[771,7] = 11 v[772,7] = 149 v[773,7] = 13 v[774,7] = 205 v[775,7] = 37 v[776,7] = 249 v[777,7] = 137 v[778,7] = 139 v[779,7] = 9 v[780,7] = 7 v[781,7] = 113 v[782,7] = 183 v[783,7] = 205 v[784,7] = 187 v[785,7] = 39 v[786,7] = 3 v[787,7] = 79 v[788,7] = 155 v[789,7] = 227 v[790,7] = 89 v[791,7] = 185 v[792,7] = 51 v[793,7] = 127 v[794,7] = 63 v[795,7] = 83 v[796,7] = 41 v[797,7] = 133 v[798,7] = 183 v[799,7] = 181 v[800,7] = 127 v[801,7] = 19 v[802,7] = 255 v[803,7] = 219 v[804,7] = 59 v[805,7] = 251 v[806,7] = 3 v[807,7] = 187 v[808,7] = 57 v[809,7] = 217 v[810,7] = 115 v[811,7] = 217 v[812,7] = 229 v[813,7] = 181 v[814,7] = 185 v[815,7] = 149 v[816,7] = 83 v[817,7] = 115 v[818,7] = 11 v[819,7] = 123 v[820,7] = 19 v[821,7] = 109 v[822,7] = 165 v[823,7] = 103 v[824,7] = 123 v[825,7] = 219 v[826,7] = 129 v[827,7] = 155 v[828,7] = 207 v[829,7] = 177 v[830,7] = 9 v[831,7] = 49 v[832,7] = 181 v[833,7] = 231 v[834,7] = 33 v[835,7] = 233 v[836,7] = 67 v[837,7] = 155 v[838,7] = 41 v[839,7] = 9 v[840,7] = 95 v[841,7] = 123 v[842,7] = 65 v[843,7] = 117 v[844,7] = 249 v[845,7] = 85 v[846,7] = 169 v[847,7] = 129 v[848,7] = 241 v[849,7] = 173 v[850,7] = 251 v[851,7] = 225 v[852,7] = 147 v[853,7] = 165 v[854,7] = 69 
v[855,7] = 81 v[856,7] = 239 v[857,7] = 95 v[858,7] = 23 v[859,7] = 83 v[860,7] = 227 v[861,7] = 249 v[862,7] = 143 v[863,7] = 171 v[864,7] = 193 v[865,7] = 9 v[866,7] = 21 v[867,7] = 57 v[868,7] = 73 v[869,7] = 97 v[870,7] = 57 v[871,7] = 29 v[872,7] = 239 v[873,7] = 151 v[874,7] = 159 v[875,7] = 191 v[876,7] = 47 v[877,7] = 51 v[878,7] = 1 v[879,7] = 223 v[880,7] = 251 v[881,7] = 251 v[882,7] = 151 v[883,7] = 41 v[884,7] = 119 v[885,7] = 127 v[886,7] = 131 v[887,7] = 33 v[888,7] = 209 v[889,7] = 123 v[890,7] = 53 v[891,7] = 241 v[892,7] = 25 v[893,7] = 31 v[894,7] = 183 v[895,7] = 107 v[896,7] = 25 v[897,7] = 115 v[898,7] = 39 v[899,7] = 11 v[900,7] = 213 v[901,7] = 239 v[902,7] = 219 v[903,7] = 109 v[904,7] = 185 v[905,7] = 35 v[906,7] = 133 v[907,7] = 123 v[908,7] = 185 v[909,7] = 27 v[910,7] = 55 v[911,7] = 245 v[912,7] = 61 v[913,7] = 75 v[914,7] = 205 v[915,7] = 213 v[916,7] = 169 v[917,7] = 163 v[918,7] = 63 v[919,7] = 55 v[920,7] = 49 v[921,7] = 83 v[922,7] = 195 v[923,7] = 51 v[924,7] = 31 v[925,7] = 41 v[926,7] = 15 v[927,7] = 203 v[928,7] = 41 v[929,7] = 63 v[930,7] = 127 v[931,7] = 161 v[932,7] = 5 v[933,7] = 143 v[934,7] = 7 v[935,7] = 199 v[936,7] = 251 v[937,7] = 95 v[938,7] = 75 v[939,7] = 101 v[940,7] = 15 v[941,7] = 43 v[942,7] = 237 v[943,7] = 197 v[944,7] = 117 v[945,7] = 167 v[946,7] = 155 v[947,7] = 21 v[948,7] = 83 v[949,7] = 205 v[950,7] = 255 v[951,7] = 49 v[952,7] = 101 v[953,7] = 213 v[954,7] = 237 v[955,7] = 135 v[956,7] = 135 v[957,7] = 21 v[958,7] = 73 v[959,7] = 93 v[960,7] = 115 v[961,7] = 7 v[962,7] = 85 v[963,7] = 223 v[964,7] = 237 v[965,7] = 79 v[966,7] = 89 v[967,7] = 5 v[968,7] = 57 v[969,7] = 239 v[970,7] = 67 v[971,7] = 65 v[972,7] = 201 v[973,7] = 155 v[974,7] = 71 v[975,7] = 85 v[976,7] = 195 v[977,7] = 89 v[978,7] = 181 v[979,7] = 119 v[980,7] = 135 v[981,7] = 147 v[982,7] = 237 v[983,7] = 173 v[984,7] = 41 v[985,7] = 155 v[986,7] = 67 v[987,7] = 113 v[988,7] = 111 v[989,7] = 21 v[990,7] = 183 v[991,7] = 23 v[992,7] = 103 
v[993,7] = 207 v[994,7] = 253 v[995,7] = 69 v[996,7] = 219 v[997,7] = 205 v[998,7] = 195 v[999,7] = 43 v[1000,7] = 197 v[1001,7] = 229 v[1002,7] = 139 v[1003,7] = 177 v[1004,7] = 129 v[1005,7] = 69 v[1006,7] = 97 v[1007,7] = 201 v[1008,7] = 163 v[1009,7] = 189 v[1010,7] = 11 v[1011,7] = 99 v[1012,7] = 91 v[1013,7] = 253 v[1014,7] = 239 v[1015,7] = 91 v[1016,7] = 145 v[1017,7] = 19 v[1018,7] = 179 v[1019,7] = 231 v[1020,7] = 121 v[1021,7] = 7 v[1022,7] = 225 v[1023,7] = 237 v[1024,7] = 125 v[1025,7] = 191 v[1026,7] = 119 v[1027,7] = 59 v[1028,7] = 175 v[1029,7] = 237 v[1030,7] = 131 v[1031,7] = 79 v[1032,7] = 43 v[1033,7] = 45 v[1034,7] = 205 v[1035,7] = 199 v[1036,7] = 251 v[1037,7] = 153 v[1038,7] = 207 v[1039,7] = 37 v[1040,7] = 179 v[1041,7] = 113 v[1042,7] = 255 v[1043,7] = 107 v[1044,7] = 217 v[1045,7] = 61 v[1046,7] = 7 v[1047,7] = 181 v[1048,7] = 247 v[1049,7] = 31 v[1050,7] = 13 v[1051,7] = 113 v[1052,7] = 145 v[1053,7] = 107 v[1054,7] = 233 v[1055,7] = 233 v[1056,7] = 43 v[1057,7] = 79 v[1058,7] = 23 v[1059,7] = 169 v[1060,7] = 137 v[1061,7] = 129 v[1062,7] = 183 v[1063,7] = 53 v[1064,7] = 91 v[1065,7] = 55 v[1066,7] = 103 v[1067,7] = 223 v[1068,7] = 87 v[1069,7] = 177 v[1070,7] = 157 v[1071,7] = 79 v[1072,7] = 213 v[1073,7] = 139 v[1074,7] = 183 v[1075,7] = 231 v[1076,7] = 205 v[1077,7] = 143 v[1078,7] = 129 v[1079,7] = 243 v[1080,7] = 205 v[1081,7] = 93 v[1082,7] = 59 v[1083,7] = 15 v[1084,7] = 89 v[1085,7] = 9 v[1086,7] = 11 v[1087,7] = 47 v[1088,7] = 133 v[1089,7] = 227 v[1090,7] = 75 v[1091,7] = 9 v[1092,7] = 91 v[1093,7] = 19 v[1094,7] = 171 v[1095,7] = 163 v[1096,7] = 79 v[1097,7] = 7 v[1098,7] = 103 v[1099,7] = 5 v[1100,7] = 119 v[1101,7] = 155 v[1102,7] = 75 v[1103,7] = 11 v[1104,7] = 71 v[1105,7] = 95 v[1106,7] = 17 v[1107,7] = 13 v[1108,7] = 243 v[1109,7] = 207 v[1110,7] = 187 v[53,8] = 235 v[54,8] = 307 v[55,8] = 495 v[56,8] = 417 v[57,8] = 57 v[58,8] = 151 v[59,8] = 19 v[60,8] = 119 v[61,8] = 375 v[62,8] = 451 v[63,8] = 55 v[64,8] = 449 
v[65,8] = 501 v[66,8] = 53 v[67,8] = 185 v[68,8] = 317 v[69,8] = 17 v[70,8] = 21 v[71,8] = 487 v[72,8] = 13 v[73,8] = 347 v[74,8] = 393 v[75,8] = 15 v[76,8] = 391 v[77,8] = 307 v[78,8] = 189 v[79,8] = 381 v[80,8] = 71 v[81,8] = 163 v[82,8] = 99 v[83,8] = 467 v[84,8] = 167 v[85,8] = 433 v[86,8] = 337 v[87,8] = 257 v[88,8] = 179 v[89,8] = 47 v[90,8] = 385 v[91,8] = 23 v[92,8] = 117 v[93,8] = 369 v[94,8] = 425 v[95,8] = 207 v[96,8] = 433 v[97,8] = 301 v[98,8] = 147 v[99,8] = 333 v[100,8] = 85 v[101,8] = 221 v[102,8] = 423 v[103,8] = 49 v[104,8] = 3 v[105,8] = 43 v[106,8] = 229 v[107,8] = 227 v[108,8] = 201 v[109,8] = 383 v[110,8] = 281 v[111,8] = 229 v[112,8] = 207 v[113,8] = 21 v[114,8] = 343 v[115,8] = 251 v[116,8] = 397 v[117,8] = 173 v[118,8] = 507 v[119,8] = 421 v[120,8] = 443 v[121,8] = 399 v[122,8] = 53 v[123,8] = 345 v[124,8] = 77 v[125,8] = 385 v[126,8] = 317 v[127,8] = 155 v[128,8] = 187 v[129,8] = 269 v[130,8] = 501 v[131,8] = 19 v[132,8] = 169 v[133,8] = 235 v[134,8] = 415 v[135,8] = 61 v[136,8] = 247 v[137,8] = 183 v[138,8] = 5 v[139,8] = 257 v[140,8] = 401 v[141,8] = 451 v[142,8] = 95 v[143,8] = 455 v[144,8] = 49 v[145,8] = 489 v[146,8] = 75 v[147,8] = 459 v[148,8] = 377 v[149,8] = 87 v[150,8] = 463 v[151,8] = 155 v[152,8] = 233 v[153,8] = 115 v[154,8] = 429 v[155,8] = 211 v[156,8] = 419 v[157,8] = 143 v[158,8] = 487 v[159,8] = 195 v[160,8] = 209 v[161,8] = 461 v[162,8] = 193 v[163,8] = 157 v[164,8] = 193 v[165,8] = 363 v[166,8] = 181 v[167,8] = 271 v[168,8] = 445 v[169,8] = 381 v[170,8] = 231 v[171,8] = 135 v[172,8] = 327 v[173,8] = 403 v[174,8] = 171 v[175,8] = 197 v[176,8] = 181 v[177,8] = 343 v[178,8] = 113 v[179,8] = 313 v[180,8] = 393 v[181,8] = 311 v[182,8] = 415 v[183,8] = 267 v[184,8] = 247 v[185,8] = 425 v[186,8] = 233 v[187,8] = 289 v[188,8] = 55 v[189,8] = 39 v[190,8] = 247 v[191,8] = 327 v[192,8] = 141 v[193,8] = 5 v[194,8] = 189 v[195,8] = 183 v[196,8] = 27 v[197,8] = 337 v[198,8] = 341 v[199,8] = 327 v[200,8] = 87 v[201,8] = 429 v[202,8] = 
357 v[203,8] = 265 v[204,8] = 251 v[205,8] = 437 v[206,8] = 201 v[207,8] = 29 v[208,8] = 339 v[209,8] = 257 v[210,8] = 377 v[211,8] = 17 v[212,8] = 53 v[213,8] = 327 v[214,8] = 47 v[215,8] = 375 v[216,8] = 393 v[217,8] = 369 v[218,8] = 403 v[219,8] = 125 v[220,8] = 429 v[221,8] = 257 v[222,8] = 157 v[223,8] = 217 v[224,8] = 85 v[225,8] = 267 v[226,8] = 117 v[227,8] = 337 v[228,8] = 447 v[229,8] = 219 v[230,8] = 501 v[231,8] = 41 v[232,8] = 41 v[233,8] = 193 v[234,8] = 509 v[235,8] = 131 v[236,8] = 207 v[237,8] = 505 v[238,8] = 421 v[239,8] = 149 v[240,8] = 111 v[241,8] = 177 v[242,8] = 167 v[243,8] = 223 v[244,8] = 291 v[245,8] = 91 v[246,8] = 29 v[247,8] = 305 v[248,8] = 151 v[249,8] = 177 v[250,8] = 337 v[251,8] = 183 v[252,8] = 361 v[253,8] = 435 v[254,8] = 307 v[255,8] = 507 v[256,8] = 77 v[257,8] = 181 v[258,8] = 507 v[259,8] = 315 v[260,8] = 145 v[261,8] = 423 v[262,8] = 71 v[263,8] = 103 v[264,8] = 493 v[265,8] = 271 v[266,8] = 469 v[267,8] = 339 v[268,8] = 237 v[269,8] = 437 v[270,8] = 483 v[271,8] = 31 v[272,8] = 219 v[273,8] = 61 v[274,8] = 131 v[275,8] = 391 v[276,8] = 233 v[277,8] = 219 v[278,8] = 69 v[279,8] = 57 v[280,8] = 459 v[281,8] = 225 v[282,8] = 421 v[283,8] = 7 v[284,8] = 461 v[285,8] = 111 v[286,8] = 451 v[287,8] = 277 v[288,8] = 185 v[289,8] = 193 v[290,8] = 125 v[291,8] = 251 v[292,8] = 199 v[293,8] = 73 v[294,8] = 71 v[295,8] = 7 v[296,8] = 409 v[297,8] = 417 v[298,8] = 149 v[299,8] = 193 v[300,8] = 53 v[301,8] = 437 v[302,8] = 29 v[303,8] = 467 v[304,8] = 229 v[305,8] = 31 v[306,8] = 35 v[307,8] = 75 v[308,8] = 105 v[309,8] = 503 v[310,8] = 75 v[311,8] = 317 v[312,8] = 401 v[313,8] = 367 v[314,8] = 131 v[315,8] = 365 v[316,8] = 441 v[317,8] = 433 v[318,8] = 93 v[319,8] = 377 v[320,8] = 405 v[321,8] = 465 v[322,8] = 259 v[323,8] = 283 v[324,8] = 443 v[325,8] = 143 v[326,8] = 445 v[327,8] = 3 v[328,8] = 461 v[329,8] = 329 v[330,8] = 309 v[331,8] = 77 v[332,8] = 323 v[333,8] = 155 v[334,8] = 347 v[335,8] = 45 v[336,8] = 381 v[337,8] = 315 
v[338,8] = 463 v[339,8] = 207 v[340,8] = 321 v[341,8] = 157 v[342,8] = 109 v[343,8] = 479 v[344,8] = 313 v[345,8] = 345 v[346,8] = 167 v[347,8] = 439 v[348,8] = 307 v[349,8] = 235 v[350,8] = 473 v[351,8] = 79 v[352,8] = 101 v[353,8] = 245 v[354,8] = 19 v[355,8] = 381 v[356,8] = 251 v[357,8] = 35 v[358,8] = 25 v[359,8] = 107 v[360,8] = 187 v[361,8] = 115 v[362,8] = 113 v[363,8] = 321 v[364,8] = 115 v[365,8] = 445 v[366,8] = 61 v[367,8] = 77 v[368,8] = 293 v[369,8] = 405 v[370,8] = 13 v[371,8] = 53 v[372,8] = 17 v[373,8] = 171 v[374,8] = 299 v[375,8] = 41 v[376,8] = 79 v[377,8] = 3 v[378,8] = 485 v[379,8] = 331 v[380,8] = 13 v[381,8] = 257 v[382,8] = 59 v[383,8] = 201 v[384,8] = 497 v[385,8] = 81 v[386,8] = 451 v[387,8] = 199 v[388,8] = 171 v[389,8] = 81 v[390,8] = 253 v[391,8] = 365 v[392,8] = 75 v[393,8] = 451 v[394,8] = 149 v[395,8] = 483 v[396,8] = 81 v[397,8] = 453 v[398,8] = 469 v[399,8] = 485 v[400,8] = 305 v[401,8] = 163 v[402,8] = 401 v[403,8] = 15 v[404,8] = 91 v[405,8] = 3 v[406,8] = 129 v[407,8] = 35 v[408,8] = 239 v[409,8] = 355 v[410,8] = 211 v[411,8] = 387 v[412,8] = 101 v[413,8] = 299 v[414,8] = 67 v[415,8] = 375 v[416,8] = 405 v[417,8] = 357 v[418,8] = 267 v[419,8] = 363 v[420,8] = 79 v[421,8] = 83 v[422,8] = 437 v[423,8] = 457 v[424,8] = 39 v[425,8] = 97 v[426,8] = 473 v[427,8] = 289 v[428,8] = 179 v[429,8] = 57 v[430,8] = 23 v[431,8] = 49 v[432,8] = 79 v[433,8] = 71 v[434,8] = 341 v[435,8] = 287 v[436,8] = 95 v[437,8] = 229 v[438,8] = 271 v[439,8] = 475 v[440,8] = 49 v[441,8] = 241 v[442,8] = 261 v[443,8] = 495 v[444,8] = 353 v[445,8] = 381 v[446,8] = 13 v[447,8] = 291 v[448,8] = 37 v[449,8] = 251 v[450,8] = 105 v[451,8] = 399 v[452,8] = 81 v[453,8] = 89 v[454,8] = 265 v[455,8] = 507 v[456,8] = 205 v[457,8] = 145 v[458,8] = 331 v[459,8] = 129 v[460,8] = 119 v[461,8] = 503 v[462,8] = 249 v[463,8] = 1 v[464,8] = 289 v[465,8] = 463 v[466,8] = 163 v[467,8] = 443 v[468,8] = 63 v[469,8] = 123 v[470,8] = 361 v[471,8] = 261 v[472,8] = 49 v[473,8] = 429 
v[474,8] = 137 v[475,8] = 355 v[476,8] = 175 v[477,8] = 507 v[478,8] = 59 v[479,8] = 277 v[480,8] = 391 v[481,8] = 25 v[482,8] = 185 v[483,8] = 381 v[484,8] = 197 v[485,8] = 39 v[486,8] = 5 v[487,8] = 429 v[488,8] = 119 v[489,8] = 247 v[490,8] = 177 v[491,8] = 329 v[492,8] = 465 v[493,8] = 421 v[494,8] = 271 v[495,8] = 467 v[496,8] = 151 v[497,8] = 45 v[498,8] = 429 v[499,8] = 137 v[500,8] = 471 v[501,8] = 11 v[502,8] = 17 v[503,8] = 409 v[504,8] = 347 v[505,8] = 199 v[506,8] = 463 v[507,8] = 177 v[508,8] = 11 v[509,8] = 51 v[510,8] = 361 v[511,8] = 95 v[512,8] = 497 v[513,8] = 163 v[514,8] = 351 v[515,8] = 127 v[516,8] = 395 v[517,8] = 511 v[518,8] = 327 v[519,8] = 353 v[520,8] = 49 v[521,8] = 105 v[522,8] = 151 v[523,8] = 321 v[524,8] = 331 v[525,8] = 329 v[526,8] = 509 v[527,8] = 107 v[528,8] = 109 v[529,8] = 303 v[530,8] = 467 v[531,8] = 287 v[532,8] = 161 v[533,8] = 45 v[534,8] = 385 v[535,8] = 289 v[536,8] = 363 v[537,8] = 331 v[538,8] = 265 v[539,8] = 407 v[540,8] = 37 v[541,8] = 433 v[542,8] = 315 v[543,8] = 343 v[544,8] = 63 v[545,8] = 51 v[546,8] = 185 v[547,8] = 71 v[548,8] = 27 v[549,8] = 267 v[550,8] = 503 v[551,8] = 239 v[552,8] = 293 v[553,8] = 245 v[554,8] = 281 v[555,8] = 297 v[556,8] = 75 v[557,8] = 461 v[558,8] = 371 v[559,8] = 129 v[560,8] = 189 v[561,8] = 189 v[562,8] = 339 v[563,8] = 287 v[564,8] = 111 v[565,8] = 111 v[566,8] = 379 v[567,8] = 93 v[568,8] = 27 v[569,8] = 185 v[570,8] = 347 v[571,8] = 337 v[572,8] = 247 v[573,8] = 507 v[574,8] = 161 v[575,8] = 231 v[576,8] = 43 v[577,8] = 499 v[578,8] = 73 v[579,8] = 327 v[580,8] = 263 v[581,8] = 331 v[582,8] = 249 v[583,8] = 493 v[584,8] = 37 v[585,8] = 25 v[586,8] = 115 v[587,8] = 3 v[588,8] = 167 v[589,8] = 197 v[590,8] = 127 v[591,8] = 357 v[592,8] = 497 v[593,8] = 103 v[594,8] = 125 v[595,8] = 191 v[596,8] = 165 v[597,8] = 55 v[598,8] = 101 v[599,8] = 95 v[600,8] = 79 v[601,8] = 351 v[602,8] = 341 v[603,8] = 43 v[604,8] = 125 v[605,8] = 135 v[606,8] = 173 v[607,8] = 289 v[608,8] = 373 
v[609,8] = 133 v[610,8] = 421 v[611,8] = 241 v[612,8] = 281 v[613,8] = 213 v[614,8] = 177 v[615,8] = 363 v[616,8] = 151 v[617,8] = 227 v[618,8] = 145 v[619,8] = 363 v[620,8] = 239 v[621,8] = 431 v[622,8] = 81 v[623,8] = 397 v[624,8] = 241 v[625,8] = 67 v[626,8] = 291 v[627,8] = 255 v[628,8] = 405 v[629,8] = 421 v[630,8] = 399 v[631,8] = 75 v[632,8] = 399 v[633,8] = 105 v[634,8] = 329 v[635,8] = 41 v[636,8] = 425 v[637,8] = 7 v[638,8] = 283 v[639,8] = 375 v[640,8] = 475 v[641,8] = 427 v[642,8] = 277 v[643,8] = 209 v[644,8] = 411 v[645,8] = 3 v[646,8] = 137 v[647,8] = 195 v[648,8] = 289 v[649,8] = 509 v[650,8] = 121 v[651,8] = 55 v[652,8] = 147 v[653,8] = 275 v[654,8] = 251 v[655,8] = 19 v[656,8] = 129 v[657,8] = 285 v[658,8] = 415 v[659,8] = 487 v[660,8] = 491 v[661,8] = 193 v[662,8] = 219 v[663,8] = 403 v[664,8] = 23 v[665,8] = 97 v[666,8] = 65 v[667,8] = 285 v[668,8] = 75 v[669,8] = 21 v[670,8] = 373 v[671,8] = 261 v[672,8] = 339 v[673,8] = 239 v[674,8] = 495 v[675,8] = 415 v[676,8] = 333 v[677,8] = 107 v[678,8] = 435 v[679,8] = 297 v[680,8] = 213 v[681,8] = 149 v[682,8] = 463 v[683,8] = 199 v[684,8] = 323 v[685,8] = 45 v[686,8] = 19 v[687,8] = 301 v[688,8] = 121 v[689,8] = 499 v[690,8] = 187 v[691,8] = 229 v[692,8] = 63 v[693,8] = 425 v[694,8] = 99 v[695,8] = 281 v[696,8] = 35 v[697,8] = 125 v[698,8] = 349 v[699,8] = 87 v[700,8] = 101 v[701,8] = 59 v[702,8] = 195 v[703,8] = 511 v[704,8] = 355 v[705,8] = 73 v[706,8] = 263 v[707,8] = 243 v[708,8] = 101 v[709,8] = 165 v[710,8] = 141 v[711,8] = 11 v[712,8] = 389 v[713,8] = 219 v[714,8] = 187 v[715,8] = 449 v[716,8] = 447 v[717,8] = 393 v[718,8] = 477 v[719,8] = 305 v[720,8] = 221 v[721,8] = 51 v[722,8] = 355 v[723,8] = 209 v[724,8] = 499 v[725,8] = 479 v[726,8] = 265 v[727,8] = 377 v[728,8] = 145 v[729,8] = 411 v[730,8] = 173 v[731,8] = 11 v[732,8] = 433 v[733,8] = 483 v[734,8] = 135 v[735,8] = 385 v[736,8] = 341 v[737,8] = 89 v[738,8] = 209 v[739,8] = 391 v[740,8] = 33 v[741,8] = 395 v[742,8] = 319 v[743,8] = 451 
v[744,8] = 119 v[745,8] = 341 v[746,8] = 227 v[747,8] = 375 v[748,8] = 61 v[749,8] = 331 v[750,8] = 493 v[751,8] = 411 v[752,8] = 293 v[753,8] = 47 v[754,8] = 203 v[755,8] = 375 v[756,8] = 167 v[757,8] = 395 v[758,8] = 155 v[759,8] = 5 v[760,8] = 237 v[761,8] = 361 v[762,8] = 489 v[763,8] = 127 v[764,8] = 21 v[765,8] = 345 v[766,8] = 101 v[767,8] = 371 v[768,8] = 233 v[769,8] = 431 v[770,8] = 109 v[771,8] = 119 v[772,8] = 277 v[773,8] = 125 v[774,8] = 263 v[775,8] = 73 v[776,8] = 135 v[777,8] = 123 v[778,8] = 83 v[779,8] = 123 v[780,8] = 405 v[781,8] = 69 v[782,8] = 75 v[783,8] = 287 v[784,8] = 401 v[785,8] = 23 v[786,8] = 283 v[787,8] = 393 v[788,8] = 41 v[789,8] = 379 v[790,8] = 431 v[791,8] = 11 v[792,8] = 475 v[793,8] = 505 v[794,8] = 19 v[795,8] = 365 v[796,8] = 265 v[797,8] = 271 v[798,8] = 499 v[799,8] = 489 v[800,8] = 443 v[801,8] = 165 v[802,8] = 91 v[803,8] = 83 v[804,8] = 291 v[805,8] = 319 v[806,8] = 199 v[807,8] = 107 v[808,8] = 245 v[809,8] = 389 v[810,8] = 143 v[811,8] = 137 v[812,8] = 89 v[813,8] = 125 v[814,8] = 281 v[815,8] = 381 v[816,8] = 215 v[817,8] = 131 v[818,8] = 299 v[819,8] = 249 v[820,8] = 375 v[821,8] = 455 v[822,8] = 43 v[823,8] = 73 v[824,8] = 281 v[825,8] = 217 v[826,8] = 297 v[827,8] = 229 v[828,8] = 431 v[829,8] = 357 v[830,8] = 81 v[831,8] = 357 v[832,8] = 171 v[833,8] = 451 v[834,8] = 481 v[835,8] = 13 v[836,8] = 387 v[837,8] = 491 v[838,8] = 489 v[839,8] = 439 v[840,8] = 385 v[841,8] = 487 v[842,8] = 177 v[843,8] = 393 v[844,8] = 33 v[845,8] = 71 v[846,8] = 375 v[847,8] = 443 v[848,8] = 129 v[849,8] = 407 v[850,8] = 395 v[851,8] = 127 v[852,8] = 65 v[853,8] = 333 v[854,8] = 309 v[855,8] = 119 v[856,8] = 197 v[857,8] = 435 v[858,8] = 497 v[859,8] = 373 v[860,8] = 71 v[861,8] = 379 v[862,8] = 509 v[863,8] = 387 v[864,8] = 159 v[865,8] = 265 v[866,8] = 477 v[867,8] = 463 v[868,8] = 449 v[869,8] = 47 v[870,8] = 353 v[871,8] = 249 v[872,8] = 335 v[873,8] = 505 v[874,8] = 89 v[875,8] = 141 v[876,8] = 55 v[877,8] = 235 v[878,8] = 187 
v[879,8] = 87 v[880,8] = 363 v[881,8] = 93 v[882,8] = 363 v[883,8] = 101 v[884,8] = 67 v[885,8] = 215 v[886,8] = 321 v[887,8] = 331 v[888,8] = 305 v[889,8] = 261 v[890,8] = 411 v[891,8] = 491 v[892,8] = 479 v[893,8] = 65 v[894,8] = 307 v[895,8] = 469 v[896,8] = 415 v[897,8] = 131 v[898,8] = 315 v[899,8] = 487 v[900,8] = 83 v[901,8] = 455 v[902,8] = 19 v[903,8] = 113 v[904,8] = 163 v[905,8] = 503 v[906,8] = 99 v[907,8] = 499 v[908,8] = 251 v[909,8] = 239 v[910,8] = 81 v[911,8] = 167 v[912,8] = 391 v[913,8] = 255 v[914,8] = 317 v[915,8] = 363 v[916,8] = 359 v[917,8] = 395 v[918,8] = 419 v[919,8] = 307 v[920,8] = 251 v[921,8] = 267 v[922,8] = 171 v[923,8] = 461 v[924,8] = 183 v[925,8] = 465 v[926,8] = 165 v[927,8] = 163 v[928,8] = 293 v[929,8] = 477 v[930,8] = 223 v[931,8] = 403 v[932,8] = 389 v[933,8] = 97 v[934,8] = 335 v[935,8] = 357 v[936,8] = 297 v[937,8] = 19 v[938,8] = 469 v[939,8] = 501 v[940,8] = 249 v[941,8] = 85 v[942,8] = 213 v[943,8] = 311 v[944,8] = 265 v[945,8] = 379 v[946,8] = 297 v[947,8] = 283 v[948,8] = 393 v[949,8] = 449 v[950,8] = 463 v[951,8] = 289 v[952,8] = 159 v[953,8] = 289 v[954,8] = 499 v[955,8] = 407 v[956,8] = 129 v[957,8] = 137 v[958,8] = 221 v[959,8] = 43 v[960,8] = 89 v[961,8] = 403 v[962,8] = 271 v[963,8] = 75 v[964,8] = 83 v[965,8] = 445 v[966,8] = 453 v[967,8] = 389 v[968,8] = 149 v[969,8] = 143 v[970,8] = 423 v[971,8] = 499 v[972,8] = 317 v[973,8] = 445 v[974,8] = 157 v[975,8] = 137 v[976,8] = 453 v[977,8] = 163 v[978,8] = 87 v[979,8] = 23 v[980,8] = 391 v[981,8] = 119 v[982,8] = 427 v[983,8] = 323 v[984,8] = 173 v[985,8] = 89 v[986,8] = 259 v[987,8] = 377 v[988,8] = 511 v[989,8] = 249 v[990,8] = 31 v[991,8] = 363 v[992,8] = 229 v[993,8] = 353 v[994,8] = 329 v[995,8] = 493 v[996,8] = 427 v[997,8] = 57 v[998,8] = 205 v[999,8] = 389 v[1000,8] = 91 v[1001,8] = 83 v[1002,8] = 13 v[1003,8] = 219 v[1004,8] = 439 v[1005,8] = 45 v[1006,8] = 35 v[1007,8] = 371 v[1008,8] = 441 v[1009,8] = 17 v[1010,8] = 267 v[1011,8] = 501 v[1012,8] = 53 
v[1013,8] = 25 v[1014,8] = 333 v[1015,8] = 17 v[1016,8] = 201 v[1017,8] = 475 v[1018,8] = 257 v[1019,8] = 417 v[1020,8] = 345 v[1021,8] = 381 v[1022,8] = 377 v[1023,8] = 55 v[1024,8] = 403 v[1025,8] = 77 v[1026,8] = 389 v[1027,8] = 347 v[1028,8] = 363 v[1029,8] = 211 v[1030,8] = 413 v[1031,8] = 419 v[1032,8] = 5 v[1033,8] = 167 v[1034,8] = 219 v[1035,8] = 201 v[1036,8] = 285 v[1037,8] = 425 v[1038,8] = 11 v[1039,8] = 77 v[1040,8] = 269 v[1041,8] = 489 v[1042,8] = 281 v[1043,8] = 403 v[1044,8] = 79 v[1045,8] = 425 v[1046,8] = 125 v[1047,8] = 81 v[1048,8] = 331 v[1049,8] = 437 v[1050,8] = 271 v[1051,8] = 397 v[1052,8] = 299 v[1053,8] = 475 v[1054,8] = 271 v[1055,8] = 249 v[1056,8] = 413 v[1057,8] = 233 v[1058,8] = 261 v[1059,8] = 495 v[1060,8] = 171 v[1061,8] = 69 v[1062,8] = 27 v[1063,8] = 409 v[1064,8] = 21 v[1065,8] = 421 v[1066,8] = 367 v[1067,8] = 81 v[1068,8] = 483 v[1069,8] = 255 v[1070,8] = 15 v[1071,8] = 219 v[1072,8] = 365 v[1073,8] = 497 v[1074,8] = 181 v[1075,8] = 75 v[1076,8] = 431 v[1077,8] = 99 v[1078,8] = 325 v[1079,8] = 407 v[1080,8] = 229 v[1081,8] = 281 v[1082,8] = 63 v[1083,8] = 83 v[1084,8] = 493 v[1085,8] = 5 v[1086,8] = 113 v[1087,8] = 15 v[1088,8] = 271 v[1089,8] = 37 v[1090,8] = 87 v[1091,8] = 451 v[1092,8] = 299 v[1093,8] = 83 v[1094,8] = 451 v[1095,8] = 311 v[1096,8] = 441 v[1097,8] = 47 v[1098,8] = 455 v[1099,8] = 47 v[1100,8] = 253 v[1101,8] = 13 v[1102,8] = 109 v[1103,8] = 369 v[1104,8] = 347 v[1105,8] = 11 v[1106,8] = 409 v[1107,8] = 275 v[1108,8] = 63 v[1109,8] = 441 v[1110,8] = 15 v[101,9] = 519 v[102,9] = 307 v[103,9] = 931 v[104,9] = 1023 v[105,9] = 517 v[106,9] = 771 v[107,9] = 151 v[108,9] = 1023 v[109,9] = 539 v[110,9] = 725 v[111,9] = 45 v[112,9] = 927 v[113,9] = 707 v[114,9] = 29 v[115,9] = 125 v[116,9] = 371 v[117,9] = 275 v[118,9] = 279 v[119,9] = 817 v[120,9] = 389 v[121,9] = 453 v[122,9] = 989 v[123,9] = 1015 v[124,9] = 29 v[125,9] = 169 v[126,9] = 743 v[127,9] = 99 v[128,9] = 923 v[129,9] = 981 v[130,9] = 181 v[131,9] = 
693 v[132,9] = 309 v[133,9] = 227 v[134,9] = 111 v[135,9] = 219 v[136,9] = 897 v[137,9] = 377 v[138,9] = 425 v[139,9] = 609 v[140,9] = 227 v[141,9] = 19 v[142,9] = 221 v[143,9] = 143 v[144,9] = 581 v[145,9] = 147 v[146,9] = 919 v[147,9] = 127 v[148,9] = 725 v[149,9] = 793 v[150,9] = 289 v[151,9] = 411 v[152,9] = 835 v[153,9] = 921 v[154,9] = 957 v[155,9] = 443 v[156,9] = 349 v[157,9] = 813 v[158,9] = 5 v[159,9] = 105 v[160,9] = 457 v[161,9] = 393 v[162,9] = 539 v[163,9] = 101 v[164,9] = 197 v[165,9] = 697 v[166,9] = 27 v[167,9] = 343 v[168,9] = 515 v[169,9] = 69 v[170,9] = 485 v[171,9] = 383 v[172,9] = 855 v[173,9] = 693 v[174,9] = 133 v[175,9] = 87 v[176,9] = 743 v[177,9] = 747 v[178,9] = 475 v[179,9] = 87 v[180,9] = 469 v[181,9] = 763 v[182,9] = 721 v[183,9] = 345 v[184,9] = 479 v[185,9] = 965 v[186,9] = 527 v[187,9] = 121 v[188,9] = 271 v[189,9] = 353 v[190,9] = 467 v[191,9] = 177 v[192,9] = 245 v[193,9] = 627 v[194,9] = 113 v[195,9] = 357 v[196,9] = 7 v[197,9] = 691 v[198,9] = 725 v[199,9] = 355 v[200,9] = 889 v[201,9] = 635 v[202,9] = 737 v[203,9] = 429 v[204,9] = 545 v[205,9] = 925 v[206,9] = 357 v[207,9] = 873 v[208,9] = 187 v[209,9] = 351 v[210,9] = 677 v[211,9] = 999 v[212,9] = 921 v[213,9] = 477 v[214,9] = 233 v[215,9] = 765 v[216,9] = 495 v[217,9] = 81 v[218,9] = 953 v[219,9] = 479 v[220,9] = 89 v[221,9] = 173 v[222,9] = 473 v[223,9] = 131 v[224,9] = 961 v[225,9] = 411 v[226,9] = 291 v[227,9] = 967 v[228,9] = 65 v[229,9] = 511 v[230,9] = 13 v[231,9] = 805 v[232,9] = 945 v[233,9] = 369 v[234,9] = 827 v[235,9] = 295 v[236,9] = 163 v[237,9] = 835 v[238,9] = 259 v[239,9] = 207 v[240,9] = 331 v[241,9] = 29 v[242,9] = 315 v[243,9] = 999 v[244,9] = 133 v[245,9] = 967 v[246,9] = 41 v[247,9] = 117 v[248,9] = 677 v[249,9] = 471 v[250,9] = 717 v[251,9] = 881 v[252,9] = 755 v[253,9] = 351 v[254,9] = 723 v[255,9] = 259 v[256,9] = 879 v[257,9] = 455 v[258,9] = 721 v[259,9] = 289 v[260,9] = 149 v[261,9] = 199 v[262,9] = 805 v[263,9] = 987 v[264,9] = 851 v[265,9] = 423 
v[266,9] = 597 v[267,9] = 129 v[268,9] = 11 v[269,9] = 733 v[270,9] = 549 v[271,9] = 153 v[272,9] = 285 v[273,9] = 451 v[274,9] = 559 v[275,9] = 377 v[276,9] = 109 v[277,9] = 357 v[278,9] = 143 v[279,9] = 693 v[280,9] = 615 v[281,9] = 677 v[282,9] = 701 v[283,9] = 475 v[284,9] = 767 v[285,9] = 85 v[286,9] = 229 v[287,9] = 509 v[288,9] = 547 v[289,9] = 151 v[290,9] = 389 v[291,9] = 711 v[292,9] = 785 v[293,9] = 657 v[294,9] = 319 v[295,9] = 509 v[296,9] = 99 v[297,9] = 1007 v[298,9] = 775 v[299,9] = 359 v[300,9] = 697 v[301,9] = 677 v[302,9] = 85 v[303,9] = 497 v[304,9] = 105 v[305,9] = 615 v[306,9] = 891 v[307,9] = 71 v[308,9] = 449 v[309,9] = 835 v[310,9] = 609 v[311,9] = 377 v[312,9] = 693 v[313,9] = 665 v[314,9] = 627 v[315,9] = 215 v[316,9] = 911 v[317,9] = 503 v[318,9] = 729 v[319,9] = 131 v[320,9] = 19 v[321,9] = 895 v[322,9] = 199 v[323,9] = 161 v[324,9] = 239 v[325,9] = 633 v[326,9] = 1013 v[327,9] = 537 v[328,9] = 255 v[329,9] = 23 v[330,9] = 149 v[331,9] = 679 v[332,9] = 1021 v[333,9] = 595 v[334,9] = 199 v[335,9] = 557 v[336,9] = 659 v[337,9] = 251 v[338,9] = 829 v[339,9] = 727 v[340,9] = 439 v[341,9] = 495 v[342,9] = 647 v[343,9] = 223 v[344,9] = 949 v[345,9] = 625 v[346,9] = 87 v[347,9] = 481 v[348,9] = 85 v[349,9] = 799 v[350,9] = 917 v[351,9] = 769 v[352,9] = 949 v[353,9] = 739 v[354,9] = 115 v[355,9] = 499 v[356,9] = 945 v[357,9] = 547 v[358,9] = 225 v[359,9] = 1015 v[360,9] = 469 v[361,9] = 737 v[362,9] = 495 v[363,9] = 353 v[364,9] = 103 v[365,9] = 17 v[366,9] = 665 v[367,9] = 639 v[368,9] = 525 v[369,9] = 75 v[370,9] = 447 v[371,9] = 185 v[372,9] = 43 v[373,9] = 729 v[374,9] = 577 v[375,9] = 863 v[376,9] = 735 v[377,9] = 317 v[378,9] = 99 v[379,9] = 17 v[380,9] = 477 v[381,9] = 893 v[382,9] = 537 v[383,9] = 519 v[384,9] = 1017 v[385,9] = 375 v[386,9] = 297 v[387,9] = 325 v[388,9] = 999 v[389,9] = 353 v[390,9] = 343 v[391,9] = 729 v[392,9] = 135 v[393,9] = 489 v[394,9] = 859 v[395,9] = 267 v[396,9] = 141 v[397,9] = 831 v[398,9] = 141 v[399,9] = 
893 v[400,9] = 249 v[401,9] = 807 v[402,9] = 53 v[403,9] = 613 v[404,9] = 131 v[405,9] = 547 v[406,9] = 977 v[407,9] = 131 v[408,9] = 999 v[409,9] = 175 v[410,9] = 31 v[411,9] = 341 v[412,9] = 739 v[413,9] = 467 v[414,9] = 675 v[415,9] = 241 v[416,9] = 645 v[417,9] = 247 v[418,9] = 391 v[419,9] = 583 v[420,9] = 183 v[421,9] = 973 v[422,9] = 433 v[423,9] = 367 v[424,9] = 131 v[425,9] = 467 v[426,9] = 571 v[427,9] = 309 v[428,9] = 385 v[429,9] = 977 v[430,9] = 111 v[431,9] = 917 v[432,9] = 935 v[433,9] = 473 v[434,9] = 345 v[435,9] = 411 v[436,9] = 313 v[437,9] = 97 v[438,9] = 149 v[439,9] = 959 v[440,9] = 841 v[441,9] = 839 v[442,9] = 669 v[443,9] = 431 v[444,9] = 51 v[445,9] = 41 v[446,9] = 301 v[447,9] = 247 v[448,9] = 1015 v[449,9] = 377 v[450,9] = 329 v[451,9] = 945 v[452,9] = 269 v[453,9] = 67 v[454,9] = 979 v[455,9] = 581 v[456,9] = 643 v[457,9] = 823 v[458,9] = 557 v[459,9] = 91 v[460,9] = 405 v[461,9] = 117 v[462,9] = 801 v[463,9] = 509 v[464,9] = 347 v[465,9] = 893 v[466,9] = 303 v[467,9] = 227 v[468,9] = 783 v[469,9] = 555 v[470,9] = 867 v[471,9] = 99 v[472,9] = 703 v[473,9] = 111 v[474,9] = 797 v[475,9] = 873 v[476,9] = 541 v[477,9] = 919 v[478,9] = 513 v[479,9] = 343 v[480,9] = 319 v[481,9] = 517 v[482,9] = 135 v[483,9] = 871 v[484,9] = 917 v[485,9] = 285 v[486,9] = 663 v[487,9] = 301 v[488,9] = 15 v[489,9] = 763 v[490,9] = 89 v[491,9] = 323 v[492,9] = 757 v[493,9] = 317 v[494,9] = 807 v[495,9] = 309 v[496,9] = 1013 v[497,9] = 345 v[498,9] = 499 v[499,9] = 279 v[500,9] = 711 v[501,9] = 915 v[502,9] = 411 v[503,9] = 281 v[504,9] = 193 v[505,9] = 739 v[506,9] = 365 v[507,9] = 315 v[508,9] = 375 v[509,9] = 809 v[510,9] = 469 v[511,9] = 487 v[512,9] = 621 v[513,9] = 857 v[514,9] = 975 v[515,9] = 537 v[516,9] = 939 v[517,9] = 585 v[518,9] = 129 v[519,9] = 625 v[520,9] = 447 v[521,9] = 129 v[522,9] = 1017 v[523,9] = 133 v[524,9] = 83 v[525,9] = 3 v[526,9] = 415 v[527,9] = 661 v[528,9] = 53 v[529,9] = 115 v[530,9] = 903 v[531,9] = 49 v[532,9] = 79 v[533,9] = 55 
v[534,9] = 385 v[535,9] = 261 v[536,9] = 345 v[537,9] = 297 v[538,9] = 199 v[539,9] = 385 v[540,9] = 617 v[541,9] = 25 v[542,9] = 515 v[543,9] = 275 v[544,9] = 849 v[545,9] = 401 v[546,9] = 471 v[547,9] = 377 v[548,9] = 661 v[549,9] = 535 v[550,9] = 505 v[551,9] = 939 v[552,9] = 465 v[553,9] = 225 v[554,9] = 929 v[555,9] = 219 v[556,9] = 955 v[557,9] = 659 v[558,9] = 441 v[559,9] = 117 v[560,9] = 527 v[561,9] = 427 v[562,9] = 515 v[563,9] = 287 v[564,9] = 191 v[565,9] = 33 v[566,9] = 389 v[567,9] = 197 v[568,9] = 825 v[569,9] = 63 v[570,9] = 417 v[571,9] = 949 v[572,9] = 35 v[573,9] = 571 v[574,9] = 9 v[575,9] = 131 v[576,9] = 609 v[577,9] = 439 v[578,9] = 95 v[579,9] = 19 v[580,9] = 569 v[581,9] = 893 v[582,9] = 451 v[583,9] = 397 v[584,9] = 971 v[585,9] = 801 v[586,9] = 125 v[587,9] = 471 v[588,9] = 187 v[589,9] = 257 v[590,9] = 67 v[591,9] = 949 v[592,9] = 621 v[593,9] = 453 v[594,9] = 411 v[595,9] = 621 v[596,9] = 955 v[597,9] = 309 v[598,9] = 783 v[599,9] = 893 v[600,9] = 597 v[601,9] = 377 v[602,9] = 753 v[603,9] = 145 v[604,9] = 637 v[605,9] = 941 v[606,9] = 593 v[607,9] = 317 v[608,9] = 555 v[609,9] = 375 v[610,9] = 575 v[611,9] = 175 v[612,9] = 403 v[613,9] = 571 v[614,9] = 555 v[615,9] = 109 v[616,9] = 377 v[617,9] = 931 v[618,9] = 499 v[619,9] = 649 v[620,9] = 653 v[621,9] = 329 v[622,9] = 279 v[623,9] = 271 v[624,9] = 647 v[625,9] = 721 v[626,9] = 665 v[627,9] = 429 v[628,9] = 957 v[629,9] = 803 v[630,9] = 767 v[631,9] = 425 v[632,9] = 477 v[633,9] = 995 v[634,9] = 105 v[635,9] = 495 v[636,9] = 575 v[637,9] = 687 v[638,9] = 385 v[639,9] = 227 v[640,9] = 923 v[641,9] = 563 v[642,9] = 723 v[643,9] = 481 v[644,9] = 717 v[645,9] = 111 v[646,9] = 633 v[647,9] = 113 v[648,9] = 369 v[649,9] = 955 v[650,9] = 253 v[651,9] = 321 v[652,9] = 409 v[653,9] = 909 v[654,9] = 367 v[655,9] = 33 v[656,9] = 967 v[657,9] = 453 v[658,9] = 863 v[659,9] = 449 v[660,9] = 539 v[661,9] = 781 v[662,9] = 911 v[663,9] = 113 v[664,9] = 7 v[665,9] = 219 v[666,9] = 725 v[667,9] = 1015 
v[668,9] = 971 v[669,9] = 1021 v[670,9] = 525 v[671,9] = 785 v[672,9] = 873 v[673,9] = 191 v[674,9] = 893 v[675,9] = 297 v[676,9] = 507 v[677,9] = 215 v[678,9] = 21 v[679,9] = 153 v[680,9] = 645 v[681,9] = 913 v[682,9] = 755 v[683,9] = 371 v[684,9] = 881 v[685,9] = 113 v[686,9] = 903 v[687,9] = 225 v[688,9] = 49 v[689,9] = 587 v[690,9] = 201 v[691,9] = 927 v[692,9] = 429 v[693,9] = 599 v[694,9] = 513 v[695,9] = 97 v[696,9] = 319 v[697,9] = 331 v[698,9] = 833 v[699,9] = 325 v[700,9] = 887 v[701,9] = 139 v[702,9] = 927 v[703,9] = 399 v[704,9] = 163 v[705,9] = 307 v[706,9] = 803 v[707,9] = 169 v[708,9] = 1019 v[709,9] = 869 v[710,9] = 537 v[711,9] = 907 v[712,9] = 479 v[713,9] = 335 v[714,9] = 697 v[715,9] = 479 v[716,9] = 353 v[717,9] = 769 v[718,9] = 787 v[719,9] = 1023 v[720,9] = 855 v[721,9] = 493 v[722,9] = 883 v[723,9] = 521 v[724,9] = 735 v[725,9] = 297 v[726,9] = 1011 v[727,9] = 991 v[728,9] = 879 v[729,9] = 855 v[730,9] = 591 v[731,9] = 415 v[732,9] = 917 v[733,9] = 375 v[734,9] = 453 v[735,9] = 553 v[736,9] = 189 v[737,9] = 841 v[738,9] = 339 v[739,9] = 211 v[740,9] = 601 v[741,9] = 57 v[742,9] = 765 v[743,9] = 745 v[744,9] = 621 v[745,9] = 209 v[746,9] = 875 v[747,9] = 639 v[748,9] = 7 v[749,9] = 595 v[750,9] = 971 v[751,9] = 263 v[752,9] = 1009 v[753,9] = 201 v[754,9] = 23 v[755,9] = 77 v[756,9] = 621 v[757,9] = 33 v[758,9] = 535 v[759,9] = 963 v[760,9] = 661 v[761,9] = 523 v[762,9] = 263 v[763,9] = 917 v[764,9] = 103 v[765,9] = 623 v[766,9] = 231 v[767,9] = 47 v[768,9] = 301 v[769,9] = 549 v[770,9] = 337 v[771,9] = 675 v[772,9] = 189 v[773,9] = 357 v[774,9] = 1005 v[775,9] = 789 v[776,9] = 189 v[777,9] = 319 v[778,9] = 721 v[779,9] = 1005 v[780,9] = 525 v[781,9] = 675 v[782,9] = 539 v[783,9] = 191 v[784,9] = 813 v[785,9] = 917 v[786,9] = 51 v[787,9] = 167 v[788,9] = 415 v[789,9] = 579 v[790,9] = 755 v[791,9] = 605 v[792,9] = 721 v[793,9] = 837 v[794,9] = 529 v[795,9] = 31 v[796,9] = 327 v[797,9] = 799 v[798,9] = 961 v[799,9] = 279 v[800,9] = 409 v[801,9] 
= 847 v[802,9] = 649 v[803,9] = 241 v[804,9] = 285 v[805,9] = 545 v[806,9] = 407 v[807,9] = 161 v[808,9] = 591 v[809,9] = 73 v[810,9] = 313 v[811,9] = 811 v[812,9] = 17 v[813,9] = 663 v[814,9] = 269 v[815,9] = 261 v[816,9] = 37 v[817,9] = 783 v[818,9] = 127 v[819,9] = 917 v[820,9] = 231 v[821,9] = 577 v[822,9] = 975 v[823,9] = 793 v[824,9] = 921 v[825,9] = 343 v[826,9] = 751 v[827,9] = 139 v[828,9] = 221 v[829,9] = 79 v[830,9] = 817 v[831,9] = 393 v[832,9] = 545 v[833,9] = 11 v[834,9] = 781 v[835,9] = 71 v[836,9] = 1 v[837,9] = 699 v[838,9] = 767 v[839,9] = 917 v[840,9] = 9 v[841,9] = 107 v[842,9] = 341 v[843,9] = 587 v[844,9] = 903 v[845,9] = 965 v[846,9] = 599 v[847,9] = 507 v[848,9] = 843 v[849,9] = 739 v[850,9] = 579 v[851,9] = 397 v[852,9] = 397 v[853,9] = 325 v[854,9] = 775 v[855,9] = 565 v[856,9] = 925 v[857,9] = 75 v[858,9] = 55 v[859,9] = 979 v[860,9] = 931 v[861,9] = 93 v[862,9] = 957 v[863,9] = 857 v[864,9] = 753 v[865,9] = 965 v[866,9] = 795 v[867,9] = 67 v[868,9] = 5 v[869,9] = 87 v[870,9] = 909 v[871,9] = 97 v[872,9] = 995 v[873,9] = 271 v[874,9] = 875 v[875,9] = 671 v[876,9] = 613 v[877,9] = 33 v[878,9] = 351 v[879,9] = 69 v[880,9] = 811 v[881,9] = 669 v[882,9] = 729 v[883,9] = 401 v[884,9] = 647 v[885,9] = 241 v[886,9] = 435 v[887,9] = 447 v[888,9] = 721 v[889,9] = 271 v[890,9] = 745 v[891,9] = 53 v[892,9] = 775 v[893,9] = 99 v[894,9] = 343 v[895,9] = 451 v[896,9] = 427 v[897,9] = 593 v[898,9] = 339 v[899,9] = 845 v[900,9] = 243 v[901,9] = 345 v[902,9] = 17 v[903,9] = 573 v[904,9] = 421 v[905,9] = 517 v[906,9] = 971 v[907,9] = 499 v[908,9] = 435 v[909,9] = 769 v[910,9] = 75 v[911,9] = 203 v[912,9] = 793 v[913,9] = 985 v[914,9] = 343 v[915,9] = 955 v[916,9] = 735 v[917,9] = 523 v[918,9] = 659 v[919,9] = 703 v[920,9] = 303 v[921,9] = 421 v[922,9] = 951 v[923,9] = 405 v[924,9] = 631 v[925,9] = 825 v[926,9] = 735 v[927,9] = 433 v[928,9] = 841 v[929,9] = 485 v[930,9] = 49 v[931,9] = 749 v[932,9] = 107 v[933,9] = 669 v[934,9] = 211 v[935,9] = 497 v[936,9] 
= 143 v[937,9] = 99 v[938,9] = 57 v[939,9] = 277 v[940,9] = 969 v[941,9] = 107 v[942,9] = 397 v[943,9] = 563 v[944,9] = 551 v[945,9] = 447 v[946,9] = 381 v[947,9] = 187 v[948,9] = 57 v[949,9] = 405 v[950,9] = 731 v[951,9] = 769 v[952,9] = 923 v[953,9] = 955 v[954,9] = 915 v[955,9] = 737 v[956,9] = 595 v[957,9] = 341 v[958,9] = 253 v[959,9] = 823 v[960,9] = 197 v[961,9] = 321 v[962,9] = 315 v[963,9] = 181 v[964,9] = 885 v[965,9] = 497 v[966,9] = 159 v[967,9] = 571 v[968,9] = 981 v[969,9] = 899 v[970,9] = 785 v[971,9] = 947 v[972,9] = 217 v[973,9] = 217 v[974,9] = 135 v[975,9] = 753 v[976,9] = 623 v[977,9] = 565 v[978,9] = 717 v[979,9] = 903 v[980,9] = 581 v[981,9] = 955 v[982,9] = 621 v[983,9] = 361 v[984,9] = 869 v[985,9] = 87 v[986,9] = 943 v[987,9] = 907 v[988,9] = 853 v[989,9] = 353 v[990,9] = 335 v[991,9] = 197 v[992,9] = 771 v[993,9] = 433 v[994,9] = 743 v[995,9] = 195 v[996,9] = 91 v[997,9] = 1023 v[998,9] = 63 v[999,9] = 301 v[1000,9] = 647 v[1001,9] = 205 v[1002,9] = 485 v[1003,9] = 927 v[1004,9] = 1003 v[1005,9] = 987 v[1006,9] = 359 v[1007,9] = 577 v[1008,9] = 147 v[1009,9] = 141 v[1010,9] = 1017 v[1011,9] = 701 v[1012,9] = 273 v[1013,9] = 89 v[1014,9] = 589 v[1015,9] = 487 v[1016,9] = 859 v[1017,9] = 343 v[1018,9] = 91 v[1019,9] = 847 v[1020,9] = 341 v[1021,9] = 173 v[1022,9] = 287 v[1023,9] = 1003 v[1024,9] = 289 v[1025,9] = 639 v[1026,9] = 983 v[1027,9] = 685 v[1028,9] = 697 v[1029,9] = 35 v[1030,9] = 701 v[1031,9] = 645 v[1032,9] = 911 v[1033,9] = 501 v[1034,9] = 705 v[1035,9] = 873 v[1036,9] = 763 v[1037,9] = 745 v[1038,9] = 657 v[1039,9] = 559 v[1040,9] = 699 v[1041,9] = 315 v[1042,9] = 347 v[1043,9] = 429 v[1044,9] = 197 v[1045,9] = 165 v[1046,9] = 955 v[1047,9] = 859 v[1048,9] = 167 v[1049,9] = 303 v[1050,9] = 833 v[1051,9] = 531 v[1052,9] = 473 v[1053,9] = 635 v[1054,9] = 641 v[1055,9] = 195 v[1056,9] = 589 v[1057,9] = 821 v[1058,9] = 205 v[1059,9] = 3 v[1060,9] = 635 v[1061,9] = 371 v[1062,9] = 891 v[1063,9] = 249 v[1064,9] = 123 v[1065,9] = 77 
v[1066,9] = 623 v[1067,9] = 993 v[1068,9] = 401 v[1069,9] = 525 v[1070,9] = 427 v[1071,9] = 71 v[1072,9] = 655 v[1073,9] = 951 v[1074,9] = 357 v[1075,9] = 851 v[1076,9] = 899 v[1077,9] = 535 v[1078,9] = 493 v[1079,9] = 323 v[1080,9] = 1003 v[1081,9] = 343 v[1082,9] = 515 v[1083,9] = 859 v[1084,9] = 1017 v[1085,9] = 5 v[1086,9] = 423 v[1087,9] = 315 v[1088,9] = 1011 v[1089,9] = 703 v[1090,9] = 41 v[1091,9] = 777 v[1092,9] = 163 v[1093,9] = 95 v[1094,9] = 831 v[1095,9] = 79 v[1096,9] = 975 v[1097,9] = 235 v[1098,9] = 633 v[1099,9] = 723 v[1100,9] = 297 v[1101,9] = 589 v[1102,9] = 317 v[1103,9] = 679 v[1104,9] = 981 v[1105,9] = 195 v[1106,9] = 399 v[1107,9] = 1003 v[1108,9] = 121 v[1109,9] = 501 v[1110,9] = 155 v[161,10] = 7 v[162,10] = 2011 v[163,10] = 1001 v[164,10] = 49 v[165,10] = 825 v[166,10] = 415 v[167,10] = 1441 v[168,10] = 383 v[169,10] = 1581 v[170,10] = 623 v[171,10] = 1621 v[172,10] = 1319 v[173,10] = 1387 v[174,10] = 619 v[175,10] = 839 v[176,10] = 217 v[177,10] = 75 v[178,10] = 1955 v[179,10] = 505 v[180,10] = 281 v[181,10] = 1629 v[182,10] = 1379 v[183,10] = 53 v[184,10] = 1111 v[185,10] = 1399 v[186,10] = 301 v[187,10] = 209 v[188,10] = 49 v[189,10] = 155 v[190,10] = 1647 v[191,10] = 631 v[192,10] = 129 v[193,10] = 1569 v[194,10] = 335 v[195,10] = 67 v[196,10] = 1955 v[197,10] = 1611 v[198,10] = 2021 v[199,10] = 1305 v[200,10] = 121 v[201,10] = 37 v[202,10] = 877 v[203,10] = 835 v[204,10] = 1457 v[205,10] = 669 v[206,10] = 1405 v[207,10] = 935 v[208,10] = 1735 v[209,10] = 665 v[210,10] = 551 v[211,10] = 789 v[212,10] = 1543 v[213,10] = 1267 v[214,10] = 1027 v[215,10] = 1 v[216,10] = 1911 v[217,10] = 163 v[218,10] = 1929 v[219,10] = 67 v[220,10] = 1975 v[221,10] = 1681 v[222,10] = 1413 v[223,10] = 191 v[224,10] = 1711 v[225,10] = 1307 v[226,10] = 401 v[227,10] = 725 v[228,10] = 1229 v[229,10] = 1403 v[230,10] = 1609 v[231,10] = 2035 v[232,10] = 917 v[233,10] = 921 v[234,10] = 1789 v[235,10] = 41 v[236,10] = 2003 v[237,10] = 187 v[238,10] = 67 v[239,10] 
= 1635 v[240,10] = 717 v[241,10] = 1449 v[242,10] = 277 v[243,10] = 1903 v[244,10] = 1179 v[245,10] = 363 v[246,10] = 1211 v[247,10] = 1231 v[248,10] = 647 v[249,10] = 1261 v[250,10] = 1029 v[251,10] = 1485 v[252,10] = 1309 v[253,10] = 1149 v[254,10] = 317 v[255,10] = 1335 v[256,10] = 171 v[257,10] = 243 v[258,10] = 271 v[259,10] = 1055 v[260,10] = 1601 v[261,10] = 1129 v[262,10] = 1653 v[263,10] = 205 v[264,10] = 1463 v[265,10] = 1681 v[266,10] = 1621 v[267,10] = 197 v[268,10] = 951 v[269,10] = 573 v[270,10] = 1697 v[271,10] = 1265 v[272,10] = 1321 v[273,10] = 1805 v[274,10] = 1235 v[275,10] = 1853 v[276,10] = 1307 v[277,10] = 945 v[278,10] = 1197 v[279,10] = 1411 v[280,10] = 833 v[281,10] = 273 v[282,10] = 1517 v[283,10] = 1747 v[284,10] = 1095 v[285,10] = 1345 v[286,10] = 869 v[287,10] = 57 v[288,10] = 1383 v[289,10] = 221 v[290,10] = 1713 v[291,10] = 335 v[292,10] = 1751 v[293,10] = 1141 v[294,10] = 839 v[295,10] = 523 v[296,10] = 1861 v[297,10] = 1105 v[298,10] = 389 v[299,10] = 1177 v[300,10] = 1877 v[301,10] = 805 v[302,10] = 93 v[303,10] = 1591 v[304,10] = 423 v[305,10] = 1835 v[306,10] = 99 v[307,10] = 1781 v[308,10] = 1515 v[309,10] = 1909 v[310,10] = 1011 v[311,10] = 303 v[312,10] = 385 v[313,10] = 1635 v[314,10] = 357 v[315,10] = 973 v[316,10] = 1781 v[317,10] = 1707 v[318,10] = 1363 v[319,10] = 1053 v[320,10] = 649 v[321,10] = 1469 v[322,10] = 623 v[323,10] = 1429 v[324,10] = 1241 v[325,10] = 1151 v[326,10] = 1055 v[327,10] = 503 v[328,10] = 921 v[329,10] = 3 v[330,10] = 349 v[331,10] = 1149 v[332,10] = 293 v[333,10] = 45 v[334,10] = 303 v[335,10] = 877 v[336,10] = 1565 v[337,10] = 1583 v[338,10] = 1001 v[339,10] = 663 v[340,10] = 1535 v[341,10] = 395 v[342,10] = 1141 v[343,10] = 1481 v[344,10] = 1797 v[345,10] = 643 v[346,10] = 1507 v[347,10] = 465 v[348,10] = 2027 v[349,10] = 1695 v[350,10] = 367 v[351,10] = 937 v[352,10] = 719 v[353,10] = 545 v[354,10] = 1991 v[355,10] = 83 v[356,10] = 819 v[357,10] = 239 v[358,10] = 1791 v[359,10] = 1461 v[360,10] 
= 1647 v[361,10] = 1501 v[362,10] = 1161 v[363,10] = 1629 v[364,10] = 139 v[365,10] = 1595 v[366,10] = 1921 v[367,10] = 1267 v[368,10] = 1415 v[369,10] = 509 v[370,10] = 347 v[371,10] = 777 v[372,10] = 1083 v[373,10] = 363 v[374,10] = 269 v[375,10] = 1015 v[376,10] = 1809 v[377,10] = 1105 v[378,10] = 1429 v[379,10] = 1471 v[380,10] = 2019 v[381,10] = 381 v[382,10] = 2025 v[383,10] = 1223 v[384,10] = 827 v[385,10] = 1733 v[386,10] = 887 v[387,10] = 1321 v[388,10] = 803 v[389,10] = 1951 v[390,10] = 1297 v[391,10] = 1995 v[392,10] = 833 v[393,10] = 1107 v[394,10] = 1135 v[395,10] = 1181 v[396,10] = 1251 v[397,10] = 983 v[398,10] = 1389 v[399,10] = 1565 v[400,10] = 273 v[401,10] = 137 v[402,10] = 71 v[403,10] = 735 v[404,10] = 1005 v[405,10] = 933 v[406,10] = 67 v[407,10] = 1471 v[408,10] = 551 v[409,10] = 457 v[410,10] = 1667 v[411,10] = 1729 v[412,10] = 919 v[413,10] = 285 v[414,10] = 1629 v[415,10] = 1815 v[416,10] = 653 v[417,10] = 1919 v[418,10] = 1039 v[419,10] = 531 v[420,10] = 393 v[421,10] = 1411 v[422,10] = 359 v[423,10] = 221 v[424,10] = 699 v[425,10] = 1485 v[426,10] = 471 v[427,10] = 1357 v[428,10] = 1715 v[429,10] = 595 v[430,10] = 1677 v[431,10] = 153 v[432,10] = 1903 v[433,10] = 1281 v[434,10] = 215 v[435,10] = 781 v[436,10] = 543 v[437,10] = 293 v[438,10] = 1807 v[439,10] = 965 v[440,10] = 1695 v[441,10] = 443 v[442,10] = 1985 v[443,10] = 321 v[444,10] = 879 v[445,10] = 1227 v[446,10] = 1915 v[447,10] = 839 v[448,10] = 1945 v[449,10] = 1993 v[450,10] = 1165 v[451,10] = 51 v[452,10] = 557 v[453,10] = 723 v[454,10] = 1491 v[455,10] = 817 v[456,10] = 1237 v[457,10] = 947 v[458,10] = 1215 v[459,10] = 1911 v[460,10] = 1225 v[461,10] = 1965 v[462,10] = 1889 v[463,10] = 1503 v[464,10] = 1177 v[465,10] = 73 v[466,10] = 1767 v[467,10] = 303 v[468,10] = 177 v[469,10] = 1897 v[470,10] = 1401 v[471,10] = 321 v[472,10] = 921 v[473,10] = 217 v[474,10] = 1779 v[475,10] = 327 v[476,10] = 1889 v[477,10] = 333 v[478,10] = 615 v[479,10] = 1665 v[480,10] = 1825 v[481,10] 
= 1639 v[482,10] = 237 v[483,10] = 1205 v[484,10] = 361 v[485,10] = 129 v[486,10] = 1655 v[487,10] = 983 v[488,10] = 1089 v[489,10] = 1171 v[490,10] = 401 v[491,10] = 677 v[492,10] = 643 v[493,10] = 749 v[494,10] = 303 v[495,10] = 1407 v[496,10] = 1873 v[497,10] = 1579 v[498,10] = 1491 v[499,10] = 1393 v[500,10] = 1247 v[501,10] = 789 v[502,10] = 763 v[503,10] = 49 v[504,10] = 5 v[505,10] = 1607 v[506,10] = 1891 v[507,10] = 735 v[508,10] = 1557 v[509,10] = 1909 v[510,10] = 1765 v[511,10] = 1777 v[512,10] = 1127 v[513,10] = 813 v[514,10] = 695 v[515,10] = 97 v[516,10] = 731 v[517,10] = 1503 v[518,10] = 1751 v[519,10] = 333 v[520,10] = 769 v[521,10] = 865 v[522,10] = 693 v[523,10] = 377 v[524,10] = 1919 v[525,10] = 957 v[526,10] = 1359 v[527,10] = 1627 v[528,10] = 1039 v[529,10] = 1783 v[530,10] = 1065 v[531,10] = 1665 v[532,10] = 1917 v[533,10] = 1947 v[534,10] = 991 v[535,10] = 1997 v[536,10] = 841 v[537,10] = 459 v[538,10] = 221 v[539,10] = 327 v[540,10] = 1595 v[541,10] = 1881 v[542,10] = 1269 v[543,10] = 1007 v[544,10] = 129 v[545,10] = 1413 v[546,10] = 475 v[547,10] = 1105 v[548,10] = 791 v[549,10] = 1983 v[550,10] = 1359 v[551,10] = 503 v[552,10] = 691 v[553,10] = 659 v[554,10] = 691 v[555,10] = 343 v[556,10] = 1375 v[557,10] = 1919 v[558,10] = 263 v[559,10] = 1373 v[560,10] = 603 v[561,10] = 1383 v[562,10] = 297 v[563,10] = 781 v[564,10] = 145 v[565,10] = 285 v[566,10] = 767 v[567,10] = 1739 v[568,10] = 1715 v[569,10] = 715 v[570,10] = 317 v[571,10] = 1333 v[572,10] = 85 v[573,10] = 831 v[574,10] = 1615 v[575,10] = 81 v[576,10] = 1667 v[577,10] = 1467 v[578,10] = 1457 v[579,10] = 1453 v[580,10] = 1825 v[581,10] = 109 v[582,10] = 387 v[583,10] = 1207 v[584,10] = 2039 v[585,10] = 213 v[586,10] = 1351 v[587,10] = 1329 v[588,10] = 1173 v[589,10] = 57 v[590,10] = 1769 v[591,10] = 951 v[592,10] = 183 v[593,10] = 23 v[594,10] = 451 v[595,10] = 1155 v[596,10] = 1551 v[597,10] = 2037 v[598,10] = 811 v[599,10] = 635 v[600,10] = 1671 v[601,10] = 1451 v[602,10] = 863 
v[603,10] = 1499 v[604,10] = 1673 v[605,10] = 363 v[606,10] = 1029 v[607,10] = 1077 v[608,10] = 1525 v[609,10] = 277 v[610,10] = 1023 v[611,10] = 655 v[612,10] = 665 v[613,10] = 1869 v[614,10] = 1255 v[615,10] = 965 v[616,10] = 277 v[617,10] = 1601 v[618,10] = 329 v[619,10] = 1603 v[620,10] = 1901 v[621,10] = 395 v[622,10] = 65 v[623,10] = 1307 v[624,10] = 2029 v[625,10] = 21 v[626,10] = 1321 v[627,10] = 543 v[628,10] = 1569 v[629,10] = 1185 v[630,10] = 1905 v[631,10] = 1701 v[632,10] = 413 v[633,10] = 2041 v[634,10] = 1697 v[635,10] = 725 v[636,10] = 1417 v[637,10] = 1847 v[638,10] = 411 v[639,10] = 211 v[640,10] = 915 v[641,10] = 1891 v[642,10] = 17 v[643,10] = 1877 v[644,10] = 1699 v[645,10] = 687 v[646,10] = 1089 v[647,10] = 1973 v[648,10] = 1809 v[649,10] = 851 v[650,10] = 1495 v[651,10] = 1257 v[652,10] = 63 v[653,10] = 1323 v[654,10] = 1307 v[655,10] = 609 v[656,10] = 881 v[657,10] = 1543 v[658,10] = 177 v[659,10] = 617 v[660,10] = 1505 v[661,10] = 1747 v[662,10] = 1537 v[663,10] = 925 v[664,10] = 183 v[665,10] = 77 v[666,10] = 1723 v[667,10] = 1877 v[668,10] = 1703 v[669,10] = 397 v[670,10] = 459 v[671,10] = 521 v[672,10] = 257 v[673,10] = 1177 v[674,10] = 389 v[675,10] = 1947 v[676,10] = 1553 v[677,10] = 1583 v[678,10] = 1831 v[679,10] = 261 v[680,10] = 485 v[681,10] = 289 v[682,10] = 1281 v[683,10] = 1543 v[684,10] = 1591 v[685,10] = 1123 v[686,10] = 573 v[687,10] = 821 v[688,10] = 1065 v[689,10] = 1933 v[690,10] = 1373 v[691,10] = 2005 v[692,10] = 905 v[693,10] = 207 v[694,10] = 173 v[695,10] = 1573 v[696,10] = 1597 v[697,10] = 573 v[698,10] = 1883 v[699,10] = 1795 v[700,10] = 1499 v[701,10] = 1743 v[702,10] = 553 v[703,10] = 335 v[704,10] = 333 v[705,10] = 1645 v[706,10] = 791 v[707,10] = 871 v[708,10] = 1157 v[709,10] = 969 v[710,10] = 557 v[711,10] = 141 v[712,10] = 223 v[713,10] = 1129 v[714,10] = 1685 v[715,10] = 423 v[716,10] = 1069 v[717,10] = 391 v[718,10] = 99 v[719,10] = 95 v[720,10] = 1847 v[721,10] = 531 v[722,10] = 1859 v[723,10] = 1833 
v[724,10] = 1833 v[725,10] = 341 v[726,10] = 237 v[727,10] = 1997 v[728,10] = 1799 v[729,10] = 409 v[730,10] = 431 v[731,10] = 1917 v[732,10] = 363 v[733,10] = 335 v[734,10] = 1039 v[735,10] = 1085 v[736,10] = 1657 v[737,10] = 1975 v[738,10] = 1527 v[739,10] = 1111 v[740,10] = 659 v[741,10] = 389 v[742,10] = 899 v[743,10] = 595 v[744,10] = 1439 v[745,10] = 1861 v[746,10] = 1979 v[747,10] = 1569 v[748,10] = 1087 v[749,10] = 1009 v[750,10] = 165 v[751,10] = 1895 v[752,10] = 1481 v[753,10] = 1583 v[754,10] = 29 v[755,10] = 1193 v[756,10] = 1673 v[757,10] = 1075 v[758,10] = 301 v[759,10] = 1081 v[760,10] = 1377 v[761,10] = 1747 v[762,10] = 1497 v[763,10] = 1103 v[764,10] = 1789 v[765,10] = 887 v[766,10] = 739 v[767,10] = 1577 v[768,10] = 313 v[769,10] = 1367 v[770,10] = 1299 v[771,10] = 1801 v[772,10] = 1131 v[773,10] = 1837 v[774,10] = 73 v[775,10] = 1865 v[776,10] = 1065 v[777,10] = 843 v[778,10] = 635 v[779,10] = 55 v[780,10] = 1655 v[781,10] = 913 v[782,10] = 1037 v[783,10] = 223 v[784,10] = 1871 v[785,10] = 1161 v[786,10] = 461 v[787,10] = 479 v[788,10] = 511 v[789,10] = 1721 v[790,10] = 1107 v[791,10] = 389 v[792,10] = 151 v[793,10] = 35 v[794,10] = 375 v[795,10] = 1099 v[796,10] = 937 v[797,10] = 1185 v[798,10] = 1701 v[799,10] = 769 v[800,10] = 639 v[801,10] = 1633 v[802,10] = 1609 v[803,10] = 379 v[804,10] = 1613 v[805,10] = 2031 v[806,10] = 685 v[807,10] = 289 v[808,10] = 975 v[809,10] = 671 v[810,10] = 1599 v[811,10] = 1447 v[812,10] = 871 v[813,10] = 647 v[814,10] = 99 v[815,10] = 139 v[816,10] = 1427 v[817,10] = 959 v[818,10] = 89 v[819,10] = 117 v[820,10] = 841 v[821,10] = 891 v[822,10] = 1959 v[823,10] = 223 v[824,10] = 1697 v[825,10] = 1145 v[826,10] = 499 v[827,10] = 1435 v[828,10] = 1809 v[829,10] = 1413 v[830,10] = 1445 v[831,10] = 1675 v[832,10] = 171 v[833,10] = 1073 v[834,10] = 1349 v[835,10] = 1545 v[836,10] = 2039 v[837,10] = 1027 v[838,10] = 1563 v[839,10] = 859 v[840,10] = 215 v[841,10] = 1673 v[842,10] = 1919 v[843,10] = 1633 v[844,10] = 779 
v[845,10] = 411 v[846,10] = 1845 v[847,10] = 1477 v[848,10] = 1489 v[849,10] = 447 v[850,10] = 1545 v[851,10] = 351 v[852,10] = 1989 v[853,10] = 495 v[854,10] = 183 v[855,10] = 1639 v[856,10] = 1385 v[857,10] = 1805 v[858,10] = 1097 v[859,10] = 1249 v[860,10] = 1431 v[861,10] = 1571 v[862,10] = 591 v[863,10] = 697 v[864,10] = 1509 v[865,10] = 709 v[866,10] = 31 v[867,10] = 1563 v[868,10] = 165 v[869,10] = 513 v[870,10] = 1425 v[871,10] = 1299 v[872,10] = 1081 v[873,10] = 145 v[874,10] = 1841 v[875,10] = 1211 v[876,10] = 941 v[877,10] = 609 v[878,10] = 845 v[879,10] = 1169 v[880,10] = 1865 v[881,10] = 1593 v[882,10] = 347 v[883,10] = 293 v[884,10] = 1277 v[885,10] = 157 v[886,10] = 211 v[887,10] = 93 v[888,10] = 1679 v[889,10] = 1799 v[890,10] = 527 v[891,10] = 41 v[892,10] = 473 v[893,10] = 563 v[894,10] = 187 v[895,10] = 1525 v[896,10] = 575 v[897,10] = 1579 v[898,10] = 857 v[899,10] = 703 v[900,10] = 1211 v[901,10] = 647 v[902,10] = 709 v[903,10] = 981 v[904,10] = 285 v[905,10] = 697 v[906,10] = 163 v[907,10] = 981 v[908,10] = 153 v[909,10] = 1515 v[910,10] = 47 v[911,10] = 1553 v[912,10] = 599 v[913,10] = 225 v[914,10] = 1147 v[915,10] = 381 v[916,10] = 135 v[917,10] = 821 v[918,10] = 1965 v[919,10] = 609 v[920,10] = 1033 v[921,10] = 983 v[922,10] = 503 v[923,10] = 1117 v[924,10] = 327 v[925,10] = 453 v[926,10] = 2005 v[927,10] = 1257 v[928,10] = 343 v[929,10] = 1649 v[930,10] = 1199 v[931,10] = 599 v[932,10] = 1877 v[933,10] = 569 v[934,10] = 695 v[935,10] = 1587 v[936,10] = 1475 v[937,10] = 187 v[938,10] = 973 v[939,10] = 233 v[940,10] = 511 v[941,10] = 51 v[942,10] = 1083 v[943,10] = 665 v[944,10] = 1321 v[945,10] = 531 v[946,10] = 1875 v[947,10] = 1939 v[948,10] = 859 v[949,10] = 1507 v[950,10] = 1979 v[951,10] = 1203 v[952,10] = 1965 v[953,10] = 737 v[954,10] = 921 v[955,10] = 1565 v[956,10] = 1943 v[957,10] = 819 v[958,10] = 223 v[959,10] = 365 v[960,10] = 167 v[961,10] = 1705 v[962,10] = 413 v[963,10] = 1577 v[964,10] = 745 v[965,10] = 1573 v[966,10] = 
655 v[967,10] = 1633 v[968,10] = 1003 v[969,10] = 91 v[970,10] = 1123 v[971,10] = 477 v[972,10] = 1741 v[973,10] = 1663 v[974,10] = 35 v[975,10] = 715 v[976,10] = 37 v[977,10] = 1513 v[978,10] = 815 v[979,10] = 941 v[980,10] = 1379 v[981,10] = 263 v[982,10] = 1831 v[983,10] = 1735 v[984,10] = 1111 v[985,10] = 1449 v[986,10] = 353 v[987,10] = 1941 v[988,10] = 1655 v[989,10] = 1349 v[990,10] = 877 v[991,10] = 285 v[992,10] = 1723 v[993,10] = 125 v[994,10] = 1753 v[995,10] = 985 v[996,10] = 723 v[997,10] = 175 v[998,10] = 439 v[999,10] = 791 v[1000,10] = 1051 v[1001,10] = 1261 v[1002,10] = 717 v[1003,10] = 1555 v[1004,10] = 1757 v[1005,10] = 1777 v[1006,10] = 577 v[1007,10] = 1583 v[1008,10] = 1957 v[1009,10] = 873 v[1010,10] = 331 v[1011,10] = 1163 v[1012,10] = 313 v[1013,10] = 1 v[1014,10] = 1963 v[1015,10] = 963 v[1016,10] = 1905 v[1017,10] = 821 v[1018,10] = 1677 v[1019,10] = 185 v[1020,10] = 709 v[1021,10] = 545 v[1022,10] = 1723 v[1023,10] = 215 v[1024,10] = 1885 v[1025,10] = 1249 v[1026,10] = 583 v[1027,10] = 1803 v[1028,10] = 839 v[1029,10] = 885 v[1030,10] = 485 v[1031,10] = 413 v[1032,10] = 1767 v[1033,10] = 425 v[1034,10] = 129 v[1035,10] = 1035 v[1036,10] = 329 v[1037,10] = 1263 v[1038,10] = 1881 v[1039,10] = 1779 v[1040,10] = 1565 v[1041,10] = 359 v[1042,10] = 367 v[1043,10] = 453 v[1044,10] = 707 v[1045,10] = 1419 v[1046,10] = 831 v[1047,10] = 1889 v[1048,10] = 887 v[1049,10] = 1871 v[1050,10] = 1869 v[1051,10] = 747 v[1052,10] = 223 v[1053,10] = 1547 v[1054,10] = 1799 v[1055,10] = 433 v[1056,10] = 1441 v[1057,10] = 553 v[1058,10] = 2021 v[1059,10] = 1303 v[1060,10] = 1505 v[1061,10] = 1735 v[1062,10] = 1619 v[1063,10] = 1065 v[1064,10] = 1161 v[1065,10] = 2047 v[1066,10] = 347 v[1067,10] = 867 v[1068,10] = 881 v[1069,10] = 1447 v[1070,10] = 329 v[1071,10] = 781 v[1072,10] = 1065 v[1073,10] = 219 v[1074,10] = 589 v[1075,10] = 645 v[1076,10] = 1257 v[1077,10] = 1833 v[1078,10] = 749 v[1079,10] = 1841 v[1080,10] = 1733 v[1081,10] = 1179 v[1082,10] = 1191 
v[1083,10] = 1025 v[1084,10] = 1639 v[1085,10] = 1955 v[1086,10] = 1423 v[1087,10] = 1685 v[1088,10] = 1711 v[1089,10] = 493 v[1090,10] = 549 v[1091,10] = 783 v[1092,10] = 1653 v[1093,10] = 397 v[1094,10] = 895 v[1095,10] = 233 v[1096,10] = 759 v[1097,10] = 1505 v[1098,10] = 677 v[1099,10] = 1449 v[1100,10] = 1573 v[1101,10] = 1297 v[1102,10] = 1821 v[1103,10] = 1691 v[1104,10] = 791 v[1105,10] = 289 v[1106,10] = 1187 v[1107,10] = 867 v[1108,10] = 1535 v[1109,10] = 575 v[1110,10] = 183 v[337,11] = 3915 v[338,11] = 97 v[339,11] = 3047 v[340,11] = 937 v[341,11] = 2897 v[342,11] = 953 v[343,11] = 127 v[344,11] = 1201 v[345,11] = 3819 v[346,11] = 193 v[347,11] = 2053 v[348,11] = 3061 v[349,11] = 3759 v[350,11] = 1553 v[351,11] = 2007 v[352,11] = 2493 v[353,11] = 603 v[354,11] = 3343 v[355,11] = 3751 v[356,11] = 1059 v[357,11] = 783 v[358,11] = 1789 v[359,11] = 1589 v[360,11] = 283 v[361,11] = 1093 v[362,11] = 3919 v[363,11] = 2747 v[364,11] = 277 v[365,11] = 2605 v[366,11] = 2169 v[367,11] = 2905 v[368,11] = 721 v[369,11] = 4069 v[370,11] = 233 v[371,11] = 261 v[372,11] = 1137 v[373,11] = 3993 v[374,11] = 3619 v[375,11] = 2881 v[376,11] = 1275 v[377,11] = 3865 v[378,11] = 1299 v[379,11] = 3757 v[380,11] = 1193 v[381,11] = 733 v[382,11] = 993 v[383,11] = 1153 v[384,11] = 2945 v[385,11] = 3163 v[386,11] = 3179 v[387,11] = 437 v[388,11] = 271 v[389,11] = 3493 v[390,11] = 3971 v[391,11] = 1005 v[392,11] = 2615 v[393,11] = 2253 v[394,11] = 1131 v[395,11] = 585 v[396,11] = 2775 v[397,11] = 2171 v[398,11] = 2383 v[399,11] = 2937 v[400,11] = 2447 v[401,11] = 1745 v[402,11] = 663 v[403,11] = 1515 v[404,11] = 3767 v[405,11] = 2709 v[406,11] = 1767 v[407,11] = 3185 v[408,11] = 3017 v[409,11] = 2815 v[410,11] = 1829 v[411,11] = 87 v[412,11] = 3341 v[413,11] = 793 v[414,11] = 2627 v[415,11] = 2169 v[416,11] = 1875 v[417,11] = 3745 v[418,11] = 367 v[419,11] = 3783 v[420,11] = 783 v[421,11] = 827 v[422,11] = 3253 v[423,11] = 2639 v[424,11] = 2955 v[425,11] = 3539 v[426,11] = 1579 
v[427,11] = 2109 v[428,11] = 379 v[429,11] = 2939 v[430,11] = 3019 v[431,11] = 1999 v[432,11] = 2253 v[433,11] = 2911 v[434,11] = 3733 v[435,11] = 481 v[436,11] = 1767 v[437,11] = 1055 v[438,11] = 4019 v[439,11] = 4085 v[440,11] = 105 v[441,11] = 1829 v[442,11] = 2097 v[443,11] = 2379 v[444,11] = 1567 v[445,11] = 2713 v[446,11] = 737 v[447,11] = 3423 v[448,11] = 3941 v[449,11] = 2659 v[450,11] = 3961 v[451,11] = 1755 v[452,11] = 3613 v[453,11] = 1937 v[454,11] = 1559 v[455,11] = 2287 v[456,11] = 2743 v[457,11] = 67 v[458,11] = 2859 v[459,11] = 325 v[460,11] = 2601 v[461,11] = 1149 v[462,11] = 3259 v[463,11] = 2403 v[464,11] = 3947 v[465,11] = 2011 v[466,11] = 175 v[467,11] = 3389 v[468,11] = 3915 v[469,11] = 1315 v[470,11] = 2447 v[471,11] = 141 v[472,11] = 359 v[473,11] = 3609 v[474,11] = 3933 v[475,11] = 729 v[476,11] = 2051 v[477,11] = 1755 v[478,11] = 2149 v[479,11] = 2107 v[480,11] = 1741 v[481,11] = 1051 v[482,11] = 3681 v[483,11] = 471 v[484,11] = 1055 v[485,11] = 845 v[486,11] = 257 v[487,11] = 1559 v[488,11] = 1061 v[489,11] = 2803 v[490,11] = 2219 v[491,11] = 1315 v[492,11] = 1369 v[493,11] = 3211 v[494,11] = 4027 v[495,11] = 105 v[496,11] = 11 v[497,11] = 1077 v[498,11] = 2857 v[499,11] = 337 v[500,11] = 3553 v[501,11] = 3503 v[502,11] = 3917 v[503,11] = 2665 v[504,11] = 3823 v[505,11] = 3403 v[506,11] = 3711 v[507,11] = 2085 v[508,11] = 1103 v[509,11] = 1641 v[510,11] = 701 v[511,11] = 4095 v[512,11] = 2883 v[513,11] = 1435 v[514,11] = 653 v[515,11] = 2363 v[516,11] = 1597 v[517,11] = 767 v[518,11] = 869 v[519,11] = 1825 v[520,11] = 1117 v[521,11] = 1297 v[522,11] = 501 v[523,11] = 505 v[524,11] = 149 v[525,11] = 873 v[526,11] = 2673 v[527,11] = 551 v[528,11] = 1499 v[529,11] = 2793 v[530,11] = 3277 v[531,11] = 2143 v[532,11] = 3663 v[533,11] = 533 v[534,11] = 3991 v[535,11] = 575 v[536,11] = 1877 v[537,11] = 1009 v[538,11] = 3929 v[539,11] = 473 v[540,11] = 3009 v[541,11] = 2595 v[542,11] = 3249 v[543,11] = 675 v[544,11] = 3593 v[545,11] = 2453 
v[546,11] = 1567 v[547,11] = 973 v[548,11] = 595 v[549,11] = 1335 v[550,11] = 1715 v[551,11] = 589 v[552,11] = 85 v[553,11] = 2265 v[554,11] = 3069 v[555,11] = 461 v[556,11] = 1659 v[557,11] = 2627 v[558,11] = 1307 v[559,11] = 1731 v[560,11] = 1501 v[561,11] = 1699 v[562,11] = 3545 v[563,11] = 3803 v[564,11] = 2157 v[565,11] = 453 v[566,11] = 2813 v[567,11] = 2047 v[568,11] = 2999 v[569,11] = 3841 v[570,11] = 2361 v[571,11] = 1079 v[572,11] = 573 v[573,11] = 69 v[574,11] = 1363 v[575,11] = 1597 v[576,11] = 3427 v[577,11] = 2899 v[578,11] = 2771 v[579,11] = 1327 v[580,11] = 1117 v[581,11] = 1523 v[582,11] = 3521 v[583,11] = 2393 v[584,11] = 2537 v[585,11] = 1979 v[586,11] = 3179 v[587,11] = 683 v[588,11] = 2453 v[589,11] = 453 v[590,11] = 1227 v[591,11] = 779 v[592,11] = 671 v[593,11] = 3483 v[594,11] = 2135 v[595,11] = 3139 v[596,11] = 3381 v[597,11] = 3945 v[598,11] = 57 v[599,11] = 1541 v[600,11] = 3405 v[601,11] = 3381 v[602,11] = 2371 v[603,11] = 2879 v[604,11] = 1985 v[605,11] = 987 v[606,11] = 3017 v[607,11] = 3031 v[608,11] = 3839 v[609,11] = 1401 v[610,11] = 3749 v[611,11] = 2977 v[612,11] = 681 v[613,11] = 1175 v[614,11] = 1519 v[615,11] = 3355 v[616,11] = 907 v[617,11] = 117 v[618,11] = 771 v[619,11] = 3741 v[620,11] = 3337 v[621,11] = 1743 v[622,11] = 1227 v[623,11] = 3335 v[624,11] = 2755 v[625,11] = 1909 v[626,11] = 3603 v[627,11] = 2397 v[628,11] = 653 v[629,11] = 87 v[630,11] = 2025 v[631,11] = 2617 v[632,11] = 3257 v[633,11] = 287 v[634,11] = 3051 v[635,11] = 3809 v[636,11] = 897 v[637,11] = 2215 v[638,11] = 63 v[639,11] = 2043 v[640,11] = 1757 v[641,11] = 3671 v[642,11] = 297 v[643,11] = 3131 v[644,11] = 1305 v[645,11] = 293 v[646,11] = 3865 v[647,11] = 3173 v[648,11] = 3397 v[649,11] = 2269 v[650,11] = 3673 v[651,11] = 717 v[652,11] = 3041 v[653,11] = 3341 v[654,11] = 3595 v[655,11] = 3819 v[656,11] = 2871 v[657,11] = 3973 v[658,11] = 1129 v[659,11] = 513 v[660,11] = 871 v[661,11] = 1485 v[662,11] = 3977 v[663,11] = 2473 v[664,11] = 1171 v[665,11] 
= 1143 v[666,11] = 3063 v[667,11] = 3547 v[668,11] = 2183 v[669,11] = 3993 v[670,11] = 133 v[671,11] = 2529 v[672,11] = 2699 v[673,11] = 233 v[674,11] = 2355 v[675,11] = 231 v[676,11] = 3241 v[677,11] = 611 v[678,11] = 1309 v[679,11] = 3829 v[680,11] = 1839 v[681,11] = 1495 v[682,11] = 301 v[683,11] = 1169 v[684,11] = 1613 v[685,11] = 2673 v[686,11] = 243 v[687,11] = 3601 v[688,11] = 3669 v[689,11] = 2813 v[690,11] = 2671 v[691,11] = 2679 v[692,11] = 3463 v[693,11] = 2477 v[694,11] = 1795 v[695,11] = 617 v[696,11] = 2317 v[697,11] = 1855 v[698,11] = 1057 v[699,11] = 1703 v[700,11] = 1761 v[701,11] = 2515 v[702,11] = 801 v[703,11] = 1205 v[704,11] = 1311 v[705,11] = 473 v[706,11] = 3963 v[707,11] = 697 v[708,11] = 1221 v[709,11] = 251 v[710,11] = 381 v[711,11] = 3887 v[712,11] = 1761 v[713,11] = 3093 v[714,11] = 3721 v[715,11] = 2079 v[716,11] = 4085 v[717,11] = 379 v[718,11] = 3601 v[719,11] = 3845 v[720,11] = 433 v[721,11] = 1781 v[722,11] = 29 v[723,11] = 1897 v[724,11] = 1599 v[725,11] = 2163 v[726,11] = 75 v[727,11] = 3475 v[728,11] = 3957 v[729,11] = 1641 v[730,11] = 3911 v[731,11] = 2959 v[732,11] = 2833 v[733,11] = 1279 v[734,11] = 1099 v[735,11] = 403 v[736,11] = 799 v[737,11] = 2183 v[738,11] = 2699 v[739,11] = 1711 v[740,11] = 2037 v[741,11] = 727 v[742,11] = 289 v[743,11] = 1785 v[744,11] = 1575 v[745,11] = 3633 v[746,11] = 2367 v[747,11] = 1261 v[748,11] = 3953 v[749,11] = 1735 v[750,11] = 171 v[751,11] = 1959 v[752,11] = 2867 v[753,11] = 859 v[754,11] = 2951 v[755,11] = 3211 v[756,11] = 15 v[757,11] = 1279 v[758,11] = 1323 v[759,11] = 599 v[760,11] = 1651 v[761,11] = 3951 v[762,11] = 1011 v[763,11] = 315 v[764,11] = 3513 v[765,11] = 3351 v[766,11] = 1725 v[767,11] = 3793 v[768,11] = 2399 v[769,11] = 287 v[770,11] = 4017 v[771,11] = 3571 v[772,11] = 1007 v[773,11] = 541 v[774,11] = 3115 v[775,11] = 429 v[776,11] = 1585 v[777,11] = 1285 v[778,11] = 755 v[779,11] = 1211 v[780,11] = 3047 v[781,11] = 915 v[782,11] = 3611 v[783,11] = 2697 v[784,11] = 2129 
v[785,11] = 3669 v[786,11] = 81 v[787,11] = 3939 v[788,11] = 2437 v[789,11] = 915 v[790,11] = 779 v[791,11] = 3567 v[792,11] = 3701 v[793,11] = 2479 v[794,11] = 3807 v[795,11] = 1893 v[796,11] = 3927 v[797,11] = 2619 v[798,11] = 2543 v[799,11] = 3633 v[800,11] = 2007 v[801,11] = 3857 v[802,11] = 3837 v[803,11] = 487 v[804,11] = 1769 v[805,11] = 3759 v[806,11] = 3105 v[807,11] = 2727 v[808,11] = 3155 v[809,11] = 2479 v[810,11] = 1341 v[811,11] = 1657 v[812,11] = 2767 v[813,11] = 2541 v[814,11] = 577 v[815,11] = 2105 v[816,11] = 799 v[817,11] = 17 v[818,11] = 2871 v[819,11] = 3637 v[820,11] = 953 v[821,11] = 65 v[822,11] = 69 v[823,11] = 2897 v[824,11] = 3841 v[825,11] = 3559 v[826,11] = 4067 v[827,11] = 2335 v[828,11] = 3409 v[829,11] = 1087 v[830,11] = 425 v[831,11] = 2813 v[832,11] = 1705 v[833,11] = 1701 v[834,11] = 1237 v[835,11] = 821 v[836,11] = 1375 v[837,11] = 3673 v[838,11] = 2693 v[839,11] = 3925 v[840,11] = 1541 v[841,11] = 1871 v[842,11] = 2285 v[843,11] = 847 v[844,11] = 4035 v[845,11] = 1101 v[846,11] = 2029 v[847,11] = 855 v[848,11] = 2733 v[849,11] = 2503 v[850,11] = 121 v[851,11] = 2855 v[852,11] = 1069 v[853,11] = 3463 v[854,11] = 3505 v[855,11] = 1539 v[856,11] = 607 v[857,11] = 1349 v[858,11] = 575 v[859,11] = 2301 v[860,11] = 2321 v[861,11] = 1101 v[862,11] = 333 v[863,11] = 291 v[864,11] = 2171 v[865,11] = 4085 v[866,11] = 2173 v[867,11] = 2541 v[868,11] = 1195 v[869,11] = 925 v[870,11] = 4039 v[871,11] = 1379 v[872,11] = 699 v[873,11] = 1979 v[874,11] = 275 v[875,11] = 953 v[876,11] = 1755 v[877,11] = 1643 v[878,11] = 325 v[879,11] = 101 v[880,11] = 2263 v[881,11] = 3329 v[882,11] = 3673 v[883,11] = 3413 v[884,11] = 1977 v[885,11] = 2727 v[886,11] = 2313 v[887,11] = 1419 v[888,11] = 887 v[889,11] = 609 v[890,11] = 2475 v[891,11] = 591 v[892,11] = 2613 v[893,11] = 2081 v[894,11] = 3805 v[895,11] = 3435 v[896,11] = 2409 v[897,11] = 111 v[898,11] = 3557 v[899,11] = 3607 v[900,11] = 903 v[901,11] = 231 v[902,11] = 3059 v[903,11] = 473 v[904,11] = 
2959 v[905,11] = 2925 v[906,11] = 3861 v[907,11] = 2043 v[908,11] = 3887 v[909,11] = 351 v[910,11] = 2865 v[911,11] = 369 v[912,11] = 1377 v[913,11] = 2639 v[914,11] = 1261 v[915,11] = 3625 v[916,11] = 3279 v[917,11] = 2201 v[918,11] = 2949 v[919,11] = 3049 v[920,11] = 449 v[921,11] = 1297 v[922,11] = 897 v[923,11] = 1891 v[924,11] = 411 v[925,11] = 2773 v[926,11] = 749 v[927,11] = 2753 v[928,11] = 1825 v[929,11] = 853 v[930,11] = 2775 v[931,11] = 3547 v[932,11] = 3923 v[933,11] = 3923 v[934,11] = 987 v[935,11] = 3723 v[936,11] = 2189 v[937,11] = 3877 v[938,11] = 3577 v[939,11] = 297 v[940,11] = 2763 v[941,11] = 1845 v[942,11] = 3083 v[943,11] = 2951 v[944,11] = 483 v[945,11] = 2169 v[946,11] = 3985 v[947,11] = 245 v[948,11] = 3655 v[949,11] = 3441 v[950,11] = 1023 v[951,11] = 235 v[952,11] = 835 v[953,11] = 3693 v[954,11] = 3585 v[955,11] = 327 v[956,11] = 1003 v[957,11] = 543 v[958,11] = 3059 v[959,11] = 2637 v[960,11] = 2923 v[961,11] = 87 v[962,11] = 3617 v[963,11] = 1031 v[964,11] = 1043 v[965,11] = 903 v[966,11] = 2913 v[967,11] = 2177 v[968,11] = 2641 v[969,11] = 3279 v[970,11] = 389 v[971,11] = 2009 v[972,11] = 525 v[973,11] = 4085 v[974,11] = 3299 v[975,11] = 987 v[976,11] = 2409 v[977,11] = 813 v[978,11] = 2683 v[979,11] = 373 v[980,11] = 2695 v[981,11] = 3775 v[982,11] = 2375 v[983,11] = 1119 v[984,11] = 2791 v[985,11] = 223 v[986,11] = 325 v[987,11] = 587 v[988,11] = 1379 v[989,11] = 2877 v[990,11] = 2867 v[991,11] = 3793 v[992,11] = 655 v[993,11] = 831 v[994,11] = 3425 v[995,11] = 1663 v[996,11] = 1681 v[997,11] = 2657 v[998,11] = 1865 v[999,11] = 3943 v[1000,11] = 2977 v[1001,11] = 1979 v[1002,11] = 2271 v[1003,11] = 3247 v[1004,11] = 1267 v[1005,11] = 1747 v[1006,11] = 811 v[1007,11] = 159 v[1008,11] = 429 v[1009,11] = 2001 v[1010,11] = 1195 v[1011,11] = 3065 v[1012,11] = 553 v[1013,11] = 1499 v[1014,11] = 3529 v[1015,11] = 1081 v[1016,11] = 2877 v[1017,11] = 3077 v[1018,11] = 845 v[1019,11] = 1793 v[1020,11] = 2409 v[1021,11] = 3995 v[1022,11] = 
2559 v[1023,11] = 4081 v[1024,11] = 1195 v[1025,11] = 2955 v[1026,11] = 1117 v[1027,11] = 1409 v[1028,11] = 785 v[1029,11] = 287 v[1030,11] = 1521 v[1031,11] = 1607 v[1032,11] = 85 v[1033,11] = 3055 v[1034,11] = 3123 v[1035,11] = 2533 v[1036,11] = 2329 v[1037,11] = 3477 v[1038,11] = 799 v[1039,11] = 3683 v[1040,11] = 3715 v[1041,11] = 337 v[1042,11] = 3139 v[1043,11] = 3311 v[1044,11] = 431 v[1045,11] = 3511 v[1046,11] = 2299 v[1047,11] = 365 v[1048,11] = 2941 v[1049,11] = 3067 v[1050,11] = 1331 v[1051,11] = 1081 v[1052,11] = 1097 v[1053,11] = 2853 v[1054,11] = 2299 v[1055,11] = 495 v[1056,11] = 1745 v[1057,11] = 749 v[1058,11] = 3819 v[1059,11] = 619 v[1060,11] = 1059 v[1061,11] = 3559 v[1062,11] = 183 v[1063,11] = 3743 v[1064,11] = 723 v[1065,11] = 949 v[1066,11] = 3501 v[1067,11] = 733 v[1068,11] = 2599 v[1069,11] = 3983 v[1070,11] = 3961 v[1071,11] = 911 v[1072,11] = 1899 v[1073,11] = 985 v[1074,11] = 2493 v[1075,11] = 1795 v[1076,11] = 653 v[1077,11] = 157 v[1078,11] = 433 v[1079,11] = 2361 v[1080,11] = 3093 v[1081,11] = 3119 v[1082,11] = 3679 v[1083,11] = 2367 v[1084,11] = 1701 v[1085,11] = 1445 v[1086,11] = 1321 v[1087,11] = 2397 v[1088,11] = 1241 v[1089,11] = 3305 v[1090,11] = 3985 v[1091,11] = 2349 v[1092,11] = 4067 v[1093,11] = 3805 v[1094,11] = 3073 v[1095,11] = 2837 v[1096,11] = 1567 v[1097,11] = 3783 v[1098,11] = 451 v[1099,11] = 2441 v[1100,11] = 1181 v[1101,11] = 487 v[1102,11] = 543 v[1103,11] = 1201 v[1104,11] = 3735 v[1105,11] = 2517 v[1106,11] = 733 v[1107,11] = 1535 v[1108,11] = 2175 v[1109,11] = 3613 v[1110,11] = 3019 v[481,12] = 2319 v[482,12] = 653 v[483,12] = 1379 v[484,12] = 1675 v[485,12] = 1951 v[486,12] = 7075 v[487,12] = 2087 v[488,12] = 7147 v[489,12] = 1427 v[490,12] = 893 v[491,12] = 171 v[492,12] = 2019 v[493,12] = 7235 v[494,12] = 5697 v[495,12] = 3615 v[496,12] = 1961 v[497,12] = 7517 v[498,12] = 6849 v[499,12] = 2893 v[500,12] = 1883 v[501,12] = 2863 v[502,12] = 2173 v[503,12] = 4543 v[504,12] = 73 v[505,12] = 381 v[506,12] = 
3893 v[507,12] = 6045 v[508,12] = 1643 v[509,12] = 7669 v[510,12] = 1027 v[511,12] = 1549 v[512,12] = 3983 v[513,12] = 1985 v[514,12] = 6589 v[515,12] = 7497 v[516,12] = 2745 v[517,12] = 2375 v[518,12] = 7047 v[519,12] = 1117 v[520,12] = 1171 v[521,12] = 1975 v[522,12] = 5199 v[523,12] = 3915 v[524,12] = 3695 v[525,12] = 8113 v[526,12] = 4303 v[527,12] = 3773 v[528,12] = 7705 v[529,12] = 6855 v[530,12] = 1675 v[531,12] = 2245 v[532,12] = 2817 v[533,12] = 1719 v[534,12] = 569 v[535,12] = 1021 v[536,12] = 2077 v[537,12] = 5945 v[538,12] = 1833 v[539,12] = 2631 v[540,12] = 4851 v[541,12] = 6371 v[542,12] = 833 v[543,12] = 7987 v[544,12] = 331 v[545,12] = 1899 v[546,12] = 8093 v[547,12] = 6719 v[548,12] = 6903 v[549,12] = 5903 v[550,12] = 5657 v[551,12] = 5007 v[552,12] = 2689 v[553,12] = 6637 v[554,12] = 2675 v[555,12] = 1645 v[556,12] = 1819 v[557,12] = 689 v[558,12] = 6709 v[559,12] = 7717 v[560,12] = 6295 v[561,12] = 7013 v[562,12] = 7695 v[563,12] = 3705 v[564,12] = 7069 v[565,12] = 2621 v[566,12] = 3631 v[567,12] = 6571 v[568,12] = 6259 v[569,12] = 7261 v[570,12] = 3397 v[571,12] = 7645 v[572,12] = 1115 v[573,12] = 4753 v[574,12] = 2047 v[575,12] = 7579 v[576,12] = 2271 v[577,12] = 5403 v[578,12] = 4911 v[579,12] = 7629 v[580,12] = 4225 v[581,12] = 1209 v[582,12] = 6955 v[583,12] = 6951 v[584,12] = 1829 v[585,12] = 5579 v[586,12] = 5231 v[587,12] = 1783 v[588,12] = 4285 v[589,12] = 7425 v[590,12] = 599 v[591,12] = 5785 v[592,12] = 3275 v[593,12] = 5643 v[594,12] = 2263 v[595,12] = 657 v[596,12] = 6769 v[597,12] = 6261 v[598,12] = 1251 v[599,12] = 3249 v[600,12] = 4447 v[601,12] = 4111 v[602,12] = 3991 v[603,12] = 1215 v[604,12] = 131 v[605,12] = 4397 v[606,12] = 3487 v[607,12] = 7585 v[608,12] = 5565 v[609,12] = 7199 v[610,12] = 3573 v[611,12] = 7105 v[612,12] = 7409 v[613,12] = 1671 v[614,12] = 949 v[615,12] = 3889 v[616,12] = 5971 v[617,12] = 3333 v[618,12] = 225 v[619,12] = 3647 v[620,12] = 5403 v[621,12] = 3409 v[622,12] = 7459 v[623,12] = 6879 v[624,12] = 
5789 v[625,12] = 6567 v[626,12] = 5581 v[627,12] = 4919 v[628,12] = 1927 v[629,12] = 4407 v[630,12] = 8085 v[631,12] = 4691 v[632,12] = 611 v[633,12] = 3005 v[634,12] = 591 v[635,12] = 753 v[636,12] = 589 v[637,12] = 171 v[638,12] = 5729 v[639,12] = 5891 v[640,12] = 1033 v[641,12] = 3049 v[642,12] = 6567 v[643,12] = 5257 v[644,12] = 8003 v[645,12] = 1757 v[646,12] = 4489 v[647,12] = 4923 v[648,12] = 6379 v[649,12] = 5171 v[650,12] = 1757 v[651,12] = 689 v[652,12] = 3081 v[653,12] = 1389 v[654,12] = 4113 v[655,12] = 455 v[656,12] = 2761 v[657,12] = 847 v[658,12] = 7575 v[659,12] = 5829 v[660,12] = 633 v[661,12] = 6629 v[662,12] = 1103 v[663,12] = 7635 v[664,12] = 803 v[665,12] = 6175 v[666,12] = 6587 v[667,12] = 2711 v[668,12] = 3879 v[669,12] = 67 v[670,12] = 1179 v[671,12] = 4761 v[672,12] = 7281 v[673,12] = 1557 v[674,12] = 3379 v[675,12] = 2459 v[676,12] = 4273 v[677,12] = 4127 v[678,12] = 7147 v[679,12] = 35 v[680,12] = 3549 v[681,12] = 395 v[682,12] = 3735 v[683,12] = 5787 v[684,12] = 4179 v[685,12] = 5889 v[686,12] = 5057 v[687,12] = 7473 v[688,12] = 4713 v[689,12] = 2133 v[690,12] = 2897 v[691,12] = 1841 v[692,12] = 2125 v[693,12] = 1029 v[694,12] = 1695 v[695,12] = 6523 v[696,12] = 1143 v[697,12] = 5105 v[698,12] = 7133 v[699,12] = 3351 v[700,12] = 2775 v[701,12] = 3971 v[702,12] = 4503 v[703,12] = 7589 v[704,12] = 5155 v[705,12] = 4305 v[706,12] = 1641 v[707,12] = 4717 v[708,12] = 2427 v[709,12] = 5617 v[710,12] = 1267 v[711,12] = 399 v[712,12] = 5831 v[713,12] = 4305 v[714,12] = 4241 v[715,12] = 3395 v[716,12] = 3045 v[717,12] = 4899 v[718,12] = 1713 v[719,12] = 171 v[720,12] = 411 v[721,12] = 7099 v[722,12] = 5473 v[723,12] = 5209 v[724,12] = 1195 v[725,12] = 1077 v[726,12] = 1309 v[727,12] = 2953 v[728,12] = 7343 v[729,12] = 4887 v[730,12] = 3229 v[731,12] = 6759 v[732,12] = 6721 v[733,12] = 6775 v[734,12] = 675 v[735,12] = 4039 v[736,12] = 2493 v[737,12] = 7511 v[738,12] = 3269 v[739,12] = 4199 v[740,12] = 6625 v[741,12] = 7943 v[742,12] = 2013 
v[743,12] = 4145 v[744,12] = 667 v[745,12] = 513 v[746,12] = 2303 v[747,12] = 4591 v[748,12] = 7941 v[749,12] = 2741 v[750,12] = 987 v[751,12] = 8061 v[752,12] = 3161 v[753,12] = 5951 v[754,12] = 1431 v[755,12] = 831 v[756,12] = 5559 v[757,12] = 7405 v[758,12] = 1357 v[759,12] = 4319 v[760,12] = 4235 v[761,12] = 5421 v[762,12] = 2559 v[763,12] = 4415 v[764,12] = 2439 v[765,12] = 823 v[766,12] = 1725 v[767,12] = 6219 v[768,12] = 4903 v[769,12] = 6699 v[770,12] = 5451 v[771,12] = 349 v[772,12] = 7703 v[773,12] = 2927 v[774,12] = 7809 v[775,12] = 6179 v[776,12] = 1417 v[777,12] = 5987 v[778,12] = 3017 v[779,12] = 4983 v[780,12] = 3479 v[781,12] = 4525 v[782,12] = 4643 v[783,12] = 4911 v[784,12] = 227 v[785,12] = 5475 v[786,12] = 2287 v[787,12] = 5581 v[788,12] = 6817 v[789,12] = 1937 v[790,12] = 1421 v[791,12] = 4415 v[792,12] = 7977 v[793,12] = 1789 v[794,12] = 3907 v[795,12] = 6815 v[796,12] = 6789 v[797,12] = 6003 v[798,12] = 5609 v[799,12] = 4507 v[800,12] = 337 v[801,12] = 7427 v[802,12] = 7943 v[803,12] = 3075 v[804,12] = 6427 v[805,12] = 1019 v[806,12] = 7121 v[807,12] = 4763 v[808,12] = 81 v[809,12] = 3587 v[810,12] = 2929 v[811,12] = 1795 v[812,12] = 8067 v[813,12] = 2415 v[814,12] = 1265 v[815,12] = 4025 v[816,12] = 5599 v[817,12] = 4771 v[818,12] = 3025 v[819,12] = 2313 v[820,12] = 6129 v[821,12] = 7611 v[822,12] = 6881 v[823,12] = 5253 v[824,12] = 4413 v[825,12] = 7869 v[826,12] = 105 v[827,12] = 3173 v[828,12] = 1629 v[829,12] = 2537 v[830,12] = 1023 v[831,12] = 4409 v[832,12] = 7209 v[833,12] = 4413 v[834,12] = 7107 v[835,12] = 7469 v[836,12] = 33 v[837,12] = 1955 v[838,12] = 2881 v[839,12] = 5167 v[840,12] = 6451 v[841,12] = 4211 v[842,12] = 179 v[843,12] = 5573 v[844,12] = 7879 v[845,12] = 3387 v[846,12] = 7759 v[847,12] = 5455 v[848,12] = 7157 v[849,12] = 1891 v[850,12] = 5683 v[851,12] = 5689 v[852,12] = 6535 v[853,12] = 3109 v[854,12] = 6555 v[855,12] = 6873 v[856,12] = 1249 v[857,12] = 4251 v[858,12] = 6437 v[859,12] = 49 v[860,12] = 2745 v[861,12] 
= 1201 v[862,12] = 7327 v[863,12] = 4179 v[864,12] = 6783 v[865,12] = 623 v[866,12] = 2779 v[867,12] = 5963 v[868,12] = 2585 v[869,12] = 6927 v[870,12] = 5333 v[871,12] = 4033 v[872,12] = 285 v[873,12] = 7467 v[874,12] = 4443 v[875,12] = 4917 v[876,12] = 3 v[877,12] = 4319 v[878,12] = 5517 v[879,12] = 3449 v[880,12] = 813 v[881,12] = 5499 v[882,12] = 2515 v[883,12] = 5771 v[884,12] = 3357 v[885,12] = 2073 v[886,12] = 4395 v[887,12] = 4925 v[888,12] = 2643 v[889,12] = 7215 v[890,12] = 5817 v[891,12] = 1199 v[892,12] = 1597 v[893,12] = 1619 v[894,12] = 7535 v[895,12] = 4833 v[896,12] = 609 v[897,12] = 4797 v[898,12] = 8171 v[899,12] = 6847 v[900,12] = 793 v[901,12] = 6757 v[902,12] = 8165 v[903,12] = 3371 v[904,12] = 2431 v[905,12] = 5235 v[906,12] = 4739 v[907,12] = 7703 v[908,12] = 7223 v[909,12] = 6525 v[910,12] = 5891 v[911,12] = 5605 v[912,12] = 4433 v[913,12] = 3533 v[914,12] = 5267 v[915,12] = 5125 v[916,12] = 5037 v[917,12] = 225 v[918,12] = 6717 v[919,12] = 1121 v[920,12] = 5741 v[921,12] = 2013 v[922,12] = 4327 v[923,12] = 4839 v[924,12] = 569 v[925,12] = 5227 v[926,12] = 7677 v[927,12] = 4315 v[928,12] = 2391 v[929,12] = 5551 v[930,12] = 859 v[931,12] = 3627 v[932,12] = 6377 v[933,12] = 3903 v[934,12] = 4311 v[935,12] = 6527 v[936,12] = 7573 v[937,12] = 4905 v[938,12] = 7731 v[939,12] = 1909 v[940,12] = 1555 v[941,12] = 3279 v[942,12] = 1949 v[943,12] = 1887 v[944,12] = 6675 v[945,12] = 5509 v[946,12] = 2033 v[947,12] = 5473 v[948,12] = 3539 v[949,12] = 5033 v[950,12] = 5935 v[951,12] = 6095 v[952,12] = 4761 v[953,12] = 1771 v[954,12] = 1271 v[955,12] = 1717 v[956,12] = 4415 v[957,12] = 5083 v[958,12] = 6277 v[959,12] = 3147 v[960,12] = 7695 v[961,12] = 2461 v[962,12] = 4783 v[963,12] = 4539 v[964,12] = 5833 v[965,12] = 5583 v[966,12] = 651 v[967,12] = 1419 v[968,12] = 2605 v[969,12] = 5511 v[970,12] = 3913 v[971,12] = 5795 v[972,12] = 2333 v[973,12] = 2329 v[974,12] = 4431 v[975,12] = 3725 v[976,12] = 6069 v[977,12] = 2699 v[978,12] = 7055 v[979,12] = 
6879 v[980,12] = 1017 v[981,12] = 3121 v[982,12] = 2547 v[983,12] = 4603 v[984,12] = 2385 v[985,12] = 6915 v[986,12] = 6103 v[987,12] = 5669 v[988,12] = 7833 v[989,12] = 2001 v[990,12] = 4287 v[991,12] = 6619 v[992,12] = 955 v[993,12] = 2761 v[994,12] = 5711 v[995,12] = 6291 v[996,12] = 3415 v[997,12] = 3909 v[998,12] = 2841 v[999,12] = 5627 v[1000,12] = 4939 v[1001,12] = 7671 v[1002,12] = 6059 v[1003,12] = 6275 v[1004,12] = 6517 v[1005,12] = 1931 v[1006,12] = 4583 v[1007,12] = 7301 v[1008,12] = 1267 v[1009,12] = 7509 v[1010,12] = 1435 v[1011,12] = 2169 v[1012,12] = 6939 v[1013,12] = 3515 v[1014,12] = 2985 v[1015,12] = 2787 v[1016,12] = 2123 v[1017,12] = 1969 v[1018,12] = 3307 v[1019,12] = 353 v[1020,12] = 4359 v[1021,12] = 7059 v[1022,12] = 5273 v[1023,12] = 5873 v[1024,12] = 6657 v[1025,12] = 6765 v[1026,12] = 6229 v[1027,12] = 3179 v[1028,12] = 1583 v[1029,12] = 6237 v[1030,12] = 2155 v[1031,12] = 371 v[1032,12] = 273 v[1033,12] = 7491 v[1034,12] = 3309 v[1035,12] = 6805 v[1036,12] = 3015 v[1037,12] = 6831 v[1038,12] = 7819 v[1039,12] = 713 v[1040,12] = 4747 v[1041,12] = 3935 v[1042,12] = 4109 v[1043,12] = 1311 v[1044,12] = 709 v[1045,12] = 3089 v[1046,12] = 7059 v[1047,12] = 4247 v[1048,12] = 2989 v[1049,12] = 1509 v[1050,12] = 4919 v[1051,12] = 1841 v[1052,12] = 3045 v[1053,12] = 3821 v[1054,12] = 6929 v[1055,12] = 4655 v[1056,12] = 1333 v[1057,12] = 6429 v[1058,12] = 6649 v[1059,12] = 2131 v[1060,12] = 5265 v[1061,12] = 1051 v[1062,12] = 261 v[1063,12] = 8057 v[1064,12] = 3379 v[1065,12] = 2179 v[1066,12] = 1993 v[1067,12] = 5655 v[1068,12] = 3063 v[1069,12] = 6381 v[1070,12] = 3587 v[1071,12] = 7417 v[1072,12] = 1579 v[1073,12] = 1541 v[1074,12] = 2107 v[1075,12] = 5085 v[1076,12] = 2873 v[1077,12] = 6141 v[1078,12] = 955 v[1079,12] = 3537 v[1080,12] = 2157 v[1081,12] = 841 v[1082,12] = 1999 v[1083,12] = 1465 v[1084,12] = 5171 v[1085,12] = 5651 v[1086,12] = 1535 v[1087,12] = 7235 v[1088,12] = 4349 v[1089,12] = 1263 v[1090,12] = 1453 v[1091,12] = 1005 
v[1092,12] = 6893 v[1093,12] = 2919 v[1094,12] = 1947 v[1095,12] = 1635 v[1096,12] = 3963 v[1097,12] = 397 v[1098,12] = 969 v[1099,12] = 4569 v[1100,12] = 655 v[1101,12] = 6737 v[1102,12] = 2995 v[1103,12] = 7235 v[1104,12] = 7713 v[1105,12] = 973 v[1106,12] = 4821 v[1107,12] = 2377 v[1108,12] = 1673 v[1109,12] = 1 v[1110,12] = 6541 # v[0:40,0] = transpose([ \ # 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ # 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ # 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ # 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ]) # v[2:40,1] = transpose([ \ # 1, 3, 1, 3, 1, 3, 3, 1, \ # 3, 1, 3, 1, 3, 1, 1, 3, 1, 3, \ # 1, 3, 1, 3, 3, 1, 3, 1, 3, 1, \ # 3, 1, 1, 3, 1, 3, 1, 3, 1, 3 ]) # v[3:40,2] = transpose([ \ # 7, 5, 1, 3, 3, 7, 5, \ # 5, 7, 7, 1, 3, 3, 7, 5, 1, 1, \ # 5, 3, 3, 1, 7, 5, 1, 3, 3, 7, \ # 5, 1, 1, 5, 7, 7, 5, 1, 3, 3 ]) # v[5:40,3] = transpose([ \ # 1, 7, 9,13,11, \ # 1, 3, 7, 9, 5,13,13,11, 3,15, \ # 5, 3,15, 7, 9,13, 9, 1,11, 7, \ # 5,15, 1,15,11, 5, 3, 1, 7, 9 ]) # v[7:40,4] = transpose([ \ # 9, 3,27, \ # 15,29,21,23,19,11,25, 7,13,17, \ # 1,25,29, 3,31,11, 5,23,27,19, \ # 21, 5, 1,17,13, 7,15, 9,31, 9 ]) # v[13:40,5] = transpose([ \ # 37,33, 7, 5,11,39,63, \ # 27,17,15,23,29, 3,21,13,31,25, \ # 9,49,33,19,29,11,19,27,15,25 ]) # v[19:40,6] = transpose([ \ # 13, \ # 33,115, 41, 79, 17, 29,119, 75, 73,105, \ # 7, 59, 65, 21, 3,113, 61, 89, 45,107 ]) # v[37:40,7] = transpose([ \ # 7, 23, 39 ]) # # Set POLY. 
# poly= [ \ 1, 3, 7, 11, 13, 19, 25, 37, 59, 47, \ 61, 55, 41, 67, 97, 91, 109, 103, 115, 131, \ 193, 137, 145, 143, 241, 157, 185, 167, 229, 171, \ 213, 191, 253, 203, 211, 239, 247, 285, 369, 299 ] poly = [\ 1, 3, 7, 11, 13, 19, 25, 37, 59, 47, 61, 55, 41, 67, 97, 91, 109, 103, 115, 131, 193, 137, 145, 143, 241, 157, 185, 167, 229, 171, 213, 191, 253, 203, 211, 239, 247, 285, 369, 299, 301, 333, 351, 355, 357, 361, 391, 397, 425, 451, 463, 487, 501, 529, 539, 545, 557, 563, 601, 607, 617, 623, 631, 637, 647, 661, 675, 677, 687, 695, 701, 719, 721, 731, 757, 761, 787, 789, 799, 803, 817, 827, 847, 859, 865, 875, 877, 883, 895, 901, 911, 949, 953, 967, 971, 973, 981, 985, 995, 1001, 1019, 1033, 1051, 1063, 1069, 1125, 1135, 1153, 1163, 1221, 1239, 1255, 1267, 1279, 1293, 1305, 1315, 1329, 1341, 1347, 1367, 1387, 1413, 1423, 1431, 1441, 1479, 1509, 1527, 1531, 1555, 1557, 1573, 1591, 1603, 1615, 1627, 1657, 1663, 1673, 1717, 1729, 1747, 1759, 1789, 1815, 1821, 1825, 1849, 1863, 1869, 1877, 1881, 1891, 1917, 1933, 1939, 1969, 2011, 2035, 2041, 2053, 2071, 2091, 2093, 2119, 2147, 2149, 2161, 2171, 2189, 2197, 2207, 2217, 2225, 2255, 2257, 2273, 2279, 2283, 2293, 2317, 2323, 2341, 2345, 2363, 2365, 2373, 2377, 2385, 2395, 2419, 2421, 2431, 2435, 2447, 2475, 2477, 2489, 2503, 2521, 2533, 2551, 2561, 2567, 2579, 2581, 2601, 2633, 2657, 2669, 2681, 2687, 2693, 2705, 2717, 2727, 2731, 2739, 2741, 2773, 2783, 2793, 2799, 2801, 2811, 2819, 2825, 2833, 2867, 2879, 2881, 2891, 2905, 2911, 2917, 2927, 2941, 2951, 2955, 2963, 2965, 2991, 2999, 3005, 3017, 3035, 3037, 3047, 3053, 3083, 3085, 3097, 3103, 3159, 3169, 3179, 3187, 3205, 3209, 3223, 3227, 3229, 3251, 3263, 3271, 3277, 3283, 3285, 3299, 3305, 3319, 3331, 3343, 3357, 3367, 3373, 3393, 3399, 3413, 3417, 3427, 3439, 3441, 3475, 3487, 3497, 3515, 3517, 3529, 3543, 3547, 3553, 3559, 3573, 3589, 3613, 3617, 3623, 3627, 3635, 3641, 3655, 3659, 3669, 3679, 3697, 3707, 3709, 3713, 3731, 3743, 3747, 3771, 3791, 3805, 3827, 3833, 
3851, 3865, 3889, 3895, 3933, 3947, 3949, 3957, 3971, 3985, 3991, 3995, 4007, 4013, 4021, 4045, 4051, 4069, 4073, 4179, 4201, 4219, 4221, 4249, 4305, 4331, 4359, 4383, 4387, 4411, 4431, 4439, 4449, 4459, 4485, 4531, 4569, 4575, 4621, 4663, 4669, 4711, 4723, 4735, 4793, 4801, 4811, 4879, 4893, 4897, 4921, 4927, 4941, 4977, 5017, 5027, 5033, 5127, 5169, 5175, 5199, 5213, 5223, 5237, 5287, 5293, 5331, 5391, 5405, 5453, 5523, 5573, 5591, 5597, 5611, 5641, 5703, 5717, 5721, 5797, 5821, 5909, 5913, 5955, 5957, 6005, 6025, 6061, 6067, 6079, 6081, 6231, 6237, 6289, 6295, 6329, 6383, 6427, 6453, 6465, 6501, 6523, 6539, 6577, 6589, 6601, 6607, 6631, 6683, 6699, 6707, 6761, 6795, 6865, 6881, 6901, 6923, 6931, 6943, 6999, 7057, 7079, 7103, 7105, 7123, 7173, 7185, 7191, 7207, 7245, 7303, 7327, 7333, 7355, 7365, 7369, 7375, 7411, 7431, 7459, 7491, 7505, 7515, 7541, 7557, 7561, 7701, 7705, 7727, 7749, 7761, 7783, 7795, 7823, 7907, 7953, 7963, 7975, 8049, 8089, 8123, 8125, 8137, 8219, 8231, 8245, 8275, 8293, 8303, 8331, 8333, 8351, 8357, 8367, 8379, 8381, 8387, 8393, 8417, 8435, 8461, 8469, 8489, 8495, 8507, 8515, 8551, 8555, 8569, 8585, 8599, 8605, 8639, 8641, 8647, 8653, 8671, 8675, 8689, 8699, 8729, 8741, 8759, 8765, 8771, 8795, 8797, 8825, 8831, 8841, 8855, 8859, 8883, 8895, 8909, 8943, 8951, 8955, 8965, 8999, 9003, 9031, 9045, 9049, 9071, 9073, 9085, 9095, 9101, 9109, 9123, 9129, 9137, 9143, 9147, 9185, 9197, 9209, 9227, 9235, 9247, 9253, 9257, 9277, 9297, 9303, 9313, 9325, 9343, 9347, 9371, 9373, 9397, 9407, 9409, 9415, 9419, 9443, 9481, 9495, 9501, 9505, 9517, 9529, 9555, 9557, 9571, 9585, 9591, 9607, 9611, 9621, 9625, 9631, 9647, 9661, 9669, 9679, 9687, 9707, 9731, 9733, 9745, 9773, 9791, 9803, 9811, 9817, 9833, 9847, 9851, 9863, 9875, 9881, 9905, 9911, 9917, 9923, 9963, 9973,10003,10025, 10043,10063,10071,10077,10091,10099,10105,10115,10129,10145, 10169,10183,10187,10207,10223,10225,10247,10265,10271,10275, 10289,10299,10301,10309,10343,10357,10373,10411,10413,10431, 
10445,10453,10463,10467,10473,10491,10505,10511,10513,10523, 10539,10549,10559,10561,10571,10581,10615,10621,10625,10643, 10655,10671,10679,10685,10691,10711,10739,10741,10755,10767, 10781,10785,10803,10805,10829,10857,10863,10865,10875,10877, 10917,10921,10929,10949,10967,10971,10987,10995,11009,11029, 11043,11045,11055,11063,11075,11081,11117,11135,11141,11159, 11163,11181,11187,11225,11237,11261,11279,11297,11307,11309, 11327,11329,11341,11377,11403,11405,11413,11427,11439,11453, 11461,11473,11479,11489,11495,11499,11533,11545,11561,11567, 11575,11579,11589,11611,11623,11637,11657,11663,11687,11691, 11701,11747,11761,11773,11783,11795,11797,11817,11849,11855, 11867,11869,11873,11883,11919,11921,11927,11933,11947,11955, 11961,11999,12027,12029,12037,12041,12049,12055,12095,12097, 12107,12109,12121,12127,12133,12137,12181,12197,12207,12209, 12239,12253,12263,12269,12277,12287,12295,12309,12313,12335, 12361,12367,12391,12409,12415,12433,12449,12469,12479,12481, 12499,12505,12517,12527,12549,12559,12597,12615,12621,12639, 12643,12657,12667,12707,12713,12727,12741,12745,12763,12769, 12779,12781,12787,12799,12809,12815,12829,12839,12857,12875, 12883,12889,12901,12929,12947,12953,12959,12969,12983,12987, 12995,13015,13019,13031,13063,13077,13103,13137,13149,13173, 13207,13211,13227,13241,13249,13255,13269,13283,13285,13303, 13307,13321,13339,13351,13377,13389,13407,13417,13431,13435, 13447,13459,13465,13477,13501,13513,13531,13543,13561,13581, 13599,13605,13617,13623,13637,13647,13661,13677,13683,13695, 13725,13729,13753,13773,13781,13785,13795,13801,13807,13825, 13835,13855,13861,13871,13883,13897,13905,13915,13939,13941, 13969,13979,13981,13997,14027,14035,14037,14051,14063,14085, 14095,14107,14113,14125,14137,14145,14151,14163,14193,14199, 14219,14229,14233,14243,14277,14287,14289,14295,14301,14305, 14323,14339,14341,14359,14365,14375,14387,14411,14425,14441, 14449,14499,14513,14523,14537,14543,14561,14579,14585,14593, 
14599,14603,14611,14641,14671,14695,14701,14723,14725,14743, 14753,14759,14765,14795,14797,14803,14831,14839,14845,14855, 14889,14895,14909,14929,14941,14945,14951,14963,14965,14985, 15033,15039,15053,15059,15061,15071,15077,15081,15099,15121, 15147,15149,15157,15167,15187,15193,15203,15205,15215,15217, 15223,15243,15257,15269,15273,15287,15291,15313,15335,15347, 15359,15373,15379,15381,15391,15395,15397,15419,15439,15453, 15469,15491,15503,15517,15527,15531,15545,15559,15593,15611, 15613,15619,15639,15643,15649,15661,15667,15669,15681,15693, 15717,15721,15741,15745,15765,15793,15799,15811,15825,15835, 15847,15851,15865,15877,15881,15887,15899,15915,15935,15937, 15955,15973,15977,16011,16035,16061,16069,16087,16093,16097, 16121,16141,16153,16159,16165,16183,16189,16195,16197,16201, 16209,16215,16225,16259,16265,16273,16299,16309,16355,16375, 16381] atmost = 2**log_max - 1 # # Find the number of bits in ATMOST. # maxcol = i4_bit_hi1 ( atmost ) # # Initialize row 1 of V. # v[0,0:maxcol] = 1 # # Things to do only if the dimension changed. # if ( dim_num != dim_num_save ): # # Check parameters. # if ( dim_num < 1 or dim_max < dim_num ): print 'I4_SOBOL - Fatal error!' print ' The spatial dimension DIM_NUM should satisfy:' print ' 1 <= DIM_NUM <= %d'%dim_max print ' But this input value is DIM_NUM = %d'%dim_num return dim_num_save = dim_num # # Initialize the remaining rows of V. # for i in xrange(2 , dim_num+1): # # The bits of the integer POLY(I) gives the form of polynomial I. # # Find the degree of polynomial I from binary encoding. # j = poly[i-1] m = 0 while ( 1 ): j = math.floor ( j / 2. ) if ( j <= 0 ): break m = m + 1 # # Expand this bit pattern to separate components of the logical array INCLUD. # j = poly[i-1] includ=zeros(m) for k in xrange(m, 0, -1): j2 = math.floor ( j / 2. ) includ[k-1] = (j != 2 * j2 ) j = j2 # # Calculate the remaining elements of row I as explained # in Bratley and Fox, section 2. 
# for j in xrange( m+1, maxcol+1 ): newv = v[i-1,j-m-1] l = 1 for k in xrange(1, m+1): l = 2 * l if ( includ[k-1] ): newv = bitwise_xor ( int(newv), int(l * v[i-1,j-k-1]) ) v[i-1,j-1] = newv # # Multiply columns of V by appropriate power of 2. # l = 1 for j in xrange( maxcol-1, 0, -1): l = 2 * l v[0:dim_num,j-1] = v[0:dim_num,j-1] * l # # RECIPD is 1/(common denominator of the elements in V). # recipd = 1.0 / ( 2 * l ) lastq=zeros(dim_num) seed = int(math.floor ( seed )) if ( seed < 0 ): seed = 0 if ( seed == 0 ): l = 1 lastq=zeros(dim_num) elif ( seed == seed_save + 1 ): # # Find the position of the right-hand zero in SEED. # l = i4_bit_lo0 ( seed ) elif ( seed <= seed_save ): seed_save = 0 l = 1 lastq=zeros(dim_num) for seed_temp in xrange( int(seed_save), int(seed)): l = i4_bit_lo0 ( seed_temp ) for i in xrange(1 , dim_num+1): lastq[i-1] = bitwise_xor ( int(lastq[i-1]), int(v[i-1,l-1]) ) l = i4_bit_lo0 ( seed ) elif ( seed_save + 1 < seed ): for seed_temp in xrange( int(seed_save + 1), int(seed) ): l = i4_bit_lo0 ( seed_temp ) for i in xrange(1, dim_num+1): lastq[i-1] = bitwise_xor ( int(lastq[i-1]), int(v[i-1,l-1]) ) l = i4_bit_lo0 ( seed ) # # Check that the user is not calling too many times! # if ( maxcol < l ): print 'I4_SOBOL - Fatal error!' print ' Too many calls!' print ' MAXCOL = %d\n'%maxcol print ' L = %d\n'%l return # # Calculate the new components of QUASI. # quasi=zeros(dim_num) for i in xrange( 1, dim_num+1): quasi[i-1] = lastq[i-1] * recipd lastq[i-1] = bitwise_xor ( int(lastq[i-1]), int(v[i-1,l-1]) ) seed_save = seed seed = seed + 1 return [ quasi, seed ] def i4_uniform ( a, b, seed ): #*****************************************************************************80 # ## I4_UNIFORM returns a scaled pseudorandom I4. # # Discussion: # # The pseudorandom number will be scaled to be uniformly distributed # between A and B. # # Licensing: # # This code is distributed under the GNU LGPL license. 
# # Modified: # # 22 February 2011 # # Author: # # Original MATLAB version by John Burkardt. # PYTHON version by Corrado Chisari # # Reference: # # Paul Bratley, Bennett Fox, Linus Schrage, # A Guide to Simulation, # Springer Verlag, pages 201-202, 1983. # # Pierre L'Ecuyer, # Random Number Generation, # in Handbook of Simulation, # edited by Jerry Banks, # Wiley Interscience, page 95, 1998. # # Bennett Fox, # Algorithm 647: # Implementation and Relative Efficiency of Quasirandom # Sequence Generators, # ACM Transactions on Mathematical Software, # Volume 12, Number 4, pages 362-376, 1986. # # Peter Lewis, Allen Goodman, James Miller # A Pseudo-Random Number Generator for the System/360, # IBM Systems Journal, # Volume 8, pages 136-143, 1969. # # Parameters: # # Input, integer A, B, the minimum and maximum acceptable values. # # Input, integer SEED, a seed for the random number generator. # # Output, integer C, the randomly chosen integer. # # Output, integer SEED, the updated seed. # if ( seed == 0 ): print 'I4_UNIFORM - Fatal error!' print ' Input SEED = 0!' seed = math.floor ( seed ) a = round ( a ) b = round ( b ) seed = mod ( seed, 2147483647 ) if ( seed < 0 ) : seed = seed + 2147483647 k = math.floor ( seed / 127773 ) seed = 16807 * ( seed - k * 127773 ) - k * 2836 if ( seed < 0 ): seed = seed + 2147483647 r = seed * 4.656612875E-10 # # Scale R to lie between A-0.5 and B+0.5. # r = ( 1.0 - r ) * ( min ( a, b ) - 0.5 ) + r * ( max ( a, b ) + 0.5 ) # # Use rounding to convert R to an integer between A and B. # value = round ( r ) value = max ( value, min ( a, b ) ) value = min ( value, max ( a, b ) ) c = value return [ int(c), int(seed) ] def prime_ge ( n ): #*****************************************************************************80 # ## PRIME_GE returns the smallest prime greater than or equal to N. 
# # # Example: # # N PRIME_GE # # -10 2 # 1 2 # 2 2 # 3 3 # 4 5 # 5 5 # 6 7 # 7 7 # 8 11 # 9 11 # 10 11 # # Licensing: # # This code is distributed under the GNU LGPL license. # # Modified: # # 22 February 2011 # # Author: # # Original MATLAB version by John Burkardt. # PYTHON version by Corrado Chisari # # Parameters: # # Input, integer N, the number to be bounded. # # Output, integer P, the smallest prime number that is greater # than or equal to N. # p = max ( math.ceil ( n ), 2 ) while ( not isprime ( p ) ): p = p + 1 return p def isprime(n): #*****************************************************************************80 # ## IS_PRIME returns True if N is a prime number, False otherwise # # # Licensing: # # This code is distributed under the GNU LGPL license. # # Modified: # # 22 February 2011 # # Author: # # Corrado Chisari # # Parameters: # # Input, integer N, the number to be checked. # # Output, boolean value, True or False # if n!=int(n) or n<1: return False p=2 while p<n: if n%p==0: return False p+=1 return True
237,450
16.005729
93
py
pybo
pybo-master/pybo/inits/methods.py
""" Implementation of methods for sampling initial points. """ from __future__ import division from __future__ import absolute_import from __future__ import print_function import numpy as np from ..utils import rstate from .sobol import i4_sobol_generate __all__ = ['init_middle', 'init_uniform', 'init_latin', 'init_sobol'] def init_middle(bounds): """ Initialize using a single query in the middle of the space. """ return np.mean(bounds, axis=1)[None, :] def init_uniform(bounds, n=None, rng=None): """ Initialize using `n` uniformly distributed query points. If `n` is `None` then use 3D points where D is the dimensionality of the input space. """ rng = rstate(rng) bounds = np.array(bounds, ndmin=2, copy=False) d = len(bounds) n = 3*d if (n is None) else n # generate the random values. w = bounds[:, 1] - bounds[:, 0] X = bounds[:, 0] + w * rng.rand(n, d) return X def init_latin(bounds, n=None, rng=None): """ Initialize using a Latin hypercube design of size `n`. If `n` is `None` then use 3D points where D is the dimensionality of the input space. """ rng = rstate(rng) bounds = np.array(bounds, ndmin=2, copy=False) d = len(bounds) n = 3*d if (n is None) else n # generate the random samples. w = bounds[:, 1] - bounds[:, 0] X = bounds[:, 0] + w * (np.arange(n)[:, None] + rng.rand(n, d)) / n # shuffle each dimension. for i in xrange(d): X[:, i] = rng.permutation(X[:, i]) return X def init_sobol(bounds, n=None, rng=None): """ Initialize using a Sobol sequence of length `n`. If `n` is `None` then use 3D points where D is the dimensionality of the input space. """ rng = rstate(rng) bounds = np.array(bounds, ndmin=2, copy=False) d = len(bounds) n = 3*len(bounds) if (n is None) else n # generate the random samples. skip = rng.randint(100, 200) w = bounds[:, 1] - bounds[:, 0] X = bounds[:, 0] + w * i4_sobol_generate(d, n, skip).T return X
2,057
25.384615
78
py
pybo
pybo-master/pybo/inits/__init__.py
""" Initialization methods. """ # pylint: disable=wildcard-import from .methods import * from . import methods __all__ = [] __all__ += methods.__all__
154
11.916667
33
py
pybo
pybo-master/pybo/solvers/lbfgs.py
""" Local gradient-based solver using multiple restarts. """ from __future__ import division from __future__ import absolute_import from __future__ import print_function import numpy as np import scipy.optimize from ..inits import init_uniform __all__ = ['solve_lbfgs'] def solve_lbfgs(f, bounds, nbest=10, ngrid=10000, xgrid=None, rng=None): """ Compute the objective function on an initial grid, pick `nbest` points, and maximize using LBFGS from these initial points. Args: f: function handle that takes an optional `grad` boolean kwarg and if `grad=True` returns a tuple of `(function, gradient)`. NOTE: this functions is assumed to allow for multiple inputs in vectorized form. bounds: bounds of the search space. nbest: number of best points from the initial test points to refine. ngrid: number of (random) grid points to test initially. xgrid: initial test points; ngrid is ignored if this is given. Returns: xmin, fmax: location and value of the maximizer. """ if xgrid is None: # TODO: The following line could be replaced with a regular grid or a # Sobol grid. xgrid = init_uniform(bounds, ngrid, rng) else: xgrid = np.array(xgrid, ndmin=2) # compute func_grad on points xgrid finit = f(xgrid, grad=False) idx_sorted = np.argsort(finit)[::-1] # lbfgsb needs the gradient to be "contiguous", squeezing the gradient # protects against func_grads that return ndmin=2 arrays. We also need to # negate everything so that we are maximizing. def objective(x): fx, gx = f(x[None], grad=True) return -fx[0], -gx[0] # TODO: the following can easily be multiprocessed result = [scipy.optimize.fmin_l_bfgs_b(objective, x0, bounds=bounds)[:2] for x0 in xgrid[idx_sorted[:nbest]]] # loop through the results and pick out the smallest. xmin, fmin = result[np.argmin(_[1] for _ in result)] # return the values (negate if we're finding a max) return xmin, -fmin
2,178
30.57971
79
py
pybo
pybo-master/pybo/solvers/direct.py
""" Interface to the nlopt DIRECT implementation. """ # future imports from __future__ import division from __future__ import absolute_import from __future__ import print_function # by default export nothing. __all__ = [] try: # try and import nlopt, and if not this package will not define or export # anything. import nlopt import numpy as np # exported symbols __all__ += ['solve_direct'] def solve_direct(f, bounds): def objective(x, grad): """Objective function in the form required by nlopt.""" if grad.size > 0: fx, gx = f(x[None], grad=True) grad[:] = gx[0][:] else: fx = f(x[None], grad=False) return fx[0] bounds = np.array(bounds, ndmin=2) opt = nlopt.opt(nlopt.GN_DIRECT_L, bounds.shape[0]) opt.set_lower_bounds(list(bounds[:, 0])) opt.set_upper_bounds(list(bounds[:, 1])) opt.set_ftol_rel(1e-6) opt.set_max_objective(objective) xmin = bounds[:, 0] + (bounds[:, 1] - bounds[:, 0]) / 2 xmin = opt.optimize(xmin) fmax = opt.last_optimum_value() return xmin, fmax except ImportError: pass
1,224
24.520833
77
py
pybo
pybo-master/pybo/solvers/__init__.py
""" Objects which global optimization solvers. """ # pylint: disable=wildcard-import from .lbfgs import * from .direct import * from . import lbfgs from . import direct __all__ = [] __all__ += lbfgs.__all__ __all__ += direct.__all__
236
14.8
42
py