code stringlengths 17 6.64M |
|---|
def get_dataset(data_cfg):
    """Build a dataset (or a ``ConcatDataset``) from a dataset config dict.

    ``ann_file`` may be a list/tuple, in which case one dataset is built per
    annotation file and the results are concatenated; ``proposal_file`` and
    ``img_prefix`` are broadcast to match the number of annotation files.

    Args:
        data_cfg (dict): Dataset config. Must contain ``ann_file`` and
            ``img_prefix``; may contain ``proposal_file`` plus the optional
            components ``generator``, ``augmentor``, ``anchor_generator``
            and ``target_encoder``.

    Returns:
        A dataset object, or a ``ConcatDataset`` when several annotation
        files are given.
    """
    if isinstance(data_cfg['ann_file'], (list, tuple)):
        ann_files = data_cfg['ann_file']
    else:
        ann_files = [data_cfg['ann_file']]
    num_dset = len(ann_files)

    if 'proposal_file' in data_cfg:
        if isinstance(data_cfg['proposal_file'], (list, tuple)):
            proposal_files = data_cfg['proposal_file']
        else:
            proposal_files = [data_cfg['proposal_file']]
    else:
        proposal_files = [None] * num_dset
    assert len(proposal_files) == num_dset

    if isinstance(data_cfg['img_prefix'], (list, tuple)):
        img_prefixes = data_cfg['img_prefix']
    else:
        img_prefixes = [data_cfg['img_prefix']] * num_dset
    assert len(img_prefixes) == num_dset

    def _build_optional(key, parent):
        # Build an optional sub-component from its config, or None if it is
        # absent or explicitly set to None.
        if data_cfg.get(key) is not None:
            return obj_from_dict(data_cfg[key], parent)
        return None

    generator = _build_optional('generator', voxel_generator)
    augmentor = _build_optional('augmentor', point_augmentor)
    anchor_generator = _build_optional('anchor_generator', anchor3d_generator)
    target_encoder = _build_optional('target_encoder', bbox3d_target)

    dsets = []
    for i in range(num_dset):
        data_info = copy.deepcopy(data_cfg)
        data_info['ann_file'] = ann_files[i]
        data_info['proposal_file'] = proposal_files[i]
        data_info['img_prefix'] = img_prefixes[i]
        # Share the pre-built component instances across all per-file datasets.
        if generator is not None:
            data_info['generator'] = generator
        if anchor_generator is not None:
            data_info['anchor_generator'] = anchor_generator
        if augmentor is not None:
            data_info['augmentor'] = augmentor
        if target_encoder is not None:
            data_info['target_encoder'] = target_encoder
        dsets.append(obj_from_dict(data_info, datasets))

    return ConcatDataset(dsets) if len(dsets) > 1 else dsets[0]
|
class VOCDataset(XMLDataset):
    """Pascal VOC detection dataset; infers the VOC year from ``img_prefix``."""

    CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')

    def __init__(self, **kwargs):
        super(VOCDataset, self).__init__(**kwargs)
        # The year is read from the directory name (VOC2007 / VOC2012).
        for year in (2007, 2012):
            if 'VOC{}'.format(year) in self.img_prefix:
                self.year = year
                break
        else:
            raise ValueError('Cannot infer dataset year from img_prefix')
|
class XMLDataset(CustomDataset):
    """Detection dataset backed by Pascal-VOC style XML annotation files."""

    def __init__(self, **kwargs):
        super(XMLDataset, self).__init__(**kwargs)
        # Map class name -> 1-based label index (0 is background).
        self.cat2label = {name: idx + 1 for idx, name in enumerate(self.CLASSES)}

    def load_annotations(self, ann_file):
        """Read image ids from *ann_file* and collect per-image size info."""
        infos = []
        for img_id in mmcv.list_from_file(ann_file):
            xml_path = osp.join(self.img_prefix, 'Annotations', '{}.xml'.format(img_id))
            size = ET.parse(xml_path).getroot().find('size')
            infos.append(dict(
                id=img_id,
                filename='JPEGImages/{}.jpg'.format(img_id),
                width=int(size.find('width').text),
                height=int(size.find('height').text)))
        return infos

    def get_ann_info(self, idx):
        """Parse the XML annotation of image *idx* into bbox/label arrays."""
        img_id = self.img_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, 'Annotations', '{}.xml'.format(img_id))
        root = ET.parse(xml_path).getroot()
        boxes, lbls = [], []
        boxes_ig, lbls_ig = [], []
        for obj in root.findall('object'):
            label = self.cat2label[obj.find('name').text]
            bnd = obj.find('bndbox')
            box = [int(bnd.find(tag).text) for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
            # Objects marked "difficult" are kept separately and ignored.
            if int(obj.find('difficult').text):
                boxes_ig.append(box)
                lbls_ig.append(label)
            else:
                boxes.append(box)
                lbls.append(label)
        if boxes:
            # VOC coordinates are 1-based; shift to 0-based.
            boxes = np.array(boxes, ndmin=2) - 1
            lbls = np.array(lbls)
        else:
            boxes = np.zeros((0, 4))
            lbls = np.zeros((0,))
        if boxes_ig:
            boxes_ig = np.array(boxes_ig, ndmin=2) - 1
            lbls_ig = np.array(lbls_ig)
        else:
            boxes_ig = np.zeros((0, 4))
            lbls_ig = np.zeros((0,))
        return dict(
            bboxes=boxes.astype(np.float32),
            labels=lbls.astype(np.int64),
            bboxes_ignore=boxes_ig.astype(np.float32),
            labels_ignore=lbls_ig.astype(np.int64))
|
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    """Return a bias-free 3x3 Conv2d whose padding equals its dilation
    (spatial size is preserved at stride 1)."""
    conv = nn.Conv2d(
        in_planes, out_planes, kernel_size=3, stride=stride,
        padding=dilation, dilation=dilation, bias=False)
    return conv
|
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride, dilation)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        # Gradient checkpointing is not implemented for the basic block.
        assert not with_cp

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Shortcut: projected when shapes differ, identity otherwise.
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return self.relu(out)
|
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck block.

    If style is "pytorch", the stride-two layer is the 3x3 conv layer;
    if it is "caffe", the stride-two layer is the first 1x1 conv layer.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False):
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        # Decide which conv carries the stride.
        if style == 'pytorch':
            conv1_stride, conv2_stride = 1, stride
        else:
            conv1_stride, conv2_stride = stride, 1
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=conv2_stride, padding=dilation, dilation=dilation, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    def forward(self, x):

        def _inner_forward(inp):
            out = self.relu(self.bn1(self.conv1(inp)))
            out = self.relu(self.bn2(self.conv2(out)))
            out = self.bn3(self.conv3(out))
            shortcut = inp if self.downsample is None else self.downsample(inp)
            return out + shortcut

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        return self.relu(out)
|
def make_res_layer(block, inplanes, planes, blocks, stride=1, dilation=1, style='pytorch', with_cp=False):
    """Stack *blocks* residual blocks into one ``nn.Sequential`` stage.

    The first block applies the stage stride and, when the spatial or channel
    shape changes, a 1x1-conv + BN downsample on its shortcut; the remaining
    blocks are stride-1 with matching channels.
    """
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        # Projection shortcut to match the residual branch's output shape.
        downsample = nn.Sequential(
            nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes * block.expansion))
    layers = [block(inplanes, planes, stride, dilation, downsample, style=style, with_cp=with_cp)]
    layers.extend(
        block(planes * block.expansion, planes, 1, dilation, style=style, with_cp=with_cp)
        for _ in range(1, blocks))
    return nn.Sequential(*layers)
|
class ResNet(nn.Module):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        bn_eval (bool): Whether to set BN layers to eval mode, namely, freeze
            running stats (mean and var).
        bn_frozen (bool): Whether to freeze weight and bias of BN layers.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
    """
    # depth -> (block class, number of blocks per stage)
    arch_settings = {18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}
    def __init__(self, depth, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', frozen_stages=(- 1), bn_eval=True, bn_frozen=False, with_cp=False):
        super(ResNet, self).__init__()
        if (depth not in self.arch_settings):
            raise KeyError('invalid depth {} for resnet'.format(depth))
        assert ((num_stages >= 1) and (num_stages <= 4))
        (block, stage_blocks) = self.arch_settings[depth]
        # Truncate the architecture to the requested number of stages.
        stage_blocks = stage_blocks[:num_stages]
        assert (len(strides) == len(dilations) == num_stages)
        assert (max(out_indices) < num_stages)
        self.out_indices = out_indices
        self.style = style
        self.frozen_stages = frozen_stages
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen
        self.with_cp = with_cp
        # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.res_layers = []
        for (i, num_blocks) in enumerate(stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            # Channel width doubles each stage: 64, 128, 256, 512.
            planes = (64 * (2 ** i))
            res_layer = make_res_layer(block, self.inplanes, planes, num_blocks, stride=stride, dilation=dilation, style=self.style, with_cp=with_cp)
            self.inplanes = (planes * block.expansion)
            layer_name = 'layer{}'.format((i + 1))
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        # Output channel count of the last stage.
        self.feat_dim = ((block.expansion * 64) * (2 ** (len(stage_blocks) - 1)))
    def init_weights(self, pretrained=None):
        """Load weights from a checkpoint path, or Kaiming/constant-init from scratch."""
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')
    def forward(self, x):
        # NOTE: the raw input tensor is included as the first output element.
        outs = [x]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        if (len(outs) == 1):
            return outs[0]
        else:
            return tuple(outs)
    def train(self, mode=True):
        """Switch train/eval mode; additionally freeze BN statistics and the
        configured early stages (stem + layers 1..frozen_stages)."""
        super(ResNet, self).train(mode)
        if self.bn_eval:
            # Keep all BN layers in eval mode so running stats stay fixed.
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if self.bn_frozen:
                        for params in m.parameters():
                            params.requires_grad = False
        if (mode and (self.frozen_stages >= 0)):
            # Freeze the stem ...
            for param in self.conv1.parameters():
                param.requires_grad = False
            for param in self.bn1.parameters():
                param.requires_grad = False
            self.bn1.eval()
            self.bn1.weight.requires_grad = False
            self.bn1.bias.requires_grad = False
            # ... and every residual stage up to frozen_stages.
            for i in range(1, (self.frozen_stages + 1)):
                mod = getattr(self, 'layer{}'.format(i))
                mod.eval()
                for param in mod.parameters():
                    param.requires_grad = False
|
class ConvFCBBoxHead(BBoxHead):
    """More general bbox head, with shared conv and fc layers and two optional
    separated branches.

                                /-> cls convs -> cls fcs -> cls
    shared convs -> shared fcs
                                \\-> reg convs -> reg fcs -> reg
    """
    def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, conv_out_channels=256, fc_out_channels=1024, normalize=None, *args, **kwargs):
        super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
        # At least one conv/fc layer must be configured somewhere.
        assert ((((((num_shared_convs + num_shared_fcs) + num_cls_convs) + num_cls_fcs) + num_reg_convs) + num_reg_fcs) > 0)
        if ((num_cls_convs > 0) or (num_reg_convs > 0)):
            # Branch convs need a spatial map, so no shared fcs may precede them.
            assert (num_shared_fcs == 0)
        if (not self.with_cls):
            assert ((num_cls_convs == 0) and (num_cls_fcs == 0))
        if (not self.with_reg):
            assert ((num_reg_convs == 0) and (num_reg_fcs == 0))
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.normalize = normalize
        # Convs carry a bias only when there is no norm layer.
        self.with_bias = (normalize is None)
        (self.shared_convs, self.shared_fcs, last_layer_dim) = self._add_conv_fc_branch(self.num_shared_convs, self.num_shared_fcs, self.in_channels, True)
        self.shared_out_channels = last_layer_dim
        (self.cls_convs, self.cls_fcs, self.cls_last_dim) = self._add_conv_fc_branch(self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
        (self.reg_convs, self.reg_fcs, self.reg_last_dim) = self._add_conv_fc_branch(self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
        if ((self.num_shared_fcs == 0) and (not self.with_avg_pool)):
            # The branch output is still spatial: fold H*W into the feature dim.
            if (self.num_cls_fcs == 0):
                self.cls_last_dim *= (self.roi_feat_size * self.roi_feat_size)
            if (self.num_reg_fcs == 0):
                self.reg_last_dim *= (self.roi_feat_size * self.roi_feat_size)
        self.relu = nn.ReLU(inplace=True)
        if self.with_cls:
            self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes)
        if self.with_reg:
            # 7 regression targets per box (presumably a 3D box encoding — confirm).
            out_dim_reg = (7 if self.reg_class_agnostic else (7 * self.num_classes))
            self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
    def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, is_shared=False):
        """Add shared or separable branch

        convs -> avg pool (optional) -> fcs
        """
        last_layer_dim = in_channels
        branch_convs = nn.ModuleList()
        if (num_branch_convs > 0):
            for i in range(num_branch_convs):
                conv_in_channels = (last_layer_dim if (i == 0) else self.conv_out_channels)
                branch_convs.append(ConvModule(conv_in_channels, self.conv_out_channels, 3, padding=1, normalize=self.normalize, bias=self.with_bias))
            last_layer_dim = self.conv_out_channels
        branch_fcs = nn.ModuleList()
        if (num_branch_fcs > 0):
            # The first fc sees a flattened spatial map unless it is avg-pooled
            # or has already been flattened by the shared fcs.
            if ((is_shared or (self.num_shared_fcs == 0)) and (not self.with_avg_pool)):
                last_layer_dim *= (self.roi_feat_size * self.roi_feat_size)
            for i in range(num_branch_fcs):
                fc_in_channels = (last_layer_dim if (i == 0) else self.fc_out_channels)
                branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return (branch_convs, branch_fcs, last_layer_dim)
    def init_weights(self):
        """Xavier-init all fc layers on top of the base class initialization."""
        super(ConvFCBBoxHead, self).init_weights()
        for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
            for m in module_list.modules():
                if isinstance(m, nn.Linear):
                    nn.init.xavier_uniform_(m.weight)
                    nn.init.constant_(m.bias, 0)
    def forward(self, x):
        # Shared trunk first ...
        if (self.num_shared_convs > 0):
            for conv in self.shared_convs:
                x = conv(x)
        if (self.num_shared_fcs > 0):
            if self.with_avg_pool:
                x = self.avg_pool(x)
            x = x.view(x.size(0), (- 1))
            for fc in self.shared_fcs:
                x = self.relu(fc(x))
        # ... then the two separate branches.
        x_cls = x
        x_reg = x
        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if (x_cls.dim() > 2):
            # Still spatial: optionally avg-pool, then flatten for the fcs.
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.view(x_cls.size(0), (- 1))
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))
        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if (x_reg.dim() > 2):
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.view(x_reg.size(0), (- 1))
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))
        cls_score = (self.fc_cls(x_cls) if self.with_cls else None)
        bbox_pred = (self.fc_reg(x_reg) if self.with_reg else None)
        return (cls_score, bbox_pred)
|
class SharedFCBBoxHead(BBoxHead):
    """Simplified bbox head: one shared fully-connected layer followed by
    separate classification and regression linear layers.

    Args:
        in_channels (int): Channels of the pooled RoI feature map.
        fc_out_channels (int): Width of the shared fc layer.
        roi_feat_size (int | tuple): Spatial size of the RoI feature map.
        num_classes (int): Number of classification outputs.
        reg_dim (int): Regression target dimension per box.
        reg_class_agnostic (bool): If False, predict ``reg_dim`` values per class.
    """

    def __init__(self, in_channels=256, fc_out_channels=1024, roi_feat_size=(7, 7), num_classes=1, reg_dim=7, reg_class_agnostic=True):
        super(SharedFCBBoxHead, self).__init__()
        self.num_classes = num_classes
        self.reg_class_agnostic = reg_class_agnostic
        # Flatten the (C, H, W) RoI feature into the shared fc's input size.
        if isinstance(roi_feat_size, tuple):
            fc_in_channels = in_channels * roi_feat_size[0] * roi_feat_size[1]
        else:
            fc_in_channels = in_channels * roi_feat_size * roi_feat_size
        self.shared_fc = nn.Linear(fc_in_channels, fc_out_channels)
        self.relu = nn.ReLU(inplace=True)
        if self.with_cls:
            self.fc_cls = nn.Linear(fc_out_channels, self.num_classes)
        if self.with_reg:
            out_dim_reg = (reg_dim if self.reg_class_agnostic else (reg_dim * self.num_classes))
            self.fc_reg = nn.Linear(fc_out_channels, out_dim_reg)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = self.relu(self.shared_fc(x))
        # Guard on the head flags (consistent with ConvFCBBoxHead): the branch
        # layers only exist when the corresponding flag is set, so calling
        # them unconditionally would crash when a branch is disabled.
        cls_score = self.fc_cls(x) if self.with_cls else None
        bbox_pred = self.fc_reg(x) if self.with_reg else None
        return (cls_score, bbox_pred)
|
def _build_module(cfg, parrent=None, default_args=None):
return (cfg if isinstance(cfg, nn.Module) else obj_from_dict(cfg, parrent, default_args))
|
def build(cfg, parrent=None, default_args=None):
    """Build one module from *cfg*, or an ``nn.Sequential`` of modules when
    *cfg* is a list of configs."""
    if not isinstance(cfg, list):
        return _build_module(cfg, parrent, default_args)
    return nn.Sequential(*(_build_module(c, parrent, default_args) for c in cfg))
|
def build_backbone(cfg):
    # Build a backbone module from the `backbones` registry module.
    return build(cfg, backbones)
|
def build_neck(cfg):
    # Build a neck module from the `necks` registry module.
    return build(cfg, necks)
|
def build_rpn_head(cfg):
    # Build an RPN head from the `rpn_heads` registry module.
    return build(cfg, rpn_heads)
|
def build_roi_extractor(cfg):
    # Build a RoI extractor from the `roi_extractors` registry module.
    return build(cfg, roi_extractors)
|
def build_bbox_head(cfg):
    # Build a bbox head from the `bbox_heads` registry module.
    return build(cfg, bbox_heads)
|
def build_mask_head(cfg):
    # Build a mask head from the `mask_heads` registry module.
    return build(cfg, mask_heads)
|
def build_single_stage_head(cfg):
    # Build a single-stage detection head from the `single_stage_heads` registry module.
    return build(cfg, single_stage_heads)
|
def build_detector(cfg, train_cfg=None, test_cfg=None):
    # Imported lazily to avoid a circular import with the detectors package.
    from . import detectors
    # train_cfg/test_cfg are injected into the detector's constructor defaults.
    return build(cfg, detectors, dict(train_cfg=train_cfg, test_cfg=test_cfg))
|
class PointPillars(BaseDetector):
    """Single-stage 3D detector: voxel backbone -> 2D neck -> bbox head.

    NOTE(review): despite the optional rpn/rcnn hooks in __init__, only the
    single-stage ``bbox_head`` is used by forward_train/forward_test here.
    """
    def __init__(self, backbone, neck, rpn_head=None, bbox_head=None, rcnn_head=None, train_cfg=None, test_cfg=None, pretrained=None):
        super(PointPillars, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        self.neck = builder.build_neck(neck)
        self.bbox_head = builder.build_single_stage_head(bbox_head)
        if (rpn_head is not None):
            self.rpn_head = builder.build_rpn_head(rpn_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        if (rcnn_head is not None):
            self.rcnn_head = builder.build_bbox_head(rcnn_head)
        self.init_weights(pretrained=pretrained)
    @property
    def with_rpn(self):
        # True when an RPN head was configured for this model.
        return (hasattr(self, 'rpn_head') and (self.rpn_head is not None))
    def init_weights(self, pretrained=None):
        """Load weights from a checkpoint path; no-op for any other value."""
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
    def freeze_layers(self, model):
        """Disable gradients for every parameter of *model*."""
        for param in model.parameters():
            param.requires_grad = False
    def merge_second_batch(self, batch_args):
        """Collate per-sample tensors into batch-level tensors.

        - 'voxels'/'num_points': concatenated along dim 0 (variable counts).
        - 'coordinates': each sample gets a leading batch-index column
          (via ``F.pad`` with ``value=i``) before concatenation.
        - 'img_meta'/'gt_labels'/'gt_bboxes': kept as plain Python lists.
        - everything else: stacked along a new batch dimension.
        """
        ret = {}
        for (key, elems) in batch_args.items():
            if (key in ['voxels', 'num_points']):
                ret[key] = torch.cat(elems, dim=0)
            elif (key == 'coordinates'):
                coors = []
                for (i, coor) in enumerate(elems):
                    # Prepend the sample index as an extra leading column.
                    coor_pad = F.pad(coor, [1, 0, 0, 0], mode='constant', value=i)
                    coors.append(coor_pad)
                ret[key] = torch.cat(coors, dim=0)
            elif (key in ['img_meta', 'gt_labels', 'gt_bboxes']):
                ret[key] = elems
            else:
                ret[key] = torch.stack(elems, dim=0)
        return ret
    def forward_train(self, img, img_meta, **kwargs):
        """Run the voxel pipeline and return the bbox head's loss dict."""
        batch_size = len(img_meta)
        ret = self.merge_second_batch(kwargs)
        losses = dict()
        canvas = self.backbone(ret['voxels'], ret['coordinates'], ret['num_points'], batch_size)
        x = self.neck(canvas)
        bbox_outs = self.bbox_head(x)
        bbox_loss_inputs = (bbox_outs + (ret['gt_bboxes'], ret['gt_labels'], ret['anchors'], ret['anchors_mask'], self.train_cfg))
        bbox_losses = self.bbox_head.loss(*bbox_loss_inputs)
        losses.update(bbox_losses)
        return losses
    def forward_test(self, img, img_meta, **kwargs):
        """Run inference and return NMS-filtered detections from the bbox head."""
        batch_size = len(img_meta)
        ret = self.merge_second_batch(kwargs)
        canvas = self.backbone(ret['voxels'], ret['coordinates'], ret['num_points'], batch_size)
        x = self.neck(canvas)
        rpn_outs = self.bbox_head.forward(x)
        proposal_inputs = (rpn_outs + (ret['anchors'], ret['anchors_mask'], img_meta, self.test_cfg))
        return self.bbox_head.get_det_bboxes_nms(*proposal_inputs)
|
def cfg_from_file(filename):
    """Load a config file and merge it into the default options."""
    import yaml
    with open(filename, 'r') as f:
        # safe_load: yaml.load without an explicit Loader is unsafe on
        # untrusted files and raises TypeError on PyYAML >= 6.
        yaml_cfg = edict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, __C)
|
def _merge_a_into_b(a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    Raises:
        KeyError: if *a* contains a key absent from *b*.
        ValueError: on an uncoercible type mismatch between a[k] and b[k].
    """
    if type(a) is not edict:
        return
    for k, v in a.items():
        if k not in b:
            raise KeyError('{} is not a valid config key'.format(k))
        old_type = type(b[k])
        if old_type is not type(v):
            # Coerce sequences into the existing ndarray's dtype; any other
            # type mismatch is a config error.
            if isinstance(b[k], np.ndarray):
                v = np.array(v, dtype=b[k].dtype)
            else:
                raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k]), type(v), k))
        if type(v) is edict:
            try:
                _merge_a_into_b(a[k], b[k])
            except Exception:
                # Narrowed from a bare `except:` (which also caught
                # KeyboardInterrupt/SystemExit); annotate the failing key
                # before re-raising.
                print('Error under config key: {}'.format(k))
                raise
        else:
            b[k] = v
|
def cfg_from_list(cfg_list):
    """Set config keys via list (e.g., from command line).

    *cfg_list* alternates dotted key paths and values:
    ``['TRAIN.LR', '0.01', ...]``. Values are parsed as Python literals when
    possible, otherwise kept as strings.
    """
    from ast import literal_eval
    assert (len(cfg_list) % 2) == 0
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        # Walk down the config tree to the parent dict of the target key.
        d = __C
        for subkey in key_list[:-1]:
            assert subkey in d
            d = d[subkey]
        subkey = key_list[-1]
        assert subkey in d
        try:
            value = literal_eval(v)
        except (ValueError, SyntaxError):
            # Not a Python literal (narrowed from a bare `except:`): keep
            # the raw string value.
            value = v
        assert type(value) == type(d[subkey]), 'type {} does not match original type {}'.format(type(value), type(d[subkey]))
        d[subkey] = value
|
def save_config_to_file(cfg, pre='cfg', logger=None):
    """Recursively dump a config tree, either to *logger* or to stdout."""
    # Pick the output channel once instead of branching at every line.
    emit = logger.info if logger is not None else print
    for key, val in cfg.items():
        if isinstance(cfg[key], edict):
            emit('\n%s.%s = edict()' % (pre, key))
            # Recurse into the nested config with an extended prefix.
            save_config_to_file(cfg[key], pre='{}.{}'.format(pre, key), logger=logger)
            continue
        emit('%s.%s: %s' % (pre, key, val))
|
class FPN(nn.Module):
    """Feature Pyramid Network neck.

    1x1 lateral convs project each selected backbone level to
    ``out_channels``; coarser levels are 2x nearest-upsampled and added in
    top-down; a 3x3 conv smooths each merged map. When ``num_outs`` exceeds
    the backbone levels used, extra outputs come from stride-2 max pooling
    or, if ``add_extra_convs``, stride-2 convs.
    """
    def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=(- 1), add_extra_convs=False, normalize=None, activation=None):
        super(FPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.activation = activation
        # Convs carry a bias only when there is no norm layer.
        self.with_bias = (normalize is None)
        if (end_level == (- 1)):
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # With an explicit end_level no extra output levels are allowed.
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            # 1x1 lateral projection + 3x3 post-merge smoothing conv per level.
            l_conv = ConvModule(in_channels[i], out_channels, 1, normalize=normalize, bias=self.with_bias, activation=self.activation, inplace=False)
            fpn_conv = ConvModule(out_channels, out_channels, 3, padding=1, normalize=normalize, bias=self.with_bias, activation=self.activation, inplace=False)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        if (add_extra_convs and (extra_levels >= 1)):
            for i in range(extra_levels):
                # The first extra conv reads the last backbone map; later
                # extra convs chain off the previous extra output.
                in_channels = (self.in_channels[(self.backbone_end_level - 1)] if (i == 0) else out_channels)
                extra_fpn_conv = ConvModule(in_channels, out_channels, 3, stride=2, padding=1, normalize=normalize, bias=self.with_bias, activation=self.activation, inplace=False)
                self.fpn_convs.append(extra_fpn_conv)
    def init_weights(self):
        """Xavier-init every conv layer in the neck."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')
    def forward(self, inputs):
        assert (len(inputs) == len(self.in_channels))
        # Lateral 1x1 projections of the used backbone levels.
        laterals = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        used_backbone_levels = len(laterals)
        # Top-down pathway: add the 2x-upsampled coarser level into the finer one.
        for i in range((used_backbone_levels - 1), 0, (- 1)):
            laterals[(i - 1)] += F.interpolate(laterals[i], scale_factor=2, mode='nearest')
        outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)]
        if (self.num_outs > len(outs)):
            if (not self.add_extra_convs):
                # Extra levels via stride-2 max pooling of the last output.
                for i in range((self.num_outs - used_backbone_levels)):
                    outs.append(F.max_pool2d(outs[(- 1)], 1, stride=2))
            else:
                # Extra levels via the stride-2 convs built in __init__.
                orig = inputs[(self.backbone_end_level - 1)]
                outs.append(self.fpn_convs[used_backbone_levels](orig))
                for i in range((used_backbone_levels + 1), self.num_outs):
                    outs.append(self.fpn_convs[i](outs[(- 1)]))
        return tuple(outs)
|
class RPNBase(nn.Module):
    """Base for SECOND/PointPillars-style RPN necks: a sequence of
    downsampling conv stages (built by the subclass' ``_make_layer``) whose
    last ``len(upsample_strides)`` outputs are upsampled to one common
    resolution and concatenated channel-wise."""
    def __init__(self, use_norm=True, layer_nums=(3, 5, 5), layer_strides=(2, 2, 2), num_filters=(128, 128, 256), upsample_strides=(1, 2, 4), num_upsample_filters=(256, 256, 256), num_input_features=128):
        """upsample_strides support float: [0.25, 0.5, 1]
        if upsample_strides < 1, conv2d will be used instead of convtranspose2d.
        """
        super(RPNBase, self).__init__()
        self._layer_strides = layer_strides
        self._num_filters = num_filters
        self._layer_nums = layer_nums
        self._upsample_strides = upsample_strides
        self._num_upsample_filters = num_upsample_filters
        self._num_input_features = num_input_features
        self._use_norm = use_norm
        assert (len(layer_strides) == len(layer_nums))
        assert (len(num_filters) == len(layer_nums))
        assert (len(num_upsample_filters) == len(upsample_strides))
        # Only the last len(upsample_strides) stages get an upsample head.
        self._upsample_start_idx = (len(layer_nums) - len(upsample_strides))
        must_equal_list = []
        for i in range(len(upsample_strides)):
            must_equal_list.append((upsample_strides[i] / np.prod(layer_strides[:((i + self._upsample_start_idx) + 1)])))
        # Every upsampled output must land at the same spatial resolution,
        # otherwise the channel-wise concat in forward() would fail.
        for val in must_equal_list:
            assert (val == must_equal_list[0])
        if use_norm:
            # With norm layers the convs skip their bias (BN has its own).
            BatchNorm2d = change_default_args(eps=0.001, momentum=0.01)(nn.BatchNorm2d)
            Conv2d = change_default_args(bias=False)(nn.Conv2d)
            ConvTranspose2d = change_default_args(bias=False)(nn.ConvTranspose2d)
        else:
            BatchNorm2d = Empty
            Conv2d = change_default_args(bias=True)(nn.Conv2d)
            ConvTranspose2d = change_default_args(bias=True)(nn.ConvTranspose2d)
        # Input channels of each stage: the previous stage's output width.
        in_filters = [num_input_features, *num_filters[:(- 1)]]
        blocks = []
        deblocks = []
        for (i, layer_num) in enumerate(layer_nums):
            (block, num_out_filters) = self._make_layer(in_filters[i], num_filters[i], layer_num, stride=layer_strides[i])
            blocks.append(block)
            if ((i - self._upsample_start_idx) >= 0):
                stride = upsample_strides[(i - self._upsample_start_idx)]
                if (stride >= 1):
                    # stride >= 1: transposed conv upsamples the stage output.
                    stride = np.round(stride).astype(np.int64)
                    deblock = nn.Sequential(ConvTranspose2d(num_out_filters, num_upsample_filters[(i - self._upsample_start_idx)], stride, stride=stride), BatchNorm2d(num_upsample_filters[(i - self._upsample_start_idx)]), nn.ReLU())
                else:
                    # Fractional stride < 1: a strided conv downsamples instead.
                    stride = np.round((1 / stride)).astype(np.int64)
                    deblock = nn.Sequential(Conv2d(num_out_filters, num_upsample_filters[(i - self._upsample_start_idx)], stride, stride=stride), BatchNorm2d(num_upsample_filters[(i - self._upsample_start_idx)]), nn.ReLU())
                deblocks.append(deblock)
        self._num_out_filters = num_out_filters
        self.blocks = nn.ModuleList(blocks)
        self.deblocks = nn.ModuleList(deblocks)
    @property
    def downsample_factor(self):
        # Net input->output resolution ratio after the last upsample head.
        factor = np.prod(self._layer_strides)
        if (len(self._upsample_strides) > 0):
            factor /= self._upsample_strides[(- 1)]
        return factor
    def _make_layer(self, inplanes, planes, num_blocks, stride=1):
        # Subclasses build one conv stage; must return (module, out_channels).
        raise NotImplementedError
    def forward(self, x):
        ups = []
        stage_outputs = []
        for i in range(len(self.blocks)):
            x = self.blocks[i](x)
            stage_outputs.append(x)
            if ((i - self._upsample_start_idx) >= 0):
                ups.append(self.deblocks[(i - self._upsample_start_idx)](x))
        # Concatenate all upsampled stage outputs channel-wise.
        if (len(ups) > 0):
            x = torch.cat(ups, dim=1)
        return x
|
class RPN(RPNBase):
    """RPN with plain conv stages: each stage starts with a zero-pad plus a
    strided 3x3 conv, followed by ``num_blocks`` stride-1 conv/BN/ReLU layers.
    """

    def _make_layer(self, inplanes, planes, num_blocks, stride=1):
        """Build one downsampling stage; returns ``(stage_module, out_channels)``."""
        if self._use_norm:
            # With norm layers the convs skip their bias (BN has its own).
            BatchNorm2d = change_default_args(eps=0.001, momentum=0.01)(nn.BatchNorm2d)
            Conv2d = change_default_args(bias=False)(nn.Conv2d)
        else:
            BatchNorm2d = Empty
            Conv2d = change_default_args(bias=True)(nn.Conv2d)
        # (Removed an unused local ConvTranspose2d alias present in both branches.)
        block = Sequential(nn.ZeroPad2d(1), Conv2d(inplanes, planes, 3, stride=stride), BatchNorm2d(planes), nn.ReLU())
        for j in range(num_blocks):
            block.add(Conv2d(planes, planes, 3, padding=1))
            block.add(BatchNorm2d(planes))
            block.add(nn.ReLU())
        return (block, planes)
|
def get_paddings_indicator(actual_num, max_num, axis=0):
    """Create a boolean mask marking the valid (non-padding) entries of a
    padded tensor.

    Args:
        actual_num (Tensor): per-item count of real entries.
        max_num (int): padded size along the counted axis.
        axis (int): axis of *actual_num* after which the mask axis is inserted.

    Returns:
        Tensor: boolean mask, True where the index is below ``actual_num``.
    """
    counts = torch.unsqueeze(actual_num, axis + 1)
    view_shape = [1] * len(counts.shape)
    view_shape[axis + 1] = -1
    # Broadcast an index ramp [0, max_num) against the per-item counts.
    ramp = torch.arange(max_num, dtype=torch.int, device=counts.device).view(view_shape)
    return counts.int() > ramp
|
def get_pos_to_kw_map(func):
    """Map positional index -> name for *func*'s POSITIONAL_OR_KEYWORD
    parameters (other parameter kinds are skipped and do not count)."""
    params = inspect.signature(func).parameters
    ordinary = [name for name, info in params.items()
                if info.kind is info.POSITIONAL_OR_KEYWORD]
    return dict(enumerate(ordinary))
|
def change_default_args(**kwargs):
    """Return a class decorator that overrides default argument values.

    ``change_default_args(bias=False)(nn.Conv2d)`` yields a Conv2d subclass
    whose ``bias`` default is False; arguments the caller supplies (by
    keyword or positionally) still win over the injected defaults.
    """
    def layer_wrapper(layer_class):

        class DefaultArgLayer(layer_class):

            def __init__(self, *args, **kw):
                pos_to_kw = get_pos_to_kw_map(layer_class.__init__)
                kw_to_pos = {name: pos for pos, name in pos_to_kw.items()}
                for key, val in kwargs.items():
                    # Inject the new default only when the caller supplied the
                    # argument neither by keyword nor positionally (position 0
                    # is `self`, so positions 1..len(args) are covered).
                    supplied_positionally = kw_to_pos[key] <= len(args)
                    if key not in kw and not supplied_positionally:
                        kw[key] = val
                super().__init__(*args, **kw)

        return DefaultArgLayer

    return layer_wrapper
|
def one_hot(tensor, depth, dim=(- 1), on_value=1.0, dtype=torch.float32):
    """One-hot encode *tensor*: a new axis of size *depth* is inserted at
    *dim* and *on_value* is scattered at the indices held in *tensor*."""
    out_shape = list(tensor.shape) + [depth]
    encoded = torch.zeros(*out_shape, dtype=dtype, device=tensor.device)
    encoded.scatter_(dim, tensor.unsqueeze(dim).long(), on_value)
    return encoded
|
class ConvModule(nn.Module):
    """Conv2d bundled with optional normalization and ReLU activation.

    ``activate_last=True`` gives the usual conv -> norm -> act ordering;
    otherwise norm -> act -> conv (pre-activation).
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, normalize=None, activation='relu', inplace=True, activate_last=True):
        super(ConvModule, self).__init__()
        self.with_norm = (normalize is not None)
        # (sic: the 'with_activatation' spelling is kept for compatibility.)
        self.with_activatation = (activation is not None)
        self.with_bias = bias
        self.activation = activation
        self.activate_last = activate_last
        if (self.with_norm and self.with_bias):
            # A conv bias is redundant when a norm layer follows it.
            warnings.warn('ConvModule has norm and bias at the same time')
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=bias)
        # Mirror the conv's geometry attributes for convenient access.
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = self.conv.padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups
        if self.with_norm:
            # Pre-activation order normalizes the input channels instead.
            norm_channels = (out_channels if self.activate_last else in_channels)
            self.norm = build_norm_layer(normalize, norm_channels)
        if self.with_activatation:
            assert (activation in ['relu']), 'Only ReLU supported.'
            if (self.activation == 'relu'):
                self.activate = nn.ReLU(inplace=inplace)
        self.init_weights()
    def init_weights(self):
        """Kaiming-init the conv; constant-init the norm layer (if any)."""
        nonlinearity = ('relu' if (self.activation is None) else self.activation)
        kaiming_init(self.conv, nonlinearity=nonlinearity)
        if self.with_norm:
            constant_init(self.norm, 1, bias=0)
    def forward(self, x, activate=True, norm=True):
        """Apply conv/norm/act in the order fixed by ``activate_last``;
        *activate* and *norm* allow skipping those steps per call."""
        if self.activate_last:
            x = self.conv(x)
            if (norm and self.with_norm):
                x = self.norm(x)
            if (activate and self.with_activatation):
                x = self.activate(x)
        else:
            if (norm and self.with_norm):
                x = self.norm(x)
            if (activate and self.with_activatation):
                x = self.activate(x)
            x = self.conv(x)
        return x
|
class Empty(torch.nn.Module):
    """No-op module: returns None for zero args, the single argument for one,
    and the argument tuple otherwise. Used as a norm-layer stand-in."""

    def __init__(self, *args, **kwargs):
        # Accept and ignore any constructor arguments.
        super(Empty, self).__init__()

    def forward(self, *args, **kwargs):
        if not args:
            return None
        if len(args) == 1:
            return args[0]
        return args
|
def build_norm_layer(cfg, num_features):
    """Instantiate the normalization layer described by *cfg* (a dict with a
    'type' key plus layer kwargs) for *num_features* channels."""
    assert isinstance(cfg, dict) and 'type' in cfg
    params = dict(cfg)
    # Default epsilon unless the config overrides it.
    params.setdefault('eps', 1e-05)
    layer_type = params.pop('type')
    if layer_type not in norm_cfg:
        raise KeyError('Unrecognized norm type {}'.format(layer_type))
    layer_cls = norm_cfg[layer_type]
    if layer_cls is None:
        raise NotImplementedError
    return layer_cls(num_features, **params)
|
class Sequential(torch.nn.Module):
    """A sequential container.
    Modules will be added to it in the order they are passed in the
    constructor. Alternatively, an ordered dict of modules can also be
    passed in.

    To make it easier to understand, given is a small example::

        # Example of using Sequential
        model = Sequential(
            nn.Conv2d(1,20,5),
            nn.ReLU(),
            nn.Conv2d(20,64,5),
            nn.ReLU()
        )

        # Example of using Sequential with OrderedDict
        model = Sequential(OrderedDict([
            ('conv1', nn.Conv2d(1,20,5)),
            ('relu1', nn.ReLU()),
            ('conv2', nn.Conv2d(20,64,5)),
            ('relu2', nn.ReLU())
        ]))

        # Example of using Sequential with kwargs(python 3.6+)
        model = Sequential(
            conv1=nn.Conv2d(1,20,5),
            relu1=nn.ReLU(),
            conv2=nn.Conv2d(20,64,5),
            relu2=nn.ReLU()
        )
    """

    def __init__(self, *args, **kwargs):
        super(Sequential, self).__init__()
        if len(args) == 1 and isinstance(args[0], OrderedDict):
            for key, module in args[0].items():
                self.add_module(key, module)
        else:
            # Positional modules are auto-named by their index.
            for idx, module in enumerate(args):
                self.add_module(str(idx), module)
        for name, module in kwargs.items():
            if sys.version_info < (3, 6):
                raise ValueError('kwargs only supported in py36+')
            if name in self._modules:
                raise ValueError('name exists.')
            self.add_module(name, module)

    def __getitem__(self, idx):
        size = len(self)
        if not (-size <= idx < size):
            raise IndexError('index {} is out of range'.format(idx))
        # Normalize negative indices, then walk the ordered module dict.
        idx %= size
        for pos, module in enumerate(self._modules.values()):
            if pos == idx:
                return module

    def __len__(self):
        return len(self._modules)

    def add(self, module, name=None):
        """Append *module*; when *name* is None it is auto-named by index."""
        if name is None:
            name = str(len(self._modules))
        if name in self._modules:
            raise KeyError('name exists')
        self.add_module(name, module)

    def forward(self, input):
        out = input
        for module in self._modules.values():
            out = module(out)
        return out
|
def xavier_init(module, gain=1, bias=0, distribution='normal'):
    """Xavier-initialize ``module.weight``; set ``module.bias`` to a constant.

    :param module: layer exposing ``weight`` (and optionally ``bias``).
    :param gain: scaling factor for the Xavier initialization.
    :param bias: constant fill value for the bias term.
    :param distribution: 'uniform' or 'normal'.
    """
    assert distribution in ['uniform', 'normal']
    if distribution == 'uniform':
        nn.init.xavier_uniform_(module.weight, gain=gain)
    else:
        nn.init.xavier_normal_(module.weight, gain=gain)
    # Bug fix: layers built with bias=False expose module.bias = None;
    # the hasattr check alone let constant_ crash on None.
    if hasattr(module, 'bias') and module.bias is not None:
        nn.init.constant_(module.bias, bias)
|
def normal_init(module, mean=0, std=1, bias=0):
    """Normal-initialize ``module.weight``; set ``module.bias`` to a constant.

    :param module: layer exposing ``weight`` (and optionally ``bias``).
    :param mean: mean of the normal distribution.
    :param std: standard deviation of the normal distribution.
    :param bias: constant fill value for the bias term.
    """
    nn.init.normal_(module.weight, mean, std)
    # Bug fix: skip bias init when the layer was built with bias=False
    # (module.bias is None); constant_ would otherwise crash.
    if hasattr(module, 'bias') and module.bias is not None:
        nn.init.constant_(module.bias, bias)
|
def uniform_init(module, a=0, b=1, bias=0):
    """Uniform-initialize ``module.weight`` in [a, b]; set bias to a constant.

    :param module: layer exposing ``weight`` (and optionally ``bias``).
    :param a: lower bound of the uniform distribution.
    :param b: upper bound of the uniform distribution.
    :param bias: constant fill value for the bias term.
    """
    nn.init.uniform_(module.weight, a, b)
    # Bug fix: skip bias init when the layer was built with bias=False
    # (module.bias is None); constant_ would otherwise crash.
    if hasattr(module, 'bias') and module.bias is not None:
        nn.init.constant_(module.bias, bias)
|
def kaiming_init(module, mode='fan_out', nonlinearity='relu', bias=0, distribution='normal'):
    """Kaiming-initialize ``module.weight``; set ``module.bias`` to a constant.

    :param module: layer exposing ``weight`` (and optionally ``bias``).
    :param mode: 'fan_in' or 'fan_out' (see torch.nn.init).
    :param nonlinearity: name of the following nonlinearity, for gain.
    :param bias: constant fill value for the bias term.
    :param distribution: 'uniform' or 'normal'.
    """
    assert distribution in ['uniform', 'normal']
    if distribution == 'uniform':
        nn.init.kaiming_uniform_(module.weight, mode=mode, nonlinearity=nonlinearity)
    else:
        nn.init.kaiming_normal_(module.weight, mode=mode, nonlinearity=nonlinearity)
    # Bug fix: layers built with bias=False expose module.bias = None;
    # the hasattr check alone let constant_ crash on None.
    if hasattr(module, 'bias') and module.bias is not None:
        nn.init.constant_(module.bias, bias)
|
def bias_init_with_prob(prior_prob):
    """Return the bias b with sigmoid(b) == prior_prob.

    Used to initialize conv/fc biases of classification heads so the
    initial predicted foreground probability equals ``prior_prob``.
    """
    return float(-np.log((1 - prior_prob) / prior_prob))
|
class ThreeNN(Function):
    # Autograd wrapper for the compiled three-nearest-neighbour kernel
    # (unbatched variant; a batched ThreeNN appears later in this file).

    @staticmethod
    def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """
        Find the three nearest neighbors of unknown in known.
        :param ctx: autograd context (nothing is saved for backward)
        :param unknown: (N, 3) query points; must be contiguous
        :param known: (M, 3) reference points; must be contiguous
        :return:
            dist: (N, 3) l2 distance to the three nearest neighbors
            idx: (N, 3) index of 3 nearest neighbors
        """
        assert unknown.is_contiguous()
        assert known.is_contiguous()
        (N, _) = unknown.size()
        m = known.size(0)
        # Output buffers are allocated here and filled in-place by the kernel.
        dist2 = torch.cuda.FloatTensor(N, 3)
        idx = torch.cuda.IntTensor(N, 3)
        pointnet2.three_nn_wrapper(N, m, unknown, known, dist2, idx)
        # The kernel writes squared distances; return the l2 distances.
        return (torch.sqrt(dist2), idx)

    @staticmethod
    def backward(ctx, a=None, b=None):
        # Nearest-neighbor search is non-differentiable w.r.t. its inputs.
        return (None, None)
|
class ThreeInterpolate(Function):
    # Autograd wrapper for weighted interpolation from 3 neighbors
    # (unbatched variant; a batched ThreeInterpolate appears later).

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """
        Performs weighted linear interpolation on 3 features.
        :param ctx: autograd context; saves (idx, weight, m) for backward
        :param features: (M, C) feature descriptors to be interpolated from
        :param idx: (n, 3) three nearest neighbors of the target features in features
        :param weight: (n, 3) interpolation weights
        :return:
            output: (n, C) tensor of the interpolated features
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        assert weight.is_contiguous()
        (m, c) = features.size()
        n = idx.size(0)
        ctx.three_interpolate_for_backward = (idx, weight, m)
        # Output buffer filled in-place by the kernel.
        output = torch.cuda.FloatTensor(n, c)
        pointnet2.three_interpolate_wrapper(c, m, n, features, idx, weight, output)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
        """
        Scatter output gradients back onto the source features.
        :param ctx:
        :param grad_out: (n, C) tensor with gradients of outputs
        :return:
            grad_features: (M, C) tensor with gradients of features
            None: idx is not differentiable
            None: weight is not differentiable
        """
        (idx, weight, m) = ctx.three_interpolate_for_backward
        (n, c) = grad_out.size()
        # Zero-initialized buffer; the kernel accumulates gradients into it.
        grad_features = Variable(torch.cuda.FloatTensor(m, c).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.three_interpolate_grad_wrapper(c, n, m, grad_out_data, idx, weight, grad_features.data)
        return (grad_features, None, None)
|
def pts_in_boxes3d(pts, boxes3d):
    """CPU-op wrapper: point-in-box test for every (box, point) pair.

    :param pts: (N, ...) points, passed contiguously to the compiled op
    :param boxes3d: (M, ...) 3D boxes
    :return:
        pts_in_flag: (M, N) int tensor filled by the op
        reg_target: (N, 3) float tensor filled by the op
            (presumably per-point regression offsets -- confirm against
            the points_op_cpu extension)
    """
    N = len(pts)
    M = len(boxes3d)
    # Pre-zeroed output buffers are filled in-place by the compiled op.
    pts_in_flag = torch.IntTensor(M, N).fill_(0)
    reg_target = torch.FloatTensor(N, 3).fill_(0)
    points_op_cpu.pts_in_boxes3d(pts.contiguous(), boxes3d.contiguous(), pts_in_flag, reg_target)
    return (pts_in_flag, reg_target)
|
@numba.jit(nopython=True)
def _points_to_voxel_reverse_kernel(points, voxel_size, coors_range, num_points_per_voxel, coor_to_voxelidx, voxels, coors, max_points=35, max_voxels=20000):
    """Numba kernel: voxelize points, storing coordinates in zyx order.

    Fills ``num_points_per_voxel``, ``coor_to_voxelidx``, ``voxels`` and
    ``coors`` in place and returns the number of voxels produced.
    """
    N = points.shape[0]
    ndim = 3
    ndim_minus_1 = (ndim - 1)
    grid_size = ((coors_range[3:] - coors_range[:3]) / voxel_size)
    # np.round with an out argument keeps this allocation-free under numba.
    grid_size = np.round(grid_size, 0, grid_size).astype(np.int32)
    coor = np.zeros(shape=(3,), dtype=np.int32)
    voxel_num = 0
    failed = False
    for i in range(N):
        failed = False
        for j in range(ndim):
            c = np.floor(((points[(i, j)] - coors_range[j]) / voxel_size[j]))
            if ((c < 0) or (c >= grid_size[j])):
                # Point lies outside the voxelization range: drop it.
                failed = True
                break
            # Write xyz into reversed slots, producing zyx coordinates.
            coor[(ndim_minus_1 - j)] = c
        if failed:
            continue
        voxelidx = coor_to_voxelidx[(coor[0], coor[1], coor[2])]
        if (voxelidx == (- 1)):
            # First point in this cell: allocate a new voxel slot.
            voxelidx = voxel_num
            if (voxel_num >= max_voxels):
                # Voxel budget exhausted; remaining points are dropped.
                break
            voxel_num += 1
            coor_to_voxelidx[(coor[0], coor[1], coor[2])] = voxelidx
            coors[voxelidx] = coor
        num = num_points_per_voxel[voxelidx]
        if (num < max_points):
            # Keep at most max_points points per voxel; extras are ignored.
            voxels[(voxelidx, num)] = points[i]
            num_points_per_voxel[voxelidx] += 1
    return voxel_num
|
@numba.jit(nopython=True)
def _points_to_voxel_kernel(points, voxel_size, coors_range, num_points_per_voxel, coor_to_voxelidx, voxels, coors, max_points=35, max_voxels=20000):
    """Numba kernel: voxelize points, storing coordinates in xyz order.

    Same algorithm as the reverse kernel, without the coordinate
    reversal.  Fills the output arrays in place and returns the number
    of voxels produced.
    """
    N = points.shape[0]
    ndim = 3
    grid_size = ((coors_range[3:] - coors_range[:3]) / voxel_size)
    # np.round with an out argument keeps this allocation-free under numba.
    grid_size = np.round(grid_size, 0, grid_size).astype(np.int32)
    # NOTE(review): lower_bound/upper_bound are computed but never used.
    lower_bound = coors_range[:3]
    upper_bound = coors_range[3:]
    coor = np.zeros(shape=(3,), dtype=np.int32)
    voxel_num = 0
    failed = False
    for i in range(N):
        failed = False
        for j in range(ndim):
            c = np.floor(((points[(i, j)] - coors_range[j]) / voxel_size[j]))
            if ((c < 0) or (c >= grid_size[j])):
                # Point lies outside the voxelization range: drop it.
                failed = True
                break
            coor[j] = c
        if failed:
            continue
        voxelidx = coor_to_voxelidx[(coor[0], coor[1], coor[2])]
        if (voxelidx == (- 1)):
            # First point in this cell: allocate a new voxel slot.
            voxelidx = voxel_num
            if (voxel_num >= max_voxels):
                # Voxel budget exhausted; remaining points are dropped.
                break
            voxel_num += 1
            coor_to_voxelidx[(coor[0], coor[1], coor[2])] = voxelidx
            coors[voxelidx] = coor
        num = num_points_per_voxel[voxelidx]
        if (num < max_points):
            # Keep at most max_points points per voxel; extras are ignored.
            voxels[(voxelidx, num)] = points[i]
            num_points_per_voxel[voxelidx] += 1
    return voxel_num
|
def points_to_voxel(points, voxel_size, coors_range, max_points=35, reverse_index=True, max_voxels=20000):
    """Voxelize a KITTI-style point cloud in a single pass.

    Args:
        points: (N, ndim) array; columns 0..2 are xyz, the remaining
            columns carry extra per-point features (e.g. reflectivity).
        voxel_size: (3,) voxel edge lengths (xyz).
        coors_range: (6,) range as (xmin, ymin, zmin, xmax, ymax, zmax).
        max_points: maximum number of points kept per voxel.
        reverse_index: if True, returned coordinates are zyx instead of
            xyz (stored point features keep xyz order either way).
        max_voxels: hard cap on created voxels; shuffle the points first
            if dropping the overflow matters.

    Returns:
        voxels: (M, max_points, ndim) per-voxel point features.
        coordinates: (M, 3) int32 voxel coordinates.
        num_points_per_voxel: (M,) int32 per-voxel point counts.
    """
    if not isinstance(voxel_size, np.ndarray):
        voxel_size = np.array(voxel_size, dtype=points.dtype)
    if not isinstance(coors_range, np.ndarray):
        coors_range = np.array(coors_range, dtype=points.dtype)
    grid_shape = (coors_range[3:] - coors_range[:3]) / voxel_size
    grid_shape = tuple(np.round(grid_shape).astype(np.int32).tolist())
    if reverse_index:
        grid_shape = grid_shape[::-1]
    num_points_per_voxel = np.zeros(shape=(max_voxels,), dtype=np.int32)
    # -1 marks "no voxel allocated yet" for every grid cell.
    coor_to_voxelidx = -np.ones(shape=grid_shape, dtype=np.int32)
    voxels = np.zeros(shape=(max_voxels, max_points, points.shape[-1]), dtype=points.dtype)
    coors = np.zeros(shape=(max_voxels, 3), dtype=np.int32)
    kernel = _points_to_voxel_reverse_kernel if reverse_index else _points_to_voxel_kernel
    voxel_num = kernel(points, voxel_size, coors_range, num_points_per_voxel,
                       coor_to_voxelidx, voxels, coors, max_points, max_voxels)
    # Trim the preallocated buffers down to the voxels actually produced.
    return (voxels[:voxel_num], coors[:voxel_num], num_points_per_voxel[:voxel_num])
|
@numba.jit(nopython=True)
def bound_points_jit(points, upper_bound, lower_bound):
    """Flag points lying inside [lower_bound, upper_bound) per dimension.

    Returns a (N,) int32 array of 0/1 keep flags.  NOTE(review): only
    the first ndim-1 columns are tested -- the last column (e.g.
    reflectivity) is skipped; confirm this is intended with callers.
    """
    N = points.shape[0]
    ndim = points.shape[1]
    keep_indices = np.zeros((N,), dtype=np.int32)
    success = 0
    for i in range(N):
        success = 1
        for j in range((ndim - 1)):
            if ((points[(i, j)] < lower_bound[j]) or (points[(i, j)] >= upper_bound[j])):
                # Out of range in any checked dimension: reject the point.
                success = 0
                break
        keep_indices[i] = success
    return keep_indices
|
class get_pybind_include(object):
    """Lazily resolve the pybind11 include directory.

    The pybind11 import is deferred until ``str()`` is called, so this
    object can be listed in ``include_dirs`` before pybind11 has been
    installed by setup_requires.
    """

    def __init__(self, user=False):
        # When True, resolve the per-user (site.USER_BASE) include path.
        self.user = user

    def __str__(self):
        import pybind11
        return pybind11.get_include(self.user)
|
class RoIAwarePool3d(nn.Module):
    """Pools point features inside each 3D RoI onto a voxel grid.

    Thin module wrapper around RoIAwarePool3dFunction holding the output
    grid size and the per-voxel point cap.
    """

    def __init__(self, out_size, max_pts_each_voxel=128):
        super().__init__()
        # int or (x, y, z) tuple describing the pooled grid resolution.
        self.out_size = out_size
        self.max_pts_each_voxel = max_pts_each_voxel

    def forward(self, rois, pts, pts_feature, pool_method='max'):
        assert pool_method in ['max', 'avg']
        return RoIAwarePool3dFunction.apply(
            rois, pts, pts_feature, self.out_size, self.max_pts_each_voxel, pool_method)
|
class RoIAwarePool3dFunction(Function):
    # Autograd wrapper around the roiaware_pool3d CUDA extension.

    @staticmethod
    def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):
        """
        :param rois: (N, 7) [x, y, z, w, l, h, ry] in LiDAR coordinate, (x, y, z) is the bottom center of rois
        :param pts: (npoints, 3)
        :param pts_feature: (npoints, C)
        :param out_size: int or tuple, like 7 or (7, 7, 7)
        :param max_pts_each_voxel: cap on points considered per output voxel
        :param pool_method: 'max' or 'avg'
        :return:
            pooled_features: (N, out_x, out_y, out_z, C)
        """
        if isinstance(out_size, int):
            out_x = out_y = out_z = out_size
        else:
            assert (len(out_size) == 3)
            for k in range(3):
                assert isinstance(out_size[k], int)
            (out_x, out_y, out_z) = out_size
        num_rois = rois.shape[0]
        num_channels = pts_feature.shape[(- 1)]
        num_pts = pts.shape[0]
        pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))
        # argmax and per-voxel point indices are filled by the kernel and
        # retained for the backward pass.
        argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)
        pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_each_voxel), dtype=torch.int)
        # The extension takes the pooling mode as an int flag.
        pool_method_map = {'max': 0, 'avg': 1}
        pool_method = pool_method_map[pool_method]
        roiaware_pool3d_cuda.forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, pool_method)
        ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels)
        return pooled_features

    @staticmethod
    def backward(ctx, grad_out):
        """
        :param grad_out: (N, out_x, out_y, out_z, C)
        :return:
            grad_in: (npoints, C) gradient w.r.t. pts_feature; all other
            forward inputs are non-differentiable (None)
        """
        (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels) = ctx.roiaware_pool3d_for_backward
        grad_in = grad_out.new_zeros((num_pts, num_channels))
        roiaware_pool3d_cuda.backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method)
        return (None, None, grad_in, None, None, None)
|
def points_in_boxes_gpu(points, boxes):
    """
    Assign each point to the box containing it (GPU extension op).
    :param points: (B, M, 3)
    :param boxes: (B, T, 7), num_valid_boxes <= T
        (the original docstring said 8 columns; the assert below
        requires 7)
    :return box_idxs_of_pts: (B, M), default background = -1
    """
    assert (boxes.shape[0] == points.shape[0])
    assert (boxes.shape[2] == 7)
    (batch_size, num_points, _) = points.shape
    # -1 marks points that fall inside no box.
    box_idxs_of_pts = points.new_zeros((batch_size, num_points), dtype=torch.int).fill_((- 1))
    roiaware_pool3d_cuda.points_in_boxes_gpu(boxes.contiguous(), points.contiguous(), box_idxs_of_pts)
    return box_idxs_of_pts
|
def points_in_boxes_cpu(points, boxes):
    """
    Per-box point membership (CPU extension op).
    :param points: (npoints, 3)
    :param boxes: (N, 7) [x, y, z, w, l, h, rz] in LiDAR coordinate, z is the bottom center, each box DO NOT overlaps
    :return point_indices: (N, npoints) int tensor filled by the op
    """
    assert (boxes.shape[1] == 7)
    assert (points.shape[1] == 3)
    # Pre-zeroed output buffer is filled in-place by the compiled op.
    point_indices = points.new_zeros((boxes.shape[0], points.shape[0]), dtype=torch.int)
    roiaware_pool3d_cuda.points_in_boxes_cpu(boxes.float().contiguous(), points.float().contiguous(), point_indices)
    return point_indices
|
class _PointnetSAModuleBase(nn.Module):
    """Base class for PointNet++ set-abstraction modules.

    Subclasses fill in ``npoint``, ``groupers`` and ``mlps``; this class
    provides the shared forward pass (sample -> group -> MLP -> pool).
    """

    def __init__(self):
        super().__init__()
        self.npoint = None  # number of sampled centroids; None = group everything
        self.groupers = None  # ModuleList of grouping operators, one per scale
        self.mlps = None  # ModuleList of shared MLPs, one per scale
        self.pool_method = 'max_pool'  # 'max_pool' or 'avg_pool'

    def forward(self, xyz: torch.Tensor, features: torch.Tensor=None, new_xyz=None) -> (torch.Tensor, torch.Tensor):
        """
        :param xyz: (B, N, 3) tensor of the xyz coordinates of the features
        :param features: (B, N, C) tensor of the descriptors of the the features
        :param new_xyz: optional precomputed centroids; sampled by FPS when None
        :return:
            new_xyz: (B, npoint, 3) tensor of the new features' xyz
            new_features: (B, npoint, \\sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []
        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if (new_xyz is None):
            # Farthest-point-sample npoint centroids, or keep None so the
            # grouper treats the whole cloud as one group.
            new_xyz = (pointnet2_utils.gather_operation(xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)).transpose(1, 2).contiguous() if (self.npoint is not None) else None)
        for i in range(len(self.groupers)):
            new_features = self.groupers[i](xyz, new_xyz, features)
            new_features = self.mlps[i](new_features)
            # Pool over the nsample dimension of each group.
            if (self.pool_method == 'max_pool'):
                new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])
            elif (self.pool_method == 'avg_pool'):
                new_features = F.avg_pool2d(new_features, kernel_size=[1, new_features.size(3)])
            else:
                raise NotImplementedError
            new_features = new_features.squeeze((- 1))
            new_features_list.append(new_features)
        # Concatenate per-scale features along the channel dimension.
        return (new_xyz, torch.cat(new_features_list, dim=1))
|
class PointnetSAModuleMSG(_PointnetSAModuleBase):
    """Pointnet set abstraction layer with multiscale grouping."""

    def __init__(self, *, npoint: int, radii: List[float], nsamples: List[int], mlps: List[List[int]], bn: bool=True, use_xyz: bool=True, pool_method='max_pool', instance_norm=False):
        """
        :param npoint: int, number of sampled centroids (None groups all points)
        :param radii: list of float, list of radii to group with
        :param nsamples: list of int, number of samples in each ball query
        :param mlps: list of list of int, spec of the pointnet before the global pooling for each scale
        :param bn: whether to use batchnorm
        :param use_xyz: whether to concatenate xyz to the grouped features
        :param pool_method: max_pool / avg_pool
        :param instance_norm: whether to use instance_norm
        """
        super().__init__()
        assert len(radii) == len(nsamples) == len(mlps)
        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
                if npoint is not None else pointnet2_utils.GroupAll(use_xyz))
            # Bug fix: copy the spec before editing it.  The original did
            # ``mlp_spec = mlps[i]`` and then ``mlp_spec[0] += 3``, which
            # mutated the caller's mlps argument in place.
            mlp_spec = list(mlps[i])
            if use_xyz:
                # Grouped features carry 3 extra xyz channels.
                mlp_spec[0] += 3
            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn, instance_norm=instance_norm))
        self.pool_method = pool_method
|
class PointnetSAModule(PointnetSAModuleMSG):
    """Single-scale Pointnet set abstraction layer.

    Convenience wrapper around PointnetSAModuleMSG with exactly one
    grouping radius / sample count / MLP spec.
    """

    def __init__(self, *, mlp: List[int], npoint: int=None, radius: float=None, nsample: int=None, bn: bool=True, use_xyz: bool=True, pool_method='max_pool', instance_norm=False):
        """
        :param mlp: list of int, spec of the pointnet before the global max_pool
        :param npoint: int, number of features
        :param radius: float, radius of ball
        :param nsample: int, number of samples in the ball query
        :param bn: whether to use batchnorm
        :param use_xyz: whether to concatenate xyz to the grouped features
        :param pool_method: max_pool / avg_pool
        :param instance_norm: whether to use instance_norm
        """
        super().__init__(
            npoint=npoint,
            radii=[radius],
            nsamples=[nsample],
            mlps=[mlp],
            bn=bn,
            use_xyz=use_xyz,
            pool_method=pool_method,
            instance_norm=instance_norm)
|
class PointnetFPModule(nn.Module):
    """Feature-propagation module: spreads features from a coarse point
    set back onto a denser set via inverse-distance interpolation."""

    def __init__(self, *, mlp: List[int], bn: bool=True):
        """
        :param mlp: list of int, channel spec of the post-interpolation MLP
        :param bn: whether to use batchnorm
        """
        super().__init__()
        self.mlp = pt_utils.SharedMLP(mlp, bn=bn)

    def forward(self, unknown: torch.Tensor, known: torch.Tensor, unknow_feats: torch.Tensor, known_feats: torch.Tensor) -> torch.Tensor:
        """
        :param unknown: (B, n, 3) tensor of the xyz positions of the unknown features
        :param known: (B, m, 3) tensor of the xyz positions of the known features
        :param unknow_feats: (B, C1, n) tensor of the features to be propagated to
        :param known_feats: (B, C2, m) tensor of features to be propagated
        :return:
            new_features: (B, mlp[-1], n) tensor of the features of the unknown features
        """
        if (known is not None):
            (dist, idx) = pointnet2_utils.three_nn(unknown, known)
            # Inverse-distance weights, normalized over the 3 neighbors;
            # the epsilon guards against division by zero at dist == 0.
            dist_recip = (1.0 / (dist + 1e-08))
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = (dist_recip / norm)
            interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
        else:
            # No source coordinates: broadcast the (global) features to
            # every target point instead of interpolating.
            interpolated_feats = known_feats.expand(*known_feats.size()[0:2], unknown.size(1))
        if (unknow_feats is not None):
            # Concatenate skip-connection features along channels.
            new_features = torch.cat([interpolated_feats, unknow_feats], dim=1)
        else:
            new_features = interpolated_feats
        # SharedMLP expects a 4D (B, C, n, 1) input.
        new_features = new_features.unsqueeze((- 1))
        new_features = self.mlp(new_features)
        return new_features.squeeze((- 1))
|
class FurthestPointSampling(Function):
    # Iterative farthest-point sampling via the pointnet2 CUDA extension.

    @staticmethod
    def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
        """
        Uses iterative furthest point sampling to select a set of npoint features that have the largest
        minimum distance.
        :param ctx: autograd context (nothing saved for backward)
        :param xyz: (B, N, 3) where N > npoint; must be contiguous
        :param npoint: int, number of features in the sampled set
        :return:
            output: (B, npoint) tensor containing the set
        """
        assert xyz.is_contiguous()
        (B, N, _) = xyz.size()
        output = torch.cuda.IntTensor(B, npoint)
        # Scratch buffer of per-point distances to the selected set,
        # initialized to a huge value so every point starts "far away".
        temp = torch.cuda.FloatTensor(B, N).fill_(10000000000.0)
        pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # Sampling is non-differentiable.  NOTE(review): the first
        # parameter is really the autograd ctx despite its name.
        return (None, None)
|
class GatherOperation(Function):
    # Gathers feature columns by index via the compiled pointnet2 kernel.

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx: autograd context; saves (idx, C, N) for backward
        :param features: (B, C, N)
        :param idx: (B, npoint) index tensor of the features to gather
        :return:
            output: (B, C, npoint)
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        (B, npoint) = idx.size()
        (_, C, N) = features.size()
        # Output buffer filled in-place by the kernel.
        output = torch.cuda.FloatTensor(B, C, npoint)
        pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output)
        ctx.for_backwards = (idx, C, N)
        return output

    @staticmethod
    def backward(ctx, grad_out):
        """Scatter output gradients back to the gathered positions."""
        (idx, C, N) = ctx.for_backwards
        (B, npoint) = idx.size()
        # Zero-initialized buffer; the kernel accumulates gradients into it.
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data)
        return (grad_features, None)
|
class ThreeNN(Function):
    # Batched three-nearest-neighbour search (CUDA extension wrapper).
    # NOTE(review): same name as the earlier unbatched ThreeNN in this
    # file -- if both live in one module, this definition shadows it.

    @staticmethod
    def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """
        Find the three nearest neighbors of unknown in known.
        :param ctx: autograd context (nothing saved for backward)
        :param unknown: (B, N, 3); must be contiguous
        :param known: (B, M, 3); must be contiguous
        :return:
            dist: (B, N, 3) l2 distance to the three nearest neighbors
            idx: (B, N, 3) index of 3 nearest neighbors
        """
        assert unknown.is_contiguous()
        assert known.is_contiguous()
        (B, N, _) = unknown.size()
        m = known.size(1)
        # Output buffers filled in-place by the kernel.
        dist2 = torch.cuda.FloatTensor(B, N, 3)
        idx = torch.cuda.IntTensor(B, N, 3)
        pointnet2.three_nn_wrapper(B, N, m, unknown, known, dist2, idx)
        # The kernel writes squared distances; return the l2 distances.
        return (torch.sqrt(dist2), idx)

    @staticmethod
    def backward(ctx, a=None, b=None):
        # Nearest-neighbor search is non-differentiable.
        return (None, None)
|
class ThreeInterpolate(Function):
    # Batched weighted interpolation from 3 neighbors (CUDA wrapper).
    # NOTE(review): same name as the earlier unbatched ThreeInterpolate.

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """
        Performs weighted linear interpolation on 3 features.
        :param ctx: autograd context; saves (idx, weight, m) for backward
        :param features: (B, C, M) features descriptors to be interpolated from
        :param idx: (B, n, 3) three nearest neighbors of the target features in features
        :param weight: (B, n, 3) interpolation weights
        :return:
            output: (B, C, n) tensor of the interpolated features
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        assert weight.is_contiguous()
        (B, c, m) = features.size()
        n = idx.size(1)
        ctx.three_interpolate_for_backward = (idx, weight, m)
        # Output buffer filled in-place by the kernel.
        output = torch.cuda.FloatTensor(B, c, n)
        pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
        """
        Scatter output gradients back onto the source features.
        :param ctx:
        :param grad_out: (B, C, n) tensor with gradients of outputs
        :return:
            grad_features: (B, C, M) tensor with gradients of features
            None: idx is not differentiable
            None: weight is not differentiable
        """
        (idx, weight, m) = ctx.three_interpolate_for_backward
        (B, c, n) = grad_out.size()
        # Zero-initialized buffer; the kernel accumulates gradients into it.
        grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.three_interpolate_grad_wrapper(B, c, n, m, grad_out_data, idx, weight, grad_features.data)
        return (grad_features, None, None)
|
class GroupingOperation(Function):
    # Gathers per-group neighborhood features by index (CUDA wrapper).

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx: autograd context; saves (idx, N) for backward
        :param features: (B, C, N) tensor of features to group
        :param idx: (B, npoint, nsample) tensor containing the indicies of features to group with
        :return:
            output: (B, C, npoint, nsample) tensor
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        (B, nfeatures, nsample) = idx.size()
        (_, C, N) = features.size()
        # Output buffer filled in-place by the kernel.
        output = torch.cuda.FloatTensor(B, C, nfeatures, nsample)
        pointnet2.group_points_wrapper(B, C, N, nfeatures, nsample, features, idx, output)
        ctx.for_backwards = (idx, N)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """
        :param ctx:
        :param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward
        :return:
            grad_features: (B, C, N) gradient of the features
        """
        (idx, N) = ctx.for_backwards
        (B, C, npoint, nsample) = grad_out.size()
        # Zero-initialized buffer; the kernel accumulates gradients into it.
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.group_points_grad_wrapper(B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data)
        return (grad_features, None)
|
class BallQuery(Function):
    # Fixed-radius neighborhood search around each centroid (CUDA wrapper).

    @staticmethod
    def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
        """
        :param ctx: autograd context (nothing saved for backward)
        :param radius: float, radius of the balls
        :param nsample: int, maximum number of features in the balls
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centers of the ball query
        :return:
            idx: (B, npoint, nsample) tensor with the indicies of the features that form the query balls
        """
        assert new_xyz.is_contiguous()
        assert xyz.is_contiguous()
        (B, N, _) = xyz.size()
        npoint = new_xyz.size(1)
        # Zero-initialized index buffer filled in-place by the kernel.
        idx = torch.cuda.IntTensor(B, npoint, nsample).zero_()
        pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx)
        return idx

    @staticmethod
    def backward(ctx, a=None):
        # Index selection is non-differentiable.
        return (None, None, None, None)
|
class QueryAndGroup(nn.Module):
    """Ball-query grouping: for each centroid, gathers up to ``nsample``
    neighbors within ``radius`` and returns their centered coordinates
    and (optionally) features."""

    def __init__(self, radius: float, nsample: int, use_xyz: bool=True):
        """
        :param radius: float, radius of ball
        :param nsample: int, maximum number of features to gather in the ball
        :param use_xyz: whether to concatenate centered xyz to the features
        """
        super().__init__()
        (self.radius, self.nsample, self.use_xyz) = (radius, nsample, use_xyz)

    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor=None) -> Tuple[torch.Tensor]:
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centroids
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, 3 + C, npoint, nsample)
        """
        idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
        xyz_trans = xyz.transpose(1, 2).contiguous()
        grouped_xyz = grouping_operation(xyz_trans, idx)
        # Express neighbor coordinates relative to their centroid.
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze((- 1))
        if (features is not None):
            grouped_features = grouping_operation(features, idx)
            if self.use_xyz:
                new_features = torch.cat([grouped_xyz, grouped_features], dim=1)
            else:
                new_features = grouped_features
        else:
            assert self.use_xyz, 'Cannot have not features and not use xyz as a feature!'
            new_features = grouped_xyz
        return new_features
|
class GroupAll(nn.Module):
    """Groups the entire point set into a single group (npoint == 1)."""

    def __init__(self, use_xyz: bool=True):
        super().__init__()
        self.use_xyz = use_xyz

    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor=None):
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: ignored
        :param features: (B, C, N) descriptors of the features
        :return:
            new_features: (B, C + 3, 1, N)
        """
        # Reshape coordinates to (B, 3, 1, N) to act as a single group.
        grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
        if features is None:
            return grouped_xyz
        grouped_features = features.unsqueeze(2)
        if not self.use_xyz:
            return grouped_features
        return torch.cat([grouped_xyz, grouped_features], dim=1)
|
class SharedMLP(nn.Sequential):
    """Stack of 1x1 Conv2d layers applied pointwise (a "shared" MLP)."""

    def __init__(self, args: List[int], *, bn: bool=False, activation=nn.ReLU(inplace=True), preact: bool=False, first: bool=False, name: str='', instance_norm: bool=False):
        super().__init__()
        for i in range(len(args) - 1):
            # The very first layer of a pre-activation stack gets neither
            # batchnorm nor an activation in front of it.
            skip_pre = first and preact and (i == 0)
            self.add_module(
                name + 'layer{}'.format(i),
                Conv2d(
                    args[i],
                    args[i + 1],
                    bn=(not skip_pre) and bn,
                    activation=None if skip_pre else activation,
                    preact=preact,
                    instance_norm=instance_norm))
|
class _ConvBase(nn.Sequential):
    """Conv + optional (batch|instance) norm + optional activation.

    ``preact`` controls ordering: norm/activation are added before the
    conv when True, after it otherwise.  The conv bias is dropped
    whenever batch norm is enabled, since the norm makes it redundant.
    """

    def __init__(self, in_size, out_size, kernel_size, stride, padding, activation, bn, init, conv=None, batch_norm=None, bias=True, preact=False, name='', instance_norm=False, instance_norm_func=None):
        super().__init__()
        # Batch norm subsumes the bias term.
        bias = (bias and (not bn))
        conv_unit = conv(in_size, out_size, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
        init(conv_unit.weight)
        if bias:
            nn.init.constant_(conv_unit.bias, 0)
        if bn:
            # Norm width follows placement: in_size when pre-activation,
            # out_size when applied after the conv.
            if (not preact):
                bn_unit = batch_norm(out_size)
            else:
                bn_unit = batch_norm(in_size)
        if instance_norm:
            if (not preact):
                in_unit = instance_norm_func(out_size, affine=False, track_running_stats=False)
            else:
                in_unit = instance_norm_func(in_size, affine=False, track_running_stats=False)
        if preact:
            if bn:
                self.add_module((name + 'bn'), bn_unit)
            if (activation is not None):
                self.add_module((name + 'activation'), activation)
            # Instance norm is only attached when batch norm is disabled.
            if ((not bn) and instance_norm):
                self.add_module((name + 'in'), in_unit)
        self.add_module((name + 'conv'), conv_unit)
        if (not preact):
            if bn:
                self.add_module((name + 'bn'), bn_unit)
            if (activation is not None):
                self.add_module((name + 'activation'), activation)
            if ((not bn) and instance_norm):
                self.add_module((name + 'in'), in_unit)
|
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=''):
super().__init__()
self.add_module((name + 'bn'), batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
|
class BatchNorm1d(_BNBase):
    """_BNBase specialization wrapping nn.BatchNorm1d."""

    def __init__(self, in_size: int, *, name: str=''):
        super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
|
class BatchNorm2d(_BNBase):
    """_BNBase specialization wrapping nn.BatchNorm2d.

    NOTE(review): unlike BatchNorm1d, ``name`` is positional here; kept
    as-is for backward compatibility with existing callers.
    """

    def __init__(self, in_size: int, name: str=''):
        super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
|
class Conv1d(_ConvBase):
    """_ConvBase specialization: nn.Conv1d + BatchNorm1d/InstanceNorm1d."""

    def __init__(self, in_size: int, out_size: int, *, kernel_size: int=1, stride: int=1, padding: int=0, activation=nn.ReLU(inplace=True), bn: bool=False, init=nn.init.kaiming_normal_, bias: bool=True, preact: bool=False, name: str='', instance_norm=False):
        super().__init__(in_size, out_size, kernel_size, stride, padding, activation, bn, init, conv=nn.Conv1d, batch_norm=BatchNorm1d, bias=bias, preact=preact, name=name, instance_norm=instance_norm, instance_norm_func=nn.InstanceNorm1d)
|
class Conv2d(_ConvBase):
    """_ConvBase specialization: nn.Conv2d + BatchNorm2d/InstanceNorm2d."""

    def __init__(self, in_size: int, out_size: int, *, kernel_size: Tuple[(int, int)]=(1, 1), stride: Tuple[(int, int)]=(1, 1), padding: Tuple[(int, int)]=(0, 0), activation=nn.ReLU(inplace=True), bn: bool=False, init=nn.init.kaiming_normal_, bias: bool=True, preact: bool=False, name: str='', instance_norm=False):
        super().__init__(in_size, out_size, kernel_size, stride, padding, activation, bn, init, conv=nn.Conv2d, batch_norm=BatchNorm2d, bias=bias, preact=preact, name=name, instance_norm=instance_norm, instance_norm_func=nn.InstanceNorm2d)
|
class FC(nn.Sequential):
    """Linear layer with optional batch norm and activation.

    Module order is bn -> activation -> fc when ``preact`` is True and
    fc -> bn -> activation otherwise.  The linear bias is dropped (and
    its init skipped) when batch norm is enabled, since bn makes it
    redundant.
    """

    def __init__(self, in_size: int, out_size: int, *, activation=nn.ReLU(inplace=True), bn: bool=False, init=None, preact: bool=False, name: str=''):
        super().__init__()
        fc = nn.Linear(in_size, out_size, bias=(not bn))
        if (init is not None):
            init(fc.weight)
        if (not bn):
            # Bug fix: use the in-place initializer.  ``nn.init.constant``
            # is the long-deprecated alias (removed in recent PyTorch);
            # the rest of this file already uses ``constant_``.
            nn.init.constant_(fc.bias, 0)
        if preact:
            if bn:
                self.add_module((name + 'bn'), BatchNorm1d(in_size))
            if (activation is not None):
                self.add_module((name + 'activation'), activation)
        self.add_module((name + 'fc'), fc)
        if (not preact):
            if bn:
                self.add_module((name + 'bn'), BatchNorm1d(out_size))
            if (activation is not None):
                self.add_module((name + 'activation'), activation)
|
def log_print(info, log_f=None):
    """Print ``info`` to stdout and, when a file object is given, echo it there."""
    print(info)
    if log_f is not None:
        # Mirror the message into the log file as well.
        print(info, file=log_f)
|
class DiceLoss(nn.Module):
    """Soft dice loss over sigmoid logits, skipping masked-out targets."""

    def __init__(self, ignore_target=-1):
        super().__init__()
        # Targets equal to this label are excluded from the loss.
        self.ignore_target = ignore_target

    def forward(self, input, target):
        """
        :param input: (N), logit
        :param target: (N), {0, 1}
        :return: scalar loss in [0, 1]
        """
        probs = torch.sigmoid(input.view(-1))
        labels = target.float().view(-1)
        valid = (labels != self.ignore_target).float()
        # Soft intersection / union via elementwise min / max; the clamp
        # avoids dividing by zero when nothing is valid.
        inter = (torch.min(probs, labels) * valid).sum()
        union = torch.clamp((torch.max(probs, labels) * valid).sum(), min=1.0)
        return 1.0 - inter / union
|
def train_one_epoch(model, train_loader, optimizer, epoch, lr_scheduler, total_it, tb_log, log_f):
    """Train foreground segmentation for one epoch.

    :param model: network mapping pts_input -> per-point logits
    :param train_loader: yields dicts with numpy 'pts_input'/'cls_labels'
    :param optimizer: optimizer stepping the model parameters
    :param epoch: current epoch index (for logging)
    :param lr_scheduler: scheduler queried for the current learning rate
    :param total_it: running global iteration counter
    :param tb_log: optional tensorboard-style logger (may be None)
    :param log_f: optional log file for log_print
    :return: updated total iteration count
    """
    model.train()
    log_print(('===============TRAIN EPOCH %d================' % epoch), log_f=log_f)
    loss_func = DiceLoss(ignore_target=-1)
    for it, batch in enumerate(train_loader):
        optimizer.zero_grad()
        pts_input, cls_labels = batch['pts_input'], batch['cls_labels']
        pts_input = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
        cls_labels = torch.from_numpy(cls_labels).cuda(non_blocking=True).long().view(-1)
        pred_cls = model(pts_input)
        pred_cls = pred_cls.view(-1)
        loss = loss_func(pred_cls, cls_labels)
        loss.backward()
        # Gradient clipping for stability.
        clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        total_it += 1
        # Foreground IoU between thresholded predictions and labels.
        pred_class = (torch.sigmoid(pred_cls) > FG_THRESH)
        fg_mask = (cls_labels > 0)
        correct = ((pred_class.long() == cls_labels) & fg_mask).float().sum()
        union = (fg_mask.sum().float() + (pred_class > 0).sum().float()) - correct
        iou = correct / torch.clamp(union, min=1.0)
        cur_lr = lr_scheduler.get_lr()[0]
        # Bug fix: ALL tb_log uses are now guarded -- the learning-rate
        # logging previously ran before the ``tb_log is not None`` check
        # and crashed when no logger was supplied.
        if tb_log is not None:
            tb_log.log_value('learning_rate', cur_lr, epoch)
            tb_log.log_value('train_loss', loss, total_it)
            tb_log.log_value('train_fg_iou', iou, total_it)
        log_print(('training epoch %d: it=%d/%d, total_it=%d, loss=%.5f, fg_iou=%.3f, lr=%f' % (epoch, it, len(train_loader), total_it, loss.item(), iou.item(), cur_lr)), log_f=log_f)
    return total_it
|
def eval_one_epoch(model, eval_loader, epoch, tb_log, log_f=None):
    """Evaluate foreground-segmentation IoU over one pass of ``eval_loader``.

    :param model: network mapping point inputs to per-point logits.
    :param eval_loader: loader yielding dicts with numpy 'pts_input' and 'cls_labels'.
    :param epoch: current epoch number (for logging only).
    :param tb_log: tensorboard-style logger with ``log_value`` or None.
    :param log_f: optional log file handle passed to ``log_print``.
    :return: average foreground IoU over all evaluated batches.
    """
    # BUG FIX: was model.train(); evaluation must run in eval mode so that
    # dropout / batch-norm layers behave deterministically.
    model.eval()
    log_print(('===============EVAL EPOCH %d================' % epoch), log_f=log_f)
    iou_list = []
    for (it, batch) in enumerate(eval_loader):
        (pts_input, cls_labels) = (batch['pts_input'], batch['cls_labels'])
        pts_input = torch.from_numpy(pts_input).cuda(non_blocking=True).float()
        cls_labels = torch.from_numpy(cls_labels).cuda(non_blocking=True).long().view((- 1))
        pred_cls = model(pts_input)
        pred_cls = pred_cls.view((- 1))
        # Foreground IoU of thresholded predictions vs. positive labels.
        pred_class = (torch.sigmoid(pred_cls) > FG_THRESH)
        fg_mask = (cls_labels > 0)
        correct = ((pred_class.long() == cls_labels) & fg_mask).float().sum()
        union = ((fg_mask.sum().float() + (pred_class > 0).sum().float()) - correct)
        iou = (correct / torch.clamp(union, min=1.0))
        iou_list.append(iou.item())
        log_print(('EVAL: it=%d/%d, iou=%.3f' % (it, len(eval_loader), iou)), log_f=log_f)
    iou_list = np.array(iou_list)
    avg_iou = iou_list.mean()
    # Guard tb_log for consistency with train_one_epoch.
    if (tb_log is not None):
        tb_log.log_value('eval_fg_iou', avg_iou, epoch)
    log_print(('\nEpoch %d: Average IoU (samples=%d): %.6f' % (epoch, iou_list.__len__(), avg_iou)), log_f=log_f)
    return avg_iou
|
def save_checkpoint(model, epoch, ckpt_name):
    """Serialize the model weights plus the epoch number to ``<ckpt_name>.pth``."""
    # Unwrap DataParallel so the saved keys carry no 'module.' prefix.
    net = model.module if isinstance(model, torch.nn.DataParallel) else model
    state = {'epoch': epoch, 'model_state': net.state_dict()}
    torch.save(state, '{}.pth'.format(ckpt_name))
|
def load_checkpoint(model, filename):
    """Load model weights from ``filename`` and return the stored epoch.

    :raises FileNotFoundError: if ``filename`` does not exist.
    """
    # Guard clause instead of if/else: fail fast on a missing file.
    if not os.path.isfile(filename):
        raise FileNotFoundError
    log_print(('==> Loading from checkpoint %s' % filename))
    checkpoint = torch.load(filename)
    epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['model_state'])
    log_print('==> Done')
    return epoch
|
def train_and_eval(model, train_loader, eval_loader, tb_log, ckpt_dir, log_f):
    """Full training driver: Adam + step-decay LR schedule, with periodic
    evaluation and checkpointing every ``args.ckpt_save_interval`` epochs.

    Relies on the module-level ``args`` namespace for all hyper-parameters
    (lr, weight_decay, decay_step_list, lr_decay, lr_clip, epochs,
    ckpt_save_interval).
    """
    model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    def lr_lbmd(cur_epoch):
        # LambdaLR multiplier: apply args.lr_decay once per decay step already
        # passed, but never let the effective lr fall below args.lr_clip.
        cur_decay = 1
        for decay_step in args.decay_step_list:
            if (cur_epoch >= decay_step):
                cur_decay = (cur_decay * args.lr_decay)
        return max(cur_decay, (args.lr_clip / args.lr))
    lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd)
    total_it = 0
    for epoch in range(1, (args.epochs + 1)):
        # NOTE(review): stepping the scheduler with an explicit epoch before
        # training is the legacy (pre-1.1) PyTorch ordering -- confirm it
        # matches the installed torch version.
        lr_scheduler.step(epoch)
        total_it = train_one_epoch(model, train_loader, optimizer, epoch, lr_scheduler, total_it, tb_log, log_f)
        if ((epoch % args.ckpt_save_interval) == 0):
            # Evaluate without building autograd graphs, then snapshot weights.
            with torch.no_grad():
                avg_iou = eval_one_epoch(model, eval_loader, epoch, tb_log, log_f)
            ckpt_name = os.path.join(ckpt_dir, ('checkpoint_epoch_%d' % epoch))
            save_checkpoint(model, epoch, ckpt_name)
|
def main():
    """CLI entry point: run COCO-style evaluation on a detection result file."""
    parser = ArgumentParser(description='COCO Evaluation')
    parser.add_argument('result', help='result file path')
    parser.add_argument('--ann', help='annotation file path')
    parser.add_argument(
        '--types',
        type=str,
        nargs='+',
        choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'],
        default=['bbox'],
        help='result types')
    parser.add_argument(
        '--max-dets',
        type=int,
        nargs='+',
        default=[100, 300, 1000],
        help='proposal numbers, only used for recall evaluation')
    args = parser.parse_args()
    coco_eval(args.result, args.types, args.ann, args.max_dets)
|
def parse_xml(args):
    """Parse one PASCAL-VOC annotation file into an mmdetection record.

    :param args: (xml_path, img_path) tuple; designed for pool.map-style calls.
    :return: dict with 'filename', 'width', 'height' and an 'ann' sub-dict
        holding float32 boxes and int64 labels (difficult objects go to the
        *_ignore arrays). Box coordinates are shifted to 0-based.
    """
    (xml_path, img_path) = args
    root = ET.parse(xml_path).getroot()
    size_node = root.find('size')
    width = int(size_node.find('width').text)
    height = int(size_node.find('height').text)
    boxes, box_labels = [], []
    ignored_boxes, ignored_labels = [], []
    for obj in root.findall('object'):
        cls_id = label_ids[obj.find('name').text]
        is_difficult = int(obj.find('difficult').text)
        bnd = obj.find('bndbox')
        box = [int(bnd.find(tag).text) for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
        if is_difficult:
            ignored_boxes.append(box)
            ignored_labels.append(cls_id)
        else:
            boxes.append(box)
            box_labels.append(cls_id)

    def _pack(bs, ls):
        # Empty lists become correctly-shaped empty arrays; otherwise shift
        # VOC's 1-based pixel coordinates to 0-based.
        if not bs:
            return np.zeros((0, 4)), np.zeros((0,))
        return np.array(bs, ndmin=2) - 1, np.array(ls)

    boxes, box_labels = _pack(boxes, box_labels)
    ignored_boxes, ignored_labels = _pack(ignored_boxes, ignored_labels)
    return {
        'filename': img_path,
        'width': width,
        'height': height,
        'ann': {
            'bboxes': boxes.astype(np.float32),
            'labels': box_labels.astype(np.int64),
            'bboxes_ignore': ignored_boxes.astype(np.float32),
            'labels_ignore': ignored_labels.astype(np.int64),
        },
    }
|
def cvt_annotations(devkit_path, years, split, out_file):
    """Convert VOC XML annotations for the given year(s)/split into one
    mmdetection-style annotation list and dump it to ``out_file``.

    :param devkit_path: root of the VOCdevkit directory.
    :param years: a year string (e.g. '2007') or a list of them.
    :param split: image-set name, e.g. 'train', 'val', 'trainval', 'test'.
    :param out_file: path the annotation list is dumped to (pickle via mmcv).
    :return: the list of annotation dicts.
    """
    if (not isinstance(years, list)):
        years = [years]
    annotations = []
    for year in years:
        filelist = osp.join(devkit_path, 'VOC{}/ImageSets/Main/{}.txt'.format(year, split))
        if (not osp.isfile(filelist)):
            print('filelist does not exist: {}, skip voc{} {}'.format(filelist, year, split))
            # BUG FIX: was `return`, which aborted the whole conversion (and
            # skipped dumping out_file) instead of skipping only this year as
            # the message above promises.
            continue
        img_names = mmcv.list_from_file(filelist)
        xml_paths = [osp.join(devkit_path, 'VOC{}/Annotations/{}.xml'.format(year, img_name)) for img_name in img_names]
        img_paths = ['VOC{}/JPEGImages/{}.jpg'.format(year, img_name) for img_name in img_names]
        part_annotations = mmcv.track_progress(parse_xml, list(zip(xml_paths, img_paths)))
        annotations.extend(part_annotations)
    mmcv.dump(annotations, out_file)
    return annotations
|
def parse_args():
    """Parse the VOC-conversion command line and return the namespace."""
    parser = argparse.ArgumentParser(
        description='Convert PASCAL VOC annotations to mmdetection format')
    parser.add_argument('devkit_path', help='pascal voc devkit path')
    parser.add_argument('-o', '--out-dir', help='output path')
    return parser.parse_args()
|
def main():
    """Convert every VOC2007/VOC2012 split found under the devkit to .pkl files."""
    args = parse_args()
    devkit_path = args.devkit_path
    out_dir = args.out_dir if args.out_dir else devkit_path
    mmcv.mkdir_or_exist(out_dir)
    # Collect the years present on disk; when both exist, also emit the
    # combined voc0712 annotation set.
    years = [y for y in ('2007', '2012') if osp.isdir(osp.join(devkit_path, 'VOC' + y))]
    if len(years) == 2:
        years.append(['2007', '2012'])
    if not years:
        raise IOError('The devkit path {} contains neither "VOC2007" nor "VOC2012" subfolder'.format(devkit_path))
    for year in years:
        if year == '2007':
            prefix = 'voc07'
        elif year == '2012':
            prefix = 'voc12'
        else:  # the combined ['2007', '2012'] entry
            prefix = 'voc0712'
        for split in ('train', 'val', 'trainval'):
            dataset_name = prefix + '_' + split
            print('processing {} ...'.format(dataset_name))
            cvt_annotations(devkit_path, year, split, osp.join(out_dir, dataset_name + '.pkl'))
        # The combined set has no test split; only single years get one.
        if not isinstance(year, list):
            dataset_name = prefix + '_test'
            print('processing {} ...'.format(dataset_name))
            cvt_annotations(devkit_path, year, 'test', osp.join(out_dir, dataset_name + '.pkl'))
    print('Done!')
|
def add_path(path):
    """Prepend ``path`` to ``sys.path`` unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
|
def single_test(model, data_loader, saveto=None, class_names=['Car'], show=False):
    """Run single-GPU inference over ``data_loader`` and collect KITTI-style
    detection annotations.

    :param model: detector wrapped so that ``model(return_loss=False, **data)``
        returns a list of per-image result dicts.
    :param data_loader: iterable yielding batched input dicts.
    :param saveto: optional directory; when given, one KITTI-format ``.txt``
        file is written per image (an empty file when there are no detections).
    :param class_names: index -> class-name mapping for ``label_preds``.
    :param show: when True, visualize via ``model.module.show_result``.
    :return: list of KITTI annotation dicts, one per image.
    """
    # KITTI label line: class name followed by 15 float fields.
    template = (('{} ' + ' '.join(['{:.4f}' for _ in range(15)])) + '\n')
    if (saveto is not None):
        mmcv.mkdir_or_exist(saveto)
    model.eval()
    annos = []
    prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
    for (i, data) in enumerate(data_loader):
        with torch.no_grad():
            results = model(return_loss=False, **data)
        # (height, width) used to clip 2D boxes; hard-coded KITTI image size
        # -- assumes every image is 375x1242 (TODO confirm).
        image_shape = (375, 1242)
        for re in results:
            img_idx = re['image_idx']
            if (re['bbox'] is not None):
                box2d = re['bbox']
                box3d = re['box3d_camera']
                labels = re['label_preds']
                scores = re['scores']
                alphas = re['alphas']
                anno = kitti.get_start_result_anno()
                num_example = 0
                for (bbox2d, bbox3d, label, score, alpha) in zip(box2d, box3d, labels, scores, alphas):
                    # Drop boxes lying entirely outside the image frame.
                    if ((bbox2d[0] > image_shape[1]) or (bbox2d[1] > image_shape[0])):
                        continue
                    if ((bbox2d[2] < 0) or (bbox2d[3] < 0)):
                        continue
                    # Clip the remaining boxes to the image (note: mutates
                    # bbox2d in place).
                    bbox2d[2:] = np.minimum(bbox2d[2:], image_shape[::(- 1)])
                    bbox2d[:2] = np.maximum(bbox2d[:2], [0, 0])
                    anno['name'].append(class_names[int(label)])
                    # Truncation/occlusion are not predicted; written as zeros.
                    anno['truncated'].append(0.0)
                    anno['occluded'].append(0)
                    anno['alpha'].append(alpha)
                    anno['bbox'].append(bbox2d)
                    # bbox3d layout: [x, y, z, dim0, dim1, dim2, ry] in the
                    # camera frame (dimension order presumably l/h/w -- TODO
                    # confirm against box3d_camera's producer).
                    anno['dimensions'].append(bbox3d[[3, 4, 5]])
                    anno['location'].append(bbox3d[:3])
                    anno['rotation_y'].append(bbox3d[6])
                    anno['score'].append(score)
                    num_example += 1
                if (num_example != 0):
                    if (saveto is not None):
                        of_path = os.path.join(saveto, ('%06d.txt' % img_idx))
                        with open(of_path, 'w+') as f:
                            for (name, bbox, dim, loc, ry, score, alpha) in zip(anno['name'], anno['bbox'], anno['dimensions'], anno['location'], anno['rotation_y'], anno['score'], anno['alpha']):
                                # dims are reordered via [1, 2, 0] when written
                                # (KITTI files store h, w, l -- assuming dim
                                # holds [l, h, w]; TODO confirm).
                                line = template.format(name, 0, 0, alpha, *bbox, *dim[[1, 2, 0]], *loc, ry, score)
                                f.write(line)
                    # Stack per-field python lists into arrays for evaluation.
                    anno = {n: np.stack(v) for (n, v) in anno.items()}
                    annos.append(anno)
                else:
                    # All detections were filtered out: write an empty label
                    # file and record an empty annotation.
                    if (saveto is not None):
                        of_path = os.path.join(saveto, ('%06d.txt' % img_idx))
                        f = open(of_path, 'w+')
                        f.close()
                    annos.append(kitti.empty_result_anno())
            else:
                # No detections at all for this image.
                if (saveto is not None):
                    of_path = os.path.join(saveto, ('%06d.txt' % img_idx))
                    f = open(of_path, 'w+')
                    f.close()
                annos.append(kitti.empty_result_anno())
            if show:
                model.module.show_result(data, results, data_loader.dataset.img_norm_cfg)
            # Tag every detection of this image with its image index.
            num_example = annos[(- 1)]['name'].shape[0]
            annos[(- 1)]['image_idx'] = np.array(([img_idx] * num_example), dtype=np.int64)
        batch_size = len(results)
        for _ in range(batch_size):
            prog_bar.update()
    return annos
|
def _data_func(data, device_id):
    """Collate a single sample into a batch, scatter it to ``device_id``,
    and wrap it as keyword arguments for a no-loss, rescaled forward pass."""
    batched = collate([data], samples_per_gpu=1)
    sample = scatter(batched, [device_id])[0]
    return dict(return_loss=False, rescale=True, **sample)
|
def parse_args():
    """Parse the detector-testing command line and return the namespace."""
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--gpus', default=1, type=int,
                        help='GPU number used for testing')
    parser.add_argument('--proc_per_gpu', default=1, type=int,
                        help='Number of processes per GPU')
    parser.add_argument('--out', help='output result file')
    parser.add_argument('--eval', type=str, nargs='+',
                        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
                        help='eval types')
    parser.add_argument('--show', action='store_true', help='show results')
    return parser.parse_args()
|
def main():
    """Entry point: run single-GPU KITTI evaluation for a trained detector.

    Loads the config and checkpoint from the CLI, runs inference over the
    validation set, and prints the official KITTI evaluation result.
    """
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    # Weights come from the checkpoint; skip any pretrained download.
    cfg.model.pretrained = None
    dataset = utils.get_dataset(cfg.data.val)
    class_names = cfg.data.val.class_names
    if (args.gpus == 1):
        model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint)
        model = MMDataParallel(model, device_ids=[0])
        data_loader = build_dataloader(dataset, 1, cfg.data.workers_per_gpu, num_gpus=1, shuffle=False, dist=False)
        outputs = single_test(model, data_loader, args.out, class_names, show=args.show)
    else:
        # BUG FIX: the bare `NotImplementedError` expression was a no-op; it
        # must be raised. Raising also prevents `outputs` from being used
        # while unbound below.
        raise NotImplementedError
    gt_annos = kitti.get_label_annos(dataset.label_prefix, dataset.sample_ids)
    result = get_official_eval_result(gt_annos, outputs, current_classes=class_names)
    print(result)
|
def parse_args():
    """Parse the detector-training command line and return the namespace."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work_dir', help='the dir to save logs and models')
    parser.add_argument('--validate', action='store_true',
                        help='whether to evaluate the checkpoint during training')
    parser.add_argument('--gpus', type=int, default=1,
                        help='number of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    return parser.parse_args()
|
def main():
    """Entry point for (optionally distributed) detector training."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # Command-line overrides take precedence over the config file.
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # Record the mmdet version and the full config text in every checkpoint.
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__, config=cfg.text)
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, **cfg.dist_params)
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    train_dataset = get_dataset(cfg.data.train)
    train_detector(model, train_dataset, cfg, distributed=distributed, validate=args.validate, logger=logger)
|
def draw_tension(time, values):
    """Plot ``values`` against ``time`` as a marked line chart and show it."""
    plt.figure(figsize=(20, 10))
    plt.rcParams['xtick.labelsize'] = 14
    plt.plot(time, values, marker='o')
    plt.tight_layout()
    plt.show()
|
@hydra.main(config_path='configs/', config_name='convert.yaml')
def convert(config: DictConfig):
    """Convert a trained Lightning checkpoint into one of several deployment
    formats (pytorch state dict, torchscript, or onnx).

    The target format, checkpoint location, and output name all come from the
    hydra config.
    """
    assert (config.get('convert_to') in ['pytorch', 'torchscript', 'onnx', 'tensorrt']), 'Please Choose one of [pytorch, torchscript, onnx, tensorrt]'
    # NOTE(review): 'tensorrt' passes the assert but has no conversion branch
    # below -- confirm whether it is still unsupported.
    log.info(f'Instantiating model <{config.model._target_}>')
    model: LightningModule = hydra.utils.instantiate(config.model)
    log.info(f"Load checkpoint <{config.get('checkpoint_dir')}>")
    ckpt_path = config.get('checkpoint_dir')
    if (ckpt_path and (not os.path.isabs(ckpt_path))):
        # BUG FIX: the joined path was wrongly passed back through
        # config.get(), which looked it up as a config key (yielding None)
        # instead of using the resolved absolute path. Hydra changes the cwd,
        # so relative paths must be anchored at the original working dir.
        ckpt_path = os.path.join(hydra.utils.get_original_cwd(), ckpt_path)
    model = model.load_from_checkpoint(ckpt_path)
    model.cuda()
    if (config.get('convert_to') == 'pytorch'):
        log.info('Convert to Pytorch (.pt)')
        torch.save(model.state_dict(), f"{config.get('name')}.pt")
        log.info(f"Saved model {config.get('name')}.pt")
    if (config.get('convert_to') == 'torchscript'):
        log.info('Convert to Torchscript (.pt)')
        torch.jit.save(model.to_torchscript(), f"{config.get('name')}.pt")
        log.info(f"Saved model {config.get('name')}.pt")
    if (config.get('convert_to') == 'onnx'):
        log.info('Convert to ONNX (.onnx)')
        model.cuda()
        # ONNX export traces the model with a fixed dummy input shape.
        input_sample = torch.rand((1, 3, 224, 224), device=torch.device('cuda'))
        model.to_onnx(f"{config.get('name')}.onnx", input_sample, export_params=True)
        log.info(f"Saved model {config.get('name')}.onnx")
|
def generate(imgs_path, vid_path, fps=30, frame_size=(1242, 375), resize=True):
    """Stitch the PNG frames under ``imgs_path`` into an mp4 video at ``vid_path``."""
    writer = cv2.VideoWriter(vid_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, frame_size)
    frame_paths = sorted(glob(os.path.join(imgs_path, '*.png')))
    if resize:
        # Force every frame to the writer's frame size.
        for frame_path in tqdm(frame_paths):
            writer.write(cv2.resize(cv2.imread(frame_path), frame_size))
    else:
        for frame_path in frame_paths:
            writer.write(cv2.imread(frame_path, cv2.IMREAD_COLOR))
    writer.release()
    print('[INFO] Video saved to {}'.format(vid_path))
|
def generate_sets(images_path: str, dump_dir: str, postfix: str='', train_size: float=0.8, is_yolo: bool=False):
    """Split the PNG images under ``images_path`` into train/val id lists and
    write them to ``<dump_dir>/train<postfix>.txt`` and ``val<postfix>.txt``.

    When ``is_yolo`` is True each entry is written as a relative image path
    instead of a bare id.
    """
    image_files = glob(os.path.join(images_path, '*.png'))
    stems = [path.split('/')[(- 1)].split('.')[0] for path in image_files]
    split_at = int(len(stems) * train_size)
    train_sets = sorted(stems[:split_at])
    val_sets = sorted(stems[split_at:])
    for set_name, id_list in zip(['train', 'val'], [train_sets, val_sets]):
        out_path = os.path.join(dump_dir, f'{set_name}{postfix}.txt')
        with open(out_path, 'w') as f:
            for img_id in id_list:
                if is_yolo:
                    f.write(f'./images/{img_id}.png\n')
                else:
                    f.write(f'{img_id}\n')
    print(f'[INFO] Training set: {len(train_sets)}')
    print(f'[INFO] Validation set: {len(val_sets)}')
    print(f'[INFO] Total: {(len(train_sets) + len(val_sets))}')
    print(f'[INFO] Success Generate Sets')
|
def get_assets(tag):
    """Return the assets list of the GitHub release tagged ``tag``.

    Queries the public GitHub API for the yolo3d-lightning repository.
    """
    endpoint = ('https://api.github.com/repos/ruhyadi/yolo3d-lightning/releases/tags/' + tag)
    payload = requests.get(endpoint).json()
    return payload['assets']
|
def download_assets(assets, dir):
    """Download each release asset into ``dir``, unzip it there, and delete
    the downloaded archive afterwards."""
    for asset in assets:
        filename = asset['name']
        archive_path = os.path.join(dir, filename)
        print('[INFO] Downloading {}'.format(filename))
        response = requests.get(asset['browser_download_url'], stream=True)
        # Stream the body straight to disk to avoid buffering it in memory.
        with open(archive_path, 'wb') as out_file:
            shutil.copyfileobj(response.raw, out_file)
        del response
        with ZipFile(archive_path, 'r') as zip_file:
            zip_file.extractall(dir)
        os.remove(archive_path)
|
class KITTI2YOLO():
    """Convert KITTI ``label_2`` annotations into YOLO-format label files.

    Reads ``<dataset_path>/label_2/*.txt`` and writes one normalized YOLO
    label file per input into ``<dataset_path>/labels``.
    """

    # BUG FIX: the default for `classes` was a mutable list (annotated Tuple);
    # it is now an immutable tuple, which is backward-compatible for the `in`
    # checks and indexing performed below.
    def __init__(self, dataset_path: str='../data/KITTI', classes: Tuple=('car', 'van', 'truck', 'pedestrian', 'cyclist'), img_width: int=1224, img_height: int=370):
        self.dataset_path = dataset_path
        self.img_width = img_width
        self.img_height = img_height
        self.classes = classes
        # Class name -> integer id (YOLO class index).
        self.ids = {name: i for (i, name) in enumerate(self.classes)}
        self.label_path = os.path.join(self.dataset_path, 'labels')
        if (not os.path.isdir(self.label_path)):
            os.makedirs(self.label_path)
        else:
            print('[INFO] Directory already exist...')

    def convert(self):
        """Convert every KITTI label file to a YOLO label file, keeping only
        the configured classes and clamping normalized values to [0, 1]."""
        files = glob(os.path.join(self.dataset_path, 'label_2', '*.txt'))
        for file in tqdm(files):
            with open(file, 'r') as f:
                filename = os.path.join(self.label_path, os.path.basename(file))
                # Context-manage the output file so it is closed even on error.
                with open(filename, 'w') as dump_txt:
                    for line in f:
                        parsed = self.parse_line(line)
                        if (parsed['name'].lower() not in self.classes):
                            continue
                        (xmin, ymin, xmax, ymax) = parsed['bbox_camera']
                        # Normalized box center/size, clamped to [0, 1].
                        xcenter = min((((xmax - xmin) / 2) + xmin) / self.img_width, 1.0)
                        ycenter = min((((ymax - ymin) / 2) + ymin) / self.img_height, 1.0)
                        width = min((xmax - xmin) / self.img_width, 1.0)
                        height = min((ymax - ymin) / self.img_height, 1.0)
                        bbox_yolo = f"{self.ids[parsed['name'].lower()]} {xcenter:.3f} {ycenter:.3f} {width:.3f} {height:.3f}"
                        dump_txt.write((bbox_yolo + '\n'))

    def parse_line(self, line):
        """Parse one space-separated KITTI label line into a field dict.

        ``score`` is NaN for ground-truth lines (15 fields); detection lines
        carry it as a 16th field.
        """
        parts = line.split(' ')
        output = {'name': parts[0].strip(), 'xyz_camera': (float(parts[11]), float(parts[12]), float(parts[13])), 'wlh': (float(parts[9]), float(parts[10]), float(parts[8])), 'yaw_camera': float(parts[14]), 'bbox_camera': (float(parts[4]), float(parts[5]), float(parts[6]), float(parts[7])), 'truncation': float(parts[1]), 'occlusion': float(parts[2]), 'alpha': float(parts[3])}
        if (len(parts) > 15):
            output['score'] = float(parts[15])
        else:
            output['score'] = np.nan
        return output
|
def create_release(tag, name, description, target='main'):
    """Create a draft GitHub release for ``tag`` and return the API response JSON."""
    token = os.environ.get('GITHUB_TOKEN')
    headers = {
        'Accept': 'application/vnd.github.v3+json',
        'Authorization': f'token {token}',
        'Content-Type': 'application/zip',
    }
    payload = {
        'tag_name': tag,
        'target_commitish': target,
        'name': name,
        'body': description,
        'draft': True,
        'prerelease': False,
        'generate_release_notes': True,
    }
    print('[INFO] Creating release {}'.format(tag))
    response = requests.post('https://api.github.com/repos/ruhyadi/yolo3d-lightning/releases', json=payload, headers=headers)
    body = response.json()
    print('[INFO] Release created id: {}'.format(body['id']))
    return body
|
# NOTE(review): the lines below are web-page boilerplate (a dataset-viewer
# footer) accidentally captured into this source file; kept as comments so
# the file stays valid Python -- they should be removed entirely.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.