repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
SASA
|
SASA-main/pcdet/models/roi_heads/pvrcnn_head.py
|
import torch.nn as nn
from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ...utils import common_utils
from .roi_head_template import RoIHeadTemplate
class PVRCNNHead(RoIHeadTemplate):
    """PV-RCNN second-stage RoI head.

    Pools keypoint features onto a regular GRID_SIZE^3 grid of points inside
    each RoI (via the stacked-batch PointNet++ SA-MSG op) and predicts a
    refined confidence and box residual per RoI from the pooled grid features.
    """

    def __init__(self, input_channels, model_cfg, num_class=1):
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg

        # Prepend the keypoint feature dimension to each grouping MLP spec.
        mlps = self.model_cfg.ROI_GRID_POOL.MLPS
        for k in range(len(mlps)):
            mlps[k] = [input_channels] + mlps[k]

        # Multi-scale grouping layer aggregating keypoint features around each
        # RoI grid point (stacked-batch variant of PointNet++ set abstraction).
        self.roi_grid_pool_layer = pointnet2_stack_modules.StackSAModuleMSG(
            radii=self.model_cfg.ROI_GRID_POOL.POOL_RADIUS,
            nsamples=self.model_cfg.ROI_GRID_POOL.NSAMPLE,
            mlps=mlps,
            use_xyz=True,
            pool_method=self.model_cfg.ROI_GRID_POOL.POOL_METHOD,
        )

        GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        c_out = sum([x[-1] for x in mlps])  # channels after concatenating all scales
        pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out  # flattened grid feature size

        # Shared FC stack, implemented as 1x1 Conv1d over the flattened grid.
        shared_fc_list = []
        for k in range(0, self.model_cfg.SHARED_FC.__len__()):
            shared_fc_list.extend([
                nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),
                nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),
                nn.ReLU()
            ])
            pre_channel = self.model_cfg.SHARED_FC[k]

            # Dropout between (but not after) the shared FC layers.
            if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0:
                shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO))
        self.shared_fc_layer = nn.Sequential(*shared_fc_list)

        self.cls_layers = self.make_fc_layers(
            input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
        )
        self.reg_layers = self.make_fc_layers(
            input_channels=pre_channel,
            output_channels=self.box_coder.code_size * self.num_class,
            fc_list=self.model_cfg.REG_FC
        )
        self.init_weights(weight_init='xavier')

    def init_weights(self, weight_init='xavier'):
        """Initialize all conv weights with the chosen scheme; the final
        regression layer additionally gets a small-std normal init."""
        if weight_init == 'kaiming':
            init_func = nn.init.kaiming_normal_
        elif weight_init == 'xavier':
            init_func = nn.init.xavier_normal_
        elif weight_init == 'normal':
            init_func = nn.init.normal_
        else:
            raise NotImplementedError

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                if weight_init == 'normal':
                    init_func(m.weight, mean=0, std=0.001)
                else:
                    init_func(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
        nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)

    def roi_grid_pool(self, batch_dict):
        """
        Pool keypoint features onto the dense grid points of each RoI.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4) [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        Returns:
            pooled_features: (B * num_rois, GRID_SIZE^3, C_out)
        """
        batch_size = batch_dict['batch_size']
        rois = batch_dict['rois']
        point_coords = batch_dict['point_coords']
        point_features = batch_dict['point_features']

        # Weight keypoint features by their predicted foreground confidence.
        point_features = point_features * batch_dict['point_cls_scores'].view(-1, 1)

        global_roi_grid_points, local_roi_grid_points = self.get_global_grid_points_of_roi(
            rois, grid_size=self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        )  # (BxN, 6x6x6, 3)
        global_roi_grid_points = global_roi_grid_points.view(batch_size, -1, 3)  # (B, Nx6x6x6, 3)

        xyz = point_coords[:, 1:4]
        # Per-sample point counts required by the stacked-batch pooling op.
        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
        batch_idx = point_coords[:, 0]
        for k in range(batch_size):
            xyz_batch_cnt[k] = (batch_idx == k).sum()

        new_xyz = global_roi_grid_points.view(-1, 3)
        # Every sample contributes the same number of query (grid) points.
        new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(global_roi_grid_points.shape[1])
        pooled_points, pooled_features = self.roi_grid_pool_layer(
            xyz=xyz.contiguous(),
            xyz_batch_cnt=xyz_batch_cnt,
            new_xyz=new_xyz,
            new_xyz_batch_cnt=new_xyz_batch_cnt,
            features=point_features.contiguous(),
        )  # (M1 + M2 ..., C)

        pooled_features = pooled_features.view(
            -1, self.model_cfg.ROI_GRID_POOL.GRID_SIZE ** 3,
            pooled_features.shape[-1]
        )  # (BxN, 6x6x6, C)
        return pooled_features

    def get_global_grid_points_of_roi(self, rois, grid_size):
        """Return the RoI grid points both in the world frame and in the RoI's
        local (centered, axis-aligned) frame."""
        rois = rois.view(-1, rois.shape[-1])
        batch_size_rcnn = rois.shape[0]

        local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size)  # (B, 6x6x6, 3)
        # Rotate by each RoI's heading (column 6), then translate to its center.
        global_roi_grid_points = common_utils.rotate_points_along_z(
            local_roi_grid_points.clone(), rois[:, 6]
        ).squeeze(dim=1)
        global_center = rois[:, 0:3].clone()
        global_roi_grid_points += global_center.unsqueeze(dim=1)
        return global_roi_grid_points, local_roi_grid_points

    @staticmethod
    def get_dense_grid_points(rois, batch_size_rcnn, grid_size):
        """Generate grid_size^3 regularly-spaced points (cell centers) inside
        each RoI box, expressed in the RoI's local centered frame."""
        faked_features = rois.new_ones((grid_size, grid_size, grid_size))
        dense_idx = faked_features.nonzero()  # (N, 3) [x_idx, y_idx, z_idx]
        dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float()  # (B, 6x6x6, 3)

        local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6]
        # Map integer cell indices to metric offsets centered on the RoI.
        roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \
            - (local_roi_size.unsqueeze(dim=1) / 2)  # (B, 6x6x6, 3)
        return roi_grid_points

    def forward(self, batch_dict):
        """
        :param input_data: input dict
        :return:
        """
        targets_dict = self.proposal_layer(
            batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
        )
        if self.training:
            # Re-sample RoIs and match them to GT boxes for the RCNN losses.
            targets_dict = self.assign_targets(batch_dict)
            batch_dict['rois'] = targets_dict['rois']
            batch_dict['roi_labels'] = targets_dict['roi_labels']

        # RoI aware pooling
        pooled_features = self.roi_grid_pool(batch_dict)  # (BxN, 6x6x6, C)

        grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE
        batch_size_rcnn = pooled_features.shape[0]
        pooled_features = pooled_features.permute(0, 2, 1).\
            contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size)  # (BxN, C, 6, 6, 6)

        # Flatten the grid volume and run the shared FC + prediction heads.
        shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1))
        rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, 1 or 2)
        rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, C)

        if not self.training:
            # Decode refined boxes for inference.
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
            )
            batch_dict['batch_cls_preds'] = batch_cls_preds
            batch_dict['batch_box_preds'] = batch_box_preds
            batch_dict['cls_preds_normalized'] = False
        else:
            # Stash predictions for the loss computed in the template head.
            targets_dict['rcnn_cls'] = rcnn_cls
            targets_dict['rcnn_reg'] = rcnn_reg
            self.forward_ret_dict = targets_dict

        return batch_dict
| 7,628
| 40.688525
| 116
|
py
|
SASA
|
SASA-main/pcdet/models/roi_heads/pointrcnn_head.py
|
import torch
import torch.nn as nn
from ...ops.pointnet2.pointnet2_batch import pointnet2_modules
from ...ops.roipoint_pool3d import roipoint_pool3d_utils
from ...utils import common_utils
from .roi_head_template import RoIHeadTemplate
class PointRCNNHead(RoIHeadTemplate):
    """PointRCNN second-stage RoI head.

    Pools raw points (with score/depth prefix channels) inside each RoI,
    canonically transforms them into the RoI's local frame, and refines the
    box with a small PointNet++ encoder.
    """

    def __init__(self, input_channels, model_cfg, num_class=1):
        super().__init__(num_class=num_class, model_cfg=model_cfg)
        self.model_cfg = model_cfg
        use_bn = self.model_cfg.USE_BN
        self.SA_modules = nn.ModuleList()
        channel_in = input_channels

        self.num_prefix_channels = 3 + 2  # xyz + point_scores + point_depth
        # Up-project the 5 prefix channels before merging with point features.
        xyz_mlps = [self.num_prefix_channels] + self.model_cfg.XYZ_UP_LAYER
        shared_mlps = []
        for k in range(len(xyz_mlps) - 1):
            shared_mlps.append(nn.Conv2d(xyz_mlps[k], xyz_mlps[k + 1], kernel_size=1, bias=not use_bn))
            if use_bn:
                shared_mlps.append(nn.BatchNorm2d(xyz_mlps[k + 1]))
            shared_mlps.append(nn.ReLU())
        self.xyz_up_layer = nn.Sequential(*shared_mlps)

        # Fuse up-projected xyz features with point features back to channel_in.
        merge_channel_in = channel_in + self.model_cfg.XYZ_UP_LAYER[-1]
        self.merge_down_layer = nn.Sequential(
            nn.Conv2d(merge_channel_in, channel_in, kernel_size=1, bias=not use_bn),
            *[nn.BatchNorm2d(channel_in), nn.ReLU()] if use_bn else [nn.ReLU()]
        )

        for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()):
            mlps = [channel_in] + self.model_cfg.SA_CONFIG.MLPS[k]
            # NPOINTS == -1 means "group all points" (npoint=None).
            npoint = self.model_cfg.SA_CONFIG.NPOINTS[k] if self.model_cfg.SA_CONFIG.NPOINTS[k] != -1 else None
            self.SA_modules.append(
                pointnet2_modules.PointnetSAModule(
                    npoint=npoint,
                    radius=self.model_cfg.SA_CONFIG.RADIUS[k],
                    nsample=self.model_cfg.SA_CONFIG.NSAMPLE[k],
                    mlp=mlps,
                    use_xyz=True,
                    bn=use_bn
                )
            )
            channel_in = mlps[-1]

        self.cls_layers = self.make_fc_layers(
            input_channels=channel_in, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC
        )
        self.reg_layers = self.make_fc_layers(
            input_channels=channel_in,
            output_channels=self.box_coder.code_size * self.num_class,
            fc_list=self.model_cfg.REG_FC
        )
        self.roipoint_pool3d_layer = roipoint_pool3d_utils.RoIPointPool3d(
            num_sampled_points=self.model_cfg.ROI_POINT_POOL.NUM_SAMPLED_POINTS,
            pool_extra_width=self.model_cfg.ROI_POINT_POOL.POOL_EXTRA_WIDTH
        )
        self.init_weights(weight_init='xavier')

    def init_weights(self, weight_init='xavier'):
        """Initialize all conv weights with the chosen scheme; the final
        regression layer additionally gets a small-std normal init."""
        if weight_init == 'kaiming':
            init_func = nn.init.kaiming_normal_
        elif weight_init == 'xavier':
            init_func = nn.init.xavier_normal_
        elif weight_init == 'normal':
            init_func = nn.init.normal_
        else:
            raise NotImplementedError

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
                if weight_init == 'normal':
                    init_func(m.weight, mean=0, std=0.001)
                else:
                    init_func(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
        nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001)

    def roipool3d_gpu(self, batch_dict):
        """
        Pool raw points and features inside each RoI and transform them into
        the RoI's canonical (centered, heading-aligned) frame.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                point_coords: (num_points, 4) [bs_idx, x, y, z]
                point_features: (num_points, C)
                point_cls_scores: (N1 + N2 + N3 + ..., 1)
                point_part_offset: (N1 + N2 + N3 + ..., 3)
        Returns:
            pooled_features: (B * num_rois, num_sampled_points, 3 + C)
        """
        batch_size = batch_dict['batch_size']
        batch_idx = batch_dict['point_coords'][:, 0]
        point_coords = batch_dict['point_coords'][:, 1:4]
        point_features = batch_dict['point_features']
        rois = batch_dict['rois']  # (B, num_rois, 7 + C)
        batch_cnt = point_coords.new_zeros(batch_size).int()
        for bs_idx in range(batch_size):
            batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
        # The view() calls below require an equal point count per sample.
        assert batch_cnt.min() == batch_cnt.max()

        # Extra per-point channels: predicted foreground score and normalized depth.
        point_scores = batch_dict['point_cls_scores'].detach()
        point_depths = point_coords.norm(dim=1) / self.model_cfg.ROI_POINT_POOL.DEPTH_NORMALIZER - 0.5
        point_features_list = [point_scores[:, None], point_depths[:, None], point_features]
        point_features_all = torch.cat(point_features_list, dim=1)
        batch_points = point_coords.view(batch_size, -1, 3)
        batch_point_features = point_features_all.view(batch_size, -1, point_features_all.shape[-1])

        with torch.no_grad():
            pooled_features, pooled_empty_flag = self.roipoint_pool3d_layer(
                batch_points, batch_point_features, rois
            )  # pooled_features: (B, num_rois, num_sampled_points, 3 + C), pooled_empty_flag: (B, num_rois)

            # canonical transformation: center on the RoI, then undo its heading
            roi_center = rois[:, :, 0:3]
            pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)

            pooled_features = pooled_features.view(-1, pooled_features.shape[-2], pooled_features.shape[-1])
            pooled_features[:, :, 0:3] = common_utils.rotate_points_along_z(
                pooled_features[:, :, 0:3], -rois.view(-1, rois.shape[-1])[:, 6]
            )

            # Zero out features of RoIs that contained no points.
            pooled_features[pooled_empty_flag.view(-1) > 0] = 0
        return pooled_features

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:

        Returns:

        """
        targets_dict = self.proposal_layer(
            batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST']
        )
        if self.training:
            # Re-sample RoIs and match them to GT boxes for the RCNN losses.
            targets_dict = self.assign_targets(batch_dict)
            batch_dict['rois'] = targets_dict['rois']
            batch_dict['roi_labels'] = targets_dict['roi_labels']

        pooled_features = self.roipool3d_gpu(batch_dict)  # (total_rois, num_sampled_points, 3 + C)

        # Split the prefix (xyz + score + depth) from the semantic features,
        # up-project the prefix, then merge back down to the feature width.
        xyz_input = pooled_features[..., 0:self.num_prefix_channels].transpose(1, 2).unsqueeze(dim=3).contiguous()
        xyz_features = self.xyz_up_layer(xyz_input)
        point_features = pooled_features[..., self.num_prefix_channels:].transpose(1, 2).unsqueeze(dim=3)
        merged_features = torch.cat((xyz_features, point_features), dim=1)
        merged_features = self.merge_down_layer(merged_features)

        l_xyz, l_features = [pooled_features[..., 0:3].contiguous()], [merged_features.squeeze(dim=3).contiguous()]

        # PointNet++ encoder over the pooled RoI points.
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)

        shared_features = l_features[-1]  # (total_rois, num_features, 1)
        rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, 1 or 2)
        rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, C)

        if not self.training:
            # Decode refined boxes for inference.
            batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
                batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg
            )
            batch_dict['batch_cls_preds'] = batch_cls_preds
            batch_dict['batch_box_preds'] = batch_box_preds
            batch_dict['cls_preds_normalized'] = False
        else:
            # Stash predictions for the loss computed in the template head.
            targets_dict['rcnn_cls'] = rcnn_cls
            targets_dict['rcnn_reg'] = rcnn_reg
            self.forward_ret_dict = targets_dict

        return batch_dict
| 7,866
| 42.705556
| 116
|
py
|
SASA
|
SASA-main/pcdet/models/roi_heads/target_assigner/proposal_target_layer.py
|
import numpy as np
import torch
import torch.nn as nn
from ....ops.iou3d_nms import iou3d_nms_utils
class ProposalTargetLayer(nn.Module):
    """Samples a fixed-size set of RoIs per image from first-stage proposals
    and assigns them ground-truth boxes, IoUs and training labels for the
    second-stage (RCNN) head."""

    def __init__(self, roi_sampler_cfg):
        super().__init__()
        self.roi_sampler_cfg = roi_sampler_cfg

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                roi_scores: (B, num_rois)
                gt_boxes: (B, N, 7 + C + 1)
                roi_labels: (B, num_rois)
        Returns:
            batch_dict:
                rois: (B, M, 7 + C)
                gt_of_rois: (B, M, 7 + C)
                gt_iou_of_rois: (B, M)
                roi_scores: (B, M)
                roi_labels: (B, M)
                reg_valid_mask: (B, M)
                rcnn_cls_labels: (B, M)
        """
        batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels = self.sample_rois_for_rcnn(
            batch_dict=batch_dict
        )
        # regression valid mask: only sufficiently-overlapping RoIs get box regression loss
        reg_valid_mask = (batch_roi_ious > self.roi_sampler_cfg.REG_FG_THRESH).long()

        # classification label
        if self.roi_sampler_cfg.CLS_SCORE_TYPE == 'cls':
            # Hard 0/1 labels; RoIs between the BG and FG thresholds are ignored (-1).
            batch_cls_labels = (batch_roi_ious > self.roi_sampler_cfg.CLS_FG_THRESH).long()
            ignore_mask = (batch_roi_ious > self.roi_sampler_cfg.CLS_BG_THRESH) & \
                          (batch_roi_ious < self.roi_sampler_cfg.CLS_FG_THRESH)
            batch_cls_labels[ignore_mask > 0] = -1
        elif self.roi_sampler_cfg.CLS_SCORE_TYPE == 'roi_iou':
            # Soft labels: IoU linearly rescaled to [0, 1] between the thresholds.
            iou_bg_thresh = self.roi_sampler_cfg.CLS_BG_THRESH
            iou_fg_thresh = self.roi_sampler_cfg.CLS_FG_THRESH
            fg_mask = batch_roi_ious > iou_fg_thresh
            bg_mask = batch_roi_ious < iou_bg_thresh
            interval_mask = (fg_mask == 0) & (bg_mask == 0)
            batch_cls_labels = (fg_mask > 0).float()
            batch_cls_labels[interval_mask] = \
                (batch_roi_ious[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)
        else:
            raise NotImplementedError

        targets_dict = {'rois': batch_rois, 'gt_of_rois': batch_gt_of_rois, 'gt_iou_of_rois': batch_roi_ious,
                        'roi_scores': batch_roi_scores, 'roi_labels': batch_roi_labels,
                        'reg_valid_mask': reg_valid_mask,
                        'rcnn_cls_labels': batch_cls_labels}
        return targets_dict

    def sample_rois_for_rcnn(self, batch_dict):
        """
        Match each RoI to its best-overlapping GT box and subsample a balanced
        fg/bg set of ROI_PER_IMAGE RoIs per image.

        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                roi_scores: (B, num_rois)
                gt_boxes: (B, N, 7 + C + 1)
                roi_labels: (B, num_rois)
        Returns:
            batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels
        """
        batch_size = batch_dict['batch_size']
        rois = batch_dict['rois']
        roi_scores = batch_dict['roi_scores']
        roi_labels = batch_dict['roi_labels']
        gt_boxes = batch_dict['gt_boxes']

        code_size = rois.shape[-1]
        batch_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size)
        batch_gt_of_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size + 1)
        batch_roi_ious = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
        batch_roi_scores = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
        batch_roi_labels = rois.new_zeros((batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE), dtype=torch.long)

        for index in range(batch_size):
            cur_roi, cur_gt, cur_roi_labels, cur_roi_scores = \
                rois[index], gt_boxes[index], roi_labels[index], roi_scores[index]
            # Strip trailing zero-padded (empty) GT boxes; keep at least one row.
            k = cur_gt.__len__() - 1
            while k > 0 and cur_gt[k].sum() == 0:
                k -= 1
            cur_gt = cur_gt[:k + 1]
            cur_gt = cur_gt.new_zeros((1, cur_gt.shape[1])) if len(cur_gt) == 0 else cur_gt

            if self.roi_sampler_cfg.get('SAMPLE_ROI_BY_EACH_CLASS', False):
                max_overlaps, gt_assignment = self.get_max_iou_with_same_class(
                    rois=cur_roi, roi_labels=cur_roi_labels,
                    gt_boxes=cur_gt[:, 0:7], gt_labels=cur_gt[:, -1].long()
                )
            else:
                iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt[:, 0:7])  # (M, N)
                max_overlaps, gt_assignment = torch.max(iou3d, dim=1)

            sampled_inds = self.subsample_rois(max_overlaps=max_overlaps)

            batch_rois[index] = cur_roi[sampled_inds]
            batch_roi_labels[index] = cur_roi_labels[sampled_inds]
            batch_roi_ious[index] = max_overlaps[sampled_inds]
            batch_roi_scores[index] = cur_roi_scores[sampled_inds]
            batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]]
        return batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels

    def subsample_rois(self, max_overlaps):
        """Sample ROI_PER_IMAGE RoI indices, balancing foreground and
        (easy/hard) background RoIs according to FG_RATIO / HARD_BG_RATIO."""
        # sample fg, easy_bg, hard_bg
        fg_rois_per_image = int(np.round(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE))
        fg_thresh = min(self.roi_sampler_cfg.REG_FG_THRESH, self.roi_sampler_cfg.CLS_FG_THRESH)

        fg_inds = ((max_overlaps >= fg_thresh)).nonzero().view(-1)
        easy_bg_inds = ((max_overlaps < self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1)
        hard_bg_inds = ((max_overlaps < self.roi_sampler_cfg.REG_FG_THRESH) &
                        (max_overlaps >= self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1)

        fg_num_rois = fg_inds.numel()
        bg_num_rois = hard_bg_inds.numel() + easy_bg_inds.numel()

        if fg_num_rois > 0 and bg_num_rois > 0:
            # sampling fg
            fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
            rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).type_as(max_overlaps).long()
            fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
            # sampling bg
            bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE - fg_rois_per_this_image
            bg_inds = self.sample_bg_inds(
                hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
            )
        elif fg_num_rois > 0 and bg_num_rois == 0:
            # sampling fg with replacement (no backgrounds available to fill up with)
            rand_num = np.floor(np.random.rand(self.roi_sampler_cfg.ROI_PER_IMAGE) * fg_num_rois)
            rand_num = torch.from_numpy(rand_num).type_as(max_overlaps).long()
            fg_inds = fg_inds[rand_num]
            # BUGFIX: was `bg_inds = []` (a Python list), which makes the
            # torch.cat below raise TypeError; use an empty index tensor.
            bg_inds = fg_inds.new_zeros(0)
        elif bg_num_rois > 0 and fg_num_rois == 0:
            # sampling bg
            bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE
            bg_inds = self.sample_bg_inds(
                hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
            )
        else:
            print('maxoverlaps:(min=%f, max=%f)' % (max_overlaps.min().item(), max_overlaps.max().item()))
            print('ERROR: FG=%d, BG=%d' % (fg_num_rois, bg_num_rois))
            raise NotImplementedError

        sampled_inds = torch.cat((fg_inds, bg_inds), dim=0)
        return sampled_inds

    @staticmethod
    def sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, hard_bg_ratio):
        """Sample background indices (with replacement), mixing hard and easy
        backgrounds per hard_bg_ratio when both kinds are available."""
        if hard_bg_inds.numel() > 0 and easy_bg_inds.numel() > 0:
            hard_bg_rois_num = min(int(bg_rois_per_this_image * hard_bg_ratio), len(hard_bg_inds))
            easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num

            # sampling hard bg
            rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
            hard_bg_inds = hard_bg_inds[rand_idx]

            # sampling easy bg
            rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
            easy_bg_inds = easy_bg_inds[rand_idx]

            bg_inds = torch.cat([hard_bg_inds, easy_bg_inds], dim=0)
        elif hard_bg_inds.numel() > 0 and easy_bg_inds.numel() == 0:
            hard_bg_rois_num = bg_rois_per_this_image
            # sampling hard bg
            rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
            bg_inds = hard_bg_inds[rand_idx]
        elif hard_bg_inds.numel() == 0 and easy_bg_inds.numel() > 0:
            easy_bg_rois_num = bg_rois_per_this_image
            # sampling easy bg
            rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
            bg_inds = easy_bg_inds[rand_idx]
        else:
            raise NotImplementedError

        return bg_inds

    @staticmethod
    def get_max_iou_with_same_class(rois, roi_labels, gt_boxes, gt_labels):
        """
        Compute, per RoI, the maximum IoU against GT boxes of the same class only.

        Args:
            rois: (N, 7)
            roi_labels: (N)
            gt_boxes: (M, 7)
            gt_labels: (M)
        Returns:
            max_overlaps: (N) max same-class IoU per RoI (0 if no same-class GT)
            gt_assignment: (N) index into gt_boxes of the matched GT box
        """
        max_overlaps = rois.new_zeros(rois.shape[0])
        gt_assignment = roi_labels.new_zeros(roi_labels.shape[0])

        for k in range(gt_labels.min().item(), gt_labels.max().item() + 1):
            roi_mask = (roi_labels == k)
            gt_mask = (gt_labels == k)
            if roi_mask.sum() > 0 and gt_mask.sum() > 0:
                cur_roi = rois[roi_mask]
                cur_gt = gt_boxes[gt_mask]
                original_gt_assignment = gt_mask.nonzero().view(-1)

                iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt)  # (M, N)
                cur_max_overlaps, cur_gt_assignment = torch.max(iou3d, dim=1)
                max_overlaps[roi_mask] = cur_max_overlaps
                # Map per-class GT indices back to indices in the full GT list.
                gt_assignment[roi_mask] = original_gt_assignment[cur_gt_assignment]
        return max_overlaps, gt_assignment
| 9,946
| 42.436681
| 117
|
py
|
SASA
|
SASA-main/pcdet/models/model_utils/model_nms_utils.py
|
import torch
from ...ops.iou3d_nms import iou3d_nms_utils
def class_agnostic_nms(box_scores, box_preds, nms_config, score_thresh=None):
    """Run class-agnostic NMS over predicted boxes.

    Args:
        box_scores: (N,) confidence per box
        box_preds: (N, 7 + C) box parameters; the first 7 columns go to NMS
        nms_config: config with NMS_TYPE / NMS_THRESH / NMS_PRE_MAXSIZE / NMS_POST_MAXSIZE
        score_thresh: optional minimum score; boxes below it never reach NMS

    Returns:
        (selected, scores): indices into the ORIGINAL inputs of the kept boxes,
        and their original scores.
    """
    src_box_scores = box_scores
    scores_mask = None
    if score_thresh is not None:
        # Pre-filter by score; remember the mask to map indices back later.
        scores_mask = box_scores >= score_thresh
        box_scores = box_scores[scores_mask]
        box_preds = box_preds[scores_mask]

    selected = []
    num_candidates = box_scores.shape[0]
    if num_candidates > 0:
        # Keep only the top-K highest-scoring candidates before NMS.
        top_scores, top_indices = torch.topk(box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, num_candidates))
        nms_func = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)
        keep_idx, _ = nms_func(
            box_preds[top_indices][:, 0:7], top_scores, nms_config.NMS_THRESH, **nms_config
        )
        selected = top_indices[keep_idx[:nms_config.NMS_POST_MAXSIZE]]

    if scores_mask is not None:
        # Translate indices in the filtered set back to the original numbering.
        selected = scores_mask.nonzero().view(-1)[selected]
    return selected, src_box_scores[selected]
def multi_classes_nms(cls_scores, box_preds, nms_config, score_thresh=None):
    """
    Run NMS independently for every class column and concatenate the results.

    Args:
        cls_scores: (N, num_class)
        box_preds: (N, 7 + C)
        nms_config: config with NMS_TYPE / NMS_THRESH / NMS_PRE_MAXSIZE / NMS_POST_MAXSIZE
        score_thresh: optional per-class minimum score

    Returns:
        (pred_scores, pred_labels, pred_boxes) concatenated over classes;
        pred_labels holds the class column index of each kept box.
    """
    pred_scores, pred_labels, pred_boxes = [], [], []
    for k in range(cls_scores.shape[1]):
        if score_thresh is not None:
            scores_mask = (cls_scores[:, k] >= score_thresh)
            box_scores = cls_scores[scores_mask, k]
            cur_box_preds = box_preds[scores_mask]
        else:
            box_scores = cls_scores[:, k]
            # BUGFIX: cur_box_preds was never assigned on this branch, causing
            # a NameError whenever score_thresh is None.
            cur_box_preds = box_preds

        selected = []
        if box_scores.shape[0] > 0:
            box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0]))
            boxes_for_nms = cur_box_preds[indices]
            keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)(
                boxes_for_nms[:, 0:7], box_scores_nms, nms_config.NMS_THRESH, **nms_config
            )
            selected = indices[keep_idx[:nms_config.NMS_POST_MAXSIZE]]

        pred_scores.append(box_scores[selected])
        pred_labels.append(box_scores.new_ones(len(selected)).long() * k)
        pred_boxes.append(cur_box_preds[selected])

    pred_scores = torch.cat(pred_scores, dim=0)
    pred_labels = torch.cat(pred_labels, dim=0)
    pred_boxes = torch.cat(pred_boxes, dim=0)
    return pred_scores, pred_labels, pred_boxes
| 2,419
| 35.666667
| 116
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_2d/base_bev_backbone.py
|
import numpy as np
import torch
import torch.nn as nn
class BaseBEVBackbone(nn.Module):
    """Multi-scale 2D CNN over BEV features.

    Builds one conv block per level (strided first conv + LAYER_NUMS residual-free
    convs) and, optionally, one upsampling (or fractional-stride downsampling)
    "deblock" per level whose outputs are concatenated channel-wise.
    """

    def __init__(self, model_cfg, input_channels):
        super().__init__()
        self.model_cfg = model_cfg

        if self.model_cfg.get('LAYER_NUMS', None) is not None:
            assert len(self.model_cfg.LAYER_NUMS) == len(self.model_cfg.LAYER_STRIDES) == len(self.model_cfg.NUM_FILTERS)
            layer_nums = self.model_cfg.LAYER_NUMS
            layer_strides = self.model_cfg.LAYER_STRIDES
            num_filters = self.model_cfg.NUM_FILTERS
        else:
            layer_nums = layer_strides = num_filters = []

        if self.model_cfg.get('UPSAMPLE_STRIDES', None) is not None:
            assert len(self.model_cfg.UPSAMPLE_STRIDES) == len(self.model_cfg.NUM_UPSAMPLE_FILTERS)
            num_upsample_filters = self.model_cfg.NUM_UPSAMPLE_FILTERS
            upsample_strides = self.model_cfg.UPSAMPLE_STRIDES
        else:
            upsample_strides = num_upsample_filters = []

        num_levels = len(layer_nums)
        c_in_list = [input_channels, *num_filters[:-1]]  # each level consumes the previous level's output
        self.blocks = nn.ModuleList()
        self.deblocks = nn.ModuleList()
        for idx in range(num_levels):
            cur_layers = [
                nn.ZeroPad2d(1),
                nn.Conv2d(
                    c_in_list[idx], num_filters[idx], kernel_size=3,
                    stride=layer_strides[idx], padding=0, bias=False
                ),
                nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
                nn.ReLU()
            ]
            for k in range(layer_nums[idx]):
                cur_layers.extend([
                    nn.Conv2d(num_filters[idx], num_filters[idx], kernel_size=3, padding=1, bias=False),
                    nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
                    nn.ReLU()
                ])
            self.blocks.append(nn.Sequential(*cur_layers))
            if len(upsample_strides) > 0:
                stride = upsample_strides[idx]
                if stride >= 1:
                    self.deblocks.append(nn.Sequential(
                        nn.ConvTranspose2d(
                            num_filters[idx], num_upsample_filters[idx],
                            upsample_strides[idx],
                            stride=upsample_strides[idx], bias=False
                        ),
                        nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
                        nn.ReLU()
                    ))
                else:
                    # Fractional stride (< 1) means downsampling with a regular conv.
                    # BUGFIX: np.int was removed in NumPy >= 1.24; use the builtin int.
                    stride = int(np.round(1 / stride))
                    self.deblocks.append(nn.Sequential(
                        nn.Conv2d(
                            num_filters[idx], num_upsample_filters[idx],
                            stride,
                            stride=stride, bias=False
                        ),
                        nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
                        nn.ReLU()
                    ))

        c_in = sum(num_upsample_filters)
        if len(upsample_strides) > num_levels:
            # Extra deblock applied to the concatenated multi-scale features.
            self.deblocks.append(nn.Sequential(
                nn.ConvTranspose2d(c_in, c_in, upsample_strides[-1], stride=upsample_strides[-1], bias=False),
                nn.BatchNorm2d(c_in, eps=1e-3, momentum=0.01),
                nn.ReLU(),
            ))

        self.num_bev_features = c_in

    def forward(self, data_dict):
        """
        Args:
            data_dict:
                spatial_features: (N, C, H, W) BEV feature map
        Returns:
            data_dict with 'spatial_features_2d' added (fused multi-scale features).
        """
        spatial_features = data_dict['spatial_features']
        ups = []
        ret_dict = {}
        x = spatial_features
        for i in range(len(self.blocks)):
            x = self.blocks[i](x)

            stride = int(spatial_features.shape[2] / x.shape[2])
            ret_dict['spatial_features_%dx' % stride] = x
            if len(self.deblocks) > 0:
                ups.append(self.deblocks[i](x))
            else:
                ups.append(x)

        # Fuse multi-scale outputs by channel concatenation.
        if len(ups) > 1:
            x = torch.cat(ups, dim=1)
        elif len(ups) == 1:
            x = ups[0]

        if len(self.deblocks) > len(self.blocks):
            x = self.deblocks[-1](x)

        data_dict['spatial_features_2d'] = x
        return data_dict
| 4,318
| 37.221239
| 121
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_2d/__init__.py
|
from .base_bev_backbone import BaseBEVBackbone
# Registry mapping config NAME strings to 2D backbone classes.
__all__ = {
    'BaseBEVBackbone': BaseBEVBackbone
}
| 101
| 16
| 46
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py
|
import torch
import torch.nn as nn
class PointPillarScatter(nn.Module):
    """Scatters per-pillar feature vectors back onto a dense BEV canvas.

    Pillars carry coordinates [batch_idx, z, y, x]; each pillar's feature
    column is written at its (y, x) cell of a (C, ny, nx) grid per sample.
    """

    def __init__(self, model_cfg, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES
        self.nx, self.ny, self.nz = grid_size
        assert self.nz == 1  # pillars are a single-height-slice representation

    def forward(self, batch_dict, **kwargs):
        pillar_features = batch_dict['pillar_features']
        coords = batch_dict['voxel_coords']
        # Infer batch size from the largest batch index present.
        batch_size = coords[:, 0].max().int().item() + 1

        per_sample_canvases = []
        for sample_idx in range(batch_size):
            canvas = torch.zeros(
                self.num_bev_features,
                self.nz * self.nx * self.ny,
                dtype=pillar_features.dtype,
                device=pillar_features.device)
            sample_mask = coords[:, 0] == sample_idx
            sample_coords = coords[sample_mask, :]
            # Flatten (z, y, x) to a single canvas column index.
            flat_indices = sample_coords[:, 1] + sample_coords[:, 2] * self.nx + sample_coords[:, 3]
            flat_indices = flat_indices.type(torch.long)
            canvas[:, flat_indices] = pillar_features[sample_mask, :].t()
            per_sample_canvases.append(canvas)

        stacked = torch.stack(per_sample_canvases, 0)
        batch_dict['spatial_features'] = stacked.view(batch_size, self.num_bev_features * self.nz, self.ny, self.nx)
        return batch_dict
| 1,545
| 39.684211
| 123
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_2d/map_to_bev/__init__.py
|
from .height_compression import HeightCompression
from .pointpillar_scatter import PointPillarScatter
# Registry mapping config NAME strings to map-to-BEV module classes.
__all__ = {
    'HeightCompression': HeightCompression,
    'PointPillarScatter': PointPillarScatter
}
| 206
| 24.875
| 51
|
py
|
SASA
|
SASA-main/pcdet/models/backbones_2d/map_to_bev/height_compression.py
|
import torch.nn as nn
class HeightCompression(nn.Module):
    """Collapses the height (D) axis of a dense 3D feature volume into the
    channel axis, producing a 2D BEV feature map."""

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                encoded_spconv_tensor: sparse tensor
        Returns:
            batch_dict:
                spatial_features: (N, C*D, H, W) dense BEV map
        """
        dense_volume = batch_dict['encoded_spconv_tensor'].dense()
        batch, channels, depth, height, width = dense_volume.shape
        # Merge the channel and depth axes: (N, C, D, H, W) -> (N, C*D, H, W).
        batch_dict['spatial_features'] = dense_volume.view(batch, channels * depth, height, width)
        batch_dict['spatial_features_stride'] = batch_dict['encoded_spconv_tensor_stride']
        return batch_dict
| 870
| 31.259259
| 90
|
py
|
SASA
|
SASA-main/pcdet/datasets/dataset.py
|
from collections import defaultdict
from pathlib import Path
import numpy as np
import torch.utils.data as torch_data
from ..utils import common_utils
from .augmentor.data_augmentor import DataAugmentor
from .processor.data_processor import DataProcessor
from .processor.point_feature_encoder import PointFeatureEncoder
class DatasetTemplate(torch_data.Dataset):
    """Base dataset class for pcdet.

    Wires up the point-feature encoder, (train-only) data augmentor and data
    processor from `dataset_cfg`, and provides `prepare_data` plus the common
    `collate_batch` used by all concrete datasets.
    """

    def __init__(self, dataset_cfg=None, class_names=None, training=True, root_path=None, logger=None):
        super().__init__()
        self.dataset_cfg = dataset_cfg
        self.training = training
        self.class_names = class_names
        self.logger = logger  # (was assigned twice in the original; once suffices)
        self.root_path = root_path if root_path is not None else Path(self.dataset_cfg.DATA_PATH)
        if self.dataset_cfg is None or class_names is None:
            # Allow partially-constructed instances (e.g. for info-generation tools).
            return

        self.point_cloud_range = np.array(self.dataset_cfg.POINT_CLOUD_RANGE, dtype=np.float32)
        self.point_feature_encoder = PointFeatureEncoder(
            self.dataset_cfg.POINT_FEATURE_ENCODING,
            point_cloud_range=self.point_cloud_range
        )
        # Augmentation is applied at training time only.
        self.data_augmentor = DataAugmentor(
            self.root_path, self.dataset_cfg.DATA_AUGMENTOR, self.class_names, logger=self.logger
        ) if self.training else None
        self.data_processor = DataProcessor(
            self.dataset_cfg.DATA_PROCESSOR, point_cloud_range=self.point_cloud_range, training=self.training
        )

        self.grid_size = self.data_processor.grid_size
        self.voxel_size = self.data_processor.voxel_size
        self.total_epochs = 0
        self._merge_all_iters_to_one_epoch = False

    @property
    def mode(self):
        return 'train' if self.training else 'test'

    def __getstate__(self):
        # Loggers are not picklable; drop them when serializing (e.g. to dataloader workers).
        d = dict(self.__dict__)
        del d['logger']
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)

    @staticmethod
    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
        """
        To support a custom dataset, implement this function to receive the predicted results from the model, and then
        transform the unified normative coordinate to your required coordinate, and optionally save them to disk.

        Args:
            batch_dict: dict of original data from the dataloader
            pred_dicts: dict of predicted results from the model
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names:
            output_path: if it is not None, save the results to this path
        Returns:

        """

    def merge_all_iters_to_one_epoch(self, merge=True, epochs=None):
        """Treat `epochs` epochs as one long epoch (for iteration-based schedules)."""
        if merge:
            self._merge_all_iters_to_one_epoch = True
            self.total_epochs = epochs
        else:
            self._merge_all_iters_to_one_epoch = False

    def __len__(self):
        raise NotImplementedError

    def __getitem__(self, index):
        """
        To support a custom dataset, implement this function to load the raw data (and labels), then transform them to
        the unified normative coordinate and call the function self.prepare_data() to process the data and send them
        to the model.

        Args:
            index:

        Returns:

        """
        raise NotImplementedError

    def prepare_data(self, data_dict):
        """
        Args:
            data_dict:
                points: (N, 3 + C_in)
                gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
                gt_names: optional, (N), string
                ...

        Returns:
            data_dict:
                frame_id: string
                points: (N, 3 + C_in)
                gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
                gt_names: optional, (N), string
                use_lead_xyz: bool
                voxels: optional (num_voxels, max_points_per_voxel, 3 + C)
                voxel_coords: optional (num_voxels, 3)
                voxel_num_points: optional (num_voxels)
                ...
        """
        if self.training:
            assert 'gt_boxes' in data_dict, 'gt_boxes should be provided for training'
            gt_boxes_mask = np.array([n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)
            data_dict = self.data_augmentor.forward(
                data_dict={
                    **data_dict,
                    'gt_boxes_mask': gt_boxes_mask
                }
            )

        if data_dict.get('gt_boxes', None) is not None:
            # Keep only boxes of configured classes and append the 1-based class
            # index as the last box column.
            selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names)
            data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]
            data_dict['gt_names'] = data_dict['gt_names'][selected]
            gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)
            gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)
            data_dict['gt_boxes'] = gt_boxes

        data_dict = self.point_feature_encoder.forward(data_dict)
        data_dict = self.data_processor.forward(
            data_dict=data_dict
        )

        if self.training and len(data_dict['gt_boxes']) == 0:
            # All GT boxes were filtered/augmented away; resample another frame.
            new_index = np.random.randint(self.__len__())
            return self.__getitem__(new_index)

        data_dict.pop('gt_names', None)
        return data_dict

    @staticmethod
    def collate_batch(batch_list, _unused=False):
        """Collate a list of per-frame dicts into one batched dict.

        Points/voxel coords get a leading batch-index column; gt_boxes are
        zero-padded to the max count in the batch; everything else is stacked.
        """
        data_dict = defaultdict(list)
        for cur_sample in batch_list:
            for key, val in cur_sample.items():
                data_dict[key].append(val)
        batch_size = len(batch_list)
        ret = {}

        for key, val in data_dict.items():
            try:
                if key in ['voxels', 'voxel_num_points']:
                    ret[key] = np.concatenate(val, axis=0)
                elif key in ['points', 'voxel_coords']:
                    coors = []
                    for i, coor in enumerate(val):
                        # Prepend the sample index so stacked rows stay attributable.
                        coor_pad = np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i)
                        coors.append(coor_pad)
                    ret[key] = np.concatenate(coors, axis=0)
                elif key in ['gt_boxes']:
                    max_gt = max([len(x) for x in val])
                    batch_gt_boxes3d = np.zeros((batch_size, max_gt, val[0].shape[-1]), dtype=np.float32)
                    for k in range(batch_size):
                        batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k]
                    ret[key] = batch_gt_boxes3d
                else:
                    ret[key] = np.stack(val, axis=0)
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. The TypeError contract is kept.
                print('Error in collate_batch: key=%s' % key)
                raise TypeError

        ret['batch_size'] = batch_size
        return ret
| 6,966
| 37.071038
| 118
|
py
|
SASA
|
SASA-main/pcdet/datasets/__init__.py
|
import torch
from torch.utils.data import DataLoader
from torch.utils.data import DistributedSampler as _DistributedSampler
from pcdet.utils import common_utils
from .dataset import DatasetTemplate
from .kitti.kitti_dataset import KittiDataset
from .nuscenes.nuscenes_dataset import NuScenesDataset
from .waymo.waymo_dataset import WaymoDataset
# Registry mapping the DATASET name from the yaml config to its dataset class,
# used by build_dataloader below.
# NOTE(review): `__all__` conventionally is a list of exported names; here it is
# repurposed as a dict registry (existing project convention — kept as-is).
__all__ = {
    'DatasetTemplate': DatasetTemplate,
    'KittiDataset': KittiDataset,
    'NuScenesDataset': NuScenesDataset,
    'WaymoDataset': WaymoDataset
}
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with an explicit, epoch-seeded shuffle toggle.

    Unlike the stock torch sampler, shuffling can be disabled (used for
    deterministic evaluation); shuffled order is reproducible per epoch.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle

    def __iter__(self):
        num_items = len(self.dataset)
        if not self.shuffle:
            order = torch.arange(num_items).tolist()
        else:
            # seed with the epoch so every replica draws the same permutation
            rng = torch.Generator()
            rng.manual_seed(self.epoch)
            order = torch.randperm(num_items, generator=rng).tolist()

        # pad by wrapping around so the list splits evenly across replicas
        order = order + order[:(self.total_size - len(order))]
        assert len(order) == self.total_size

        # every num_replicas-th element, starting at this process's rank
        shard = order[self.rank:self.total_size:self.num_replicas]
        assert len(shard) == self.num_samples
        return iter(shard)
def build_dataloader(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4,
                     logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0):
    """Instantiate the configured dataset and wrap it in a DataLoader.

    Args:
        dataset_cfg: dataset config; dataset_cfg.DATASET selects the class from __all__
        class_names: list of class-name strings
        batch_size: per-GPU batch size
        dist: whether distributed training/eval is active
        root_path / workers / logger / training: forwarded to the dataset / loader
        merge_all_iters_to_one_epoch, total_epochs: optionally collapse all epochs
            into one long epoch on the dataset side
    Returns:
        (dataset, dataloader, sampler) — sampler is None in the non-distributed case
    """
    dataset_cls = __all__[dataset_cfg.DATASET]
    dataset = dataset_cls(
        dataset_cfg=dataset_cfg,
        class_names=class_names,
        root_path=root_path,
        training=training,
        logger=logger,
    )

    if merge_all_iters_to_one_epoch:
        assert hasattr(dataset, 'merge_all_iters_to_one_epoch')
        dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)

    sampler = None
    if dist:
        if training:
            sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        else:
            # deterministic, non-shuffled sharding for evaluation
            rank, world_size = common_utils.get_dist_info()
            sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)

    dataloader = DataLoader(
        dataset, batch_size=batch_size, pin_memory=True, num_workers=workers,
        shuffle=(sampler is None) and training, collate_fn=dataset.collate_batch,
        drop_last=False, sampler=sampler, timeout=0
    )
    return dataset, dataloader, sampler
| 2,440
| 32.438356
| 101
|
py
|
SASA
|
SASA-main/pcdet/datasets/waymo/waymo_utils.py
|
# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset
# Reference https://github.com/open-mmlab/OpenPCDet
# Written by Shaoshuai Shi, Chaoxu Guo
# All Rights Reserved 2019-2020.
import os
import pickle
import numpy as np
from ...utils import common_utils
import tensorflow as tf
from waymo_open_dataset.utils import frame_utils, transform_utils, range_image_utils
from waymo_open_dataset import dataset_pb2
try:
tf.enable_eager_execution()
except:
pass
WAYMO_CLASSES = ['unknown', 'Vehicle', 'Pedestrian', 'Sign', 'Cyclist']
def generate_labels(frame):
    """Convert the laser labels of one Waymo frame into an OpenPCDet annotation dict.

    Args:
        frame: a waymo_open_dataset dataset_pb2.Frame
    Returns:
        annotations: dict of numpy arrays ('name', 'difficulty', 'dimensions',
            'location', 'heading_angles', 'obj_ids', 'tracking_difficulty',
            'num_points_in_gt', 'gt_boxes_lidar'); 'unknown' objects are dropped.
    """
    names, difficulties, dims, centers, headings = [], [], [], [], []
    track_difficulties, ids, pts_in_gt = [], [], []
    for label in frame.laser_labels:
        box = label.box
        names.append(WAYMO_CLASSES[label.type])
        difficulties.append(label.detection_difficulty_level)
        track_difficulties.append(label.tracking_difficulty_level)
        centers.append([box.center_x, box.center_y, box.center_z])
        dims.append([box.length, box.width, box.height])  # lwh in unified coordinate of OpenPCDet
        headings.append(box.heading)
        ids.append(label.id)
        pts_in_gt.append(label.num_lidar_points_in_box)

    annotations = {
        'name': np.array(names),
        'difficulty': np.array(difficulties),
        'dimensions': np.array(dims),
        'location': np.array(centers),
        'heading_angles': np.array(headings),
        'obj_ids': np.array(ids),
        'tracking_difficulty': np.array(track_difficulties),
        'num_points_in_gt': np.array(pts_in_gt),
    }
    annotations = common_utils.drop_info_with_name(annotations, name='unknown')

    if len(annotations['name']) > 0:
        # pack [x, y, z, l, w, h, heading] into a single (N, 7) box array
        gt_boxes_lidar = np.concatenate([
            annotations['location'], annotations['dimensions'],
            annotations['heading_angles'][..., np.newaxis]],
            axis=1
        )
    else:
        gt_boxes_lidar = np.zeros((0, 7))
    annotations['gt_boxes_lidar'] = gt_boxes_lidar
    return annotations
def convert_range_image_to_point_cloud(frame, range_images, camera_projections, range_image_top_pose, ri_index=0):
    """
    Modified from the codes of Waymo Open Dataset.
    Convert range images to point cloud.
    Args:
        frame: open dataset frame
        range_images: A dict of {laser_name, [range_image_first_return, range_image_second_return]}.
        camera_projections: A dict of {laser_name,
            [camera_projection_from_first_return, camera_projection_from_second_return]}.
        range_image_top_pose: range image pixel pose for top lidar.
        ri_index: 0 for the first return, 1 for the second return.
    Returns:
        points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
        cp_points: {[N, 6]} list of camera projections of length 5 (number of lidars).
        Also returns per-lidar NLZ flags, intensities and elongations (channels 3, 1, 2
        of the range image).
    """
    # process lidars in a deterministic order (sorted by laser name)
    calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name)
    points = []
    cp_points = []
    points_NLZ = []
    points_intensity = []
    points_elongation = []
    frame_pose = tf.convert_to_tensor(np.reshape(np.array(frame.pose.transform), [4, 4]))
    # [H, W, 6]
    range_image_top_pose_tensor = tf.reshape(
        tf.convert_to_tensor(range_image_top_pose.data), range_image_top_pose.shape.dims
    )
    # [H, W, 3, 3]
    range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
        range_image_top_pose_tensor[..., 0], range_image_top_pose_tensor[..., 1],
        range_image_top_pose_tensor[..., 2])
    range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:]
    range_image_top_pose_tensor = transform_utils.get_transform(
        range_image_top_pose_tensor_rotation,
        range_image_top_pose_tensor_translation)
    for c in calibrations:
        range_image = range_images[c.name][ri_index]
        if len(c.beam_inclinations) == 0:  # pylint: disable=g-explicit-length-test
            # no per-beam calibration given: interpolate inclinations from min/max
            beam_inclinations = range_image_utils.compute_inclination(
                tf.constant([c.beam_inclination_min, c.beam_inclination_max]),
                height=range_image.shape.dims[0])
        else:
            beam_inclinations = tf.constant(c.beam_inclinations)
        # inclinations are stored bottom-up; the extraction util expects top-down
        beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
        extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
        range_image_tensor = tf.reshape(
            tf.convert_to_tensor(range_image.data), range_image.shape.dims)
        pixel_pose_local = None
        frame_pose_local = None
        if c.name == dataset_pb2.LaserName.TOP:
            # only the top lidar has per-pixel poses (it spins during capture)
            pixel_pose_local = range_image_top_pose_tensor
            pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
            frame_pose_local = tf.expand_dims(frame_pose, axis=0)
        # channel 0 = range; non-positive range means "no return" at that pixel
        range_image_mask = range_image_tensor[..., 0] > 0
        range_image_NLZ = range_image_tensor[..., 3]
        range_image_intensity = range_image_tensor[..., 1]
        range_image_elongation = range_image_tensor[..., 2]
        range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
            tf.expand_dims(range_image_tensor[..., 0], axis=0),
            tf.expand_dims(extrinsic, axis=0),
            tf.expand_dims(tf.convert_to_tensor(beam_inclinations), axis=0),
            pixel_pose=pixel_pose_local,
            frame_pose=frame_pose_local)
        range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
        # gather only the valid (mask == True) pixels from each channel
        points_tensor = tf.gather_nd(range_image_cartesian,
                                     tf.where(range_image_mask))
        points_NLZ_tensor = tf.gather_nd(range_image_NLZ, tf.compat.v1.where(range_image_mask))
        points_intensity_tensor = tf.gather_nd(range_image_intensity, tf.compat.v1.where(range_image_mask))
        points_elongation_tensor = tf.gather_nd(range_image_elongation, tf.compat.v1.where(range_image_mask))
        cp = camera_projections[c.name][0]
        cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims)
        cp_points_tensor = tf.gather_nd(cp_tensor, tf.where(range_image_mask))
        points.append(points_tensor.numpy())
        cp_points.append(cp_points_tensor.numpy())
        points_NLZ.append(points_NLZ_tensor.numpy())
        points_intensity.append(points_intensity_tensor.numpy())
        points_elongation.append(points_elongation_tensor.numpy())
    return points, cp_points, points_NLZ, points_intensity, points_elongation
def save_lidar_points(frame, cur_save_path):
    """Extract all lidar returns of one frame and save them as a single .npy file.

    The saved array is (N, 6) float32: [x, y, z, intensity, elongation, NLZ_flag],
    with points from the 5 lidars concatenated in order.

    Args:
        frame: a waymo_open_dataset dataset_pb2.Frame
        cur_save_path: target .npy path
    Returns:
        num_points_of_each_lidar: list of per-lidar point counts
    """
    range_images, camera_projections, range_image_top_pose = \
        frame_utils.parse_range_image_and_camera_projection(frame)
    points, cp_points, nlz_flags, intensities, elongations = \
        convert_range_image_to_point_cloud(frame, range_images, camera_projections, range_image_top_pose)

    num_points_of_each_lidar = [pts.shape[0] for pts in points]

    # 3d points in vehicle frame, one row per return
    xyz = np.concatenate(points, axis=0)
    intensity = np.concatenate(intensities, axis=0).reshape(-1, 1)
    elongation = np.concatenate(elongations, axis=0).reshape(-1, 1)
    nlz_flag = np.concatenate(nlz_flags, axis=0).reshape(-1, 1)

    save_points = np.concatenate(
        [xyz, intensity, elongation, nlz_flag], axis=-1
    ).astype(np.float32)
    np.save(cur_save_path, save_points)
    # print('saving to ', cur_save_path)
    return num_points_of_each_lidar
def process_single_sequence(sequence_file, save_path, sampled_interval, has_label=True):
    """
    Convert one Waymo .tfrecord sequence into per-frame .npy point files plus a
    pickled list of frame infos (saved as <save_path>/<sequence>/<sequence>.pkl).

    Args:
        sequence_file: pathlib.Path to the .tfrecord sequence
        save_path: root output directory
        sampled_interval: keep every k-th frame
        has_label: also extract annotations into info['annos']
    Returns:
        sequence_infos: list of per-frame info dicts (empty if the file is missing)
    """
    sequence_name = os.path.splitext(os.path.basename(sequence_file))[0]
    # print('Load record (sampled_interval=%d): %s' % (sampled_interval, sequence_name))
    if not sequence_file.exists():
        print('NotFoundError: %s' % sequence_file)
        return []
    dataset = tf.data.TFRecordDataset(str(sequence_file), compression_type='')
    cur_save_dir = save_path / sequence_name
    cur_save_dir.mkdir(parents=True, exist_ok=True)
    pkl_file = cur_save_dir / ('%s.pkl' % sequence_name)
    sequence_infos = []
    # the pkl acts as a done-marker: re-running skips already-processed sequences
    if pkl_file.exists():
        sequence_infos = pickle.load(open(pkl_file, 'rb'))
        print('Skip sequence since it has been processed before: %s' % pkl_file)
        return sequence_infos
    for cnt, data in enumerate(dataset):
        if cnt % sampled_interval != 0:
            continue
        # print(sequence_name, cnt)
        frame = dataset_pb2.Frame()
        frame.ParseFromString(bytearray(data.numpy()))
        info = {}
        pc_info = {'num_features': 5, 'lidar_sequence': sequence_name, 'sample_idx': cnt}
        info['point_cloud'] = pc_info
        info['frame_id'] = sequence_name + ('_%03d' % cnt)
        image_info = {}
        # record (height, width) of the 5 camera images
        for j in range(5):
            width = frame.context.camera_calibrations[j].width
            height = frame.context.camera_calibrations[j].height
            image_info.update({'image_shape_%d' % j: (height, width)})
        info['image'] = image_info
        # 4x4 vehicle-to-global transform of this frame
        pose = np.array(frame.pose.transform, dtype=np.float32).reshape(4, 4)
        info['pose'] = pose
        if has_label:
            annotations = generate_labels(frame)
            info['annos'] = annotations
        num_points_of_each_lidar = save_lidar_points(frame, cur_save_dir / ('%04d.npy' % cnt))
        info['num_points_of_each_lidar'] = num_points_of_each_lidar
        sequence_infos.append(info)
    with open(pkl_file, 'wb') as f:
        pickle.dump(sequence_infos, f)
    print('Infos are saved to (sampled_interval=%d): %s' % (sampled_interval, pkl_file))
    return sequence_infos
| 9,957
| 42.484716
| 114
|
py
|
SASA
|
SASA-main/pcdet/datasets/waymo/waymo_dataset.py
|
# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset
# Reference https://github.com/open-mmlab/OpenPCDet
# Written by Shaoshuai Shi, Chaoxu Guo
# All Rights Reserved 2019-2020.
import os
import pickle
import copy
import numpy as np
import torch
import multiprocessing
from tqdm import tqdm
from pathlib import Path
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, common_utils
from ..dataset import DatasetTemplate
class WaymoDataset(DatasetTemplate):
    """Waymo Open Dataset wrapper: loads preprocessed per-frame .npy point files
    and pickled infos produced by process_single_sequence()."""

    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.data_path = self.root_path / self.dataset_cfg.PROCESSED_DATA_TAG
        self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()]
        self.infos = []
        self.include_waymo_data(self.mode)
    def set_split(self, split):
        """Re-initialize the dataset for another split (reloads sequence list and infos)."""
        super().__init__(
            dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training,
            root_path=self.root_path, logger=self.logger
        )
        self.split = split
        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()]
        self.infos = []
        self.include_waymo_data(self.mode)
    def include_waymo_data(self, mode):
        """Load per-sequence info pickles into self.infos; missing sequences are skipped,
        then the frames are optionally subsampled by SAMPLED_INTERVAL[mode]."""
        self.logger.info('Loading Waymo dataset')
        waymo_infos = []
        num_skipped_infos = 0
        for k in range(len(self.sample_sequence_list)):
            sequence_name = os.path.splitext(self.sample_sequence_list[k])[0]
            info_path = self.data_path / sequence_name / ('%s.pkl' % sequence_name)
            info_path = self.check_sequence_name_with_all_version(info_path)
            if not info_path.exists():
                num_skipped_infos += 1
                continue
            with open(info_path, 'rb') as f:
                infos = pickle.load(f)
                waymo_infos.extend(infos)
        self.infos.extend(waymo_infos[:])
        self.logger.info('Total skipped info %s' % num_skipped_infos)
        self.logger.info('Total samples for Waymo dataset: %d' % (len(waymo_infos)))
        if self.dataset_cfg.SAMPLED_INTERVAL[mode] > 1:
            sampled_waymo_infos = []
            for k in range(0, len(self.infos), self.dataset_cfg.SAMPLED_INTERVAL[mode]):
                sampled_waymo_infos.append(self.infos[k])
            self.infos = sampled_waymo_infos
            self.logger.info('Total sampled samples for Waymo dataset: %d' % len(self.infos))
    @staticmethod
    def check_sequence_name_with_all_version(sequence_file):
        """Resolve a sequence path against both Waymo naming conventions
        (with and without the '_with_camera_labels' suffix) and return the
        variant that exists (or the last candidate if neither does)."""
        if '_with_camera_labels' not in str(sequence_file) and not sequence_file.exists():
            # BUGFIX: slice the *string* form — `sequence_file[:-9]` on a pathlib.Path
            # raised TypeError. [:-9] strips the '.tfrecord' extension.
            sequence_file = Path(str(sequence_file)[:-9] + '_with_camera_labels.tfrecord')
        if '_with_camera_labels' in str(sequence_file) and not sequence_file.exists():
            sequence_file = Path(str(sequence_file).replace('_with_camera_labels', ''))
        return sequence_file
    def get_infos(self, raw_data_path, save_path, num_workers=multiprocessing.cpu_count(), has_label=True, sampled_interval=1):
        """Preprocess all raw .tfrecord sequences (in parallel) and return the
        flattened list of per-frame infos."""
        import concurrent.futures as futures
        from functools import partial
        from . import waymo_utils
        print('---------------The waymo sample interval is %d, total sequecnes is %d-----------------'
              % (sampled_interval, len(self.sample_sequence_list)))
        process_single_sequence = partial(
            waymo_utils.process_single_sequence,
            save_path=save_path, sampled_interval=sampled_interval, has_label=has_label
        )
        sample_sequence_file_list = [
            self.check_sequence_name_with_all_version(raw_data_path / sequence_file)
            for sequence_file in self.sample_sequence_list
        ]
        # process_single_sequence(sample_sequence_file_list[0])
        with futures.ThreadPoolExecutor(num_workers) as executor:
            sequence_infos = list(tqdm(executor.map(process_single_sequence, sample_sequence_file_list),
                                       total=len(sample_sequence_file_list)))
        all_sequences_infos = [item for infos in sequence_infos for item in infos]
        return all_sequences_infos
    def get_lidar(self, sequence_name, sample_idx):
        """Load one frame's points; drops no-label-zone points and tanh-compresses intensity."""
        lidar_file = self.data_path / sequence_name / ('%04d.npy' % sample_idx)
        point_features = np.load(lidar_file)  # (N, 6): [x, y, z, intensity, elongation, NLZ_flag]
        points_all, NLZ_flag = point_features[:, 0:5], point_features[:, 5]
        points_all = points_all[NLZ_flag == -1]
        points_all[:, 3] = np.tanh(points_all[:, 3])
        return points_all
    def __len__(self):
        if self._merge_all_iters_to_one_epoch:
            return len(self.infos) * self.total_epochs
        return len(self.infos)
    def __getitem__(self, index):
        if self._merge_all_iters_to_one_epoch:
            index = index % len(self.infos)
        info = copy.deepcopy(self.infos[index])
        pc_info = info['point_cloud']
        sequence_name = pc_info['lidar_sequence']
        sample_idx = pc_info['sample_idx']
        points = self.get_lidar(sequence_name, sample_idx)
        input_dict = {
            'points': points,
            'frame_id': info['frame_id'],
        }
        if 'annos' in info:
            annos = info['annos']
            annos = common_utils.drop_info_with_name(annos, name='unknown')
            if self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False):
                gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(annos['gt_boxes_lidar'])
            else:
                gt_boxes_lidar = annos['gt_boxes_lidar']
            input_dict.update({
                'gt_names': annos['name'],
                'gt_boxes': gt_boxes_lidar,
                'num_points_in_gt': annos.get('num_points_in_gt', None)
            })
        data_dict = self.prepare_data(data_dict=input_dict)
        data_dict['metadata'] = info.get('metadata', info['frame_id'])
        data_dict.pop('num_points_in_gt', None)
        return data_dict
    @staticmethod
    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
        """
        Args:
            batch_dict:
                frame_id:
            pred_dicts: list of pred_dicts
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names:
            output_path:
        Returns:
            annos: list of per-sample prediction dicts (numpy, Waymo naming)
        """
        def get_template_prediction(num_samples):
            # zero-filled template so empty predictions still have all keys
            ret_dict = {
                'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
                'boxes_lidar': np.zeros([num_samples, 7])
            }
            return ret_dict
        def generate_single_sample_dict(box_dict):
            pred_scores = box_dict['pred_scores'].cpu().numpy()
            pred_boxes = box_dict['pred_boxes'].cpu().numpy()
            pred_labels = box_dict['pred_labels'].cpu().numpy()
            pred_dict = get_template_prediction(pred_scores.shape[0])
            if pred_scores.shape[0] == 0:
                return pred_dict
            # pred_labels are 1-based class indices
            pred_dict['name'] = np.array(class_names)[pred_labels - 1]
            pred_dict['score'] = pred_scores
            pred_dict['boxes_lidar'] = pred_boxes
            return pred_dict
        annos = []
        for index, box_dict in enumerate(pred_dicts):
            single_pred_dict = generate_single_sample_dict(box_dict)
            single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
            single_pred_dict['metadata'] = batch_dict['metadata'][index]
            annos.append(single_pred_dict)
        return annos
    def evaluation(self, det_annos, class_names, **kwargs):
        """Evaluate detections with either the KITTI or the official Waymo metric,
        selected by kwargs['eval_metric']."""
        if 'annos' not in self.infos[0].keys():
            return 'No ground-truth boxes for evaluation', {}
        def kitti_eval(eval_det_annos, eval_gt_annos):
            from ..kitti.kitti_object_eval_python import eval as kitti_eval
            from ..kitti import kitti_utils
            map_name_to_kitti = {
                'Vehicle': 'Car',
                'Pedestrian': 'Pedestrian',
                'Cyclist': 'Cyclist',
                'Sign': 'Sign',
                'Car': 'Car'
            }
            kitti_utils.transform_annotations_to_kitti_format(eval_det_annos, map_name_to_kitti=map_name_to_kitti)
            kitti_utils.transform_annotations_to_kitti_format(
                eval_gt_annos, map_name_to_kitti=map_name_to_kitti,
                info_with_fakelidar=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False)
            )
            kitti_class_names = [map_name_to_kitti[x] for x in class_names]
            ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
                gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names
            )
            return ap_result_str, ap_dict
        def waymo_eval(eval_det_annos, eval_gt_annos):
            from .waymo_eval import OpenPCDetWaymoDetectionMetricsEstimator
            eval = OpenPCDetWaymoDetectionMetricsEstimator()
            ap_dict = eval.waymo_evaluation(
                eval_det_annos, eval_gt_annos, class_name=class_names,
                distance_thresh=1000, fake_gt_infos=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False)
            )
            ap_result_str = '\n'
            for key in ap_dict:
                ap_dict[key] = ap_dict[key][0]
                ap_result_str += '%s: %.4f \n' % (key, ap_dict[key])
            return ap_result_str, ap_dict
        eval_det_annos = copy.deepcopy(det_annos)
        eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.infos]
        if kwargs['eval_metric'] == 'kitti':
            ap_result_str, ap_dict = kitti_eval(eval_det_annos, eval_gt_annos)
        elif kwargs['eval_metric'] == 'waymo':
            ap_result_str, ap_dict = waymo_eval(eval_det_annos, eval_gt_annos)
        else:
            raise NotImplementedError
        return ap_result_str, ap_dict
    def create_groundtruth_database(self, info_path, save_path, used_classes=None, split='train', sampled_interval=10,
                                   processed_data_tag=None):
        """Crop GT boxes out of each (sub-sampled) frame into per-object .bin files
        and dump a db-info pickle, for use by the GT-sampling augmentor."""
        database_save_path = save_path / ('pcdet_gt_database_%s_sampled_%d' % (split, sampled_interval))
        db_info_save_path = save_path / ('pcdet_waymo_dbinfos_%s_sampled_%d.pkl' % (split, sampled_interval))
        database_save_path.mkdir(parents=True, exist_ok=True)
        all_db_infos = {}
        with open(info_path, 'rb') as f:
            infos = pickle.load(f)
        for k in range(0, len(infos), sampled_interval):
            print('gt_database sample: %d/%d' % (k + 1, len(infos)))
            info = infos[k]
            pc_info = info['point_cloud']
            sequence_name = pc_info['lidar_sequence']
            sample_idx = pc_info['sample_idx']
            points = self.get_lidar(sequence_name, sample_idx)
            annos = info['annos']
            names = annos['name']
            difficulty = annos['difficulty']
            gt_boxes = annos['gt_boxes_lidar']
            num_obj = gt_boxes.shape[0]
            # assign each point to the box (if any) that contains it
            box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(),
                torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda()
            ).long().squeeze(dim=0).cpu().numpy()
            for i in range(num_obj):
                filename = '%s_%04d_%s_%d.bin' % (sequence_name, sample_idx, names[i], i)
                filepath = database_save_path / filename
                gt_points = points[box_idxs_of_pts == i]
                # store points relative to the box center
                gt_points[:, :3] -= gt_boxes[i, :3]
                if (used_classes is None) or names[i] in used_classes:
                    with open(filepath, 'w') as f:
                        gt_points.tofile(f)
                    db_path = str(filepath.relative_to(self.root_path))  # gt_database/xxxxx.bin
                    db_info = {'name': names[i], 'path': db_path, 'sequence_name': sequence_name,
                               'sample_idx': sample_idx, 'gt_idx': i, 'box3d_lidar': gt_boxes[i],
                               'num_points_in_gt': gt_points.shape[0], 'difficulty': difficulty[i]}
                    if names[i] in all_db_infos:
                        all_db_infos[names[i]].append(db_info)
                    else:
                        all_db_infos[names[i]] = [db_info]
        for k, v in all_db_infos.items():
            print('Database %s: %d' % (k, len(v)))
        with open(db_info_save_path, 'wb') as f:
            pickle.dump(all_db_infos, f)
def create_waymo_infos(dataset_cfg, class_names, data_path, save_path,
                       raw_data_tag='raw_data', processed_data_tag='waymo_processed_data',
                       workers=multiprocessing.cpu_count()):
    """One-shot preprocessing entry: build train/val info pickles from the raw
    .tfrecord sequences, then build the GT-sampling database from the train split."""
    dataset = WaymoDataset(
        dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path,
        training=False, logger=common_utils.create_logger()
    )
    train_split, val_split = 'train', 'val'
    train_filename = save_path / ('waymo_infos_%s.pkl' % train_split)
    val_filename = save_path / ('waymo_infos_%s.pkl' % val_split)

    print('---------------Start to generate data infos---------------')

    dataset.set_split(train_split)
    train_infos = dataset.get_infos(
        raw_data_path=data_path / raw_data_tag,
        save_path=save_path / processed_data_tag, num_workers=workers, has_label=True,
        sampled_interval=1
    )
    with open(train_filename, 'wb') as f:
        pickle.dump(train_infos, f)
    print('----------------Waymo info train file is saved to %s----------------' % train_filename)

    dataset.set_split(val_split)
    val_infos = dataset.get_infos(
        raw_data_path=data_path / raw_data_tag,
        save_path=save_path / processed_data_tag, num_workers=workers, has_label=True,
        sampled_interval=1
    )
    with open(val_filename, 'wb') as f:
        pickle.dump(val_infos, f)
    print('----------------Waymo info val file is saved to %s----------------' % val_filename)

    print('---------------Start create groundtruth database for data augmentation---------------')
    dataset.set_split(train_split)
    dataset.create_groundtruth_database(
        info_path=train_filename, save_path=save_path, split='train', sampled_interval=10,
        used_classes=['Vehicle', 'Pedestrian', 'Cyclist']
    )
    print('---------------Data preparation Done---------------')
if __name__ == '__main__':
    # CLI entry: python -m pcdet.datasets.waymo.waymo_dataset --cfg_file <yaml> --func create_waymo_infos
    import argparse
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
    parser.add_argument('--func', type=str, default='create_waymo_infos', help='')
    args = parser.parse_args()
    if args.func == 'create_waymo_infos':
        import yaml
        from easydict import EasyDict
        # FIX: yaml.load() without an explicit Loader is unsafe and raises a
        # TypeError on PyYAML >= 6; config files only need safe_load. Also use a
        # context manager so the file handle is closed.
        with open(args.cfg_file) as f:
            dataset_cfg = EasyDict(yaml.safe_load(f))
        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        create_waymo_infos(
            dataset_cfg=dataset_cfg,
            class_names=['Vehicle', 'Pedestrian', 'Cyclist'],
            data_path=ROOT_DIR / 'data' / 'waymo',
            save_path=ROOT_DIR / 'data' / 'waymo',
            raw_data_tag='raw_data',
            processed_data_tag=dataset_cfg.PROCESSED_DATA_TAG
        )
| 15,837
| 41.461126
| 127
|
py
|
SASA
|
SASA-main/pcdet/datasets/waymo/waymo_eval.py
|
# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset
# Reference https://github.com/open-mmlab/OpenPCDet
# Written by Shaoshuai Shi, Chaoxu Guo
# All Rights Reserved 2019-2020.
import numpy as np
import pickle
import tensorflow as tf
from google.protobuf import text_format
from waymo_open_dataset.metrics.python import detection_metrics
from waymo_open_dataset.protos import metrics_pb2
import argparse
tf.get_logger().setLevel('INFO')
def limit_period(val, offset=0.5, period=np.pi):
    """Wrap `val` into the half-open interval [-offset*period, (1-offset)*period)."""
    whole_cycles = np.floor(val / period + offset)
    return val - whole_cycles * period
class OpenPCDetWaymoDetectionMetricsEstimator(tf.test.TestCase):
    # Computes official Waymo detection metrics from OpenPCDet-format annotation
    # dicts using the waymo_open_dataset TF1-style metric ops. Inherits
    # tf.test.TestCase only to reuse its test_session() helper.
    # NOTE(review): index 3 is 'Truck' here but 'Sign' in waymo_utils.WAYMO_CLASSES —
    # looks intentional for metric breakdown naming, but verify against the proto.
    WAYMO_CLASSES = ['unknown', 'Vehicle', 'Pedestrian', 'Truck', 'Cyclist']
    def generate_waymo_type_results(self, infos, class_names, is_gt=False, fake_gt_infos=True):
        # Flatten a list of per-frame annotation dicts into parallel arrays
        # (frame_id, boxes, type, score, overlap_nlz, difficulty) for the metric ops.
        def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar):
            """
            Args:
                boxes3d_fakelidar: (N, 7) [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center
            Returns:
                boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
            """
            w, l, h, r = boxes3d_lidar[:, 3:4], boxes3d_lidar[:, 4:5], boxes3d_lidar[:, 5:6], boxes3d_lidar[:, 6:7]
            boxes3d_lidar[:, 2] += h[:, 0] / 2
            return np.concatenate([boxes3d_lidar[:, 0:3], l, w, h, -(r + np.pi / 2)], axis=-1)
        frame_id, boxes3d, obj_type, score, overlap_nlz, difficulty = [], [], [], [], [], []
        for frame_index, info in enumerate(infos):
            if is_gt:
                box_mask = np.array([n in class_names for n in info['name']], dtype=np.bool_)
                if 'num_points_in_gt' in info:
                    # boxes with unset (0) difficulty: derive level from point count;
                    # empty boxes are excluded entirely
                    zero_difficulty_mask = info['difficulty'] == 0
                    info['difficulty'][(info['num_points_in_gt'] > 5) & zero_difficulty_mask] = 1
                    info['difficulty'][(info['num_points_in_gt'] <= 5) & zero_difficulty_mask] = 2
                    nonzero_mask = info['num_points_in_gt'] > 0
                    box_mask = box_mask & nonzero_mask
                else:
                    print('Please provide the num_points_in_gt for evaluating on Waymo Dataset '
                          '(If you create Waymo Infos before 20201126, please re-create the validation infos '
                          'with version 1.2 Waymo dataset to get this attribute). SSS of OpenPCDet')
                    raise NotImplementedError
                num_boxes = box_mask.sum()
                box_name = info['name'][box_mask]
                difficulty.append(info['difficulty'][box_mask])
                score.append(np.ones(num_boxes))
                if fake_gt_infos:
                    info['gt_boxes_lidar'] = boxes3d_kitti_fakelidar_to_lidar(info['gt_boxes_lidar'])
                boxes3d.append(info['gt_boxes_lidar'][box_mask])
            else:
                num_boxes = len(info['boxes_lidar'])
                difficulty.append([0] * num_boxes)
                score.append(info['score'])
                boxes3d.append(np.array(info['boxes_lidar']))
                box_name = info['name']
            obj_type += [self.WAYMO_CLASSES.index(name) for i, name in enumerate(box_name)]
            frame_id.append(np.array([frame_index] * num_boxes))
            overlap_nlz.append(np.zeros(num_boxes))  # set zero currently
        frame_id = np.concatenate(frame_id).reshape(-1).astype(np.int64)
        boxes3d = np.concatenate(boxes3d, axis=0)
        obj_type = np.array(obj_type).reshape(-1)
        score = np.concatenate(score).reshape(-1)
        overlap_nlz = np.concatenate(overlap_nlz).reshape(-1)
        difficulty = np.concatenate(difficulty).reshape(-1).astype(np.int8)
        # normalize headings into [-pi, pi) as the metric ops expect
        boxes3d[:, -1] = limit_period(boxes3d[:, -1], offset=0.5, period=np.pi * 2)
        return frame_id, boxes3d, obj_type, score, overlap_nlz, difficulty
    def build_config(self):
        # Metric config: per-object-type breakdown, Hungarian matching,
        # per-type IoU thresholds (index order follows object type ids).
        config = metrics_pb2.Config()
        config_text = """
        breakdown_generator_ids: OBJECT_TYPE
        difficulties {
        levels:1
        levels:2
        }
        matcher_type: TYPE_HUNGARIAN
        iou_thresholds: 0.0
        iou_thresholds: 0.7
        iou_thresholds: 0.5
        iou_thresholds: 0.5
        iou_thresholds: 0.5
        box_type: TYPE_3D
        """
        for x in range(0, 100):
            config.score_cutoffs.append(x * 0.01)
        config.score_cutoffs.append(1.0)
        text_format.Merge(config_text, config)
        return config
    def build_graph(self, graph):
        # Create TF1 placeholders for predictions/GT and wire them into the
        # waymo detection-metric ops; returns the metrics dict of (value, update) ops.
        with graph.as_default():
            self._pd_frame_id = tf.compat.v1.placeholder(dtype=tf.int64)
            self._pd_bbox = tf.compat.v1.placeholder(dtype=tf.float32)
            self._pd_type = tf.compat.v1.placeholder(dtype=tf.uint8)
            self._pd_score = tf.compat.v1.placeholder(dtype=tf.float32)
            self._pd_overlap_nlz = tf.compat.v1.placeholder(dtype=tf.bool)
            self._gt_frame_id = tf.compat.v1.placeholder(dtype=tf.int64)
            self._gt_bbox = tf.compat.v1.placeholder(dtype=tf.float32)
            self._gt_type = tf.compat.v1.placeholder(dtype=tf.uint8)
            self._gt_difficulty = tf.compat.v1.placeholder(dtype=tf.uint8)
            metrics = detection_metrics.get_detection_metric_ops(
                config=self.build_config(),
                prediction_frame_id=self._pd_frame_id,
                prediction_bbox=self._pd_bbox,
                prediction_type=self._pd_type,
                prediction_score=self._pd_score,
                prediction_overlap_nlz=self._pd_overlap_nlz,
                ground_truth_bbox=self._gt_bbox,
                ground_truth_type=self._gt_type,
                ground_truth_frame_id=self._gt_frame_id,
                ground_truth_difficulty=self._gt_difficulty,
            )
        return metrics
    def run_eval_ops(
        self,
        sess,
        graph,
        metrics,
        prediction_frame_id,
        prediction_bbox,
        prediction_type,
        prediction_score,
        prediction_overlap_nlz,
        ground_truth_frame_id,
        ground_truth_bbox,
        ground_truth_type,
        ground_truth_difficulty,
    ):
        # Run all metric *update* ops (value[1]) once with the full eval arrays.
        sess.run(
            [tf.group([value[1] for value in metrics.values()])],
            feed_dict={
                self._pd_bbox: prediction_bbox,
                self._pd_frame_id: prediction_frame_id,
                self._pd_type: prediction_type,
                self._pd_score: prediction_score,
                self._pd_overlap_nlz: prediction_overlap_nlz,
                self._gt_bbox: ground_truth_bbox,
                self._gt_type: ground_truth_type,
                self._gt_frame_id: ground_truth_frame_id,
                self._gt_difficulty: ground_truth_difficulty,
            },
        )
    def eval_value_ops(self, sess, graph, metrics):
        # Read back the accumulated metric *value* ops (item[1][0]).
        return {item[0]: sess.run([item[1][0]]) for item in metrics.items()}
    def mask_by_distance(self, distance_thresh, boxes_3d, *args):
        # Keep boxes within distance_thresh (+0.5 m slack) of the sensor in BEV,
        # applying the same mask to every companion array in *args.
        mask = np.linalg.norm(boxes_3d[:, 0:2], axis=1) < distance_thresh + 0.5
        boxes_3d = boxes_3d[mask]
        ret_ans = [boxes_3d]
        for arg in args:
            ret_ans.append(arg[mask])
        return tuple(ret_ans)
    def waymo_evaluation(self, prediction_infos, gt_infos, class_name, distance_thresh=100, fake_gt_infos=True):
        # End-to-end evaluation: flatten annos, distance-filter, feed the metric
        # graph once, and return the AP dict.
        print('Start the waymo evaluation...')
        assert len(prediction_infos) == len(gt_infos), '%d vs %d' % (prediction_infos.__len__(), gt_infos.__len__())
        tf.compat.v1.disable_eager_execution()
        pd_frameid, pd_boxes3d, pd_type, pd_score, pd_overlap_nlz, _ = self.generate_waymo_type_results(
            prediction_infos, class_name, is_gt=False
        )
        gt_frameid, gt_boxes3d, gt_type, gt_score, gt_overlap_nlz, gt_difficulty = self.generate_waymo_type_results(
            gt_infos, class_name, is_gt=True, fake_gt_infos=fake_gt_infos
        )
        pd_boxes3d, pd_frameid, pd_type, pd_score, pd_overlap_nlz = self.mask_by_distance(
            distance_thresh, pd_boxes3d, pd_frameid, pd_type, pd_score, pd_overlap_nlz
        )
        gt_boxes3d, gt_frameid, gt_type, gt_score, gt_difficulty = self.mask_by_distance(
            distance_thresh, gt_boxes3d, gt_frameid, gt_type, gt_score, gt_difficulty
        )
        print('Number: (pd, %d) VS. (gt, %d)' % (len(pd_boxes3d), len(gt_boxes3d)))
        print('Level 1: %d, Level2: %d)' % ((gt_difficulty == 1).sum(), (gt_difficulty == 2).sum()))
        if pd_score.max() > 1:
            # assert pd_score.max() <= 1.0, 'Waymo evaluation only supports normalized scores'
            # squash raw logits to (0, 1) with a sigmoid instead of failing
            pd_score = 1 / (1 + np.exp(-pd_score))
            print('Warning: Waymo evaluation only supports normalized scores')
        graph = tf.Graph()
        metrics = self.build_graph(graph)
        with self.test_session(graph=graph) as sess:
            sess.run(tf.compat.v1.initializers.local_variables())
            self.run_eval_ops(
                sess, graph, metrics, pd_frameid, pd_boxes3d, pd_type, pd_score, pd_overlap_nlz,
                gt_frameid, gt_boxes3d, gt_type, gt_difficulty,
            )
            with tf.compat.v1.variable_scope('detection_metrics', reuse=True):
                aps = self.eval_value_ops(sess, graph, metrics)
        return aps
def main():
    """Entry point: load prediction/GT pickle files and print Waymo detection metrics.

    Command-line args:
        --pred_infos: path to the prediction-info pickle
        --gt_infos: path to the ground-truth-info pickle
        --class_names: classes to evaluate
        --sampled_interval: GT frame subsampling stride
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--pred_infos', type=str, default=None, help='pickle file')
    parser.add_argument('--gt_infos', type=str, default=None, help='pickle file')
    parser.add_argument('--class_names', type=str, nargs='+', default=['Vehicle', 'Pedestrian', 'Cyclist'], help='')
    parser.add_argument('--sampled_interval', type=int, default=5, help='sampled interval for GT sequences')
    args = parser.parse_args()

    # Use context managers so the pickle file handles are closed deterministically
    # (the original `pickle.load(open(...))` leaked them).
    with open(args.pred_infos, 'rb') as f:
        pred_infos = pickle.load(f)
    with open(args.gt_infos, 'rb') as f:
        gt_infos = pickle.load(f)

    print('Start to evaluate the waymo format results...')
    # Renamed from `eval`, which shadowed the builtin.
    evaluator = OpenPCDetWaymoDetectionMetricsEstimator()

    # Subsample GT sequences and flatten the annotations, keeping the frame id.
    gt_infos_dst = []
    for idx in range(0, len(gt_infos), args.sampled_interval):
        cur_info = gt_infos[idx]['annos']
        cur_info['frame_id'] = gt_infos[idx]['frame_id']
        gt_infos_dst.append(cur_info)

    waymo_AP = evaluator.waymo_evaluation(
        pred_infos, gt_infos_dst, class_name=args.class_names, distance_thresh=1000, fake_gt_infos=True
    )
    print(waymo_AP)
# Allow running this evaluation module as a standalone script.
if __name__ == '__main__':
    main()
| 10,488
| 41.465587
| 116
|
py
|
SASA
|
SASA-main/pcdet/datasets/processor/point_feature_encoder.py
|
import numpy as np
class PointFeatureEncoder(object):
    """Encodes raw point features into the channel layout consumed by the model.

    The concrete encoder is selected by config.encoding_type and invoked via
    getattr; calling it with points=None queries the output channel count.
    """

    def __init__(self, config, point_cloud_range=None):
        super().__init__()
        self.point_encoding_config = config
        # The first three source channels must always be the xyz coordinates.
        assert list(self.point_encoding_config.src_feature_list[0:3]) == ['x', 'y', 'z']
        self.used_feature_list = self.point_encoding_config.used_feature_list
        self.src_feature_list = self.point_encoding_config.src_feature_list
        self.point_cloud_range = point_cloud_range

    @property
    def num_point_features(self):
        # Query the configured encoder for its output width.
        encoder = getattr(self, self.point_encoding_config.encoding_type)
        return encoder(points=None)

    def forward(self, data_dict):
        """
        Args:
            data_dict:
                points: (N, 3 + C_in)
                ...
        Returns:
            data_dict:
                points: (N, 3 + C_out),
                use_lead_xyz: whether to use xyz as point-wise features
                ...
        """
        encoder = getattr(self, self.point_encoding_config.encoding_type)
        encoded_points, use_lead_xyz = encoder(data_dict['points'])
        data_dict['points'] = encoded_points
        data_dict['use_lead_xyz'] = use_lead_xyz
        return data_dict

    def absolute_coordinates_encoding(self, points=None):
        """Keep absolute xyz plus every other configured feature column, in order."""
        if points is None:
            # Query mode: report the number of output channels.
            return len(self.used_feature_list)
        extra_columns = [
            points[:, self.src_feature_list.index(name):self.src_feature_list.index(name) + 1]
            for name in self.used_feature_list
            if name not in ('x', 'y', 'z')
        ]
        point_features = np.concatenate([points[:, 0:3]] + extra_columns, axis=1)
        return point_features, True
| 1,703
| 34.5
| 100
|
py
|
SASA
|
SASA-main/pcdet/datasets/processor/data_processor.py
|
from functools import partial
import numpy as np
from ...utils import box_utils, common_utils
class DataProcessor(object):
    """Sequential pre-processing pipeline (range masking, shuffling, voxelization,
    point sampling) applied to every sample before it is fed to the network.

    Every step method has a dual role: invoked with data_dict=None at
    construction time it returns a configured partial that gets queued;
    invoked with a data_dict at run time it transforms and returns it.
    """

    def __init__(self, processor_configs, point_cloud_range, training):
        """
        Args:
            processor_configs: list of step configs; cfg.NAME selects the method
            point_cloud_range: [x_min, y_min, z_min, x_max, y_max, z_max]
            training: bool, selects the 'train'/'test' mode-specific settings
        """
        self.point_cloud_range = point_cloud_range
        self.training = training
        self.mode = 'train' if training else 'test'
        self.grid_size = self.voxel_size = None
        self.data_processor_queue = []
        for cur_cfg in processor_configs:
            cur_processor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
            self.data_processor_queue.append(cur_processor)

    def mask_points_and_boxes_outside_range(self, data_dict=None, config=None):
        """Drop points (and, during training, GT boxes) outside the global point-cloud range."""
        if data_dict is None:
            return partial(self.mask_points_and_boxes_outside_range, config=config)
        mask = common_utils.mask_points_by_range(data_dict['points'], self.point_cloud_range)
        data_dict['points'] = data_dict['points'][mask]
        if data_dict.get('gt_boxes', None) is not None and config.REMOVE_OUTSIDE_BOXES and self.training:
            mask = box_utils.mask_boxes_outside_range_numpy(
                data_dict['gt_boxes'], self.point_cloud_range, min_num_corners=config.get('min_num_corners', 1)
            )
            data_dict['gt_boxes'] = data_dict['gt_boxes'][mask]
        return data_dict

    def mask_points_and_boxes_outside_crop(self, data_dict=None, config=None):
        """Like mask_points_and_boxes_outside_range, but masks against the
        per-sample data_dict['crop_range']."""
        if data_dict is None:
            # BUGFIX: this used to return a partial bound to
            # mask_points_and_boxes_outside_range, so the crop-based variant was
            # silently replaced by range-based masking when queued.
            return partial(self.mask_points_and_boxes_outside_crop, config=config)
        mask = common_utils.mask_points_by_range(data_dict['points'], data_dict['crop_range'])
        data_dict['points'] = data_dict['points'][mask]
        if data_dict.get('gt_boxes', None) is not None and config.REMOVE_OUTSIDE_BOXES and self.training:
            mask = box_utils.mask_boxes_outside_range_numpy(
                data_dict['gt_boxes'], data_dict['crop_range'], min_num_corners=config.get('min_num_corners', 1)
            )
            data_dict['gt_boxes'] = data_dict['gt_boxes'][mask]
        return data_dict

    def shuffle_points(self, data_dict=None, config=None):
        """Randomly permute point order (typically enabled only in training mode)."""
        if data_dict is None:
            return partial(self.shuffle_points, config=config)
        if config.SHUFFLE_ENABLED[self.mode]:
            points = data_dict['points']
            shuffle_idx = np.random.permutation(points.shape[0])
            data_dict['points'] = points[shuffle_idx]
        return data_dict

    def transform_points_to_voxels(self, data_dict=None, config=None, voxel_generator=None):
        """Voxelize the point cloud; the spconv voxel generator is built once at setup time."""
        if data_dict is None:
            try:
                from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
            except:
                from spconv.utils import VoxelGenerator
            voxel_generator = VoxelGenerator(
                voxel_size=config.VOXEL_SIZE,
                point_cloud_range=self.point_cloud_range,
                max_num_points=config.MAX_POINTS_PER_VOXEL,
                max_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode]
            )
            grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
            self.grid_size = np.round(grid_size).astype(np.int64)
            self.voxel_size = config.VOXEL_SIZE
            return partial(self.transform_points_to_voxels, voxel_generator=voxel_generator)
        points = data_dict['points']
        voxel_output = voxel_generator.generate(points)
        # VoxelGeneratorV2 returns a dict, the legacy generator a tuple.
        if isinstance(voxel_output, dict):
            voxels, coordinates, num_points = \
                voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
        else:
            voxels, coordinates, num_points = voxel_output
        if not data_dict['use_lead_xyz']:
            voxels = voxels[..., 3:]  # remove xyz in voxels(N, 3)
        data_dict['voxels'] = voxels
        data_dict['voxel_coords'] = coordinates
        data_dict['voxel_num_points'] = num_points
        return data_dict

    def sample_points_by_voxel(self, data_dict=None, config=None):
        """Downsample key-frame (time == 0) and other-frame points separately via
        1-point-per-voxel voxelization, then pad each set to its fixed budget by
        random repetition and shuffle."""
        if data_dict is None:
            return partial(self.sample_points_by_voxel, config=config)
        try:
            from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
        except:
            from spconv.utils import VoxelGenerator
        # NOTE(review): generators are rebuilt on every call; hoisting them to
        # setup time (as transform_points_to_voxels does) would avoid the cost.
        key_frame_voxel_generator = VoxelGenerator(
            voxel_size=config.VOXEL_SIZE,
            point_cloud_range=self.point_cloud_range,
            max_num_points=1,
            max_voxels=config.KEY_FRAME_NUMBER_OF_VOXELS[self.mode]
        )
        other_frame_voxel_generator = VoxelGenerator(
            voxel_size=config.VOXEL_SIZE,
            point_cloud_range=self.point_cloud_range,
            max_num_points=1,
            max_voxels=config.OTHER_FRAME_NUMBER_OF_VOXELS[self.mode]
        )
        grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE)
        self.grid_size = np.round(grid_size).astype(np.int64)
        self.voxel_size = config.VOXEL_SIZE
        points = data_dict['points']
        times = points[:, -1]  # last channel stores the per-point timestamp
        key_frame_mask = (times == 0)
        other_frame_mask = (times != 0)
        key_frame_points = points[key_frame_mask, :]
        other_frame_points = points[other_frame_mask, :]
        if len(other_frame_points) == 0:
            # Single-frame input: reuse all points so the generator has data.
            other_frame_points = points
        key_frame_output = key_frame_voxel_generator.generate(key_frame_points)
        other_frame_output = other_frame_voxel_generator.generate(other_frame_points)
        if isinstance(key_frame_output, dict):
            key_frame_points = key_frame_output['voxels']
            other_frame_points = other_frame_output['voxels']
        else:
            key_frame_points = key_frame_output[0]
            other_frame_points = other_frame_output[0]
        # max_num_points=1 -> drop the singleton per-voxel axis.
        key_frame_points = np.squeeze(key_frame_points, axis=1)
        other_frame_points = np.squeeze(other_frame_points, axis=1)
        choice = np.arange(0, len(key_frame_points), dtype=np.int32)
        if len(key_frame_points) < config.KEY_FRAME_NUMBER_OF_VOXELS[self.mode]:
            extra_choice = np.random.choice(choice, config.KEY_FRAME_NUMBER_OF_VOXELS[self.mode] - len(key_frame_points), replace=True)
            choice = np.concatenate((choice, extra_choice), axis=0)
        np.random.shuffle(choice)
        key_frame_points = key_frame_points[choice]
        choice = np.arange(0, len(other_frame_points), dtype=np.int32)
        if len(other_frame_points) < config.OTHER_FRAME_NUMBER_OF_VOXELS[self.mode]:
            extra_choice = np.random.choice(choice, config.OTHER_FRAME_NUMBER_OF_VOXELS[self.mode] - len(other_frame_points), replace=True)
            choice = np.concatenate((choice, extra_choice), axis=0)
        np.random.shuffle(choice)
        other_frame_points = other_frame_points[choice]
        points = np.concatenate((key_frame_points, other_frame_points), axis=0)
        data_dict['points'] = points
        return data_dict

    def sample_points(self, data_dict=None, config=None):
        """Sample a fixed number of points, preferring near points (< 40 m) when
        subsampling and repeating points when oversampling. NUM_POINTS == -1 disables."""
        if data_dict is None:
            return partial(self.sample_points, config=config)
        num_points = config.NUM_POINTS[self.mode]
        if num_points == -1:
            return data_dict
        points = data_dict['points']
        if num_points < len(points):
            pts_depth = np.linalg.norm(points[:, 0:3], axis=1)
            pts_near_flag = pts_depth < 40.0
            far_idxs_choice = np.where(pts_near_flag == 0)[0]
            near_idxs = np.where(pts_near_flag == 1)[0]
            if num_points > len(far_idxs_choice):
                # Keep all far points, fill the rest with random near points.
                near_idxs_choice = np.random.choice(near_idxs, num_points - len(far_idxs_choice), replace=False)
                choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \
                    if len(far_idxs_choice) > 0 else near_idxs_choice
            else:
                choice = np.arange(0, len(points), dtype=np.int32)
                choice = np.random.choice(choice, num_points, replace=False)
            np.random.shuffle(choice)
        else:
            choice = np.arange(0, len(points), dtype=np.int32)
            if num_points > len(points):
                # Oversample by repeating random points.
                extra_choice = np.random.choice(choice, num_points - len(points), replace=True)
                choice = np.concatenate((choice, extra_choice), axis=0)
            np.random.shuffle(choice)
        data_dict['points'] = points[choice]
        return data_dict

    def forward(self, data_dict):
        """
        Args:
            data_dict:
                points: (N, 3 + C_in)
                gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
                gt_names: optional, (N), string
                ...
        Returns:
            data_dict after every queued processor has been applied in order
        """
        for cur_processor in self.data_processor_queue:
            data_dict = cur_processor(data_dict=data_dict)
        return data_dict
| 8,976
| 43.440594
| 139
|
py
|
SASA
|
SASA-main/pcdet/datasets/augmentor/augmentor_utils.py
|
import numpy as np
import numba
import warnings
from numba.core.errors import NumbaPerformanceWarning
from ...utils import common_utils
from ...utils import box_utils
def random_flip_along_x(enable_prob, gt_boxes, points):
    """
    Randomly mirror the scene across the x axis (y -> -y), in place.
    Args:
        enable_prob: probability of applying the flip
        gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
        points: (M, 3 + C)
    Returns:
        gt_boxes, points
    """
    apply_flip = np.random.choice([False, True], replace=False, p=[1.0 - enable_prob, enable_prob])
    if not apply_flip:
        return gt_boxes, points
    gt_boxes[:, 1] = -gt_boxes[:, 1]
    gt_boxes[:, 6] = -gt_boxes[:, 6]
    points[:, 1] = -points[:, 1]
    # When velocities are encoded, vy flips with the y axis.
    if gt_boxes.shape[1] > 7:
        gt_boxes[:, 8] = -gt_boxes[:, 8]
    return gt_boxes, points
def random_flip_along_y(enable_prob, gt_boxes, points):
    """
    Randomly mirror the scene across the y axis (x -> -x), in place.
    Args:
        enable_prob: probability of applying the flip
        gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
        points: (M, 3 + C)
    Returns:
        gt_boxes, points
    """
    apply_flip = np.random.choice([False, True], replace=False, p=[1.0 - enable_prob, enable_prob])
    if not apply_flip:
        return gt_boxes, points
    gt_boxes[:, 0] = -gt_boxes[:, 0]
    gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi)
    points[:, 0] = -points[:, 0]
    # When velocities are encoded, vx flips with the x axis.
    if gt_boxes.shape[1] > 7:
        gt_boxes[:, 7] = -gt_boxes[:, 7]
    return gt_boxes, points
def global_rotation(enable_prob, gt_boxes, points, rot_range):
    """
    Randomly rotate the whole scene about the z axis.
    Args:
        enable_prob: probability of applying the rotation
        gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
        points: (M, 3 + C),
        rot_range: [min, max] rotation angle in radians
    Returns:
        gt_boxes, points
    """
    apply_rot = np.random.choice([False, True], replace=False, p=[1.0 - enable_prob, enable_prob])
    if not apply_rot:
        return gt_boxes, points
    angle = np.random.uniform(rot_range[0], rot_range[1])
    angle_arr = np.array([angle])
    points = common_utils.rotate_points_along_z(points[np.newaxis, :, :], angle_arr)[0]
    gt_boxes[:, 0:3] = common_utils.rotate_points_along_z(gt_boxes[np.newaxis, :, 0:3], angle_arr)[0]
    gt_boxes[:, 6] += angle
    # Rotate the stored [vx, vy] velocity vector with the same angle.
    if gt_boxes.shape[1] > 7:
        velocities = np.hstack((gt_boxes[:, 7:9], np.zeros((gt_boxes.shape[0], 1))))
        gt_boxes[:, 7:9] = common_utils.rotate_points_along_z(
            velocities[np.newaxis, :, :], angle_arr
        )[0][:, 0:2]
    return gt_boxes, points
def global_scaling(enable_prob, gt_boxes, points, scale_range):
    """
    Randomly scale the whole scene (xyz and box sizes), in place.
    Args:
        enable_prob: probability of applying the scaling
        gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
        points: (M, 3 + C),
        scale_range: [min, max] scale factor
    Returns:
        gt_boxes, points
    """
    # Degenerate range: nothing meaningful to sample, skip entirely.
    if scale_range[1] - scale_range[0] < 1e-3:
        return gt_boxes, points
    apply_scale = np.random.choice([False, True], replace=False, p=[1.0 - enable_prob, enable_prob])
    if apply_scale:
        factor = np.random.uniform(scale_range[0], scale_range[1])
        points[:, :3] *= factor
        gt_boxes[:, :6] *= factor
    return gt_boxes, points
def box_noise(enable_prob, gt_boxes, points, valid_mask=None, extra_width=0.1, sem_labels=None,
              loc_noise_std=[1.0, 1.0, 0.0], scale_range=[1.0, 1.0], rotation_range=[0.0, 0.0], num_try=100):
    """
    Apply independent location/scale/rotation noise to each GT box (and the
    points inside it), rejecting noise candidates that would make boxes collide.
    Args:
        enable_prob: list of float, prob for enabling center, scale and rotation noise
        gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
        points: (M, 3 + C),
        valid_mask: (N), mask to indicate which boxes are valid
        extra_width: points in expanded regions are also permuted
        sem_labels: TODO: support sem_labels
        loc_noise_std: location noise std
        scale_range:
        rotation_range:
        num_try: number of attempts for noise generating
    Returns:
        gt_boxes, points (both mutated in place when noise is applied)
    """
    # Numba warns about small-array performance in the jitted helpers; harmless here.
    warnings.simplefilter('ignore', category=NumbaPerformanceWarning)
    num_box = gt_boxes.shape[0]
    num_points = points.shape[0]
    enable = np.random.choice([False, True], replace=False, p=[1.0 - enable_prob, enable_prob])
    if enable:
        if valid_mask is None:
            valid_mask = np.ones((num_box,), dtype=np.bool_)
        # Draw num_try candidate transforms per box; choose_noise_for_box picks
        # the first collision-free one for each box.
        loc_noise = np.array(loc_noise_std, dtype=gt_boxes.dtype)
        loc_noise = np.random.normal(
            scale=loc_noise, size=[num_box, num_try, 3]
        )
        scale_noise = np.random.uniform(
            scale_range[0], scale_range[1], size=[num_box, num_try]
        )
        rotation_noise = np.random.uniform(
            rotation_range[0], rotation_range[1], size=[num_box, num_try]
        )
        # Collision test and point assignment use slightly enlarged boxes.
        gt_boxes_expand = gt_boxes.copy()
        gt_boxes_expand[:, 3:6] += float(extra_width)
        success_mask = choose_noise_for_box(gt_boxes_expand[:, [0, 1, 3, 4, 6]], valid_mask,
                                            loc_noise, scale_noise, rotation_noise)
        # Identity transforms by default; overwritten where a candidate succeeded.
        loc_transform = np.zeros((num_box, 3), dtype=gt_boxes.dtype)
        scale_transform = np.ones((num_box,), dtype=gt_boxes.dtype)
        rotation_transform = np.zeros((num_box,), dtype=gt_boxes.dtype)
        for i in range(num_box):
            if success_mask[i] != -1:
                loc_transform[i, :] = loc_noise[i, success_mask[i], :]
                scale_transform[i] = scale_noise[i, success_mask[i]]
                rotation_transform[i] = rotation_noise[i, success_mask[i]]
        # Assign each point to the (expanded) boxes that contain it.
        gt_corners_expand = box_utils.boxes_to_corners_3d(gt_boxes_expand)
        point_masks = np.zeros((num_box, num_points), dtype=np.bool_)
        for i in range(num_box):
            point_masks[i, :] = box_utils.in_hull(points[:, 0:3], gt_corners_expand[i])
        # Move the points first (uses the original box poses), then the boxes.
        point_transform_(points, gt_boxes, valid_mask, point_masks, loc_transform, scale_transform, rotation_transform)
        box3d_transform_(gt_boxes, valid_mask, loc_transform, scale_transform, rotation_transform)
    return gt_boxes, points
@numba.njit
def point_transform_(points, gt_boxes, valid_mask, point_masks, loc_transform, scale_transform, rotation_transform):
    """Apply each box's noise transform to the points inside it (in place).

    Each point is moved into its box frame, scaled, rotated about z, moved
    back, re-grounded, then translated; only the first containing box applies.
    """
    num_box = gt_boxes.shape[0]
    num_points = points.shape[0]
    # Precompute one transposed z-rotation matrix per box.
    rot_mat_T = np.zeros((num_box, 3, 3), dtype=points.dtype)
    for i in range(num_box):
        _rotation_matrix_3d_(rot_mat_T[i], rotation_transform[i], 2)
    for i in range(num_points):
        for j in range(num_box):
            if valid_mask[j]:
                if point_masks[j, i] == 1:
                    points[i, :3] -= gt_boxes[j, :3]
                    points[i, :3] *= scale_transform[j]
                    points[i:i + 1, :3] = points[i:i + 1, :3] @ rot_mat_T[j]
                    points[i, :3] += gt_boxes[j, :3]
                    points[i, 2] += gt_boxes[j, 5] * (scale_transform[j] - 1) / 2  # ensure box still on the ground
                    points[i, :3] += loc_transform[j]
                    break  # only apply first box's transform
def box3d_transform_(boxes, valid_mask, loc_transform, scale_transform, rotation_transform):
    """Apply the per-box location/scale/rotation noise to boxes, in place."""
    for idx in range(boxes.shape[0]):
        if not valid_mask[idx]:
            continue
        boxes[idx, :3] += loc_transform[idx]
        boxes[idx, 3:6] *= scale_transform[idx]
        # Lift the center so the scaled box keeps resting on the ground.
        boxes[idx, 2] += boxes[idx, 5] * (scale_transform[idx] - 1) / 2
        boxes[idx, 6] += rotation_transform[idx]
        if boxes.shape[1] > 7:  # rotate the stored velocity [vx, vy] as well
            boxes[idx, 7:9] = common_utils.rotate_points_along_z(
                np.hstack((boxes[idx, 7:9], np.zeros((1,))))[np.newaxis, np.newaxis, :],
                np.array([rotation_transform[idx]])
            )[0][0, 0:2]
@numba.njit
def _rotation_box2d_jit_(corners, angle, rot_mat_T):
    """Rotate 2D boxes.

    Fills rot_mat_T with the transposed 2D rotation matrix for `angle` and
    rotates `corners` in place about the origin.
    Args:
        corners (np.ndarray): Corners of boxes.
        angle (float): Rotation angle.
        rot_mat_T (np.ndarray): Transposed rotation matrix (scratch buffer, overwritten).
    """
    rot_sin = np.sin(angle)
    rot_cos = np.cos(angle)
    rot_mat_T[0, 0] = rot_cos
    rot_mat_T[0, 1] = rot_sin
    rot_mat_T[1, 0] = -rot_sin
    rot_mat_T[1, 1] = rot_cos
    corners[:] = corners @ rot_mat_T
@numba.njit
def _rotation_matrix_3d_(rot_mat_T, angle, axis):
    """Get the 3D rotation matrix.

    Writes the transposed rotation matrix for a rotation of `angle` about
    `axis` (0=x, 1=y, 2 or -1=z) into rot_mat_T in place.
    Args:
        rot_mat_T (np.ndarray): Transposed rotation matrix (output buffer).
        angle (float): Rotation angle.
        axis (int): Rotation axis.
    """
    rot_sin = np.sin(angle)
    rot_cos = np.cos(angle)
    rot_mat_T[:] = np.eye(3)
    if axis == 1:
        rot_mat_T[0, 0] = rot_cos
        rot_mat_T[0, 2] = rot_sin
        rot_mat_T[2, 0] = -rot_sin
        rot_mat_T[2, 2] = rot_cos
    elif axis == 2 or axis == -1:
        rot_mat_T[0, 0] = rot_cos
        rot_mat_T[0, 1] = rot_sin
        rot_mat_T[1, 0] = -rot_sin
        rot_mat_T[1, 1] = rot_cos
    elif axis == 0:
        rot_mat_T[1, 1] = rot_cos
        rot_mat_T[1, 2] = rot_sin
        rot_mat_T[2, 1] = -rot_sin
        rot_mat_T[2, 2] = rot_cos
@numba.njit
def choose_noise_for_box(box2d, valid_mask, loc_noise, scale_noise, rotation_noise):
    """
    For each valid box, try its noise candidates in order and pick the first
    one whose transformed BEV rectangle collides with no other box.
    Args:
        box2d: (N, 5) [x, y, dx, dy, heading]
        valid_mask:
        loc_noise: (N, M, 3)
        scale_noise: (N, M)
        rotation_noise: (N, M)
    Returns:
        success_mask: index of the chosen candidate per box; unsuccess=-1
    """
    num_box = box2d.shape[0]
    num_try = loc_noise.shape[1]
    box_corners = box2d_to_corner_jit(box2d)
    cur_corners = np.zeros((4, 2), dtype=box2d.dtype)
    rot_mat_T = np.zeros((2, 2), dtype=box2d.dtype)
    success_mask = -np.ones((num_box, ), dtype=np.int64)
    for i in range(num_box):
        if valid_mask[i]:
            for j in range(num_try):
                # Transform the corners: rotate/scale about the box center, then translate.
                cur_corners[:] = box_corners[i]
                cur_corners -= box2d[i, :2]
                _rotation_box2d_jit_(cur_corners, rotation_noise[i, j], rot_mat_T)
                cur_corners *= scale_noise[i, j]
                cur_corners += box2d[i, :2] + loc_noise[i, j, :2]
                collision_mat = box_collision_test(
                    cur_corners.reshape(1, 4, 2), box_corners
                )
                collision_mat[0, i] = False  # ignore self-collision
                if not collision_mat.any():
                    success_mask[i] = j
                    # Commit the moved corners so later boxes test against them.
                    box_corners[i] = cur_corners
                    break
    return success_mask
@numba.jit(nopython=True)
def box2d_to_corner_jit(boxes):
    """Convert (N, 5) [x, y, dx, dy, heading] boxes to (N, 4, 2) BEV corners."""
    num_box = boxes.shape[0]
    # Unit-square corner offsets around the origin, later scaled by (dx, dy).
    corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
    corners_norm[1, 1] = 1.0
    corners_norm[2] = 1.0
    corners_norm[3, 0] = 1.0
    corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
    corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(
        1, 4, 2)
    rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
    box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype)
    for i in range(num_box):
        # Rotate by heading, then translate to the box center.
        rot_sin = np.sin(boxes[i, -1])
        rot_cos = np.cos(boxes[i, -1])
        rot_mat_T[0, 0] = rot_cos
        rot_mat_T[0, 1] = rot_sin
        rot_mat_T[1, 0] = -rot_sin
        rot_mat_T[1, 1] = rot_cos
        box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2]
    return box_corners
@numba.njit
def corner_to_standup_nd_jit(boxes_corner):
    """Convert boxes_corner to aligned (min-max) boxes.
    Args:
        boxes_corner (np.ndarray, shape=[N, 2**dim, dim]): Boxes corners.
    Returns:
        np.ndarray, shape=[N, dim*2]: Aligned (min-max) boxes, laid out as
        [min_0..min_{dim-1}, max_0..max_{dim-1}].
    """
    num_boxes = boxes_corner.shape[0]
    ndim = boxes_corner.shape[-1]
    result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype)
    for i in range(num_boxes):
        for j in range(ndim):
            result[i, j] = np.min(boxes_corner[i, :, j])
        for j in range(ndim):
            result[i, j + ndim] = np.max(boxes_corner[i, :, j])
    return result
@numba.jit(nopython=True)
def box_collision_test(boxes, qboxes, clockwise=True):
    """Box collision test.

    Returns an (N, K) boolean matrix marking which box pairs overlap in BEV,
    using an axis-aligned pre-filter, edge-intersection tests, and a
    containment check for full-overlap cases.
    Args:
        boxes (np.ndarray): Corners of current boxes.
        qboxes (np.ndarray): Boxes to be avoid colliding.
        clockwise (bool): Whether the corners are in clockwise order.
            Default: True.
    """
    N = boxes.shape[0]
    K = qboxes.shape[0]
    ret = np.zeros((N, K), dtype=np.bool_)
    slices = np.array([1, 2, 3, 0])
    # Pair each corner with its successor to form the 4 edges of every box.
    lines_boxes = np.stack((boxes, boxes[:, slices, :]), axis=2)  # [N, 4, 2(line), 2(xy)]
    lines_qboxes = np.stack((qboxes, qboxes[:, slices, :]), axis=2)
    # vec = np.zeros((2,), dtype=boxes.dtype)
    boxes_standup = corner_to_standup_nd_jit(boxes)
    qboxes_standup = corner_to_standup_nd_jit(qboxes)
    for i in range(N):
        for j in range(K):
            # calculate standup first
            # Cheap axis-aligned bounding-box rejection before the exact test.
            iw = (
                min(boxes_standup[i, 2], qboxes_standup[j, 2]) -
                max(boxes_standup[i, 0], qboxes_standup[j, 0]))
            if iw > 0:
                ih = (
                    min(boxes_standup[i, 3], qboxes_standup[j, 3]) -
                    max(boxes_standup[i, 1], qboxes_standup[j, 1]))
                if ih > 0:
                    # Segment-intersection test between every edge pair,
                    # using counter-clockwise orientation predicates.
                    for k in range(4):
                        for box_l in range(4):
                            A = lines_boxes[i, k, 0]
                            B = lines_boxes[i, k, 1]
                            C = lines_qboxes[j, box_l, 0]
                            D = lines_qboxes[j, box_l, 1]
                            acd = (D[1] - A[1]) * (C[0] -
                                                   A[0]) > (C[1] - A[1]) * (
                                                       D[0] - A[0])
                            bcd = (D[1] - B[1]) * (C[0] -
                                                   B[0]) > (C[1] - B[1]) * (
                                                       D[0] - B[0])
                            if acd != bcd:
                                abc = (C[1] - A[1]) * (B[0] - A[0]) > (
                                    B[1] - A[1]) * (
                                        C[0] - A[0])
                                abd = (D[1] - A[1]) * (B[0] - A[0]) > (
                                    B[1] - A[1]) * (
                                        D[0] - A[0])
                                if abc != abd:
                                    ret[i, j] = True  # collision.
                                    break
                        # NOTE(review): `is True` on an array element works here only
                        # because numba compiles it as a plain boolean comparison; do
                        # not run this function un-jitted.
                        if ret[i, j] is True:
                            break
                    if ret[i, j] is False:
                        # now check complete overlap.
                        # box overlap qbox:
                        # No edge crossings: one box may fully contain the other.
                        box_overlap_qbox = True
                        for box_l in range(4):  # point l in qboxes
                            for k in range(4):  # corner k in boxes
                                vec = boxes[i, k] - boxes[i, (k + 1) % 4]
                                if clockwise:
                                    vec = -vec
                                cross = vec[1] * (
                                    boxes[i, k, 0] - qboxes[j, box_l, 0])
                                cross -= vec[0] * (
                                    boxes[i, k, 1] - qboxes[j, box_l, 1])
                                if cross >= 0:
                                    box_overlap_qbox = False
                                    break
                            if box_overlap_qbox is False:
                                break
                        if box_overlap_qbox is False:
                            qbox_overlap_box = True
                            for box_l in range(4):  # point box_l in boxes
                                for k in range(4):  # corner k in qboxes
                                    vec = qboxes[j, k] - qboxes[j, (k + 1) % 4]
                                    if clockwise:
                                        vec = -vec
                                    cross = vec[1] * (
                                        qboxes[j, k, 0] - boxes[i, box_l, 0])
                                    cross -= vec[0] * (
                                        qboxes[j, k, 1] - boxes[i, box_l, 1])
                                    if cross >= 0:  #
                                        qbox_overlap_box = False
                                        break
                                if qbox_overlap_box is False:
                                    break
                            if qbox_overlap_box:
                                ret[i, j] = True  # collision.
                        else:
                            ret[i, j] = True  # collision.
    return ret
| 16,240
| 38.229469
| 122
|
py
|
SASA
|
SASA-main/pcdet/datasets/augmentor/data_augmentor.py
|
from functools import partial
import numpy as np
from ...utils import common_utils
from . import augmentor_utils, database_sampler
class DataAugmentor(object):
    """Builds and applies the configured augmentations (GT sampling, flips,
    rotation, scaling, per-box noise) to a training sample.

    Each random_* method, when called with data_dict=None at construction
    time, returns a config-bound partial that is queued and later applied.
    """

    def __init__(self, root_path, augmentor_configs, class_names, logger=None):
        self.root_path = root_path
        self.class_names = class_names
        self.logger = logger
        self.data_augmentor_queue = []
        aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \
            else augmentor_configs.AUG_CONFIG_LIST
        for cur_cfg in aug_config_list:
            if not isinstance(augmentor_configs, list):
                # Skip augmentations explicitly disabled in the config.
                if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
                    continue
            cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
            self.data_augmentor_queue.append(cur_augmentor)

    def gt_sampling(self, config=None):
        """Build the GT-database sampler; the sampler object itself is callable."""
        db_sampler = database_sampler.DataBaseSampler(
            root_path=self.root_path,
            sampler_cfg=config,
            class_names=self.class_names,
            logger=self.logger
        )
        return db_sampler

    def __getstate__(self):
        # Drop the (unpicklable) logger when pickling, e.g. for dataloader workers.
        d = dict(self.__dict__)
        del d['logger']
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)

    def random_world_flip(self, data_dict=None, config=None):
        """Randomly mirror the whole scene along the configured axes."""
        if data_dict is None:
            return partial(self.random_world_flip, config=config)
        enable_prob = config['ENABLE_PROB']
        gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
        for cur_axis in config['ALONG_AXIS_LIST']:
            assert cur_axis in ['x', 'y']
            gt_boxes, points = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(
                enable_prob, gt_boxes, points,
            )
        data_dict['gt_boxes'] = gt_boxes
        data_dict['points'] = points
        return data_dict

    def random_world_rotation(self, data_dict=None, config=None):
        """Randomly rotate the whole scene about the z axis."""
        if data_dict is None:
            return partial(self.random_world_rotation, config=config)
        enable_prob = config['ENABLE_PROB']
        rot_range = config['WORLD_ROT_ANGLE']
        if not isinstance(rot_range, list):
            # A scalar means a symmetric range [-angle, angle].
            rot_range = [-rot_range, rot_range]
        gt_boxes, points = augmentor_utils.global_rotation(
            enable_prob, data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range
        )
        data_dict['gt_boxes'] = gt_boxes
        data_dict['points'] = points
        return data_dict

    def random_world_scaling(self, data_dict=None, config=None):
        """Randomly scale the whole scene."""
        if data_dict is None:
            return partial(self.random_world_scaling, config=config)
        enable_prob = config['ENABLE_PROB']
        gt_boxes, points = augmentor_utils.global_scaling(
            enable_prob, data_dict['gt_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE']
        )
        data_dict['gt_boxes'] = gt_boxes
        data_dict['points'] = points
        return data_dict

    def random_box_noise(self, data_dict=None, config=None):
        """Apply per-box location/scale/rotation noise to GT boxes and their points."""
        if data_dict is None:
            return partial(self.random_box_noise, config=config)
        enable_prob = config['ENABLE_PROB']
        loc_noise = config['LOC_NOISE']
        scale_range = config['SCALE_RANGE']
        rotation_range = config['ROTATION_RANGE']
        # Normalize scalar configs to the list forms box_noise expects.
        if not isinstance(loc_noise, list):
            loc_noise = [
                loc_noise, loc_noise, loc_noise
            ]
        if not isinstance(scale_range, list):
            scale_range = [
                1.0 - scale_range, 1.0 + scale_range
            ]
        if not isinstance(rotation_range, list):
            rotation_range = [
                -rotation_range, rotation_range
            ]
        gt_boxes, points = augmentor_utils.box_noise(
            enable_prob, data_dict['gt_boxes'], data_dict['points'],
            loc_noise_std=loc_noise, scale_range=scale_range, rotation_range=rotation_range
        )
        data_dict['gt_boxes'] = gt_boxes
        data_dict['points'] = points
        return data_dict

    def forward(self, data_dict):
        """
        Args:
            data_dict:
                points: (N, 3 + C_in)
                gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading]
                gt_names: optional, (N), string
                ...
        Returns:
            data_dict after all queued augmentations, with headings normalized
            and invalid GT boxes/names filtered out.
        """
        for cur_augmentor in self.data_augmentor_queue:
            data_dict = cur_augmentor(data_dict=data_dict)
        # Keep headings in the canonical [-pi, pi) range after rotations/flips.
        data_dict['gt_boxes'][:, 6] = common_utils.limit_period(
            data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi
        )
        if 'calib' in data_dict:
            data_dict.pop('calib')
        if 'road_plane' in data_dict:
            data_dict.pop('road_plane')
        if 'gt_boxes_mask' in data_dict:
            gt_boxes_mask = data_dict['gt_boxes_mask']
            data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]
            data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]
            data_dict.pop('gt_boxes_mask')
        return data_dict
| 5,098
| 35.949275
| 96
|
py
|
SASA
|
SASA-main/pcdet/datasets/augmentor/database_sampler.py
|
import pickle
import numpy as np
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils import box_utils
class DataBaseSampler(object):
    """GT-database sampler: pastes pre-cropped ground-truth objects (boxes plus
    their points) from a database into the current scene, rejecting samples
    that would overlap existing or previously pasted boxes.
    """

    def __init__(self, root_path, sampler_cfg, class_names, logger=None):
        self.root_path = root_path
        self.class_names = class_names
        self.sampler_cfg = sampler_cfg
        self.logger = logger
        self.db_infos = {}
        for class_name in class_names:
            self.db_infos[class_name] = []
        # Merge infos from all configured database pickles.
        for db_info_path in sampler_cfg.DB_INFO_PATH:
            db_info_path = self.root_path.resolve() / db_info_path
            with open(str(db_info_path), 'rb') as f:
                infos = pickle.load(f)
                [self.db_infos[cur_class].extend(infos[cur_class]) for cur_class in class_names]
        # PREPARE maps filter-method names to their arguments (e.g. filter_by_difficulty).
        for func_name, val in sampler_cfg.PREPARE.items():
            self.db_infos = getattr(self, func_name)(self.db_infos, val)
        self.sample_groups = {}
        self.sample_class_num = {}
        self.enable_prob = sampler_cfg.get('ENABLE_PROB', 1.0)
        self.limit_whole_scene = sampler_cfg.get('LIMIT_WHOLE_SCENE', False)
        # SAMPLE_GROUPS entries look like 'Car:15'; sample_num is kept as a string
        # and converted to int where it is used.
        for x in sampler_cfg.SAMPLE_GROUPS:
            class_name, sample_num = x.split(':')
            if class_name not in class_names:
                continue
            self.sample_class_num[class_name] = sample_num
            self.sample_groups[class_name] = {
                'sample_num': sample_num,
                'pointer': len(self.db_infos[class_name]),
                'indices': np.arange(len(self.db_infos[class_name]))
            }

    def __getstate__(self):
        # Drop the (unpicklable) logger when pickling, e.g. for dataloader workers.
        d = dict(self.__dict__)
        del d['logger']
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)

    def filter_by_difficulty(self, db_infos, removed_difficulty):
        """Remove database entries whose difficulty is in removed_difficulty."""
        new_db_infos = {}
        for key, dinfos in db_infos.items():
            pre_len = len(dinfos)
            new_db_infos[key] = [
                info for info in dinfos
                if info['difficulty'] not in removed_difficulty
            ]
            if self.logger is not None:
                self.logger.info('Database filter by difficulty %s: %d => %d' % (key, pre_len, len(new_db_infos[key])))
        return new_db_infos

    def filter_by_min_points(self, db_infos, min_gt_points_list):
        """Remove database entries with fewer points than 'Class:min' requires."""
        for name_num in min_gt_points_list:
            name, min_num = name_num.split(':')
            min_num = int(min_num)
            if min_num > 0 and name in db_infos.keys():
                filtered_infos = []
                for info in db_infos[name]:
                    if info['num_points_in_gt'] >= min_num:
                        filtered_infos.append(info)
                if self.logger is not None:
                    self.logger.info('Database filter by min points %s: %d => %d' %
                                     (name, len(db_infos[name]), len(filtered_infos)))
                db_infos[name] = filtered_infos
        return db_infos

    def sample_with_fixed_number(self, class_name, sample_group):
        """
        Draw sample_num database entries for class_name, cycling through a
        shuffled permutation that is re-drawn once exhausted.
        Args:
            class_name:
            sample_group: dict with mutable 'sample_num', 'pointer', 'indices' state
        Returns:
            list of sampled database info dicts
        """
        sample_num, pointer, indices = int(sample_group['sample_num']), sample_group['pointer'], sample_group['indices']
        if pointer >= len(self.db_infos[class_name]):
            indices = np.random.permutation(len(self.db_infos[class_name]))
            pointer = 0
        sampled_dict = [self.db_infos[class_name][idx] for idx in indices[pointer: pointer + sample_num]]
        pointer += sample_num
        sample_group['pointer'] = pointer
        sample_group['indices'] = indices
        return sampled_dict

    @staticmethod
    def put_boxes_on_road_planes(gt_boxes, road_planes, calib):
        """
        Only validate in KITTIDataset
        Lowers each sampled box onto the road plane; returns the moved boxes
        and the per-box height shift (used to shift the object points too).
        Args:
            gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            road_planes: [a, b, c, d]
            calib:
        Returns:
            gt_boxes, mv_height
        """
        a, b, c, d = road_planes
        center_cam = calib.lidar_to_rect(gt_boxes[:, 0:3])
        # Solve the plane equation a*x + b*y + c*z + d = 0 for the camera-frame height y.
        cur_height_cam = (-d - a * center_cam[:, 0] - c * center_cam[:, 2]) / b
        center_cam[:, 1] = cur_height_cam
        cur_lidar_height = calib.rect_to_lidar(center_cam)[:, 2]
        mv_height = gt_boxes[:, 2] - gt_boxes[:, 5] / 2 - cur_lidar_height
        gt_boxes[:, 2] -= mv_height  # lidar view
        return gt_boxes, mv_height

    def add_sampled_boxes_to_scene(self, data_dict, sampled_gt_boxes, total_valid_sampled_dict):
        """Load the sampled objects' points, carve out space for them in the
        scene, and merge boxes/names/points into data_dict."""
        gt_boxes_mask = data_dict['gt_boxes_mask']
        gt_boxes = data_dict['gt_boxes'][gt_boxes_mask]
        gt_names = data_dict['gt_names'][gt_boxes_mask]
        points = data_dict['points']
        if self.sampler_cfg.get('USE_ROAD_PLANE', False):
            sampled_gt_boxes, mv_height = self.put_boxes_on_road_planes(
                sampled_gt_boxes, data_dict['road_plane'], data_dict['calib']
            )
            data_dict.pop('calib')
            data_dict.pop('road_plane')
        obj_points_list = []
        for idx, info in enumerate(total_valid_sampled_dict):
            file_path = self.root_path / info['path']
            obj_points = np.fromfile(str(file_path), dtype=np.float32).reshape(
                [-1, self.sampler_cfg.NUM_POINT_FEATURES])
            # Object points are stored box-relative; shift them to the box center.
            obj_points[:, :3] += info['box3d_lidar'][:3]
            if self.sampler_cfg.get('USE_ROAD_PLANE', False):
                # mv height
                obj_points[:, 2] -= mv_height[idx]
            obj_points_list.append(obj_points)
        obj_points = np.concatenate(obj_points_list, axis=0)
        sampled_gt_names = np.array([x['name'] for x in total_valid_sampled_dict])
        # Remove original scene points inside (slightly enlarged) sampled boxes.
        large_sampled_gt_boxes = box_utils.enlarge_box3d(
            sampled_gt_boxes[:, 0:7], extra_width=self.sampler_cfg.REMOVE_EXTRA_WIDTH
        )
        points = box_utils.remove_points_in_boxes3d(points, large_sampled_gt_boxes)
        points = np.concatenate([obj_points, points], axis=0)
        gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
        gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes], axis=0)
        data_dict['gt_boxes'] = gt_boxes
        data_dict['gt_names'] = gt_names
        data_dict['points'] = points
        return data_dict

    def __call__(self, data_dict):
        """
        Args:
            data_dict:
                gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
        Returns:
            data_dict with sampled objects added (subject to enable_prob and
            BEV-IoU collision rejection against existing and sampled boxes)
        """
        enable = np.random.choice([False, True], replace=False, p=[1.0 - self.enable_prob, self.enable_prob])
        if not enable:
            return data_dict
        gt_boxes = data_dict['gt_boxes']
        gt_names = data_dict['gt_names'].astype(str)
        existed_boxes = gt_boxes
        total_valid_sampled_dict = []
        for class_name, sample_group in self.sample_groups.items():
            if self.limit_whole_scene:
                # Cap total instances per class (existing + sampled) at the configured count.
                num_gt = np.sum(class_name == gt_names)
                sample_group['sample_num'] = str(int(self.sample_class_num[class_name]) - num_gt)
            if int(sample_group['sample_num']) > 0:
                sampled_dict = self.sample_with_fixed_number(class_name, sample_group)
                sampled_boxes = np.stack([x['box3d_lidar'] for x in sampled_dict], axis=0).astype(np.float32)
                if self.sampler_cfg.get('DATABASE_WITH_FAKELIDAR', False):
                    sampled_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(sampled_boxes)
                # iou1: against scene + already accepted boxes; iou2: among the candidates.
                iou1 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], existed_boxes[:, 0:7])
                iou2 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], sampled_boxes[:, 0:7])
                iou2[range(sampled_boxes.shape[0]), range(sampled_boxes.shape[0])] = 0
                iou1 = iou1 if iou1.shape[1] > 0 else iou2
                # Accept only candidates that overlap nothing at all.
                valid_mask = ((iou1.max(axis=1) + iou2.max(axis=1)) == 0).nonzero()[0]
                valid_sampled_dict = [sampled_dict[x] for x in valid_mask]
                valid_sampled_boxes = sampled_boxes[valid_mask]
                existed_boxes = np.concatenate((existed_boxes, valid_sampled_boxes), axis=0)
                total_valid_sampled_dict.extend(valid_sampled_dict)
        sampled_gt_boxes = existed_boxes[gt_boxes.shape[0]:, :]
        if total_valid_sampled_dict.__len__() > 0:
            data_dict = self.add_sampled_boxes_to_scene(data_dict, sampled_gt_boxes, total_valid_sampled_dict)
        data_dict.pop('gt_boxes_mask')
        return data_dict
| 8,518
| 40.354369
| 120
|
py
|
SASA
|
SASA-main/pcdet/datasets/nuscenes/nuscenes_utils.py
|
"""
The NuScenes data pre-processing and evaluation is modified from
https://github.com/traveller59/second.pytorch and https://github.com/poodarchu/Det3D
"""
import operator
from functools import reduce
from pathlib import Path
import numpy as np
import tqdm
from nuscenes.utils.data_classes import Box
from nuscenes.utils.geometry_utils import transform_matrix
from pyquaternion import Quaternion
# Collapse the fine-grained nuScenes category taxonomy onto the 10 detection
# classes used by the detection benchmark; classes not evaluated map to 'ignore'.
map_name_from_general_to_detection = {
    'human.pedestrian.adult': 'pedestrian',
    'human.pedestrian.child': 'pedestrian',
    'human.pedestrian.wheelchair': 'ignore',
    'human.pedestrian.stroller': 'ignore',
    'human.pedestrian.personal_mobility': 'ignore',
    'human.pedestrian.police_officer': 'pedestrian',
    'human.pedestrian.construction_worker': 'pedestrian',
    'animal': 'ignore',
    'vehicle.car': 'car',
    'vehicle.motorcycle': 'motorcycle',
    'vehicle.bicycle': 'bicycle',
    'vehicle.bus.bendy': 'bus',
    'vehicle.bus.rigid': 'bus',
    'vehicle.truck': 'truck',
    'vehicle.construction': 'construction_vehicle',
    'vehicle.emergency.ambulance': 'ignore',
    'vehicle.emergency.police': 'ignore',
    'vehicle.trailer': 'trailer',
    'movable_object.barrier': 'barrier',
    'movable_object.trafficcone': 'traffic_cone',
    'movable_object.pushable_pullable': 'ignore',
    'movable_object.debris': 'ignore',
    'static_object.bicycle_rack': 'ignore',
}
# Attribute frequency counts per detection class (presumably tallied over the
# training annotations — counts are hard-coded here). Used as a fallback in
# transform_det_annos_to_nusc_annos: when no attribute rule applies, the most
# frequent attribute of the class is submitted.
cls_attr_dist = {
    'barrier': {
        'cycle.with_rider': 0,
        'cycle.without_rider': 0,
        'pedestrian.moving': 0,
        'pedestrian.sitting_lying_down': 0,
        'pedestrian.standing': 0,
        'vehicle.moving': 0,
        'vehicle.parked': 0,
        'vehicle.stopped': 0,
    },
    'bicycle': {
        'cycle.with_rider': 2791,
        'cycle.without_rider': 8946,
        'pedestrian.moving': 0,
        'pedestrian.sitting_lying_down': 0,
        'pedestrian.standing': 0,
        'vehicle.moving': 0,
        'vehicle.parked': 0,
        'vehicle.stopped': 0,
    },
    'bus': {
        'cycle.with_rider': 0,
        'cycle.without_rider': 0,
        'pedestrian.moving': 0,
        'pedestrian.sitting_lying_down': 0,
        'pedestrian.standing': 0,
        'vehicle.moving': 9092,
        'vehicle.parked': 3294,
        'vehicle.stopped': 3881,
    },
    'car': {
        'cycle.with_rider': 0,
        'cycle.without_rider': 0,
        'pedestrian.moving': 0,
        'pedestrian.sitting_lying_down': 0,
        'pedestrian.standing': 0,
        'vehicle.moving': 114304,
        'vehicle.parked': 330133,
        'vehicle.stopped': 46898,
    },
    'construction_vehicle': {
        'cycle.with_rider': 0,
        'cycle.without_rider': 0,
        'pedestrian.moving': 0,
        'pedestrian.sitting_lying_down': 0,
        'pedestrian.standing': 0,
        'vehicle.moving': 882,
        'vehicle.parked': 11549,
        'vehicle.stopped': 2102,
    },
    'ignore': {
        'cycle.with_rider': 307,
        'cycle.without_rider': 73,
        'pedestrian.moving': 0,
        'pedestrian.sitting_lying_down': 0,
        'pedestrian.standing': 0,
        'vehicle.moving': 165,
        'vehicle.parked': 400,
        'vehicle.stopped': 102,
    },
    'motorcycle': {
        'cycle.with_rider': 4233,
        'cycle.without_rider': 8326,
        'pedestrian.moving': 0,
        'pedestrian.sitting_lying_down': 0,
        'pedestrian.standing': 0,
        'vehicle.moving': 0,
        'vehicle.parked': 0,
        'vehicle.stopped': 0,
    },
    'pedestrian': {
        'cycle.with_rider': 0,
        'cycle.without_rider': 0,
        'pedestrian.moving': 157444,
        'pedestrian.sitting_lying_down': 13939,
        'pedestrian.standing': 46530,
        'vehicle.moving': 0,
        'vehicle.parked': 0,
        'vehicle.stopped': 0,
    },
    'traffic_cone': {
        'cycle.with_rider': 0,
        'cycle.without_rider': 0,
        'pedestrian.moving': 0,
        'pedestrian.sitting_lying_down': 0,
        'pedestrian.standing': 0,
        'vehicle.moving': 0,
        'vehicle.parked': 0,
        'vehicle.stopped': 0,
    },
    'trailer': {
        'cycle.with_rider': 0,
        'cycle.without_rider': 0,
        'pedestrian.moving': 0,
        'pedestrian.sitting_lying_down': 0,
        'pedestrian.standing': 0,
        'vehicle.moving': 3421,
        'vehicle.parked': 19224,
        'vehicle.stopped': 1895,
    },
    'truck': {
        'cycle.with_rider': 0,
        'cycle.without_rider': 0,
        'pedestrian.moving': 0,
        'pedestrian.sitting_lying_down': 0,
        'pedestrian.standing': 0,
        'vehicle.moving': 21339,
        'vehicle.parked': 55626,
        'vehicle.stopped': 11097,
    },
}
def get_available_scenes(nusc):
    """
    Return the scenes whose first lidar file actually exists on disk.

    Only the first LIDAR_TOP sample_data of each scene is checked: if that file
    is missing (e.g. a partial download), the whole scene is skipped.

    Args:
        nusc: NuScenes API instance
    Returns:
        available_scenes: list of scene records with data present
    """
    available_scenes = []
    print('total scene num:', len(nusc.scene))
    for scene in nusc.scene:
        scene_token = scene['token']
        scene_rec = nusc.get('scene', scene_token)
        sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
        sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
        # The original while-loop broke out of both branches on the first
        # iteration (the frame-advance code was commented out), so only this
        # single lidar path was ever inspected — check it directly.
        lidar_path, _, _ = nusc.get_sample_data(sd_rec['token'])
        if not Path(lidar_path).exists():
            continue
        available_scenes.append(scene)
    print('exist scene num:', len(available_scenes))
    return available_scenes
def get_sample_data(nusc, sample_data_token, selected_anntokens=None):
    """
    Return the data path and all annotations related to one sample_data record,
    with every box transformed into that sensor's coordinate frame.

    Args:
        nusc: NuScenes API instance
        sample_data_token: Sample_data token.
        selected_anntokens: If provided only return the selected annotations.
    Returns:
        (data_path, box_list, cam_intrinsic)
    """
    # Retrieve sensor & pose records for this sample_data.
    sd_record = nusc.get('sample_data', sample_data_token)
    cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
    sensor_record = nusc.get('sensor', cs_record['sensor_token'])
    pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
    data_path = nusc.get_sample_data_path(sample_data_token)
    if sensor_record['modality'] == 'camera':
        cam_intrinsic = np.array(cs_record['camera_intrinsic'])
        imsize = (sd_record['width'], sd_record['height'])
    else:
        cam_intrinsic = imsize = None
    # Gather the annotation boxes (all, or only the requested ones).
    if selected_anntokens is None:
        boxes = nusc.get_boxes(sample_data_token)
    else:
        boxes = [nusc.get_box(token) for token in selected_anntokens]
    # Chain the coordinate transforms: global -> ego vehicle -> sensor frame.
    box_list = []
    for box in boxes:
        box.velocity = nusc.box_velocity(box.token)
        # global -> ego vehicle frame
        box.translate(-np.array(pose_record['translation']))
        box.rotate(Quaternion(pose_record['rotation']).inverse)
        # ego vehicle -> sensor frame
        box.translate(-np.array(cs_record['translation']))
        box.rotate(Quaternion(cs_record['rotation']).inverse)
        box_list.append(box)
    return data_path, box_list, cam_intrinsic
def quaternion_yaw(q: Quaternion) -> float:
    """
    Calculate the yaw angle from a quaternion.
    Note that this only works for a quaternion that represents a box in lidar or global coordinate frame.
    It does not work for a box in the camera frame.
    :param q: Quaternion of interest.
    :return: Yaw angle in radians.
    """
    # Rotate the unit x-axis by q and read its direction in the xy plane.
    rotated_x = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
    return np.arctan2(rotated_x[1], rotated_x[0])
def fill_trainval_infos(data_path, nusc, train_scenes, val_scenes, test=False, max_sweeps=10):
    """
    Build a per-keyframe info dict (lidar path, past sweeps with transforms,
    camera path, and GT annotations) and route it to train or val by scene token.

    Args:
        data_path: dataset root; stored file paths are made relative to it
        nusc: NuScenes API instance
        train_scenes: set of scene tokens in the training split
        val_scenes: set of scene tokens in the validation split
        test: if True, no ground-truth annotations are attached
        max_sweeps: total sweeps per sample (max_sweeps - 1 past sweeps stored)
    Returns:
        (train_nusc_infos, val_nusc_infos)
    """
    train_nusc_infos = []
    val_nusc_infos = []
    progress_bar = tqdm.tqdm(total=len(nusc.sample), desc='create_info', dynamic_ncols=True)
    ref_chan = 'LIDAR_TOP'  # The radar channel from which we track back n sweeps to aggregate the point cloud.
    chan = 'LIDAR_TOP'  # The reference channel of the current sample_rec that the point clouds are mapped to.
    for index, sample in enumerate(nusc.sample):
        progress_bar.update()
        # Reference (keyframe) lidar record and its calibration / ego pose.
        ref_sd_token = sample['data'][ref_chan]
        ref_sd_rec = nusc.get('sample_data', ref_sd_token)
        ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token'])
        ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
        ref_time = 1e-6 * ref_sd_rec['timestamp']  # microseconds -> seconds
        ref_lidar_path, ref_boxes, _ = get_sample_data(nusc, ref_sd_token)
        ref_cam_front_token = sample['data']['CAM_FRONT']
        ref_cam_path, _, ref_cam_intrinsic = nusc.get_sample_data(ref_cam_front_token)
        # Homogeneous transform from ego car frame to reference frame
        ref_from_car = transform_matrix(
            ref_cs_rec['translation'], Quaternion(ref_cs_rec['rotation']), inverse=True
        )
        # Homogeneous transformation matrix from global to _current_ ego car frame
        car_from_global = transform_matrix(
            ref_pose_rec['translation'], Quaternion(ref_pose_rec['rotation']), inverse=True,
        )
        info = {
            'lidar_path': Path(ref_lidar_path).relative_to(data_path).__str__(),
            'cam_front_path': Path(ref_cam_path).relative_to(data_path).__str__(),
            'cam_intrinsic': ref_cam_intrinsic,
            'token': sample['token'],
            'sweeps': [],
            'ref_from_car': ref_from_car,
            'car_from_global': car_from_global,
            'timestamp': ref_time,
        }
        # Walk backwards through previous (non-keyframe) sweeps, collecting
        # exactly max_sweeps - 1 entries (padding when history runs out).
        sample_data_token = sample['data'][chan]
        curr_sd_rec = nusc.get('sample_data', sample_data_token)
        sweeps = []
        while len(sweeps) < max_sweeps - 1:
            if curr_sd_rec['prev'] == '':
                # No earlier sweep: pad with the reference frame itself first,
                # then repeat the last collected entry.
                if len(sweeps) == 0:
                    sweep = {
                        'lidar_path': Path(ref_lidar_path).relative_to(data_path).__str__(),
                        'sample_data_token': curr_sd_rec['token'],
                        'transform_matrix': None,
                        'time_lag': curr_sd_rec['timestamp'] * 0,
                    }
                    sweeps.append(sweep)
                else:
                    sweeps.append(sweeps[-1])
            else:
                curr_sd_rec = nusc.get('sample_data', curr_sd_rec['prev'])
                # Get past pose
                current_pose_rec = nusc.get('ego_pose', curr_sd_rec['ego_pose_token'])
                global_from_car = transform_matrix(
                    current_pose_rec['translation'], Quaternion(current_pose_rec['rotation']), inverse=False,
                )
                # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
                current_cs_rec = nusc.get(
                    'calibrated_sensor', curr_sd_rec['calibrated_sensor_token']
                )
                car_from_current = transform_matrix(
                    current_cs_rec['translation'], Quaternion(current_cs_rec['rotation']), inverse=False,
                )
                # Composed transform: past sensor frame -> reference sensor frame.
                tm = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])
                lidar_path = nusc.get_sample_data_path(curr_sd_rec['token'])
                time_lag = ref_time - 1e-6 * curr_sd_rec['timestamp']
                sweep = {
                    'lidar_path': Path(lidar_path).relative_to(data_path).__str__(),
                    'sample_data_token': curr_sd_rec['token'],
                    'transform_matrix': tm,
                    'global_from_car': global_from_car,
                    'car_from_current': car_from_current,
                    'time_lag': time_lag,
                }
                sweeps.append(sweep)
        info['sweeps'] = sweeps
        assert len(info['sweeps']) == max_sweeps - 1, \
            f"sweep {curr_sd_rec['token']} only has {len(info['sweeps'])} sweeps, " \
            f"you should duplicate to sweep num {max_sweeps - 1}"
        if not test:
            annotations = [nusc.get('sample_annotation', token) for token in sample['anns']]
            # the filtering gives 0.5~1 map improvement
            num_lidar_pts = np.array([anno['num_lidar_pts'] for anno in annotations])
            num_radar_pts = np.array([anno['num_radar_pts'] for anno in annotations])
            mask = (num_lidar_pts + num_radar_pts > 0)
            locs = np.array([b.center for b in ref_boxes]).reshape(-1, 3)
            dims = np.array([b.wlh for b in ref_boxes]).reshape(-1, 3)[:, [1, 0, 2]]  # wlh == > dxdydz (lwh)
            velocity = np.array([b.velocity for b in ref_boxes]).reshape(-1, 3)
            rots = np.array([quaternion_yaw(b.orientation) for b in ref_boxes]).reshape(-1, 1)
            names = np.array([b.name for b in ref_boxes])
            tokens = np.array([b.token for b in ref_boxes])
            gt_boxes = np.concatenate([locs, dims, rots, velocity[:, :2]], axis=1)
            assert len(annotations) == len(gt_boxes) == len(velocity)
            info['gt_boxes'] = gt_boxes[mask, :]
            info['gt_boxes_velocity'] = velocity[mask, :]
            info['gt_names'] = np.array([map_name_from_general_to_detection[name] for name in names])[mask]
            info['gt_boxes_token'] = tokens[mask]
            info['num_lidar_pts'] = num_lidar_pts[mask]
            info['num_radar_pts'] = num_radar_pts[mask]
        if sample['scene_token'] in train_scenes:
            train_nusc_infos.append(info)
        else:
            val_nusc_infos.append(info)
    progress_bar.close()
    return train_nusc_infos, val_nusc_infos
def boxes_lidar_to_nusenes(det_info):
    """
    Convert predicted lidar-frame boxes into nuScenes ``Box`` objects.

    Args:
        det_info: dict with 'boxes_lidar' (N, 7 or 9), 'score' (N), 'pred_labels' (N)
    Returns:
        list of Box (size in wlh order, yaw encoded as a quaternion about +z)
    """
    boxes3d = det_info['boxes_lidar']
    scores = det_info['score']
    labels = det_info['pred_labels']
    has_velocity = boxes3d.shape[1] == 9

    box_list = []
    for idx in range(boxes3d.shape[0]):
        yaw_quat = Quaternion(axis=[0, 0, 1], radians=boxes3d[idx, 6])
        if has_velocity:
            velocity = (*boxes3d[idx, 7:9], 0.0)
        else:
            velocity = (0.0, 0.0, 0.0)
        box_list.append(Box(
            boxes3d[idx, :3],
            boxes3d[idx, [4, 3, 5]],  # dx, dy, dz -> w, l, h
            yaw_quat, label=labels[idx], score=scores[idx], velocity=velocity,
        ))
    return box_list
def lidar_nusc_box_to_global(nusc, boxes, sample_token):
    """
    Transform boxes from a sample's LIDAR_TOP sensor frame into the global frame.

    Args:
        nusc: NuScenes API instance
        boxes: list of nuScenes Box in the lidar sensor frame
        sample_token: token of the sample the boxes belong to
    Returns:
        list of the same Box objects, now in global coordinates (mutated in place)
    """
    s_record = nusc.get('sample', sample_token)
    sample_data_token = s_record['data']['LIDAR_TOP']
    sd_record = nusc.get('sample_data', sample_data_token)
    cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
    pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
    # (Removed unused lookups of the sensor record and the sample-data path.)
    box_list = []
    for box in boxes:
        # sensor frame -> ego vehicle frame
        box.rotate(Quaternion(cs_record['rotation']))
        box.translate(np.array(cs_record['translation']))
        # ego vehicle frame -> global frame
        box.rotate(Quaternion(pose_record['rotation']))
        box.translate(np.array(pose_record['translation']))
        box_list.append(box)
    return box_list
def transform_det_annos_to_nusc_annos(det_annos, nusc):
    """
    Convert per-frame detection annos into the nuScenes submission format.

    Args:
        det_annos: list of detection dicts ('name', 'metadata', boxes/scores/labels)
        nusc: NuScenes API instance
    Returns:
        dict with 'results' mapping sample_token -> list of nusc annotation dicts
        ('meta' is left for the caller to fill in)
    """
    nusc_annos = {
        'results': {},
        'meta': None,
    }

    for det in det_annos:
        sample_token = det['metadata']['token']
        # Lidar-frame boxes -> nuScenes Box objects -> global frame.
        global_boxes = lidar_nusc_box_to_global(
            nusc=nusc, boxes=boxes_lidar_to_nusenes(det), sample_token=sample_token
        )

        annos = []
        for idx, box in enumerate(global_boxes):
            name = det['name'][idx]
            speed = np.sqrt(box.velocity[0] ** 2 + box.velocity[1] ** 2)
            if speed > 0.2:
                # Moving object: assign a motion attribute where one applies.
                if name in ['car', 'construction_vehicle', 'bus', 'truck', 'trailer']:
                    attr = 'vehicle.moving'
                elif name in ['bicycle', 'motorcycle']:
                    attr = 'cycle.with_rider'
                else:
                    attr = None
            else:
                if name in ['pedestrian']:
                    attr = 'pedestrian.standing'
                elif name in ['bus']:
                    attr = 'vehicle.stopped'
                else:
                    attr = None
            if attr is None:
                # Fall back to the class's most frequent attribute overall.
                attr = max(cls_attr_dist[name].items(), key=operator.itemgetter(1))[0]
            annos.append({
                'sample_token': sample_token,
                'translation': box.center.tolist(),
                'size': box.wlh.tolist(),
                'rotation': box.orientation.elements.tolist(),
                'velocity': box.velocity[:2].tolist(),
                'detection_name': name,
                'detection_score': box.score,
                'attribute_name': attr
            })

        nusc_annos['results'].update({sample_token: annos})

    return nusc_annos
def format_nuscene_results(metrics, class_names, version='default'):
    """
    Render nuScenes evaluation metrics as a human-readable report.

    Args:
        metrics: metrics_summary dict produced by the nuScenes evaluator
        class_names: class names to report, in order
        version: label printed in the report header
    Returns:
        (result, details): report string and a flat dict of the scalar metrics
    """
    parts = ['----------------Nuscene %s results-----------------\n' % version]
    for name in class_names:
        aps = metrics['label_aps'][name]
        tp_errors = metrics['label_tp_errors'][name]
        threshs = ', '.join(aps.keys())
        # 'trans_err' -> 'trans', etc.
        err_name = ', '.join(key.split('_')[0] for key in tp_errors.keys())
        parts.append(f'***{name} error@{err_name} | AP@{threshs}\n')
        parts.append(', '.join('%.2f' % err for err in tp_errors.values()) + ' | ')
        parts.append(', '.join('%.2f' % (ap * 100) for ap in aps.values()))
        parts.append(f" | mean AP: {metrics['mean_dist_aps'][name]}")
        parts.append('\n')
    parts.append('--------------average performance-------------\n')

    details = {}
    for key, val in metrics['tp_errors'].items():
        parts.append('%s:\t %.4f\n' % (key, val))
        details[key] = val

    parts.append('mAP:\t %.4f\n' % metrics['mean_ap'])
    parts.append('NDS:\t %.4f\n' % metrics['nd_score'])
    details['mAP'] = metrics['mean_ap']
    details['NDS'] = metrics['nd_score']
    return ''.join(parts), details
| 18,474
| 35.876248
| 111
|
py
|
SASA
|
SASA-main/pcdet/datasets/nuscenes/nuscenes_dataset.py
|
import copy
import pickle
from pathlib import Path
import numpy as np
from tqdm import tqdm
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils
from ..dataset import DatasetTemplate
class NuScenesDataset(DatasetTemplate):
    """
    NuScenes dataset wrapper: loads precomputed info pickles, aggregates lidar
    sweeps per keyframe, and evaluates via the official nuScenes devkit.
    """
    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
        # Dataset root is <root>/<version>, e.g. data/nuscenes/v1.0-trainval.
        root_path = (root_path if root_path is not None else Path(dataset_cfg.DATA_PATH)) / dataset_cfg.VERSION
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.infos = []
        self.include_nuscenes_data(self.mode)
        if self.training and self.dataset_cfg.get('BALANCED_RESAMPLING', False):
            self.infos = self.balanced_infos_resampling(self.infos)
    def include_nuscenes_data(self, mode):
        """Load the info pickles configured for *mode* into self.infos (missing files are skipped)."""
        self.logger.info('Loading NuScenes dataset')
        nuscenes_infos = []
        for info_path in self.dataset_cfg.INFO_PATH[mode]:
            info_path = self.root_path / info_path
            if not info_path.exists():
                continue
            with open(info_path, 'rb') as f:
                infos = pickle.load(f)
            nuscenes_infos.extend(infos)
        self.infos.extend(nuscenes_infos)
        self.logger.info('Total samples for NuScenes dataset: %d' % (len(nuscenes_infos)))
    def balanced_infos_resampling(self, infos):
        """
        Class-balanced sampling of nuScenes dataset from https://arxiv.org/abs/1908.09492
        """
        if self.class_names is None:
            return infos
        # Bucket each info under every class it contains (one info can land in several buckets).
        cls_infos = {name: [] for name in self.class_names}
        for info in infos:
            for name in set(info['gt_names']):
                if name in self.class_names:
                    cls_infos[name].append(info)
        duplicated_samples = sum([len(v) for _, v in cls_infos.items()])
        cls_dist = {k: len(v) / duplicated_samples for k, v in cls_infos.items()}
        sampled_infos = []
        # Resample each bucket (with replacement) so every class contributes
        # roughly 1/num_classes of the total.
        frac = 1.0 / len(self.class_names)
        ratios = [frac / v for v in cls_dist.values()]
        for cur_cls_infos, ratio in zip(list(cls_infos.values()), ratios):
            sampled_infos += np.random.choice(
                cur_cls_infos, int(len(cur_cls_infos) * ratio)
            ).tolist()
        self.logger.info('Total samples after balanced resampling: %s' % (len(sampled_infos)))
        # Recompute the post-resampling class distribution; not used further
        # here — presumably kept as a debugging aid.
        cls_infos_new = {name: [] for name in self.class_names}
        for info in sampled_infos:
            for name in set(info['gt_names']):
                if name in self.class_names:
                    cls_infos_new[name].append(info)
        cls_dist_new = {k: len(v) / len(sampled_infos) for k, v in cls_infos_new.items()}
        return sampled_infos
    def get_sweep(self, sweep_info):
        """
        Load one past sweep, drop near-ego returns, and transform it into the
        reference keyframe's sensor frame.

        Returns:
            (points (N, 4), times (N, 1)): transformed points and their time lag
        """
        def remove_ego_points(points, center_radius=1.0):
            # Discard points inside a center_radius square around the sensor origin.
            mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius))
            return points[mask]
        lidar_path = self.root_path / sweep_info['lidar_path']
        points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
        points_sweep = remove_ego_points(points_sweep).T
        if sweep_info['transform_matrix'] is not None:
            num_points = points_sweep.shape[1]
            # Homogeneous transform of xyz into the reference sensor frame.
            points_sweep[:3, :] = sweep_info['transform_matrix'].dot(
                np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]
        cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1]))
        return points_sweep.T, cur_times.T
    def get_lidar_with_sweeps(self, index, max_sweeps=1):
        """
        Load the keyframe cloud plus (max_sweeps - 1) randomly chosen past
        sweeps; returns (N, 5) points [x, y, z, intensity, time_lag].
        """
        info = self.infos[index]
        lidar_path = self.root_path / info['lidar_path']
        points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
        sweep_points_list = [points]
        sweep_times_list = [np.zeros((points.shape[0], 1))]
        for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
            points_sweep, times_sweep = self.get_sweep(info['sweeps'][k])
            sweep_points_list.append(points_sweep)
            sweep_times_list.append(times_sweep)
        points = np.concatenate(sweep_points_list, axis=0)
        times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)
        points = np.concatenate((points, times), axis=1)
        return points
    def __len__(self):
        # When all epochs are merged into one pass, repeat the info list.
        if self._merge_all_iters_to_one_epoch:
            return len(self.infos) * self.total_epochs
        return len(self.infos)
    def __getitem__(self, index):
        """Build one sample dict (points, optional gt boxes/names, metadata) and run prepare_data."""
        if self._merge_all_iters_to_one_epoch:
            index = index % len(self.infos)
        info = copy.deepcopy(self.infos[index])
        points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)
        input_dict = {
            'points': points,
            'frame_id': Path(info['lidar_path']).stem,
            'metadata': {'token': info['token']}
        }
        if 'gt_boxes' in info:
            # Optionally drop GT boxes with fewer lidar points than the threshold.
            if self.dataset_cfg.get('FILTER_MIN_POINTS_IN_GT', False):
                mask = (info['num_lidar_pts'] > self.dataset_cfg.FILTER_MIN_POINTS_IN_GT - 1)
            else:
                mask = None
            input_dict.update({
                'gt_names': info['gt_names'] if mask is None else info['gt_names'][mask],
                'gt_boxes': info['gt_boxes'] if mask is None else info['gt_boxes'][mask]
            })
        data_dict = self.prepare_data(data_dict=input_dict)
        if self.dataset_cfg.get('SET_NAN_VELOCITY_TO_ZEROS', False):
            gt_boxes = data_dict['gt_boxes']
            gt_boxes[np.isnan(gt_boxes)] = 0
            data_dict['gt_boxes'] = gt_boxes
        if not self.dataset_cfg.PRED_VELOCITY and 'gt_boxes' in data_dict:
            # Drop the two velocity columns, keeping [x, y, z, dx, dy, dz, heading, class].
            data_dict['gt_boxes'] = data_dict['gt_boxes'][:, [0, 1, 2, 3, 4, 5, 6, -1]]
        return data_dict
    @staticmethod
    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
        """
        Move per-frame predictions to CPU numpy dicts with a fixed key set.

        Args:
            batch_dict:
                frame_id:
            pred_dicts: list of pred_dicts
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names:
            output_path:
        Returns:
            annos: list of per-frame prediction dicts
        """
        def get_template_prediction(num_samples):
            # Empty-prediction skeleton so downstream code always sees the same keys.
            ret_dict = {
                'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
                'boxes_lidar': np.zeros([num_samples, 7]), 'pred_labels': np.zeros(num_samples)
            }
            return ret_dict
        def generate_single_sample_dict(box_dict):
            pred_scores = box_dict['pred_scores'].cpu().numpy()
            pred_boxes = box_dict['pred_boxes'].cpu().numpy()
            pred_labels = box_dict['pred_labels'].cpu().numpy()
            pred_dict = get_template_prediction(pred_scores.shape[0])
            if pred_scores.shape[0] == 0:
                return pred_dict
            # Labels are 1-based; map to class names.
            pred_dict['name'] = np.array(class_names)[pred_labels - 1]
            pred_dict['score'] = pred_scores
            pred_dict['boxes_lidar'] = pred_boxes
            pred_dict['pred_labels'] = pred_labels
            return pred_dict
        annos = []
        for index, box_dict in enumerate(pred_dicts):
            single_pred_dict = generate_single_sample_dict(box_dict)
            single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
            single_pred_dict['metadata'] = batch_dict['metadata'][index]
            annos.append(single_pred_dict)
        return annos
    def evaluation(self, det_annos, class_names, **kwargs):
        """Run the official nuScenes detection evaluation; returns (report_str, metrics_dict)."""
        import json
        from nuscenes.nuscenes import NuScenes
        from . import nuscenes_utils
        nusc = NuScenes(version=self.dataset_cfg.VERSION, dataroot=str(self.root_path), verbose=True)
        nusc_annos = nuscenes_utils.transform_det_annos_to_nusc_annos(det_annos, nusc)
        nusc_annos['meta'] = {
            'use_camera': False,
            'use_lidar': True,
            'use_radar': False,
            'use_map': False,
            'use_external': False,
        }
        output_path = Path(kwargs['output_path'])
        output_path.mkdir(exist_ok=True, parents=True)
        res_path = str(output_path / 'results_nusc.json')
        with open(res_path, 'w') as f:
            json.dump(nusc_annos, f)
        self.logger.info(f'The predictions of NuScenes have been saved to {res_path}')
        if self.dataset_cfg.VERSION == 'v1.0-test':
            return 'No ground-truth annotations for evaluation', {}
        from nuscenes.eval.detection.config import config_factory
        from nuscenes.eval.detection.evaluate import NuScenesEval
        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
            'v1.0-test': 'test'
        }
        # The eval-config name changed across devkit versions; try the newer one first.
        try:
            eval_version = 'detection_cvpr_2019'
            eval_config = config_factory(eval_version)
        except:
            eval_version = 'cvpr_2019'
            eval_config = config_factory(eval_version)
        nusc_eval = NuScenesEval(
            nusc,
            config=eval_config,
            result_path=res_path,
            eval_set=eval_set_map[self.dataset_cfg.VERSION],
            output_dir=str(output_path),
            verbose=True,
        )
        metrics_summary = nusc_eval.main(plot_examples=0, render_curves=False)
        with open(output_path / 'metrics_summary.json', 'r') as f:
            metrics = json.load(f)
        result_str, result_dict = nuscenes_utils.format_nuscene_results(metrics, self.class_names, version=eval_version)
        return result_str, result_dict
    def create_groundtruth_database(self, used_classes=None, max_sweeps=10):
        """
        Crop per-object point clouds out of every frame and save them (plus a
        dbinfos pickle) for GT-sampling augmentation. Requires CUDA.
        """
        import torch
        database_save_path = self.root_path / f'gt_database_{max_sweeps}sweeps_withvelo'
        db_info_save_path = self.root_path / f'nuscenes_dbinfos_{max_sweeps}sweeps_withvelo.pkl'
        database_save_path.mkdir(parents=True, exist_ok=True)
        all_db_infos = {}
        for idx in tqdm(range(len(self.infos))):
            sample_idx = idx
            info = self.infos[idx]
            points = self.get_lidar_with_sweeps(idx, max_sweeps=max_sweeps)
            gt_boxes = info['gt_boxes']
            gt_names = info['gt_names']
            # Assign every point the index of the box containing it (-1 if none).
            box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(),
                torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda()
            ).long().squeeze(dim=0).cpu().numpy()
            for i in range(gt_boxes.shape[0]):
                filename = '%s_%s_%d.bin' % (sample_idx, gt_names[i], i)
                filepath = database_save_path / filename
                gt_points = points[box_idxs_of_pts == i]
                # Store points relative to the box center.
                gt_points[:, :3] -= gt_boxes[i, :3]
                # NOTE(review): file opened in text mode ('w') before tofile();
                # fine on POSIX but would corrupt the binary on Windows — confirm.
                with open(filepath, 'w') as f:
                    gt_points.tofile(f)
                if (used_classes is None) or gt_names[i] in used_classes:
                    db_path = str(filepath.relative_to(self.root_path))  # gt_database/xxxxx.bin
                    db_info = {'name': gt_names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
                               'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]}
                    if gt_names[i] in all_db_infos:
                        all_db_infos[gt_names[i]].append(db_info)
                    else:
                        all_db_infos[gt_names[i]] = [db_info]
        for k, v in all_db_infos.items():
            print('Database %s: %d' % (k, len(v)))
        with open(db_info_save_path, 'wb') as f:
            pickle.dump(all_db_infos, f)
def create_nuscenes_info(version, data_path, save_path, max_sweeps=10):
    """
    Build and pickle the nuScenes info files (train/val, or test) for one version.

    Args:
        version: one of 'v1.0-trainval', 'v1.0-test', 'v1.0-mini'
        data_path: root directory containing the nuScenes version folders
        save_path: directory the pickle files are written under
        max_sweeps: number of lidar sweeps aggregated per sample
    """
    from nuscenes.nuscenes import NuScenes
    from nuscenes.utils import splits
    from . import nuscenes_utils
    data_path = data_path / version
    save_path = save_path / version
    assert version in ['v1.0-trainval', 'v1.0-test', 'v1.0-mini']

    # Pick the official scene splits for this version.
    if version == 'v1.0-trainval':
        train_scenes, val_scenes = splits.train, splits.val
    elif version == 'v1.0-test':
        train_scenes, val_scenes = splits.test, []
    elif version == 'v1.0-mini':
        train_scenes, val_scenes = splits.mini_train, splits.mini_val
    else:
        raise NotImplementedError

    nusc = NuScenes(version=version, dataroot=data_path, verbose=True)
    available_scenes = nuscenes_utils.get_available_scenes(nusc)
    available_scene_names = [s['name'] for s in available_scenes]
    # Keep only split scenes whose data is present, then map names -> tokens.
    train_scenes = [name for name in train_scenes if name in available_scene_names]
    val_scenes = [name for name in val_scenes if name in available_scene_names]
    train_scenes = set(available_scenes[available_scene_names.index(name)]['token'] for name in train_scenes)
    val_scenes = set(available_scenes[available_scene_names.index(name)]['token'] for name in val_scenes)

    print('%s: train scene(%d), val scene(%d)' % (version, len(train_scenes), len(val_scenes)))
    train_nusc_infos, val_nusc_infos = nuscenes_utils.fill_trainval_infos(
        data_path=data_path, nusc=nusc, train_scenes=train_scenes, val_scenes=val_scenes,
        test='test' in version, max_sweeps=max_sweeps
    )

    if version == 'v1.0-test':
        # For the test split, everything ends up in train_nusc_infos.
        print('test sample: %d' % len(train_nusc_infos))
        with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_test.pkl', 'wb') as f:
            pickle.dump(train_nusc_infos, f)
    else:
        print('train sample: %d, val sample: %d' % (len(train_nusc_infos), len(val_nusc_infos)))
        with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_train.pkl', 'wb') as f:
            pickle.dump(train_nusc_infos, f)
        with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_val.pkl', 'wb') as f:
            pickle.dump(val_nusc_infos, f)
if __name__ == '__main__':
    # CLI entry point: build the nuScenes info pickles and the GT database.
    import yaml
    import argparse
    from pathlib import Path
    from easydict import EasyDict

    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset')
    parser.add_argument('--func', type=str, default='create_nuscenes_infos', help='')
    parser.add_argument('--version', type=str, default='v1.0-trainval', help='')
    args = parser.parse_args()

    if args.func == 'create_nuscenes_infos':
        # yaml.load without a Loader is rejected by PyYAML >= 6 and unsafe on
        # older versions; safe_load parses plain config YAML. The context
        # manager also closes the previously-leaked file handle.
        with open(args.cfg_file) as cfg_f:
            dataset_cfg = EasyDict(yaml.safe_load(cfg_f))
        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        dataset_cfg.VERSION = args.version
        create_nuscenes_info(
            version=dataset_cfg.VERSION,
            data_path=ROOT_DIR / 'data' / 'nuscenes',
            save_path=ROOT_DIR / 'data' / 'nuscenes',
            max_sweeps=dataset_cfg.MAX_SWEEPS,
        )
        nuscenes_dataset = NuScenesDataset(
            dataset_cfg=dataset_cfg, class_names=None,
            root_path=ROOT_DIR / 'data' / 'nuscenes',
            logger=common_utils.create_logger(), training=True
        )
        nuscenes_dataset.create_groundtruth_database(max_sweeps=dataset_cfg.MAX_SWEEPS)
| 15,322
| 39.861333
| 120
|
py
|
SASA
|
SASA-main/pcdet/datasets/kitti/kitti_dataset.py
|
import copy
import pickle
import numpy as np
from skimage import io
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, calibration_kitti, common_utils, object3d_kitti
from ..dataset import DatasetTemplate
class KittiDataset(DatasetTemplate):
    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
        """
        KITTI 3D detection dataset.

        Args:
            root_path: dataset root directory (the template resolves the default)
            dataset_cfg: dataset configuration (EasyDict)
            class_names: class names to train/evaluate on
            training: whether this instance serves the training split
            logger: optional logger
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
        )
        self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
        # The 'test' split lives under 'testing/', everything else under 'training/'.
        self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')
        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
        self.kitti_infos = []
        self.include_kitti_data(self.mode)
def include_kitti_data(self, mode):
if self.logger is not None:
self.logger.info('Loading KITTI dataset')
kitti_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
kitti_infos.extend(infos)
self.kitti_infos.extend(kitti_infos)
if self.logger is not None:
self.logger.info('Total samples for KITTI dataset: %d' % (len(kitti_infos)))
def set_split(self, split):
super().__init__(
dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
)
self.split = split
self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
def get_lidar(self, idx):
lidar_file = self.root_split_path / 'velodyne' / ('%s.bin' % idx)
assert lidar_file.exists()
return np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
def get_image_shape(self, idx):
img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
assert img_file.exists()
return np.array(io.imread(img_file).shape[:2], dtype=np.int32)
def get_label(self, idx):
label_file = self.root_split_path / 'label_2' / ('%s.txt' % idx)
assert label_file.exists()
return object3d_kitti.get_objects_from_label(label_file)
def get_calib(self, idx):
calib_file = self.root_split_path / 'calib' / ('%s.txt' % idx)
assert calib_file.exists()
return calibration_kitti.Calibration(calib_file)
def get_road_plane(self, idx):
plane_file = self.root_split_path / 'planes' / ('%s.txt' % idx)
if not plane_file.exists():
return None
with open(plane_file, 'r') as f:
lines = f.readlines()
lines = [float(i) for i in lines[3].split()]
plane = np.asarray(lines)
# Ensure normal is always facing up, this is in the rectified camera coordinate
if plane[1] > 0:
plane = -plane
norm = np.linalg.norm(plane[0:3])
plane = plane / norm
return plane
@staticmethod
def get_fov_flag(pts_rect, img_shape, calib):
"""
Args:
pts_rect:
img_shape:
calib:
Returns:
"""
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
    def get_infos(self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None):
        """Build per-sample info dicts (point cloud / image / calib / annos).

        Args:
            num_workers: thread-pool size used to process samples.
            has_label: also parse the label file into an 'annos' entry.
            count_inside_pts: additionally count LiDAR points inside each GT
                box, restricted to points inside the camera field of view.
            sample_id_list: explicit sample ids; defaults to the split's list.

        Returns:
            list of info dicts, one per sample id.
        """
        import concurrent.futures as futures
        def process_single_scene(sample_idx):
            print('%s sample_idx: %s' % (self.split, sample_idx))
            info = {}
            pc_info = {'num_features': 4, 'lidar_idx': sample_idx}
            info['point_cloud'] = pc_info
            image_info = {'image_idx': sample_idx, 'image_shape': self.get_image_shape(sample_idx)}
            info['image'] = image_info
            calib = self.get_calib(sample_idx)
            # Extend the 3x4 / 3x3 calibration matrices to homogeneous 4x4 form.
            P2 = np.concatenate([calib.P2, np.array([[0., 0., 0., 1.]])], axis=0)
            R0_4x4 = np.zeros([4, 4], dtype=calib.R0.dtype)
            R0_4x4[3, 3] = 1.
            R0_4x4[:3, :3] = calib.R0
            V2C_4x4 = np.concatenate([calib.V2C, np.array([[0., 0., 0., 1.]])], axis=0)
            calib_info = {'P2': P2, 'R0_rect': R0_4x4, 'Tr_velo_to_cam': V2C_4x4}
            info['calib'] = calib_info
            if has_label:
                obj_list = self.get_label(sample_idx)
                annotations = {}
                annotations['name'] = np.array([obj.cls_type for obj in obj_list])
                annotations['truncated'] = np.array([obj.truncation for obj in obj_list])
                annotations['occluded'] = np.array([obj.occlusion for obj in obj_list])
                annotations['alpha'] = np.array([obj.alpha for obj in obj_list])
                annotations['bbox'] = np.concatenate([obj.box2d.reshape(1, 4) for obj in obj_list], axis=0)
                annotations['dimensions'] = np.array([[obj.l, obj.h, obj.w] for obj in obj_list])  # lhw(camera) format
                annotations['location'] = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
                annotations['rotation_y'] = np.array([obj.ry for obj in obj_list])
                annotations['score'] = np.array([obj.score for obj in obj_list])
                annotations['difficulty'] = np.array([obj.level for obj in obj_list], np.int32)
                # Real objects come first in the label file; trailing 'DontCare'
                # entries get index -1.
                num_objects = len([obj.cls_type for obj in obj_list if obj.cls_type != 'DontCare'])
                num_gt = len(annotations['name'])
                index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
                annotations['index'] = np.array(index, dtype=np.int32)
                loc = annotations['location'][:num_objects]
                dims = annotations['dimensions'][:num_objects]
                rots = annotations['rotation_y'][:num_objects]
                loc_lidar = calib.rect_to_lidar(loc)
                l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
                # Shift the bottom-anchored camera location up by h/2 so the
                # lidar box is centered.
                loc_lidar[:, 2] += h[:, 0] / 2
                gt_boxes_lidar = np.concatenate([loc_lidar, l, w, h, -(np.pi / 2 + rots[..., np.newaxis])], axis=1)
                annotations['gt_boxes_lidar'] = gt_boxes_lidar
                info['annos'] = annotations
                if count_inside_pts:
                    points = self.get_lidar(sample_idx)
                    calib = self.get_calib(sample_idx)
                    pts_rect = calib.lidar_to_rect(points[:, 0:3])
                    # Only points visible to the camera are counted.
                    fov_flag = self.get_fov_flag(pts_rect, info['image']['image_shape'], calib)
                    pts_fov = points[fov_flag]
                    corners_lidar = box_utils.boxes_to_corners_3d(gt_boxes_lidar)
                    # -1 marks DontCare entries (never filled below).
                    num_points_in_gt = -np.ones(num_gt, dtype=np.int32)
                    for k in range(num_objects):
                        flag = box_utils.in_hull(pts_fov[:, 0:3], corners_lidar[k])
                        num_points_in_gt[k] = flag.sum()
                    annotations['num_points_in_gt'] = num_points_in_gt
            return info
        sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list
        with futures.ThreadPoolExecutor(num_workers) as executor:
            infos = executor.map(process_single_scene, sample_id_list)
        return list(infos)
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
import torch
database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
db_info_save_path = Path(self.root_path) / ('kitti_dbinfos_%s.pkl' % split)
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = pickle.load(f)
for k in range(len(infos)):
print('gt_database sample: %d/%d' % (k + 1, len(infos)))
info = infos[k]
sample_idx = info['point_cloud']['lidar_idx']
points = self.get_lidar(sample_idx)
annos = info['annos']
names = annos['name']
difficulty = annos['difficulty']
bbox = annos['bbox']
gt_boxes = annos['gt_boxes_lidar']
num_obj = gt_boxes.shape[0]
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
).numpy() # (nboxes, npoints)
for i in range(num_obj):
filename = '%s_%s_%d.bin' % (sample_idx, names[i], i)
filepath = database_save_path / filename
gt_points = points[point_indices[i] > 0]
gt_points[:, :3] -= gt_boxes[i, :3]
with open(filepath, 'w') as f:
gt_points.tofile(f)
if (used_classes is None) or names[i] in used_classes:
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
'difficulty': difficulty[i], 'bbox': bbox[i], 'score': annos['score'][i]}
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
    @staticmethod
    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
        """Convert network predictions into KITTI-style annotation dicts.

        Args:
            batch_dict: batch data; 'frame_id', 'calib' and 'image_shape' are
                read per sample index.
            pred_dicts: list of per-sample dicts with
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names: maps the 1-based pred_labels to class names.
            output_path: when given, additionally write one KITTI-format txt
                file per frame into this directory.

        Returns:
            list of KITTI-style annotation dicts, one per sample.
        """
        def get_template_prediction(num_samples):
            # Zero-filled dict in the KITTI annotation layout.
            ret_dict = {
                'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples),
                'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
                'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]),
                'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples),
                'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7])
            }
            return ret_dict
        def generate_single_sample_dict(batch_index, box_dict):
            pred_scores = box_dict['pred_scores'].cpu().numpy()
            pred_boxes = box_dict['pred_boxes'].cpu().numpy()
            pred_labels = box_dict['pred_labels'].cpu().numpy()
            pred_dict = get_template_prediction(pred_scores.shape[0])
            if pred_scores.shape[0] == 0:
                return pred_dict
            calib = batch_dict['calib'][batch_index]
            image_shape = batch_dict['image_shape'][batch_index]
            # Project lidar boxes into the camera frame and the image plane.
            pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(pred_boxes, calib)
            pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes(
                pred_boxes_camera, calib, image_shape=image_shape
            )
            pred_dict['name'] = np.array(class_names)[pred_labels - 1]
            pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
            pred_dict['bbox'] = pred_boxes_img
            pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
            pred_dict['location'] = pred_boxes_camera[:, 0:3]
            pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
            pred_dict['score'] = pred_scores
            pred_dict['boxes_lidar'] = pred_boxes
            return pred_dict
        annos = []
        for index, box_dict in enumerate(pred_dicts):
            frame_id = batch_dict['frame_id'][index]
            single_pred_dict = generate_single_sample_dict(index, box_dict)
            single_pred_dict['frame_id'] = frame_id
            annos.append(single_pred_dict)
            if output_path is not None:
                cur_det_file = output_path / ('%s.txt' % frame_id)
                with open(cur_det_file, 'w') as f:
                    bbox = single_pred_dict['bbox']
                    loc = single_pred_dict['location']
                    dims = single_pred_dict['dimensions']  # lhw -> hwl
                    for idx in range(len(bbox)):
                        # One KITTI label line per detection; dimensions are
                        # reordered lhw -> hwl as required by the file format.
                        print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f'
                              % (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx],
                                 bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3],
                                 dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0],
                                 loc[idx][1], loc[idx][2], single_pred_dict['rotation_y'][idx],
                                 single_pred_dict['score'][idx]), file=f)
        return annos
def evaluation(self, det_annos, class_names, **kwargs):
if 'annos' not in self.kitti_infos[0].keys():
return None, {}
from .kitti_object_eval_python import eval as kitti_eval
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.kitti_infos]
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(eval_gt_annos, eval_det_annos, class_names)
return ap_result_str, ap_dict
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.kitti_infos) * self.total_epochs
return len(self.kitti_infos)
    def __getitem__(self, index):
        """Load one sample and run the dataset preprocessing pipeline.

        Returns the prepared data dict (points, calib, optional gt boxes and
        road plane, plus the image shape).
        """
        # index = 4
        if self._merge_all_iters_to_one_epoch:
            # Wrap around when several epochs are merged into one long epoch.
            index = index % len(self.kitti_infos)
        info = copy.deepcopy(self.kitti_infos[index])
        sample_idx = info['point_cloud']['lidar_idx']
        points = self.get_lidar(sample_idx)
        calib = self.get_calib(sample_idx)
        img_shape = info['image']['image_shape']
        if self.dataset_cfg.FOV_POINTS_ONLY:
            # Keep only LiDAR points that project inside the camera image.
            pts_rect = calib.lidar_to_rect(points[:, 0:3])
            fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
            points = points[fov_flag]
        input_dict = {
            'points': points,
            'frame_id': sample_idx,
            'calib': calib,
        }
        if 'annos' in info:
            annos = info['annos']
            annos = common_utils.drop_info_with_name(annos, name='DontCare')
            loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
            gt_names = annos['name']
            gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
            gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib)
            input_dict.update({
                'gt_names': gt_names,
                'gt_boxes': gt_boxes_lidar
            })
            road_plane = self.get_road_plane(sample_idx)
            if road_plane is not None:
                input_dict['road_plane'] = road_plane
        data_dict = self.prepare_data(data_dict=input_dict)
        data_dict['image_shape'] = img_shape
        return data_dict
def create_kitti_infos(dataset_cfg, class_names, data_path, save_path, workers=4):
    """Generate the KITTI info pickle files and the ground-truth databases.

    Writes kitti_infos_{train,val,trainval,test}.pkl into ``save_path`` and
    then builds the GT databases used for sampling-based augmentation.
    """
    dataset = KittiDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
    train_split, val_split, trainval_split = 'train', 'val', 'trainval'
    train_filename = save_path / ('kitti_infos_%s.pkl' % train_split)
    val_filename = save_path / ('kitti_infos_%s.pkl' % val_split)
    trainval_filename = save_path / 'kitti_infos_trainval.pkl'
    test_filename = save_path / 'kitti_infos_test.pkl'
    print('---------------Start to generate data infos---------------')

    def _dump(obj, filename):
        # Serialize one info list to disk.
        with open(filename, 'wb') as f:
            pickle.dump(obj, f)

    dataset.set_split(train_split)
    kitti_infos_train = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
    _dump(kitti_infos_train, train_filename)
    print('Kitti info train file is saved to %s' % train_filename)

    dataset.set_split(val_split)
    kitti_infos_val = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
    _dump(kitti_infos_val, val_filename)
    print('Kitti info val file is saved to %s' % val_filename)

    _dump(kitti_infos_train + kitti_infos_val, trainval_filename)
    print('Kitti info trainval file is saved to %s' % trainval_filename)

    dataset.set_split('test')
    kitti_infos_test = dataset.get_infos(num_workers=workers, has_label=False, count_inside_pts=False)
    _dump(kitti_infos_test, test_filename)
    print('Kitti info test file is saved to %s' % test_filename)

    print('---------------Start create groundtruth database for data augmentation---------------')
    dataset.set_split(train_split)
    dataset.create_groundtruth_database(train_filename, split=train_split)
    dataset.set_split(trainval_split)
    dataset.create_groundtruth_database(trainval_filename, split=trainval_split)
    print('---------------Data preparation Done---------------')
if __name__ == '__main__':
    import sys
    if sys.argv.__len__() > 1 and sys.argv[1] == 'create_kitti_infos':
        import yaml
        from pathlib import Path
        from easydict import EasyDict
        # BUG FIX: bare yaml.load(...) requires a Loader argument on modern
        # PyYAML and executed the load on a never-closed file handle.
        # safe_load is sufficient for plain config files and is safe on
        # untrusted input.
        with open(sys.argv[2]) as config_file:
            dataset_cfg = EasyDict(yaml.safe_load(config_file))
        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        create_kitti_infos(
            dataset_cfg=dataset_cfg,
            class_names=['Car', 'Pedestrian', 'Cyclist'],
            data_path=ROOT_DIR / 'data' / 'kitti',
            save_path=ROOT_DIR / 'data' / 'kitti'
        )
| 19,046
| 41.995485
| 140
|
py
|
SASA
|
SASA-main/pcdet/datasets/kitti/kitti_utils.py
|
import numpy as np
from ...utils import box_utils
def transform_annotations_to_kitti_format(annos, map_name_to_kitti=None, info_with_fakelidar=False):
    """Convert lidar-frame annotations to the KITTI camera-frame layout.

    Args:
        annos: list of annotation dicts, each with 'name' and either
            'boxes_lidar' or 'gt_boxes_lidar' of shape (N, 7).
        map_name_to_kitti: dict, map name to KITTI names (Car, Pedestrian, Cyclist)
        info_with_fakelidar: boxes are in the fake-lidar frame and must be
            converted first.

    Returns:
        The same list, with KITTI-style fields filled in-place.
    """
    for anno in annos:
        num_objs = anno['name'].shape[0]
        for idx in range(num_objs):
            anno['name'][idx] = map_name_to_kitti[anno['name'][idx]]
        # Dummy 2D boxes/flags: these datasets carry no image-plane labels.
        anno['bbox'] = np.zeros((num_objs, 4))
        anno['bbox'][:, 2:4] = 50  # [0, 0, 50, 50]
        anno['truncated'] = np.zeros(num_objs)
        anno['occluded'] = np.zeros(num_objs)

        box_key = 'boxes_lidar' if 'boxes_lidar' in anno else 'gt_boxes_lidar'
        boxes = anno[box_key].copy()

        if len(boxes) == 0:
            anno['location'] = anno['dimensions'] = np.zeros((0, 3))
            anno['rotation_y'] = anno['alpha'] = np.zeros(0)
            continue

        if info_with_fakelidar:
            boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(boxes)
        boxes[:, 2] -= boxes[:, 5] / 2  # box center -> bottom face
        location = np.zeros((boxes.shape[0], 3))
        location[:, 0] = -boxes[:, 1]  # x = -y_lidar
        location[:, 1] = -boxes[:, 2]  # y = -z_lidar
        location[:, 2] = boxes[:, 0]  # z = x_lidar
        anno['location'] = location
        anno['dimensions'] = boxes[:, 3:6][:, [0, 2, 1]]  # lwh ==> lhw
        anno['rotation_y'] = -boxes[:, 6] - np.pi / 2.0
        anno['alpha'] = -np.arctan2(-boxes[:, 1], boxes[:, 0]) + anno['rotation_y']
    return annos
| 1,809
| 39.222222
| 105
|
py
|
SASA
|
SASA-main/pcdet/datasets/kitti/kitti_object_eval_python/rotate_iou.py
|
#####################
# Based on https://github.com/hongzhenwang/RRPN-revise
# Licensed under The MIT License
# Author: yanyan, scrin@foxmail.com
#####################
import math
import numba
import numpy as np
from numba import cuda
@numba.jit(nopython=True)
def div_up(m, n):
    # Ceiling division; used below to size the CUDA launch grid.
    whole = m // n
    remainder = m % n
    return whole + (remainder > 0)
# Signed area of the triangle (a, b, c) via the 2D cross product
# ((a - c) x (b - c)) / 2; the sign encodes the winding order.
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def trangle_area(a, b, c):
    return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) *
            (b[0] - c[0])) / 2.0
# Area of a convex polygon stored as a flat (x, y) buffer: fan triangles
# out from vertex 0 and sum their absolute areas.
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def area(int_pts, num_of_inter):
    area_val = 0.0
    for i in range(num_of_inter - 2):
        area_val += abs(
            trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4],
                         int_pts[2 * i + 4:2 * i + 6]))
    return area_val
# Sort the polygon vertices in int_pts (flat x,y pairs) into a consistent
# winding order around their centroid, in place. Uses a monotonic
# pseudo-angle key instead of atan2, then an insertion sort (<= 8 vertices).
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def sort_vertex_in_convex_polygon(int_pts, num_of_inter):
    if num_of_inter > 0:
        center = cuda.local.array((2, ), dtype=numba.float32)
        center[:] = 0.0
        for i in range(num_of_inter):
            center[0] += int_pts[2 * i]
            center[1] += int_pts[2 * i + 1]
        center[0] /= num_of_inter
        center[1] /= num_of_inter
        v = cuda.local.array((2, ), dtype=numba.float32)
        vs = cuda.local.array((16, ), dtype=numba.float32)
        for i in range(num_of_inter):
            v[0] = int_pts[2 * i] - center[0]
            v[1] = int_pts[2 * i + 1] - center[1]
            d = math.sqrt(v[0] * v[0] + v[1] * v[1])
            v[0] = v[0] / d
            v[1] = v[1] / d
            # Pseudo-angle key: cos(a) in the upper half-plane is extended to
            # -2 - cos(a) in the lower half so the key is monotonic in angle.
            if v[1] < 0:
                v[0] = -2 - v[0]
            vs[i] = v[0]
        j = 0
        temp = 0
        # Insertion sort of the keys, carrying the (x, y) pairs along.
        for i in range(1, num_of_inter):
            if vs[i - 1] > vs[i]:
                temp = vs[i]
                tx = int_pts[2 * i]
                ty = int_pts[2 * i + 1]
                j = i
                while j > 0 and vs[j - 1] > temp:
                    vs[j] = vs[j - 1]
                    int_pts[j * 2] = int_pts[j * 2 - 2]
                    int_pts[j * 2 + 1] = int_pts[j * 2 - 1]
                    j -= 1
                vs[j] = temp
                int_pts[j * 2] = tx
                int_pts[j * 2 + 1] = ty
# Intersect edge i of quad pts1 with edge j of quad pts2 (quads are flat
# x,y corner buffers; edges wrap modulo 4). On a proper crossing, writes
# the intersection point into temp_pts and returns True.
@cuda.jit(
    '(float32[:], float32[:], int32, int32, float32[:])',
    device=True,
    inline=True)
def line_segment_intersection(pts1, pts2, i, j, temp_pts):
    # Segment endpoints: A-B from pts1, C-D from pts2.
    A = cuda.local.array((2, ), dtype=numba.float32)
    B = cuda.local.array((2, ), dtype=numba.float32)
    C = cuda.local.array((2, ), dtype=numba.float32)
    D = cuda.local.array((2, ), dtype=numba.float32)
    A[0] = pts1[2 * i]
    A[1] = pts1[2 * i + 1]
    B[0] = pts1[2 * ((i + 1) % 4)]
    B[1] = pts1[2 * ((i + 1) % 4) + 1]
    C[0] = pts2[2 * j]
    C[1] = pts2[2 * j + 1]
    D[0] = pts2[2 * ((j + 1) % 4)]
    D[1] = pts2[2 * ((j + 1) % 4) + 1]
    BA0 = B[0] - A[0]
    BA1 = B[1] - A[1]
    DA0 = D[0] - A[0]
    CA0 = C[0] - A[0]
    DA1 = D[1] - A[1]
    CA1 = C[1] - A[1]
    # Orientation tests: the segments cross iff C,D straddle line AB and
    # A,B straddle line CD.
    acd = DA1 * CA0 > CA1 * DA0
    bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0])
    if acd != bcd:
        abc = CA1 * BA0 > BA1 * CA0
        abd = DA1 * BA0 > BA1 * DA0
        if abc != abd:
            # Solve the two-line system with Cramer's rule.
            DC0 = D[0] - C[0]
            DC1 = D[1] - C[1]
            ABBA = A[0] * B[1] - B[0] * A[1]
            CDDC = C[0] * D[1] - D[0] * C[1]
            DH = BA1 * DC0 - BA0 * DC1
            Dx = ABBA * DC0 - BA0 * CDDC
            Dy = ABBA * DC1 - BA1 * CDDC
            temp_pts[0] = Dx / DH
            temp_pts[1] = Dy / DH
            return True
    return False
# Alternative segment-intersection routine based on signed triangle areas.
# Not called by quadrilateral_intersection below, which uses the version
# above; kept as a reference implementation.
@cuda.jit(
    '(float32[:], float32[:], int32, int32, float32[:])',
    device=True,
    inline=True)
def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts):
    a = cuda.local.array((2, ), dtype=numba.float32)
    b = cuda.local.array((2, ), dtype=numba.float32)
    c = cuda.local.array((2, ), dtype=numba.float32)
    d = cuda.local.array((2, ), dtype=numba.float32)
    a[0] = pts1[2 * i]
    a[1] = pts1[2 * i + 1]
    b[0] = pts1[2 * ((i + 1) % 4)]
    b[1] = pts1[2 * ((i + 1) % 4) + 1]
    c[0] = pts2[2 * j]
    c[1] = pts2[2 * j + 1]
    d[0] = pts2[2 * ((j + 1) % 4)]
    d[1] = pts2[2 * ((j + 1) % 4) + 1]
    # c and d must lie on opposite sides of segment ab ...
    area_abc = trangle_area(a, b, c)
    area_abd = trangle_area(a, b, d)
    if area_abc * area_abd >= 0:
        return False
    # ... and a and b on opposite sides of segment cd.
    area_cda = trangle_area(c, d, a)
    area_cdb = area_cda + area_abc - area_abd
    if area_cda * area_cdb >= 0:
        return False
    # Interpolate along ab by the area ratio to get the crossing point.
    t = area_cda / (area_abd - area_abc)
    dx = t * (b[0] - a[0])
    dy = t * (b[1] - a[1])
    temp_pts[0] = a[0] + dx
    temp_pts[1] = a[1] + dy
    return True
# Point-in-(possibly rotated)-quadrilateral test: project AP onto the edge
# vectors AB and AD and require both projections to fall within the edge
# lengths. corners holds the quad as [x0, y0, x1, y1, x2, y2, x3, y3].
@cuda.jit('(float32, float32, float32[:])', device=True, inline=True)
def point_in_quadrilateral(pt_x, pt_y, corners):
    ab0 = corners[2] - corners[0]
    ab1 = corners[3] - corners[1]
    ad0 = corners[6] - corners[0]
    ad1 = corners[7] - corners[1]
    ap0 = pt_x - corners[0]
    ap1 = pt_y - corners[1]
    abab = ab0 * ab0 + ab1 * ab1
    abap = ab0 * ap0 + ab1 * ap1
    adad = ad0 * ad0 + ad1 * ad1
    adap = ad0 * ap0 + ad1 * ap1
    return abab >= abap and abap >= 0 and adad >= adap and adap >= 0
# Build the intersection polygon of two quadrilaterals: corners of either
# quad lying inside the other, plus all edge-edge intersection points.
# Writes up to 16 floats (8 points) into int_pts and returns the vertex
# count; vertices are unsorted (see sort_vertex_in_convex_polygon).
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def quadrilateral_intersection(pts1, pts2, int_pts):
    num_of_inter = 0
    for i in range(4):
        if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2):
            int_pts[num_of_inter * 2] = pts1[2 * i]
            int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1]
            num_of_inter += 1
        if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1):
            int_pts[num_of_inter * 2] = pts2[2 * i]
            int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1]
            num_of_inter += 1
    temp_pts = cuda.local.array((2, ), dtype=numba.float32)
    for i in range(4):
        for j in range(4):
            has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts)
            if has_pts:
                int_pts[num_of_inter * 2] = temp_pts[0]
                int_pts[num_of_inter * 2 + 1] = temp_pts[1]
                num_of_inter += 1
    return num_of_inter
# Expand a rotated box [cx, cy, dx, dy, angle] into its four corners,
# written into ``corners`` as a flat x,y buffer.
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def rbbox_to_corners(corners, rbbox):
    # generate clockwise corners and rotate it clockwise
    angle = rbbox[4]
    a_cos = math.cos(angle)
    a_sin = math.sin(angle)
    center_x = rbbox[0]
    center_y = rbbox[1]
    x_d = rbbox[2]
    y_d = rbbox[3]
    corners_x = cuda.local.array((4, ), dtype=numba.float32)
    corners_y = cuda.local.array((4, ), dtype=numba.float32)
    # Axis-aligned corners about the origin, then rotate + translate below.
    corners_x[0] = -x_d / 2
    corners_x[1] = -x_d / 2
    corners_x[2] = x_d / 2
    corners_x[3] = x_d / 2
    corners_y[0] = -y_d / 2
    corners_y[1] = y_d / 2
    corners_y[2] = y_d / 2
    corners_y[3] = -y_d / 2
    for i in range(4):
        corners[2 *
                i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x
        corners[2 * i
                + 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y
# Intersection area of two rotated boxes [cx, cy, dx, dy, angle]: build
# the intersection polygon, sort its vertices, then take its area.
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def inter(rbbox1, rbbox2):
    corners1 = cuda.local.array((8, ), dtype=numba.float32)
    corners2 = cuda.local.array((8, ), dtype=numba.float32)
    intersection_corners = cuda.local.array((16, ), dtype=numba.float32)
    rbbox_to_corners(corners1, rbbox1)
    rbbox_to_corners(corners2, rbbox2)
    num_intersection = quadrilateral_intersection(corners1, corners2,
                                                  intersection_corners)
    sort_vertex_in_convex_polygon(intersection_corners, num_intersection)
    # print(intersection_corners.reshape([-1, 2])[:num_intersection])
    return area(intersection_corners, num_intersection)
# Overlap of two rotated boxes. criterion -1: standard IoU;
# 0: intersection / area(rbox1); 1: intersection / area(rbox2);
# any other value: the raw intersection area.
@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True)
def devRotateIoUEval(rbox1, rbox2, criterion=-1):
    area1 = rbox1[2] * rbox1[3]
    area2 = rbox2[2] * rbox2[3]
    area_inter = inter(rbox1, rbox2)
    if criterion == -1:
        return area_inter / (area1 + area2 - area_inter)
    elif criterion == 0:
        return area_inter / area1
    elif criterion == 1:
        return area_inter / area2
    else:
        return area_inter
# CUDA kernel: each 64-thread block stages a tile of boxes and query boxes
# (5 floats each) in shared memory, then each thread computes the overlaps
# of its box against the whole query tile, writing into the flattened
# (N, K) dev_iou (row = box, column = query box).
@cuda.jit('(int64, int64, float32[:], float32[:], float32[:], int32)', fastmath=False)
def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1):
    threadsPerBlock = 8 * 8
    row_start = cuda.blockIdx.x
    col_start = cuda.blockIdx.y
    tx = cuda.threadIdx.x
    # Edge blocks may cover fewer than threadsPerBlock boxes.
    row_size = min(N - row_start * threadsPerBlock, threadsPerBlock)
    col_size = min(K - col_start * threadsPerBlock, threadsPerBlock)
    block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32)
    block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32)
    dev_query_box_idx = threadsPerBlock * col_start + tx
    dev_box_idx = threadsPerBlock * row_start + tx
    # Cooperative load of both tiles into shared memory.
    if (tx < col_size):
        block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0]
        block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1]
        block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2]
        block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3]
        block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4]
    if (tx < row_size):
        block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0]
        block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1]
        block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2]
        block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3]
        block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4]
    cuda.syncthreads()
    if tx < row_size:
        for i in range(col_size):
            offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i
            dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5],
                                              block_boxes[tx * 5:tx * 5 + 5], criterion)
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0):
    """rotated box iou running in gpu. 500x faster than cpu version
    (take 5ms in one example with numba.cuda code).
    convert from [this project](
    https://github.com/hongzhenwang/RRPN-revise/tree/master/pcdet/rotation).

    Args:
        boxes (float tensor: [N, 5]): rbboxes. format: centers, dims,
            angles(clockwise when positive)
        query_boxes (float tensor: [K, 5]): [description]
        criterion (int, optional): -1 for IoU; 0 or 1 normalize the
            intersection by the first / second box area instead.
        device_id (int, optional): Defaults to 0. [description]

    Returns:
        (N, K) array of pairwise overlaps in the input dtype of ``boxes``
        (float32 when either input is empty).
    """
    box_dtype = boxes.dtype
    boxes = boxes.astype(np.float32)
    query_boxes = query_boxes.astype(np.float32)
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    iou = np.zeros((N, K), dtype=np.float32)
    if N == 0 or K == 0:
        return iou
    threadsPerBlock = 8 * 8
    cuda.select_device(device_id)
    blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))
    stream = cuda.stream()
    with stream.auto_synchronize():
        boxes_dev = cuda.to_device(boxes.reshape([-1]), stream)
        query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream)
        iou_dev = cuda.to_device(iou.reshape([-1]), stream)
        rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream](
            N, K, boxes_dev, query_boxes_dev, iou_dev, criterion)
        iou_dev.copy_to_host(iou.reshape([-1]), stream=stream)
    # BUG FIX: cast back to the caller's original dtype. ``boxes`` was rebound
    # to its float32 copy above, so ``iou.astype(boxes.dtype)`` was always a
    # float32 no-op and the saved ``box_dtype`` was never used.
    return iou.astype(box_dtype)
| 11,552
| 33.903323
| 95
|
py
|
SASA
|
SASA-main/pcdet/datasets/kitti/kitti_object_eval_python/evaluate.py
|
import time
import fire

# BUG FIX: ``import .kitti_common as kitti`` is a SyntaxError -- a relative
# import must use the ``from . import name`` form.
from . import kitti_common as kitti
from .eval import get_coco_eval_result, get_official_eval_result
def _read_imageset_file(path):
    """Read a KITTI ImageSets split file into a list of integer sample ids."""
    with open(path, 'r') as f:
        return [int(line) for line in f.readlines()]
def evaluate(label_path,
             result_path,
             label_split_file,
             current_class=0,
             coco=False,
             score_thresh=-1):
    """Evaluate detections in ``result_path`` against the GT in ``label_path``.

    Uses the COCO-style metric when ``coco`` is True, otherwise the official
    KITTI metric. ``score_thresh`` > 0 drops low-scoring detections first.
    """
    dt_annos = kitti.get_label_annos(result_path)
    if score_thresh > 0:
        dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
    val_image_ids = _read_imageset_file(label_split_file)
    gt_annos = kitti.get_label_annos(label_path, val_image_ids)
    if coco:
        return get_coco_eval_result(gt_annos, dt_annos, current_class)
    return get_official_eval_result(gt_annos, dt_annos, current_class)
# Expose the module's functions (e.g. ``evaluate``) as a CLI via fire.
if __name__ == '__main__':
    fire.Fire()
| 909
| 25.764706
| 74
|
py
|
SASA
|
SASA-main/pcdet/datasets/kitti/kitti_object_eval_python/kitti_common.py
|
import concurrent.futures as futures
import os
import pathlib
import re
from collections import OrderedDict
import numpy as np
from skimage import io
def get_image_index_str(img_idx):
    """Format a sample index as the zero-padded 6-digit KITTI file stem."""
    return format(img_idx, '06d')
def get_kitti_info_path(idx,
                        prefix,
                        info_type='image_2',
                        file_tail='.png',
                        training=True,
                        relative_path=True):
    """Build (and validate) the path of one KITTI data file.

    Raises:
        ValueError: when the file does not exist under ``prefix``.
    """
    filename = get_image_index_str(idx) + file_tail
    split_dir = 'training' if training else 'testing'
    file_path = pathlib.Path(split_dir) / info_type / filename
    prefix = pathlib.Path(prefix)
    if not (prefix / file_path).exists():
        raise ValueError("file not exist: {}".format(file_path))
    return str(file_path) if relative_path else str(prefix / file_path)
def get_image_path(idx, prefix, training=True, relative_path=True):
    """Path of the left color image for sample ``idx``."""
    return get_kitti_info_path(idx, prefix, info_type='image_2',
                               file_tail='.png', training=training,
                               relative_path=relative_path)
def get_label_path(idx, prefix, training=True, relative_path=True):
    """Path of the label file for sample ``idx``."""
    return get_kitti_info_path(idx, prefix, info_type='label_2',
                               file_tail='.txt', training=training,
                               relative_path=relative_path)
def get_velodyne_path(idx, prefix, training=True, relative_path=True):
    """Path of the velodyne point-cloud file for sample ``idx``."""
    return get_kitti_info_path(idx, prefix, info_type='velodyne',
                               file_tail='.bin', training=training,
                               relative_path=relative_path)
def get_calib_path(idx, prefix, training=True, relative_path=True):
    """Path of the calibration file for sample ``idx``."""
    return get_kitti_info_path(idx, prefix, info_type='calib',
                               file_tail='.txt', training=training,
                               relative_path=relative_path)
def _extend_matrix(mat):
    """Append the homogeneous row [0, 0, 0, 1] to a 3x4 matrix."""
    bottom_row = np.array([[0., 0., 0., 1.]])
    return np.concatenate([mat, bottom_row], axis=0)
def get_kitti_image_info(path,
                         training=True,
                         label_info=True,
                         velodyne=False,
                         calib=False,
                         image_ids=7481,
                         extend_matrix=True,
                         num_worker=8,
                         relative_path=True,
                         with_imageshape=True):
    """Collect per-image KITTI metadata (paths, image shape, labels, calib).

    Args:
        path: dataset root containing training/ and testing/.
        training: read from the training split when True.
        label_info: parse the label file into an 'annos' entry.
        velodyne: also record the velodyne .bin path.
        calib: parse the calibration file into 'calib/...' entries.
        image_ids: list of sample ids, or an int n meaning range(n).
        extend_matrix: pad 3x4 / 3x3 calib matrices to homogeneous 4x4.
        num_worker: thread-pool size.
        relative_path: store paths relative to ``path``.
        with_imageshape: record each image's (height, width).

    Returns:
        list of info dicts, one per image id.
    """
    # image_infos = []
    root_path = pathlib.Path(path)
    if not isinstance(image_ids, list):
        image_ids = list(range(image_ids))
    def map_func(idx):
        image_info = {'image_idx': idx}
        annotations = None
        if velodyne:
            image_info['velodyne_path'] = get_velodyne_path(
                idx, path, training, relative_path)
        image_info['img_path'] = get_image_path(idx, path, training,
                                                relative_path)
        if with_imageshape:
            img_path = image_info['img_path']
            if relative_path:
                img_path = str(root_path / img_path)
            image_info['img_shape'] = np.array(
                io.imread(img_path).shape[:2], dtype=np.int32)
        if label_info:
            label_path = get_label_path(idx, path, training, relative_path)
            if relative_path:
                label_path = str(root_path / label_path)
            annotations = get_label_anno(label_path)
        if calib:
            calib_path = get_calib_path(
                idx, path, training, relative_path=False)
            with open(calib_path, 'r') as f:
                lines = f.readlines()
            # P0-P3: 3x4 projection matrices, one per calib-file line.
            P0 = np.array(
                [float(info) for info in lines[0].split(' ')[1:13]]).reshape(
                    [3, 4])
            P1 = np.array(
                [float(info) for info in lines[1].split(' ')[1:13]]).reshape(
                    [3, 4])
            P2 = np.array(
                [float(info) for info in lines[2].split(' ')[1:13]]).reshape(
                    [3, 4])
            P3 = np.array(
                [float(info) for info in lines[3].split(' ')[1:13]]).reshape(
                    [3, 4])
            if extend_matrix:
                P0 = _extend_matrix(P0)
                P1 = _extend_matrix(P1)
                P2 = _extend_matrix(P2)
                P3 = _extend_matrix(P3)
            image_info['calib/P0'] = P0
            image_info['calib/P1'] = P1
            image_info['calib/P2'] = P2
            image_info['calib/P3'] = P3
            R0_rect = np.array([
                float(info) for info in lines[4].split(' ')[1:10]
            ]).reshape([3, 3])
            if extend_matrix:
                # Embed the 3x3 rectification into a 4x4 with a unit corner.
                rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
                rect_4x4[3, 3] = 1.
                rect_4x4[:3, :3] = R0_rect
            else:
                rect_4x4 = R0_rect
            image_info['calib/R0_rect'] = rect_4x4
            Tr_velo_to_cam = np.array([
                float(info) for info in lines[5].split(' ')[1:13]
            ]).reshape([3, 4])
            Tr_imu_to_velo = np.array([
                float(info) for info in lines[6].split(' ')[1:13]
            ]).reshape([3, 4])
            if extend_matrix:
                Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
                Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo)
            image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam
            image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo
        if annotations is not None:
            image_info['annos'] = annotations
            add_difficulty_to_annos(image_info)
        return image_info
    with futures.ThreadPoolExecutor(num_worker) as executor:
        image_infos = executor.map(map_func, image_ids)
    return list(image_infos)
def filter_kitti_anno(image_anno,
                      used_classes,
                      used_difficulty=None,
                      dontcare_iou=None):
    """Filter one image's annotations by class, difficulty and DontCare overlap.

    Args:
        image_anno: dict of per-object numpy arrays (parallel across keys).
        used_classes: class name or list of class names to keep.
        used_difficulty: optional list of difficulty levels to keep.
        dontcare_iou: when given (and 'DontCare' is among the kept classes),
            drop boxes whose 2D overlap with any DontCare region exceeds it.

    Returns:
        A new dict with the same keys, filtered.
    """
    if not isinstance(used_classes, (list, tuple)):
        used_classes = [used_classes]
    img_filtered_annotations = {}
    relevant_annotation_indices = [
        i for i, x in enumerate(image_anno['name']) if x in used_classes
    ]
    for key in image_anno.keys():
        img_filtered_annotations[key] = (
            image_anno[key][relevant_annotation_indices])
    if used_difficulty is not None:
        relevant_annotation_indices = [
            i for i, x in enumerate(img_filtered_annotations['difficulty'])
            if x in used_difficulty
        ]
        for key in image_anno.keys():
            img_filtered_annotations[key] = (
                img_filtered_annotations[key][relevant_annotation_indices])
    if 'DontCare' in used_classes and dontcare_iou is not None:
        dont_care_indices = [
            i for i, x in enumerate(img_filtered_annotations['name'])
            if x == 'DontCare'
        ]
        # bounding box format [y_min, x_min, y_max, x_max]
        all_boxes = img_filtered_annotations['bbox']
        # NOTE(review): ``iou`` is a helper defined elsewhere in this module.
        ious = iou(all_boxes, all_boxes[dont_care_indices])
        # Remove all bounding boxes that overlap with a dontcare region.
        if ious.size > 0:
            boxes_to_remove = np.amax(ious, axis=1) > dontcare_iou
            for key in image_anno.keys():
                img_filtered_annotations[key] = (img_filtered_annotations[key][
                    np.logical_not(boxes_to_remove)])
    return img_filtered_annotations
def filter_annos_low_score(image_annos, thresh):
    """Drop detections whose score is below ``thresh`` from every frame."""
    filtered = []
    for anno in image_annos:
        keep = [i for i, score in enumerate(anno['score']) if score >= thresh]
        filtered.append({key: anno[key][keep] for key in anno.keys()})
    return filtered
def kitti_result_line(result_dict, precision=4):
    """Serialize one detection dict into a KITTI-format label line.

    Missing optional fields fall back to KITTI's conventional sentinel
    values; 'name', 'bbox' and 'score' have no default and must be given.
    """
    prec_float = "{" + ":.{}f".format(precision) + "}"
    all_field_default = OrderedDict([
        ('name', None),
        ('truncated', -1),
        ('occluded', -1),
        ('alpha', -10),
        ('bbox', None),
        ('dimensions', [-1, -1, -1]),
        ('location', [-1000, -1000, -1000]),
        ('rotation_y', -10),
        ('score', None),
    ])
    res_dict = OrderedDict((key, None) for key in all_field_default)
    for key, val in result_dict.items():
        if all_field_default[key] is None and val is None:
            raise ValueError("you must specify a value for {}".format(key))
        res_dict[key] = val

    res_line = []
    for key, val in res_dict.items():
        if key == 'name':
            res_line.append(val)
        elif key in ['truncated', 'alpha', 'rotation_y', 'score']:
            res_line.append(prec_float.format(val) if val is not None
                            else str(all_field_default[key]))
        elif key == 'occluded':
            res_line.append('{}'.format(val) if val is not None
                            else str(all_field_default[key]))
        elif key in ['bbox', 'dimensions', 'location']:
            if val is None:
                res_line += [str(v) for v in all_field_default[key]]
            else:
                res_line += [prec_float.format(v) for v in val]
        else:
            raise ValueError("unknown key. supported key:{}".format(
                res_dict.keys()))
    return ' '.join(res_line)
def add_difficulty_to_annos(info):
    """Assign the KITTI difficulty (0 easy, 1 moderate, 2 hard, -1 ignored)
    to every annotation in ``info['annos']``.

    An object counts as a given level when its 2D box height, occlusion and
    truncation all satisfy that level's thresholds.

    Returns:
        list[int]: per-object difficulty (also stored in
        ``annos['difficulty']`` as an int32 array).
    """
    min_height = [40, 25,
                  25]  # minimum height for evaluated groundtruth/detections
    max_occlusion = [
        0, 1, 2
    ]  # maximum occlusion level of the groundtruth used for eval_utils
    max_trunc = [
        0.15, 0.3, 0.5
    ]  # maximum truncation level of the groundtruth used for eval_utils
    annos = info['annos']
    dims = annos['dimensions']  # lhw format
    bbox = annos['bbox']
    height = bbox[:, 3] - bbox[:, 1]
    occlusion = annos['occluded']
    truncation = annos['truncated']
    diff = []
    # BUG FIX: the np.bool alias was removed in NumPy 1.24; use the builtin
    # bool dtype instead.
    easy_mask = np.ones((len(dims), ), dtype=bool)
    moderate_mask = np.ones((len(dims), ), dtype=bool)
    hard_mask = np.ones((len(dims), ), dtype=bool)
    i = 0
    for h, o, t in zip(height, occlusion, truncation):
        if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
            easy_mask[i] = False
        if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
            moderate_mask[i] = False
        if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
            hard_mask[i] = False
        i += 1
    # Each object is labeled with the easiest level it qualifies for.
    is_easy = easy_mask
    is_moderate = np.logical_xor(easy_mask, moderate_mask)
    is_hard = np.logical_xor(hard_mask, moderate_mask)
    for i in range(len(dims)):
        if is_easy[i]:
            diff.append(0)
        elif is_moderate[i]:
            diff.append(1)
        elif is_hard[i]:
            diff.append(2)
        else:
            diff.append(-1)
    annos["difficulty"] = np.array(diff, np.int32)
    return diff
def get_label_anno(label_path):
    """Parse one KITTI label file into a dict of numpy arrays.

    Each line has 15 fields (16 when a detection score is present):
    name, truncated, occluded, alpha, bbox(4), dimensions(3 as h/w/l),
    location(3), rotation_y[, score]. Missing scores are filled with zeros.
    """
    with open(label_path, 'r') as f:
        content = [line.strip().split(' ') for line in f.readlines()]
    annotations = {
        'name': np.array([x[0] for x in content]),
        'truncated': np.array([float(x[1]) for x in content]),
        'occluded': np.array([int(x[2]) for x in content]),
        'alpha': np.array([float(x[3]) for x in content]),
        'bbox': np.array(
            [[float(v) for v in x[4:8]] for x in content]).reshape(-1, 4),
        # the label file stores h, w, l; reorder into the standard
        # l, h, w (camera) layout
        'dimensions': np.array(
            [[float(v) for v in x[8:11]] for x in content]).reshape(
                -1, 3)[:, [2, 0, 1]],
        'location': np.array(
            [[float(v) for v in x[11:14]] for x in content]).reshape(-1, 3),
        'rotation_y': np.array(
            [float(x[14]) for x in content]).reshape(-1),
    }
    if len(content) != 0 and len(content[0]) == 16:  # a 16th column means scores
        annotations['score'] = np.array([float(x[15]) for x in content])
    else:
        annotations['score'] = np.zeros([len(annotations['bbox'])])
    return annotations
def get_label_annos(label_folder, image_ids=None):
    """Load the annotations of many label files from one folder.

    When `image_ids` is None, every 6-digit `*.txt` file in the folder is
    used (sorted by index); an int is expanded to `range(image_ids)`.
    """
    label_folder = pathlib.Path(label_folder)
    if image_ids is None:
        pattern = re.compile(r'^\d{6}.txt$')
        matched = [p for p in label_folder.glob('*.txt')
                   if pattern.match(p.name)]
        image_ids = sorted(int(p.stem) for p in matched)
    if not isinstance(image_ids, list):
        image_ids = list(range(image_ids))
    return [
        get_label_anno(label_folder / (get_image_index_str(idx) + '.txt'))
        for idx in image_ids
    ]
def area(boxes, add1=False):
    """Computes area of boxes.
    Args:
        boxes: Numpy array with shape [N, 4] holding N boxes
        add1: when True, treat coordinates as inclusive pixel indices
            (adds 1 to each side length)
    Returns:
        a numpy array with shape [N*1] representing box areas
    """
    offset = 1.0 if add1 else 0.0
    side_a = boxes[:, 2] - boxes[:, 0] + offset
    side_b = boxes[:, 3] - boxes[:, 1] + offset
    return side_a * side_b
def intersection(boxes1, boxes2, add1=False):
    """Compute pairwise intersection areas between boxes.
    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes
        boxes2: a numpy array with shape [M, 4] holding M boxes
        add1: when True, treat coordinates as inclusive pixel indices
    Returns:
        a numpy array with shape [N*M] representing pairwise intersection area
    """
    y_min1, x_min1, y_max1, x_max1 = np.split(boxes1, 4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = np.split(boxes2, 4, axis=1)

    # broadcast every box in boxes1 against every box in boxes2
    pair_ymax = np.minimum(y_max1, np.transpose(y_max2))
    pair_ymin = np.maximum(y_min1, np.transpose(y_min2))
    pair_xmax = np.minimum(x_max1, np.transpose(x_max2))
    pair_xmin = np.maximum(x_min1, np.transpose(x_min2))
    if add1:
        pair_ymax = pair_ymax + 1.0
        pair_xmax = pair_xmax + 1.0

    heights = np.clip(pair_ymax - pair_ymin, 0.0, None)
    widths = np.clip(pair_xmax - pair_xmin, 0.0, None)
    return heights * widths
def iou(boxes1, boxes2, add1=False):
    """Computes pairwise intersection-over-union between box collections.
    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
    Returns:
        a numpy array with shape [N, M] representing pairwise iou scores.
    """
    intersect = intersection(boxes1, boxes2, add1)
    union = (np.expand_dims(area(boxes1, add1), axis=1)
             + np.expand_dims(area(boxes2, add1), axis=0)
             - intersect)
    return intersect / union
| 15,309
| 36.070218
| 79
|
py
|
SASA
|
SASA-main/pcdet/datasets/kitti/kitti_object_eval_python/eval.py
|
import io as sysio
import numba
import numpy as np
from .rotate_iou import rotate_iou_gpu_eval
@numba.jit
def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):
    """Pick score thresholds that sample the P/R curve at ~num_sample_pts
    recall positions.

    `scores` holds matched-detection scores (sorted in place, then walked in
    descending order); a score is kept as a threshold each time recall has
    advanced by at least 1 / (num_sample_pts - 1) since the last kept one.
    """
    scores.sort()
    scores = scores[::-1]
    current_recall = 0
    thresholds = []
    for i, score in enumerate(scores):
        l_recall = (i + 1) / num_gt
        if i < (len(scores) - 1):
            r_recall = (i + 2) / num_gt
        else:
            r_recall = l_recall
        # skip while the next point is closer to the current sampling target
        if (((r_recall - current_recall) < (current_recall - l_recall))
                and (i < (len(scores) - 1))):
            continue
        # recall = l_recall
        thresholds.append(score)
        current_recall += 1 / (num_sample_pts - 1.0)
    return thresholds
def clean_data(gt_anno, dt_anno, current_class, difficulty):
    """Split one frame's GT/DT boxes into evaluated, ignored and don't-care
    sets for a given class and difficulty.

    Returns (num_valid_gt, ignored_gt, ignored_dt, dc_bboxes) where the
    ignored lists use 0 = evaluate, 1 = ignore (similar class or too hard),
    -1 = other class.
    """
    CLASS_NAMES = ['car', 'pedestrian', 'cyclist', 'van', 'person_sitting', 'truck']
    MIN_HEIGHT = [40, 25, 25]
    MAX_OCCLUSION = [0, 1, 2]
    MAX_TRUNCATION = [0.15, 0.3, 0.5]
    cls_name = CLASS_NAMES[current_class].lower()
    # neighboring classes counted as "similar" (ignored, not FP/FN)
    neighbor = {'pedestrian': 'person_sitting', 'car': 'van'}

    dc_bboxes, ignored_gt, ignored_dt = [], [], []
    num_valid_gt = 0

    for idx in range(len(gt_anno["name"])):
        raw_name = gt_anno["name"][idx]
        name = raw_name.lower()
        if name == cls_name:
            valid = 1
        elif neighbor.get(cls_name) == name:
            valid = 0
        else:
            valid = -1
        box = gt_anno["bbox"][idx]
        too_hard = (
            gt_anno["occluded"][idx] > MAX_OCCLUSION[difficulty]
            or gt_anno["truncated"][idx] > MAX_TRUNCATION[difficulty]
            or (box[3] - box[1]) <= MIN_HEIGHT[difficulty]
        )
        if valid == 1 and not too_hard:
            ignored_gt.append(0)
            num_valid_gt += 1
        elif valid == 0 or (too_hard and valid == 1):
            ignored_gt.append(1)
        else:
            ignored_gt.append(-1)
        if raw_name == "DontCare":
            dc_bboxes.append(gt_anno["bbox"][idx])

    for idx in range(len(dt_anno["name"])):
        det_height = abs(dt_anno["bbox"][idx, 3] - dt_anno["bbox"][idx, 1])
        # note: strict < here (GT uses <=), matching the official devkit
        if det_height < MIN_HEIGHT[difficulty]:
            ignored_dt.append(1)
        elif dt_anno["name"][idx].lower() == cls_name:
            ignored_dt.append(0)
        else:
            ignored_dt.append(-1)

    return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes
@numba.jit(nopython=True)
def image_box_overlap(boxes, query_boxes, criterion=-1):
    """Pairwise 2D overlap of image boxes [x1, y1, x2, y2].

    `criterion` selects the denominator: -1 -> union (IoU),
    0 -> area of `boxes`, 1 -> area of `query_boxes`, else 1.0
    (raw intersection area).

    Returns an (N, K) overlap matrix.
    """
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=boxes.dtype)
    for k in range(K):
        qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *
                     (query_boxes[k, 3] - query_boxes[k, 1]))
        for n in range(N):
            # horizontal then vertical intersection; skip early when disjoint
            iw = (min(boxes[n, 2], query_boxes[k, 2]) -
                  max(boxes[n, 0], query_boxes[k, 0]))
            if iw > 0:
                ih = (min(boxes[n, 3], query_boxes[k, 3]) -
                      max(boxes[n, 1], query_boxes[k, 1]))
                if ih > 0:
                    if criterion == -1:
                        ua = (
                            (boxes[n, 2] - boxes[n, 0]) *
                            (boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)
                    elif criterion == 0:
                        ua = ((boxes[n, 2] - boxes[n, 0]) *
                              (boxes[n, 3] - boxes[n, 1]))
                    elif criterion == 1:
                        ua = qbox_area
                    else:
                        ua = 1.0
                    overlaps[n, k] = iw * ih / ua
    return overlaps
def bev_box_overlap(boxes, qboxes, criterion=-1):
    """Rotated bird's-eye-view overlap, delegated to the rotate-IoU kernel."""
    return rotate_iou_gpu_eval(boxes, qboxes, criterion)
@numba.jit(nopython=True, parallel=True)
def d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1):
    """Fold the vertical (y-axis) extent into precomputed rotated BEV
    intersections, turning `rinc` into 3D overlaps in place.

    `criterion`: -1 -> union (IoU), 0 -> volume of `boxes`,
    1 -> volume of `qboxes`, else the raw intersection volume.
    """
    # ONLY support overlap in CAMERA coordinates, not lidar.
    N, K = boxes.shape[0], qboxes.shape[0]
    for i in range(N):
        for j in range(K):
            if rinc[i, j] > 0:
                # iw = (min(boxes[i, 1] + boxes[i, 4], qboxes[j, 1] +
                #         qboxes[j, 4]) - max(boxes[i, 1], qboxes[j, 1]))
                # camera y grows downward: box top is y - h
                iw = (min(boxes[i, 1], qboxes[j, 1]) - max(
                    boxes[i, 1] - boxes[i, 4], qboxes[j, 1] - qboxes[j, 4]))

                if iw > 0:
                    area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]
                    area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]
                    inc = iw * rinc[i, j]
                    if criterion == -1:
                        ua = (area1 + area2 - inc)
                    elif criterion == 0:
                        ua = area1
                    elif criterion == 1:
                        ua = area2
                    else:
                        ua = inc
                    rinc[i, j] = inc / ua
                else:
                    rinc[i, j] = 0.0
def d3_box_overlap(boxes, qboxes, criterion=-1):
    """3D overlap of camera-frame boxes [x, y, z, l, h, w, ry].

    Computes the rotated BEV intersection on the (x, z, l, w, ry) columns
    first, then the kernel folds the height overlap into it in place.
    """
    bev_cols = [0, 2, 3, 5, 6]
    rinc = rotate_iou_gpu_eval(boxes[:, bev_cols], qboxes[:, bev_cols], 2)
    d3_box_overlap_kernel(boxes, qboxes, rinc, criterion)
    return rinc
@numba.jit(nopython=True)
def compute_statistics_jit(overlaps,
                           gt_datas,
                           dt_datas,
                           ignored_gt,
                           ignored_det,
                           dc_bboxes,
                           metric,
                           min_overlap,
                           thresh=0,
                           compute_fp=False,
                           compute_aos=False):
    """Match detections to GT for one frame and accumulate TP/FP/FN (and,
    optionally, the AOS orientation similarity).

    `overlaps` is indexed [det, gt]; `ignored_*` use 0 = evaluate,
    1 = ignore, -1 = other class. Detections scoring below `thresh` are
    skipped. With `compute_fp`, unmatched valid detections count as FP,
    except (bbox metric only) those covering DontCare regions.

    Returns (tp, fp, fn, similarity, scores of matched detections).
    """
    det_size = dt_datas.shape[0]
    gt_size = gt_datas.shape[0]
    dt_scores = dt_datas[:, -1]
    dt_alphas = dt_datas[:, 4]
    gt_alphas = gt_datas[:, 4]
    dt_bboxes = dt_datas[:, :4]
    gt_bboxes = gt_datas[:, :4]
    assigned_detection = [False] * det_size
    ignored_threshold = [False] * det_size
    if compute_fp:
        for i in range(det_size):
            if (dt_scores[i] < thresh):
                ignored_threshold[i] = True
    NO_DETECTION = -10000000
    tp, fp, fn, similarity = 0, 0, 0, 0
    # thresholds = [0.0]
    # delta = [0.0]
    thresholds = np.zeros((gt_size, ))
    thresh_idx = 0
    delta = np.zeros((gt_size, ))
    delta_idx = 0
    for i in range(gt_size):
        if ignored_gt[i] == -1:
            continue
        det_idx = -1
        valid_detection = NO_DETECTION
        max_overlap = 0
        assigned_ignored_det = False
        # pick the best detection for this GT; ignored detections only
        # claim a GT when no valid detection matched it
        for j in range(det_size):
            if (ignored_det[j] == -1):
                continue
            if (assigned_detection[j]):
                continue
            if (ignored_threshold[j]):
                continue
            overlap = overlaps[j, i]
            dt_score = dt_scores[j]
            if (not compute_fp and (overlap > min_overlap)
                    and dt_score > valid_detection):
                det_idx = j
                valid_detection = dt_score
            elif (compute_fp and (overlap > min_overlap)
                  and (overlap > max_overlap or assigned_ignored_det)
                  and ignored_det[j] == 0):
                max_overlap = overlap
                det_idx = j
                valid_detection = 1
                assigned_ignored_det = False
            elif (compute_fp and (overlap > min_overlap)
                  and (valid_detection == NO_DETECTION)
                  and ignored_det[j] == 1):
                det_idx = j
                valid_detection = 1
                assigned_ignored_det = True

        if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:
            fn += 1
        elif ((valid_detection != NO_DETECTION)
              and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):
            assigned_detection[det_idx] = True
        elif valid_detection != NO_DETECTION:
            tp += 1
            # thresholds.append(dt_scores[det_idx])
            thresholds[thresh_idx] = dt_scores[det_idx]
            thresh_idx += 1
            if compute_aos:
                # delta.append(gt_alphas[i] - dt_alphas[det_idx])
                delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]
                delta_idx += 1
            assigned_detection[det_idx] = True
    if compute_fp:
        for i in range(det_size):
            if (not (assigned_detection[i] or ignored_det[i] == -1
                     or ignored_det[i] == 1 or ignored_threshold[i])):
                fp += 1
        nstuff = 0
        if metric == 0:
            # detections overlapping DontCare regions are not false positives
            overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)
            for i in range(dc_bboxes.shape[0]):
                for j in range(det_size):
                    if (assigned_detection[j]):
                        continue
                    if (ignored_det[j] == -1 or ignored_det[j] == 1):
                        continue
                    if (ignored_threshold[j]):
                        continue
                    if overlaps_dt_dc[j, i] > min_overlap:
                        assigned_detection[j] = True
                        nstuff += 1
        fp -= nstuff
        if compute_aos:
            # AOS similarity: (1 + cos(delta)) / 2 per TP, 0 per FP
            tmp = np.zeros((fp + delta_idx, ))
            # tmp = [0] * fp
            for i in range(delta_idx):
                tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0
                # tmp.append((1.0 + np.cos(delta[i])) / 2.0)
            # assert len(tmp) == fp + tp
            # assert len(delta) == tp
            if tp > 0 or fp > 0:
                similarity = np.sum(tmp)
            else:
                similarity = -1
    return tp, fp, fn, similarity, thresholds[:thresh_idx]
def get_split_parts(num, num_part):
    """Split `num` examples into at most `num_part` equal chunks, with the
    remainder appended as a final smaller chunk; fewer examples than parts
    yields a single chunk of everything."""
    chunk, leftover = divmod(num, num_part)
    if chunk == 0:
        return [num]
    parts = [chunk] * num_part
    if leftover:
        parts.append(leftover)
    return parts
@numba.jit(nopython=True)
def fused_compute_statistics(overlaps,
                             pr,
                             gt_nums,
                             dt_nums,
                             dc_nums,
                             gt_datas,
                             dt_datas,
                             dontcares,
                             ignored_gts,
                             ignored_dets,
                             metric,
                             min_overlap,
                             thresholds,
                             compute_aos=False):
    """Accumulate the PR table `pr` (columns: tp, fp, fn, similarity, one
    row per threshold) over a batch of frames that share one big overlap
    matrix part.

    Per-frame slices of the flat arrays are recovered with running
    gt/dt/dontcare offsets; results are added into `pr` in place.
    """
    gt_num = 0
    dt_num = 0
    dc_num = 0
    for i in range(gt_nums.shape[0]):
        for t, thresh in enumerate(thresholds):
            # slice this frame's block out of the fused part
            overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num:
                               gt_num + gt_nums[i]]
            gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]
            dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]
            ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]
            ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]
            dontcare = dontcares[dc_num:dc_num + dc_nums[i]]
            tp, fp, fn, similarity, _ = compute_statistics_jit(
                overlap,
                gt_data,
                dt_data,
                ignored_gt,
                ignored_det,
                dontcare,
                metric,
                min_overlap=min_overlap,
                thresh=thresh,
                compute_fp=True,
                compute_aos=compute_aos)
            pr[t, 0] += tp
            pr[t, 1] += fp
            pr[t, 2] += fn
            if similarity != -1:
                pr[t, 3] += similarity
        gt_num += gt_nums[i]
        dt_num += dt_nums[i]
        dc_num += dc_nums[i]
def calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50):
    """fast iou algorithm. this function can be used independently to
    do result analysis. Must be used in CAMERA coordinate system.

    NOTE(review): eval_class calls this with (dt_annos, gt_annos) swapped,
    so there the returned matrices are indexed [det, gt] -- verify against
    the caller before reusing.
    Args:
        gt_annos: dict, must from get_label_annos() in kitti_common.py
        dt_annos: dict, must from get_label_annos() in kitti_common.py
        metric: eval type. 0: bbox, 1: bev, 2: 3d
        num_parts: int. a parameter for fast calculate algorithm
    """
    assert len(gt_annos) == len(dt_annos)
    total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
    total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
    num_examples = len(gt_annos)
    split_parts = get_split_parts(num_examples, num_parts)
    parted_overlaps = []
    example_idx = 0
    # pass 1: one fused overlap matrix per part (concatenated frames)
    for num_part in split_parts:
        gt_annos_part = gt_annos[example_idx:example_idx + num_part]
        dt_annos_part = dt_annos[example_idx:example_idx + num_part]
        if metric == 0:
            gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0)
            dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0)
            overlap_part = image_box_overlap(gt_boxes, dt_boxes)
        elif metric == 1:
            # BEV boxes: camera-frame (x, z) location + (l, w) + rotation_y
            loc = np.concatenate(
                [a["location"][:, [0, 2]] for a in gt_annos_part], 0)
            dims = np.concatenate(
                [a["dimensions"][:, [0, 2]] for a in gt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
            gt_boxes = np.concatenate(
                [loc, dims, rots[..., np.newaxis]], axis=1)
            loc = np.concatenate(
                [a["location"][:, [0, 2]] for a in dt_annos_part], 0)
            dims = np.concatenate(
                [a["dimensions"][:, [0, 2]] for a in dt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
            dt_boxes = np.concatenate(
                [loc, dims, rots[..., np.newaxis]], axis=1)
            overlap_part = bev_box_overlap(gt_boxes, dt_boxes).astype(
                np.float64)
        elif metric == 2:
            loc = np.concatenate([a["location"] for a in gt_annos_part], 0)
            dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
            gt_boxes = np.concatenate(
                [loc, dims, rots[..., np.newaxis]], axis=1)
            loc = np.concatenate([a["location"] for a in dt_annos_part], 0)
            dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0)
            rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
            dt_boxes = np.concatenate(
                [loc, dims, rots[..., np.newaxis]], axis=1)
            overlap_part = d3_box_overlap(gt_boxes, dt_boxes).astype(
                np.float64)
        else:
            raise ValueError("unknown metric")
        parted_overlaps.append(overlap_part)
        example_idx += num_part
    overlaps = []
    example_idx = 0
    # pass 2: slice each fused matrix back into per-frame overlap matrices
    for j, num_part in enumerate(split_parts):
        gt_annos_part = gt_annos[example_idx:example_idx + num_part]
        dt_annos_part = dt_annos[example_idx:example_idx + num_part]
        gt_num_idx, dt_num_idx = 0, 0
        for i in range(num_part):
            gt_box_num = total_gt_num[example_idx + i]
            dt_box_num = total_dt_num[example_idx + i]
            overlaps.append(
                parted_overlaps[j][gt_num_idx:gt_num_idx + gt_box_num,
                                   dt_num_idx:dt_num_idx + dt_box_num])
            gt_num_idx += gt_box_num
            dt_num_idx += dt_box_num
        example_idx += num_part

    return overlaps, parted_overlaps, total_gt_num, total_dt_num
def _prepare_data(gt_annos, dt_annos, current_class, difficulty):
    """Run clean_data over every frame and pack the per-frame inputs that
    eval_class feeds into the statistics kernels.

    Returns (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
    dontcares, total_dc_num, total_num_valid_gt); gt rows are
    [bbox(4), alpha], dt rows are [bbox(4), alpha, score].
    """
    gt_datas_list, dt_datas_list = [], []
    ignored_gts, ignored_dets, dontcares = [], [], []
    total_dc_num = []
    total_num_valid_gt = 0
    for gt, dt in zip(gt_annos, dt_annos):
        num_valid_gt, ignored_gt, ignored_det, dc_bboxes = clean_data(
            gt, dt, current_class, difficulty)
        ignored_gts.append(np.array(ignored_gt, dtype=np.int64))
        ignored_dets.append(np.array(ignored_det, dtype=np.int64))
        if len(dc_bboxes) == 0:
            dc_bboxes = np.zeros((0, 4)).astype(np.float64)
        else:
            dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)
        total_dc_num.append(dc_bboxes.shape[0])
        dontcares.append(dc_bboxes)
        total_num_valid_gt += num_valid_gt
        gt_datas_list.append(np.concatenate(
            [gt["bbox"], gt["alpha"][..., np.newaxis]], 1))
        dt_datas_list.append(np.concatenate([
            dt["bbox"], dt["alpha"][..., np.newaxis],
            dt["score"][..., np.newaxis]
        ], 1))
    total_dc_num = np.stack(total_dc_num, axis=0)
    return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,
            total_dc_num, total_num_valid_gt)
def eval_class(gt_annos,
               dt_annos,
               current_classes,
               difficultys,
               metric,
               min_overlaps,
               compute_aos=False,
               num_parts=100):
    """Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.
    Args:
        gt_annos: dict, must from get_label_annos() in kitti_common.py
        dt_annos: dict, must from get_label_annos() in kitti_common.py
        current_classes: list of int, 0: car, 1: pedestrian, 2: cyclist
        difficultys: list of int. eval difficulty, 0: easy, 1: normal, 2: hard
        metric: eval type. 0: bbox, 1: bev, 2: 3d
        min_overlaps: float, min overlap. format: [num_overlap, metric, class].
        num_parts: int. a parameter for fast calculate algorithm
    Returns:
        dict of recall, precision and aos
    """
    assert len(gt_annos) == len(dt_annos)
    num_examples = len(gt_annos)
    split_parts = get_split_parts(num_examples, num_parts)

    # note the swapped argument order: the resulting matrices are [det, gt]
    rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts)
    overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
    N_SAMPLE_PTS = 41
    num_minoverlap = len(min_overlaps)
    num_class = len(current_classes)
    num_difficulty = len(difficultys)
    precision = np.zeros(
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    recall = np.zeros(
        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
    for m, current_class in enumerate(current_classes):
        for l, difficulty in enumerate(difficultys):
            rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
            (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
             dontcares, total_dc_num, total_num_valid_gt) = rets
            for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
                # first pass: collect matched scores to derive the score
                # thresholds sampling the PR curve
                thresholdss = []
                for i in range(len(gt_annos)):
                    rets = compute_statistics_jit(
                        overlaps[i],
                        gt_datas_list[i],
                        dt_datas_list[i],
                        ignored_gts[i],
                        ignored_dets[i],
                        dontcares[i],
                        metric,
                        min_overlap=min_overlap,
                        thresh=0.0,
                        compute_fp=False)
                    tp, fp, fn, similarity, thresholds = rets
                    thresholdss += thresholds.tolist()
                thresholdss = np.array(thresholdss)
                thresholds = get_thresholds(thresholdss, total_num_valid_gt)
                thresholds = np.array(thresholds)
                # second pass: accumulate tp/fp/fn/similarity per threshold
                pr = np.zeros([len(thresholds), 4])
                idx = 0
                for j, num_part in enumerate(split_parts):
                    gt_datas_part = np.concatenate(
                        gt_datas_list[idx:idx + num_part], 0)
                    dt_datas_part = np.concatenate(
                        dt_datas_list[idx:idx + num_part], 0)
                    dc_datas_part = np.concatenate(
                        dontcares[idx:idx + num_part], 0)
                    ignored_dets_part = np.concatenate(
                        ignored_dets[idx:idx + num_part], 0)
                    ignored_gts_part = np.concatenate(
                        ignored_gts[idx:idx + num_part], 0)
                    fused_compute_statistics(
                        parted_overlaps[j],
                        pr,
                        total_gt_num[idx:idx + num_part],
                        total_dt_num[idx:idx + num_part],
                        total_dc_num[idx:idx + num_part],
                        gt_datas_part,
                        dt_datas_part,
                        dc_datas_part,
                        ignored_gts_part,
                        ignored_dets_part,
                        metric,
                        min_overlap=min_overlap,
                        thresholds=thresholds,
                        compute_aos=compute_aos)
                    idx += num_part
                for i in range(len(thresholds)):
                    recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
                    precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
                    if compute_aos:
                        aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
                # interpolate: each point becomes the max over later points
                for i in range(len(thresholds)):
                    precision[m, l, k, i] = np.max(
                        precision[m, l, k, i:], axis=-1)
                    recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
                    if compute_aos:
                        aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
    ret_dict = {
        "recall": recall,
        "precision": precision,
        "orientation": aos,
    }
    return ret_dict
def get_mAP(prec):
    """11-point interpolated AP in percent: average the precision at every
    4th recall sample (0, 4, ..., 40) along the last axis."""
    return np.sum(prec[..., ::4], axis=-1) / 11 * 100
def get_mAP_R40(prec):
    """R40 AP in percent: average the precision over the 40 recall samples
    after the first one along the last axis."""
    return np.sum(prec[..., 1:], axis=-1) / 40 * 100
def print_str(value, *arg, sstream=None):
    """Render a print() call into a string and return it; a caller-supplied
    StringIO buffer is reset and reused."""
    buf = sysio.StringIO() if sstream is None else sstream
    buf.seek(0)
    buf.truncate(0)
    print(value, *arg, file=buf)
    return buf.getvalue()
def do_eval(gt_annos,
            dt_annos,
            current_classes,
            min_overlaps,
            compute_aos=False,
            PR_detail_dict=None):
    """Run eval_class for the bbox/bev/3d metrics (plus AOS when requested)
    over all three difficulties.

    Returns a 13-tuple: the recall sampling grid, the four precision curves
    (bbox, bev, 3d, aos -- aos entries are None without compute_aos), then
    the 11-point mAPs (bbox, bev, 3d, aos) and the R40 mAPs in the same
    order.
    """
    # min_overlaps: [num_minoverlap, metric, num_class]
    difficultys = [0, 1, 2]
    N_SAMPLE_PTS = 41
    mAP_recalls = np.linspace(0.0, 1.0, num=N_SAMPLE_PTS, endpoint=True)
    ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 0,
                     min_overlaps, compute_aos)
    # ret: [num_class, num_diff, num_minoverlap, num_sample_points]
    mAP_bbox_precisions = ret["precision"]
    mAP_bbox = get_mAP(ret["precision"])
    mAP_bbox_R40 = get_mAP_R40(ret["precision"])

    if PR_detail_dict is not None:
        PR_detail_dict['bbox'] = ret['precision']

    mAP_aos_precisions = mAP_aos = mAP_aos_R40 = None
    if compute_aos:
        mAP_aos_precisions = ret["orientation"]
        mAP_aos = get_mAP(ret["orientation"])
        mAP_aos_R40 = get_mAP_R40(ret["orientation"])

        if PR_detail_dict is not None:
            PR_detail_dict['aos'] = ret['orientation']

    ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1,
                     min_overlaps)
    mAP_bev_precisions = ret["precision"]
    mAP_bev = get_mAP(ret["precision"])
    mAP_bev_R40 = get_mAP_R40(ret["precision"])

    if PR_detail_dict is not None:
        PR_detail_dict['bev'] = ret['precision']

    ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2,
                     min_overlaps)
    mAP_3d_precisions = ret["precision"]
    mAP_3d = get_mAP(ret["precision"])
    mAP_3d_R40 = get_mAP_R40(ret["precision"])

    if PR_detail_dict is not None:
        PR_detail_dict['3d'] = ret['precision']
    return mAP_recalls, mAP_bbox_precisions, mAP_bev_precisions, mAP_3d_precisions, mAP_aos_precisions, \
           mAP_bbox, mAP_bev, mAP_3d, mAP_aos, mAP_bbox_R40, mAP_bev_R40, mAP_3d_R40, mAP_aos_R40
def do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges,
                       compute_aos):
    """COCO-style eval: average the 11-point mAP over a range of IoU
    thresholds.

    :param overlap_ranges: (3, num_metric, num_class) array of
        [start, stop, num] per metric/class, expanded with np.linspace.
    :return: (mAP_bbox, mAP_bev, mAP_3d, mAP_aos), each averaged over the
        IoU thresholds; mAP_aos is None when compute_aos is False.
    """
    # overlap_ranges: [range, metric, num_class]
    min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
    for i in range(overlap_ranges.shape[1]):
        for j in range(overlap_ranges.shape[2]):
            # np.linspace requires an integer sample count; the ranges array
            # stores it as a float
            start, stop, num = overlap_ranges[:, i, j]
            min_overlaps[:, i, j] = np.linspace(start, stop, int(num))
    # BUGFIX: do_eval returns a 13-tuple (recall grid, 4 precision curves,
    # then the mAP values); the previous 4-way unpack raised a ValueError.
    ret = do_eval(
        gt_annos, dt_annos, current_classes, min_overlaps, compute_aos)
    mAP_bbox, mAP_bev, mAP_3d, mAP_aos = ret[5], ret[6], ret[7], ret[8]
    # ret: [num_class, num_diff, num_minoverlap]
    mAP_bbox = mAP_bbox.mean(-1)
    mAP_bev = mAP_bev.mean(-1)
    mAP_3d = mAP_3d.mean(-1)
    if mAP_aos is not None:
        mAP_aos = mAP_aos.mean(-1)
    return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
def get_official_eval_result(gt_annos, dt_annos, current_classes, PR_detail_dict=None):
    """Run the official KITTI evaluation and format the report.

    Returns (result, ret_dict): `result` is the printable text block with the
    11-point and R40 APs for bbox/bev/3d (and AOS when detections carry a
    valid alpha); `ret_dict` holds the R40 scalars and PR curves keyed by
    class/metric for logging.
    """
    # rows: easy / moderate / hard; columns follow class_to_name order
    overlap_0_7 = np.array([[0.7, 0.5, 0.5, 0.7,
                             0.5, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7],
                            [0.7, 0.5, 0.5, 0.7, 0.5, 0.7]])
    overlap_0_5 = np.array([[0.7, 0.5, 0.5, 0.7,
                             0.5, 0.5], [0.5, 0.25, 0.25, 0.5, 0.25, 0.5],
                            [0.5, 0.25, 0.25, 0.5, 0.25, 0.5]])
    min_overlaps = np.stack([overlap_0_7, overlap_0_5], axis=0)  # [2, 3, 6]: [num_overlap, difficulty, num_class]
    class_to_name = {
        0: 'Car',
        1: 'Pedestrian',
        2: 'Cyclist',
        3: 'Van',
        4: 'Person_sitting',
        5: 'Truck'
    }
    name_to_class = {v: n for n, v in class_to_name.items()}
    if not isinstance(current_classes, (list, tuple)):
        current_classes = [current_classes]
    current_classes_int = []
    for curcls in current_classes:
        if isinstance(curcls, str):
            current_classes_int.append(name_to_class[curcls])
        else:
            current_classes_int.append(curcls)
    current_classes = current_classes_int
    min_overlaps = min_overlaps[:, :, current_classes]
    result = ''
    # check whether alpha is valid (-10 marks "no orientation" detections)
    compute_aos = False
    for anno in dt_annos:
        if anno['alpha'].shape[0] != 0:
            if anno['alpha'][0] != -10:
                compute_aos = True
            break
    mAP_recalls, mAPbbox_precisions, mAPbev_precisions, mAP3d_precisions, mAPaos_precisions, \
    mAPbbox, mAPbev, mAP3d, mAPaos, mAPbbox_R40, mAPbev_R40, mAP3d_R40, mAPaos_R40 = do_eval(
        gt_annos, dt_annos, current_classes, min_overlaps, compute_aos, PR_detail_dict=PR_detail_dict)

    ret_dict = {}
    for j, curcls in enumerate(current_classes):
        # mAP threshold array: [num_minoverlap, metric, class]
        # mAP result: [num_class, num_diff, num_minoverlap]
        for i in range(min_overlaps.shape[0]):
            result += print_str(
                (f"{class_to_name[curcls]} "
                 "AP@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j])))
            result += print_str((f"bbox AP:{mAPbbox[j, 0, i]:.4f}, "
                                 f"{mAPbbox[j, 1, i]:.4f}, "
                                 f"{mAPbbox[j, 2, i]:.4f}"))
            result += print_str((f"bev  AP:{mAPbev[j, 0, i]:.4f}, "
                                 f"{mAPbev[j, 1, i]:.4f}, "
                                 f"{mAPbev[j, 2, i]:.4f}"))
            result += print_str((f"3d   AP:{mAP3d[j, 0, i]:.4f}, "
                                 f"{mAP3d[j, 1, i]:.4f}, "
                                 f"{mAP3d[j, 2, i]:.4f}"))

            if compute_aos:
                result += print_str((f"aos  AP:{mAPaos[j, 0, i]:.2f}, "
                                     f"{mAPaos[j, 1, i]:.2f}, "
                                     f"{mAPaos[j, 2, i]:.2f}"))
                # if i == 0:
                   # ret_dict['%s_aos/easy' % class_to_name[curcls]] = mAPaos[j, 0, 0]
                   # ret_dict['%s_aos/moderate' % class_to_name[curcls]] = mAPaos[j, 1, 0]
                   # ret_dict['%s_aos/hard' % class_to_name[curcls]] = mAPaos[j, 2, 0]

            result += print_str(
                (f"{class_to_name[curcls]} "
                 "AP_R40@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j])))
            result += print_str((f"bbox AP:{mAPbbox_R40[j, 0, i]:.4f}, "
                                 f"{mAPbbox_R40[j, 1, i]:.4f}, "
                                 f"{mAPbbox_R40[j, 2, i]:.4f}"))
            result += print_str((f"bev  AP:{mAPbev_R40[j, 0, i]:.4f}, "
                                 f"{mAPbev_R40[j, 1, i]:.4f}, "
                                 f"{mAPbev_R40[j, 2, i]:.4f}"))
            result += print_str((f"3d   AP:{mAP3d_R40[j, 0, i]:.4f}, "
                                 f"{mAP3d_R40[j, 1, i]:.4f}, "
                                 f"{mAP3d_R40[j, 2, i]:.4f}"))
            if compute_aos:
                result += print_str((f"aos  AP:{mAPaos_R40[j, 0, i]:.2f}, "
                                     f"{mAPaos_R40[j, 1, i]:.2f}, "
                                     f"{mAPaos_R40[j, 2, i]:.2f}"))
                # only the strict-overlap (i == 0) numbers go into ret_dict
                if i == 0:
                    ret_dict['%s_aos/mAP_curve_R40' % class_to_name[curcls]] = {
                        'precisions': {
                            'Easy': mAPaos_precisions[j, 0, 0, :],
                            'Moderate': mAPaos_precisions[j, 1, 0, :],
                            'Hard': mAPaos_precisions[j, 2, 0, :]
                        },
                        'recalls': mAP_recalls
                    }
                    ret_dict['%s_aos/easy_R40' % class_to_name[curcls]] = mAPaos_R40[j, 0, 0]
                    ret_dict['%s_aos/moderate_R40' % class_to_name[curcls]] = mAPaos_R40[j, 1, 0]
                    ret_dict['%s_aos/hard_R40' % class_to_name[curcls]] = mAPaos_R40[j, 2, 0]

            if i == 0:
                # ret_dict['%s_3d/easy' % class_to_name[curcls]] = mAP3d[j, 0, 0]
                # ret_dict['%s_3d/moderate' % class_to_name[curcls]] = mAP3d[j, 1, 0]
                # ret_dict['%s_3d/hard' % class_to_name[curcls]] = mAP3d[j, 2, 0]
                # ret_dict['%s_bev/easy' % class_to_name[curcls]] = mAPbev[j, 0, 0]
                # ret_dict['%s_bev/moderate' % class_to_name[curcls]] = mAPbev[j, 1, 0]
                # ret_dict['%s_bev/hard' % class_to_name[curcls]] = mAPbev[j, 2, 0]
                # ret_dict['%s_image/easy' % class_to_name[curcls]] = mAPbbox[j, 0, 0]
                # ret_dict['%s_image/moderate' % class_to_name[curcls]] = mAPbbox[j, 1, 0]
                # ret_dict['%s_image/hard' % class_to_name[curcls]] = mAPbbox[j, 2, 0]
                ret_dict['%s_3d/mAP_curve_R40' % class_to_name[curcls]] = {
                    'precisions': {
                        'Easy': mAP3d_precisions[j, 0, 0, :],
                        'Moderate': mAP3d_precisions[j, 1, 0, :],
                        'Hard': mAP3d_precisions[j, 2, 0, :]
                    },
                    'recalls': mAP_recalls
                }
                ret_dict['%s_bev/mAP_curve_R40' % class_to_name[curcls]] = {
                    'precisions': {
                        'Easy': mAPbev_precisions[j, 0, 0, :],
                        'Moderate': mAPbev_precisions[j, 1, 0, :],
                        'Hard': mAPbev_precisions[j, 2, 0, :]
                    },
                    'recalls': mAP_recalls
                }
                ret_dict['%s_image/mAP_curve_R40' % class_to_name[curcls]] = {
                    'precisions': {
                        'Easy': mAPbbox_precisions[j, 0, 0, :],
                        'Moderate': mAPbbox_precisions[j, 1, 0, :],
                        'Hard': mAPbbox_precisions[j, 2, 0, :]
                    },
                    'recalls': mAP_recalls
                }
                ret_dict['%s_3d/easy_R40' % class_to_name[curcls]] = mAP3d_R40[j, 0, 0]
                ret_dict['%s_3d/moderate_R40' % class_to_name[curcls]] = mAP3d_R40[j, 1, 0]
                ret_dict['%s_3d/hard_R40' % class_to_name[curcls]] = mAP3d_R40[j, 2, 0]
                ret_dict['%s_bev/easy_R40' % class_to_name[curcls]] = mAPbev_R40[j, 0, 0]
                ret_dict['%s_bev/moderate_R40' % class_to_name[curcls]] = mAPbev_R40[j, 1, 0]
                ret_dict['%s_bev/hard_R40' % class_to_name[curcls]] = mAPbev_R40[j, 2, 0]
                ret_dict['%s_image/easy_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 0, 0]
                ret_dict['%s_image/moderate_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 1, 0]
                ret_dict['%s_image/hard_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 2, 0]

    return result, ret_dict
def get_coco_eval_result(gt_annos, dt_annos, current_classes):
    """Run the COCO-style (IoU-range-averaged) KITTI evaluation and return a
    printable report string."""
    class_to_name = {
        0: 'Car',
        1: 'Pedestrian',
        2: 'Cyclist',
        3: 'Van',
        4: 'Person_sitting',
    }
    # [start, stop, num] linspace parameters of the IoU threshold sweep
    class_to_range = {
        0: [0.5, 0.95, 10],
        1: [0.25, 0.7, 10],
        2: [0.25, 0.7, 10],
        3: [0.5, 0.95, 10],
        4: [0.25, 0.7, 10],
    }
    name_to_class = {v: n for n, v in class_to_name.items()}
    if not isinstance(current_classes, (list, tuple)):
        current_classes = [current_classes]
    current_classes_int = []
    for curcls in current_classes:
        if isinstance(curcls, str):
            current_classes_int.append(name_to_class[curcls])
        else:
            current_classes_int.append(curcls)
    current_classes = current_classes_int
    overlap_ranges = np.zeros([3, 3, len(current_classes)])
    for i, curcls in enumerate(current_classes):
        # same threshold sweep for all three metrics of this class
        overlap_ranges[:, :, i] = np.array(
            class_to_range[curcls])[:, np.newaxis]
    result = ''
    # check whether alpha is valid (-10 marks "no orientation" detections)
    compute_aos = False
    for anno in dt_annos:
        if anno['alpha'].shape[0] != 0:
            if anno['alpha'][0] != -10:
                compute_aos = True
            break
    mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(
        gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos)
    for j, curcls in enumerate(current_classes):
        # mAP threshold array: [num_minoverlap, metric, class]
        # mAP result: [num_class, num_diff, num_minoverlap]
        o_range = np.array(class_to_range[curcls])[[0, 2, 1]]
        o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)
        result += print_str((f"{class_to_name[curcls]} "
                             "coco AP@{:.2f}:{:.2f}:{:.2f}:".format(*o_range)))
        result += print_str((f"bbox AP:{mAPbbox[j, 0]:.2f}, "
                             f"{mAPbbox[j, 1]:.2f}, "
                             f"{mAPbbox[j, 2]:.2f}"))
        result += print_str((f"bev  AP:{mAPbev[j, 0]:.2f}, "
                             f"{mAPbev[j, 1]:.2f}, "
                             f"{mAPbev[j, 2]:.2f}"))
        result += print_str((f"3d   AP:{mAP3d[j, 0]:.2f}, "
                             f"{mAP3d[j, 1]:.2f}, "
                             f"{mAP3d[j, 2]:.2f}"))
        if compute_aos:
            result += print_str((f"aos  AP:{mAPaos[j, 0]:.2f}, "
                                 f"{mAPaos[j, 1]:.2f}, "
                                 f"{mAPaos[j, 2]:.2f}"))
    return result
| 35,743
| 41.051765
| 105
|
py
|
SASA
|
SASA-main/pcdet/utils/box_utils.py
|
import numpy as np
import scipy
import torch
import copy
from scipy.spatial import Delaunay
from ..ops.roiaware_pool3d import roiaware_pool3d_utils
from . import common_utils
def in_hull(p, hull):
    """
    Test which points lie inside a convex hull.
    :param p: (N, K) test points
    :param hull: (M, K) M corners of a box, or a precomputed Delaunay object
    :return (N) bool mask
    """
    try:
        if not isinstance(hull, Delaunay):
            hull = Delaunay(hull)
        flag = hull.find_simplex(p) >= 0
    # BUGFIX: `scipy.spatial.qhull` was a private path removed in SciPy 1.8;
    # the public alias is scipy.spatial.QhullError.
    except scipy.spatial.QhullError:
        # degenerate input (e.g. coplanar corners) cannot form a hull
        print('Warning: not a hull %s' % str(hull))
        # BUGFIX: np.bool was removed in NumPy 1.24 -- use the builtin bool
        flag = np.zeros(p.shape[0], dtype=bool)
    return flag
def boxes_to_corners_3d(boxes3d):
    """
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    Args:
        boxes3d:  (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center

    Returns:
        corners: (N, 8, 3)
    """
    boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)

    # unit-cube corner offsets, scaled by box size, then rotated/translated
    unit_corners = boxes3d.new_tensor((
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    )) / 2

    corners = boxes3d[:, None, 3:6].repeat(1, 8, 1) * unit_corners[None, :, :]
    corners = common_utils.rotate_points_along_z(
        corners.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
    corners = corners + boxes3d[:, None, 0:3]

    return corners.numpy() if is_numpy else corners
def mask_boxes_outside_range_numpy(boxes, limit_range, min_num_corners=1):
    """
    Args:
        boxes: (N, 7) [x, y, z, dx, dy, dz, heading, ...], (x, y, z) is the box center
        limit_range: [minx, miny, minz, maxx, maxy, maxz]
        min_num_corners: keep a box when at least this many of its 8 corners
            fall inside the range

    Returns:
        mask: (N) bool
    """
    geometry = boxes[:, 0:7] if boxes.shape[1] > 7 else boxes
    corners = boxes_to_corners_3d(geometry)  # (N, 8, 3)
    corner_inside = np.logical_and(
        corners >= limit_range[0:3], corners <= limit_range[3:6]).all(axis=2)
    return corner_inside.sum(axis=1) >= min_num_corners  # (N)
def remove_points_in_boxes3d(points, boxes3d):
    """
    Args:
        points: (num_points, 3 + C)
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps

    Returns:
        the points that fall inside none of the boxes
    """
    boxes3d, _ = common_utils.check_numpy_to_torch(boxes3d)
    points, points_were_numpy = common_utils.check_numpy_to_torch(points)
    inside_masks = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3], boxes3d)
    kept = points[inside_masks.sum(dim=0) == 0]
    return kept.numpy() if points_were_numpy else kept
def boxes3d_kitti_camera_to_lidar(boxes3d_camera, calib):
    """
    Convert boxes from KITTI rect-camera coordinates to lidar coordinates.

    Args:
        boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
        calib: calibration object providing rect_to_lidar()
    Returns:
        boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    """
    boxes = copy.deepcopy(boxes3d_camera)
    xyz_camera = boxes[:, 0:3]
    l, h, w = boxes[:, 3:4], boxes[:, 4:5], boxes[:, 5:6]
    r = boxes[:, 6:7]

    xyz_lidar = calib.rect_to_lidar(xyz_camera)
    # camera boxes are anchored at the bottom face; shift up to the volume center
    xyz_lidar[:, 2] += h[:, 0] / 2
    return np.concatenate([xyz_lidar, l, w, h, -(r + np.pi / 2)], axis=-1)
def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar):
    """
    Convert boxes from the old (fake) lidar format to the unified lidar format.

    Args:
        boxes3d_lidar: (N, 7) [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center
    Returns:
        boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    """
    boxes = copy.deepcopy(boxes3d_lidar)
    w, l, h = boxes[:, 3:4], boxes[:, 4:5], boxes[:, 5:6]
    r = boxes[:, 6:7]
    # the old format anchors z on the bottom face; move it to the box center
    boxes[:, 2] += h[:, 0] / 2
    # reorder sizes to (dx=l, dy=w, dz=h) and convert the heading convention
    return np.concatenate([boxes[:, 0:3], l, w, h, -(r + np.pi / 2)], axis=-1)
def boxes3d_kitti_lidar_to_fakelidar(boxes3d_lidar):
    """
    Convert boxes from the unified lidar format back to the old (fake) lidar format.

    Args:
        boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    Returns:
        boxes3d_fakelidar: (N, 7) [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center
    """
    boxes = copy.deepcopy(boxes3d_lidar)
    dx, dy, dz = boxes[:, 3:4], boxes[:, 4:5], boxes[:, 5:6]
    heading = boxes[:, 6:7]
    # move z from the volume center down to the bottom face
    boxes[:, 2] -= dz[:, 0] / 2
    # old format stores (w=dy, l=dx) and inverts the heading convention
    return np.concatenate([boxes[:, 0:3], dy, dx, dz, -heading - np.pi / 2], axis=-1)
def enlarge_box3d(boxes3d, extra_width=(0, 0, 0)):
    """
    Grow each box by a fixed margin along each axis.

    Args:
        boxes3d: (N, 7+) [x, y, z, dx, dy, dz, heading, ...], (x, y, z) is the box center
        extra_width: [extra_x, extra_y, extra_z] added to (dx, dy, dz)
    Returns:
        enlarged boxes, same shape as the input
    """
    # NOTE(review): numpy inputs are returned as torch tensors (no conversion
    # back) — confirm callers expect this.
    boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
    enlarged = boxes3d.clone()
    enlarged[:, 3:6] = enlarged[:, 3:6] + boxes3d.new_tensor(extra_width)[None, :]
    return enlarged
def boxes3d_lidar_to_kitti_camera(boxes3d_lidar, calib):
    """
    Convert boxes from lidar coordinates to KITTI rect-camera coordinates.

    :param boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    :param calib: calibration object providing lidar_to_rect()
    :return:
        boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
    """
    boxes = copy.deepcopy(boxes3d_lidar)
    xyz_lidar = boxes[:, 0:3]
    l, w, h = boxes[:, 3:4], boxes[:, 4:5], boxes[:, 5:6]
    r = boxes[:, 6:7]

    # camera boxes are anchored at the bottom face: shift down by half the height
    xyz_lidar[:, 2] -= h.reshape(-1) / 2
    xyz_cam = calib.lidar_to_rect(xyz_lidar)
    # xyz_cam[:, 1] += h.reshape(-1) / 2
    r_cam = -r - np.pi / 2
    return np.concatenate([xyz_cam, l, h, w, r_cam], axis=-1)
def boxes3d_to_corners3d_kitti_camera(boxes3d, bottom_center=True):
    """
    Compute the 8 corner coordinates of boxes given in KITTI camera coords.

    :param boxes3d: (N, 7) [x, y, z, l, h, w, ry] in camera coords, see the definition of ry in KITTI dataset
    :param bottom_center: whether the box origin y lies on the bottom face (KITTI convention)
    :return: corners3d: (N, 8, 3)
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    """
    num = boxes3d.shape[0]
    l, h, w = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5]

    # per-box corner offsets in the un-rotated box frame, each (N, 8)
    x_corners = np.array([l / 2., l / 2., -l / 2., -l / 2., l / 2., l / 2., -l / 2., -l / 2], dtype=np.float32).T
    z_corners = np.array([w / 2., -w / 2., -w / 2., w / 2., w / 2., -w / 2., -w / 2., w / 2.], dtype=np.float32).T
    if bottom_center:
        # origin on the bottom face: top corners sit at -h (camera y points down)
        y_corners = np.zeros((num, 8), dtype=np.float32)
        y_corners[:, 4:8] = -h.reshape(num, 1).repeat(4, axis=1)
    else:
        y_corners = np.array([h / 2., h / 2., h / 2., h / 2., -h / 2., -h / 2., -h / 2., -h / 2.], dtype=np.float32).T

    # rotation about the camera y axis, built as (3, 3, N) then moved to (N, 3, 3)
    ry = boxes3d[:, 6]
    zeros, ones = np.zeros(ry.size, dtype=np.float32), np.ones(ry.size, dtype=np.float32)
    rot = np.array([[np.cos(ry), zeros, -np.sin(ry)],
                    [zeros, ones, zeros],
                    [np.sin(ry), zeros, np.cos(ry)]])
    rot = np.transpose(rot, (2, 0, 1))

    local = np.concatenate((x_corners.reshape(-1, 8, 1),
                            y_corners.reshape(-1, 8, 1),
                            z_corners.reshape(-1, 8, 1)), axis=2)  # (N, 8, 3)
    rotated = np.matmul(local, rot)  # row-vector convention: corners @ R

    # translate to the box center (broadcast over the 8 corners)
    corners = rotated + boxes3d[:, None, 0:3]
    return corners.astype(np.float32)
def boxes3d_kitti_camera_to_imageboxes(boxes3d, calib, image_shape=None):
    """
    Project 3D camera-frame boxes onto the image plane as axis-aligned 2D boxes.

    :param boxes3d: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
    :param calib: calibration object providing rect_to_img()
    :param image_shape: optional (H, W); when given, boxes are clipped to the image
    :return:
        box_2d_preds: (N, 4) [x1, y1, x2, y2]
    """
    corners3d = boxes3d_to_corners3d_kitti_camera(boxes3d)
    pts_img, _ = calib.rect_to_img(corners3d.reshape(-1, 3))
    corners_in_image = pts_img.reshape(-1, 8, 2)

    # the 2D box is the image-plane extent of the 8 projected corners
    min_uv = np.min(corners_in_image, axis=1)  # (N, 2)
    max_uv = np.max(corners_in_image, axis=1)  # (N, 2)
    boxes2d_image = np.concatenate([min_uv, max_uv], axis=1)

    if image_shape is not None:
        # clip x coordinates to [0, W-1] and y coordinates to [0, H-1]
        for col, bound in ((0, image_shape[1]), (1, image_shape[0]),
                           (2, image_shape[1]), (3, image_shape[0])):
            boxes2d_image[:, col] = np.clip(boxes2d_image[:, col], a_min=0, a_max=bound - 1)
    return boxes2d_image
def boxes_iou_normal(boxes_a, boxes_b):
    """
    Pairwise IoU between two sets of axis-aligned 2D boxes.

    Args:
        boxes_a: (N, 4) [x1, y1, x2, y2]
        boxes_b: (M, 4) [x1, y1, x2, y2]
    Returns:
        iou: (N, M)
    """
    assert boxes_a.shape[1] == boxes_b.shape[1] == 4
    # pairwise intersection rectangle, clamped to zero extent for disjoint pairs
    left = torch.max(boxes_a[:, 0, None], boxes_b[None, :, 0])
    right = torch.min(boxes_a[:, 2, None], boxes_b[None, :, 2])
    top = torch.max(boxes_a[:, 1, None], boxes_b[None, :, 1])
    bottom = torch.min(boxes_a[:, 3, None], boxes_b[None, :, 3])
    inter = torch.clamp_min(right - left, min=0) * torch.clamp_min(bottom - top, min=0)

    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    # the 1e-6 floor guards against division by zero for degenerate boxes
    union = torch.clamp_min(area_a[:, None] + area_b[None, :] - inter, min=1e-6)
    return inter / union
def boxes3d_lidar_to_aligned_bev_boxes(boxes3d):
    """
    Approximate rotated lidar boxes with axis-aligned BEV boxes.

    Each box keeps (dx, dy) when its heading is closer to the x axis and swaps
    them when it is closer to the y axis.

    Args:
        boxes3d: (N, 7 + C) [x, y, z, dx, dy, dz, heading] in lidar coordinate
    Returns:
        aligned_bev_boxes: (N, 4) [x1, y1, x2, y2] in the above lidar coordinate
    """
    # heading folded into [-pi/2, pi/2) and compared against 45 degrees
    rot = common_utils.limit_period(boxes3d[:, 6], offset=0.5, period=np.pi).abs()
    dims = torch.where(rot[:, None] < np.pi / 4, boxes3d[:, [3, 4]], boxes3d[:, [4, 3]])
    half = dims / 2
    return torch.cat((boxes3d[:, 0:2] - half, boxes3d[:, 0:2] + half), dim=1)
def boxes3d_nearest_bev_iou(boxes_a, boxes_b):
    """
    BEV IoU of two sets of 3D boxes after snapping each to the nearest axis.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
    Returns:
        iou: (N, M)
    """
    bev_a = boxes3d_lidar_to_aligned_bev_boxes(boxes_a)
    bev_b = boxes3d_lidar_to_aligned_bev_boxes(boxes_b)
    return boxes_iou_normal(bev_a, bev_b)
| 10,568
| 34.466443
| 118
|
py
|
SASA
|
SASA-main/pcdet/utils/loss_utils.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import box_utils
from ..ops.roiaware_pool3d import roiaware_pool3d_utils
class SigmoidFocalClassificationLoss(nn.Module):
    """
    Sigmoid focal cross entropy loss.
    """

    def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
        """
        Args:
            gamma: Weighting parameter to balance loss for hard and easy examples.
            alpha: Weighting parameter to balance loss for positive and negative examples.
        """
        super(SigmoidFocalClassificationLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma

    @staticmethod
    def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
        """ Numerically stable sigmoid cross entropy, PyTorch port of
        tf.nn.sigmoid_cross_entropy_with_logits:
            max(x, 0) - x * z + log(1 + exp(-abs(x)))

        Args:
            input: (B, #anchors, #classes) float tensor. Predicted logits.
            target: (B, #anchors, #classes) float tensor. One-hot targets.
        Returns:
            loss: (B, #anchors, #classes) unreduced cross entropy loss.
        """
        return torch.clamp(input, min=0) - input * target + \
            torch.log1p(torch.exp(-torch.abs(input)))

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """
        Args:
            input: (B, #anchors, #classes) float tensor. Predicted logits.
            target: (B, #anchors, #classes) float tensor. One-hot targets.
            weights: (B, #anchors) float tensor. Anchor-wise weights.
        Returns:
            weighted_loss: (B, #anchors, #classes) float tensor after weighting.
        """
        probs = torch.sigmoid(input)
        # alpha balances positives vs negatives; pt is the "miss" probability,
        # so easy examples (small pt) are down-weighted by pt ** gamma
        alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
        pt = target * (1.0 - probs) + (1.0 - target) * probs
        focal_weight = alpha_weight * torch.pow(pt, self.gamma)
        loss = focal_weight * self.sigmoid_cross_entropy_with_logits(input, target)

        if weights.dim() == 2 or (weights.dim() == 1 and target.dim() == 2):
            weights = weights.unsqueeze(-1)
        assert weights.dim() == loss.dim()
        return loss * weights
class WeightedSmoothL1Loss(nn.Module):
    """
    Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
    https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
                  | 0.5 * x ** 2 / beta   if abs(x) < beta
    smoothl1(x) = |
                  | abs(x) - 0.5 * beta   otherwise,
    where x = input - target.
    """

    def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
        """
        Args:
            beta: Scalar float. L1 to L2 change point.
                For beta values < 1e-5, L1 loss is computed.
            code_weights: (#codes) float list if not None. Code-wise weights.
        """
        super(WeightedSmoothL1Loss, self).__init__()
        self.beta = beta
        self.code_weights = code_weights
        if code_weights is not None:
            # weights live on GPU; assumes CUDA is available when code_weights is set
            self.code_weights = torch.from_numpy(np.array(code_weights, dtype=np.float32)).cuda()

    @staticmethod
    def smooth_l1_loss(diff, beta):
        # a tiny beta degenerates to plain L1 to avoid dividing by ~0
        if beta < 1e-5:
            return torch.abs(diff)
        abs_diff = torch.abs(diff)
        return torch.where(abs_diff < beta, 0.5 * abs_diff ** 2 / beta, abs_diff - 0.5 * beta)

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
        """
        Args:
            input: (B, #anchors, #codes) float tensor. Encoded predicted locations.
            target: (B, #anchors, #codes) float tensor. Regression targets.
            weights: (B, #anchors) float tensor if not None.
        Returns:
            loss: (B, #anchors, #codes) weighted smooth-l1 loss without reduction.
        """
        # NaN targets contribute zero loss (diff becomes exactly 0)
        target = torch.where(torch.isnan(target), input, target)
        diff = input - target
        # code-wise weighting (applied before the smooth-l1 kink)
        if self.code_weights is not None:
            diff = diff * self.code_weights.view(1, 1, -1)
        loss = self.smooth_l1_loss(diff, self.beta)
        # anchor-wise weighting
        if weights is not None:
            assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
            loss = loss * weights.unsqueeze(-1)
        return loss
class WeightedL1Loss(nn.Module):
    """
    Code-wise weighted L1 loss with optional anchor-wise weighting.
    """

    def __init__(self, code_weights: list = None):
        """
        Args:
            code_weights: (#codes) float list if not None. Code-wise weights.
        """
        super(WeightedL1Loss, self).__init__()
        self.code_weights = code_weights
        if code_weights is not None:
            # weights live on GPU; assumes CUDA is available when code_weights is set
            self.code_weights = torch.from_numpy(np.array(code_weights, dtype=np.float32)).cuda()

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
        """
        Args:
            input: (B, #anchors, #codes) float tensor. Encoded predicted locations.
            target: (B, #anchors, #codes) float tensor. Regression targets.
            weights: (B, #anchors) float tensor if not None.
        Returns:
            loss: (B, #anchors, #codes) weighted L1 loss without reduction.
        """
        # NaN targets contribute zero loss (diff becomes exactly 0)
        target = torch.where(torch.isnan(target), input, target)
        diff = input - target
        # code-wise weighting is applied before taking the absolute value
        if self.code_weights is not None:
            diff = diff * self.code_weights.view(1, 1, -1)
        loss = torch.abs(diff)
        # anchor-wise weighting
        if weights is not None:
            assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
            loss = loss * weights.unsqueeze(-1)
        return loss
class WeightedBinaryCrossEntropyLoss(nn.Module):
    """
    Binary cross entropy on logits with anchor-wise weighting.
    """

    def __init__(self):
        super(WeightedBinaryCrossEntropyLoss, self).__init__()

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """
        Args:
            input: (B, #anchors, #classes) float tensor. Predicted logits.
            target: (B, #anchors, #classes) float tensor. One-hot targets.
            weights: (B, #anchors) float tensor. Anchor-wise weights.
        Returns:
            loss: (B, #anchors) weighted cross entropy loss without reduction.
        """
        # per-anchor BCE averaged over classes, then scaled by the anchor weight
        per_class = F.binary_cross_entropy_with_logits(input, target, reduction='none')
        return per_class.mean(dim=-1) * weights
class WeightedCrossEntropyLoss(nn.Module):
    """
    Multi-class cross entropy on logits with anchor-wise weighting.
    """

    def __init__(self):
        super(WeightedCrossEntropyLoss, self).__init__()

    def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
        """
        Args:
            input: (B, #anchors, #classes) float tensor. Predicted logits.
            target: (B, #anchors, #classes) float tensor. One-hot targets.
            weights: (B, #anchors) float tensor. Anchor-wise weights.
        Returns:
            loss: (B, #anchors) weighted cross entropy loss without reduction.
        """
        # F.cross_entropy expects (B, #classes, #anchors) logits and class indices
        logits = input.permute(0, 2, 1)
        labels = target.argmax(dim=-1)
        return F.cross_entropy(logits, labels, reduction='none') * weights
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
    """
    Corner-distance regression loss, invariant to a 180-degree heading flip.

    Args:
        pred_bbox3d: (N, 7) float Tensor.
        gt_bbox3d: (N, 7) float Tensor.
    Returns:
        corner_loss: (N) float Tensor.
    """
    assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]

    pred_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
    gt_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)

    # also compare against the gt box rotated by pi: the better match wins,
    # so a flipped heading prediction is not penalized
    gt_flipped = gt_bbox3d.clone()
    gt_flipped[:, 6] += np.pi
    gt_corners_flip = box_utils.boxes_to_corners_3d(gt_flipped)

    corner_dist = torch.min(
        torch.norm(pred_corners - gt_corners, dim=2),
        torch.norm(pred_corners - gt_corners_flip, dim=2),
    )  # (N, 8)
    corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)  # (N, 8)
    return corner_loss.mean(dim=1)
class PointSASALoss(nn.Module):
    """
    Layer-wise point segmentation loss, used for SASA.

    For each SA layer that produces point scores, points are labelled as
    foreground (inside a gt box), background, or ignore, and a weighted
    binary segmentation loss is computed per layer.
    """
    def __init__(self,
                 func: str = 'BCE',
                 layer_weights: list = None,
                 extra_width: list = None,
                 set_ignore_flag: bool = False):
        # func: segmentation loss type, 'BCE' or 'Focal'.
        # layer_weights: per-layer loss weights; a zero weight disables a layer.
        # extra_width: optional [dx, dy, dz] enlargement applied to gt boxes.
        # set_ignore_flag: if True, points inside the enlarged box but outside
        #   the original box get label -1 (ignored); requires extra_width.
        super(PointSASALoss, self).__init__()
        self.layer_weights = layer_weights
        if func == 'BCE':
            self.loss_func = WeightedBinaryCrossEntropyLoss()
        elif func == 'Focal':
            self.loss_func = SigmoidFocalClassificationLoss()
        else:
            raise NotImplementedError
        assert not set_ignore_flag or (set_ignore_flag and extra_width is not None)
        self.extra_width = extra_width
        self.set_ignore_flag = set_ignore_flag
    def assign_target(self, points, gt_boxes):
        """
        Args:
            points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]
            gt_boxes: (B, M, 8)
        Returns:
            point_cls_labels: (N1 + N2 + N3 + ...), 0: bg, 1: fg, -1: ignore
        """
        assert len(points.shape) == 2 and points.shape[1] == 4, \
            'points.shape=%s' % str(points.shape)
        assert len(gt_boxes.shape) == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)
        batch_size = gt_boxes.shape[0]
        # optionally enlarge gt boxes before the point-in-box test
        extend_gt_boxes = box_utils.enlarge_box3d(
            gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.extra_width
        ).view(batch_size, -1, gt_boxes.shape[-1]) \
            if self.extra_width is not None else gt_boxes
        bs_idx = points[:, 0]
        point_cls_labels = points.new_zeros(points.shape[0]).long()
        for k in range(batch_size):
            bs_mask = (bs_idx == k)
            points_single = points[bs_mask][:, 1:4]
            point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())
            if not self.set_ignore_flag:
                # fg = inside the (possibly enlarged) boxes
                box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                    points_single.unsqueeze(dim=0),
                    extend_gt_boxes[k:k + 1, :, 0:7].contiguous()
                ).long().squeeze(dim=0)
                box_fg_flag = (box_idxs_of_pts >= 0)
            else:
                # fg = inside the original boxes; the enlarged-only band is ignored
                box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                    points_single.unsqueeze(dim=0),
                    gt_boxes[k:k + 1, :, 0:7].contiguous()
                ).long().squeeze(dim=0)
                box_fg_flag = (box_idxs_of_pts >= 0)
                extend_box_idx_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(
                    points_single.unsqueeze(dim=0),
                    extend_gt_boxes[k:k + 1, :, 0:7].contiguous()
                ).long().squeeze(dim=0)
                # inside enlarged box XOR inside original box -> ignore (-1)
                ignore_flag = box_fg_flag ^ (extend_box_idx_of_pts >= 0)
                point_cls_labels_single[ignore_flag] = -1
            point_cls_labels_single[box_fg_flag] = 1
            point_cls_labels[bs_mask] = point_cls_labels_single
        return point_cls_labels  # (N, ) 0: bg, 1: fg, -1: ignore
    def forward(self, l_points, l_scores, gt_boxes):
        """
        Args:
            l_points: List of points, [(N, 4): bs_idx, x, y, z]
            l_scores: List of points, [(N, 1): predicted point scores]
            gt_boxes: (B, M, 8)
        Returns:
            l_labels: List of labels: [(N, 1): assigned segmentation labels]
        """
        l_labels = []
        for i in range(len(self.layer_weights)):
            li_scores = l_scores[i]
            # skip layers without scores or with zero loss weight
            if li_scores is None or self.layer_weights[i] == 0:
                l_labels.append(None)
                continue
            # binary segmentation labels: 0: bg, 1: fg, -1: ignore
            li_labels = self.assign_target(l_points[i], gt_boxes)
            l_labels.append(li_labels)
        return l_labels
    def loss_forward(self, l_scores, l_labels):
        """
        Args:
            l_scores: List of points, [(N, 1): predicted point scores]
            l_labels: List of points, [(N, 1): assigned segmentation labels]
        Returns:
            l_loss: List of segmentation loss
        """
        l_loss = []
        for i in range(len(self.layer_weights)):
            li_scores, li_labels = l_scores[i], l_labels[i]
            if li_scores is None or li_labels is None:
                l_loss.append(None)
                continue
            positives, negatives = li_labels > 0, li_labels == 0
            # ignored (-1) points get zero weight; fg and bg weigh equally
            cls_weights = positives * 1.0 + negatives * 1.0  # (N, 1)
            pos_normalizer = cls_weights.sum(dim=0).float()
            # one-hot over (bg, fg); only the fg channel is kept to match scores
            one_hot_targets = li_scores.new_zeros(
                *list(li_labels.shape), 2
            )
            one_hot_targets.scatter_(-1, (li_labels > 0).long().unsqueeze(-1), 1.0)
            one_hot_targets = one_hot_targets[:, 1:]  # (N, 1)
            li_loss = self.loss_func(li_scores[None],
                                     one_hot_targets[None],
                                     cls_weights.reshape(1, -1))
            # normalize by the number of valid (non-ignored) points
            li_loss = self.layer_weights[i] * li_loss.sum() / torch.clamp(
                pos_normalizer, min=1.0)
            l_loss.append(li_loss)
        return l_loss
| 14,516
| 35.938931
| 105
|
py
|
SASA
|
SASA-main/pcdet/utils/box_coder_utils.py
|
import numpy as np
import torch
class ResidualCoder(object):
    """
    Anchor-based residual box coder: boxes are encoded as offsets relative to
    anchors (x/y normalized by the anchor BEV diagonal, z by the anchor height,
    sizes as log ratios, heading as a residual or as cos/sin deltas).
    """

    def __init__(self, code_size=7, encode_angle_by_sincos=False, **kwargs):
        super().__init__()
        self.code_size = code_size
        self.encode_angle_by_sincos = encode_angle_by_sincos
        if self.encode_angle_by_sincos:
            # the angle occupies two channels (cos, sin) instead of one
            self.code_size += 1

    def encode_torch(self, boxes, anchors):
        """
        Args:
            boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            anchors: (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...]
        Returns:
            encodings: (N, code_size)
        """
        # NOTE: sizes are clamped in place on the caller's tensors to avoid log(0)
        anchors[:, 3:6] = torch.clamp_min(anchors[:, 3:6], min=1e-5)
        boxes[:, 3:6] = torch.clamp_min(boxes[:, 3:6], min=1e-5)

        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(boxes, 1, dim=-1)

        diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
        center_res = [(xg - xa) / diagonal, (yg - ya) / diagonal, (zg - za) / dza]
        size_res = [torch.log(dxg / dxa), torch.log(dyg / dya), torch.log(dzg / dza)]
        if self.encode_angle_by_sincos:
            angle_res = [torch.cos(rg) - torch.cos(ra), torch.sin(rg) - torch.sin(ra)]
        else:
            angle_res = [rg - ra]
        extra_res = [g - a for g, a in zip(cgs, cas)]
        return torch.cat([*center_res, *size_res, *angle_res, *extra_res], dim=-1)

    def decode_torch(self, box_encodings, anchors):
        """
        Args:
            box_encodings: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...]
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
        Returns:
            decoded boxes (..., 7 + C)
        """
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        if self.encode_angle_by_sincos:
            xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(box_encodings, 1, dim=-1)
        else:
            xt, yt, zt, dxt, dyt, dzt, rt, *cts = torch.split(box_encodings, 1, dim=-1)

        diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
        xg = xt * diagonal + xa
        yg = yt * diagonal + ya
        zg = zt * dza + za
        dxg = torch.exp(dxt) * dxa
        dyg = torch.exp(dyt) * dya
        dzg = torch.exp(dzt) * dza
        if self.encode_angle_by_sincos:
            # recover the absolute angle from the cos/sin residuals
            rg = torch.atan2(sint + torch.sin(ra), cost + torch.cos(ra))
        else:
            rg = rt + ra
        cgs = [t + a for t, a in zip(cts, cas)]
        return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)
class PreviousResidualDecoder(object):
    """
    Decoder for the legacy residual encoding whose sizes are ordered (w, l, h).
    """

    def __init__(self, code_size=7, **kwargs):
        super().__init__()
        self.code_size = code_size

    @staticmethod
    def decode_torch(box_encodings, anchors):
        """
        Args:
            box_encodings: (B, N, 7 + ?) x, y, z, w, l, h, r, custom values
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
        Returns:
            decoded boxes (..., 7 + C)
        """
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)

        diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
        center = [xt * diagonal + xa, yt * diagonal + ya, zt * dza + za]
        # note the w/l swap: lt scales dxa, wt scales dya
        sizes = [torch.exp(lt) * dxa, torch.exp(wt) * dya, torch.exp(ht) * dza]
        heading = rt + ra
        extras = [t + a for t, a in zip(cts, cas)]
        return torch.cat([*center, *sizes, heading, *extras], dim=-1)
class PreviousResidualRoIDecoder(object):
    """
    RoI variant of the legacy residual decoder; identical to
    PreviousResidualDecoder except the heading residual is subtracted.
    """

    def __init__(self, code_size=7, **kwargs):
        super().__init__()
        self.code_size = code_size

    @staticmethod
    def decode_torch(box_encodings, anchors):
        """
        Args:
            box_encodings: (B, N, 7 + ?) x, y, z, w, l, h, r, custom values
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
        Returns:
            decoded boxes (..., 7 + C)
        """
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)

        diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
        center = [xt * diagonal + xa, yt * diagonal + ya, zt * dza + za]
        # note the w/l swap: lt scales dxa, wt scales dya
        sizes = [torch.exp(lt) * dxa, torch.exp(wt) * dya, torch.exp(ht) * dza]
        heading = ra - rt
        extras = [t + a for t, a in zip(cts, cas)]
        return torch.cat([*center, *sizes, heading, *extras], dim=-1)
class PointResidualCoder(object):
    """
    Point-based residual coder: boxes are encoded relative to individual points,
    with the heading stored as (cos, sin). Optionally normalizes offsets and
    sizes by per-class mean box sizes.
    """

    def __init__(self, code_size=8, use_mean_size=True, **kwargs):
        super().__init__()
        self.code_size = code_size
        self.use_mean_size = use_mean_size
        if self.use_mean_size:
            # (num_classes, 3) per-class mean box sizes, kept on GPU
            self.mean_size = torch.from_numpy(np.array(kwargs['mean_size'])).cuda().float()
            assert self.mean_size.min() > 0

    def encode_torch(self, gt_boxes, points, gt_classes=None):
        """
        Args:
            gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            points: (N, 3) [x, y, z]
            gt_classes: (N) [1, num_classes]
        Returns:
            box_coding: (N, 8 + C)
        """
        # NOTE: clamps gt sizes in place on the caller's tensor to avoid log(0)
        gt_boxes[:, 3:6] = torch.clamp_min(gt_boxes[:, 3:6], min=1e-5)
        xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(gt_boxes, 1, dim=-1)
        xa, ya, za = torch.split(points, 1, dim=-1)

        if self.use_mean_size:
            assert gt_classes.max() <= self.mean_size.shape[0]
            dxa, dya, dza = torch.split(self.mean_size[gt_classes - 1], 1, dim=-1)
            diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
            # xy offsets normalized by the BEV diagonal, z by the mean height
            offsets = [(xg - xa) / diagonal, (yg - ya) / diagonal, (zg - za) / dza]
            sizes = [torch.log(dxg / dxa), torch.log(dyg / dya), torch.log(dzg / dza)]
        else:
            offsets = [xg - xa, yg - ya, zg - za]
            sizes = [torch.log(dxg), torch.log(dyg), torch.log(dzg)]
        return torch.cat([*offsets, *sizes, torch.cos(rg), torch.sin(rg), *cgs], dim=-1)

    def decode_torch(self, box_encodings, points, pred_classes=None):
        """
        Args:
            box_encodings: (N, 8 + C) [x, y, z, dx, dy, dz, cos, sin, ...]
            points: (N, 3) [x, y, z]
            pred_classes: (N) [1, num_classes]
        Returns:
            boxes3d: (N, 7 + C)
        """
        xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(box_encodings, 1, dim=-1)
        xa, ya, za = torch.split(points, 1, dim=-1)

        if self.use_mean_size:
            assert pred_classes.max() <= self.mean_size.shape[0]
            dxa, dya, dza = torch.split(self.mean_size[pred_classes - 1], 1, dim=-1)
            diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
            center = [xt * diagonal + xa, yt * diagonal + ya, zt * dza + za]
            sizes = [torch.exp(dxt) * dxa, torch.exp(dyt) * dya, torch.exp(dzt) * dza]
        else:
            center = [xt + xa, yt + ya, zt + za]
            sizes = list(torch.split(torch.exp(box_encodings[..., 3:6]), 1, dim=-1))
        rg = torch.atan2(sint, cost)
        # atan2 may produce NaN on degenerate inputs; fall back to 0
        safe_rg = torch.where(torch.isnan(rg), torch.zeros_like(rg), rg)
        return torch.cat([*center, *sizes, safe_rg, *cts], dim=-1)
class PointBinResidualCoder(object):
    """
    Point-based coder that encodes the heading with angle-bin classification
    plus a normalized in-bin residual, instead of (cos, sin).
    """
    def __init__(self, code_size=30, use_mean_size=True, angle_bin_num=12, pred_velo=False, **kwargs):
        super().__init__()
        # NOTE: the passed code_size is ignored; the actual size is derived here.
        self.code_size = 6 + 2 * angle_bin_num
        self.angle_bin_num = angle_bin_num
        self.pred_velo = pred_velo
        if pred_velo:
            # two extra channels for velocity
            self.code_size += 2
        self.use_mean_size = use_mean_size
        if self.use_mean_size:
            # (num_classes, 3) per-class mean box sizes, kept on GPU
            self.mean_size = torch.from_numpy(np.array(kwargs['mean_size'])).cuda().float()
            assert self.mean_size.min() > 0
    def encode_angle_torch(self, angle):
        """
        Args:
            angle: (N), heading in radians
        Returns:
            angle_cls: (N, angle_bin_num) one-hot bin assignment
            angle_res: (N, angle_bin_num) normalized in-bin residual, non-zero
                only at the assigned bin
        """
        angle = torch.remainder(angle, np.pi * 2.0)
        angle_per_class = np.pi * 2.0 / float(self.angle_bin_num)
        # shift by half a bin so each bin is centered on a multiple of its width
        shifted_angle = torch.remainder(angle + angle_per_class / 2.0, np.pi * 2.0)
        angle_cls_f = (shifted_angle / angle_per_class).floor()
        angle_cls = angle_cls_f.new_zeros(*list(angle_cls_f.shape), self.angle_bin_num)
        angle_cls.scatter_(-1, angle_cls_f.unsqueeze(-1).long(), 1.0)
        angle_res = shifted_angle - (angle_cls_f * angle_per_class + angle_per_class / 2.0)
        angle_res = angle_res / angle_per_class  # normalize residual angle to [0, 1]
        angle_res = angle_cls * angle_res.unsqueeze(-1)
        return angle_cls, angle_res
    def decode_angle_torch(self, angle_cls, angle_res):
        """
        Args:
            angle_cls: (N, angle_bin_num) bin scores (argmax picks the bin)
            angle_res: (N, angle_bin_num) normalized in-bin residuals
        Returns:
            angle: (N), decoded heading in radians
        """
        angle_cls_idx = angle_cls.argmax(dim=-1)
        angle_cls_onehot = angle_cls.new_zeros(angle_cls.shape)
        angle_cls_onehot.scatter_(-1, angle_cls_idx.unsqueeze(-1), 1.0)
        # keep only the residual of the winning bin
        angle_res = (angle_cls_onehot * angle_res).sum(dim=-1)
        angle = (angle_cls_idx.float() + angle_res) * (np.pi * 2.0 / float(self.angle_bin_num))
        return angle
    def encode_torch(self, gt_boxes, points, gt_classes=None):
        """
        Args:
            gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            points: (N, 3) [x, y, z]
            gt_classes: (N) [1, num_classes]
        Returns:
            box_coding: (N, 6 + 2 * B + C)
        """
        # NOTE: clamps gt sizes in place on the caller's tensor to avoid log(0)
        gt_boxes[:, 3:6] = torch.clamp_min(gt_boxes[:, 3:6], min=1e-5)
        xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(gt_boxes, 1, dim=-1)
        xa, ya, za = torch.split(points, 1, dim=-1)
        if self.use_mean_size:
            assert gt_classes.max() <= self.mean_size.shape[0]
            point_anchor_size = self.mean_size[gt_classes - 1]
            dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
            diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
            # xy offsets normalized by the BEV diagonal, z by the mean height
            xt = (xg - xa) / diagonal
            yt = (yg - ya) / diagonal
            zt = (zg - za) / dza
            dxt = torch.log(dxg / dxa)
            dyt = torch.log(dyg / dya)
            dzt = torch.log(dzg / dza)
        else:
            xt = (xg - xa)
            yt = (yg - ya)
            zt = (zg - za)
            dxt = torch.log(dxg)
            dyt = torch.log(dyg)
            dzt = torch.log(dzg)
        rg_cls, rg_reg = self.encode_angle_torch(rg.squeeze(-1))
        cts = [g for g in cgs]
        return torch.cat([xt, yt, zt, dxt, dyt, dzt, rg_cls, rg_reg, *cts], dim=-1)
    def decode_torch_kernel(self, box_offsets, box_angle_cls, box_angle_reg, points, pred_classes=None):
        """
        Args:
            box_offsets: (N, 6) [x, y, z, dx, dy, dz]
            box_angle_cls: (N, angle_bin_num)
            box_angle_reg: (N, angle_bin_num)
            points: (N, 3) [x, y, z]
            pred_classes: (N) [1, num_classes]
        Returns:
            boxes3d: (N, 7)
        """
        xt, yt, zt, dxt, dyt, dzt = torch.split(box_offsets, 1, dim=-1)
        xa, ya, za = torch.split(points, 1, dim=-1)
        if self.use_mean_size:
            assert pred_classes.max() <= self.mean_size.shape[0]
            point_anchor_size = self.mean_size[pred_classes - 1]
            dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
            diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
            xg = xt * diagonal + xa
            yg = yt * diagonal + ya
            zg = zt * dza + za
            dxg = torch.exp(dxt) * dxa
            dyg = torch.exp(dyt) * dya
            dzg = torch.exp(dzt) * dza
        else:
            xg = xt + xa
            yg = yt + ya
            zg = zt + za
            dxg = torch.exp(dxt)
            dyg = torch.exp(dyt)
            dzg = torch.exp(dzt)
        rg = self.decode_angle_torch(box_angle_cls, box_angle_reg).unsqueeze(-1)
        return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg], dim=-1)
    def decode_torch(self, box_encodings, points, pred_classes=None):
        """
        Args:
            box_encodings: (N, 6 + 2 * B + C) [x, y, z, dx, dy, dz, bin_cls, bin_res, ...]
            points: (N, 3) [x, y, z]
            pred_classes: (N) [1, num_classes]
        Returns:
            boxes3d: (N, 7 + C)
        """
        # slice the flat encoding into offsets, angle bins and extra channels
        box_offsets = box_encodings[:, :6]
        box_angle_cls = box_encodings[:, 6:6 + self.angle_bin_num]
        box_angle_reg = box_encodings[:, 6 + self.angle_bin_num:6 + self.angle_bin_num * 2]
        cgs = box_encodings[:, 6 + self.angle_bin_num * 2:]
        boxes3d = self.decode_torch_kernel(box_offsets, box_angle_cls, box_angle_reg, points, pred_classes)
        return torch.cat([boxes3d, cgs], dim=-1)
| 13,258
| 35.226776
| 107
|
py
|
SASA
|
SASA-main/pcdet/utils/object3d_kitti.py
|
import numpy as np
def get_objects_from_label(label_file):
    """Read a KITTI label file and parse each line into an Object3d."""
    with open(label_file, 'r') as f:
        lines = f.readlines()
    return [Object3d(line) for line in lines]
def cls_type_to_id(cls_type):
    """Map a KITTI class name to its numeric id; unknown classes map to -1."""
    type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
    return type_to_id.get(cls_type, -1)
class Object3d(object):
    """A single KITTI annotation parsed from one label-file line."""

    def __init__(self, line):
        fields = line.strip().split(' ')
        self.src = line
        self.cls_type = fields[0]
        self.cls_id = cls_type_to_id(self.cls_type)
        self.truncation = float(fields[1])
        # 0: fully visible, 1: partly occluded, 2: largely occluded, 3: unknown
        self.occlusion = float(fields[2])
        self.alpha = float(fields[3])
        # 2D bounding box [x1, y1, x2, y2] in image coordinates
        self.box2d = np.array((float(fields[4]), float(fields[5]), float(fields[6]), float(fields[7])), dtype=np.float32)
        self.h, self.w, self.l = float(fields[8]), float(fields[9]), float(fields[10])
        # object location in camera coordinates
        self.loc = np.array((float(fields[11]), float(fields[12]), float(fields[13])), dtype=np.float32)
        self.dis_to_cam = np.linalg.norm(self.loc)
        self.ry = float(fields[14])
        # an optional 16th field carries the detection score
        self.score = float(fields[15]) if len(fields) == 16 else -1.0
        self.level_str = None
        self.level = self.get_kitti_obj_level()

    def get_kitti_obj_level(self):
        """Classify KITTI difficulty from 2D box height, truncation and occlusion."""
        height = float(self.box2d[3]) - float(self.box2d[1]) + 1
        if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
            self.level_str = 'Easy'
            return 0
        if height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
            self.level_str = 'Moderate'
            return 1
        if height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
            self.level_str = 'Hard'
            return 2
        self.level_str = 'UnKnown'
        return -1

    def generate_corners3d(self):
        """
        generate corners3d representation for this object
        :return corners_3d: (8, 3) corners of box3d in camera coord
        """
        l, h, w = self.l, self.h, self.w
        # box-frame corners: y runs from 0 (bottom) to -h (top) in camera coords
        x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
        y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
        z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]

        rot = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
                        [0, 1, 0],
                        [-np.sin(self.ry), 0, np.cos(self.ry)]])
        corners = np.vstack([x_corners, y_corners, z_corners])  # (3, 8)
        return np.dot(rot, corners).T + self.loc

    def to_str(self):
        return '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
               % (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l,
                  self.loc, self.ry)

    def to_kitti_format(self):
        return '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
               % (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
                  self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2],
                  self.ry)
| 3,449
| 40.071429
| 119
|
py
|
SASA
|
SASA-main/pcdet/utils/common_utils.py
|
import logging
import os
import pickle
import random
import shutil
import subprocess
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
def check_numpy_to_torch(x):
    """Promote a numpy array to a float32 torch tensor when needed.

    Returns:
        tuple: (tensor_or_original, was_numpy); was_numpy tells callers to
        convert results back to numpy afterwards.
    """
    if not isinstance(x, np.ndarray):
        return x, False
    return torch.from_numpy(x).float(), True
def limit_period(val, offset=0.5, period=np.pi):
    """Wrap values into the interval [-offset*period, (1-offset)*period).

    Accepts numpy arrays or torch tensors; output kind matches input.
    """
    is_numpy = isinstance(val, np.ndarray)
    t = torch.from_numpy(val).float() if is_numpy else val
    wrapped = t - torch.floor(t / period + offset) * period
    return wrapped.numpy() if is_numpy else wrapped
def drop_info_with_name(info, name):
    """Filter every array in `info` to drop entries whose name equals `name`.

    Args:
        info: dict of equally-sized indexable arrays; must contain key 'name'.
        name: class name to remove (e.g. 'DontCare').

    Returns:
        dict: new dict with matching rows removed from every value.
    """
    keep = [idx for idx, cls_name in enumerate(info['name']) if cls_name != name]
    return {key: val[keep] for key, val in info.items()}
def rotate_points_along_z(points, angle):
    """
    Rotate point clouds about the z-axis (angle increases x ==> y).

    Args:
        points: (B, N, 3 + C) numpy array or torch tensor.
        angle: (B,) per-batch rotation angles.

    Returns:
        Rotated points; numpy in -> numpy out, extra channels untouched.
    """
    is_numpy = isinstance(points, np.ndarray)
    pts = torch.from_numpy(points).float() if is_numpy else points
    ang = torch.from_numpy(angle).float() if isinstance(angle, np.ndarray) else angle
    cosa, sina = torch.cos(ang), torch.sin(ang)
    zeros = ang.new_zeros(pts.shape[0])
    ones = ang.new_ones(pts.shape[0])
    # Matrix is laid out for row-vector multiplication: p_rot = p @ R.
    rot_matrix = torch.stack((
        cosa, sina, zeros,
        -sina, cosa, zeros,
        zeros, zeros, ones
    ), dim=1).view(-1, 3, 3).float()
    rotated_xyz = torch.matmul(pts[:, :, 0:3], rot_matrix)
    result = torch.cat((rotated_xyz, pts[:, :, 3:]), dim=-1)
    return result.numpy() if is_numpy else result
def mask_points_by_range(points, limit_range):
    """Boolean mask of points inside the x/y bounds of limit_range.

    Args:
        points: (N, 3 + C) array/tensor; x in column 0, y in column 1.
        limit_range: [x_min, y_min, z_min, x_max, y_max, z_max]. The z
            bounds are not checked here (BEV-style crop).

    Returns:
        (N,) boolean mask.
    """
    x, y = points[:, 0], points[:, 1]
    in_x = (x >= limit_range[0]) & (x <= limit_range[3])
    in_y = (y >= limit_range[1]) & (y <= limit_range[4])
    return in_x & in_y
def get_voxel_centers(voxel_coords, downsample_times, voxel_size, point_cloud_range):
    """
    Convert integer voxel indices (z, y, x order) to metric center coordinates.

    Args:
        voxel_coords: (N, 3) integer voxel indices in (z, y, x) order.
        downsample_times: stride of this feature map w.r.t. the raw voxel grid.
        voxel_size: base voxel size (x, y, z).
        point_cloud_range: [x_min, y_min, z_min, x_max, y_max, z_max].

    Returns:
        (N, 3) tensor of voxel centers in (x, y, z) order.
    """
    assert voxel_coords.shape[1] == 3
    centers = voxel_coords[:, [2, 1, 0]].float()  # reorder zyx -> xyz
    scaled_size = torch.tensor(voxel_size, device=centers.device).float() * downsample_times
    grid_origin = torch.tensor(point_cloud_range[0:3], device=centers.device).float()
    # +0.5 moves from the voxel's corner to its center.
    return (centers + 0.5) * scaled_size + grid_origin
def create_logger(log_file=None, rank=0, log_level=logging.INFO):
    """Create the project logger with console and optional file output.

    Args:
        log_file: optional path; when given, messages are also appended there.
        rank: distributed rank; non-zero ranks are silenced to ERROR level.
        log_level: level used on rank 0.

    Returns:
        logging.Logger: the configured logger.
    """
    logger = logging.getLogger(__name__)
    level = log_level if rank == 0 else 'ERROR'
    logger.setLevel(level)
    formatter = logging.Formatter('%(asctime)s  %(levelname)5s  %(message)s')
    console = logging.StreamHandler()
    console.setLevel(level)
    console.setFormatter(formatter)
    logger.addHandler(console)
    if log_file is not None:
        file_handler = logging.FileHandler(filename=log_file)
        file_handler.setLevel(level)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    # Bug fix: without this, records also propagate to the root logger, so
    # every message is printed twice once the root logger has a handler.
    logger.propagate = False
    return logger
def set_random_seed(seed):
    """Seed the python, numpy and torch RNGs and make cuDNN deterministic."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    # Trade cuDNN autotuning speed for reproducible convolution results.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def keep_arrays_by_name(gt_names, used_classes):
    """Indices of entries in gt_names that belong to used_classes.

    Returns:
        np.ndarray: int64 indices, preserving the original order.
    """
    kept = [idx for idx, cls_name in enumerate(gt_names) if cls_name in used_classes]
    return np.array(kept, dtype=np.int64)
def init_dist_slurm(tcp_port, local_rank, backend='nccl'):
    """
    Initialize torch.distributed for a SLURM-launched job.
    modified from https://github.com/open-mmlab/mmdetection
    Args:
        tcp_port: free TCP port used as MASTER_PORT for rendezvous
        local_rank: unused here; the rank is taken from SLURM_PROCID
        backend: process-group backend (default 'nccl')
    Returns:
        total_gpus: world size of the created process group
        rank: this process's global rank
    """
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    # Bind this process to one of the node's GPUs, round-robin by proc id.
    torch.cuda.set_device(proc_id % num_gpus)
    # The first hostname in the SLURM node list acts as the rendezvous master.
    addr = subprocess.getoutput('scontrol show hostname {} | head -n1'.format(node_list))
    os.environ['MASTER_PORT'] = str(tcp_port)
    os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['RANK'] = str(proc_id)
    # init_process_group reads MASTER_ADDR/PORT, WORLD_SIZE and RANK from env.
    dist.init_process_group(backend=backend)
    total_gpus = dist.get_world_size()
    rank = dist.get_rank()
    return total_gpus, rank
def init_dist_pytorch(tcp_port, local_rank, backend='nccl'):
    """
    Initialize torch.distributed for single-node multi-GPU training
    (one process per GPU; local rank doubles as the global rank).
    Args:
        tcp_port: free TCP port on localhost used for rendezvous
        local_rank: rank of this process on the node
        backend: process-group backend (default 'nccl')
    Returns:
        num_gpus: number of visible GPUs (used as the world size)
        rank: this process's rank in the group
    """
    # 'spawn' avoids fork-related CUDA issues in worker processes.
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(local_rank % num_gpus)
    dist.init_process_group(
        backend=backend,
        init_method='tcp://127.0.0.1:%d' % tcp_port,
        rank=local_rank,
        world_size=num_gpus
    )
    rank = dist.get_rank()
    return num_gpus, rank
def get_dist_info():
    """Return (rank, world_size) of the current distributed process group.

    Falls back to (0, 1) when torch.distributed is unavailable or not yet
    initialized (i.e. plain single-process runs).
    """
    if torch.__version__ < '1.0':
        # Legacy API: pre-1.0 torch exposed the flag as a private attribute.
        initialized = dist._initialized
    else:
        initialized = dist.is_available() and dist.is_initialized()
    if not initialized:
        return 0, 1
    return dist.get_rank(), dist.get_world_size()
def merge_results_dist(result_part, size, tmpdir):
    """
    Gather per-rank result lists into one ordered list on rank 0.
    Each rank pickles its partial results into a shared tmpdir; rank 0 then
    reloads every part, re-interleaves them into dataset order and trims the
    padding duplicated by the distributed sampler.
    Args:
        result_part: list of results produced by this rank
        size: total number of dataset samples (used to trim the tail)
        tmpdir: directory reachable by all ranks (e.g. shared filesystem)
    Returns:
        Ordered result list on rank 0; None on every other rank.
    """
    rank, world_size = get_dist_info()
    os.makedirs(tmpdir, exist_ok=True)
    dist.barrier()  # make sure tmpdir exists everywhere before writing
    pickle.dump(result_part, open(os.path.join(tmpdir, 'result_part_{}.pkl'.format(rank)), 'wb'))
    dist.barrier()  # wait until every rank finished writing its part
    if rank != 0:
        return None
    part_list = []
    for i in range(world_size):
        part_file = os.path.join(tmpdir, 'result_part_{}.pkl'.format(i))
        part_list.append(pickle.load(open(part_file, 'rb')))
    ordered_results = []
    # Samples were dealt out round-robin across ranks, so zip re-interleaves
    # the parts back into the original dataset order.
    for res in zip(*part_list):
        ordered_results.extend(list(res))
    ordered_results = ordered_results[:size]  # drop sampler padding at the tail
    shutil.rmtree(tmpdir)
    return ordered_results
| 5,750
| 28.341837
| 97
|
py
|
SASA
|
SASA-main/pcdet/utils/calibration_kitti.py
|
import numpy as np
def get_calib_from_file(calib_file):
    """Parse a KITTI calibration text file.

    Reads lines 2-5 of the file: the P2/P3 camera projection matrices, the
    R0 rectification matrix and the velodyne-to-camera transform.

    Returns:
        dict: 'P2' (3,4), 'P3' (3,4), 'R0' (3,3), 'Tr_velo2cam' (3,4),
        all float32 numpy arrays.
    """
    with open(calib_file) as f:
        lines = f.readlines()

    def parse(line_idx):
        # Each line looks like 'KEY: v0 v1 ...'; drop the leading key token.
        return np.array(lines[line_idx].strip().split(' ')[1:], dtype=np.float32)

    return {'P2': parse(2).reshape(3, 4),
            'P3': parse(3).reshape(3, 4),
            'R0': parse(4).reshape(3, 3),
            'Tr_velo2cam': parse(5).reshape(3, 4)}
class Calibration(object):
    """KITTI sensor calibration: conversions between the lidar frame, the
    rectified camera frame ('rect') and image pixel coordinates.
    """
    def __init__(self, calib_file):
        # Accept either a path to a KITTI calib file or an already-parsed dict.
        if not isinstance(calib_file, dict):
            calib = get_calib_from_file(calib_file)
        else:
            calib = calib_file
        self.P2 = calib['P2']  # 3 x 4 camera-2 projection matrix
        self.R0 = calib['R0']  # 3 x 3 rectification matrix
        self.V2C = calib['Tr_velo2cam']  # 3 x 4 velodyne -> camera transform
        # Camera intrinsics and extrinsics
        self.cu = self.P2[0, 2]  # principal point u
        self.cv = self.P2[1, 2]  # principal point v
        self.fu = self.P2[0, 0]  # focal length u
        self.fv = self.P2[1, 1]  # focal length v
        self.tx = self.P2[0, 3] / (-self.fu)  # baseline offsets encoded in P2
        self.ty = self.P2[1, 3] / (-self.fv)
    def cart_to_hom(self, pts):
        """
        Append a homogeneous 1-column to cartesian points.
        :param pts: (N, 3 or 2)
        :return pts_hom: (N, 4 or 3)
        """
        pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
        return pts_hom
    def rect_to_lidar(self, pts_rect):
        """
        :param pts_rect: (N, 3) points in rectified camera coordinates
        :return pts_lidar: (N, 3) points in lidar coordinates
        """
        pts_rect_hom = self.cart_to_hom(pts_rect)  # (N, 4)
        R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32)))  # (3, 4)
        R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32)))  # (4, 4)
        R0_ext[3, 3] = 1
        V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32)))  # (4, 4)
        V2C_ext[3, 3] = 1
        # Invert the full lidar -> rect transform (R0 @ V2C) to go back.
        pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))
        return pts_lidar[:, 0:3]
    def lidar_to_rect(self, pts_lidar):
        """
        :param pts_lidar: (N, 3)
        :return pts_rect: (N, 3)
        """
        pts_lidar_hom = self.cart_to_hom(pts_lidar)
        pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
        # pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
        return pts_rect
    def rect_to_img(self, pts_rect):
        """
        Project rectified-frame points onto the image plane.
        :param pts_rect: (N, 3)
        :return pts_img: (N, 2) pixel coordinates and (N,) depths
        """
        pts_rect_hom = self.cart_to_hom(pts_rect)
        pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
        pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T  # (N, 2)
        pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2]  # depth in rect camera coord
        return pts_img, pts_rect_depth
    def lidar_to_img(self, pts_lidar):
        """
        Project lidar points onto the image plane (via the rect frame).
        :param pts_lidar: (N, 3)
        :return pts_img: (N, 2)
        """
        pts_rect = self.lidar_to_rect(pts_lidar)
        pts_img, pts_depth = self.rect_to_img(pts_rect)
        return pts_img, pts_depth
    def img_to_rect(self, u, v, depth_rect):
        """
        Back-project pixels with known depth into the rectified camera frame.
        :param u: (N)
        :param v: (N)
        :param depth_rect: (N)
        :return: (N, 3) points in rect coordinates
        """
        x = ((u - self.cu) * depth_rect) / self.fu + self.tx
        y = ((v - self.cv) * depth_rect) / self.fv + self.ty
        pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)
        return pts_rect
    def corners3d_to_img_boxes(self, corners3d):
        """
        :param corners3d: (N, 8, 3) corners in rect coordinate
        :return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
        :return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
        """
        sample_num = corners3d.shape[0]
        corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2)  # (N, 8, 4)
        img_pts = np.matmul(corners3d_hom, self.P2.T)  # (N, 8, 3)
        # Perspective divide, then take the axis-aligned extent of the 8 corners.
        x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
        x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
        x2, y2 = np.max(x, axis=1), np.max(y, axis=1)
        boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
        boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
        return boxes, boxes_corner
| 4,464
| 34.436508
| 116
|
py
|
SASA
|
SASA-main/pcdet/utils/visual_utils/visualize_result.py
|
import argparse
import os
import numpy as np
import visualize_utils
from pcdet.utils import object3d_kitti
from pcdet.datasets import KittiDataset
def parse_config():
    """Parse the command-line arguments for the visualization script."""
    parser = argparse.ArgumentParser(description='arg parser')
    arg_specs = (
        ('--sample_id', dict(type=int, required=True, help='sample index')),
        ('--cfg_file', dict(type=str, required=True, help='dataset config file')),
        ('--split', dict(type=str, default='train', help='train or test')),
        ('--pred_path', dict(type=str, default='default', help='directory for prediction files')),
    )
    for flag, kwargs in arg_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def process_boxes(obj_list, calib):
    """Convert KITTI label objects (camera frame) into lidar-frame boxes.

    Args:
        obj_list: list of KITTI label objects (Object3d-like).
        calib: calibration object providing rect_to_lidar().

    Returns:
        boxes: (M, 7) [x, y, z, l, w, h, heading] in lidar coordinates.
        score: (M,) detection scores (-1 for ground-truth labels).
        cls: (M,) class indices (Car=1, Pedestrian=2, Cyclist=3).
    """
    cls_to_idx = {
        'Car': 1,
        'Pedestrian': 2,
        'Cyclist': 3
    }
    kept = [obj for obj in obj_list if obj.cls_type in cls_to_idx]
    cls = np.array([cls_to_idx[obj.cls_type] for obj in kept])
    score = np.array([obj.score for obj in kept])
    rot = np.array([obj.ry for obj in kept])
    loc_cam = np.concatenate([obj.loc.reshape(1, 3) for obj in kept], axis=0)
    l = np.array([[obj.l] for obj in kept])
    h = np.array([[obj.h] for obj in kept])
    w = np.array([[obj.w] for obj in kept])
    loc_lidar = calib.rect_to_lidar(loc_cam)
    # KITTI stores the box bottom center; shift up half the height to the
    # geometric center used by the lidar-frame box encoding.
    loc_lidar[:, 2] += h[:, 0] / 2
    heading = -(np.pi / 2 + rot[..., np.newaxis])
    boxes = np.concatenate([loc_lidar, l, w, h, heading], axis=1)
    return boxes, score, cls
def main():
    """Visualize one KITTI sample: point cloud plus GT and/or predicted boxes."""
    args = parse_config()
    import yaml
    from pathlib import Path
    from easydict import EasyDict
    # Bug fix: yaml.load() without an explicit Loader is deprecated (PyYAML
    # >= 5.1) and unsafe on untrusted files; safe_load suffices for configs.
    dataset_cfg = EasyDict(yaml.safe_load(open(args.cfg_file)))
    ROOT_DIR = (Path(__file__).resolve().parent / '../../').resolve()
    # NOTE(review): this resolves to <repo>/pcdet, so data is looked up under
    # pcdet/data/kitti -- confirm this matches the repository's data layout.
    data_path = ROOT_DIR / 'data' / 'kitti'
    print(data_path)
    dataset = KittiDataset(
        dataset_cfg=dataset_cfg,
        class_names=['Car', 'Pedestrian', 'Cyclist'],
        root_path=data_path,
        training=False)
    dataset.set_split(args.split)
    sample_idx = '%06d' % args.sample_id
    lidar = dataset.get_lidar(sample_idx)
    calib = dataset.get_calib(sample_idx)
    gt_boxes = None
    pred_boxes, pred_scores, pred_labels = None, None, None
    if args.split == 'train':
        # Ground-truth annotations are only available for the training split.
        gt_labels = dataset.get_label(sample_idx)
        gt_boxes, _, _ = process_boxes(gt_labels, calib)
    if not args.pred_path == 'default':
        pred_file = Path(args.pred_path) / (sample_idx + '.txt')
        assert pred_file.exists()
        pred_labels = object3d_kitti.get_objects_from_label(pred_file)
        pred_boxes, pred_scores, pred_labels = process_boxes(pred_labels, calib)
    visualize_utils.draw_scenes(lidar, gt_boxes=gt_boxes,
                                ref_boxes=pred_boxes, ref_scores=pred_scores, ref_labels=pred_labels)
    input()  # keep the mayavi window open until the user presses Enter
if __name__ == '__main__':
    main()
| 2,783
| 30.636364
| 106
|
py
|
SASA
|
SASA-main/pcdet/utils/visual_utils/visualize_utils.py
|
import mayavi.mlab as mlab
import numpy as np
import torch
# RGB colors (0-1 range) indexed by class label when drawing reference boxes;
# index 0 acts as the fallback color.
box_colormap = [
    [1, 1, 1],
    [0, 1, 0],
    [0, 1, 1],
    [1, 1, 0],
]
def check_numpy_to_torch(x):
    """Convert numpy input to a float32 torch tensor; flag whether it was numpy."""
    is_numpy = isinstance(x, np.ndarray)
    return (torch.from_numpy(x).float() if is_numpy else x), is_numpy
def rotate_points_along_z(points, angle):
    """
    Rotate (B, N, 3 + C) points about the z-axis by per-batch angles (B,);
    the angle increases x ==> y. Numpy in -> numpy out.
    """
    to_numpy = isinstance(points, np.ndarray)
    pts = torch.from_numpy(points).float() if to_numpy else points
    ang = torch.from_numpy(angle).float() if isinstance(angle, np.ndarray) else angle
    cosa = torch.cos(ang)
    sina = torch.sin(ang)
    zeros = ang.new_zeros(pts.shape[0])
    ones = ang.new_ones(pts.shape[0])
    # Laid out for row-vector multiplication: p_rot = p @ R.
    rot = torch.stack((
        cosa, sina, zeros,
        -sina, cosa, zeros,
        zeros, zeros, ones
    ), dim=1).view(-1, 3, 3).float()
    xyz = torch.matmul(pts[:, :, 0:3], rot)
    out = torch.cat((xyz, pts[:, :, 3:]), dim=-1)
    return out.numpy() if to_numpy else out
def boxes_to_corners_3d(boxes3d):
    """Expand 7-DoF boxes into their 8 corner points.

        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1

    Args:
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading]; (x, y, z) is the box center.

    Returns:
        (N, 8, 3) corner coordinates; numpy in -> numpy out.
    """
    boxes3d, is_numpy = check_numpy_to_torch(boxes3d)
    # Unit-cube corner signs, halved so they scale directly by the box extents.
    corner_signs = boxes3d.new_tensor((
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    )) / 2
    corners = boxes3d[:, None, 3:6] * corner_signs[None, :, :]  # broadcast over 8 corners
    corners = rotate_points_along_z(corners.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
    corners += boxes3d[:, None, 0:3]
    return corners.numpy() if is_numpy else corners
def visualize_pts(pts, fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0),
                  show_intensity=False, size=(800, 600), draw_origin=True):
    """Render a point cloud with mayavi.

    Args:
        pts: (N, 3+) points; column 3 is used as the color scalar when
            show_intensity is True. Torch tensors are moved to numpy.
        fig: existing mayavi figure to draw into, or None to create one.
        bgcolor/fgcolor: figure background/foreground colors.
        show_intensity: color points by their 4th column instead of uniform.
        size: window size in pixels.
        draw_origin: also draw an origin cube and x (blue) / y (green) /
            z (red) axis lines.

    Returns:
        The mayavi figure.
    """
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size)
    if show_intensity:
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
    else:
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
    if draw_origin:
        # Origin marker plus short axis segments for orientation reference.
        mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
        mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), tube_radius=0.1)
        mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), tube_radius=0.1)
        mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), tube_radius=0.1)
    return fig
def draw_sphere_pts(pts, color=(0, 1, 0), fig=None, bgcolor=(0, 0, 0), scale_factor=0.2):
    """Draw points as spheres, either uniformly colored or per-point colored.

    Args:
        pts: (N, 3+) points; torch tensors are moved to numpy.
        color: either an RGB tuple (uniform color) or an (N, 3) uint8-style
            numpy array of per-point colors (0-255 range).
        fig: existing mayavi figure, or None to create one.
        bgcolor: background color for a newly created figure.
        scale_factor: sphere radius.

    Returns:
        The mayavi figure.
    """
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(600, 600))
    if isinstance(color, np.ndarray) and color.shape[0] == 1:
        # A single-row color array degrades to one uniform RGB tuple.
        color = color[0]
        color = (color[0] / 255.0, color[1] / 255.0, color[2] / 255.0)
    if isinstance(color, np.ndarray):
        # Per-point colors: index each point into a custom RGBA lookup table.
        pts_color = np.zeros((pts.__len__(), 4), dtype=np.uint8)
        pts_color[:, 0:3] = color
        pts_color[:, 3] = 255
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], np.arange(0, pts_color.__len__()), mode='sphere',
                          scale_factor=scale_factor, figure=fig)
        G.glyph.color_mode = 'color_by_scalar'
        G.glyph.scale_mode = 'scale_by_vector'
        G.module_manager.scalar_lut_manager.lut.table = pts_color
    else:
        mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='sphere', color=color,
                      colormap='gnuplot', scale_factor=scale_factor, figure=fig)
    # Origin cube plus x/y/z axis lines for orientation reference.
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
    mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), line_width=3, tube_radius=None, figure=fig)
    return fig
def draw_grid(x1, y1, x2, y2, fig, tube_radius=None, color=(0.5, 0.5, 0.5)):
    """Draw one rectangular grid cell (four edges) on the z=0 plane."""
    edges = (
        ([x1, x1], [y1, y2]),
        ([x2, x2], [y1, y2]),
        ([x1, x2], [y1, y1]),
        ([x1, x2], [y2, y2]),
    )
    for xs, ys in edges:
        mlab.plot3d(xs, ys, [0, 0], color=color, tube_radius=tube_radius,
                    line_width=1, figure=fig)
    return fig
def draw_multi_grid_range(fig, grid_size=20, bv_range=(-60, -60, 60, 60)):
    """Tile the bird's-eye-view range with square grid cells of grid_size."""
    x_min, y_min, x_max, y_max = bv_range
    for gx in range(x_min, x_max, grid_size):
        for gy in range(y_min, y_max, grid_size):
            fig = draw_grid(gx, gy, gx + grid_size, gy + grid_size, fig)
    return fig
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_scores=None, ref_labels=None):
    """Visualize a full scene: point cloud, GT boxes (blue) and predictions.

    Args:
        points: (N, 3+) point cloud; numpy or torch.
        gt_boxes: optional (M, 7) ground-truth boxes, drawn in blue.
        ref_boxes: optional (K, 7) reference/predicted boxes.
        ref_scores: optional (K,) scores rendered next to each ref box.
        ref_labels: optional (K,) integer labels; selects per-class colors
            from box_colormap. When None, all ref boxes are drawn green.

    Returns:
        The mayavi figure.
    """
    # Normalize every input to numpy before handing off to mayavi.
    if not isinstance(points, np.ndarray):
        points = points.cpu().numpy()
    if ref_boxes is not None and not isinstance(ref_boxes, np.ndarray):
        ref_boxes = ref_boxes.cpu().numpy()
    if gt_boxes is not None and not isinstance(gt_boxes, np.ndarray):
        gt_boxes = gt_boxes.cpu().numpy()
    if ref_scores is not None and not isinstance(ref_scores, np.ndarray):
        ref_scores = ref_scores.cpu().numpy()
    if ref_labels is not None and not isinstance(ref_labels, np.ndarray):
        ref_labels = ref_labels.cpu().numpy()
    fig = visualize_pts(points)
    fig = draw_multi_grid_range(fig, bv_range=(0, -40, 80, 40))
    if gt_boxes is not None:
        corners3d = boxes_to_corners_3d(gt_boxes)
        fig = draw_corners3d(corners3d, fig=fig, color=(0, 0, 1), max_num=100)
    if ref_boxes is not None and len(ref_boxes) > 0:
        ref_corners3d = boxes_to_corners_3d(ref_boxes)
        if ref_labels is None:
            fig = draw_corners3d(ref_corners3d, fig=fig, color=(0, 1, 0), cls=ref_scores, max_num=100)
        else:
            # Draw each label value in its own color from box_colormap.
            for k in range(ref_labels.min(), ref_labels.max() + 1):
                cur_color = tuple(box_colormap[k % len(box_colormap)])
                mask = (ref_labels == k)
                fig = draw_corners3d(ref_corners3d[mask], fig=fig, color=cur_color, cls=ref_scores[mask], max_num=100)
    mlab.view(azimuth=-179, elevation=54.0, distance=104.0, roll=90.0)
    return fig
def draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None):
    """
    Draw 3D boxes given their 8 corner points, plus optional per-box text.
    :param corners3d: (N, 8, 3)
    :param fig: mayavi figure to draw into
    :param color: RGB tuple used for all edges and text
    :param line_width: edge line width
    :param cls: optional per-box labels; floats are formatted as '%.2f'
    :param tag: unused here, kept for interface compatibility
    :param max_num: draw at most this many boxes
    :param tube_radius: passed through to mlab.plot3d
    :return: the mayavi figure
    """
    import mayavi.mlab as mlab
    num = min(max_num, len(corners3d))
    for n in range(num):
        b = corners3d[n]  # (8, 3)
        if cls is not None:
            # Annotate the box near corner 6 with its score/label.
            if isinstance(cls, np.ndarray):
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%.2f' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
            else:
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%s' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
        for k in range(0, 4):
            # Three edges per iteration: bottom ring, top ring, vertical pillar.
            i, j = k, (k + 1) % 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
            i, j = k + 4, (k + 1) % 4 + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
            i, j = k, k + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)
        # Diagonals 0-5 and 1-4 draw a cross on one side face
        # (presumably marking the box heading -- confirm against corner layout).
        i, j = 0, 5
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)
        i, j = 1, 4
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)
    return fig
| 8,540
| 38.541667
| 121
|
py
|
SASA
|
SASA-main/pcdet/ops/roipoint_pool3d/roipoint_pool3d_utils.py
|
import torch
import torch.nn as nn
from torch.autograd import Function
from ...utils import box_utils
from . import roipoint_pool3d_cuda
class RoIPointPool3d(nn.Module):
    """Pools raw points and their features that fall inside (enlarged) 3D boxes."""
    def __init__(self, num_sampled_points=512, pool_extra_width=1.0):
        """
        Args:
            num_sampled_points: number of points kept per box
            pool_extra_width: enlargement applied to each box before pooling
        """
        super().__init__()
        self.num_sampled_points = num_sampled_points
        self.pool_extra_width = pool_extra_width
    def forward(self, points, point_features, boxes3d):
        """
        Args:
            points: (B, N, 3)
            point_features: (B, N, C)
            boxes3d: (B, M, 7), [x, y, z, dx, dy, dz, heading]
        Returns:
            pooled_features: (B, M, 512, 3 + C)
            pooled_empty_flag: (B, M) 1 where a box contained no points
        """
        return RoIPointPool3dFunction.apply(
            points, point_features, boxes3d, self.pool_extra_width, self.num_sampled_points
        )
class RoIPointPool3dFunction(Function):
    """Autograd wrapper around the roipoint_pool3d CUDA kernel (forward only)."""
    @staticmethod
    def forward(ctx, points, point_features, boxes3d, pool_extra_width, num_sampled_points=512):
        """
        Args:
            ctx:
            points: (B, N, 3)
            point_features: (B, N, C)
            boxes3d: (B, num_boxes, 7), [x, y, z, dx, dy, dz, heading]
            pool_extra_width: enlargement applied to each box before pooling
            num_sampled_points: points kept per box
        Returns:
            pooled_features: (B, num_boxes, 512, 3 + C)
            pooled_empty_flag: (B, num_boxes) 1 where a box contained no points
        """
        assert points.shape.__len__() == 3 and points.shape[2] == 3
        batch_size, boxes_num, feature_len = points.shape[0], boxes3d.shape[1], point_features.shape[2]
        # Enlarge boxes so border points just outside the box are still pooled.
        pooled_boxes3d = box_utils.enlarge_box3d(boxes3d.view(-1, 7), pool_extra_width).view(batch_size, -1, 7)
        pooled_features = point_features.new_zeros((batch_size, boxes_num, num_sampled_points, 3 + feature_len))
        pooled_empty_flag = point_features.new_zeros((batch_size, boxes_num)).int()
        # Kernel fills pooled_features with [xyz, features] per in-box point.
        roipoint_pool3d_cuda.forward(
            points.contiguous(), pooled_boxes3d.contiguous(),
            point_features.contiguous(), pooled_features, pooled_empty_flag
        )
        return pooled_features, pooled_empty_flag
    @staticmethod
    def backward(ctx, grad_out):
        # Pooling is used at inference/ROI stage only; no gradient is defined.
        raise NotImplementedError
if __name__ == '__main__':
    pass
| 2,226
| 31.75
| 112
|
py
|
SASA
|
SASA-main/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_utils.py
|
import torch
import torch.nn as nn
from torch.autograd import Function, Variable
from . import pointnet2_stack_cuda as pointnet2
class BallQuery(Function):
    """Fixed-radius neighbor search over stacked (batch-concatenated) point sets."""
    @staticmethod
    def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor,
                new_xyz: torch.Tensor, new_xyz_batch_cnt):
        """
        Args:
            ctx:
            radius: float, radius of the balls
            nsample: int, maximum number of features in the balls
            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            xyz_batch_cnt: (batch_size), [N1, N2, ...]
            new_xyz: (M1 + M2 ..., 3) centers of the ball query
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        Returns:
            idx: (M1 + M2, nsample) tensor with the indicies of the features that form the query balls
            empty_ball_mask: (M1 + M2,) bool mask of query points with no neighbors
        """
        assert new_xyz.is_contiguous()
        assert new_xyz_batch_cnt.is_contiguous()
        assert xyz.is_contiguous()
        assert xyz_batch_cnt.is_contiguous()
        B = xyz_batch_cnt.shape[0]
        M = new_xyz.shape[0]
        idx = torch.cuda.IntTensor(M, nsample).zero_()
        pointnet2.ball_query_wrapper(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx)
        # The kernel marks empty balls with -1 in the first slot.
        empty_ball_mask = (idx[:, 0] == -1)
        # Replace with a valid index; callers zero these groups via the mask.
        idx[empty_ball_mask] = 0
        return idx, empty_ball_mask
    @staticmethod
    def backward(ctx, a=None):
        # Index selection is not differentiable.
        return None, None, None, None
ball_query = BallQuery.apply
class GroupingOperation(Function):
    """Gathers per-neighbor feature columns for each query group (stack layout)."""
    @staticmethod
    def forward(ctx, features: torch.Tensor, features_batch_cnt: torch.Tensor,
                idx: torch.Tensor, idx_batch_cnt: torch.Tensor):
        """
        Args:
            ctx:
            features: (N1 + N2 ..., C) tensor of features to group
            features_batch_cnt: (batch_size) [N1, N2, ...] feature counts per batch element
            idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with
            idx_batch_cnt: (batch_size) [M1, M2, ...] query counts per batch element
        Returns:
            output: (M1 + M2, C, nsample) tensor
        """
        assert features.is_contiguous()
        assert features_batch_cnt.is_contiguous()
        assert idx.is_contiguous()
        assert idx_batch_cnt.is_contiguous()
        assert features.shape[0] == features_batch_cnt.sum(), \
            'features: %s, features_batch_cnt: %s' % (str(features.shape), str(features_batch_cnt))
        assert idx.shape[0] == idx_batch_cnt.sum(), \
            'idx: %s, idx_batch_cnt: %s' % (str(idx.shape), str(idx_batch_cnt))
        M, nsample = idx.size()
        N, C = features.size()
        B = idx_batch_cnt.shape[0]
        output = torch.cuda.FloatTensor(M, C, nsample)  # uninitialized; assumed fully written by the kernel
        pointnet2.group_points_wrapper(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, output)
        # Saved for the backward scatter-add into the feature gradient.
        ctx.for_backwards = (B, N, idx, features_batch_cnt, idx_batch_cnt)
        return output
    @staticmethod
    def backward(ctx, grad_out: torch.Tensor):
        """
        Args:
            ctx:
            grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward
        Returns:
            grad_features: (N1 + N2 ..., C) gradient of the features
        """
        B, N, idx, features_batch_cnt, idx_batch_cnt = ctx.for_backwards
        M, C, nsample = grad_out.size()
        grad_features = Variable(torch.cuda.FloatTensor(N, C).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.group_points_grad_wrapper(B, M, C, N, nsample, grad_out_data, idx,
                                            idx_batch_cnt, features_batch_cnt, grad_features.data)
        # Only features receive gradients; counts and indices are non-differentiable.
        return grad_features, None, None, None
grouping_operation = GroupingOperation.apply
class QueryAndGroup(nn.Module):
    """Ball-query neighbors around each query point and group their features
    (stack-batch layout: tensors are concatenated across the batch)."""
    def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
        """
        Args:
            radius: float, radius of ball
            nsample: int, maximum number of features to gather in the ball
            use_xyz: prepend relative xyz offsets to the grouped features
        """
        super().__init__()
        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
    def forward(self, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor,
                new_xyz: torch.Tensor, new_xyz_batch_cnt: torch.Tensor,
                features: torch.Tensor = None):
        """
        Args:
            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
            xyz_batch_cnt: (batch_size), [N1, N2, ...]
            new_xyz: (M1 + M2 ..., 3) centers of the ball query
            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
            features: (N1 + N2 ..., C) tensor of features to group
        Returns:
            new_features: (M1 + M2, C, nsample) tensor
            idx: (M1 + M2, nsample) neighbor indices per query point
        """
        # Bug fix: the error message previously printed new_xyz_batch_cnt
        # instead of xyz_batch_cnt, making mismatch reports misleading.
        assert xyz.shape[0] == xyz_batch_cnt.sum(), 'xyz: %s, xyz_batch_cnt: %s' % (str(xyz.shape), str(xyz_batch_cnt))
        assert new_xyz.shape[0] == new_xyz_batch_cnt.sum(), \
            'new_xyz: %s, new_xyz_batch_cnt: %s' % (str(new_xyz.shape), str(new_xyz_batch_cnt))
        # idx: (M1 + M2 ..., nsample), empty_ball_mask: (M1 + M2 ...)
        idx, empty_ball_mask = ball_query(self.radius, self.nsample, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt)
        grouped_xyz = grouping_operation(xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt)  # (M1 + M2, 3, nsample)
        grouped_xyz -= new_xyz.unsqueeze(-1)  # offsets relative to each query center
        grouped_xyz[empty_ball_mask] = 0  # neutralize groups with no neighbors
        if features is not None:
            grouped_features = grouping_operation(features, xyz_batch_cnt, idx, new_xyz_batch_cnt)  # (M1 + M2, C, nsample)
            grouped_features[empty_ball_mask] = 0
            if self.use_xyz:
                new_features = torch.cat([grouped_xyz, grouped_features], dim=1)  # (M1 + M2 ..., C + 3, nsample)
            else:
                new_features = grouped_features
        else:
            assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
            new_features = grouped_xyz
        return new_features, idx
class FurthestPointSampling(Function):
    """Iterative farthest point sampling on dense (B, N, 3) batches (CUDA)."""
    @staticmethod
    def forward(ctx, xyz: torch.Tensor, npoint: int):
        """
        Args:
            ctx:
            xyz: (B, N, 3) where N > npoint
            npoint: int, number of features in the sampled set
        Returns:
            output: (B, npoint) int indices of the sampled points
        """
        assert xyz.is_contiguous()
        B, N, _ = xyz.size()
        output = torch.cuda.IntTensor(B, npoint)
        # Running per-point min-distance buffer consumed by the kernel.
        temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
        pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
        return output
    @staticmethod
    def backward(xyz, a=None):
        # Sampling is non-differentiable. NOTE(review): the first positional
        # parameter here actually receives ctx; kept as-is for compatibility.
        return None, None
furthest_point_sample = FurthestPointSampling.apply
class ThreeNN(Function):
    """Finds the three nearest known points for every unknown point (stack layout)."""
    @staticmethod
    def forward(ctx, unknown, unknown_batch_cnt, known, known_batch_cnt):
        """
        Args:
            ctx:
            unknown: (N1 + N2..., 3)
            unknown_batch_cnt: (batch_size), [N1, N2, ...]
            known: (M1 + M2..., 3)
            known_batch_cnt: (batch_size), [M1, M2, ...]
        Returns:
            dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors
            idx: (N1 + N2 ..., 3) index of the three nearest neighbors, range [0, M1+M2+...]
        """
        assert unknown.shape.__len__() == 2 and unknown.shape[1] == 3
        assert known.shape.__len__() == 2 and known.shape[1] == 3
        assert unknown_batch_cnt.__len__() == known_batch_cnt.__len__()
        dist2 = unknown.new_zeros(unknown.shape)  # kernel writes squared distances
        idx = unknown_batch_cnt.new_zeros(unknown.shape).int()
        pointnet2.three_nn_wrapper(
            unknown.contiguous(), unknown_batch_cnt.contiguous(),
            known.contiguous(), known_batch_cnt.contiguous(), dist2, idx
        )
        # Convert squared distances to euclidean distances for callers.
        return torch.sqrt(dist2), idx
    @staticmethod
    def backward(ctx, a=None, b=None):
        # Nearest-neighbor search is non-differentiable.
        return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
    """Weighted interpolation of features from 3 neighbors per target point."""
    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor):
        """
        Args:
            ctx:
            features: (M1 + M2 ..., C)
            idx: [N1 + N2 ..., 3] neighbor indices (e.g. from three_nn)
            weight: [N1 + N2 ..., 3] interpolation weights per neighbor
        Returns:
            out_tensor: (N1 + N2 ..., C)
        """
        assert idx.shape[0] == weight.shape[0] and idx.shape[1] == weight.shape[1] == 3
        # M (source point count) is needed to size the gradient in backward.
        ctx.three_interpolate_for_backward = (idx, weight, features.shape[0])
        output = features.new_zeros((idx.shape[0], features.shape[1]))
        pointnet2.three_interpolate_wrapper(features.contiguous(), idx.contiguous(), weight.contiguous(), output)
        return output
    @staticmethod
    def backward(ctx, grad_out: torch.Tensor):
        """
        Args:
            ctx:
            grad_out: (N1 + N2 ..., C)
        Returns:
            grad_features: (M1 + M2 ..., C)
        """
        idx, weight, M = ctx.three_interpolate_for_backward
        grad_features = grad_out.new_zeros((M, grad_out.shape[1]))
        # Scatter-add each target's gradient back onto its 3 source points.
        pointnet2.three_interpolate_grad_wrapper(
            grad_out.contiguous(), idx.contiguous(), weight.contiguous(), grad_features
        )
        # idx and weight are treated as constants.
        return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
if __name__ == '__main__':
    pass
| 9,462
| 34.441948
| 123
|
py
|
SASA
|
SASA-main/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_modules.py
|
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import pointnet2_utils
class StackSAModuleMSG(nn.Module):
    """Multi-scale-grouping set abstraction over stacked (batch-concatenated)
    point clouds: each scale ball-queries neighbors, runs a shared MLP and
    pools over the neighborhood; scale outputs are concatenated.
    """
    def __init__(self, *, radii: List[float], nsamples: List[int], mlps: List[List[int]],
                 use_xyz: bool = True, pool_method='max_pool'):
        """
        Args:
            radii: list of float, list of radii to group with
            nsamples: list of int, number of samples in each ball query
            mlps: list of list of int, spec of the pointnet before the global pooling for each scale
            use_xyz: prepend relative xyz to the grouped features
            pool_method: max_pool / avg_pool
        """
        super().__init__()
        assert len(radii) == len(nsamples) == len(mlps)
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            self.groupers.append(pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz))
            mlp_spec = mlps[i]
            if use_xyz:
                # Grouped features are prefixed with 3 relative-xyz channels.
                # NOTE: this mutates the caller's mlps list in place.
                mlp_spec[0] += 3
            shared_mlps = []
            for k in range(len(mlp_spec) - 1):
                shared_mlps.extend([
                    nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False),
                    nn.BatchNorm2d(mlp_spec[k + 1]),
                    nn.ReLU()
                ])
            self.mlps.append(nn.Sequential(*shared_mlps))
        self.pool_method = pool_method
        self.init_weights()
    def init_weights(self):
        # Kaiming init for convs, unit-scale init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0)
    def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features=None, empty_voxel_set_zeros=True):
        """
        :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
        :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
        :param new_xyz: (M1 + M2 ..., 3)
        :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
        :param features: (N1 + N2 ..., C) tensor of the descriptors of the the features
        :return:
            new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
            new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []
        for k in range(len(self.groupers)):
            new_features, ball_idxs = self.groupers[k](
                xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features
            )  # (M1 + M2, C, nsample)
            # Add a dummy batch dim so the Conv2d MLP can process all groups at once.
            new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0)  # (1, C, M1 + M2 ..., nsample)
            new_features = self.mlps[k](new_features)  # (1, C, M1 + M2 ..., nsample)
            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
            else:
                raise NotImplementedError
            new_features = new_features.squeeze(dim=0).permute(1, 0)  # (M1 + M2 ..., C)
            new_features_list.append(new_features)
        new_features = torch.cat(new_features_list, dim=1)  # (M1 + M2 ..., C)
        return new_xyz, new_features
class StackPointnetFPModule(nn.Module):
    """Feature propagation for stacked point clouds: interpolates known-point
    features onto unknown points via inverse-distance-weighted 3-NN, optionally
    concatenates skip features, then applies a shared MLP.
    """
    def __init__(self, *, mlp: List[int]):
        """
        Args:
            mlp: list of int, channel spec of the shared MLP
        """
        super().__init__()
        shared_mlps = []
        for k in range(len(mlp) - 1):
            shared_mlps.extend([
                nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False),
                nn.BatchNorm2d(mlp[k + 1]),
                nn.ReLU()
            ])
        self.mlp = nn.Sequential(*shared_mlps)
    def forward(self, unknown, unknown_batch_cnt, known, known_batch_cnt, unknown_feats=None, known_feats=None):
        """
        Args:
            unknown: (N1 + N2 ..., 3)
            known: (M1 + M2 ..., 3)
            unknown_feats: (N1 + N2 ..., C1) optional skip features
            known_feats: (M1 + M2 ..., C2)
        Returns:
            new_features: (N1 + N2 ..., C_out)
        """
        dist, idx = pointnet2_utils.three_nn(unknown, unknown_batch_cnt, known, known_batch_cnt)
        # Inverse-distance weights, normalized over the 3 neighbors.
        dist_recip = 1.0 / (dist + 1e-8)
        norm = torch.sum(dist_recip, dim=-1, keepdim=True)
        weight = dist_recip / norm
        interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
        if unknown_feats is not None:
            new_features = torch.cat([interpolated_feats, unknown_feats], dim=1)  # (N1 + N2 ..., C2 + C1)
        else:
            new_features = interpolated_feats
        # Dummy batch/width dims so the Conv2d MLP can process the stack at once.
        new_features = new_features.permute(1, 0)[None, :, :, None]  # (1, C, N1 + N2 ..., 1)
        new_features = self.mlp(new_features)
        new_features = new_features.squeeze(dim=0).squeeze(dim=-1).permute(1, 0)  # (N1 + N2 ..., C)
        return new_features
| 5,425
| 38.318841
| 113
|
py
|
SASA
|
SASA-main/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_utils.py
|
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function, Variable
from . import pointnet2_batch_cuda as pointnet2
@torch.no_grad()
def calc_dist_matrix_for_sampling(xyz: torch.Tensor, features: torch.Tensor = None,
                                  gamma: float = 1.0):
    """Build the pairwise distance matrix used by feature-FPS.

    :param xyz: (B, N, 3) point coordinates
    :param features: optional (B, N, C) point features; when given, their pairwise
        distances are added to the geometric ones, scaled by `gamma`
    :param gamma: weight of the feature-distance term
    :return: (B, N, N) combined distance matrix
    """
    dist_matrix = torch.cdist(xyz, xyz)
    if features is None:
        return dist_matrix
    # Geometric distance plus gamma-weighted feature distance.
    return dist_matrix + gamma * torch.cdist(features, features)
@torch.no_grad()
def furthest_point_sample(xyz: torch.Tensor, npoint: int) -> torch.Tensor:
    """
    Uses iterative furthest point sampling to select a set of npoint features that have the largest
    minimum distance (D-FPS).
    :param xyz: (B, N, 3) where N > npoint; must be a contiguous CUDA tensor
    :param npoint: int, number of features in the sampled set
    :return:
        output: (B, npoint) int32 tensor of sampled indices into xyz
    """
    assert xyz.is_contiguous()

    B, N, _ = xyz.size()
    # NOTE(review): torch.cuda.*Tensor constructors are deprecated in newer
    # PyTorch; consider torch.empty(..., device=xyz.device) when upgrading.
    output = torch.cuda.IntTensor(B, npoint)
    # Scratch buffer: running min-distance of each point to the selected set.
    temp = torch.cuda.FloatTensor(B, N).fill_(1e10)

    pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
    return output
@torch.no_grad()
def furthest_point_sample_matrix(matrix: torch.Tensor, npoint: int) -> torch.Tensor:
    """
    Uses iterative furthest point sampling to select a set of npoint features that have the largest
    minimum distance with a precomputed pairwise distance matrix (F-FPS).
    :param matrix: (B, N, N) contiguous CUDA tensor of pairwise distances
    :param npoint: int, number of features in the sampled set
    :return:
        output: (B, npoint) int32 tensor of sampled indices
    """
    assert matrix.is_contiguous()

    B, N, _ = matrix.size()
    output = torch.cuda.IntTensor(B, npoint)
    # Scratch buffer: running min-distance of each point to the selected set.
    temp = torch.cuda.FloatTensor(B, N).fill_(1e10)

    pointnet2.furthest_point_sampling_matrix_wrapper(B, N, npoint, matrix, temp, output)
    return output
@torch.no_grad()
def furthest_point_sample_weights(xyz: torch.Tensor, weights: torch.Tensor, npoint: int) -> torch.Tensor:
    """
    Uses iterative furthest point sampling to select a set of npoint features that have the largest
    minimum weighted distance (S-FPS: distances scaled by per-point weights).

    Args:
        xyz: (B, N, 3), contiguous CUDA tensor of xyz coordinates
        weights: (B, N), contiguous CUDA tensor of point weights
        npoint: int, number of points in the sampled set

    Returns:
        output: (B, npoint) int32 tensor of sampled indices
    """
    assert xyz.is_contiguous()
    assert weights.is_contiguous()

    B, N, _ = xyz.size()
    output = torch.cuda.IntTensor(B, npoint)
    # Scratch buffer: running min weighted distance to the selected set.
    temp = torch.cuda.FloatTensor(B, N).fill_(1e10)

    pointnet2.furthest_point_sampling_weights_wrapper(B, N, npoint, xyz, weights, temp, output)
    return output
class GatherOperation(Function):
    """Autograd-aware gather of per-point feature columns by index (CUDA)."""

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        Gather the feature columns selected by idx.
        :param ctx:
        :param features: (B, C, N)
        :param idx: (B, npoint) index tensor of the features to gather
        :return:
            output: (B, C, npoint)
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()

        B, npoint = idx.size()
        _, C, N = features.size()
        output = torch.cuda.FloatTensor(B, C, npoint)

        pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output)

        # Save only what backward needs: the indices and the source width N.
        ctx.for_backwards = (idx, C, N)
        return output

    @staticmethod
    def backward(ctx, grad_out):
        # Scatter-add the upstream gradient back to the gathered positions;
        # idx receives no gradient (it is integral).
        idx, C, N = ctx.for_backwards
        B, npoint = idx.size()

        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data)
        return grad_features, None


# Functional entry point used throughout the codebase.
gather_operation = GatherOperation.apply
def three_nn(unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Find the three nearest neighbors of unknown in known
    :param unknown: (B, N, 3) contiguous CUDA tensor
    :param known: (B, M, 3) contiguous CUDA tensor
    :return:
        dist: (B, N, 3) l2 distance to the three nearest neighbors
        idx: (B, N, 3) index of 3 nearest neighbors
    """
    assert unknown.is_contiguous()
    assert known.is_contiguous()

    B, N, _ = unknown.size()
    m = known.size(1)
    # The CUDA kernel writes *squared* distances into dist2.
    dist2 = torch.cuda.FloatTensor(B, N, 3)
    idx = torch.cuda.IntTensor(B, N, 3)

    pointnet2.three_nn_wrapper(B, N, m, unknown, known, dist2, idx)
    # Convert squared distances to l2 distances before returning.
    return torch.sqrt(dist2), idx
class ThreeInterpolate(Function):
    """Autograd-aware weighted interpolation from 3 nearest neighbors (CUDA)."""

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """
        Performs weighted linear interpolation on 3 features
        :param ctx:
        :param features: (B, C, M) features descriptors to be interpolated from
        :param idx: (B, n, 3) three nearest neighbors of the target features in features
        :param weight: (B, n, 3) interpolation weights (normally from three_nn)
        :return:
            output: (B, C, n) tensor of the interpolated features
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()
        assert weight.is_contiguous()

        B, c, m = features.size()
        n = idx.size(1)
        # Backward needs idx, weight and the source width m to scatter gradients.
        ctx.three_interpolate_for_backward = (idx, weight, m)
        output = torch.cuda.FloatTensor(B, c, n)

        pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, N) tensor with gradients of outputs
        :return:
            grad_features: (B, C, M) tensor with gradients of features
            None: no gradient for idx
            None: no gradient for weight
        """
        idx, weight, m = ctx.three_interpolate_for_backward
        B, c, n = grad_out.size()

        grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_())
        grad_out_data = grad_out.data.contiguous()

        pointnet2.three_interpolate_grad_wrapper(B, c, n, m, grad_out_data, idx, weight, grad_features.data)
        return grad_features, None, None


# Functional entry point used throughout the codebase.
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
    """Autograd-aware grouping of features by neighborhood indices (CUDA)."""

    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """
        :param ctx:
        :param features: (B, C, N) tensor of features to group
        :param idx: (B, npoint, nsample) tensor containing the indices of features to group with
        :return:
            output: (B, C, npoint, nsample) tensor
        """
        assert features.is_contiguous()
        assert idx.is_contiguous()

        B, nfeatures, nsample = idx.size()
        _, C, N = features.size()
        output = torch.cuda.FloatTensor(B, C, nfeatures, nsample)

        pointnet2.group_points_wrapper(B, C, N, nfeatures, nsample, features, idx, output)

        # Save only the indices and the source width N for backward.
        ctx.for_backwards = (idx, N)
        return output

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        :param ctx:
        :param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward
        :return:
            grad_features: (B, C, N) gradient of the features; idx gets no gradient
        """
        idx, N = ctx.for_backwards

        B, C, npoint, nsample = grad_out.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())

        grad_out_data = grad_out.data.contiguous()
        pointnet2.group_points_grad_wrapper(B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data)
        return grad_features, None


# Functional entry point used throughout the codebase.
grouping_operation = GroupingOperation.apply
@torch.no_grad()
def ball_query(radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor):
    """
    :param radius: float, radius of the balls
    :param nsample: int, maximum number of features in the balls
    :param xyz: (B, N, 3) xyz coordinates of the features
    :param new_xyz: (B, npoint, 3) centers of the ball query
    :return:
        idx_cnt: (B, npoint) tensor with the number of grouped points for each ball query
        idx: (B, npoint, nsample) tensor with the indices of the features that form the query balls
             (zero-initialized; balls with fewer than nsample hits are padded by the kernel)
    """
    assert new_xyz.is_contiguous()
    assert xyz.is_contiguous()

    B, N, _ = xyz.size()
    npoint = new_xyz.size(1)
    idx = torch.cuda.IntTensor(B, npoint, nsample).zero_()
    idx_cnt = torch.cuda.IntTensor(B, npoint).zero_()

    pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx_cnt, idx)
    return idx_cnt, idx
@torch.no_grad()
def ball_query_dilated(radius_in: float, radius_out: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor):
    """
    Ball query restricted to the annulus between radius_in and radius_out
    (used by dilated / ring grouping).
    :param radius_in: float, radius of the inner balls
    :param radius_out: float, radius of the outer balls
    :param nsample: int, maximum number of features in the balls
    :param xyz: (B, N, 3) xyz coordinates of the features
    :param new_xyz: (B, npoint, 3) centers of the ball query
    :return:
        idx_cnt: (B, npoint) tensor with the number of grouped points for each ball query
        idx: (B, npoint, nsample) tensor with the indices of the features that form the query balls
    """
    assert new_xyz.is_contiguous()
    assert xyz.is_contiguous()

    B, N, _ = xyz.size()
    npoint = new_xyz.size(1)
    idx_cnt = torch.cuda.IntTensor(B, npoint).zero_()
    idx = torch.cuda.IntTensor(B, npoint, nsample).zero_()

    pointnet2.ball_query_dilated_wrapper(B, N, npoint, radius_in, radius_out, nsample, new_xyz, xyz, idx_cnt, idx)
    return idx_cnt, idx
class QueryAndGroup(nn.Module):
    """Ball-query grouping: for every centroid, gather up to `nsample`
    neighbors within `radius` and return their (optionally xyz-augmented)
    features, with coordinates re-centered on the centroid."""

    def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
        """
        :param radius: float, radius of ball
        :param nsample: int, maximum number of features to gather in the ball
        :param use_xyz: whether to prepend relative xyz to the grouped features
        """
        super().__init__()
        self.radius = radius
        self.nsample = nsample
        self.use_xyz = use_xyz

    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None):
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centroids
        :param features: (B, C, N) descriptors of the features
        :return:
            idx_cnt: (B, npoint) tensor with the number of grouped points for each ball query
            new_features: (B, 3 + C, npoint, nsample)
        """
        idx_cnt, idx = ball_query(self.radius, self.nsample, xyz, new_xyz)

        # Gather neighbor coordinates and translate them to be centroid-relative.
        grouped_xyz = grouping_operation(xyz.transpose(1, 2).contiguous(), idx)  # (B, 3, npoint, nsample)
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)

        if features is None:
            assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
            return idx_cnt, grouped_xyz

        grouped_features = grouping_operation(features, idx)
        if self.use_xyz:
            # (B, C + 3, npoint, nsample)
            return idx_cnt, torch.cat([grouped_xyz, grouped_features], dim=1)
        return idx_cnt, grouped_features
class QueryAndGroupDilated(nn.Module):
    """Annulus (dilated-ball) grouping: gathers neighbors whose distance to the
    centroid lies between `radius_in` and `radius_out`."""

    def __init__(self, radius_in: float, radius_out: float, nsample: int, use_xyz: bool = True):
        """
        :param radius_in: float, radius of inner ball
        :param radius_out: float, radius of outer ball
        :param nsample: int, maximum number of features to gather in the ball
        :param use_xyz: whether to prepend relative xyz to the grouped features
        """
        super().__init__()
        self.radius_in = radius_in
        self.radius_out = radius_out
        self.nsample = nsample
        self.use_xyz = use_xyz

    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None):
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: (B, npoint, 3) centroids
        :param features: (B, C, N) descriptors of the features
        :return:
            idx_cnt: (B, npoint) tensor with the number of grouped points for each ball query
            new_features: (B, 3 + C, npoint, nsample)
        """
        idx_cnt, idx = ball_query_dilated(self.radius_in, self.radius_out, self.nsample, xyz, new_xyz)

        # Gather neighbor coordinates and translate them to be centroid-relative.
        grouped_xyz = grouping_operation(xyz.transpose(1, 2).contiguous(), idx)  # (B, 3, npoint, nsample)
        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)

        if features is None:
            assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
            return idx_cnt, grouped_xyz

        grouped_features = grouping_operation(features, idx)
        if self.use_xyz:
            # (B, C + 3, npoint, nsample)
            return idx_cnt, torch.cat([grouped_xyz, grouped_features], dim=1)
        return idx_cnt, grouped_features
class GroupAll(nn.Module):
    """Degenerate grouper: treats the whole point set as one neighborhood
    (used when npoint is None, i.e. global set abstraction)."""

    def __init__(self, use_xyz: bool = True):
        super().__init__()
        self.use_xyz = use_xyz

    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None):
        """
        :param xyz: (B, N, 3) xyz coordinates of the features
        :param new_xyz: ignored
        :param features: (B, C, N) descriptors of the features
        :return:
            idx_cnt: (B, 1) all-ones count tensor (every point is grouped)
            new_features: (B, C + 3, 1, N)
        """
        grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)  # (B, 3, 1, N)
        if features is None:
            new_features = grouped_xyz
        elif self.use_xyz:
            new_features = torch.cat([grouped_xyz, features.unsqueeze(2)], dim=1)  # (B, 3 + C, 1, N)
        else:
            new_features = features.unsqueeze(2)

        idx_cnt = new_features.new_ones(new_features.size(0), 1)
        return idx_cnt, new_features
| 14,049
| 35.588542
| 116
|
py
|
SASA
|
SASA-main/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_modules.py
|
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import pointnet2_utils
class _PointnetSAModuleBase(nn.Module):
    """Shared skeleton of PointNet++ set-abstraction modules.

    Subclasses fill in `npoint`, `groupers` and `mlps`; this base implements
    the common forward: FPS centroid sampling -> per-scale grouping ->
    shared MLP -> neighborhood pooling -> channel concatenation.
    """

    def __init__(self):
        super().__init__()
        self.npoint = None             # number of sampled centroids (None => group all points)
        self.groupers = None           # nn.ModuleList of QueryAndGroup / GroupAll
        self.mlps = None               # nn.ModuleList of per-scale shared MLPs
        self.pool_method = 'max_pool'  # 'max_pool' or 'avg_pool'

    def forward(self, xyz: torch.Tensor, features: torch.Tensor = None, new_xyz=None) -> (torch.Tensor, torch.Tensor):
        """
        :param xyz: (B, N, 3) tensor of the xyz coordinates of the features
        :param features: (B, C, N) tensor of the descriptors of the the features
        :param new_xyz: (B, npoint, 3) tensor of the xyz coordinates of the grouping centers if specified
        :return:
            new_xyz: (B, npoint, 3) tensor of the new features' xyz
            new_features: (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if new_xyz is None:
            # Sample centroids by furthest point sampling unless the caller
            # provided them; npoint None means global grouping (no centroids).
            new_xyz = pointnet2_utils.gather_operation(
                xyz_flipped,
                pointnet2_utils.furthest_point_sample(xyz, self.npoint)
            ).transpose(1, 2).contiguous() if self.npoint is not None else None

        for i in range(len(self.groupers)):
            idx_cnt, new_features = self.groupers[i](xyz, new_xyz, features)  # (B, C, npoint, nsample)
            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)

            # Zero out features of empty balls (idx_cnt == 0) so the padded
            # zero-indices don't leak point 0's features into the pooling.
            idx_cnt_mask = (idx_cnt > 0).float()
            idx_cnt_mask = idx_cnt_mask.unsqueeze(dim=1).unsqueeze(dim=-1)
            new_features *= idx_cnt_mask

            if self.pool_method == 'max_pool':
                new_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            elif self.pool_method == 'avg_pool':
                new_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            else:
                raise NotImplementedError

            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
            new_features_list.append(new_features)

        return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
    """Pointnet set abstraction layer with multiscale grouping"""

    def __init__(self, *, npoint: int, radii: List[float], nsamples: List[int], mlps: List[List[int]], bn: bool = True,
                 use_xyz: bool = True, pool_method='max_pool'):
        """
        :param npoint: int, number of sampled centroids (None => group all)
        :param radii: list of float, list of radii to group with
        :param nsamples: list of int, number of samples in each ball query
        :param mlps: list of list of int, spec of the pointnet before the global pooling for each scale
        :param bn: whether to use batchnorm (kept for interface compatibility)
        :param use_xyz: whether to prepend relative xyz to grouped features
        :param pool_method: max_pool / avg_pool
        """
        super().__init__()

        assert len(radii) == len(nsamples) == len(mlps)

        self.npoint = npoint
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()
        for radius, nsample, mlp_spec in zip(radii, nsamples, mlps):
            grouper = (
                pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
                if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
            )
            self.groupers.append(grouper)

            if use_xyz:
                # NOTE: intentionally mutates the caller's spec in place
                # (3 xyz channels are prepended by the grouper).
                mlp_spec[0] += 3

            layers = []
            for c_in, c_out in zip(mlp_spec[:-1], mlp_spec[1:]):
                layers.extend([
                    nn.Conv2d(c_in, c_out, kernel_size=1, bias=False),
                    nn.BatchNorm2d(c_out),
                    nn.ReLU()
                ])
            self.mlps.append(nn.Sequential(*layers))

        self.pool_method = pool_method
class PointnetSAModule(PointnetSAModuleMSG):
    """Pointnet set abstraction layer"""

    def __init__(self, *, mlp: List[int], npoint: int = None, radius: float = None, nsample: int = None,
                 bn: bool = True, use_xyz: bool = True, pool_method='max_pool'):
        """
        Single-scale convenience wrapper: lifts the scalar radius / nsample /
        mlp spec into one-element lists and defers to the MSG constructor.

        :param mlp: list of int, spec of the pointnet before the global max_pool
        :param npoint: int, number of features
        :param radius: float, radius of ball
        :param nsample: int, number of samples in the ball query
        :param bn: whether to use batchnorm
        :param use_xyz: whether to prepend relative xyz to grouped features
        :param pool_method: max_pool / avg_pool
        """
        super().__init__(
            npoint=npoint, radii=[radius], nsamples=[nsample], mlps=[mlp],
            bn=bn, use_xyz=use_xyz, pool_method=pool_method
        )
class _PointnetSAModuleFSBase(nn.Module):
    """Base for set-abstraction modules with fusion sampling (SASA).

    Supports mixing several sampling strategies (d-fps / f-fps / s-fps), each
    applied to its own index range of the input points, plus optional skip
    connection, post-aggregation MLP and per-point confidence head.
    """

    def __init__(self):
        super().__init__()
        self.groupers = None                 # nn.ModuleList of groupers
        self.mlps = None                     # nn.ModuleList of per-scale MLPs
        self.npoint_list = []                # samples per sampling method
        self.sample_range_list = [[0, -1]]   # index range per sampling method
        self.sample_method_list = ['d-fps']  # 'd-fps' | 'f-fps' | 's-fps'
        self.radii = []
        self.pool_method = 'max_pool'
        self.dilated_radius_group = False
        self.weight_gamma = 1.0
        self.skip_connection = False
        self.aggregation_mlp = None
        self.confidence_mlp = None

    def forward(self,
                xyz: torch.Tensor,
                features: torch.Tensor = None,
                new_xyz=None,
                scores=None):
        """
        :param xyz: (B, N, 3) tensor of the xyz coordinates of the features
        :param features: (B, C, N) tensor of the descriptors of the features
        :param new_xyz: optional precomputed centroids; sampling is skipped when given
        :param scores: (B, N) tensor of confidence scores of points, required when using s-fps
        :return:
            new_xyz: (B, npoint, 3) tensor of the new features' xyz
            new_features: (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
        """
        new_features_list = []

        xyz_flipped = xyz.transpose(1, 2).contiguous()
        if new_xyz is None:
            assert len(self.npoint_list) == len(self.sample_range_list) == len(self.sample_method_list)
            sample_idx_list = []
            for i in range(len(self.sample_method_list)):
                # NOTE(review): Python slicing means a range of [0, -1] excludes
                # the final point — confirm this is the intended semantics.
                xyz_slice = xyz[:, self.sample_range_list[i][0]:self.sample_range_list[i][1], :].contiguous()
                if self.sample_method_list[i] == 'd-fps':
                    # Plain distance-based furthest point sampling.
                    sample_idx = pointnet2_utils.furthest_point_sample(xyz_slice, self.npoint_list[i])
                elif self.sample_method_list[i] == 'f-fps':
                    # Feature-augmented FPS: distance matrix mixes xyz and feature distances.
                    features_slice = features[:, :, self.sample_range_list[i][0]:self.sample_range_list[i][1]]
                    dist_matrix = pointnet2_utils.calc_dist_matrix_for_sampling(xyz_slice,
                                                                                features_slice.permute(0, 2, 1),
                                                                                self.weight_gamma)
                    sample_idx = pointnet2_utils.furthest_point_sample_matrix(dist_matrix, self.npoint_list[i])
                elif self.sample_method_list[i] == 's-fps':
                    # Score-weighted FPS: sigmoid(scores)^gamma weights the distances.
                    assert scores is not None
                    scores_slice = \
                        scores[:, self.sample_range_list[i][0]:self.sample_range_list[i][1]].contiguous()
                    scores_slice = scores_slice.sigmoid() ** self.weight_gamma
                    sample_idx = pointnet2_utils.furthest_point_sample_weights(
                        xyz_slice,
                        scores_slice,
                        self.npoint_list[i]
                    )
                else:
                    raise NotImplementedError

                # Shift slice-local indices back into the full-point-set index space.
                sample_idx_list.append(sample_idx + self.sample_range_list[i][0])

            sample_idx = torch.cat(sample_idx_list, dim=-1)
            new_xyz = pointnet2_utils.gather_operation(
                xyz_flipped,
                sample_idx
            ).transpose(1, 2).contiguous()  # (B, npoint, 3)

            if self.skip_connection:
                # Features of the sampled points, appended after pooling below.
                old_features = pointnet2_utils.gather_operation(
                    features,
                    sample_idx
                ) if features is not None else None  # (B, C, npoint)

        for i in range(len(self.groupers)):
            idx_cnt, new_features = self.groupers[i](xyz, new_xyz, features)  # (B, C, npoint, nsample)
            new_features = self.mlps[i](new_features)  # (B, mlp[-1], npoint, nsample)

            # Zero out features of empty ball queries before pooling.
            idx_cnt_mask = (idx_cnt > 0).float()  # (B, npoint)
            idx_cnt_mask = idx_cnt_mask.unsqueeze(1).unsqueeze(-1)  # (B, 1, npoint, 1)
            new_features *= idx_cnt_mask

            if self.pool_method == 'max_pool':
                pooled_features = F.max_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            elif self.pool_method == 'avg_pool':
                pooled_features = F.avg_pool2d(
                    new_features, kernel_size=[1, new_features.size(3)]
                )  # (B, mlp[-1], npoint, 1)
            else:
                raise NotImplementedError

            new_features_list.append(pooled_features.squeeze(-1))  # (B, mlp[-1], npoint)

        # NOTE(review): old_features is only bound in the new_xyz-is-None branch;
        # calling with skip_connection=True and a precomputed new_xyz would raise
        # NameError — confirm callers always pass new_xyz=None in that mode.
        if self.skip_connection and old_features is not None:
            new_features_list.append(old_features)

        new_features = torch.cat(new_features_list, dim=1)
        if self.aggregation_mlp is not None:
            new_features = self.aggregation_mlp(new_features)

        if self.confidence_mlp is not None:
            # Per-point confidence logits for s-fps in the next layer.
            new_scores = self.confidence_mlp(new_features)
            new_scores = new_scores.squeeze(1)  # (B, npoint)
            return new_xyz, new_features, new_scores

        return new_xyz, new_features, None
class PointnetSAModuleFSMSG(_PointnetSAModuleFSBase):
    """Pointnet set abstraction layer with fusion sampling and multiscale grouping"""

    def __init__(self, *,
                 npoint_list: List[int] = None,
                 sample_range_list: List[List[int]] = None,
                 sample_method_list: List[str] = None,
                 radii: List[float],
                 nsamples: List[int],
                 mlps: List[List[int]],
                 bn: bool = True,
                 use_xyz: bool = True,
                 pool_method='max_pool',
                 dilated_radius_group: bool = False,
                 skip_connection: bool = False,
                 weight_gamma: float = 1.0,
                 aggregation_mlp: List[int] = None,
                 confidence_mlp: List[int] = None):
        """
        :param npoint_list: list of int, number of samples for every sampling method
        :param sample_range_list: list of list of int, sample index range [left, right] for every sampling method
        :param sample_method_list: list of str, list of used sampling method, d-fps or f-fps
        :param radii: list of float, list of radii to group with
        :param nsamples: list of int, number of samples in each ball query
        :param mlps: list of list of int, spec of the pointnet before the global pooling for each scale
        :param bn: whether to use batchnorm (note: BatchNorm is always used below)
        :param use_xyz: whether to prepend relative xyz to grouped features
        :param pool_method: max_pool / avg_pool
        :param dilated_radius_group: whether to use radius dilated group
        :param skip_connection: whether to add skip connection
        :param weight_gamma: gamma for s-fps, default: 1.0
        :param aggregation_mlp: list of int, spec aggregation mlp
        :param confidence_mlp: list of int, spec confidence mlp
        """
        super().__init__()

        assert npoint_list is None or len(npoint_list) == len(sample_range_list) == len(sample_method_list)
        assert len(radii) == len(nsamples) == len(mlps)

        self.npoint_list = npoint_list
        self.sample_range_list = sample_range_list
        self.sample_method_list = sample_method_list
        self.radii = radii
        self.groupers = nn.ModuleList()
        self.mlps = nn.ModuleList()

        # former_radius tracks the previous scale's radius so that dilated
        # grouping queries the annulus (former_radius, radius] at each scale.
        former_radius = 0.0
        in_channels, out_channels = 0, 0
        for i in range(len(radii)):
            radius = radii[i]
            nsample = nsamples[i]
            if dilated_radius_group:
                self.groupers.append(
                    pointnet2_utils.QueryAndGroupDilated(former_radius, radius, nsample, use_xyz=use_xyz)
                )
            else:
                self.groupers.append(
                    pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
                )
            former_radius = radius
            mlp_spec = mlps[i]
            if use_xyz:
                # NOTE: mutates the caller's spec in place (xyz adds 3 channels).
                mlp_spec[0] += 3
            shared_mlp = []
            for k in range(len(mlp_spec) - 1):
                shared_mlp.extend([
                    nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False),
                    nn.BatchNorm2d(mlp_spec[k + 1]),
                    nn.ReLU()
                ])
            self.mlps.append(nn.Sequential(*shared_mlp))
            # Original per-point channel count (for skip connection) and the
            # accumulated pooled channel count over all scales.
            in_channels = mlp_spec[0] - 3 if use_xyz else mlp_spec[0]
            out_channels += mlp_spec[-1]

        self.pool_method = pool_method
        self.dilated_radius_group = dilated_radius_group
        self.skip_connection = skip_connection
        self.weight_gamma = weight_gamma

        if skip_connection:
            out_channels += in_channels

        if aggregation_mlp is not None:
            shared_mlp = []
            for k in range(len(aggregation_mlp)):
                shared_mlp.extend([
                    nn.Conv1d(out_channels, aggregation_mlp[k], kernel_size=1, bias=False),
                    nn.BatchNorm1d(aggregation_mlp[k]),
                    nn.ReLU()
                ])
                out_channels = aggregation_mlp[k]
            self.aggregation_mlp = nn.Sequential(*shared_mlp)
        else:
            self.aggregation_mlp = None

        if confidence_mlp is not None:
            shared_mlp = []
            for k in range(len(confidence_mlp)):
                shared_mlp.extend([
                    nn.Conv1d(out_channels, confidence_mlp[k], kernel_size=1, bias=False),
                    nn.BatchNorm1d(confidence_mlp[k]),
                    nn.ReLU()
                ])
                out_channels = confidence_mlp[k]
            # Final 1-channel head producing per-point confidence logits.
            shared_mlp.append(
                nn.Conv1d(out_channels, 1, kernel_size=1, bias=True),
            )
            self.confidence_mlp = nn.Sequential(*shared_mlp)
        else:
            self.confidence_mlp = None
class PointnetSAModuleFS(PointnetSAModuleFSMSG):
    """Pointnet set abstraction layer with fusion sampling"""

    def __init__(self, *,
                 mlp: List[int],
                 npoint_list: List[int] = None,
                 sample_range_list: List[List[int]] = None,
                 sample_method_list: List[str] = None,
                 radius: float = None,
                 nsample: int = None,
                 bn: bool = True,
                 use_xyz: bool = True,
                 pool_method='max_pool',
                 dilated_radius_group: bool = False,
                 skip_connection: bool = False,
                 weight_gamma: float = 1.0,
                 aggregation_mlp: List[int] = None,
                 confidence_mlp: List[int] = None):
        """
        Single-scale convenience wrapper around PointnetSAModuleFSMSG: lifts
        the scalar radius / nsample / mlp spec into one-element lists.

        :param mlp: list of int, spec of the pointnet before the global max_pool
        :param npoint_list: list of int, number of samples for every sampling method
        :param sample_range_list: list of list of int, sample index range [left, right] for every sampling method
        :param sample_method_list: list of str, list of used sampling method, d-fps, f-fps or c-fps
        :param radius: float, radius of ball
        :param nsample: int, number of samples in the ball query
        :param bn: whether to use batchnorm
        :param use_xyz: whether to prepend relative xyz to grouped features
        :param pool_method: max_pool / avg_pool
        :param dilated_radius_group: whether to use radius dilated group
        :param skip_connection: whether to add skip connection
        :param weight_gamma: gamma for s-fps, default: 1.0
        :param aggregation_mlp: list of int, spec aggregation mlp
        :param confidence_mlp: list of int, spec confidence mlp
        """
        super().__init__(
            npoint_list=npoint_list,
            sample_range_list=sample_range_list,
            sample_method_list=sample_method_list,
            radii=[radius],
            nsamples=[nsample],
            mlps=[mlp],
            bn=bn,
            use_xyz=use_xyz,
            pool_method=pool_method,
            dilated_radius_group=dilated_radius_group,
            skip_connection=skip_connection,
            weight_gamma=weight_gamma,
            aggregation_mlp=aggregation_mlp,
            confidence_mlp=confidence_mlp
        )
class PointnetFPModule(nn.Module):
    r"""Propagates the features of one set to another"""

    def __init__(self, *, mlp: List[int], bn: bool = True):
        """
        :param mlp: list of int, channel spec of the shared MLP
        :param bn: whether to use batchnorm (note: BatchNorm is always used below)
        """
        super().__init__()
        shared_mlps = []
        for k in range(len(mlp) - 1):
            shared_mlps.extend([
                nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False),
                nn.BatchNorm2d(mlp[k + 1]),
                nn.ReLU()
            ])
        self.mlp = nn.Sequential(*shared_mlps)

    def forward(
            self, unknown: torch.Tensor, known: torch.Tensor, unknow_feats: torch.Tensor, known_feats: torch.Tensor
    ) -> torch.Tensor:
        """
        :param unknown: (B, n, 3) tensor of the xyz positions of the unknown features
        :param known: (B, m, 3) tensor of the xyz positions of the known features;
            when None, known_feats is broadcast to every unknown point instead
        :param unknow_feats: (B, C1, n) tensor of the features to be propagated to
        :param known_feats: (B, C2, m) tensor of features to be propagated
        :return:
            new_features: (B, mlp[-1], n) tensor of the features of the unknown features
        """
        if known is not None:
            # Inverse-distance-weighted interpolation from the 3 nearest known points.
            dist, idx = pointnet2_utils.three_nn(unknown, known)
            dist_recip = 1.0 / (dist + 1e-8)
            norm = torch.sum(dist_recip, dim=2, keepdim=True)
            weight = dist_recip / norm

            interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
        else:
            # Global-feature case: replicate the single known feature to all n points.
            interpolated_feats = known_feats.expand(*known_feats.size()[0:2], unknown.size(1))

        if unknow_feats is not None:
            new_features = torch.cat([interpolated_feats, unknow_feats], dim=1)  # (B, C2 + C1, n)
        else:
            new_features = interpolated_feats

        # Add a trailing singleton dim so the Conv2d MLP applies, then drop it.
        new_features = new_features.unsqueeze(-1)
        new_features = self.mlp(new_features)

        return new_features.squeeze(-1)
if __name__ == "__main__":
pass
| 18,910
| 41.688488
| 119
|
py
|
SASA
|
SASA-main/pcdet/ops/iou3d_nms/iou3d_nms_utils.py
|
"""
3D IoU Calculation and Rotated NMS
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
"""
import torch
from ...utils import common_utils
from . import iou3d_nms_cuda
def boxes_bev_iou_cpu(boxes_a, boxes_b):
    """
    Bird's-eye-view IoU between two sets of rotated boxes, computed on CPU.
    Accepts numpy arrays or CPU tensors; returns the same kind it was given.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]

    Returns:
        ans_iou: (N, M) pairwise BEV IoU (numpy if the inputs were numpy)
    """
    boxes_a, is_numpy = common_utils.check_numpy_to_torch(boxes_a)
    boxes_b, is_numpy = common_utils.check_numpy_to_torch(boxes_b)
    assert not (boxes_a.is_cuda or boxes_b.is_cuda), 'Only support CPU tensors'
    assert boxes_a.shape[1] == 7 and boxes_b.shape[1] == 7
    ans_iou = boxes_a.new_zeros(torch.Size((boxes_a.shape[0], boxes_b.shape[0])))
    iou3d_nms_cuda.boxes_iou_bev_cpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou)

    return ans_iou.numpy() if is_numpy else ans_iou
def boxes_iou_bev(boxes_a, boxes_b):
    """
    Bird's-eye-view IoU between two sets of rotated boxes, computed on GPU.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]

    Returns:
        ans_iou: (N, M) pairwise BEV IoU
    """
    assert boxes_a.shape[1] == boxes_b.shape[1] == 7
    ans_iou = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_()

    iou3d_nms_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou)

    return ans_iou
def boxes_iou3d_gpu(boxes_a, boxes_b):
    """
    3D IoU between two sets of rotated boxes: rotated BEV overlap (CUDA kernel)
    multiplied by vertical (z) overlap, divided by the union volume.

    Args:
        boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading], z is the box center
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]

    Returns:
        iou3d: (N, M) pairwise 3D IoU
    """
    assert boxes_a.shape[1] == boxes_b.shape[1] == 7

    # height overlap: z extents of each box (center z +/- half height)
    boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1)
    boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1)
    boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(1, -1)
    boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(1, -1)

    # bev overlap: rotated polygon intersection areas from the CUDA kernel
    overlaps_bev = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_()  # (N, M)
    iou3d_nms_cuda.boxes_overlap_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev)

    max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min)
    min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max)
    overlaps_h = torch.clamp(min_of_max - max_of_min, min=0)

    # 3d iou: intersection volume over union volume (clamped to avoid /0)
    overlaps_3d = overlaps_bev * overlaps_h

    vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1)
    vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(1, -1)

    iou3d = overlaps_3d / torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6)

    return iou3d
def nms_gpu(boxes, scores, thresh, pre_maxsize=None, **kwargs):
    """
    Rotated 3D NMS on GPU (suppression by BEV overlap of score-sorted boxes).
    :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
    :param scores: (N)
    :param thresh: overlap threshold above which a lower-scored box is suppressed
    :param pre_maxsize: optional cap on the number of top-scored boxes fed to NMS
    :return: (kept_indices, None) — indices refer to the original `boxes` order
    """
    assert boxes.shape[1] == 7
    order = scores.sort(0, descending=True)[1]
    if pre_maxsize is not None:
        order = order[:pre_maxsize]

    boxes = boxes[order].contiguous()
    # Uninitialized CPU buffer; the CUDA wrapper fills the first `num_out`
    # entries with kept indices into the *sorted* boxes.
    keep = torch.LongTensor(boxes.size(0))
    num_out = iou3d_nms_cuda.nms_gpu(boxes, keep, thresh)
    # Map sorted-order indices back to the caller's original ordering.
    return order[keep[:num_out].cuda()].contiguous(), None
def nms_normal_gpu(boxes, scores, thresh, **kwargs):
    """
    Axis-aligned ("normal") 3D NMS on GPU — same flow as nms_gpu but using the
    non-rotated overlap kernel.
    :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
    :param scores: (N)
    :param thresh: overlap threshold above which a lower-scored box is suppressed
    :return: (kept_indices, None) — indices refer to the original `boxes` order
    """
    assert boxes.shape[1] == 7
    order = scores.sort(0, descending=True)[1]

    boxes = boxes[order].contiguous()

    # Uninitialized CPU buffer filled by the CUDA wrapper with kept indices
    # into the sorted boxes.
    keep = torch.LongTensor(boxes.size(0))
    num_out = iou3d_nms_cuda.nms_normal_gpu(boxes, keep, thresh)
    # Map sorted-order indices back to the caller's original ordering.
    return order[keep[:num_out].cuda()].contiguous(), None
| 3,650
| 30.205128
| 109
|
py
|
SASA
|
SASA-main/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py
|
import torch
import torch.nn as nn
from torch.autograd import Function
from ...utils import common_utils
from . import roiaware_pool3d_cuda
def points_in_boxes_cpu(points, boxes):
    """
    Point-in-box test on CPU. Accepts numpy arrays or CPU tensors and returns
    the same kind it was given.

    Args:
        points: (num_points, 3)
        boxes: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps

    Returns:
        point_indices: (N, num_points) int mask; entry [i, j] marks point j inside box i
    """
    assert boxes.shape[1] == 7
    assert points.shape[1] == 3
    points, is_numpy = common_utils.check_numpy_to_torch(points)
    boxes, is_numpy = common_utils.check_numpy_to_torch(boxes)

    point_indices = points.new_zeros((boxes.shape[0], points.shape[0]), dtype=torch.int)
    roiaware_pool3d_cuda.points_in_boxes_cpu(boxes.float().contiguous(), points.float().contiguous(), point_indices)

    return point_indices.numpy() if is_numpy else point_indices
def points_in_boxes_gpu(points, boxes):
    """
    Batched point-in-box assignment on GPU.
    :param points: (B, M, 3)
    :param boxes: (B, T, 7), num_valid_boxes <= T
    :return box_idxs_of_pts: (B, M) index of the box containing each point,
        default background = -1
    """
    assert boxes.shape[0] == points.shape[0]
    assert boxes.shape[2] == 7 and points.shape[2] == 3
    batch_size, num_points, _ = points.shape

    # -1 marks points belonging to no box; the kernel overwrites hits.
    box_idxs_of_pts = points.new_zeros((batch_size, num_points), dtype=torch.int).fill_(-1)
    roiaware_pool3d_cuda.points_in_boxes_gpu(boxes.contiguous(), points.contiguous(), box_idxs_of_pts)

    return box_idxs_of_pts
def points_to_boxes_dist_gpu(points, boxes):
    """Compute, for every point, the distance to its nearest box (CUDA kernel).

    Args:
        points: (B, M, 3)
        boxes: (B, T, 7), num_valid_boxes <= T
    Returns:
        pts_dist: (B, M), distance from pts to the nearest box
    """
    assert boxes.shape[0] == points.shape[0]
    assert boxes.shape[2] == 7 and points.shape[2] == 3
    batch_size, num_points, _ = points.shape
    # Large sentinel distance; the kernel overwrites entries it computes.
    # (Removed the unused `num_boxes` local present in the original.)
    pts_dist = points.new_zeros((batch_size, num_points), dtype=torch.float32).fill_(1e5)
    roiaware_pool3d_cuda.points_to_boxes_dist_gpu(boxes.contiguous(), points.contiguous(), pts_dist)
    return pts_dist
class RoIAwarePool3d(nn.Module):
    """nn.Module wrapper delegating RoI-aware pooling to RoIAwarePool3dFunction."""

    def __init__(self, out_size, max_pts_each_voxel=128):
        super().__init__()
        # Pooling grid resolution and per-voxel point cap, fixed at build time.
        self.out_size = out_size
        self.max_pts_each_voxel = max_pts_each_voxel

    def forward(self, rois, pts, pts_feature, pool_method='max'):
        assert pool_method in ['max', 'avg']
        return RoIAwarePool3dFunction.apply(
            rois, pts, pts_feature, self.out_size, self.max_pts_each_voxel,
            pool_method)
class RoIAwarePool3dFunction(Function):
    # Custom autograd Function: pools per-point features into a fixed 3D grid
    # inside each RoI via the roiaware_pool3d CUDA extension.
    @staticmethod
    def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):
        """
        Args:
            ctx:
            rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
            pts: (npoints, 3)
            pts_feature: (npoints, C)
            out_size: int or tuple, like 7 or (7, 7, 7)
            max_pts_each_voxel:
            pool_method: 'max' or 'avg'
        Returns:
            pooled_features: (N, out_x, out_y, out_z, C)
        """
        assert rois.shape[1] == 7 and pts.shape[1] == 3
        # An int means a cubic grid; otherwise expect an (x, y, z) triple.
        if isinstance(out_size, int):
            out_x = out_y = out_z = out_size
        else:
            assert len(out_size) == 3
            for k in range(3):
                assert isinstance(out_size[k], int)
            out_x, out_y, out_z = out_size
        num_rois = rois.shape[0]
        num_channels = pts_feature.shape[-1]
        num_pts = pts.shape[0]
        # Output buffers filled by the CUDA kernel; argmax / pts_idx_of_voxels
        # are kept for the backward pass.
        pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))
        argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)
        pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_each_voxel), dtype=torch.int)
        # The extension takes the pool method as an integer code.
        pool_method_map = {'max': 0, 'avg': 1}
        pool_method = pool_method_map[pool_method]
        roiaware_pool3d_cuda.forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, pool_method)
        # Stash everything backward() needs to scatter gradients to points.
        ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels)
        return pooled_features
    @staticmethod
    def backward(ctx, grad_out):
        """
        :param grad_out: (N, out_x, out_y, out_z, C)
        :return:
            grad_in: (npoints, C)
        """
        pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels = ctx.roiaware_pool3d_for_backward
        grad_in = grad_out.new_zeros((num_pts, num_channels))
        roiaware_pool3d_cuda.backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method)
        # Only pts_feature receives gradient; all other forward inputs get None.
        return None, None, grad_in, None, None, None
# This module is library-only; there is no standalone CLI behavior.
if __name__ == '__main__':
    pass
| 4,688
| 34.522727
| 120
|
py
|
chainer
|
chainer-master/setup.py
|
#!/usr/bin/env python
import os
import pkg_resources
import sys
from setuptools import setup
import chainerx_build_helper
# Chainer is known to be broken on exactly CPython 3.5.0; refuse to install
# unless the user explicitly opts in via CHAINER_PYTHON_350_FORCE=1.
if sys.version_info[:3] == (3, 5, 0):
    if not int(os.getenv('CHAINER_PYTHON_350_FORCE', '0')):
        msg = """
Chainer does not work with Python 3.5.0.
We strongly recommend to use another version of Python.
If you want to use Chainer with Python 3.5.0 at your own risk,
set CHAINER_PYTHON_350_FORCE environment variable to 1."""
        print(msg)
        sys.exit(1)
requirements = {
'install': [
'setuptools',
'typing_extensions',
'filelock',
'numpy>=1.9.0',
'protobuf>=3.0.0',
'six>=1.9.0',
],
'stylecheck': [
'autopep8>=1.4.1,<1.5',
'flake8>=3.7,<3.8',
'pycodestyle>=2.5,<2.6',
],
'test': [
'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.
'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0
'mock',
],
'doctest': [
'sphinx==1.8.2',
'matplotlib',
'theano',
],
'docs': [
'sphinx==1.8.2',
'sphinx_rtd_theme',
'onnx<1.7.0',
'packaging',
],
'appveyor': [
'-r test',
# pytest-timeout>=1.3.0 requires pytest>=3.6.
# TODO(niboshi): Consider upgrading pytest to >=3.6
'pytest-timeout<1.3.0',
],
'jenkins': [
'-r test',
# pytest-timeout>=1.3.0 requires pytest>=3.6.
# TODO(niboshi): Consider upgrading pytest to >=3.6
'pytest-timeout<1.3.0',
'pytest-cov<2.10', # pytest-cov 2.10 requires pytest>=4.6
'nose',
'coveralls',
'codecov',
'coverage<5', # Otherwise, Python must be built with sqlite
],
}
def reduce_requirements(key):
    """Expand pip-style ``-r <key>`` references in ``requirements[key]`` in place."""
    expanded = []
    for entry in requirements[key]:
        if not entry.startswith('-r'):
            expanded.append(entry)
            continue
        # Flatten the referenced list first, then splice it in.
        target = entry[2:].lstrip()
        reduce_requirements(target)
        expanded.extend(requirements[target])
    requirements[key] = expanded
for k in requirements.keys():
reduce_requirements(k)
extras_require = {k: v for k, v in requirements.items() if k != 'install'}
setup_requires = []
install_requires = requirements['install']
tests_require = requirements['test']
def find_any_distribution(pkgs):
    """Return the first installed distribution among ``pkgs``, or None."""
    for name in pkgs:
        try:
            dist = pkg_resources.get_distribution(name)
        except pkg_resources.DistributionNotFound:
            continue
        return dist
    return None
for pkg_name in ('ChainerMN', 'ONNX-Chainer'):
distribution_name = pkg_name.lower().replace('-', '_')
found_error = find_any_distribution([distribution_name])
if found_error is not None:
msg = """
We detected that {name} is installed in your environment.
{name} has been integrated to Chainer and no separate installation
is necessary. Please uninstall the old {name} in advance.
"""
print(msg.format(name=pkg_name))
exit(1)
here = os.path.abspath(os.path.dirname(__file__))
# Get __version__ variable
exec(open(os.path.join(here, 'chainer', '_version.py')).read())
setup_kwargs = dict(
name='chainer',
version=__version__, # NOQA
description='A flexible framework of neural networks',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='https://chainer.org/',
license='MIT License',
packages=['chainer',
'chainer.backends',
'chainer.dataset',
'chainer.dataset.tabular',
'chainer.datasets',
'chainer.distributions',
'chainer.exporters',
'chainer.functions',
'chainer.functions.activation',
'chainer.functions.array',
'chainer.functions.connection',
'chainer.functions.evaluation',
'chainer.functions.loss',
'chainer.functions.math',
'chainer.functions.noise',
'chainer.functions.normalization',
'chainer.functions.pooling',
'chainer.functions.rnn',
'chainer.functions.theano',
'chainer.functions.util',
'chainer.function_hooks',
'chainer.iterators',
'chainer.initializers',
'chainer.links',
'chainer.links.activation',
'chainer.links.caffe',
'chainer.links.caffe.protobuf3',
'chainer.links.connection',
'chainer.links.loss',
'chainer.links.model',
'chainer.links.model.vision',
'chainer.links.normalization',
'chainer.links.rnn',
'chainer.links.theano',
'chainer.link_hooks',
'chainer.graph_optimizations',
'chainer.optimizers',
'chainer.optimizer_hooks',
'chainer.serializers',
'chainer.testing',
'chainer.training',
'chainer.training.extensions',
'chainer.training.triggers',
'chainer.training.updaters',
'chainer.utils',
'chainermn',
'chainermn.communicators',
'chainermn.datasets',
'chainermn.extensions',
'chainermn.functions',
'chainermn.iterators',
'chainermn.links',
'chainermn.testing',
'onnx_chainer',
'onnx_chainer.functions',
'onnx_chainer.testing'],
package_data={
'chainer': ['py.typed'],
},
zip_safe=False,
setup_requires=setup_requires,
install_requires=install_requires,
tests_require=tests_require,
extras_require=extras_require,
python_requires='>=3.5.0',
)
build_chainerx = 0 != int(os.getenv('CHAINER_BUILD_CHAINERX', '0'))
if (os.getenv('READTHEDOCS', None) == 'True'
and os.getenv('READTHEDOCS_PROJECT', None) == 'chainer'):
# ChainerX must be built in order to build the docs (on Read the Docs).
build_chainerx = True
# Try to prevent Read the Docs build timeouts.
os.environ['MAKEFLAGS'] = '-j2'
chainerx_build_helper.config_setup_kwargs(setup_kwargs, build_chainerx)
setup(**setup_kwargs)
| 6,477
| 28.990741
| 78
|
py
|
chainer
|
chainer-master/chainerx_build_helper.py
|
# This script is based on pybind11's example script. See the original via the
# following URL: https://github.com/pybind/cmake_example/blob/master/setup.py
import distutils
import os
import platform
import re
import subprocess
import sys
import setuptools
from setuptools.command import build_ext
def emit_build_info(build_chainerx):
    """Write chainerx/_build_info.py recording whether ChainerX was built."""
    target = os.path.join(os.path.dirname(__file__), 'chainerx/_build_info.py')
    with open(target, mode='w') as out:
        out.write('build_chainerx = {}\n'.format(build_chainerx))
class CMakeExtension(setuptools.Extension):
    """A setuptools Extension whose actual build is delegated to CMake."""

    def __init__(self, name, build_targets, sourcedir=''):
        # No Python-level sources: CMake produces the binary artifacts.
        setuptools.Extension.__init__(self, name, sources=[])
        self.build_targets = build_targets
        self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext.build_ext):
    # build_ext subclass that shells out to CMake for every CMakeExtension.
    def run(self):
        # Fail early with a clear message when cmake is missing or too old.
        try:
            out = subprocess.check_output(['cmake', '--version'])
        except OSError:
            raise RuntimeError('CMake must be installed to build ChainerX')
        cmake_version = distutils.version.LooseVersion(
            re.search(r'version\s*([\d.]+)', out.decode()).group(1))
        if cmake_version < '3.1.0':
            raise RuntimeError('CMake >= 3.1.0 is required to build ChainerX')
        # Only the default generator and Ninja are supported.
        generator = os.getenv('CHAINERX_CMAKE_GENERATOR', '').lower()
        if generator not in ['', 'ninja']:
            raise RuntimeError("Generator %s is not supported." % generator)
        self.use_ninja = generator == 'ninja'
        for ext in self.extensions:
            self.build_extension(ext)
    def build_extension(self, ext):
        # Decide the build type: release/debug
        build_type = os.getenv('CHAINERX_BUILD_TYPE', None)
        if build_type is not None:
            # Use environment variable
            pass
        elif self.debug:
            # Being built with `python setup.py build --debug`
            build_type = 'Debug'
        elif os.getenv('READTHEDOCS', None) == 'True':
            # on ReadTheDocs
            build_type = 'Debug'
        else:
            # default
            build_type = 'Release'
        # Place built libraries where setuptools expects the extension module.
        extdir = os.path.abspath(
            os.path.dirname(self.get_ext_fullpath(ext.name)))
        cmake_args = ['-GNinja'] if self.use_ninja else []
        cmake_args += [
            '-DCHAINERX_BUILD_PYTHON=1',
            '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
            '-DPYTHON_EXECUTABLE=' + sys.executable,
            '-DCHAINERX_BUILD_TEST=OFF',
            '-DCMAKE_BUILD_TYPE=' + build_type,
        ]
        build_args = ['--config', build_type]
        if platform.system() == 'Windows':
            # MSVC needs the per-config output dir and (without Ninja) an
            # explicit 64-bit platform plus parallel-build flag.
            cmake_args += [
                '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
                    build_type.upper(), extdir)]
            if not self.use_ninja:
                if sys.maxsize > 2**32:
                    cmake_args += ['-A', 'x64']
                build_args += ['--', '/m']
        else:
            # Non-Windows: build only the targets this extension declares.
            build_args += ['--']
            build_args += ext.build_targets
        # Embed the package version into the binary via CXXFLAGS.
        env = os.environ.copy()
        env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
            env.get('CXXFLAGS', ''), self.distribution.get_version())
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        # Configure, then build, inside the temporary build directory.
        subprocess.check_call(
            ['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp,
            env=env)
        subprocess.check_call(
            ['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
def config_setup_kwargs(setup_kwargs, build_chainerx):
    """Mutate ``setup_kwargs`` in place to include (or stub out) ChainerX."""
    # TODO(imanishi): Call this function with setuptools.
    emit_build_info(build_chainerx)
    if not build_chainerx:
        # `chainerx` package needs to be able to be imported even if ChainerX
        # is unavailable.
        setup_kwargs['packages'] += ['chainerx']
        return
    if sys.version_info < (3, 5):
        raise RuntimeError(
            'ChainerX is only available for Python 3.5 or later.')
    setup_kwargs['packages'] += [
        'chainerx',
        'chainerx._docs',
        'chainerx.creation',
        'chainerx.manipulation',
        'chainerx.math',
        'chainerx.random',
        'chainerx.testing',
    ]
    setup_kwargs['package_data'] = {
        'chainerx': ['py.typed', '*.pyi'],
    }
    # Register the CMake-driven build for the native _core module.
    setup_kwargs.update(dict(
        cmdclass={'build_ext': CMakeBuild},
        ext_modules=[CMakeExtension(
            name='chainerx._core',
            build_targets=['_core.so'],
            sourcedir='chainerx_cc')],
    ))
| 4,576
| 31.928058
| 78
|
py
|
chainer
|
chainer-master/chainerx/_cuda.py
|
import chainerx
try:
# _pybind_cuda is unavailable if ChainerX is built without CUDA.
from chainerx import _pybind_cuda
_available = True
except Exception:
_available = False
try:
import cupy
_cupy_available = True
except Exception:
_cupy_available = False
def cupy_share_allocator():
    """Make CuPy allocate GPU memory through ChainerX's allocator.

    Routing CuPy through ChainerX's allocator lets both libraries share one
    memory pool instead of fragmenting device memory with two separate pools.
    """
    if not _available:
        raise RuntimeError(
            'Cannot share allocator with CuPy without the CUDA backend.')
    if not _cupy_available:
        raise RuntimeError(
            'Cannot share allocator with CuPy since CuPy is not available.')
    raw_allocator = _pybind_cuda.get_c_allocator()
    shared_allocator = cupy.cuda.memory.CFunctionAllocator(
        *raw_allocator, chainerx._global_context)
    cupy.cuda.set_allocator(shared_allocator.malloc)
| 1,005
| 27.742857
| 76
|
py
|
chainer
|
chainer-master/chainerx/_ndarray.py
|
# This file implements chainerx.ndarray methods that can be defined only in
# Python.
import chainerx
def populate():
    # Attach Python-only convenience methods to chainerx.ndarray.
    def clip(self, a_min, a_max):
        """Returns an array with values limited to [``a_min``, ``a_max``].
        .. seealso:: :func:`chainerx.clip` for full documentation,
            :meth:`numpy.ndarray.clip`
        """
        return chainerx.clip(self, a_min, a_max)
    chainerx.ndarray.clip = clip
| 434
| 21.894737
| 75
|
py
|
chainer
|
chainer-master/chainerx/_fallback_workarounds.py
|
# This file defines workaround implementation for
# NumPy-compatibility functions that fall back to NumPy/CuPy functions
# for native/cuda devices respecitvely.
# The workaround does not support backprop, and also requires external
# libraries mentioned above.
# Functions defined in this file should be considered to have high priority for
# genuine implementations.
import numpy
import chainerx
try:
import cupy
except Exception:
cupy = None
class _DummyContext:
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
_dummy_context = _DummyContext()
def _to_numpy(array):
    # Zero-copy view of a chainerx.ndarray as numpy.ndarray (native backend).
    assert isinstance(array, chainerx.ndarray)
    return chainerx.to_numpy(array, copy=False)
def _from_numpy(array):
    # Zero-copy wrap of a numpy.ndarray as chainerx.ndarray.
    assert isinstance(array, numpy.ndarray)
    return chainerx.array(array, copy=False)
def _to_cupy(array):
    assert cupy is not None
    # Convert to cupy.ndarray on the same device as source array
    return chainerx._to_cupy(array)
def _from_cupy(array):
    # Wrap a cupy.ndarray as chainerx.ndarray on the matching cuda device,
    # without copying: the raw device pointer is handed over, and the cupy
    # array is passed along to keep the underlying memory alive.
    assert cupy is not None
    assert isinstance(array, cupy.ndarray)
    device = chainerx.get_device('cuda', array.device.id)
    return chainerx._core._fromrawpointer(
        array.data.mem.ptr,
        array.shape,
        array.dtype,
        array.strides,
        device,
        array.data.ptr - array.data.mem.ptr,
        array)
def _from_chx(array, check_backprop=True):
    # Converts chainerx.ndarray to numpy/cupy.ndarray.
    # Objects with other types are kept intact.
    # Returns a triple: (xp, cupy device or dummy context, numpy/cupy.ndarray).
    if not isinstance(array, chainerx.ndarray):
        # Reject raw numpy/cupy inputs: fallbacks expect chainerx arrays only.
        if (isinstance(array, numpy.ndarray)
                or (cupy and isinstance(array, cupy.ndarray))):
            raise TypeError(
                'ChainerX function fallback using NumPy/CuPy arrays '
                'is not supported.')
        # _from_chx is also called for slice and tuple objects
        # Used to index a chx array
        return None, _dummy_context, array
    if check_backprop and array.is_backprop_required():
        raise RuntimeError(
            'ChainerX function fallback using NumPy/CuPy is not '
            'supported for arrays that are connected to a graph.')
    backend_name = array.device.backend.name
    if backend_name == 'native':
        return numpy, _dummy_context, _to_numpy(array)
    if backend_name == 'cuda':
        if cupy is None:
            raise RuntimeError(
                'ChainerX fallback implementation for cuda backend requires '
                'cupy to be installed.')
        array_cupy = _to_cupy(array)
        # The cupy device is returned so callers can run ops under it.
        return cupy, array_cupy.device, array_cupy
    raise RuntimeError(
        'ChainerX fallback implementation only supports native or cuda '
        'backends.')
def _to_chx(array):
    """Convert numpy/cupy.ndarray to chainerx.ndarray; pass other types through."""
    if isinstance(array, numpy.ndarray):
        return _from_numpy(array)
    if cupy is not None and isinstance(array, cupy.ndarray):
        return _from_cupy(array)
    return array
def _populate_module_functions():
    # Install module-level chainerx functions that fall back to NumPy/CuPy.
    def _fix(arr):
        xp, dev, arr = _from_chx(arr)
        with dev:
            ret = xp.fix(arr)
            # xp.fix may return a scalar for 0-d input; normalize to an array.
            ret = xp.asarray(ret)
        return _to_chx(ret)
    def _broadcast_arrays(*args):
        xps, devs, arrs = zip(*(_from_chx(arr) for arr in args))
        # All inputs must come from the same backend (all numpy or all cupy).
        backend = xps[0]
        if not all([xp is backend for xp in xps]):
            raise TypeError(
                'ChainerX function fallback using mixed NumPy/CuPy '
                'arrays is not supported.')
        bcasted = backend.broadcast_arrays(*arrs)
        return [_to_chx(ret) for ret in bcasted]
    def _copysign(*args):
        xps, devs, arrs = zip(*(_from_chx(arr) for arr in args))
        backend = xps[0]
        if not all([xp is backend for xp in xps]):
            raise TypeError(
                'ChainerX function fallback using mixed NumPy/CuPy '
                'arrays is not supported.')
        with devs[0]:
            y = backend.copysign(*arrs)
        return _to_chx(y)
    chainerx.fix = _fix
    chainerx.broadcast_arrays = _broadcast_arrays
    chainerx.copysign = _copysign
def _populate_ndarray():
    # Patch chainerx.ndarray with advanced-indexing fallbacks via NumPy/CuPy.
    ndarray = chainerx.ndarray
    # __getitem__ with advanced indexing
    old_getitem = ndarray.__getitem__
    def __getitem__(arr, key):
        # Basic indexing is handled natively; only ndarray keys fall back.
        if not isinstance(key, chainerx.ndarray):
            return old_getitem(arr, key)
        is_backprop_required = arr.is_backprop_required()
        xp, dev, arr = _from_chx(arr, check_backprop=False)
        # The elements used for indexing the array might be
        # also ChainerX arrays. _from_chx ignores
        # other types and return them as-is
        if isinstance(key, tuple):
            key = tuple([_from_chx(k, check_backprop=False)[2] for k in key])
        else:
            _, _, key = _from_chx(key, check_backprop=False)
        with dev:
            ret = arr[key]
        # Doing this check after the fallback __getitem__ because the error
        # which caused the fallback might not be due to advanced indexing.
        # In such case the fallback __getitem__ should also raise the error.
        if is_backprop_required:
            raise RuntimeError(
                'ChainerX getitem fallback for advanced indexing is not '
                'supported for arrays that are connected to a graph.')
        return _to_chx(ret)
    # __setitem__ with advanced indexing
    def __setitem__(self, key, value):
        if self.is_backprop_required():
            raise RuntimeError(
                'ChainerX setitem fallback for advanced indexing is not '
                'supported for arrays that are connected to a graph.')
        xp, dev, self = _from_chx(self)
        if isinstance(key, tuple):
            key = tuple([_from_chx(k)[2] for k in key])
        else:
            _, _, key = _from_chx(key)
        _, _, value = _from_chx(value)
        # Mutation happens on the numpy/cupy view, which shares memory with
        # the original chainerx array.
        with dev:
            self[key] = value
    ndarray.__setitem__ = __setitem__
    ndarray.__getitem__ = __getitem__
    def tolist(arr):
        _, dev, arr = _from_chx(arr)
        with dev:
            ret = arr.tolist()
        return ret
    ndarray.tolist = tolist
def populate():
    # Install both the module-level and the ndarray-level fallbacks.
    _populate_module_functions()
    _populate_ndarray()
| 6,347
| 30.117647
| 79
|
py
|
chainer
|
chainer-master/chainerx/__init__.py
|
import os
import warnings
try:
from chainerx import _build_info
except ImportError:
raise ImportError(
'''\
Cannot import chainerx because _build_info.py cannot be found.
The chainer and chainerx module being imported was not correctly \
installed by `pip install`.
It may be caused by either of the following reasons.
1. You are directly importing chainer source files without installing it with \
`pip install`.
2. You installed chainer in non-editable mode (`pip install` without -e) and \
are importing chainer source files instead of the installed module.''')
if _build_info.build_chainerx:
from chainerx import _core
_available = True
else:
_available = False
if _available:
from numpy import dtype # NOQA
from numpy import ( # NOQA
Inf, Infinity, NAN, NINF, NZERO, NaN, PINF, PZERO,
e, euler_gamma,
inf, infty, nan,
newaxis,
pi)
from numpy import (
bool_, int8, int16, int32, int64, uint8, float16, float32, float64) # NOQA
all_dtypes = (
bool_, int8, int16, int32, int64, uint8, float16, float32, float64)
from chainerx._core import * # NOQA
from chainerx._core import _to_cupy # NOQA
from builtins import bool, int, float # NOQA
from chainerx import _device # NOQA
from chainerx.creation.from_data import asanyarray # NOQA
from chainerx.creation.from_data import fromfile # NOQA
from chainerx.creation.from_data import fromfunction # NOQA
from chainerx.creation.from_data import fromiter # NOQA
from chainerx.creation.from_data import fromstring # NOQA
from chainerx.creation.from_data import loadtxt # NOQA
from chainerx.math.misc import clip # NOQA
from chainerx import random # NOQA
_global_context = _core.Context()
_core.set_global_default_context(_global_context)
# Implements ndarray methods in Python
from chainerx import _ndarray
_ndarray.populate()
# Temporary workaround implementations that fall back to NumPy/CuPy's
# respective functions.
from chainerx import _fallback_workarounds
_fallback_workarounds.populate()
# Dynamically inject docstrings
from chainerx import _docs
_docs.set_docs()
from chainerx import _cuda
# Share memory pool with CuPy.
if bool(int(os.getenv('CHAINERX_CUDA_CUPY_SHARE_ALLOCATOR', '0'))):
_cuda.cupy_share_allocator()
else:
class ndarray(object):
"""Dummy class for type testing."""
def __init__(self, *args, **kwargs):
raise RuntimeError('chainerx is not available.')
def is_available():
    """Return True if the ChainerX native core was built and imported."""
    return _available
if _available and _core._is_debug():
# Warn if the ChainerX core binary is built in debug mode
warnings.warn(
'ChainerX core binary is built in debug mode.', stacklevel=2)
| 2,837
| 28.5625
| 83
|
py
|
chainer
|
chainer-master/chainerx/_device.py
|
import chainerx
def _recover_device(backend_name, device_index):
    """Reconstruct a device instance; counterpart of chainerx.Device.__reduce__."""
    # TODO(niboshi): Save the context name and lookup the context with it.
    ctx = chainerx.get_default_context()
    return ctx.get_backend(backend_name).get_device(device_index)
| 405
| 32.833333
| 74
|
py
|
chainer
|
chainer-master/chainerx/random/distributions.py
|
import numpy
import chainerx
# TODO(sonots): Implement in C++, especially in CUDA
def normal(*args, **kwargs):
    """normal(*args, **kwargs, device=None)
    Draws random samples from a normal (Gaussian) distribution.
    This is currently equivalent to :func:`numpy.random.normal`
    wrapped by :func:`chainerx.array`, given the device argument.
    .. seealso:: :func:`numpy.random.normal`
    """
    # ``device`` is ChainerX-specific; strip it before delegating to NumPy.
    device = kwargs.pop('device', None)
    a = numpy.random.normal(*args, **kwargs)
    # copy=False: the fresh NumPy buffer can be adopted directly.
    return chainerx.array(a, device=device, copy=False)
# TODO(sonots): Implement in C++, especially in CUDA
def uniform(*args, **kwargs):
    """uniform(*args, **kwargs, device=None)
    Draws samples from a uniform distribution.
    This is currently equivalent to :func:`numpy.random.uniform`
    wrapped by :func:`chainerx.array`, given the device argument.
    .. seealso:: :func:`numpy.random.uniform`
    """
    # ``device`` is ChainerX-specific; strip it before delegating to NumPy.
    device = kwargs.pop('device', None)
    a = numpy.random.uniform(*args, **kwargs)
    # copy=False: the fresh NumPy buffer can be adopted directly.
    return chainerx.array(a, device=device, copy=False)
| 1,057
| 26.842105
| 65
|
py
|
chainer
|
chainer-master/chainerx/random/__init__.py
|
from chainerx.random.distributions import normal # NOQA
from chainerx.random.distributions import uniform # NOQA
| 115
| 37.666667
| 57
|
py
|
chainer
|
chainer-master/chainerx/testing/array.py
|
import numpy.testing
import chainerx
# NumPy-like assertion functions that accept both NumPy and ChainerX arrays
def _as_numpy(x):
    # View a chainerx array as numpy (no copy); numpy arrays/scalars pass through.
    if isinstance(x, chainerx.ndarray):
        return chainerx.to_numpy(x)
    assert isinstance(x, numpy.ndarray) or numpy.isscalar(x)
    return x
def _check_dtype_and_strides(x, y, dtype_check, strides_check):
    # None means "not specified": dtype_check defaults to True and
    # strides_check defaults to whatever dtype_check resolves to.
    if (strides_check is not None
            and dtype_check is not None
            and strides_check
            and not dtype_check):
        raise ValueError(
            'Combination of dtype_check=False and strides_check=True is not '
            'allowed')
    if dtype_check is None:
        dtype_check = True
    if strides_check is None:
        strides_check = dtype_check
    # Only compare metadata when both operands are actual arrays (not scalars).
    if (isinstance(x, (numpy.ndarray, chainerx.ndarray))
            and isinstance(y, (numpy.ndarray, chainerx.ndarray))):
        if strides_check:
            assert x.strides == y.strides, (
                'Strides mismatch: x: {}, y: {}'.format(x.strides, y.strides))
        if dtype_check:
            assert x.dtype.name == y.dtype.name, (
                'Dtype mismatch: x: {}, y: {}'.format(x.dtype, y.dtype))
def _preprocess_input(a):
    # Convert to something NumPy can handle and return
    return _as_numpy(a)
def assert_allclose(
        x, y, rtol=1e-7, atol=0, equal_nan=True, err_msg='', verbose=True):
    """Raises an AssertionError if two array_like objects are not equal up to a
    tolerance.
    Args:
        x(numpy.ndarray or chainerx.ndarray): The actual object to check.
        y(numpy.ndarray or chainerx.ndarray): The desired, expected object.
        rtol(float): Relative tolerance.
        atol(float): Absolute tolerance.
        equal_nan(bool): Allow NaN values if True. Otherwise, fail the
            assertion if any NaN is found.
        err_msg(str): The error message to be printed in case of failure.
        verbose(bool): If ``True``, the conflicting values
            are appended to the error message.
    .. seealso:: :func:`numpy.testing.assert_allclose`
    """
    # Normalize both operands to NumPy, then delegate the comparison.
    numpy.testing.assert_allclose(
        _preprocess_input(x), _preprocess_input(y),
        rtol=rtol, atol=atol, equal_nan=equal_nan, err_msg=err_msg,
        verbose=verbose)
def assert_array_equal(x, y, err_msg='', verbose=True):
    """Raises an AssertionError if two array_like objects are not equal.
    Args:
        x(numpy.ndarray or chainerx.ndarray): The actual object to check.
        y(numpy.ndarray or chainerx.ndarray): The desired, expected object.
        err_msg(str): The error message to be printed in case of failure.
        verbose(bool): If ``True``, the conflicting values
            are appended to the error message.
    .. seealso:: :func:`numpy.testing.assert_array_equal`
    """
    # Normalize both operands to NumPy, then delegate the comparison.
    numpy.testing.assert_array_equal(
        _preprocess_input(x), _preprocess_input(y),
        err_msg=err_msg, verbose=verbose)
def assert_allclose_ex(x, y, rtol=1e-7, atol=0, equal_nan=True, err_msg='',
                       verbose=True, **kwargs):
    """assert_allclose_ex(
    x, y, rtol=1e-7, atol=0, equal_nan=True, err_msg='', verbose=True,
    *, dtype_check=True, strides_check=True)
    Raises an AssertionError if two array_like objects are not equal up to a
    tolerance.
    Args:
        x(numpy.ndarray or chainerx.ndarray): The actual object to check.
        y(numpy.ndarray or chainerx.ndarray): The desired, expected object.
        rtol(float): Relative tolerance.
        atol(float): Absolute tolerance.
        equal_nan(bool): Allow NaN values if True. Otherwise, fail the
            assertion if any NaN is found.
        err_msg(str): The error message to be printed in case of failure.
        verbose(bool): If ``True``, the conflicting values
            are appended to the error message.
        dtype_check(bool): If ``True``, consistency of dtype is also checked.
            Disabling ``dtype_check`` also implies ``strides_check=False``.
        strides_check(bool): If ``True``, consistency of strides is also
            checked.
        float16_rtol(float): Relative tolerance for float16 dtype.
        float16_atol(float): Absolute tolerance for float16 dtype.
        float32_rtol(float): Relative tolerance for float32 dtype.
        float32_atol(float): Absolute tolerance for float32 dtype.
        float64_rtol(float): Relative tolerance for float64 dtype.
        float64_atol(float): Absolute tolerance for float64 dtype.
    .. seealso:: :func:`numpy.testing.assert_allclose`
    """
    dtype_check = kwargs.pop('dtype_check', None)
    strides_check = kwargs.pop('strides_check', None)
    # Per-dtype tolerances (e.g. float16_atol) override the generic ones,
    # keyed on the dtype of the actual result ``x``.
    atol = kwargs.pop(x.dtype.name + '_atol', atol)
    rtol = kwargs.pop(x.dtype.name + '_rtol', rtol)
    assert_allclose(x, y, rtol, atol, equal_nan, err_msg, verbose)
    _check_dtype_and_strides(x, y, dtype_check, strides_check)
def assert_array_equal_ex(x, y, *args, **kwargs):
    """assert_array_equal_ex(
    x, y, err_msg='', verbose=True, *, dtype_check=True,
    strides_check=True)
    Raises an AssertionError if two array_like objects are not equal.
    Args:
        x(numpy.ndarray or chainerx.ndarray): The actual object to check.
        y(numpy.ndarray or chainerx.ndarray): The desired, expected object.
        err_msg(str): The error message to be printed in case of failure.
        verbose(bool): If ``True``, the conflicting values
            are appended to the error message.
        dtype_check(bool): If ``True``, consistency of dtype is also checked.
            Disabling ``dtype_check`` also implies ``strides_check=False``.
        strides_check(bool): If ``True``, consistency of strides is also
            checked.
    .. seealso::
       :func:`numpy.testing.assert_array_equal`
    """
    # Extract the extended options before forwarding to the plain assertion.
    dtype_check = kwargs.pop('dtype_check', None)
    strides_check = kwargs.pop('strides_check', None)
    assert_array_equal(x, y, *args, **kwargs)
    _check_dtype_and_strides(x, y, dtype_check, strides_check)
| 6,059
| 39.4
| 79
|
py
|
chainer
|
chainer-master/chainerx/testing/helper.py
|
import functools
import traceback
import warnings
import numpy
import pytest
import chainerx
from chainerx.testing import array
# A test returning this object will have its return value ignored.
#
# This is e.g. useful when a combination of parametrizations and operations
# unintentionally cover non-supported function calls.
# For instance, you might parametrize over shapes (tuples) which are unpacked
# and passed to a function.
# While you might want to test empty tuples for module functions, they should
# maybe be ignored for ndarray functions.
#
# If either chainerx or numpy returns this object, the other module should too.
# Otherwise, the test will be considered inconsistent and be treated as a
# failure.
_ignored_result = object()
# A wrapper to obtain the ignore object.
def ignore():
    """Return the sentinel that makes a test's return value be ignored."""
    return _ignored_result
class _ResultsCheckFailure(Exception):
def __init__(self, msg, indices, condense_results_func=None):
self.msg = msg
self.indices = tuple(indices)
if condense_results_func is None:
def condense_results_func(np_r, chx_r):
return 'chainerx: {} numpy: {}'.format(chx_r, np_r)
self.condense_results_func = condense_results_func
def condense_results(self, numpy_result, chainerx_result):
# Generates a condensed error message for a pair of lowest-level numpy
# and chainerx results.
return self.condense_results_func(numpy_result, chainerx_result)
def _call_func(impl, args, kw):
try:
result = impl(*args, **kw)
error = None
tb = None
except Exception as e:
result = None
error = e
tb = traceback.format_exc()
return result, error, tb
def _check_chainerx_numpy_error(chainerx_error, chainerx_tb, numpy_error,
                                numpy_tb, accept_error=()):
    # Fail the test unless BOTH chainerx and numpy raised, and both exceptions
    # are instances of the accepted error classes.
    # TODO(sonots): Change error class names of ChainerX to be similar with
    # NumPy, and check names.
    if chainerx_error is None and numpy_error is None:
        pytest.fail(
            'Both chainerx and numpy are expected to raise errors, but not')
    elif chainerx_error is None:
        pytest.fail('Only numpy raises error\n\n' + numpy_tb)
    elif numpy_error is None:
        pytest.fail('Only chainerx raises error\n\n' + chainerx_tb)
    elif not (isinstance(chainerx_error, accept_error) and
              isinstance(numpy_error, accept_error)):
        msg = '''Both chainerx and numpy raise exceptions
chainerx
%s
numpy
%s
''' % (chainerx_tb, numpy_tb)
        pytest.fail(msg)
def _is_numpy_type(result):
return isinstance(result, (numpy.ndarray, numpy.generic))
def _check_chainerx_numpy_result_array(
        check_result_func, chainerx_result, numpy_result, indices):
    # Compares `chainerx_result` and `numpy_result` as arrays.
    # Raises _ResultsCheckFailure on any mismatch (type, shape, device, or
    # whatever check_result_func asserts about the values).
    is_chainerx_valid_type = isinstance(chainerx_result, chainerx.ndarray)
    is_numpy_valid_type = _is_numpy_type(numpy_result)
    if not (is_chainerx_valid_type and is_numpy_valid_type):
        raise _ResultsCheckFailure(
            'Using decorator without returning ndarrays. '
            'If you want to explicitly ignore certain tests, '
            'return chainerx.testing.ignore() to avoid this error', indices)
    if chainerx_result.shape != numpy_result.shape:
        raise _ResultsCheckFailure(
            'Shape mismatch', indices,
            lambda np_r, chx_r: (
                'chainerx: {}, numpy: {}'.format(chx_r.shape, np_r.shape)))
    # The chainerx result must live on the current default device.
    if chainerx_result.device is not chainerx.get_default_device():
        raise _ResultsCheckFailure(
            'ChainerX bad device', indices,
            lambda np_r, chx_r: (
                'default: {}, chainerx: {}'.format(
                    chainerx.get_default_device(), chx_r.device)))
    try:
        check_result_func(chainerx_result, numpy_result)
    except AssertionError as e:
        # Convert AssertionError to _ResultsCheckFailure
        raise _ResultsCheckFailure(str(e), indices)
def _check_chainerx_numpy_result_impl(
        check_result_func, chainerx_result, numpy_result, indices):
    """Recursively compare chainerx/numpy results.

    This function raises ``_ResultsCheckFailure`` if any failure occurs.
    ``indices`` is a tuple of indices to reach both results from the
    top-level return values.
    """
    if chainerx_result is _ignored_result and numpy_result is _ignored_result:
        return
    if isinstance(chainerx_result, (list, tuple)):
        if type(chainerx_result) is not type(numpy_result):
            raise _ResultsCheckFailure('Different result types', indices)
        if len(chainerx_result) != len(numpy_result):
            raise _ResultsCheckFailure('Result length mismatch', indices)
        # Recurse element-wise, extending the index path.
        for i, (chx_r, np_r) in enumerate(
                zip(chainerx_result, numpy_result)):
            _check_chainerx_numpy_result_impl(
                check_result_func, chx_r, np_r, indices + (i,))
        return
    if isinstance(chainerx_result, chainerx.ndarray):
        _check_chainerx_numpy_result_array(
            check_result_func, chainerx_result, numpy_result, indices)
        return
    # Scalar (non-array) results: require exact type and value equality.
    if _is_numpy_type(chainerx_result):
        raise _ResultsCheckFailure(
            'chainerx result should not be a NumPy type', indices)
    if type(chainerx_result) != type(numpy_result):
        raise _ResultsCheckFailure('Type mismatch', indices)
    if chainerx_result != numpy_result:
        raise _ResultsCheckFailure('Not equal', indices)
def _check_chainerx_numpy_result(
        check_result_func, chainerx_result, numpy_result):
    """Compare results, turning any ``_ResultsCheckFailure`` into an
    ``AssertionError`` with a comprehensible message."""
    try:
        _check_chainerx_numpy_result_impl(
            check_result_func, chainerx_result, numpy_result, indices=())
    except _ResultsCheckFailure as e:
        indices = e.indices
        # Drill down to the offending element on both sides.
        chx_r = chainerx_result
        np_r = numpy_result
        for index in indices:
            chx_r = chx_r[index]
            np_r = np_r[index]
        indices_str = ''.join('[{}]'.format(i) for i in indices)
        s = '{}: {}\n\n'.format(e.msg, e.condense_results(np_r, chx_r))
        if indices:
            s += 'chainerx results{}: {}\n'.format(
                indices_str, type(chx_r))
            s += '{}\n\n'.format(chx_r)
            s += 'numpy results{}: {}\n'.format(indices_str, type(np_r))
            s += '{}\n\n'.format(np_r)
        s += 'chainerx results: {}\n'.format(type(chainerx_result))
        s += '{}\n\n'.format(chainerx_result)
        s += 'numpy results: {}\n'.format(type(numpy_result))
        s += '{}\n\n'.format(numpy_result)
        raise AssertionError(s)
def _make_decorator(check_result_func, name, accept_error):
    """Build a decorator that runs a test body twice — once with chainerx
    and once with numpy bound to the ``name`` keyword argument — and
    compares the two outcomes with ``check_result_func``."""
    def decorator(impl):
        @functools.wraps(impl)
        def test_func(*args, **kw):
            # First run: ChainerX.
            kw[name] = chainerx
            chx_result, chx_error, chx_tb = _call_func(impl, args, kw)
            # Second run: NumPy, with RuntimeWarnings silenced.
            kw[name] = numpy
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', RuntimeWarning)
                np_result, np_error, np_tb = _call_func(impl, args, kw)
            if chx_error or np_error:
                _check_chainerx_numpy_error(
                    chx_error, chx_tb, np_error, np_tb,
                    accept_error=accept_error)
                return
            assert chx_result is not None and np_result is not None, (
                'Either or both of ChainerX and numpy returned None. '
                'chainerx: {}, numpy: {}'.format(chx_result, np_result))
            _check_chainerx_numpy_result(
                check_result_func, chx_result, np_result)
        # Apply dummy parametrization on `name` (e.g. 'xp') to avoid pytest
        # error when collecting test functions.
        return pytest.mark.parametrize(name, [None])(test_func)
    return decorator
def numpy_chainerx_allclose(**kwargs):
    """numpy_chainerx_allclose(
    *, rtol=1e-7, atol=0, equal_nan=True, err_msg='', verbose=True,
    name='xp', dtype_check=True, strides_check=True, accept_error=())
    Decorator that checks that NumPy and ChainerX results are equal up to a
    tolerance.
    Args:
        rtol(float): Relative tolerance.
        atol(float): Absolute tolerance.
        equal_nan(bool): Allow NaN values if True. Otherwise, fail the
            assertion if any NaN is found.
        err_msg(str): The error message to be printed in case of failure.
        verbose(bool): If ``True``, the conflicting values are
            appended to the error message.
        name(str): Argument name whose value is either ``numpy`` or
            ``chainerx`` module.
        dtype_check(bool): If ``True``, consistency of dtype is also checked.
            Disabling ``dtype_check`` also implies ``strides_check=False``.
        strides_check(bool): If ``True``, consistency of strides is also
            checked.
        accept_error(Exception or tuple of Exception): Specify
            acceptable errors. When both NumPy test and ChainerX test raises
            the same type of errors, and the type of the errors is specified
            with this argument, the errors are ignored and not raised.
        float16_rtol(float): Relative tolerance for float16 dtype.
        float16_atol(float): Absolute tolerance for float16 dtype.
        float32_rtol(float): Relative tolerance for float32 dtype.
        float32_atol(float): Absolute tolerance for float32 dtype.
        float64_rtol(float): Relative tolerance for float64 dtype.
        float64_atol(float): Absolute tolerance for float64 dtype.
    Raises:
        TypeError: If an unrecognized keyword argument is given.
    Decorated test fixture is required to return the same arrays
    in the sense of :func:`numpy_chainerx_allclose`
    (except the type of array module) even if ``xp`` is ``numpy`` or
    ``chainerx``.
    .. seealso:: :func:`chainerx.testing.assert_allclose_ex`
    """  # NOQA
    rtol = kwargs.pop('rtol', 1e-7)
    atol = kwargs.pop('atol', 0)
    equal_nan = kwargs.pop('equal_nan', True)
    err_msg = kwargs.pop('err_msg', '')
    verbose = kwargs.pop('verbose', True)
    name = kwargs.pop('name', 'xp')
    # None means "use the library default" for these two checks.
    dtype_check = kwargs.pop('dtype_check', None)
    strides_check = kwargs.pop('strides_check', None)
    accept_error = kwargs.pop('accept_error', ())
    tol_keys = (
        'float16_rtol', 'float32_rtol', 'float64_rtol',
        'float16_atol', 'float32_atol', 'float64_atol')
    tol_kwargs = {k: kwargs[k] for k in tol_keys if k in kwargs}
    # Reject typos and unsupported options instead of silently ignoring them.
    unexpected = set(kwargs) - set(tol_keys)
    if unexpected:
        raise TypeError(
            'numpy_chainerx_allclose() got unexpected keyword argument(s): '
            '{}'.format(', '.join(sorted(unexpected))))
    def check_result_func(x, y):
        array.assert_allclose_ex(
            x, y, rtol, atol, equal_nan, err_msg, verbose,
            dtype_check=dtype_check, strides_check=strides_check,
            **tol_kwargs)
    return _make_decorator(check_result_func, name, accept_error)
def numpy_chainerx_array_equal(**kwargs):
    """numpy_chainerx_array_equal(
    *, err_msg='', verbose=True, name='xp', dtype_check=True,
    strides_check=True, accept_error=()):
    Decorator that checks that NumPy and ChainerX results are equal.
    Args:
        err_msg(str): The error message to be printed in case of failure.
        verbose(bool): If ``True``, the conflicting values are
            appended to the error message.
        name(str): Argument name whose value is either ``numpy`` or
            ``chainerx`` module.
        dtype_check(bool): If ``True``, consistency of dtype is also checked.
            Disabling ``dtype_check`` also implies ``strides_check=False``
        strides_check(bool): If ``True``, consistency of strides is also
            checked.
        accept_error(Exception or tuple of Exception): Specify
            acceptable errors. When both NumPy test and ChainerX test raises
            the same type of errors, and the type of the errors is specified
            with this argument, the errors are ignored and not raised.
    Raises:
        TypeError: If an unrecognized keyword argument is given.
    Decorated test fixture is required to return the same arrays
    in the sense of :func:`numpy_chainerx_array_equal`
    (except the type of array module) even if ``xp`` is ``numpy`` or
    ``chainerx``.
    .. seealso:: :func:`chainerx.testing.assert_array_equal_ex`
    """
    err_msg = kwargs.pop('err_msg', '')
    verbose = kwargs.pop('verbose', True)
    name = kwargs.pop('name', 'xp')
    # None means "use the library default" for these two checks.
    dtype_check = kwargs.pop('dtype_check', None)
    strides_check = kwargs.pop('strides_check', None)
    accept_error = kwargs.pop('accept_error', ())
    # Reject typos and unsupported options instead of silently ignoring them.
    if kwargs:
        raise TypeError(
            'numpy_chainerx_array_equal() got unexpected keyword '
            'argument(s): {}'.format(', '.join(sorted(kwargs))))
    def check_result_func(x, y):
        array.assert_array_equal_ex(
            x, y, err_msg, verbose, dtype_check=dtype_check,
            strides_check=strides_check)
    return _make_decorator(check_result_func, name, accept_error)
| 12,987
| 39.461059
| 79
|
py
|
chainer
|
chainer-master/chainerx/testing/__init__.py
|
import pytest
# Register the helper modules for pytest's assertion rewriting before they
# are imported below, so that failing asserts inside them produce detailed
# introspection output.
pytest.register_assert_rewrite('chainerx.testing.array')
pytest.register_assert_rewrite('chainerx.testing.helper')
from chainerx._testing import _DeviceBuffer # NOQA
from chainerx._testing import _fromnumpy # NOQA
from chainerx.testing import array # NOQA
from chainerx.testing import helper # NOQA
from chainerx.testing.array import assert_allclose # NOQA
from chainerx.testing.array import assert_allclose_ex # NOQA
from chainerx.testing.array import assert_array_equal # NOQA
from chainerx.testing.array import assert_array_equal_ex # NOQA
from chainerx.testing.dtypes import all_dtypes # NOQA
from chainerx.testing.dtypes import float_dtypes # NOQA
from chainerx.testing.dtypes import integral_dtypes # NOQA
from chainerx.testing.dtypes import signed_integral_dtypes # NOQA
from chainerx.testing.dtypes import nonfloat_dtypes # NOQA
from chainerx.testing.dtypes import numeric_dtypes # NOQA
from chainerx.testing.dtypes import parametrize_dtype_specifier # NOQA
from chainerx.testing.dtypes import signed_dtypes # NOQA
from chainerx.testing.dtypes import unsigned_dtypes # NOQA
from chainerx.testing.helper import ignore # NOQA
from chainerx.testing.helper import numpy_chainerx_allclose # NOQA
from chainerx.testing.helper import numpy_chainerx_array_equal # NOQA
| 1,307
| 45.714286
| 71
|
py
|
chainer
|
chainer-master/chainerx/testing/dtypes.py
|
import numpy
import pytest
import chainerx
# Dtype-name groups used to parametrize tests over subsets of the dtypes
# that ChainerX supports. All entries are NumPy-compatible dtype names.
# Floating-point dtypes.
float_dtypes = (
    'float16',
    'float32',
    'float64',
)
# Signed dtypes: signed integers plus all floats.
signed_dtypes = (
    'int8',
    'int16',
    'int32',
    'int64',
    'float16',
    'float32',
    'float64',
)
# Unsigned integer dtypes.
unsigned_dtypes = (
    'uint8',
)
# All integer dtypes, signed and unsigned.
integral_dtypes = (
    'uint8',
    'int8',
    'int16',
    'int32',
    'int64',
)
# Signed integer dtypes only.
signed_integral_dtypes = (
    'int8',
    'int16',
    'int32',
    'int64',
)
# Everything except the floating-point dtypes.
nonfloat_dtypes = (
    'bool_',
    'int8',
    'int16',
    'int32',
    'int64',
    'uint8',
)
# Numeric dtypes (all integers and floats); `all_dtypes` adds bool on top.
numeric_dtypes = signed_dtypes + unsigned_dtypes
all_dtypes = ('bool_',) + numeric_dtypes
def parametrize_dtype_specifier(argname, dtypes=None, additional_args=None):
    """Parametrizes a test with various arguments that can be used as dtypes.

    Each dtype name is expanded to three specifiers: the plain name, the
    corresponding :class:`numpy.dtype`, and the ChainerX character code.

    Args:
        argname(str): Argument name to pass the value that can be used as a
            dtype.
        dtypes(list of strs): List of dtype names.
        additional_args(tuple or list): Additional values to be included.
    """
    if dtypes is None:
        dtypes = all_dtypes
    assert isinstance(argname, str)
    assert isinstance(dtypes, (tuple, list))
    assert all(isinstance(dt, str) for dt in dtypes)
    specifiers = list(dtypes)  # dtype names
    specifiers += [numpy.dtype(dt) for dt in dtypes]  # numpy dtypes
    specifiers += [chainerx.dtype(dt).char for dt in dtypes]  # char codes
    if additional_args is not None:
        # User-specified args
        assert isinstance(additional_args, (tuple, list))
        specifiers += list(additional_args)
    return pytest.mark.parametrize(argname, specifiers)
| 1,613
| 16.933333
| 77
|
py
|
chainer
|
chainer-master/chainerx/manipulation/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainerx/_docs/routines.py
|
import chainerx
from chainerx import _docs
def set_docs():
    """Attach docstrings to all chainerx routine groups."""
    setters = (
        _docs_creation,
        _docs_evaluation,
        _docs_indexing,
        _docs_linalg,
        _docs_logic,
        _docs_loss,
        _docs_manipulation,
        _docs_math,
        _docs_sorting,
        _docs_statistics,
        _docs_connection,
        _docs_normalization,
        _docs_pooling,
        _docs_rnn,
    )
    for setter in setters:
        setter()
def _docs_creation():
    """Set docstrings for the array creation routines (empty, zeros,
    arange, diag, meshgrid, ...)."""
    _docs.set_doc(
        chainerx.empty,
        """empty(shape, dtype, device=None)
Returns an array without initializing the elements.
Args:
    shape (tuple of ints): Shape of the array.
    dtype: Data type of the array.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    :class:`~chainerx.ndarray`: New array with elements not initialized.
.. seealso:: :func:`numpy.empty`
""")
    _docs.set_doc(
        chainerx.empty_like,
        """empty_like(a, device=None)
Returns a new array with same shape and dtype of a given array.
Args:
    a (~chainerx.ndarray): Prototype array.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    :class:`~chainerx.ndarray`: New array with same shape and dtype as ``a`` \
with elements not initialized.
Warning:
    If ``device`` argument is omitted, the new array is created on the default
    device, not the device of the prototype array.
.. seealso:: :func:`numpy.empty_like`
""")
    _docs.set_doc(
        chainerx.eye,
        """eye(N, M=None, k=0, dtype=float64, device=None)
Returns a 2-D array with ones on the diagonals and zeros elsewhere.
Args:
    N (int): Number of rows.
    M (int): Number of columns. M == N by default.
    k (int): Index of the diagonal. Zero indicates the main diagonal,
        a positive index an upper diagonal, and a negative index a lower
        diagonal.
    dtype: Data type.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: A 2-D array with given diagonals filled with ones and
    zeros elsewhere.
.. seealso:: :func:`numpy.eye`
""")
    _docs.set_doc(
        chainerx.tri,
        """tri(N, M=None, k=0, dtype=float32, device=None)
Returns a 2-D array with ones at and below the given diagonal
and zeros elsewhere.
Args:
    N (int): Number of rows.
    M (int): Number of columns. M == N by default.
    k (int): Index of the diagonal. Zero indicates the main diagonal,
        a positive index an upper diagonal, and a negative index a lower
        diagonal.
    dtype: Data type.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: A 2-D array with given diagonals filled ones at and
    below the given diagonal and zeros elsewhere.
.. seealso:: :func:`numpy.tri`
""")
    _docs.set_doc(
        chainerx.tril,
        """tril(m, k=0)
Lower triangle of an array.
Returns a copy of an array with elements above the k-th diagonal zeroed.
Args:
    m (~chainerx.ndarray): Input array.
    k (int): Index of the diagonal. Zero indicates the main diagonal,
        a positive index an upper diagonal, and a negative index a lower
        diagonal.
Returns:
    ~chainerx.ndarray: Lower triangle of ``m``.
.. seealso:: :func:`numpy.tril`
""")
    _docs.set_doc(
        chainerx.triu,
        """triu(m, k=0)
Upper triangle of an array.
Returns a copy of an array with elements below the k-th diagonal zeroed.
Args:
    m (~chainerx.ndarray): Input array.
    k (int): Index of the diagonal. Zero indicates the main diagonal,
        a positive index an upper diagonal, and a negative index a lower
        diagonal.
Returns:
    ~chainerx.ndarray: Upper triangle of ``m``.
.. seealso:: :func:`numpy.triu`
""")
    _docs.set_doc(
        chainerx.identity,
        """identity(n, dtype=None, device=None)
Returns a 2-D identity array.
It is equivalent to ``eye(n, n, dtype)``.
Args:
    n (int): Number of rows and columns.
    dtype: Data type.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: A 2-D identity array.
.. seealso:: :func:`numpy.identity`
""")
    _docs.set_doc(
        chainerx.ones,
        """ones(shape, dtype, device=None)
Returns a new array of given shape and dtype, filled with ones.
Args:
    shape (tuple of ints): Shape of the array.
    dtype: Data type.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: New array.
.. seealso:: :func:`numpy.ones`
""")
    _docs.set_doc(
        chainerx.ones_like,
        """ones_like(a, device=None)
Returns an array of ones with same shape and dtype as a given array.
Args:
    a (~chainerx.ndarray): Prototype array.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: New array.
Warning:
    If ``device`` argument is omitted, the new array is created on the default
    device, not the device of the prototype array.
.. seealso:: :func:`numpy.ones_like`
""")
    _docs.set_doc(
        chainerx.zeros,
        """zeros(shape, dtype, device=None)
Returns a new array of given shape and dtype, filled with zeros.
Args:
    shape (tuple of ints): Shape of the array.
    dtype: Data type.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: New array.
.. seealso:: :func:`numpy.zeros`
""")
    _docs.set_doc(
        chainerx.zeros_like,
        """zeros_like(a, device=None)
Returns an array of zeros with same shape and dtype as a given array.
Args:
    a (~chainerx.ndarray): Prototype array.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: New array.
Warning:
    If ``device`` argument is omitted, the new array is created on the default
    device, not the device of the prototype array.
.. seealso:: :func:`numpy.zeros_like`
""")
    _docs.set_doc(
        chainerx.full,
        """full(shape, fill_value, dtype, device=None)
Returns a new array of given shape and dtype, filled with a given value.
Args:
    shape (tuple of ints): Shape of the array.
    dtype: Data type.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: New array.
.. seealso:: :func:`numpy.full`
""")
    _docs.set_doc(
        chainerx.full_like,
        """full_like(a, fill_value, dtype=None, device=None)
Returns a full array with same shape and dtype as a given array.
Args:
    a (~chainerx.ndarray): Prototype array.
    dtype: Data type.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: New array.
Warning:
    If ``device`` argument is omitted, the new array is created on the default
    device, not the device of the prototype array.
.. seealso:: :func:`numpy.full_like`
""")
    _docs.set_doc(
        chainerx.array,
        """array(object, dtype=None, copy=True, device=None)
Creates an array.
Args:
    object: A :class:`~chainerx.ndarray` object or any other object that can be
        passed to :func:`numpy.array`.
    dtype: Data type. If omitted, it's inferred from the input.
    copy (bool): If ``True``, the object is always copied. Otherwise, a copy
        will only be made if it is needed to satisfy any of the other
        requirements (dtype, device, etc.).
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: New array.
Warning:
    If ``device`` argument is omitted, the new array is created on the default
    device, not the device of the input array.
.. seealso:: :func:`numpy.array`
""")
    _docs.set_doc(
        chainerx.asarray,
        """asarray(a, dtype=None, device=None)
Converts an object to an array.
Args:
    a: The source object.
    dtype: Data type. If omitted, it's inferred from the input.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: Array interpretation of ``a``. If ``a`` is already an \
ndarray on the given device with matching dtype, no copy is performed.
Warning:
    If ``device`` argument is omitted, the new array is created on the default
    device, not the device of the input array.
.. seealso:: :func:`numpy.asarray`
""")
    _docs.set_doc(
        chainerx.ascontiguousarray,
        """ascontiguousarray(a, dtype=None, device=None)
Returns a C-contiguous array.
Args:
    a (~chainerx.ndarray): Source array.
    dtype: Data type.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: C-contiguous array. A copy will be made only if needed.
Warning:
    If ``device`` argument is omitted, the new array is created on the default
    device, not the device of the input array.
.. seealso:: :func:`numpy.ascontiguousarray`
""")
    _docs.set_doc(
        chainerx.copy,
        """copy(a)
Creates a copy of a given array.
Args:
    a (~chainerx.ndarray): Source array.
Returns:
    ~chainerx.ndarray: A copy array on the same device as ``a``.
Note:
    During backpropagation, this function propagates the gradient of the
    output array to the input array ``a``.
.. seealso:: :func:`numpy.copy`
""")
    _docs.set_doc(
        chainerx.frombuffer,
        """frombuffer(buffer, dtype=float, count=-1, offset=0, device=None)
Returns a 1-D array interpretation of a buffer.
The given ``buffer`` memory must be usable on the given device, otherwise,
an error is raised.
Note:
    The ``native`` backend requires a buffer of main memory, and
    the ``cuda`` backend requires a buffer of CUDA memory.
    No copy is performed.
Args:
    buffer: An object that exposes the buffer interface.
    dtype: Data type of the returned array.
    count (int): Number of items to read. -1 means all data in the buffer.
    offset (int): Start reading the buffer from this offset (in bytes).
    device (~chainerx.Device): Device of the returned array.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: 1-D array interpretation of ``buffer``.
.. seealso:: :func:`numpy.frombuffer`
""")
    _docs.set_doc(
        chainerx.arange,
        """arange([start=0, ]stop, [step=1, ]dtype=None, device=None)
Returns an array with evenly spaced values within a given interval.
Values are generated within the half-open interval [``start``, ``stop``).
The first three arguments are mapped like the ``range`` built-in function,
i.e. ``start`` and ``step`` are optional.
Args:
    start: Start of the interval.
    stop: End of the interval.
    step: Step width between each pair of consecutive values.
    dtype: Data type specifier. It is inferred from other arguments by
        default.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: The 1-D array of range values.
.. seealso:: :func:`numpy.arange`
""")
    _docs.set_doc(
        chainerx.linspace,
        """linspace(start, stop, num=50, endpoint=True, dtype=None, device=None)
Returns an array with evenly spaced numbers over a specified interval.
Instead of specifying the step width like :func:`chainerx.arange()`,
this function requires the total number of elements specified.
Args:
    start: Start of the interval.
    stop: End of the interval.
    num: Number of elements.
    endpoint (bool): If ``True``, the stop value is included as the last
        element. Otherwise, the stop value is omitted.
    dtype: Data type specifier. It is inferred from the start and stop
        arguments by default.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: The 1-D array of ranged values.
.. seealso:: :func:`numpy.linspace`
""")  # NOQA
    _docs.set_doc(
        chainerx.diag,
        """diag(v, k=0, device=None)
Returns a diagonal or a diagonal array.
Args:
    v (~chainerx.ndarray): Array object.
    k (int): Index of diagonals. Zero indicates the main diagonal, a
        positive value an upper diagonal, and a negative value a lower
        diagonal.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: If ``v`` is a 1-D array, then it returns a 2-D
    array with the specified diagonal filled by ``v``. If ``v`` is a
    2-D array, then it returns the specified diagonal of ``v``. In latter
    case, if ``v`` is a :class:`chainerx.ndarray` object, then its view is
    returned.
Note:
    The argument ``v`` does not support array-like objects yet.
.. seealso:: :func:`numpy.diag`
""")
    _docs.set_doc(
        chainerx.diagflat,
        """diagflat(v, k=0, device=None)
Creates a diagonal array from the flattened input.
Args:
    v (~chainerx.ndarray): Array object.
    k (int): Index of diagonals. See :func:`chainerx.diag`.
    device (~chainerx.Device): Device on which the array is allocated.
        If omitted, :ref:`the default device <chainerx_device>` is chosen.
Returns:
    ~chainerx.ndarray: A 2-D diagonal array with the diagonal copied
    from ``v``.
Note:
    The argument ``v`` does not support array-like objects yet.
.. seealso:: :func:`numpy.diagflat`
""")
    _docs.set_doc(
        chainerx.meshgrid,
        """meshgrid(xi, indexing='xy')
Returns coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector
fields over N-D grids, given one-dimensional coordinate arrays x1, x2,…, xn.
Args:
    xi (sequence of :class:`~chainerx.ndarray`\\ s): 1-D arrays
        representing the coordinates of a grid.
    indexing (str): {‘xy’, ‘ij’}, optional
        Cartesian (‘xy’, default) or matrix (‘ij’) indexing of output.
Returns:
    list of :class:`~chainerx.ndarray`\\ s: For vectors x1, x2,…, ‘xn’ with
    lengths Ni=len(xi), return (N1, N2, N3,...Nn) shaped arrays if
    indexing=’ij’ or (N2, N1, N3,...Nn) shaped arrays if indexing=’xy’
    with the elements of xi repeated to fill the matrix along the first
    dimension for x1, the second for x2 and so on.
.. seealso:: :func:`numpy.meshgrid`
""")
def _docs_evaluation():
    """Set docstrings for the evaluation routines (accuracy)."""
    _docs.set_doc(
        chainerx.accuracy,
        """accuracy(y, t, ignore_label=None)
Computes multiclass classification accuracy of the minibatch.
Args:
    y (~chainerx.ndarray):
        Array whose (i, j, k, ...)-th element indicates the score of
        the class j at the (i, k, ...)-th sample.
        The prediction label :math:`\\hat t` is calculated by the formula
        :math:`\\hat t(i, k, ...) = \\operatorname{\\mathrm{argmax}}_j \
y(i, j, k, ...)`.
    t (~chainerx.ndarray):
        Array of ground truth labels.
    ignore_label (int or None): Skip calculating accuracy
        if the true label is ``ignore_label``.
Returns:
    :func:`~chainerx.ndarray`: A variable holding a scalar \
array of the accuracy.
Note:
    This function is non-differentiable.
.. seealso:: :func:`chainer.functions.accuracy`
.. admonition:: Example
    We show the most common case, when ``y`` is the two dimensional array.
    >>> y = chainerx.array([[0.1, 0.7, 0.2], # prediction label is 1
    ...                     [8.0, 1.0, 2.0], # prediction label is 0
    ...                     [-8.0, 1.0, 2.0], # prediction label is 2
    ...                     [-8.0, -1.0, -2.0]]) # prediction label is 1
    >>> t = chainerx.array([1, 0, 2, 1], chainerx.int32)
    >>> chainerx.accuracy(y, t) \
    # 100% accuracy because all samples are correct
    array(1., shape=(), dtype=float64, device='native:0')
    >>> t = chainerx.array([1, 0, 0, 0], chainerx.int32)
    >>> chainerx.accuracy(y, t) \
    # 50% accuracy because 1st and 2nd samples are correct
    array(0.5, shape=(), dtype=float64, device='native:0')
    >>> chainerx.accuracy(y, t, ignore_label=0) \
    # 100% accuracy because of ignoring the 2nd, 3rd and 4th samples.
    array(1., shape=(), dtype=float64, device='native:0')
""")
def _docs_indexing():
    """Set docstrings for the indexing routines (take, where, nonzero)."""
    _docs.set_doc(
        chainerx.take,
        """take(a, indices, axis)
Takes elements from an array along an axis.
Args:
    a (~chainerx.ndarray): Source array.
    indices (~chainerx.ndarray):
        The indices of the values to extract. When indices are out of bounds,
        they are wrapped around.
    axis (int): The axis over which to select values.
    mode (str): Specifies how out-of-bounds indices will behave.
        'raise' - raise an error
        'wrap' - wrap around
        'clip' - clip to the range
Returns:
    :func:`~chainerx.ndarray`: Output array.
Note:
    This function currently does not support ``axis=None``
Note:
    During backpropagation, this function propagates the gradient of the
    output array to the input array ``a``.
Note:
    The default mode for the native backend is 'raise', while for the cuda
    backend is 'wrap' in order to prevent device synchronization.
    'raise' mode is currently not supported in the CUDA backend.
.. seealso:: :func:`numpy.take`
""")
    _docs.set_doc(
        chainerx.where,
        """where(condition, x, y)
Return elements chosen from ``x`` or ``y`` depending on condition.
Args:
    condition (~chainerx.ndarray): Where True, yield ``x``, otherwise
        yield ``y``.
    x (~chainerx.ndarray): Values from which to choose.
    y (~chainerx.ndarray): Values from which to choose.
Returns:
    :func:`~chainerx.ndarray`: An array with elements
    from ``x`` where condition is True, and elements from ``y`` elsewhere.
Note:
    During backpropagation, this function propagates the gradient of the
    output array to the input array ``x`` and ``y``.
.. seealso:: :func:`numpy.where`
""")
    _docs.set_doc(
        chainerx.nonzero,
        """nonzero(a)
Return the indices of the elements that are non-zero.
Args:
    a (~chainerx.ndarray): Input array.
Returns:
    tuple of :func:`~chainerx.ndarray`: Indices of elements that are non-zero.
Note:
    During backpropagation, this function does not propagate gradients.
.. seealso:: :func:`numpy.nonzero`
""")
def _docs_linalg():
_docs.set_doc(
chainerx.dot,
"""dot(a, b)
Returns a dot product of two arrays.
For arrays with more than one axis, it computes the dot product along the last
axis of ``a`` and the second-to-last axis of ``b``. This is just a matrix
product if the both arrays are 2-D. For 1-D arrays, it uses their unique axis
as an axis to take dot product over.
Args:
a (~chainerx.ndarray): The left argument.
b (~chainerx.ndarray): The right argument.
Returns:
:class:`~chainerx.ndarray`: Output array.
Note:
This function currently does not support N > 2 dimensional arrays.
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``a`` and ``b``.
.. seealso:: :func:`numpy.dot`
""")
_docs.set_doc(
chainerx.linalg.solve,
"""solve(a, b)
Solves a linear matrix equation, or system of linear scalar equations.
It computes the exact solution of ``x`` in ``ax = b``,
where ``a`` is a square and full rank matrix,
``b`` can be a vector, or a rectangular matrix.
When ``b`` is matrix, its columns are treated as separate vectors
representing multiple right-hand sides.
Args:
a (~chainerx.ndarray): Coefficient matrix.
b (~chainerx.ndarray): "dependent variable" values.
Returns:
:class:`~chainerx.ndarray`:
Solution to the system ``ax = b``.
Shape is identical to ``b``.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.solve`
""")
_docs.set_doc(
chainerx.linalg.inv,
"""inv(a)
Computes the inverse of a matrix.
This function computes matrix ``a_inv`` from square matrix
``a`` such that ``dot(a, a_inv) = dot(a_inv, a) = eye(a.shape[0])``.
Args:
a (~chainerx.ndarray): The matrix to be inverted.
Returns:
:class:`~chainerx.ndarray`: The inverse of a matrix.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.inv`
""")
_docs.set_doc(
chainerx.linalg.svd,
"""svd(a, full_matrices=True, compute_uv=True)
Singular Value Decomposition.
Factorizes the matrix ``a`` into two unitary matrices ``U`` and ``Vt``, and
a 1-D array ``s`` of singular values such that
``a == U * S * Vt``, where ``S`` is a suitably shaped matrix of zeros with
main diagonal ``s`` and ``*`` represents a dot product.
Args:
a (~chainerx.ndarray): The input matrix with dimension ``(M, N)``.
full_matrices (bool): If True, it returns u and v with dimensions
``(M, M)`` and ``(N, N)``. Otherwise, the dimensions of u and v
are respectively ``(M, K)`` and ``(K, N)``, where
``K = min(M, N)``.
compute_uv (bool): If False, only singular values are computed.
Returns:
tuple of :class:`chainerx.ndarray`:
A tuple of ``(U, s, Vt)`` such that ``a = U * diag(s) * Vt``.
When ``compute_uv`` is False only singular values ``s`` are returned.
Note:
* The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
* The SVD is commonly written as `a = U * diag(s) * V^T`.
The ``Vt`` returned by this function is `V^T`.
* During backpropagation, this function requires ``U`` and ``Vt`` computed,
therefore differentiation does not work for ``compute_uv=False``.
* Backpropagation is not implemented for ``full_matrices=True``.
.. seealso:: :func:`numpy.linalg.svd`
""")
_docs.set_doc(
chainerx.linalg.pinv,
"""pinv(a, rcond=1e-15)
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its singular-value
decomposition (SVD) and including all large singular values.
Args:
a (~chainerx.ndarray): The input matrix to be pseudo-inverted.
rcond (float): Cutoff for small singular values.
Returns:
:class:`~chainerx.ndarray`: The pseudo-inverse of ``a``.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.pinv`
""")
_docs.set_doc(
chainerx.linalg.qr,
"""qr(a, mode='reduced')
Compute the qr factorization of a matrix.
Factor the matrix ``a`` as *qr*, where ``q`` is orthonormal and ``r`` is
upper-triangular.
Args:
a (~chainerx.ndarray): Matrix to be factored.
mode (str): The mode of decomposition.
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,),
where ``(M, N)`` is the shape of the input matrix and ``K = min(M, N)``
Returns:
q (~chainerx.ndarray): A matrix with orthonormal columns.
r (~chainerx.ndarray): The upper-triangular matrix.
Note:
* The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
* Backpropagation is not implemented for non-square output matrix ``r``.
* Backpropagation is not implemented for 'r' or 'raw' modes.
.. seealso:: :func:`numpy.linalg.qr`
""")
_docs.set_doc(
chainerx.linalg.cholesky,
"""cholesky(a)
Computes the Cholesky decomposition of a matrix.
Returns the Cholesky decomposition, :math:`A = L L^T`,
for the square matrix ``a``.
Args:
a (~chainerx.ndarray): Symmetric positive-definite input matrix.
Returns:
:class:`~chainerx.ndarray`: Output array. Cholesky factor of ``a``.
Note:
The forward computation does not necessarily check if the input matrix is
symmetric (e.g. the native backend relying on LAPACK does not). However,
both the forward and the backward computations assume that it is and their
results are unspecified otherwise. The computed gradient is always a
symmetric matrix. More specifically, the gradient is computed as if the
function is restricted to a Riemannian submanifold of
:math:`R^{n \\times n}` consisting just of positive-definite symmetric
matrices and is faithful to the mathematical definition of the Cholesky
decomposition.
Note:
* GPU implementation of the Cholesky decomposition routine is based on
cuSOLVER library. Older versions (<10.1) of it might not raise an error
for some non positive-definite matrices.
* The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.cholesky`
""")
_docs.set_doc(
chainerx.linalg.eigh,
"""eigh(a, UPLO='L')
Compute the eigenvalues and eigenvectors of a real symmetric matrix.
Args:
a (~chainerx.ndarray): Real symmetric matrix whose eigenvalues
and eigenvectors are to be computed.
UPLO (str): Specifies whether the calculation is done with the lower
triangular part of a ('L', default) or the upper triangular part ('U').
Returns:
tuple of :class:`~chainerx.ndarray`:
Returns a tuple ``(w, v)``. ``w`` contains eigenvalues and
``v`` contains eigenvectors. ``v[:, i]`` is an eigenvector
corresponding to an eigenvalue ``w[i]``.
Note:
Although ``UPLO`` can be specified to ignore either the strictly lower or
upper part of the input matrix, the backward computation assumes that the
inputs is symmetric and the computed gradient is always a symmetric matrix
with respect to ``UPLO``. More specifically, the gradient is computed as if
the function is restricted to a Riemannian submanifold of
:math:`R^{n \\times n}` consisting just of symmetric matrices and is
faithful to the mathematical definition of the eigenvalue decomposition of
symmetric matrices.
Note:
The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
.. seealso:: :func:`numpy.linalg.eigh`
""")
_docs.set_doc(
chainerx.linalg.eigvalsh,
"""eigvalsh(a, UPLO='L')
Compute the eigenvalues of a real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Args:
a (~chainerx.ndarray): Real symmetric matrix whose eigenvalues
and eigenvectors are to be computed.
UPLO (str): Specifies whether the calculation is done with the lower
triangular part of a (‘L’, default) or the upper triangular part (‘U’).
(optional).
Returns:
:class:`~chainerx.ndarray`: Returns eigenvalues as a vector.
Note:
* The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
* Backpropagation requires eigenvectors and, therefore, is not implemented
for this function. ``linalg.eigh`` should be used instead.
.. seealso:: :func:`numpy.linalg.eigvalsh`
""")
def _docs_logic():
    """Attach docstrings to chainerx logical and comparison routines.

    Each ``_docs.set_doc`` call assigns reStructuredText documentation to an
    already-defined ``chainerx`` function; no runtime behavior is changed.
    """
    _docs.set_doc(
        chainerx.all,
        """all(x)
Test whether all array elements along a given axis evaluate to True.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which AND reduction is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.all`
""")
    # Fixed grammar: "any array element ... evaluates" (singular subject),
    # matching numpy.any's own wording.
    _docs.set_doc(
        chainerx.any,
        """any(x)
Test whether any array element along a given axis evaluates to True.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which OR reduction is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.any`
""")
    _docs.set_doc(
        chainerx.logical_not,
        """logical_not(x)
Returns an array of NOT x element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_not`
""")
    _docs.set_doc(
        chainerx.logical_and,
        """logical_and(x1, x2)
Returns an array of x1 AND x2 element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_and`
""")
    _docs.set_doc(
        chainerx.logical_or,
        """logical_or(x1, x2)
Returns an array of x1 OR x2 element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_or`
""")
    _docs.set_doc(
        chainerx.logical_xor,
        """logical_xor(x1, x2)
Returns an array of x1 XOR x2 element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.logical_xor`
""")
    _docs.set_doc(
        chainerx.greater,
        """greater(x1, x2)
Returns an array of (x1 > x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.greater`
""")
    _docs.set_doc(
        chainerx.greater_equal,
        """greater_equal(x1, x2)
Returns an array of (x1 >= x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.greater_equal`
""")
    _docs.set_doc(
        chainerx.less,
        """less(x1, x2)
Returns an array of (x1 < x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.less`
""")
    _docs.set_doc(
        chainerx.less_equal,
        """less_equal(x1, x2)
Returns an array of (x1 <= x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.less_equal`
""")
    _docs.set_doc(
        chainerx.equal,
        """equal(x1, x2)
Returns an array of (x1 == x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.equal`
""")
    _docs.set_doc(
        chainerx.not_equal,
        """not_equal(x1, x2)
Returns an array of (x1 != x2) element-wise.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Output array of type bool.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.not_equal`
""")
def _docs_loss():
    """Attach docstrings to chainerx loss functions.

    Each ``_docs.set_doc`` call assigns reStructuredText documentation to an
    already-defined ``chainerx`` function; no runtime behavior is changed.
    """
    _docs.set_doc(
        chainerx.absolute_error,
        """Element-wise absolute error function.
Computes the element-wise absolute error :math:`L` between two inputs
:math:`x_1` and :math:`x_2` defined as follows.
.. math::
L = |x_1 - x_2|
Args:
x1 (~chainerx.ndarray): Input variable.
x2 (~chainerx.ndarray): Input variable.
Returns:
:class:`~chainerx.ndarray`: A variable holding an array representing
the absolute error of two inputs.
.. seealso:: :func:`chainer.functions.absolute_error`
""")
    # NOTE(review): this docstring names the inputs x0/x1 while
    # absolute_error above uses x1/x2 — confirm against the actual
    # chainerx.squared_error signature before harmonizing.
    _docs.set_doc(
        chainerx.squared_error,
        """Element-wise squared error function.
Computes the element-wise squared error :math:`L` between two inputs
:math:`x_1` and :math:`x_2` defined as follows.
.. math::
L = (x_1 - x_2)^2
Can be used to compute mean squared error by just calling `mean()`
on the output array.
Args:
x0 (~chainerx.ndarray): Input variable.
x1 (~chainerx.ndarray): Input variable.
Returns:
:class:`~chainerx.ndarray`: A variable holding an array representing
the squared error of two inputs.
.. seealso:: :func:`chainer.functions.squared_error`
""")
    _docs.set_doc(
        chainerx.huber_loss,
        """Element-wise Huber loss.
The Huber loss is similar to the squared error but is less sensitive to
outliers in the data. It is defined as
.. math::
L_{\\delta}(a) = \\left \\{ \\begin{array}{cc}
\\frac{1}{2} a^2 & {\\rm if~|a| \\leq \\delta} \\\\
\\delta (|a| - \\frac{1}{2} \\delta) & {\\rm otherwise,}
\\end{array} \\right.
where :math:`a = x - t` is the difference between the input :math:`x`
and the target :math:`t`.
See: `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_.
Args:
x (~chainerx.ndarray): Input variable.
t (~chainerx.ndarray): Target variable for regression.
delta (float): Constant variable for Huber loss function as used in
definition.
Returns:
:class:`~chainerx.ndarray`:
A variable object holding an array representing the Huber loss
:math:`L_{\\delta}` of the two inputs.
.. seealso:: :func:`chainer.functions.huber_loss`
""")
    _docs.set_doc(
        chainerx.gaussian_kl_divergence,
        """Element-wise KL-divergence of Gaussian variables from the standard one.
Given two variable ``mean`` representing :math:`\\mu` and ``ln_var``
representing :math:`\\log(\\sigma^2)`, this function calculates
the element-wise KL-divergence between the given multi-dimensional
Gaussian :math:`N(\\mu, S)` and the standard Gaussian :math:`N(0, I)`
.. math::
D_{\\mathbf{KL}}(N(\\mu, S) \\| N(0, I)),
where :math:`S` is a diagonal matrix such that :math:`S_{ii} = \\sigma_i^2`
and :math:`I` is an identity matrix.
Args:
mean (~chainerx.ndarray):
A variable representing mean of given
gaussian distribution, :math:`\\mu`.
ln_var (~chainerx.ndarray):
A variable representing logarithm of
variance of given gaussian distribution, :math:`\\log(\\sigma^2)`.
Returns:
:class:`~chainerx.ndarray`:
A variable representing KL-divergence between
given gaussian distribution and the standard gaussian.
.. seealso:: :func:`chainer.functions.gaussian_kl_divergence`
""")
    _docs.set_doc(
        chainerx.sigmoid_cross_entropy,
        """sigmoid_cross_entropy(x1, x2)
Element-wise cross entropy loss for pre-sigmoid activations.
Args:
x1 (~chainerx.ndarray): An array whose (i, j)-th element indicates the
unnormalized log probability of the j-th unit at the i-th example.
x2 (~chainerx.ndarray): An array whose (i, j)-th element indicates a signed
integer vector of ground truth labels 0 or 1. If ``x2[i, j] == -1``,
corresponding ``x1[i, j]`` is ignored. Loss is zero if all ground truth
labels are -1.
Returns:
:class:`~chainerx.ndarray`: An array of the cross entropy.
Note:
During backpropagation, this function propagates the gradient of the output
array to the input array ``x1`` only.
""")
    _docs.set_doc(
        chainerx.softmax_cross_entropy,
        """softmax_cross_entropy(x1, x2)
Element-wise cross entropy loss for pre-softmax activations.
Args:
x1 (~chainerx.ndarray): An array whose element indicates unnormalized log
probability: the first axis of the array represents the number of
samples, and the second axis represents the number of classes.
x2 (~chainerx.ndarray): A signed integer vector of ground truth labels. If
``x2[i] == -1``, corresponding ``x1[i]`` is ignored.
Returns:
:class:`~chainerx.ndarray`: An array of the cross entropy.
Note:
During backpropagation, this function propagates the gradient of the output
array to the input array ``x1`` only.
""")
def _docs_manipulation():
    """Attach docstrings to chainerx array-manipulation routines.

    Each ``_docs.set_doc`` call assigns reStructuredText documentation to an
    already-defined ``chainerx`` function; no runtime behavior is changed.
    Fixes two user-visible typos: "Swaped" -> "Swapped" (swapaxes) and
    "destintation" -> "destination" (moveaxis).
    """
    _docs.set_doc(
        chainerx.reshape,
        """reshape(a, newshape)
Returns a reshaped array.
Args:
a (~chainerx.ndarray): Array to be reshaped.
newshape (int or tuple of ints): The new shape of the array to return.
If it is an integer, then it is treated as a tuple of length one.
It should be compatible with ``a.size``. One of the elements can be
-1, which is automatically replaced with the appropriate value to
make the shape compatible with ``a.size``.
Returns:
:class:`~chainerx.ndarray`: A reshaped view of ``a`` if possible,
otherwise a copy.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.reshape`
""")
    _docs.set_doc(
        chainerx.ravel,
        """ravel(a)
Returns a flattened array.
Args:
a (~chainerx.ndarray): Array to be flattened.
Returns:
:class:`~chainerx.ndarray`: A flattened view of ``a`` if possible,
otherwise a copy.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.ravel`
""")
    _docs.set_doc(
        chainerx.transpose,
        """transpose(a, axes=None)
Permutes the dimensions of an array.
Args:
a (~chainerx.ndarray): Array to permute the dimensions.
axes (tuple of ints): Permutation of the dimensions. This function reverses
the shape by default.
Returns:
~chainerx.ndarray: A view of ``a`` with the dimensions permuted.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.transpose`
""")
    _docs.set_doc(
        chainerx.broadcast_to,
        """broadcast_to(array, shape)
Broadcasts an array to a given shape.
Args:
array (~chainerx.ndarray): Array to broadcast.
shape (tuple of ints): The shape of the desired array.
Returns:
~chainerx.ndarray: Broadcasted view.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``array``.
.. seealso:: :func:`numpy.broadcast_to`
""")
    _docs.set_doc(
        chainerx.squeeze,
        """squeeze(a, axis=None)
Removes size-one axes from the shape of an array.
Args:
a (~chainerx.ndarray): Array to be reshaped.
axis (int or tuple of ints): Axes to be removed. This function removes all
size-one axes by default. If one of the specified axes is not of size
one, an exception is raised.
Returns:
~chainerx.ndarray: An array without (specified) size-one axes.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.squeeze`
""")
    _docs.set_doc(
        chainerx.concatenate,
        """concatenate(arrays, axis=0)
Joins arrays along an axis.
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be joined.
All of these should have the same dimensionalities except the specified
axis.
axis (int): The axis to join arrays along.
Returns:
~chainerx.ndarray: Joined array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.concatenate`
""")
    _docs.set_doc(
        chainerx.stack,
        """stack(arrays, axis=0)
Stacks arrays along a new axis.
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
axis (int): Axis along which the arrays are stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.stack`
""")
    _docs.set_doc(
        chainerx.hstack,
        """hstack(arrays)
Stack arrays in sequence horizontally (column wise).
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.hstack`
""")
    _docs.set_doc(
        chainerx.vstack,
        """vstack(arrays)
Stack arrays in sequence vertically (row wise).
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.vstack`
""")
    _docs.set_doc(
        chainerx.dstack,
        """dstack(arrays)
Stack arrays in sequence depth wise (along third axis).
Args:
arrays (sequence of :class:`~chainerx.ndarray`\\ s): Arrays to be stacked.
Returns:
~chainerx.ndarray: Stacked array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``arrays``.
.. seealso:: :func:`numpy.dstack`
""")
    _docs.set_doc(
        chainerx.atleast_2d,
        """atleast_2d(a)
View inputs as arrays with at least two dimensions.
Args:
a (~chainerx.ndarray): Array.
Returns:
~chainerx.ndarray: An array with a.ndim >= 2.
Copies are avoided where possible, and views with
two or more dimensions are returned.
Note:
* Arrays that already have two or more dimensions are preserved.
* During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``a``.
.. seealso:: :func:`numpy.atleast_2d`
""")
    _docs.set_doc(
        chainerx.atleast_3d,
        """atleast_3d(a)
View inputs as arrays with at least three dimensions.
Args:
a (~chainerx.ndarray): Array.
Returns:
~chainerx.ndarray: An array with a.ndim >= 3.
Copies are avoided where possible, and views with
three or more dimensions are returned.
Note:
* Arrays that already have three or more dimensions are preserved.
* During backpropagation, this function propagates the gradient of the
output array to the input arrays in ``a``.
.. seealso:: :func:`numpy.atleast_3d`
""")
    _docs.set_doc(
        chainerx.split,
        """split(ary, indices_or_sections, axis=0)
Splits an array into multiple sub arrays along a given axis.
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
divide the axis. If it is an integer, then is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
axis (int): Axis along which the array is split.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.split`
""")
    _docs.set_doc(
        chainerx.dsplit,
        """dsplit(ary, indices_or_sections)
Split array into multiple sub-arrays along the 3rd axis (depth).
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
divide the axis. If it is an integer, then is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.dsplit`
""")
    _docs.set_doc(
        chainerx.vsplit,
        """vsplit(ary, indices_or_sections)
Splits an array into multiple sub-arrays vertically (row-wise).
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
divide the axis. If it is an integer, then is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.vsplit`
""")
    _docs.set_doc(
        chainerx.hsplit,
        """hsplit(ary, indices_or_sections)
Split an array into multiple sub-arrays horizontally (column-wise).
Args:
ary (~chainerx.ndarray): Array to split.
indices_or_sections (int or sequence of ints): A value indicating how to
divide the axis. If it is an integer, then is treated as the number of
sections, and the axis is evenly divided. Otherwise, the integers
indicate indices to split at. Note that a sequence on the device
memory is not allowed.
Returns:
list of :class:`~chainerx.ndarray`\\ s: A list of sub arrays. Each array \
is a partial view of the input array.
Note:
During backpropagation, this function propagates the gradients of the
output arrays to the input array ``ary``.
.. seealso:: :func:`numpy.hsplit`
""")
    _docs.set_doc(
        chainerx.swapaxes,
        """swapaxes(a, axis1, axis2)
Interchange two axes of an array.
Args:
a (~chainerx.ndarray): Array to swapaxes.
axis1 (int): First Axis
axis2 (int): Second Axis
Returns:
~chainerx.ndarray: Swapped array.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``a``.
.. seealso:: :func:`numpy.swapaxes`
""")
    _docs.set_doc(
        chainerx.repeat,
        """repeat(a, repeats, axis=None)
Constructs an array by repeating a given array.
Args:
a (~chainerx.ndarray): Array to repeat.
repeats (int or tuple of ints): The number of times which each
element of a is repeated.
axis (int): The axis along which to repeat values.
Returns:
~chainerx.ndarray: The repeated output array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.repeat`
""")
    _docs.set_doc(
        chainerx.expand_dims,
        """expand_dims(a, axis)
Expand the shape of an array.
Args:
a (~chainerx.ndarray): Input Array.
axis (int): Position in the expanded axes where the new axis is placed.
Returns:
~chainerx.ndarray: Output array.
Note:
* Output array may or may not be a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``a``.
.. seealso:: :func:`numpy.expand_dims`
""")
    _docs.set_doc(
        chainerx.flip,
        """flip(m, axis)
Reverse the order of elements in an array along the given axis.
Args:
m (~chainerx.ndarray): Input Array.
axis (int or tuple of ints): Axis or axes along which to flip over.
The default, axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the
axes specified in the tuple.
Returns:
~chainerx.ndarray: A view of m with the entries of axis reversed.
Since a view is returned, this operation is done in constant time.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``m``.
.. seealso:: :func:`numpy.flip`
""")
    _docs.set_doc(
        chainerx.fliplr,
        """fliplr(m)
Flip array in the left/right direction.
Args:
m (~chainerx.ndarray): Input Array.
Returns:
~chainerx.ndarray: A view of m with the columns reversed.
Since a view is returned, this operation is done in constant time.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``m``.
.. seealso:: :func:`numpy.fliplr`
""")
    _docs.set_doc(
        chainerx.flipud,
        """flipud(m)
Flip array in the up/down direction.
Args:
m (~chainerx.ndarray): Input Array.
Returns:
~chainerx.ndarray: A view of m with the rows reversed.
Since a view is returned, this operation is done in constant time.
Note:
* Output array is a view of the input array.
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``m``.
.. seealso:: :func:`numpy.flipud`
""")
    _docs.set_doc(
        chainerx.moveaxis,
        """moveaxis(a, source, destination)
Move axes of an array to new positions.
Other axes remain in their original order.
Args:
a (~chainerx.ndarray): Input Array.
source (int or tuple of ints): Original positions of the axes to move.
These must be unique.
destination (int or tuple of ints): Destination positions for each of
the original axes. These must also be unique.
Returns:
~chainerx.ndarray: Array with moved axes. This array is a view of the
input array.
Note:
* During backpropagation, this function propagates the gradients of the
output arrays to the input array ``a``.
.. seealso:: :func:`numpy.moveaxis`
""")
def _docs_math():
_docs.set_doc(
chainerx.negative,
"""negative(x)
Numerical negative, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = -x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.negative`
""")
_docs.set_doc(
chainerx.add,
"""add(x1, x2)
Add arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 + x_2`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.add`
""")
_docs.set_doc(
chainerx.subtract,
"""subtract(x1, x2)
Subtract arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 - x_2`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.subtract`
""")
_docs.set_doc(
chainerx.multiply,
"""multiply(x1, x2)
Multiply arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 \\times x_2`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.multiply`
""")
_docs.set_doc(
chainerx.divide,
"""divide(x1, x2)
Divide arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\frac{x_1}{x_2}`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.divide`
""")
_docs.set_doc(
chainerx.sum,
"""sum(a, axis=None, keepdims=False)
Sum of array elements over a given axis.
Args:
a (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which a sum is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: The sum of input elements over a given axis.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.sum`
""")
_docs.set_doc(
chainerx.maximum,
"""maximum(x1, x2)
Maximum arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`:
Returned array: :math:`y = max(\\{x_1, x_2\\})`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.maximum`
""")
_docs.set_doc(
chainerx.minimum,
"""minimum(x1, x2)
Minimum arguments, element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`:
Returned array: :math:`y = min(\\{x_1, x_2\\})`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.minimum`
""")
_docs.set_doc(
chainerx.remainder,
"""remainder(x1, x2)
Return element-wise remainder of division.
Args:
x1 (~chainerx.ndarray or scalar): Input array.
x2 (~chainerx.ndarray or scalar): Input array.
Returns:
:class:`~chainerx.ndarray`:
Returned array: The element-wise remainder of
the quotient ``floor_divide(x1, x2)``.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x1`` and ``x2``.
.. seealso:: :data:`numpy.remainder`
""")
_docs.set_doc(
chainerx.exp,
"""exp(x)
Numerical exponential, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\exp x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.exp`
""")
_docs.set_doc(
chainerx.log,
"""log(x)
Natural logarithm, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\ln x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log`
""")
_docs.set_doc(
chainerx.log10,
"""log10(x)
Base 10 logarithm, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\log_{10} x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log10`
""")
_docs.set_doc(
chainerx.log2,
"""log2(x)
Base 2 logarithm, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\log_{2} x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log2`
""")
_docs.set_doc(
chainerx.log1p,
"""log1p(x)
Natural logarithm of one plus the input, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\log(1 + x)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.log1p`
""")
_docs.set_doc(
chainerx.logsumexp,
"""logsumexp(x, axis=None, keepdims=False)
The log of the sum of exponentials of input array.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which a sum is performed.
The flattened array is used by default.
keepdims (bool):
If this is set to ``True``, the reduced axes are left in the result
as dimensions with size one.
Returns:
:class:`~chainerx.ndarray`: The log of the sum of exponentials of
input elements over a given axis.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
""")
# Attach reference docstrings to chainerx's element-wise math routines
# (log_softmax, square, sqrt, sinh, cosh, tanh, sigmoid, sin, cos).
# Each docstring follows the NumPy-doc style and notes gradient propagation.
_docs.set_doc(
chainerx.log_softmax,
"""log_softmax(x, axis=None)
The log of the softmax of input array.
Args:
x (~chainerx.ndarray): Input array.
axis (None or int or tuple of ints):
Axis or axes along which a sum is performed.
The flattened array is used by default.
Returns:
:class:`~chainerx.ndarray`: The log of the softmax of input elements
over a given axis.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
""")
_docs.set_doc(
chainerx.square,
"""square(x)
Returns the element-wise square of the input.
Args:
x (~chainerx.ndarray or scalar): Input data
Returns:
~chainerx.ndarray: Returned array: :math:`y = x * x`.
A scalar is returned if ``x`` is a scalar.
Note:
During backpropagation, this function propagates the gradient
of the output array to the input array ``x``.
.. seealso:: :data:`numpy.square`
""")
_docs.set_doc(
chainerx.sqrt,
"""sqrt(x)
Non-negative square-root, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sqrt x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.sqrt`
""")
_docs.set_doc(
chainerx.sinh,
"""sinh(x)
Hyperbolic Sine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sinh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.sinh`
""")
_docs.set_doc(
chainerx.cosh,
"""cosh(x)
Hyperbolic Cosine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\cosh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.cosh`
""")
_docs.set_doc(
chainerx.tanh,
"""tanh(x)
Element-wise hyperbolic tangent function.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\tanh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.tanh`
""")
_docs.set_doc(
chainerx.sigmoid,
"""sigmoid(x)
Element-wise sigmoid logistic function.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array:
:math:`f(x) = (1 + \\exp(-x))^{-1}`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :func:`chainer.functions.sigmoid`
""")
_docs.set_doc(
chainerx.sin,
"""sin(x)
Sine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\sin x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.sin`
""")
_docs.set_doc(
chainerx.cos,
"""cos(x)
Cosine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\cos x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.cos`
""")
# Doc for chainerx.ceil. Fix: summary line ended with a doubled period
# ("element-wise..").
_docs.set_doc(
chainerx.ceil,
"""ceil(x)
Return the ceiling of the input, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The ceiling of each element in array.
.. seealso:: :data:`numpy.ceil`
""")
# Attach reference docstrings to chainerx.tan and chainerx.relu.
_docs.set_doc(
chainerx.tan,
"""tan(x)
Tangent, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\tan x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.tan`
""")
_docs.set_doc(
chainerx.relu,
"""Rectified Linear Unit function.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\max (0, x)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
""")
# Attach reference docstrings to the tree-structured LSTM activations
# (chainerx.tree_lstm and chainerx.slstm). The docstring text, including the
# LaTeX update equations and doctest examples, is rendered in the built docs.
_docs.set_doc(
chainerx.tree_lstm,
"""tree_lstm(*inputs)
TreeLSTM unit as an activation function.
This function implements TreeLSTM units both for
N-ary TreeLSTM and Child-Sum TreeLSTM.
Let the children cell states
:math:`c_{\\text{1}}, c_{\\text{2}}, \\dots, c_{\\text{N}}`,
and the incoming signal :math:`x`.
First, the incoming signal :math:`x` is split into (3 + N) arrays
:math:`a, i, o, f_{\\text{1}}, f_{\\text{2}}, ..., f_{\\text{N}}`
of the same shapes along the second axis.
It means that :math:`x` 's second axis must have (3 + N) times
of the length of each :math:`c_{n}`.
The splitted input signals are corresponding to
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`o` : sources of output gate
- :math:`f_{n}` : sources of forget gate for n-th ary
Second, it computes outputs as
.. math::
c &= \\tanh(a) \\text{sigmoid}(i) \\\\
& + c_{\\text{1}} \\text{sigmoid}(f_{\\text{1}}), \\\\
& + c_{\\text{2}} \\text{sigmoid}(f_{\\text{2}}), \\\\
& + ..., \\\\
& + c_{\\text{N}} \\text{sigmoid}(f_{\\text{N}}), \\\\
h &= \\tanh(c) \\text{sigmoid}(o).
These are returned as a tuple of (N + 1) variables.
Args:
inputs (list of :class:`~chainerx.array`): Variable arguments which
include all cell vectors from child-nodes, and an input vector.
Each of the cell vectors and the input vector is
:class:`~chainerx.array`.
The input vector must have the second dimension whose size
is (N + 3) times of that of each cell,
where N denotes the total number of cells.
Returns:
tuple: Two :class:`~chainerx.array` objects ``c`` and ``h``. ``c`` is
the updated cell state. ``h`` indicates the outgoing signal.
See the papers for details: `Improved Semantic Representations From
Tree-Structured Long Short-Term Memory Networks
<https://www.aclweb.org/anthology/P15-1150>`_ and
`A Fast Unified Model for Parsing and Sentence Understanding
<https://arxiv.org/pdf/1603.06021.pdf>`_.
Tai et al.'s N-Ary TreeLSTM is little extended in
Bowman et al., and this link is based on
the variant by Bowman et al.
Specifically, eq. 10 in Tai et al. only has one :math:`W` matrix
to be applied to :math:`x`, consistently for all children.
On the other hand, Bowman et al.'s model has multiple matrices,
each of which affects the forget gate for each child's cell individually.
.. admonition:: Example
Assuming ``y`` is the current input signal, ``c`` is the previous cell
state, and ``h`` is the previous output signal from an
:meth:`~chainerx.tree_lstm` function.
Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
Using 2-ary (binary) TreeLSTM,
most typical preparation of ``x`` is
>>> c1 = chainerx.ones((4, 10), dtype = chainerx.float32)
>>> c2 = chainerx.ones((4, 10), dtype = chainerx.float32)
>>> x = chainerx.ones((4, 50), dtype = chainerx.float32)
>>> c, h = chainerx.tree_lstm(c1, c2, x)
""")
_docs.set_doc(
chainerx.slstm,
"""slstm(c_prev1, c_prev2, x1, x2)
S-LSTM units as an activation function.
This function implements S-LSTM unit. It is an extension of LSTM unit
applied to tree structures.
The function is applied to binary trees. Each node has two child nodes.
It gets four arguments, previous cell states ``c_prev1`` and ``c_prev2``,
and input arrays ``x1`` and ``x2``.
First both input arrays ``x1`` and ``x2`` are split into eight arrays
:math:`a_1, i_1, f_1, o_1`, and :math:`a_2, i_2, f_2, o_2`. They have the
same shape along the second axis.
It means that ``x1`` and ``x2`` 's second axis must have 4 times
the length of ``c_prev1`` and ``c_prev2``.
The split input arrays are corresponding to
- :math:`a_i` : sources of cell input
- :math:`i_i` : sources of input gate
- :math:`f_i` : sources of forget gate
- :math:`o_i` : sources of output gate
It computes the updated cell state ``c`` and the outgoing signal
``h`` as.
.. math::
c &= \\tanh(a_1 + a_2) \\sigma(i_1 + i_2)
+ c_{\\text{prev}1} \\sigma(f_1)
+ c_{\\text{prev}2} \\sigma(f_2), \\\\
h &= \\tanh(c) \\sigma(o_1 + o_2),
where :math:`\\sigma` is the elementwise sigmoid function.
The function returns ``c`` and ``h`` as a tuple.
Args:
c_prev1 (:class:`~chainerx.array`):
Variable that holds the previous cell state of the first child
node. The cell state should be a zero array or the output of
the previous call of LSTM.
c_prev2 (:class:`~chainerx.array`):
Variable that holds the previous cell state of the second child
node.
x1 (:class:`~chainerx.array`):
Variable that holds the sources of cell input, input gate, forget
gate and output gate from the first child node. It must have the
second dimension whose size is four times of that of the cell
state.
x2 (:class:`~chainerx.array`):
Variable that holds the input sources from the second child node.
Returns:
tuple: Two :class:`~chainerx.array` objects ``c`` and ``h``. ``c`` is
the cell state. ``h`` indicates the outgoing signal.
See detail in paper: `Long Short-Term Memory Over Tree Structures
<https://arxiv.org/abs/1503.04881>`_.
.. admonition:: Example
Assuming ``c1``, ``c2`` is the previous cell state of children,
and ``h1``, ``h2`` is the previous outgoing signal from children.
Each of ``c1``, ``c2``, ``h1`` and ``h2`` has ``n_units`` channels.
Most typical preparation of ``x1``, ``x2`` is:
>>> n_units = 100
>>> c1 = chainerx.ones((1, n_units), np.float32)
>>> c2 = chainerx.ones((1, n_units), np.float32)
>>> x1 = chainerx.ones((1, 4 * n_units), chainerx.float32)
>>> x2 = chainerx.ones((1, 4 * n_units), chainerx.float32)
>>> c, h = chainerx.slstm(c1, c2, x1, x2)
""")
# Attach reference docstrings to the inverse trigonometric / hyperbolic
# routines (arcsin, arccos, arctan, arctan2, arcsinh).
_docs.set_doc(
chainerx.arcsin,
"""arcsin(x)
Inverse sine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arcsin x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arcsin`
""")
_docs.set_doc(
chainerx.arccos,
"""arccos(x)
Trigonometric inverse cosine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arccos x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arccos`
""")
_docs.set_doc(
chainerx.arctan,
"""arctan(x)
Trigonometric inverse tangent, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arctan x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arctan`
""")
_docs.set_doc(
chainerx.arctan2,
"""arctan2(x1, x2)
Element-wise arc tangent of :math:`\\frac{x_1}{x_2}` choosing the quadrant
correctly.
Args:
x1 (~chainerx.ndarray): Input array.
x2 (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returns an array where each element
represents :math:`\\theta` in the range :math:`[-\\pi, \\pi]`, such
that :math:`x_1 = r \\sin(\\theta)` and :math:`x_2 = r \\cos(\\theta)`
for some :math:`r > 0`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x1`` and/or ``x2``.
.. seealso:: :data:`numpy.arctan2`
""")
_docs.set_doc(
chainerx.arcsinh,
"""arcsinh(x)
Inverse hyperbolic sine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arcsinh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arcsinh`
""")
# Doc for chainerx.arccosh. Fix: summary read "Inverse hypberbolic inverse
# cosine" — misspelled and with "inverse" doubled; arccosh is the inverse
# hyperbolic cosine.
_docs.set_doc(
chainerx.arccosh,
"""arccosh(x)
Inverse hyperbolic cosine, element-wise
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = \\arccosh x`.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. seealso:: :data:`numpy.arccosh`
""")
# Attach reference docstrings to the remaining element-wise routines:
# fabs/sign/floor, the float-classification predicates (isnan, isfinite,
# isinf — no gradient propagation), and the integer bitwise/shift ops
# (also no gradient propagation).
_docs.set_doc(
chainerx.fabs,
"""fabs(x)
Compute the absolute values element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The absolute values of x, the returned values
are always floats.
.. seealso:: :data:`numpy.fabs`
""")
_docs.set_doc(
chainerx.sign,
"""sign(x)
Returns an element-wise indication of the sign of a number.
The sign function returns :math:`-1 if x < 0, 0 if x==0, 1 if x > 0`.
``nan`` is returned for ``nan`` inputs.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The sign of x.
.. seealso:: :data:`numpy.sign`
""")
_docs.set_doc(
chainerx.floor,
"""floor(x)
Return the floor of the input, element-wise.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: The floor of each element in array.
.. seealso:: :data:`numpy.floor`
""")
_docs.set_doc(
chainerx.isnan,
"""isnan(x)
Test element-wise for NaN and return result as a boolean array.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: True where ``x`` is NaN, false otherwise
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.isnan`
""")
_docs.set_doc(
chainerx.isfinite,
"""isfinite(x)
Test element-wise for finiteness (not infinity or not Not a Number).
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: True where x is not positive infinity,
negative infinity, or NaN; false otherwise.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.isfinite`
""")
_docs.set_doc(
chainerx.isinf,
"""isinf(x)
Test element-wise for positive or negative infinity.
Args:
x (~chainerx.ndarray): Input array.
Returns:
:class:`~chainerx.ndarray`: True where ``x`` is positive or negative
infinity, false otherwise.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.isinf`
""")
_docs.set_doc(
chainerx.bitwise_and,
"""bitwise_and(x1, x2)
Compute the bit-wise AND of two arrays element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 \\& x_2`
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.bitwise_and`
""")
_docs.set_doc(
chainerx.bitwise_or,
"""bitwise_or(x1, x2)
Compute the bit-wise OR of two arrays element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 | x_2`
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.bitwise_or`
""")
_docs.set_doc(
chainerx.bitwise_xor,
"""bitwise_xor(x1, x2)
Compute the bit-wise XOR of two arrays element-wise.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Returned array: :math:`y = x_1 \\oplus x_2`
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.bitwise_xor`
""")
_docs.set_doc(
chainerx.left_shift,
"""left_shift(x1, x2)
Shift the bits of an integer to the left.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Return `x1` with bits shifted `x2` times to the left.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.left_shift`
""") # NOQA
_docs.set_doc(
chainerx.right_shift,
"""right_shift(x1, x2)
Shift the bits of an integer to the right.
Args:
x1 (~chainerx.ndarray or scalar): Input array of integers.
x2 (~chainerx.ndarray or scalar): Input array of integers.
Returns:
:class:`~chainerx.ndarray`: Return `x1` with bits shifted `x2` times to the right.
Note:
During backpropagation, this function does not propagate gradients.
.. seealso:: :data:`numpy.right_shift`
""") # NOQA
# Attach reference docstrings to the sorting/searching routines
# (argmax, argmin). Called once at import time by the docs installer.
def _docs_sorting():
_docs.set_doc(
chainerx.argmax,
"""argmax(a, axis=None)
Returns the indices of the maximum along an axis.
Args:
a (~chainerx.ndarray): Array to take the indices of the maximum of.
axis (None or int): Along which axis to compute the maximum. The flattened
array is used by default.
Returns:
:class:`~chainerx.ndarray`: The indices of the maximum of ``a``, along the
axis if specified.
.. seealso:: :func:`numpy.argmax`
""")
_docs.set_doc(
chainerx.argmin,
"""argmin(a, axis=None)
Returns the indices of the minimum along an axis.
Args:
a (~chainerx.ndarray): Array to take the indices of the minimum of.
axis (None or int): Along which axis to compute the minimum. The flattened
array is used by default.
Returns:
:class:`~chainerx.ndarray`: The indices of the minimum of ``a``, along the
axis if specified.
.. seealso:: :func:`numpy.argmin`
""")
# Attach reference docstrings to the statistics routines
# (amax, amin, mean, var).
def _docs_statistics():
_docs.set_doc(
chainerx.amax,
"""amax(a, axis=None, keepdims=False)
Returns the maximum of an array or the maximum along an axis.
Note:
When at least one element is NaN, the corresponding max value will be NaN.
Args:
a (~chainerx.ndarray): Array to take the maximum.
axis (None or int or tuple of ints): Along which axis to take the maximum.
The flattened array is used by default.
If this is a tuple of ints, the maximum is selected over multiple
axes, instead of a single axis or all the axes.
keepdims (bool): If ``True``, the axis is remained as an axis of size one.
Returns:
:class:`~chainerx.ndarray`: The maximum of ``a``, along the axis if
specified.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.amax`
""")
_docs.set_doc(
chainerx.amin,
"""amin(a, axis=None, keepdims=False)
Returns the minimum of an array or the minimum along an axis.
Note:
When at least one element is NaN, the corresponding min value will be NaN.
Args:
a (~chainerx.ndarray): Array to take the minimum.
axis (None or int or tuple of ints): Along which axis to take the minimum.
The flattened array is used by default.
If this is a tuple of ints, the minimum is selected over multiple
axes, instead of a single axis or all the axes.
keepdims (bool): If ``True``, the axis is remained as an axis of size one.
Returns:
:class:`~chainerx.ndarray`: The minimum of ``a``, along the axis if
specified.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``a``.
.. seealso:: :func:`numpy.amin`
""")
_docs.set_doc(
chainerx.mean,
"""mean(a, axis=None, keepdims=False)
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over the
flattened array by default, otherwise over the specified axis.
Args:
a (~chainerx.ndarray): Array to take the mean of.
axis (None or int or tuple of ints): Along which axis or axes to compute
the mean. The flattened array is used by default.
keepdims (bool): If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
Returns:
:class:`~chainerx.ndarray`: The mean of ``a``, along the axis or axes if
specified.
.. seealso:: :func:`numpy.mean`
""")
_docs.set_doc(
chainerx.var,
"""var(a, axis=None, keepdims=False)
Compute the arithmetic var along the specified axis.
Returns the var of the array elements. The var is taken over the flattened
array by default, otherwise over the specified axis.
Args:
a (~chainerx.ndarray): Array to take the var of.
axis (None or int or tuple of ints): Along which axis or axes to compute
the var. The flattened array is used by default.
keepdims (bool): If this is set to True, the axes which are reduced are
left in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
Returns:
:class:`~chainerx.ndarray`: The var of ``a``, along the axis or axes if
specified.
.. seealso:: :func:`numpy.var`
""")
# Attach reference docstrings to the connection (layer-like) routines.
# This span covers the function header and chainerx.conv; the remaining
# set_doc calls of this group follow below.
def _docs_connection():
_docs.set_doc(
chainerx.conv,
"""conv(x, w, b=None, stride=1, pad=0, cover_all=False)
N-dimensional convolution.
This is an implementation of N-dimensional convolution which is generalized
two-dimensional convolution in ConvNets. It takes three arrays: the
input ``x``, the filter weight ``w`` and the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`l_1, l_2, ..., l_N` are the size of each axis of the output's
spatial dimensions, respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
Then the ``conv`` function computes correlations between filters
and patches of size :math:`(k_1, k_2, ..., k_N)` in ``x``.
Note that correlation here is equivalent to the inner product between
expanded tensors.
Patches are extracted at positions shifted by multiples of ``stride`` from
the first position ``(-p_1, -p_2, ..., -p_N)`` for each spatial axis.
Let :math:`(s_1, s_2, ..., s_N)` be the stride of filter application.
Then, the output size :math:`(l_1, l_2, ..., l_N)` is determined by the
following equations:
.. math::
l_n = (d_n + 2p_n - k_n) / s_n + 1 \\ \\ (n = 1, ..., N)
If ``cover_all`` option is ``True``, the filter will cover the all
spatial locations. So, if the last stride of filter does not cover the
end of spatial locations, an additional stride will be applied to the end
part of spatial locations. In this case, the output size is determined by
the following equations:
.. math::
l_n = (d_n + 2p_n - k_n + s_n - 1) / s_n + 1 \\ \\ (n = 1, ..., N)
Args:
x (:class:`~chainerx.ndarray`):
Input array of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
w (:class:`~chainerx.ndarray`):
Weight array of shape :math:`(c_O, c_I, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainerx.ndarray`):
One-dimensional bias array with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
cover_all (bool): If ``True``, all spatial locations are convoluted
into some output pixels. It may make the output size larger.
`cover_all` needs to be ``False`` if you want to use ``cuda`` backend.
Returns:
~chainerx.ndarray:
Output array of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
Note:
In ``cuda`` backend, this function uses cuDNN implementation for its
forward and backward computation.
Note:
In ``cuda`` backend, this function has following limitations yet:
- The ``cover_all=True`` option is not supported yet.
- The ``dtype`` must be ``float32`` or ``float64`` (``float16`` is not
supported yet.)
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``x``, ``w``, and ``b``.
.. seealso:: :func:`chainer.functions.convolution_nd`
.. admonition:: Example
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 30, 40, 50
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = chainerx.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 30, 40, 50)
>>> w = chainerx.random.uniform(0, 1, (c_o, c_i, k1, k2, k3)).\
astype(np.float32)
>>> w.shape
(1, 3, 10, 10, 10)
>>> b = chainerx.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = chainerx.conv(x, w, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 16, 11, 9)
>>> l1 = int((d1 + 2 * p1 - k1) / s1 + 1)
>>> l2 = int((d2 + 2 * p2 - k2) / s2 + 1)
>>> l3 = int((d3 + 2 * p3 - k3) / s3 + 1)
>>> y.shape == (n, c_o, l1, l2, l3)
True
>>> y = chainerx.conv(x, w, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3), cover_all=True)
>>> y.shape == (n, c_o, l1, l2, l3 + 1)
True
""")
# Doc for chainerx.conv_transpose. Fix: the "Deconvolutional Networks"
# link target was missing its URL scheme ("://www.matthewzeiler.com/...")
# and would render as a broken reST link.
_docs.set_doc(
chainerx.conv_transpose,
"""conv_transpose(x, w, b=None, stride=1, pad=0, outsize=None)
N-dimensional transposed convolution.
This is an implementation of N-dimensional transposed convolution, which is
previously known as **deconvolution** in Chainer.
.. _Deconvolutional Networks: \
http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf
It takes three arrays: the input ``x``, the filter weight ``w``, and the
bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
- :math:`s_1, s_2, ..., s_N` are the stride of each axis of filter
application, respectively.
If ``outsize`` option is ``None``, the output size
:math:`(l_1, l_2, ..., l_N)` is determined by the following equations with
the items in the above list:
.. math::
l_n = s_n (d_n - 1) + k_n - 2 p_n \\ \\ (n = 1, ..., N)
If ``outsize`` option is given, the output size is determined by
``outsize``. In this case, the ``outsize`` :math:`(l_1, l_2, ..., l_N)`
must satisfy the following equations:
.. math::
d_n = \\lfloor (l_n + 2p_n - k_n) / s_n \\rfloor + 1 \\ \\ \
(n = 1, ..., N)
Args:
x (:class:`~chainerx.ndarray`):
Input array of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
w (:class:`~chainerx.ndarray`):
Weight array of shape :math:`(c_I, c_O, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainerx.ndarray`):
One-dimensional bias array with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
outsize (None or :class:`tuple` of :class:`int` s):
Expected output size of deconvolutional operation. It should be a
tuple of ints :math:`(l_1, l_2, ..., l_N)`. Default value is
``None`` and the outsize is estimated by input size, stride and
pad.
Returns:
~chainerx.ndarray:
Output array of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``x``, ``w``, and ``b``.
.. seealso:: :func:`chainer.functions.deconvolution_nd`
.. admonition:: Example
**Example1**: the case when ``outsize`` is not given.
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 5, 10, 15
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = chainerx.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 5, 10, 15)
>>> w = chainerx.random.uniform(0, 1, (c_i, c_o, k1, k2, k3)).\
astype(np.float32)
>>> w.shape
(3, 1, 10, 10, 10)
>>> b = chainerx.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = chainerx.conv_transpose(x, w, b, stride=(s1, s2, s3), \
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 8, 36, 84)
>>> l1 = s1 * (d1 - 1) + k1 - 2 * p1
>>> l2 = s2 * (d2 - 1) + k2 - 2 * p2
>>> l3 = s3 * (d3 - 1) + k3 - 2 * p3
>>> y.shape == (n, c_o, l1, l2, l3)
True
**Example2**: the case when ``outsize`` is given.
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 5, 10, 15
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = chainerx.array(np.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32))
>>> x.shape
(10, 3, 5, 10, 15)
>>> w = chainerx.array(np.random.uniform(0, 1, (c_i, c_o, k1, k2, k3)).\
astype(np.float32))
>>> w.shape
(3, 1, 10, 10, 10)
>>> b = chainerx.array(np.random.uniform(0, 1, (c_o)).astype(np.float32))
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> l1, l2, l3 = 9, 38, 87
>>> d1 == int((l1 + 2 * p1 - k1) / s1) + 1
True
>>> d2 == int((l2 + 2 * p2 - k2) / s2) + 1
True
>>> d3 == int((l3 + 2 * p3 - k3) / s3) + 1
True
>>> y = chainerx.conv_transpose(x, w, b, stride=(s1, s2, s3), \
pad=(p1, p2, p3), outsize=(l1, l2, l3))
>>> y.shape
(10, 1, 9, 38, 87)
>>> y.shape == (n, c_o, l1, l2, l3)
True
""")
# Doc for chainerx.linear. Fix: the signature line said ``n_batch_axis=1``
# while the Args section documents the parameter as ``n_batch_axes`` —
# aligned the signature with the Args section.
_docs.set_doc(
chainerx.linear,
"""linear(x, W, b=None, n_batch_axes=1)
Linear function, or affine transformation.
It accepts two or three arguments: an input minibatch ``x``, a weight
matrix ``W``, and optionally a bias vector ``b``. It computes
.. math:: Y = xW^\\top + b.
Args:
x (~chainerx.ndarray):
Input array, which is a :math:`(s_1, s_2, ..., s_n)`-shaped array.
W (~chainerx.ndarray):
Weight variable of shape :math:`(M, N)`,
where :math:`(N = s_{\\rm n\\_batch\\_axes} * ... * s_n)`.
b (~chainerx.ndarray):
Bias variable (optional) of shape :math:`(M,)`.
n_batch_axes (int):
The number of batch axes. The default is 1. The input variable is
reshaped into (:math:`{\\rm n\\_batch\\_axes} + 1`)-dimensional
tensor. This should be greater than 0.
Returns:
:class:`~chainerx.ndarray`:
Output array with shape of
:math:`(s_1, ..., s_{\\rm n\\_batch\\_axes}, M)`.
Note:
During backpropagation, this function propagates the gradient of the
output array to input arrays ``x``, ``W`` and ``b``.
""")
# Doc for chainerx.lstm. Fix: the ``x`` argument was cross-referenced as
# ``:class:`~chainer.array``` while every sibling entry (e.g. ``c_prev``
# just above) uses ``:class:`~chainerx.array``` — made the reference
# consistent.
_docs.set_doc(
chainerx.lstm,
"""lstm(c_prev, x)
Long Short-Term Memory units as an activation function.
This function implements LSTM units with forget gates. Let the previous
cell state ``c_prev`` and the input array ``x``.
First, the input array ``x`` is split into four arrays
:math:`a, i, f, o` of the same shapes along the second axis. It means that
``x`` 's second axis must have 4 times the ``c_prev`` 's second axis.
The split input arrays are corresponding to:
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`f` : sources of forget gate
- :math:`o` : sources of output gate
Second, it computes the updated cell state ``c`` and the outgoing signal
``h`` as
.. math::
c &= \\tanh(a) \\sigma(i)
+ c_{\\text{prev}} \\sigma(f), \\\\
h &= \\tanh(c) \\sigma(o),
where :math:`\\sigma` is the elementwise sigmoid function.
These are returned as a tuple of two variables.
This function supports variable length inputs. The mini-batch size of
the current input must be equal to or smaller than that of the previous
one. When mini-batch size of ``x`` is smaller than that of ``c``, this
function only updates ``c[0:len(x)]`` and doesn't change the rest of ``c``,
``c[len(x):]``. So,
please sort input sequences in descending order of lengths before
applying the function.
Args:
c_prev (:class:`~chainerx.array`):
Variable that holds the previous cell state. The cell state
should be a zero array or the output of the previous call of LSTM.
x (:class:`~chainerx.array`):
Variable that holds the sources of cell input, input gate, forget
gate and output gate. It must have the second dimension whose size
is four times of that of the cell state.
Returns:
tuple: Two :class:`~chainerx.array` objects ``c`` and ``h``.
``c`` is the updated cell state. ``h`` indicates the outgoing signal.
See the original paper proposing LSTM with forget gates:
`Long Short-Term Memory in Recurrent Neural Networks
<http://www.felixgers.de/papers/phd.pdf>`_.
.. admonition:: Example
Assuming ``y`` is the current incoming signal, ``c`` is the previous
cell state, and ``h`` is the previous outgoing signal from an ``lstm``
function. Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
Most typical preparation of ``x`` is
>>> n_units = 100
>>> c_prev = chainerx.zeros((1, n_units), chainerx.float32)
>>> x = chainerx.zeros((1, 4 * n_units), chainerx.float32)
>>> c, h = chainerx.lstm(c_prev, x)
It corresponds to calculate the input array ``x``, or the input
sources :math:`a, i, f, o`, from the current incoming signal ``y`` and
the previous outgoing signal ``h``. Different parameters are used for
different kind of input sources.
""")
def _docs_normalization():
    """Set docstrings for the normalization routines of chainerx.

    Called once at import time; each ``_docs.set_doc`` call attaches a
    Sphinx-formatted docstring to the corresponding C++-backed routine.
    """
    _docs.set_doc(
        chainerx.batch_norm,
        """batch_norm(x, gamma, beta, running_mean, running_var, eps=2e-5, \
decay=0.9, axis=None)
Batch normalization function.
It takes the input array ``x`` and two parameter arrays ``gamma`` and
``beta``. The parameter arrays must both have the same size.
Args:
x (~chainerx.ndarray): Input array.
gamma (~chainerx.ndarray): Scaling parameter of normalized data.
beta (~chainerx.ndarray): Shifting parameter of scaled normalized data.
running_mean (~chainerx.ndarray):
Running average of the mean. This is a running average of
the mean over several mini-batches using the decay parameter.
The function takes a previous running average, and updates
the array in-place by the new running average.
running_var (~chainerx.ndarray):
Running average of the variance. This is a running average of
the variance over several mini-batches using the decay parameter.
The function takes a previous running average, and updates
the array in-place by the new running average.
eps (float): Epsilon value for numerical stability.
decay (float): Decay rate of moving average. It is used during training.
axis (int, tuple of int or None):
Axis over which normalization is performed. When axis is ``None``,
the first axis is treated as the batch axis and will be reduced
during normalization.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input arrays ``x``, ``gamma`` and ``beta``.
See: `Batch Normalization: Accelerating Deep Network Training by Reducing\
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
""")
    _docs.set_doc(
        chainerx.fixed_batch_norm,
        """fixed_batch_norm(x, gamma, beta, mean, var, eps=2e-5, axis=None)
Batch normalization function with fixed statistics.
This is a variant of :func:`~chainerx.batch_norm`, where the mean
and variance statistics are given by the caller as fixed variables.
Args:
x (~chainerx.ndarray): Input array.
gamma (~chainerx.ndarray): Scaling parameter of normalized data.
beta (~chainerx.ndarray): Shifting parameter of scaled normalized data.
mean (~chainerx.ndarray): Shifting parameter of input.
var (~chainerx.ndarray): Square of scaling parameter of input.
eps (float): Epsilon value for numerical stability.
axis (int, tuple of int or None):
Axis over which normalization is performed. When axis is ``None``,
the first axis is treated as the batch axis and will be reduced
during normalization.
Note:
During backpropagation, this function does not propagate gradients.
""")
def _docs_pooling():
    """Attach reference docstrings to the spatial pooling routines."""
    # Pair each pooling routine with its docstring, then register them all.
    pooling_docs = (
        (chainerx.max_pool,
         """max_pool(x, ksize, stride=None, pad=0, cover_all=False)
Spatial max pooling function.
This acts similarly to :func:`~chainerx.conv`, but it computes the maximum
of input spatial patch for each channel without any parameter instead of
computing the inner products.
Args:
x (~chainerx.ndarray): Input array.
ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
``None`` is specified, then it uses same stride as the pooling
window size.
pad (int or tuple of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are pooled into
some output pixels. It may make the output size larger.
Returns:
:class:`~chainerx.ndarray`: Output array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``. This function is only
differentiable up to the second order.
.. note::
In ``cuda`` backend, only 2 and 3 dim arrays are supported as ``x``
because cuDNN pooling supports 2 and 3 spatial dimensions.
"""),
        (chainerx.average_pool,
         """average_pool(x, ksize, stride=None, pad=0, pad_mode='ignore')
Spatial average pooling function.
This acts similarly to :func:`~chainerx.conv`, but it computes the average
of input spatial patch for each channel without any parameter instead of
computing the inner products.
Args:
x (~chainerx.ndarray): Input array.
ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
``None`` is specified, then it uses same stride as the pooling
window size.
pad (int or tuple of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
pad_mode ({'zero', 'ignore'}): Specifies how padded region is treated.
* 'zero' -- the values in the padded region are treated as 0
* 'ignore' -- padded region is ignored (default)
Returns:
:class:`~chainerx.ndarray`: Output array.
Note:
During backpropagation, this function propagates the gradient of the
output array to the input array ``x``.
.. note::
In ``cuda`` backend, only 2 and 3 dim arrays are supported as ``x``
because cuDNN pooling supports 2 and 3 spatial dimensions.
"""),
    )
    for routine, doc in pooling_docs:
        _docs.set_doc(routine, doc)
def _docs_rnn():
    """Set docstrings for the recurrent-network routines of chainerx.

    Fixes several typos in the published docstrings ("greather",
    "represnents", "sequneces"), a wrong module reference
    (``~chainer.array`` -> ``~chainerx.array``), the bi-RNN concatenation
    equation, and doctest examples that used ``np.float32`` although only
    ``chainerx`` is imported in the example.
    """
    _docs.set_doc(
        chainerx.n_step_lstm,
        """n_step_lstm(n_layers, hx, cx, ws, bs, xs)
Stacked Uni-directional Long Short-Term Memory function.
This function calculates stacked Uni-directional LSTM with sequences.
This function gets an initial hidden state :math:`h_0`, an initial cell
state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` and :math:`c_t` for each
time :math:`t` from input :math:`x_t`.
.. math::
i_t &= \\sigma(W_0 x_t + W_4 h_{t-1} + b_0 + b_4) \\\\
f_t &= \\sigma(W_1 x_t + W_5 h_{t-1} + b_1 + b_5) \\\\
o_t &= \\sigma(W_2 x_t + W_6 h_{t-1} + b_2 + b_6) \\\\
a_t &= \\tanh(W_3 x_t + W_7 h_{t-1} + b_3 + b_7) \\\\
c_t &= f_t \\cdot c_{t-1} + i_t \\cdot a_t \\\\
h_t &= o_t \\cdot \\tanh(c_t)
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Eight weight matrices and eight bias vectors are
required for each layer. So, when :math:`S` layers exist, you need to
prepare :math:`8S` weight matrices and :math:`8S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except the first layer may have different
shape from the first layer.
Args:
n_layers(int): The number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is the number of layers and
is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
is the dimension of the hidden units.
cx (:class:`~chainerx.array`): Variable holding stacked cell states.
It has the same shape as ``hx``.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i]`` represents the weights for the i-th layer.
Each ``ws[i]`` is a list containing eight matrices.
``ws[i][j]`` corresponds to :math:`W_j` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 4`` are ``(N, I)``-shaped as
they are multiplied with input variables, where ``I`` is the size
of the input and ``N`` is the dimension of the hidden units. All
other matrices are ``(N, N)``-shaped.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[i]`` represents the biases for the i-th layer.
Each ``bs[i]`` is a list containing eight vectors.
``bs[i][j]`` corresponds to :math:`b_j` in the equation.
The shape of each matrix is ``(N,)`` where ``N`` is the dimension
of the hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
mini-batch size for time ``t``.
When sequences have different lengths, they must be
sorted in descending order of their lengths.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy``, ``cy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is the same as
``hx``.
- ``cy`` is an updated cell states whose shape is the same as
``cx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
the mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``.
.. note::
The dimension of hidden units is limited to only one size ``N``. If you
want to use variable dimension of hidden units, please use
:class:`chainerx.lstm`.
.. seealso::
:func:`chainerx.lstm`
.. admonition:: Example
>>> import chainerx as chx
>>> batchs = [3, 2, 1] # support variable length sequences
>>> in_size, out_size, n_layers = 3, 2, 2
>>> xs = [chx.ones((b, in_size)).astype(chx.float32) for b in batchs]
>>> [x.shape for x in xs]
[(3, 3), (2, 3), (1, 3)]
>>> h_shape = (n_layers, batchs[0], out_size)
>>> hx = chx.ones(h_shape).astype(chx.float32)
>>> cx = chx.ones(h_shape).astype(chx.float32)
>>> w_in = lambda i, j: in_size if i == 0 and j < 4 else out_size
>>> ws = []
>>> bs = []
>>> for n in range(n_layers):
...     ws.append([chx.ones((out_size, w_in(n, i))).\
astype(chx.float32) for i in range(8)])
...     bs.append([chx.ones((out_size,)).astype(chx.float32) \
for _ in range(8)])
...
>>> ws[0][0].shape # ws[0][:4].shape are (out_size, in_size)
(2, 3)
>>> ws[1][0].shape # others are (out_size, out_size)
(2, 2)
>>> bs[0][0].shape
(2,)
>>> hy, cy, ys = chx.n_step_lstm(
... n_layers, hx, cx, ws, bs, xs)
>>> hy.shape
(2, 3, 2)
>>> cy.shape
(2, 3, 2)
>>> [y.shape for y in ys]
[(3, 2), (2, 2), (1, 2)]
""")
    _docs.set_doc(
        chainerx.n_step_bilstm,
        """n_step_bilstm(n_layers, hx, cx, ws, bs, xs)
Stacked Bi-directional Long Short-Term Memory function.
This function calculates stacked Bi-directional LSTM with sequences.
This function gets an initial hidden state :math:`h_0`, an initial cell
state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` and :math:`c_t` for each
time :math:`t` from input :math:`x_t`.
.. math::
i^{f}_t &=& \\sigma(W^{f}_0 x_t + W^{f}_4 h_{t-1} + b^{f}_0 + b^{f}_4),
\\\\
f^{f}_t &=& \\sigma(W^{f}_1 x_t + W^{f}_5 h_{t-1} + b^{f}_1 + b^{f}_5),
\\\\
o^{f}_t &=& \\sigma(W^{f}_2 x_t + W^{f}_6 h_{t-1} + b^{f}_2 + b^{f}_6),
\\\\
a^{f}_t &=& \\tanh(W^{f}_3 x_t + W^{f}_7 h_{t-1} + b^{f}_3 + b^{f}_7),
\\\\
c^{f}_t &=& f^{f}_t \\cdot c^{f}_{t-1} + i^{f}_t \\cdot a^{f}_t,
\\\\
h^{f}_t &=& o^{f}_t \\cdot \\tanh(c^{f}_t),
\\\\
i^{b}_t &=& \\sigma(W^{b}_0 x_t + W^{b}_4 h_{t-1} + b^{b}_0 + b^{b}_4),
\\\\
f^{b}_t &=& \\sigma(W^{b}_1 x_t + W^{b}_5 h_{t-1} + b^{b}_1 + b^{b}_5),
\\\\
o^{b}_t &=& \\sigma(W^{b}_2 x_t + W^{b}_6 h_{t-1} + b^{b}_2 + b^{b}_6),
\\\\
a^{b}_t &=& \\tanh(W^{b}_3 x_t + W^{b}_7 h_{t-1} + b^{b}_3 + b^{b}_7),
\\\\
c^{b}_t &=& f^{b}_t \\cdot c^{b}_{t-1} + i^{b}_t \\cdot a^{b}_t, \\\\
h^{b}_t &=& o^{b}_t \\cdot \\tanh(c^{b}_t), \\\\
h_t &=& [h^{f}_t; h^{b}_t]
where :math:`\\sigma` is the sigmoid function, :math:`W^{f}` is the weight \
matrices for forward-LSTM, :math:`W^{b}`
is weight matrices for backward-LSTM.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Eight weight matrices and eight bias vectors are
required for each layer of each direction. So, when :math:`S` layers
exist, you need to prepare :math:`16S` weight matrices and :math:`16S`
bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
Note that all input variables except the first layer may have different
shape from the first layer.
Args:
n_layers(int): The number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is the number of layers and
is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
is the dimension of the hidden units. Because of bi-direction, the
first dimension length is ``2S``.
cx (:class:`~chainerx.array`): Variable holding stacked cell states.
It has the same shape as ``hx``.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[2 * l + m]`` represents the weights for the l-th layer of
the m-th direction. (``m == 0`` means the forward direction and
``m == 1`` means the backward direction.) Each ``ws[i]`` is a
list containing eight matrices. ``ws[i][j]`` corresponds to
:math:`W_j` in the equation. ``ws[0][j]`` and ``ws[1][j]`` where
``0 <= j < 4`` are ``(N, I)``-shaped because they are multiplied
with input variables, where ``I`` is the size of the input.
``ws[i][j]`` where ``2 <= i`` and ``0 <= j < 4`` are
``(N, 2N)``-shaped because they are multiplied with two hidden
layers :math:`h_t = [h^{f}_t; h^{b}_t]`. All other matrices are
``(N, N)``-shaped.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[2 * l + m]`` represents the weights for the l-th layer of
m-th direction. (``m == 0`` means the forward direction and
``m == 1`` means the backward direction.)
Each ``bs[i]`` is a list containing eight vectors.
``bs[i][j]`` corresponds to :math:`b_j` in the equation.
The shape of each matrix is ``(N,)``.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
mini-batch size for time ``t``.
When sequences have different lengths, they must be
sorted in descending order of their lengths.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy``, ``cy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is the same as
``hx``.
- ``cy`` is an updated cell states whose shape is the same as
``cx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, 2N)`` where ``B_t``
is the mini-batch size for time ``t``, and ``N`` is size of
hidden units. Note that ``B_t`` is the same value as ``xs[t]``.
.. admonition:: Example
>>> import chainerx as chx
>>> batchs = [3, 2, 1] # support variable length sequences
>>> in_size, out_size, n_layers = 3, 2, 2
>>> dropout_ratio = 0.0
>>> xs = [chx.ones((b, in_size)).astype(chx.float32) for b in batchs]
>>> [x.shape for x in xs]
[(3, 3), (2, 3), (1, 3)]
>>> h_shape = (n_layers * 2, batchs[0], out_size)
>>> hx = chx.ones(h_shape).astype(chx.float32)
>>> cx = chx.ones(h_shape).astype(chx.float32)
>>> def w_in(i, j):
...     if i == 0 and j < 4:
...         return in_size
...     elif i > 0 and j < 4:
...         return out_size * 2
...     else:
...         return out_size
...
>>> ws = []
>>> bs = []
>>> for n in range(n_layers):
...     for direction in (0, 1):
...         ws.append([chx.ones((out_size, w_in(n, i))).\
astype(chx.float32) for i in range(8)])
...         bs.append([chx.ones((out_size,)).astype(chx.float32) \
for _ in range(8)])
...
>>> ws[0][0].shape # ws[0:2][:4].shape are (out_size, in_size)
(2, 3)
>>> ws[2][0].shape # ws[2:][:4].shape are (out_size, 2 * out_size)
(2, 4)
>>> ws[0][4].shape # others are (out_size, out_size)
(2, 2)
>>> bs[0][0].shape
(2,)
>>> hy, cy, ys = chx.n_step_bilstm(
... n_layers, hx, cx, ws, bs, xs)
>>> hy.shape
(4, 3, 2)
>>> cy.shape
(4, 3, 2)
>>> [y.shape for y in ys]
[(3, 4), (2, 4), (1, 4)]
""")
    _docs.set_doc(
        chainerx.n_step_gru,
        """n_step_gru(n_layers, hx, ws, bs, xs)
Stacked Uni-directional Gated Recurrent Unit function.
This function calculates stacked Uni-directional GRU with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each time :math:`t`
from input :math:`x_t`.
.. math::
r_t &= \\sigma(W_0 x_t + W_3 h_{t-1} + b_0 + b_3) \\\\
z_t &= \\sigma(W_1 x_t + W_4 h_{t-1} + b_1 + b_4) \\\\
h'_t &= \\tanh(W_2 x_t + b_2 + r_t \\cdot (W_5 h_{t-1} + b_5)) \\\\
h_t &= (1 - z_t) \\cdot h'_t + z_t \\cdot h_{t-1}
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Six weight matrices and six bias vectors are
required for each layers. So, when :math:`S` layers exists, you need to
prepare :math:`6S` weight matrices and :math:`6S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, input
of ``k``-th layer is hidden state ``h_t`` of ``k-1``-th layer.
Note that all input variables except first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
hx (~chainerx.array):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
``ws[i][j]`` is corresponding with ``W_j`` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 3`` is ``(N, I)`` shape as they
are multiplied with input variables. All other matrices has
``(N, N)`` shape.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[i]`` represents biases for i-th layer.
Each ``bs[i]`` is a list containing six vectors.
``bs[i][j]`` is corresponding with ``b_j`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
When sequences have different lengths, sort sequences in descending
order by length.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``.
""")
    _docs.set_doc(
        chainerx.n_step_bigru,
        """n_step_bigru(n_layers, hx, ws, bs, xs)
Stacked Bi-directional Gated Recurrent Unit function.
This function calculates stacked Bi-directional GRU with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each time :math:`t`
from input :math:`x_t`.
.. math::
r^{f}_t &= \\sigma(W^{f}_0 x_t + W^{f}_3 h_{t-1} + b^{f}_0 + b^{f}_3)
\\\\
z^{f}_t &= \\sigma(W^{f}_1 x_t + W^{f}_4 h_{t-1} + b^{f}_1 + b^{f}_4)
\\\\
h^{f'}_t &= \\tanh(W^{f}_2 x_t + b^{f}_2 + r^{f}_t \\cdot (W^{f}_5
h_{t-1} + b^{f}_5)) \\\\
h^{f}_t &= (1 - z^{f}_t) \\cdot h^{f'}_t + z^{f}_t \\cdot h_{t-1}
\\\\
r^{b}_t &= \\sigma(W^{b}_0 x_t + W^{b}_3 h_{t-1} + b^{b}_0 + b^{b}_3)
\\\\
z^{b}_t &= \\sigma(W^{b}_1 x_t + W^{b}_4 h_{t-1} + b^{b}_1 + b^{b}_4)
\\\\
h^{b'}_t &= \\tanh(W^{b}_2 x_t + b^{b}_2 + r^{b}_t \\cdot (W^{b}_5
h_{t-1} + b^{b}_5)) \\\\
h^{b}_t &= (1 - z^{b}_t) \\cdot h^{b'}_t + z^{b}_t \\cdot h_{t-1}
\\\\
h_t &= [h^{f}_t; h^{b}_t] \\\\
where :math:`W^{f}` is weight matrices for forward-GRU, :math:`W^{b}` is
weight matrices for backward-GRU.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Six weight matrices and six bias vectors are
required for each layers. So, when :math:`S` layers exists, you need to
prepare :math:`6S` weight matrices and :math:`6S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, input
of ``k``-th layer is hidden state ``h_t`` of ``k-1``-th layer.
Note that all input variables except first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
``ws[i][j]`` is corresponding with ``W_j`` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 3`` is ``(N, I)`` shape as they
are multiplied with input variables. All other matrices has
``(N, N)`` shape.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[i]`` represents biases for i-th layer.
Each ``bs[i]`` is a list containing six vectors.
``bs[i][j]`` is corresponding with ``b_j`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array` holding input values.
Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
When sequences have different lengths, sort sequences in descending
order by length.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``.
""")
    _docs.set_doc(
        chainerx.n_step_rnn,
        """n_step_rnn(n_layers, hx, ws, bs, xs, activation='tanh')
Stacked Uni-directional RNN function for sequence inputs.
This function calculates stacked Uni-directional RNN with sequences.
This function gets an initial hidden state :math:`h_0`,
an initial cell state :math:`c_0`, an input sequence :math:`x`,
weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` and :math:`c_t` for each
time :math:`t` from input :math:`x_t`.
.. math::
h_t = f(W_0 x_t + W_1 h_{t-1} + b_0 + b_1)
where :math:`f` is an activation function.
Weight matrices :math:`W` contains two matrices :math:`W_0` and
:math:`W_1`. :math:`W_0` is a parameter for an input sequence.
:math:`W_1` is a parameter for a hidden state.
Bias matrices :math:`b` contains two matrices :math:`b_0` and :math:`b_1`.
:math:`b_0` is a parameter for an input sequence.
:math:`b_1` is a parameter for a hidden state.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Two weight matrices and two bias vectors are
required for each layer. So, when :math:`S` layers exist, you need to
prepare :math:`2S` weight matrices and :math:`2S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, input
of ``k``-th layer is hidden state ``h_t`` of ``k-1``-th layer.
Note that all input variables except first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing two matrices.
``ws[i][j]`` is corresponding with ``W_j`` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 1`` is ``(N, I)`` shape as they
are multiplied with input variables. All other matrices has
``(N, N)`` shape.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[i]`` represents biases for i-th layer.
Each ``bs[i]`` is a list containing two vectors.
``bs[i][j]`` is corresponding with ``b_j`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array` holding input values.
Each element ``xs[t]`` holds input value for time ``t``.
Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
When sequences have different lengths, sort sequences in descending
order by length.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
activation (str): Activation function name.
Please select ``tanh`` or ``relu``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``.
""")
    _docs.set_doc(
        chainerx.n_step_birnn,
        """n_step_birnn(n_layers, hx, ws, bs, xs, activation='tanh')
Stacked Bi-directional RNN function for sequence inputs.
This function calculates stacked Bi-directional RNN with sequences.
This function gets an initial hidden state :math:`h_0`, an initial
cell state :math:`c_0`, an input sequence :math:`x`,
weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` and :math:`c_t` for each
time :math:`t` from input :math:`x_t`.
.. math::
h^{f}_t &=& f(W^{f}_0 x_t + W^{f}_1 h_{t-1} + b^{f}_0 + b^{f}_1), \\\\
h^{b}_t &=& f(W^{b}_0 x_t + W^{b}_1 h_{t-1} + b^{b}_0 + b^{b}_1), \\\\
h_t &=& [h^{f}_t; h^{b}_t], \\\\
where :math:`f` is an activation function.
Weight matrices :math:`W` contains two matrices :math:`W^{f}` and
:math:`W^{b}`. :math:`W^{f}` is weight matrices for forward directional
RNN. :math:`W^{b}` is weight matrices for backward directional RNN.
:math:`W^{f}` contains :math:`W^{f}_0` for an input sequence and
:math:`W^{f}_1` for a hidden state.
:math:`W^{b}` contains :math:`W^{b}_0` for an input sequence and
:math:`W^{b}_1` for a hidden state.
Bias matrices :math:`b` contains two matrices :math:`b^{f}` and
:math:`b^{b}`. :math:`b^{f}` contains :math:`b^{f}_0` for an input sequence
and :math:`b^{f}_1` for a hidden state.
:math:`b^{b}` contains :math:`b^{b}_0` for an input sequence and
:math:`b^{b}_1` for a hidden state.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Two weight matrices and two bias vectors are
required for each layer. So, when :math:`S` layers exist, you need to
prepare :math:`2S` weight matrices and :math:`2S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, input
of ``k``-th layer is hidden state ``h_t`` of ``k-1``-th layer.
Note that all input variables except first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
hx (:class:`~chainerx.array`):
Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units. Because of bi-direction, the
first dimension length is ``2S``.
ws (list of list of :class:`~chainerx.array`): Weight matrices.
``ws[i + di]`` represents weights for i-th layer.
Note that ``di = 0`` for forward-RNN and ``di = 1`` for
backward-RNN.
Each ``ws[i + di]`` is a list containing two matrices.
``ws[i + di][j]`` is corresponding with ``W^{f}_j`` if ``di = 0``
and corresponding with ``W^{b}_j`` if ``di = 1`` in the equation.
Only ``ws[0][j]`` and ``ws[1][j]`` where ``0 <= j < 1`` are
``(I, N)`` shape as they are multiplied with input variables.
All other matrices has ``(N, N)`` shape.
bs (list of list of :class:`~chainerx.array`): Bias vectors.
``bs[i + di]`` represents biases for i-th layer.
Note that ``di = 0`` for forward-RNN and ``di = 1`` for
backward-RNN.
Each ``bs[i + di]`` is a list containing two vectors.
``bs[i + di][j]`` is corresponding with ``b^{f}_j`` if ``di = 0``
and corresponding with ``b^{b}_j`` if ``di = 1`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of :class:`~chainerx.array`):
A list of :class:`~chainerx.array` holding input values.
Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
When sequences have different lengths, sort sequences in descending
order by length.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
activation (str): Activation function name.
Please select ``tanh`` or ``relu``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainerx.array` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t``
is mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``.
""")
| 127,329
| 31.26812
| 86
|
py
|
chainer
|
chainer-master/chainerx/_docs/context.py
|
import chainerx
from chainerx import _docs
def set_docs():
    """Attach the reference docstring to :class:`chainerx.Context`."""
    # Register the docstring directly on the class; no intermediate alias.
    _docs.set_doc(
        chainerx.Context,
        """Context()
An isolated execution environment of ChainerX.
In Python binding, a single context is automatically created and set as the
global default context on import. Only advanced users will have to care about
contexts.
""")
| 367
| 20.647059
| 77
|
py
|
chainer
|
chainer-master/chainerx/_docs/array.py
|
import chainerx
from chainerx import _docs
def set_docs():
ndarray = chainerx.ndarray
_docs.set_doc(
ndarray,
"""ndarray(shape, dtype, device=None)
Multi-dimensional array, the central data structure of ChainerX.
This class, along with other APIs in the :mod:`chainerx` module, provides a
subset of NumPy APIs. This class works similar to :class:`numpy.ndarray`,
except for some differences including the following noticeable points:
- :class:`chainerx.ndarray` has a :attr:`device` attribute. It indicates on
which device the array is allocated.
- :class:`chainerx.ndarray` supports :ref:`Define-by-Run <define_by_run>`
backpropagation. Once you call :meth:`require_grad`, the array starts
recording the operations applied to it recursively. Gradient of the result
with respect to the original array can be computed then with the
:meth:`backward` method or the :func:`chainerx.backward` function.
Args:
shape (tuple of ints): Shape of the new array.
dtype: Data type.
device (~chainerx.Device): Device on which the array is allocated.
If omitted, :ref:`the default device <chainerx_device>` is chosen.
.. seealso:: :class:`numpy.ndarray`
""")
_docs.set_doc(
ndarray.data_ptr,
"""int: Address of the underlying memory allocation.
The meaning of the address is device-dependent.
""")
_docs.set_doc(
ndarray.data_size,
'int: Total size of the underlying memory allocation.')
_docs.set_doc(
ndarray.device, '~chainerx.Device: Device on which the data exists.')
_docs.set_doc(ndarray.dtype, 'Data type of the array.')
# TODO(beam2d): Write about backprop id.
_docs.set_doc(
ndarray.grad,
"""~chainerx.ndarray: Gradient held by the array.
It is ``None`` if the gradient is not available.
Setter of this property overwrites the gradient.
""")
_docs.set_doc(
ndarray.is_contiguous,
'bool: ``True`` iff the array is stored in the C-contiguous order.')
_docs.set_doc(ndarray.itemsize, 'int: Size of each element in bytes.')
_docs.set_doc(
ndarray.nbytes,
"""int: Total size of all elements in bytes.
It does not count skips between elements.""")
_docs.set_doc(ndarray.ndim, 'int: Number of dimensions.')
_docs.set_doc(
ndarray.offset,
'int: Offset of the first element from the memory allocation in bytes.'
)
_docs.set_doc(
ndarray.shape,
"""tuple of int: Lengths of axes.
.. note::
Currently, this property does not support setter.""")
_docs.set_doc(ndarray.size, 'int: Number of elements in the array.')
_docs.set_doc(ndarray.strides, 'tuple of int: Strides of axes in bytes.')
_docs.set_doc(
ndarray.T,
"""~chainerx.ndarray: Shape-reversed view of the array.
New array is created at every access to this property.
``x.T`` is just a shorthand of ``x.transpose()``.
""")
_docs.set_doc(
ndarray.__getitem__,
"""___getitem__(self, key)
Returns self[key].
.. note::
Currently, only basic indexing is supported not advanced indexing.
""")
def unary_op(name, s):
_docs.set_doc(getattr(ndarray, name), '{}()\n{}'.format(name, s))
unary_op('__bool__', 'Casts a size-one array into a :class:`bool` value.')
unary_op('__float__',
'Casts a size-one array into a :class:`float` value.')
unary_op('__int__', 'Casts a size-one array into :class:`int` value.')
unary_op('__len__', 'Returns the length of the first axis.')
unary_op('__neg__', 'Computes ``-x`` elementwise.')
def binary_op(name, s):
_docs.set_doc(getattr(ndarray, name), '{}(other)\n{}'.format(name, s))
binary_op('__eq__', 'Computes ``x == y`` elementwise.')
binary_op('__ne__', 'Computes ``x != y`` elementwise.')
binary_op('__lt__', 'Computes ``x < y`` elementwise.')
binary_op('__le__', 'Computes ``x <= y`` elementwise.')
binary_op('__ge__', 'Computes ``x >= y`` elementwise.')
binary_op('__gt__', 'Computes ``x > y`` elementwise.')
binary_op('__iadd__', 'Computes ``x += y`` elementwise.')
binary_op('__isub__', 'Computes ``x -= y`` elementwise.')
binary_op('__imul__', 'Computes ``x *= y`` elementwise.')
binary_op('__itruediv__', 'Computes ``x /= y`` elementwise.')
binary_op('__iand__', 'Computes ``x &= y`` elementwise.')
binary_op('__ior__', 'Computes ``x |= y`` elementwise.')
binary_op('__ixor__', 'Computes ``x ^= y`` elementwise.')
binary_op('__add__', 'Computes ``x + y`` elementwise.')
binary_op('__sub__', 'Computes ``x - y`` elementwise.')
binary_op('__mul__', 'Computes ``x * y`` elementwise.')
binary_op('__truediv__', 'Computes ``x / y`` elementwise.')
binary_op('__and__', 'Computes ``x & y`` elementwise.')
binary_op('__or__', 'Computes ``x | y`` elementwise.')
binary_op('__xor__', 'Computes ``x ^ y`` elementwise.')
binary_op('__radd__', 'Computes ``y + x`` elementwise.')
binary_op('__rsub__', 'Computes ``y - x`` elementwise.')
binary_op('__rmul__', 'Computes ``y * x`` elementwise.')
binary_op('__rand__', 'Computes ``y & x`` elementwise.')
binary_op('__ror__', 'Computes ``y | x`` elementwise.')
binary_op('__rxor__', 'Computes ``y ^ x`` elementwise.')
# TODO(beam2d): Write about as_grad_stopped(backprop_ids, copy) overload.
_docs.set_doc(
ndarray.as_grad_stopped,
"""as_grad_stopped(copy=False)
Creates a view or a copy of the array that stops gradient propagation.
This method behaves similar to :meth:`view` and :meth:`copy`, except that
the gradient is not propagated through this operation (internally, this
method creates a copy or view of the array without connecting the computational
graph for backprop).
Args:
copy (bool): If ``True``, it copies the array. Otherwise, it returns a view
of the original array.
Returns:
~chainerx.ndarray:
A view or a copy of the array without propagating the gradient on
backprop.
""")
_docs.set_doc(
ndarray.argmax,
"""argmax(axis=None)
Returns the indices of the maximum elements along a given axis.
See :func:`chainerx.argmax` for the full documentation.
""")
_docs.set_doc(
ndarray.argmin,
"""argmin(axis=None)
Returns the indices of the minimum elements along a given axis.
See :func:`chainerx.argmin` for the full documentation.
""")
_docs.set_doc(
ndarray.astype,
"""astype(dtype, copy=True)
Casts each element to the specified data type.
Args:
dtype: Data type of the new array.
copy (bool): If ``True``, this method always copies the data. Otherwise,
it creates a view of the array if possible.
Returns:
~chainerx.ndarray: An array with the specified dtype.
""")
_docs.set_doc(
ndarray.backward,
"""backward(backprop_id=None, enable_double_backprop=False)
Performs backpropagation starting from this array.
This method is equivalent to ``chainerx.backward([self], *args)``.
See :func:`chainerx.backward` for the full documentation.
""")
# TODO(beam2d): Write about backprop id.
_docs.set_doc(
ndarray.cleargrad,
"""cleargrad()
Clears the gradient held by this array.
""")
_docs.set_doc(
ndarray.copy,
"""copy()
Creates an array and copies all the elements to it.
The copied array is allocated on the same device as ``self``.
.. seealso:: :func:`chainerx.copy`
""")
_docs.set_doc(
ndarray.dot,
"""dot(b)
Returns the dot product with a given array.
See :func:`chainerx.dot` for the full documentation.
""")
_docs.set_doc(
ndarray.fill,
"""fill(value)
Fills the array with a scalar value in place.
Args:
value: Scalar value with which the array will be filled.
""")
# TODO(beam2d): Write about backprop_id argument.
_docs.set_doc(
ndarray.get_grad,
"""get_grad()
Returns the gradient held by the array.
If the gradient is not available, it returns ``None``.
""")
# TODO(beam2d): Write about backprop_id argument.
_docs.set_doc(
ndarray.is_backprop_required,
"""is_backprop_required()
Returns ``True`` if gradient propagates through this array on backprop.
See the note on :meth:`require_grad` for details.
""")
# TODO(beam2d): Write about backprop_id argument.
_docs.set_doc(
ndarray.is_grad_required,
"""is_grad_required()
Returns ``True`` if the gradient will be set after backprop.
See the note on :meth:`require_grad` for details.
""")
_docs.set_doc(
ndarray.item,
"""item()
Copies an element of an array to a standard Python scalar and returns it.
Returns:
z:
A copy of the specified element of the array as a suitable Python
scalar.
.. seealso:: :func:`numpy.item`
""")
_docs.set_doc(
ndarray.max,
"""max(axis=None, keepdims=False)
Returns the maximum along a given axis.
See :func:`chainerx.amax` for the full documentation.
""")
_docs.set_doc(
ndarray.min,
"""min(axis=None, keepdims=False)
Returns the minimum along a given axis.
See :func:`chainerx.amin` for the full documentation.
""")
# TODO(beam2d): Write about backprop_id argument.
_docs.set_doc(
ndarray.require_grad,
"""require_grad()
Declares that a gradient for this array will be made available after backprop.
Once calling this method, any operations applied to this array are recorded for
later backprop. After backprop, the :attr:`grad` attribute holds the gradient
array.
.. note::
ChainerX distinguishes *gradient requirements* and *backprop requirements*
strictly. They are strongly related, but different concepts as follows.
- *Gradient requirement* indicates that the gradient array should be made
available after backprop. This attribute **is not propagated** through
any operations. It implicates the backprop requirement.
- *Backprop requirement* indicates that the gradient should be propagated
through the array during backprop. This attribute **is propagated**
through differentiable operations.
:meth:`require_grad` sets the gradient requirement flag. If you need to
extract the gradient after backprop, you have to call :meth:`require_grad`
on the array even if the array is an intermediate result of differentiable
computations.
Returns:
~chainerx.ndarray: ``self``
""")
_docs.set_doc(
ndarray.reshape,
"""reshape(newshape)
Creates an array with a new shape and the same data.
See :func:`chainerx.reshape` for the full documentation.
""")
_docs.set_doc(
ndarray.set_grad,
"""set_grad(grad)
Sets a gradient to the array.
This method overwrites the gradient with a given array.
Args:
grad (~chainerx.ndarray): New gradient array.
""")
_docs.set_doc(
ndarray.squeeze,
"""squeeze(axis=None)
Removes size-one axes from an array.
See :func:`chainerx.squeeze` for the full documentation.
""")
_docs.set_doc(
ndarray.swapaxes,
"""swapaxes(axis1, axis2)
Interchange two axes of an array..
See :func:`chainerx.swapaxes` for the full documentation.
""")
_docs.set_doc(
ndarray.repeat,
"""repeat(repeats, axis=None)
Constructs an array by repeating a given array.
See :func:`chainerx.repeats` for the full documentation.
""")
_docs.set_doc(
ndarray.sum,
"""sum(axis=None, keepdims=False)
Returns the sum of an array along given axes.
See :func:`chainerx.sum` for the full documentation.
""")
_docs.set_doc(
ndarray.take,
"""take(indices, axis)
Takes elements from the array along an axis.
See :func:`chainerx.take` for the full documentation.
""")
_docs.set_doc(
ndarray.to_device,
"""to_device(device, index=None)
Transfers the array to the specified device.
Args:
device (~chainerx.Device or str): Device to which the array is transferred,
or a backend name. If it is a backend name, ``index`` should also be
specified.
index (int): Index of the device for the backend specified by ``device``.
Returns:
~chainerx.ndarray:
An array on the target device.
If the original array is already on the device, it is a view of that.
Otherwise, it is a copy of the array on the target device.
""")
_docs.set_doc(
ndarray.transpose,
"""transpose(axes=None)
Creates a view of an array with permutated axes.
See :func:`chainerx.transpose` for the full documentation.
""")
_docs.set_doc(
ndarray.view,
"""view()
Returns a view of the array.
The returned array shares the underlying buffer, though it has a different
identity as a Python object.
""")
| 12,815
| 29.297872
| 79
|
py
|
chainer
|
chainer-master/chainerx/_docs/utils.py
|
import chainerx
from chainerx import _docs
def set_docs():
    """Register the docstring for the chainerx utility routines."""
    # Bind target and documentation locally before registering.
    target = chainerx.to_numpy
    doc = """to_numpy(array, copy=True)
Converts a ChainerX array to NumPy
Args:
array (~chainerx.ndarray): ChainerX array.
copy (bool): If ``True``, a copy is always made. Otherwise, the resulting
array may be aliased with the input array.
Returns:
numpy.ndarray: NumPy array.
"""
    _docs.set_doc(target, doc)
| 410
| 20.631579
| 77
|
py
|
chainer
|
chainer-master/chainerx/_docs/backend.py
|
import chainerx
from chainerx import _docs
def _set_docs_backend():
    """Register docstrings on :class:`chainerx.Backend` and its members."""
    backend_cls = chainerx.Backend
    # (target, docstring) pairs, registered in order.
    entries = [
        (backend_cls,
         """Pluggable entity that abstracts various computing platforms.
A backend holds one or more :class:`~chainerx.Device`\\ s, each of which
represents a physical computing unit.
"""),
        (backend_cls.name,
         """Backend name.
Returns:
str: Backend name.
"""),
        (backend_cls.context,
         """Context to which this backend belongs.
Returns:
~chainerx.Context: Context object.
"""),
        (backend_cls.get_device,
         """get_device(index)
Returns a device specified by the given index.
Args:
index (int): Device index.
Returns:
~chainerx.Device: Device object.
"""),
        (backend_cls.get_device_count,
         """get_device_count()
Returns the number of devices available in this backend.
Returns:
int: Number of devices.
"""),
    ]
    for target, doc in entries:
        _docs.set_doc(target, doc)
def set_docs():
    """Entry point: register all backend-related docstrings."""
    _set_docs_backend()
    doc = """get_backend(backend_name)
Returns a backend specified by the name.
Args:
backend_name (str): Backend name.
Returns:
~chainerx.Backend: Backend object.
"""
    _docs.set_doc(chainerx.get_backend, doc)
| 1,249
| 17.115942
| 72
|
py
|
chainer
|
chainer-master/chainerx/_docs/backprop.py
|
import chainerx
from chainerx import _docs
def set_docs():
    """Register docstrings for the backprop-related chainerx functions.

    Fixes two typos in the previous documentation text: "that is is" ->
    "that it is" and "is affect by" -> "is affected by".
    """
    # chainerx.backward
    _docs.set_doc(
        chainerx.backward,
        """backward(outputs, *, enable_double_backprop=False)
Runs backpropagation.
On backpropagation (a.k.a. backprop),
the computational graph is traversed backward starting from the output arrays,
up until the root arrays on which :func:`ndarray.require_grad()` have been
called.
Backpropagation uses :data:`ndarray.grad <chainerx.ndarray.grad>` held by
the output arrays as the initial gradients.
You can manually assign them before calling this function.
Otherwise, they are assumed to be 1.
To enable higher order differentiation, pass ``enable_double_backprop=True``
so that you can further run backpropagation from the resulting gradient arrays.
Note that enabling it results in larger memory consumption needed to store the
gradients w.r.t intermediate arrays that are required for the second gradient
computation.
Note:
The whole process of backpropagation is executed in C++, except those
operations whose backward computation falls back to the corresponding
Python implementation. Currently this function does not release the GIL at
all.
Args:
outputs (~chainerx.ndarray or list of ndarrays):
Output arrays from which backpropagation starts.
enable_double_backprop (bool): If ``True``,
a computational trace of the whole backpropagation procedure is
recorded to the computational graph so that one can further do
backpropagation from the resulting gradients.
.. seealso::
* :meth:`chainerx.ndarray.backward`
""")
    # chainerx.grad
    _docs.set_doc(
        chainerx.grad,
        """grad(outputs, inputs, *, enable_double_backprop=False)
Computes and returns the gradients of the outputs w.r.t. the inputs.
This function differs from :func:`chainerx.backward` in the sense that
gradients are returned instead of being added to the gradients held by the
inputs. Gradients held by the inputs are not modified. Also, instead of
traversing through the whole graph starting from the outputs, a sub-graph is
extracted for computation. This means that it is more efficient, especially
for larger computational graphs.
Args:
outputs (list of ndarrays):
Output arrays from which backpropagation starts.
inputs (list of ndarrays):
Input arrays of which this function computes the gradients w.r.t.
enable_double_backprop (bool): If ``True``,
a computational trace of the whole backpropagation procedure is
recorded to the computational graph so that one can further do
backpropagation from the resulting gradients.
Returns:
list of :class:`~chainerx.ndarray`\\ s:
A list of gradients. The list always has the same length as the number
of inputs.
.. seealso::
* :func:`chainerx.backward`
* :func:`chainer.grad`
""")
    # chainerx.no_backprop_mode
    _docs.set_doc(
        chainerx.no_backprop_mode,
        """no_backprop_mode()
Creates a context manager which temporarily disables backpropagation.
Within this context, no computational graph will be formed unless
:meth:`~chainerx.force_backprop_mode` is used.
Arrays resulting from operations enclosed with this context will be
disconnected from the computational graph. Trying to perform backpropagation
from such arrays would result in an error.
.. code-block:: py
x = chainerx.array([4, 3], numpy.float32)
x.require_grad()
with chainerx.no_backprop_mode():
y = 2 * x + 1
y.backward() # ! error
Benefits of ``no_backprop_mode`` include reduced CPU overhead of building
computational graphs, and reduced consumption of device memory that
would be otherwise retained for backward propagation.
.. seealso::
* :func:`chainerx.force_backprop_mode`
* :func:`chainerx.is_backprop_required`
* :func:`chainer.no_backprop_mode`
""")
    # chainerx.force_backprop_mode
    _docs.set_doc(
        chainerx.force_backprop_mode,
        """force_backprop_mode()
Creates a context manager which temporarily enables backpropagation.
This context re-enables backpropagation that is disabled by
any surrounding :func:`~chainerx.no_backprop_mode` context.
.. code-block:: py
x = chainerx.array([4, 3], numpy.float32)
x.require_grad()
with chainerx.no_backprop_mode():
with chainerx.force_backprop_mode():
y = 2 * x + 1
y.backward()
x.grad
# array([2., 2.], shape=(2,), dtype=float32, device='native:0')
.. seealso::
* :func:`chainerx.no_backprop_mode`
* :func:`chainerx.is_backprop_required`
* :func:`chainer.force_backprop_mode`
""")
    # chainerx.is_backprop_required
    _docs.set_doc(
        chainerx.is_backprop_required,
        """is_backprop_required()
Returns whether the backpropagation is enabled in the current thread.
The result is affected by :func:`chainerx.no_backprop_mode` and
:func:`chainerx.force_backprop_mode`.
.. seealso::
* :func:`chainerx.no_backprop_mode`
* :func:`chainerx.force_backprop_mode`
""")
| 4,923
| 32.27027
| 79
|
py
|
chainer
|
chainer-master/chainerx/_docs/__init__.py
|
import inspect
from chainerx import _core
from chainerx._docs import array
from chainerx._docs import backend
from chainerx._docs import backprop
from chainerx._docs import context
from chainerx._docs import device
from chainerx._docs import routines
from chainerx._docs import utils
def set_doc(obj, docstring):
    """Assigns *docstring* to *obj*.

    Plain Python objects take a direct ``__doc__`` assignment; callables
    generated by pybind (routines/method descriptors that are not Python
    functions) are routed through the C++ core helper instead.
    """
    callable_like = inspect.ismethoddescriptor(obj) or inspect.isroutine(obj)
    if callable_like and not inspect.isfunction(obj):
        # pybind-generated functions and methods
        _core._set_pybind_doc(obj, docstring)
    else:
        obj.__doc__ = docstring
def set_docs():
    """Invokes ``set_docs()`` on every chainerx documentation submodule."""
    submodules = (array, backend, backprop, context, device, routines, utils)
    for submodule in submodules:
        submodule.set_docs()
| 683
| 25.307692
| 74
|
py
|
chainer
|
chainer-master/chainerx/_docs/device.py
|
import chainerx
from chainerx import _docs
def _set_docs_device():
    """Register docstrings on :class:`chainerx.Device` and its members."""
    device_cls = chainerx.Device
    # (target, docstring) pairs, registered in order.
    entries = [
        (device_cls,
         """Represents a physical computing unit.
"""),
        (device_cls.synchronize,
         """Synchronizes the device.
"""),
        (device_cls.name,
         """Device name.
It is the backend name and the device index concatenated with a colon, e.g.
``native:0``.
Returns:
str: Device name.
"""),
        (device_cls.backend,
         """Backend to which this device belongs.
Returns:
~chainerx.Backend: Backend object.
"""),
        (device_cls.context,
         """Context to which this device belongs.
Returns:
~chainerx.Context: Context object.
"""),
        (device_cls.index,
         """Index of this device.
Returns:
int: Index of this device.
"""),
    ]
    for target, doc in entries:
        _docs.set_doc(target, doc)
def set_docs():
    """Set docstrings for Device members and device-selection functions."""
    _set_docs_device()
    # chainerx.get_device
    _docs.set_doc(
        chainerx.get_device,
        """get_device(*device)
Returns a device specified by the arguments.
If the argument is a single :class:`~chainerx.Device` instance, it's simply
returned.
Otherwise, there are three ways to specify a device:
.. testcode::
# Specify a backend name and a device index separately.
chainerx.get_device('native', 0)
# Specify a backend name and a device index in a single string.
chainerx.get_device('native:0')
# Specify only a backend name. In this case device index 0 is chosen.
chainerx.get_device('native')
Returns:
~chainerx.Device: Device object.
""")
    # chainerx.get_default_device
    _docs.set_doc(
        chainerx.get_default_device,
        """get_default_device()
Returns the default device associated with the current thread.
Returns:
~chainerx.Device: The default device.
.. seealso::
* :func:`chainerx.set_default_device`
* :func:`chainerx.using_device`
""")
    # chainerx.set_default_device
    _docs.set_doc(
        chainerx.set_default_device,
        """set_default_device(device)
Sets the given device as the default device of the current thread.
Args:
device (~chainerx.Device or str): Device object or device name to set as
the default device.
.. seealso::
* :func:`chainerx.get_default_device`
* :func:`chainerx.using_device`
""")
    # chainerx.using_device
    _docs.set_doc(
        chainerx.using_device,
        """using_device(device)
Creates a context manager to temporarily set the default device.
Args:
device (~chainerx.Device or str): Device object or device name to set as
the default device during the context. See :data:`chainerx.Device.name`
for the specification of device names.
.. seealso::
* :func:`chainerx.get_default_device`
* :func:`chainerx.set_default_device`
""")
| 2,706
| 20.830645
| 79
|
py
|
chainer
|
chainer-master/chainerx/math/misc.py
|
import chainerx
# TODO(sonots): Implement in C++
def clip(a, a_min, a_max):
    """Clips the values of an array to a given interval.

    Given an interval, values outside the interval are clipped to the
    interval edges. For example, if an interval of ``[0, 1]`` is specified,
    values smaller than 0 become 0, and values larger than 1 become 1.

    Args:
        a (~chainerx.ndarray): Array containing elements to clip.
        a_min (scalar): Minimum value.
        a_max (scalar): Maximum value.

    Returns:
        ~chainerx.ndarray: An array with the elements of ``a``, but where
        values < ``a_min`` are replaced with ``a_min``,
        and those > ``a_max`` with ``a_max``.

    Note:
        The :class:`~chainerx.ndarray` typed ``a_min`` and ``a_max`` are
        not supported yet.

    Note:
        During backpropagation, this function propagates the gradient
        of the output array to the input array ``a``.

    .. seealso:: :func:`numpy.clip`
    """
    # At least one bound is required; clipping with neither would be a no-op
    # and almost certainly a caller bug.
    if a_min is None and a_max is None:
        raise ValueError('Must set either a_min or a_max.')
    if a_min is not None:
        # Raise values below the lower bound up to a_min.
        a = chainerx.maximum(a, a_min)
    if a_max is not None:
        # Lower values above the upper bound down to a_max.
        a = chainerx.minimum(a, a_max)
    return a
| 1,236
| 27.767442
| 75
|
py
|
chainer
|
chainer-master/chainerx/math/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainerx/creation/from_data.py
|
import numpy
import chainerx
# TODO(sonots): Support subclassing
def asanyarray(a, dtype=None, device=None):
    """Converts an object to an array.

    Currently this is nothing more than an alias of :func:`chainerx.asarray`,
    because ChainerX has no subclasses of ndarray. The original
    :func:`numpy.asanyarray`, by contrast, passes instances of
    :class:`numpy.ndarray` subtypes through unchanged.

    .. seealso:: :func:`chainerx.asarray`, :func:`numpy.asanyarray`
    """
    return chainerx.asarray(a, dtype, device)
def fromfile(file, dtype=float, count=-1, sep='', device=None):
    """Constructs an array from data in a text or binary file.

    Currently implemented as :func:`numpy.fromfile` followed by
    :func:`chainerx.array` on the requested device.

    .. seealso:: :func:`numpy.fromfile`
    """
    host_array = numpy.fromfile(file, dtype=dtype, count=count, sep=sep)
    return chainerx.array(host_array, device=device)
def fromfunction(function, shape, **kwargs):
    """Constructs an array by executing a function over each coordinate.

    Currently implemented as :func:`numpy.fromfunction` followed by
    :func:`chainerx.array` on the requested device.

    Note:
        Keyword arguments other than ``dtype`` and ``device`` are passed on
        to ``function``.

    .. seealso:: :func:`numpy.fromfunction`
    """
    # Consume the two keywords handled here; the rest go to `function`.
    dtype = kwargs.pop('dtype', float)
    device = kwargs.pop('device', None)
    host_array = numpy.fromfunction(function, shape, dtype=dtype, **kwargs)
    return chainerx.array(host_array, device=device)
# TODO(hvy): Optimize with pre-allocated memory using count for non-native
# devices.
def fromiter(iterable, dtype, count=-1, device=None):
    """Constructs a new 1-D array from an iterable object.

    Currently implemented as :func:`numpy.fromiter` followed by
    :func:`chainerx.array` on the requested device.

    .. seealso:: :func:`numpy.fromiter`
    """
    host_array = numpy.fromiter(iterable, dtype=dtype, count=count)
    return chainerx.array(host_array, device=device)
def fromstring(string, dtype=float, count=-1, sep='', device=None):
    """Constructs a new 1-D array initialized from text data in a string.

    Currently implemented as :func:`numpy.fromstring` followed by
    :func:`chainerx.array` on the requested device.

    .. seealso:: :func:`numpy.fromstring`
    """
    # numpy.fromstring's default (binary) mode has been deprecated since
    # NumPy 1.14, so callers should always pass sep explicitly.
    # https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.fromstring.html
    host_array = numpy.fromstring(string, dtype=dtype, count=count, sep=sep)
    return chainerx.array(host_array, device=device)
def loadtxt(
        fname, dtype=float, comments='#', delimiter=None, converters=None,
        skiprows=0, usecols=None, unpack=False, ndmin=0, encoding='bytes',
        device=None):
    """Constructs an array by loading data from a text file.

    Currently implemented as :func:`numpy.loadtxt` followed by
    :func:`chainerx.array` on the requested device.

    .. seealso:: :func:`numpy.loadtxt`
    """
    host_array = numpy.loadtxt(
        fname, dtype=dtype, comments=comments, delimiter=delimiter,
        converters=converters, skiprows=skiprows, usecols=usecols,
        unpack=unpack, ndmin=ndmin, encoding=encoding)
    return chainerx.array(host_array, device=device)
| 3,445
| 30.907407
| 87
|
py
|
chainer
|
chainer-master/chainerx/creation/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/examples/test_utils.py
|
import contextlib
import os
import re
import shutil
import subprocess
import sys
import tempfile
class OutputEvaluator(object):
    """Interface for validating the stdout produced by an example run."""

    def check(self, outdata):
        """Validates raw stdout bytes; implementations raise on mismatch."""
        raise NotImplementedError()


class TemplateOutputEvaluator(OutputEvaluator):
    """Matches output against a byte template containing placeholders.

    A placeholder looks like ``{key }`` (optionally padded with spaces) and
    must have a matching ``key=(type, predicate)`` entry in *checks*.  The
    output substring at the placeholder's position is converted to *type*
    and validated with *predicate*.
    """

    def __init__(self, template, **checks):
        self.template = template
        self.checks = checks

    def check(self, outdata):
        out_lines = outdata.split(b'\n')
        # Scan the template for placeholders; map key -> (match, line index).
        placeholders = {}
        for line_idx, tmpl_line in enumerate(self.template.split(b'\n')):
            for match in re.finditer(
                    rb'{(?P<key>[_a-zA-Z0-9]+) *}', tmpl_line):
                key = match.groupdict()['key'].decode('utf8')
                assert key not in placeholders
                placeholders[key] = (match, line_idx)
        # Placeholder keys and check keys must agree exactly.
        assert set(placeholders.keys()) == set(self.checks.keys())
        # Evaluate every placeholder against the actual output.
        for key, (match, line_idx) in placeholders.items():
            out_line = out_lines[line_idx]
            typ, predicate = self.checks[key]
            begin, end = match.span()
            # The output line may be shorter than the template span.
            end = min(end, len(out_line))
            raw = out_line[begin:end]
            if typ is not float:
                raise TypeError('Invalid check type: {}'.format(typ))
            if not predicate(float(raw)):
                raise RuntimeError('Check fail: key={}'.format(key))
# Absolute path to the repository root (two levels above this file).
REPO_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# Absolute path to the examples/ directory inside the repository.
EXAMPLES_ROOT = os.path.join(REPO_ROOT, 'examples')
@contextlib.contextmanager
def tempdir(**kwargs):
    """Context manager that yields a fresh temporary directory.

    The directory and its contents are removed when the context exits.
    Keyword arguments are forwarded to :func:`tempfile.mkdtemp`.
    """
    path = tempfile.mkdtemp(**kwargs)
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextlib.contextmanager
def chdir(path):
    """Context manager that temporarily switches the working directory."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
class ReplacementFileCorrectnessError(Exception):
    """Raised when a test replacement file is out of sync with its original.

    All context arguments are keyword-only; the optional ones are included
    in the message only when provided.
    """

    def __init__(
            self,
            message,
            *,
            orig_path,
            replace_path,
            orig_line_num=None,
            replace_line_num=None,
            orig_line=None,
            replace_line=None):
        # Always report both file paths; the remaining details are optional.
        details = [
            'Original file: {}'.format(orig_path),
            'Replacement file: {}'.format(replace_path),
        ]
        if orig_line_num is not None:
            details.append(
                'Line number in the original file: {}'.format(orig_line_num))
        if replace_line_num is not None:
            details.append(
                'Line number in the replacement file: {}'.format(
                    replace_line_num))
        if orig_line is not None:
            details.append(
                'Original line: [{}]'.format(orig_line))
        if replace_line is not None:
            details.append(
                'Replacement line: [{}]\n'.format(replace_line))
        full_message = (
            'Replacement file correctness check failed: {message}\n\n'
            'If you\'re seeing this error message, it\'s likely that you '
            'edited a file within the example directory but did not edit the '
            'matching replacement file which is used for testing. '
            'Please ensure the two files are synchronized.\n\n'
            '{infos}'.format(
                message=message,
                infos='\n'.join(details)))
        super().__init__(full_message)
class ExampleRunner(object):
    """Example runner.

    A single runner can run multiple script commands.
    A runner creates a temporary directory and files in the respective example
    directory are copied there.
    All the runs are executed within the temporary directory as the current
    directory.
    """

    # Stack of entered context managers; None until __enter__ is called.
    contexts = None
    work_dir = None  # assigned on __enter__

    def __init__(self, root_dir):
        # root_dir: the example directory whose files are copied and run.
        self.root_dir = root_dir

    def __enter__(self):
        """Create the temporary work directory, chdir into it, populate it."""
        contexts = []
        # Create a temporary directory.
        tempd = tempdir()
        work_dir = tempd.__enter__()
        contexts.append(tempd)
        # Change the current directory.
        chdir_ = chdir(work_dir)
        chdir_.__enter__()
        contexts.append(chdir_)
        self.work_dir = work_dir
        self.contexts = contexts
        # Initialize the work directory.
        self._init_work_dir()
        return self

    def __exit__(self, typ, value, traceback):
        # Unwind the contexts in reverse order of entry (chdir, then rmtree).
        for c in reversed(self.contexts):
            c.__exit__(typ, value, traceback)
        self.contexts = None

    def _init_work_dir(self):
        """Populate the work directory with the example's files.

        Copies files in the directory.
        If a replacement file exists for each file, copy it instead of the
        original file.
        Correctness of the replacement files are also checked.
        """
        root_dir = self.root_dir
        work_dir = self.work_dir
        assert os.path.isdir(root_dir), root_dir
        assert os.path.isdir(work_dir), work_dir
        replace_dir = os.path.join(root_dir, '.testdata', 'replacements')
        for dirpath, dirnames, filenames in os.walk(root_dir):
            # Skip directories starting with '.'.
            dirnames[:] = [dn for dn in dirnames if not dn.startswith('.')]
            if dirpath == root_dir:
                dir_relpath = ''
            else:
                dir_relpath = os.path.relpath(dirpath, root_dir)
            for filename in filenames:
                relpath = os.path.join(dir_relpath, filename)
                orig_path = os.path.join(root_dir, relpath)
                dst_path = os.path.join(work_dir, relpath)
                # Check to see if the replace file exists.
                replace_path = os.path.join(replace_dir, relpath)
                if os.path.isfile(replace_path):
                    # The replace file exists: check correctness of the file
                    # comparing with the original file.
                    self._check_replace_file_correct(orig_path, replace_path)
                    # Copy the replace file.
                    self._copyfile(replace_path, dst_path)
                else:
                    # The replace file does not exist: copy the original file.
                    self._copyfile(orig_path, dst_path)

    def _copyfile(self, src_path, dst_path):
        """Copy src to dst, creating the destination directory if needed."""
        dirpath = os.path.dirname(dst_path)
        if not os.path.isdir(dirpath):
            os.makedirs(dirpath)
        shutil.copyfile(src_path, dst_path)

    def _check_replace_file_correct(self, orig_path, replace_path):
        """Checks correctness of the replacement file comparing with the
        original file.

        The replacement file must be line-for-line identical to the original
        except for extra regions delimited by the BEGIN/END marker comments
        below, which are skipped during the comparison.
        """
        MARKER_BEGIN = '# BEGIN ADDITIONAL TEST CODE'
        MARKER_END = '# END ADDITIONAL TEST CODE'
        # Read lines from both files.
        with open(orig_path, 'r') as orig_file:
            orig_lines = orig_file.readlines()
        with open(replace_path, 'r') as replace_file:
            replace_lines = replace_file.readlines()
        j = 0  # line number (replace)
        for i, orig_line in enumerate(orig_lines):
            if len(replace_lines) <= j:
                raise ReplacementFileCorrectnessError(
                    'Replacement file has less lines than the original.',
                    orig_path=orig_path,
                    replace_path=replace_path)
            replace_line = replace_lines[j]
            j += 1
            # Check if the line is a starting marker comment.
            # Marker line can start with arbitrary number of spaces (indent).
            if (replace_line.endswith(MARKER_BEGIN + '\n')
                    and all(' ' == c
                            for c in replace_line[:-len(MARKER_BEGIN)-1])):
                # Starting marker is found: find the corresponding ending
                # marker and retrieve the next line.
                indent_count = len(replace_line) - len(MARKER_BEGIN) - 1
                # The ending marker must carry the same indentation.
                end_marker_line = ' ' * indent_count + MARKER_END + '\n'
                j_ = j
                while True:
                    if len(replace_lines) <= j_:
                        raise ReplacementFileCorrectnessError(
                            'Matching ending marker could not be found in a '
                            'replacement file.',
                            orig_path=orig_path,
                            replace_path=replace_path,
                            orig_line_num=i,
                            replace_line_num=j,
                            orig_line=orig_line.rstrip('\n'),
                            replace_line=replace_line.rstrip('\n'))
                    if replace_lines[j_] == end_marker_line:
                        break
                    j_ += 1
                # Resume comparison just after the ending marker.
                j = j_ + 1
                replace_line = replace_lines[j]
                j += 1
            # Compare the next non-marked lines.
            if orig_line != replace_line:
                raise ReplacementFileCorrectnessError(
                    'Line mismatch between the original and the replacement '
                    'file.',
                    orig_path=orig_path,
                    replace_path=replace_path,
                    orig_line_num=i,
                    replace_line_num=j,
                    orig_line=orig_line.rstrip('\n'),
                    replace_line=replace_line.rstrip('\n'))
        if j != len(replace_lines):
            raise ReplacementFileCorrectnessError(
                'Replacement file has more lines than the original.',
                orig_path=orig_path,
                replace_path=replace_path)

    def run(self, script_name, args, *, output_evaluator=None):
        """Runs a command.

        Executes ``script_name`` (relative to the work directory) with the
        current Python interpreter and ``args``.  Raises RuntimeError when
        the script exits non-zero or when *output_evaluator* rejects its
        stdout.
        """
        assert self.contexts is not None, (
            '__enter__ has not been called on the example runner.')
        assert isinstance(script_name, str), type(script_name)
        assert isinstance(args, list), type(args)
        assert (output_evaluator is None
                or isinstance(output_evaluator, OutputEvaluator)), (
            type(output_evaluator))
        work_dir = self.work_dir
        script_path = os.path.join(work_dir, script_name)
        assert os.path.isfile(script_path), script_path
        command = [
            sys.executable,
            script_path] + args
        # Run the command.
        proc = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        stdoutdata, stderrdata = proc.communicate()

        def fail(msg):
            # Wrap the failure with the full command line and both streams.
            err_fmt = '''\
{message}
== command ==
{command}
== stdout ==
{stdout}
== stderr ==
{stderr}
'''
            err = err_fmt.format(
                message=msg,
                command=' '.join(command),
                stdout=stdoutdata.decode('utf8'),
                stderr=stderrdata.decode('utf8'))
            raise RuntimeError(err)
        if proc.returncode != 0:
            fail('Script exited with {}.'.format(proc.returncode))
        if output_evaluator is not None:
            try:
                output_evaluator.check(stdoutdata)
            except RuntimeError as e:
                fail(
                    'Script output does not meet expectation:\n'
                    '{}'.format(e))
| 11,391
| 33.416918
| 78
|
py
|
chainer
|
chainer-master/examples/dcgan/updater.py
|
#!/usr/bin/env python
import chainer
import chainer.functions as F
from chainer import Variable
class DCGANUpdater(chainer.training.updaters.StandardUpdater):
    """StandardUpdater that alternates discriminator and generator updates.

    Expects ``models=(generator, discriminator)`` among the keyword
    arguments in addition to the usual StandardUpdater arguments, and uses
    the optimizers registered under the names ``'gen'`` and ``'dis'``.
    """

    def __init__(self, *args, **kwargs):
        # Extract the (generator, discriminator) pair before the remaining
        # arguments are forwarded to StandardUpdater.
        self.gen, self.dis = kwargs.pop('models')
        super(DCGANUpdater, self).__init__(*args, **kwargs)

    def loss_dis(self, dis, y_fake, y_real):
        """Discriminator loss.

        Averages ``softplus(-y_real) + softplus(y_fake)`` over the batch;
        minimizing it pushes real scores up and fake scores down.
        """
        batchsize = len(y_fake)
        L1 = F.sum(F.softplus(-y_real)) / batchsize
        L2 = F.sum(F.softplus(y_fake)) / batchsize
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss

    def loss_gen(self, gen, y_fake):
        """Generator loss: mean ``softplus(-y_fake)``; minimizing it pushes
        the discriminator's score for generated samples up."""
        batchsize = len(y_fake)
        loss = F.sum(F.softplus(-y_fake)) / batchsize
        chainer.report({'loss': loss}, gen)
        return loss

    def update_core(self):
        """Run one optimization step for the discriminator and generator."""
        gen_optimizer = self.get_optimizer('gen')
        dis_optimizer = self.get_optimizer('dis')
        batch = self.get_iterator('main').next()
        device = self.device
        # Scale pixel values by 1/255 (presumably 8-bit images mapped to
        # [0, 1] — confirm against the dataset loader).
        x_real = Variable(self.converter(batch, device)) / 255.
        gen, dis = self.gen, self.dis
        batchsize = len(batch)
        y_real = dis(x_real)
        # Sample latent vectors and generate fake images.
        z = Variable(device.xp.asarray(gen.make_hidden(batchsize)))
        x_fake = gen(z)
        y_fake = dis(x_fake)
        # Update the discriminator first, then the generator on the same
        # fake scores.
        dis_optimizer.update(self.loss_dis, dis, y_fake, y_real)
        gen_optimizer.update(self.loss_gen, gen, y_fake)
| 1,385
| 29.130435
| 67
|
py
|
chainer
|
chainer-master/examples/dcgan/net.py
|
#!/usr/bin/env python
import numpy
import chainer
import chainer.functions as F
import chainer.links as L
import chainerx
def add_noise(device, h, sigma=0.2):
if chainer.config.train:
xp = device.xp
# TODO(niboshi): Support random.randn in ChainerX
if device.xp is chainerx:
fallback_device = device.fallback_device
with chainer.using_device(fallback_device):
randn = device.send(fallback_device.xp.random.randn(*h.shape))
else:
randn = xp.random.randn(*h.shape)
return h + sigma * randn
else:
return h
class Generator(chainer.Chain):
def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
super(Generator, self).__init__()
self.n_hidden = n_hidden
self.ch = ch
self.bottom_width = bottom_width
with self.init_scope():
w = chainer.initializers.Normal(wscale)
self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
initialW=w)
self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
self.bn1 = L.BatchNormalization(ch // 2)
self.bn2 = L.BatchNormalization(ch // 4)
self.bn3 = L.BatchNormalization(ch // 8)
def make_hidden(self, batchsize):
dtype = chainer.get_dtype()
return numpy.random.uniform(-1, 1, (batchsize, self.n_hidden, 1, 1))\
.astype(dtype)
def forward(self, z):
h = F.reshape(F.relu(self.bn0(self.l0(z))),
(len(z), self.ch, self.bottom_width, self.bottom_width))
h = F.relu(self.bn1(self.dc1(h)))
h = F.relu(self.bn2(self.dc2(h)))
h = F.relu(self.bn3(self.dc3(h)))
x = F.sigmoid(self.dc4(h))
return x
class Discriminator(chainer.Chain):
def __init__(self, bottom_width=4, ch=512, wscale=0.02):
w = chainer.initializers.Normal(wscale)
super(Discriminator, self).__init__()
with self.init_scope():
self.c0_0 = L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w)
self.c0_1 = L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w)
self.c1_0 = L.Convolution2D(ch // 4, ch // 4, 3, 1, 1, initialW=w)
self.c1_1 = L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w)
self.c2_0 = L.Convolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=w)
self.c2_1 = L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w)
self.c3_0 = L.Convolution2D(ch // 1, ch // 1, 3, 1, 1, initialW=w)
self.l4 = L.Linear(bottom_width * bottom_width * ch, 1, initialW=w)
self.bn0_1 = L.BatchNormalization(ch // 4, use_gamma=False)
self.bn1_0 = L.BatchNormalization(ch // 4, use_gamma=False)
self.bn1_1 = L.BatchNormalization(ch // 2, use_gamma=False)
self.bn2_0 = L.BatchNormalization(ch // 2, use_gamma=False)
self.bn2_1 = L.BatchNormalization(ch // 1, use_gamma=False)
self.bn3_0 = L.BatchNormalization(ch // 1, use_gamma=False)
def forward(self, x):
device = self.device
h = add_noise(device, x)
h = F.leaky_relu(add_noise(device, self.c0_0(h)))
h = F.leaky_relu(add_noise(device, self.bn0_1(self.c0_1(h))))
h = F.leaky_relu(add_noise(device, self.bn1_0(self.c1_0(h))))
h = F.leaky_relu(add_noise(device, self.bn1_1(self.c1_1(h))))
h = F.leaky_relu(add_noise(device, self.bn2_0(self.c2_0(h))))
h = F.leaky_relu(add_noise(device, self.bn2_1(self.c2_1(h))))
h = F.leaky_relu(add_noise(device, self.bn3_0(self.c3_0(h))))
return self.l4(h)
| 4,005
| 42.075269
| 79
|
py
|
chainer
|
chainer-master/examples/dcgan/train_dcgan.py
|
#!/usr/bin/env python
import argparse
import os
import warnings
import numpy
import chainer
from chainer import training
from chainer.training import extensions
from net import Discriminator
from net import Generator
from updater import DCGANUpdater
from visualize import out_generated_image
def main():
parser = argparse.ArgumentParser(description='Chainer example: DCGAN')
parser.add_argument('--batchsize', '-b', type=int, default=50,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=1000,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--dataset', '-i', default='',
help='Directory of image files. Default is cifar-10.')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', type=str,
help='Resume the training from snapshot')
parser.add_argument('--n_hidden', '-n', type=int, default=100,
help='Number of hidden units (z)')
parser.add_argument('--seed', type=int, default=0,
help='Random seed of z at visualization stage')
parser.add_argument('--snapshot_interval', type=int, default=1000,
help='Interval of snapshot')
parser.add_argument('--display_interval', type=int, default=100,
help='Interval of displaying log to console')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
device.use()
print('Device: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# n_hidden: {}'.format(args.n_hidden))
print('# epoch: {}'.format(args.epoch))
print('')
# Set up a neural network to train
gen = Generator(n_hidden=args.n_hidden)
dis = Discriminator()
gen.to_device(device) # Copy the model to the device
dis.to_device(device)
# Setup an optimizer
def make_optimizer(model, alpha=0.0002, beta1=0.5):
optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
optimizer.setup(model)
optimizer.add_hook(
chainer.optimizer_hooks.WeightDecay(0.0001), 'hook_dec')
return optimizer
opt_gen = make_optimizer(gen)
opt_dis = make_optimizer(dis)
if args.dataset == '':
# Load the CIFAR10 dataset if args.dataset is not specified
train, _ = chainer.datasets.get_cifar10(withlabel=False, scale=255.)
else:
all_files = os.listdir(args.dataset)
image_files = [f for f in all_files if ('png' in f or 'jpg' in f)]
print('{} contains {} image files'
.format(args.dataset, len(image_files)))
train = chainer.datasets\
.ImageDataset(paths=image_files, root=args.dataset)
# Setup an iterator
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
# Setup an updater
updater = DCGANUpdater(
models=(gen, dis),
iterator=train_iter,
optimizer={
'gen': opt_gen, 'dis': opt_dis},
device=device)
# Setup a trainer
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
snapshot_interval = (args.snapshot_interval, 'iteration')
display_interval = (args.display_interval, 'iteration')
trainer.extend(
extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(
gen, 'gen_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(
dis, 'dis_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
trainer.extend(extensions.LogReport(trigger=display_interval))
trainer.extend(extensions.PrintReport([
'epoch', 'iteration', 'gen/loss', 'dis/loss',
]), trigger=display_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.extend(
out_generated_image(
gen, dis,
10, 10, args.seed, args.out),
trigger=snapshot_interval)
if args.resume is not None:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
if __name__ == '__main__':
main()
| 5,172
| 37.318519
| 79
|
py
|
chainer
|
chainer-master/examples/dcgan/visualize.py
|
#!/usr/bin/env python
import os
import numpy as np
from PIL import Image
import chainer
import chainer.backends.cuda
from chainer import Variable
def out_generated_image(gen, dis, rows, cols, seed, dst):
@chainer.training.make_extension()
def make_image(trainer):
np.random.seed(seed)
n_images = rows * cols
xp = gen.xp
z = Variable(xp.asarray(gen.make_hidden(n_images)))
with chainer.using_config('train', False):
x = gen(z)
x = chainer.backends.cuda.to_cpu(x.array)
np.random.seed()
x = np.asarray(np.clip(x * 255, 0.0, 255.0), dtype=np.uint8)
_, _, H, W = x.shape
x = x.reshape((rows, cols, 3, H, W))
x = x.transpose(0, 3, 1, 4, 2)
x = x.reshape((rows * H, cols * W, 3))
preview_dir = '{}/preview'.format(dst)
preview_path = preview_dir +\
'/image{:0>8}.png'.format(trainer.updater.iteration)
if not os.path.exists(preview_dir):
os.makedirs(preview_dir)
Image.fromarray(x).save(preview_path)
return make_image
| 1,096
| 27.868421
| 68
|
py
|
chainer
|
chainer-master/examples/reinforcement_learning/dqn_cartpole.py
|
#!/usr/bin/env python
"""Example code of DQN and DoubleDQN on OpenAI Gym environments.
For DQN, see: https://www.nature.com/articles/nature14236
For DoubleDQN, see: https://arxiv.org/abs/1509.06461
"""
from __future__ import division
import argparse
import collections
import copy
import random
import warnings
import gym
import numpy as np
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import optimizers
class QFunction(chainer.Chain):
"""Q-function represented by a MLP."""
def __init__(self, obs_size, n_actions, n_units=100):
super(QFunction, self).__init__()
with self.init_scope():
self.l0 = L.Linear(obs_size, n_units)
self.l1 = L.Linear(n_units, n_units)
self.l2 = L.Linear(n_units, n_actions)
def forward(self, x):
"""Compute Q-values of actions for given observations."""
h = F.relu(self.l0(x))
h = F.relu(self.l1(h))
return self.l2(h)
def get_greedy_action(Q, obs):
"""Get a greedy action wrt a given Q-function."""
dtype = chainer.get_dtype()
obs = Q.xp.asarray(obs[None], dtype=dtype)
with chainer.no_backprop_mode():
q = Q(obs).array[0]
return int(q.argmax())
def mean_clipped_loss(y, t):
return F.mean(F.huber_loss(y, t, delta=1.0, reduce='no'))
def update(Q, target_Q, opt, samples, gamma=0.99, target_type='double_dqn'):
"""Update a Q-function with given samples and a target Q-function."""
dtype = chainer.get_dtype()
xp = Q.xp
obs = xp.asarray([sample[0] for sample in samples], dtype=dtype)
action = xp.asarray([sample[1] for sample in samples], dtype=np.int32)
reward = xp.asarray([sample[2] for sample in samples], dtype=dtype)
done = xp.asarray([sample[3] for sample in samples], dtype=dtype)
obs_next = xp.asarray([sample[4] for sample in samples], dtype=dtype)
# Predicted values: Q(s,a)
y = F.select_item(Q(obs), action)
# Target values: r + gamma * max_b Q(s',b)
with chainer.no_backprop_mode():
if target_type == 'dqn':
next_q = F.max(target_Q(obs_next), axis=1)
elif target_type == 'double_dqn':
next_q = F.select_item(target_Q(obs_next),
F.argmax(Q(obs_next), axis=1))
else:
raise ValueError('Unsupported target_type: {}'.format(target_type))
target = reward + gamma * (1 - done) * next_q
loss = mean_clipped_loss(y, target)
Q.cleargrads()
loss.backward()
opt.update()
def main():
parser = argparse.ArgumentParser(description='Chainer example: DQN')
parser.add_argument('--env', type=str, default='CartPole-v0',
help='Name of the OpenAI Gym environment')
parser.add_argument('--batch-size', '-b', type=int, default=64,
help='Number of transitions in each mini-batch')
parser.add_argument('--episodes', '-e', type=int, default=1000,
help='Number of episodes to run')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='dqn_result',
help='Directory to output the result')
parser.add_argument('--unit', '-u', type=int, default=100,
help='Number of units')
parser.add_argument('--target-type', type=str, default='dqn',
help='Target type', choices=['dqn', 'double_dqn'])
parser.add_argument('--reward-scale', type=float, default=1e-2,
help='Reward scale factor')
parser.add_argument('--replay-start-size', type=int, default=500,
help=('Number of iterations after which replay is '
'started'))
parser.add_argument('--iterations-to-decay-epsilon', type=int,
default=5000,
help='Number of steps used to linearly decay epsilon')
parser.add_argument('--min-epsilon', type=float, default=0.01,
help='Minimum value of epsilon')
parser.add_argument('--target-update-freq', type=int, default=100,
help='Frequency of target network update')
parser.add_argument('--record', action='store_true', default=True,
help='Record performance')
parser.add_argument('--no-record', action='store_false', dest='record')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == np.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
device.use()
# Initialize an environment
env = gym.make(args.env)
assert isinstance(env.observation_space, gym.spaces.Box)
assert isinstance(env.action_space, gym.spaces.Discrete)
obs_size = env.observation_space.low.size
n_actions = env.action_space.n
if args.record:
env = gym.wrappers.Monitor(env, args.out, force=True)
reward_threshold = env.spec.reward_threshold
if reward_threshold is not None:
print('{} defines "solving" as getting average reward of {} over 100 '
'consecutive trials.'.format(args.env, reward_threshold))
else:
print('{} is an unsolved environment, which means it does not have a '
'specified reward threshold at which it\'s considered '
'solved.'.format(args.env))
# Initialize variables
D = collections.deque(maxlen=10 ** 6) # Replay buffer
Rs = collections.deque(maxlen=100) # History of returns
iteration = 0
# Initialize a model and its optimizer
Q = QFunction(obs_size, n_actions, n_units=args.unit)
Q.to_device(device)
target_Q = copy.deepcopy(Q)
opt = optimizers.Adam(eps=1e-2)
opt.setup(Q)
for episode in range(args.episodes):
obs = env.reset()
done = False
R = 0.0 # Return (sum of rewards obtained in an episode)
timestep = 0
while not done and timestep < env.spec.timestep_limit:
# Epsilon is linearly decayed
epsilon = 1.0 if len(D) < args.replay_start_size else \
max(args.min_epsilon,
np.interp(
iteration,
[0, args.iterations_to_decay_epsilon],
[1.0, args.min_epsilon]))
# Select an action epsilon-greedily
if np.random.rand() < epsilon:
action = env.action_space.sample()
else:
action = get_greedy_action(Q, obs)
# Execute an action
new_obs, reward, done, _ = env.step(action)
R += reward
# Store a transition
D.append((obs, action, reward * args.reward_scale, done, new_obs))
obs = new_obs
# Sample a random minibatch of transitions and replay
if len(D) >= args.replay_start_size:
sample_indices = random.sample(range(len(D)), args.batch_size)
samples = [D[i] for i in sample_indices]
update(Q, target_Q, opt, samples, target_type=args.target_type)
# Update the target network
if iteration % args.target_update_freq == 0:
target_Q = copy.deepcopy(Q)
iteration += 1
timestep += 1
Rs.append(R)
average_R = np.mean(Rs)
print('episode: {} iteration: {} R: {} average_R: {}'.format(
episode, iteration, R, average_R))
if reward_threshold is not None and average_R >= reward_threshold:
print('Solved {} by getting average reward of '
'{} >= {} over 100 consecutive episodes.'.format(
args.env, average_R, reward_threshold))
break
if __name__ == '__main__':
main()
| 8,420
| 38.167442
| 79
|
py
|
chainer
|
chainer-master/examples/reinforcement_learning/ddpg_pendulum.py
|
#!/usr/bin/env python
"""Example code of DDPG on OpenAI Gym environments.
For DDPG, see: https://arxiv.org/abs/1509.02971
"""
from __future__ import division
import argparse
import collections
import copy
import random
import warnings
import gym
import numpy as np
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import optimizers
class QFunction(chainer.Chain):
"""Q-function represented by a MLP."""
def __init__(self, obs_size, action_size, n_units=100):
super(QFunction, self).__init__()
with self.init_scope():
self.l0 = L.Linear(obs_size + action_size, n_units)
self.l1 = L.Linear(n_units, n_units)
self.l2 = L.Linear(n_units, 1,
initialW=chainer.initializers.HeNormal(1e-3))
def forward(self, obs, action):
"""Compute Q-values for given state-action pairs."""
x = F.concat((obs, action), axis=1)
h = F.relu(self.l0(x))
h = F.relu(self.l1(h))
return self.l2(h)
def squash(x, low, high):
"""Squash values to fit [low, high] via tanh."""
center = (high + low) / 2
scale = (high - low) / 2
return F.tanh(x) * scale + center
class Policy(chainer.Chain):
"""Policy represented by a MLP."""
def __init__(self, obs_size, action_size, action_low, action_high,
n_units=100):
super(Policy, self).__init__()
self.action_high = action_high
self.action_low = action_low
with self.init_scope():
self.l0 = L.Linear(obs_size, n_units)
self.l1 = L.Linear(n_units, n_units)
self.l2 = L.Linear(n_units, action_size,
initialW=chainer.initializers.HeNormal(1e-3))
def forward(self, x):
"""Compute actions for given observations."""
h = F.relu(self.l0(x))
h = F.relu(self.l1(h))
return squash(self.l2(h),
self.xp.asarray(self.action_low),
self.xp.asarray(self.action_high))
def get_action(policy, obs):
"""Get an action by evaluating a given policy."""
dtype = chainer.get_dtype()
obs = policy.xp.asarray(obs[None], dtype=dtype)
with chainer.no_backprop_mode():
action = policy(obs).array[0]
return chainer.backends.cuda.to_cpu(action)
def update(Q, target_Q, policy, target_policy, opt_Q, opt_policy,
samples, gamma=0.99):
"""Update a Q-function and a policy."""
dtype = chainer.get_dtype()
xp = Q.xp
obs = xp.asarray([sample[0] for sample in samples], dtype=dtype)
action = xp.asarray([sample[1] for sample in samples], dtype=dtype)
reward = xp.asarray([sample[2] for sample in samples], dtype=dtype)
done = xp.asarray([sample[3] for sample in samples], dtype=dtype)
obs_next = xp.asarray([sample[4] for sample in samples], dtype=dtype)
def update_Q():
# Predicted values: Q(s,a)
y = F.squeeze(Q(obs, action), axis=1)
# Target values: r + gamma * Q(s,policy(s))
with chainer.no_backprop_mode():
next_q = F.squeeze(target_Q(obs_next, target_policy(obs_next)),
axis=1)
target = reward + gamma * (1 - done) * next_q
loss = F.mean_squared_error(y, target)
Q.cleargrads()
loss.backward()
opt_Q.update()
def update_policy():
# Maximize Q(s,policy(s))
q = Q(obs, policy(obs))
q = q[:] # Avoid https://github.com/chainer/chainer/issues/2744
loss = - F.mean(q)
policy.cleargrads()
loss.backward()
opt_policy.update()
update_Q()
update_policy()
def soft_copy_params(source, target, tau):
"""Make the parameters of a link close to the ones of another link.
Making tau close to 0 slows the pace of updates, and close to 1 might lead
to faster, but more volatile updates.
"""
# Sort params by name
source_params = [param for _, param in sorted(source.namedparams())]
target_params = [param for _, param in sorted(target.namedparams())]
for s, t in zip(source_params, target_params):
t.array[:] += tau * (s.array - t.array)
def main():
parser = argparse.ArgumentParser(description='Chainer example: DDPG')
parser.add_argument('--env', type=str, default='Pendulum-v0',
help='Name of the OpenAI Gym environment')
parser.add_argument('--batch-size', '-b', type=int, default=64,
help='Number of transitions in each mini-batch')
parser.add_argument('--episodes', '-e', type=int, default=1000,
help='Number of episodes to run')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='ddpg_result',
help='Directory to output the result')
parser.add_argument('--unit', '-u', type=int, default=100,
help='Number of units')
parser.add_argument('--reward-scale', type=float, default=1e-3,
help='Reward scale factor')
parser.add_argument('--replay-start-size', type=int, default=500,
help=('Number of iterations after which replay is '
'started'))
parser.add_argument('--tau', type=float, default=1e-2,
help='Softness of soft target update (0, 1]')
parser.add_argument('--noise-scale', type=float, default=0.4,
help='Scale of additive Gaussian noises')
parser.add_argument('--record', action='store_true', default=True,
help='Record performance')
parser.add_argument('--no-record', action='store_false', dest='record')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == np.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
device.use()
# Initialize an environment
env = gym.make(args.env)
assert isinstance(env.observation_space, gym.spaces.Box)
assert isinstance(env.action_space, gym.spaces.Box)
obs_size = env.observation_space.low.size
action_size = env.action_space.low.size
if args.record:
env = gym.wrappers.Monitor(env, args.out, force=True)
reward_threshold = env.spec.reward_threshold
if reward_threshold is not None:
print('{} defines "solving" as getting average reward of {} over 100 '
'consecutive trials.'.format(args.env, reward_threshold))
else:
print('{} is an unsolved environment, which means it does not have a '
'specified reward threshold at which it\'s considered '
'solved.'.format(args.env))
# Initialize variables
D = collections.deque(maxlen=10 ** 6) # Replay buffer
Rs = collections.deque(maxlen=100) # History of returns
iteration = 0
# Initialize models and optimizers
Q = QFunction(obs_size, action_size, n_units=args.unit)
policy = Policy(obs_size, action_size,
env.action_space.low, env.action_space.high,
n_units=args.unit)
Q.to_device(device)
policy.to_device(device)
target_Q = copy.deepcopy(Q)
target_policy = copy.deepcopy(policy)
opt_Q = optimizers.Adam(eps=1e-5) # Use larger eps in case of FP16 mode
opt_Q.setup(Q)
opt_policy = optimizers.Adam(alpha=1e-4)
opt_policy.setup(policy)
for episode in range(args.episodes):
obs = env.reset()
done = False
R = 0.0 # Return (sum of rewards obtained in an episode)
timestep = 0
while not done and timestep < env.spec.timestep_limit:
# Select an action with additive noises for exploration
action = (get_action(policy, obs) +
np.random.normal(scale=args.noise_scale))
# Execute an action
new_obs, reward, done, _ = env.step(
np.clip(action, env.action_space.low, env.action_space.high))
R += reward
# Store a transition
D.append((obs, action, reward * args.reward_scale, done, new_obs))
obs = new_obs
# Sample a random minibatch of transitions and replay
if len(D) >= args.replay_start_size:
sample_indices = random.sample(range(len(D)), args.batch_size)
samples = [D[i] for i in sample_indices]
update(Q, target_Q, policy, target_policy,
opt_Q, opt_policy, samples)
# Soft update of the target networks
soft_copy_params(Q, target_Q, args.tau)
soft_copy_params(policy, target_policy, args.tau)
iteration += 1
timestep += 1
Rs.append(R)
average_R = np.mean(Rs)
print('episode: {} iteration: {} R:{} average_R:{}'.format(
episode, iteration, R, average_R))
if reward_threshold is not None and average_R >= reward_threshold:
print('Solved {} by getting average reward of '
'{} >= {} over 100 consecutive episodes.'.format(
args.env, average_R, reward_threshold))
break
if __name__ == '__main__':
main()
| 9,863
| 37.084942
| 78
|
py
|
chainer
|
chainer-master/examples/ptb/train_ptb_custom_loop.py
|
#!/usr/bin/env python
"""Sample script of recurrent neural network language model.
This code is ported from the following implementation written in Torch.
https://github.com/tomsercu/lstm
This code is a custom loop version of train_ptb.py. That is, we train
models without using the Trainer class in chainer and instead write a
training loop that manually computes the loss of minibatches and
applies an optimizer to update the model.
"""
import argparse
import os
import sys
import numpy as np
import chainer
from chainer import configuration
from chainer.dataset import convert
import chainer.links as L
from chainer import serializers
import chainerx
import train_ptb
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--batchsize', '-b', type=int, default=20,
help='Number of examples in each mini-batch')
parser.add_argument('--bproplen', '-l', type=int, default=35,
help='Number of words in each mini-batch '
'(= length of truncated BPTT)')
parser.add_argument('--epoch', '-e', type=int, default=39,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--gradclip', '-c', type=float, default=5,
help='Gradient norm threshold to clip')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', type=str,
help='Directory that has `rnnln.model`'
' and `rnnlm.state`')
parser.add_argument('--test', action='store_true',
help='Use tiny datasets for quick tests')
parser.set_defaults(test=False)
parser.add_argument('--unit', '-u', type=int, default=650,
help='Number of LSTM units in each layer')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
if device.xp is chainerx:
sys.stderr.write('This example does not support ChainerX devices.\n')
sys.exit(1)
device.use()
def evaluate(model, iter):
# Evaluation routine to be used for validation and test.
evaluator = model.copy() # to use different state
evaluator.predictor.reset_state() # initialize state
sum_perp = 0
data_count = 0
# Enable evaluation mode.
with configuration.using_config('train', False):
# This is optional but can reduce computational overhead.
with chainer.using_config('enable_backprop', False):
iter.reset()
for batch in iter:
x, t = convert.concat_examples(batch, device)
loss = evaluator(x, t)
sum_perp += loss.array
data_count += 1
return np.exp(float(sum_perp) / data_count)
# Load the Penn Tree Bank long word sequence dataset
train, val, test = chainer.datasets.get_ptb_words()
n_vocab = max(train) + 1 # train is just an array of integers
print('#vocab = {}'.format(n_vocab))
if args.test:
train = train[:100]
val = val[:100]
test = test[:100]
# Create the dataset iterators
train_iter = train_ptb.ParallelSequentialIterator(train, args.batchsize)
val_iter = train_ptb.ParallelSequentialIterator(val, 1, repeat=False)
test_iter = train_ptb.ParallelSequentialIterator(test, 1, repeat=False)
# Prepare an RNNLM model
rnn = train_ptb.RNNForLM(n_vocab, args.unit)
model = L.Classifier(rnn)
model.compute_accuracy = False # we only want the perplexity
model.to_device(device)
# Set up an optimizer
optimizer = chainer.optimizers.SGD(lr=1.0)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.GradientClipping(args.gradclip))
# Load model and optimizer
if args.resume is not None:
resume = args.resume
if os.path.exists(resume):
serializers.load_npz(os.path.join(resume, 'rnnlm.model'), model)
serializers.load_npz(
os.path.join(resume, 'rnnlm.state'), optimizer)
else:
raise ValueError(
'`args.resume` ("{}") is specified,'
' but it does not exist'.format(resume)
)
sum_perp = 0
count = 0
iteration = 0
while train_iter.epoch < args.epoch:
loss = 0
iteration += 1
# Progress the dataset iterator for bprop_len words at each iteration.
for i in range(args.bproplen):
# Get the next batch (a list of tuples of two word IDs)
batch = train_iter.__next__()
# Concatenate the word IDs to matrices and send them to the device
# self.converter does this job
# (it is chainer.dataset.concat_examples by default)
x, t = convert.concat_examples(batch, device)
# Compute the loss at this time step and accumulate it
loss += optimizer.target(x, t)
count += 1
sum_perp += loss.array
optimizer.target.cleargrads() # Clear the parameter gradients
loss.backward() # Backprop
loss.unchain_backward() # Truncate the graph
optimizer.update() # Update the parameters
if iteration % 20 == 0:
print('iteration: {}'.format(iteration))
print('training perplexity: {}'.format(
np.exp(float(sum_perp) / count)))
sum_perp = 0
count = 0
if train_iter.is_new_epoch:
print('epoch: {}'.format(train_iter.epoch))
print('validation perplexity: {}'.format(
evaluate(model, val_iter)))
# Evaluate on test dataset
print('test')
test_perp = evaluate(model, test_iter)
print('test perplexity: {}'.format(test_perp))
# Save the model and the optimizer
out = args.out
if not os.path.exists(out):
os.makedirs(out)
print('save the model')
serializers.save_npz(os.path.join(out, 'rnnlm.model'), model)
print('save the optimizer')
serializers.save_npz(os.path.join(out, 'rnnlm.state'), optimizer)
if __name__ == '__main__':
main()
| 6,844
| 37.672316
| 78
|
py
|
chainer
|
chainer-master/examples/ptb/train_ptb.py
|
#!/usr/bin/env python
"""Sample script of recurrent neural network language model.
This code is ported from the following implementation written in Torch.
https://github.com/tomsercu/lstm
Note for contributors:
This example code is referred to from the "RNN Language Models" tutorial.
If this file is to be modified, please also update the line numbers in
`docs/source/examples/ptb.rst` accordingly.
"""
from __future__ import division
import argparse
import sys
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainerx
# Definition of a recurrent net for language modeling
class RNNForLM(chainer.Chain):
def __init__(self, n_vocab, n_units):
super(RNNForLM, self).__init__()
with self.init_scope():
self.embed = L.EmbedID(n_vocab, n_units)
self.l1 = L.LSTM(n_units, n_units)
self.l2 = L.LSTM(n_units, n_units)
self.l3 = L.Linear(n_units, n_vocab)
for param in self.params():
param.array[...] = np.random.uniform(-0.1, 0.1, param.shape)
def reset_state(self):
self.l1.reset_state()
self.l2.reset_state()
def forward(self, x):
h0 = self.embed(x)
h1 = self.l1(F.dropout(h0))
h2 = self.l2(F.dropout(h1))
y = self.l3(F.dropout(h2))
return y
# Dataset iterator to create a batch of sequences at different positions.
# This iterator returns a pair of current words and the next words. Each
# example is a part of sequences starting from the different offsets
# equally spaced within the whole sequence.
class ParallelSequentialIterator(chainer.dataset.Iterator):
    """Iterate ``batch_size`` equally spaced positions of one long sequence.

    Each ``__next__`` call yields a list of ``(current word, next word)``
    pairs, one pair per position; all positions advance by one word per call.
    """
    def __init__(self, dataset, batch_size, repeat=True):
        super(ParallelSequentialIterator, self).__init__()
        self.dataset = dataset
        self.batch_size = batch_size  # batch size
        self.repeat = repeat
        length = len(dataset)
        # Offsets maintain the position of each sequence in the mini-batch.
        self.offsets = [i * length // batch_size for i in range(batch_size)]
        self.reset()
    def reset(self):
        """Rewind the iterator to its initial state."""
        # Number of completed sweeps over the dataset. In this case, it is
        # incremented if every word is visited at least once after the last
        # increment.
        self.epoch = 0
        # True if the epoch is incremented at the last iteration.
        self.is_new_epoch = False
        # NOTE: this is not a count of parameter updates. It is just a count of
        # calls of ``__next__``.
        self.iteration = 0
        # use -1 instead of None internally
        self._previous_epoch_detail = -1.
    def __next__(self):
        # This iterator returns a list representing a mini-batch. Each item
        # indicates a different position in the original sequence. Each item is
        # represented by a pair of two word IDs. The first word is at the
        # "current" position, while the second word at the next position.
        # At each iteration, the iteration count is incremented, which pushes
        # forward the "current" position.
        length = len(self.dataset)
        if not self.repeat and self.iteration * self.batch_size >= length:
            # If not self.repeat, this iterator stops at the end of the first
            # epoch (i.e., when all words are visited once).
            raise StopIteration
        cur_words = self.get_words()
        self._previous_epoch_detail = self.epoch_detail
        self.iteration += 1
        next_words = self.get_words()
        epoch = self.iteration * self.batch_size // length
        self.is_new_epoch = self.epoch < epoch
        if self.is_new_epoch:
            self.epoch = epoch
        return list(zip(cur_words, next_words))
    @property
    def epoch_detail(self):
        # Floating point version of epoch.
        return self.iteration * self.batch_size / len(self.dataset)
    @property
    def previous_epoch_detail(self):
        # ``None`` until the first ``__next__`` call (internally stored as -1).
        if self._previous_epoch_detail < 0:
            return None
        return self._previous_epoch_detail
    def get_words(self):
        # It returns a list of current words.
        return [self.dataset[(offset + self.iteration) % len(self.dataset)]
                for offset in self.offsets]
    def serialize(self, serializer):
        # It is important to serialize the state to be recovered on resume.
        self.iteration = serializer('iteration', self.iteration)
        self.epoch = serializer('epoch', self.epoch)
        try:
            self._previous_epoch_detail = serializer(
                'previous_epoch_detail', self._previous_epoch_detail)
        except KeyError:
            # guess previous_epoch_detail for older version
            # NOTE(review): ``self.current_position`` is never defined on this
            # class, so this legacy fallback appears to raise AttributeError
            # if reached -- confirm against old snapshot formats.
            self._previous_epoch_detail = self.epoch + \
                (self.current_position - self.batch_size) / len(self.dataset)
            if self.epoch_detail > 0:
                self._previous_epoch_detail = max(
                    self._previous_epoch_detail, 0.)
            else:
                self._previous_epoch_detail = -1.
# Custom updater for truncated BackProp Through Time (BPTT)
class BPTTUpdater(training.updaters.StandardUpdater):
    """Updater implementing truncated backpropagation through time (BPTT).

    Accumulates the loss over ``bprop_len`` consecutive time steps, then
    backpropagates once and truncates the computational graph.
    """
    def __init__(self, train_iter, optimizer, bprop_len, device):
        super(BPTTUpdater, self).__init__(
            train_iter, optimizer, device=device)
        self.bprop_len = bprop_len  # number of time steps per parameter update
    # The core part of the update routine can be customized by overriding.
    def update_core(self):
        loss = 0
        # When we pass one iterator and optimizer to StandardUpdater.__init__,
        # they are automatically named 'main'.
        train_iter = self.get_iterator('main')
        optimizer = self.get_optimizer('main')
        # Progress the dataset iterator for bprop_len words at each iteration.
        for i in range(self.bprop_len):
            # Get the next batch (a list of tuples of two word IDs)
            batch = train_iter.__next__()
            # Concatenate the word IDs to matrices and send them to the device
            # self.converter does this job
            # (it is chainer.dataset.concat_examples by default)
            x, t = self.converter(batch, self.device)
            # Compute the loss at this time step and accumulate it
            loss += optimizer.target(x, t)
        optimizer.target.cleargrads()  # Clear the parameter gradients
        loss.backward()  # Backprop
        loss.unchain_backward()  # Truncate the graph
        optimizer.update()  # Update the parameters
# Routine to rewrite the result dictionary of LogReport to add perplexity
# values
def compute_perplexity(result):
    """Add perplexity entries to a LogReport result dict, in place.

    ``perplexity`` is exp of 'main/loss'; ``val_perplexity`` is added only
    when a validation loss has been recorded.
    """
    result['perplexity'] = np.exp(result['main/loss'])
    val_key = 'validation/main/loss'
    if val_key in result:
        result['val_perplexity'] = np.exp(result[val_key])
def main():
    """Train a two-layer LSTM language model on Penn Treebank via BPTT."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize', '-b', type=int, default=20,
                        help='Number of examples in each mini-batch')
    parser.add_argument('--bproplen', '-l', type=int, default=35,
                        help='Number of words in each mini-batch '
                        '(= length of truncated BPTT)')
    parser.add_argument('--epoch', '-e', type=int, default=39,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--device', '-d', type=str, default='-1',
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    parser.add_argument('--gradclip', '-c', type=float, default=5,
                        help='Gradient norm threshold to clip')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', type=str,
                        help='Resume the training from snapshot')
    parser.add_argument('--test', action='store_true',
                        help='Use tiny datasets for quick tests')
    parser.set_defaults(test=False)
    parser.add_argument('--unit', '-u', type=int, default=650,
                        help='Number of LSTM units in each layer')
    parser.add_argument('--model', '-m', default='model.npz',
                        help='Model file name to serialize')
    group = parser.add_argument_group('deprecated arguments')
    group.add_argument('--gpu', '-g', dest='device',
                       type=int, nargs='?', const=0,
                       help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()
    device = chainer.get_device(args.device)
    if device.xp is chainerx:
        sys.stderr.write('This example does not support ChainerX devices.\n')
        sys.exit(1)
    device.use()
    # Load the Penn Tree Bank long word sequence dataset
    train, val, test = chainer.datasets.get_ptb_words()
    n_vocab = max(train) + 1  # train is just an array of integers
    print('#vocab = {}'.format(n_vocab))
    if args.test:
        train = train[:100]
        val = val[:100]
        test = test[:100]
    train_iter = ParallelSequentialIterator(train, args.batchsize)
    val_iter = ParallelSequentialIterator(val, 1, repeat=False)
    test_iter = ParallelSequentialIterator(test, 1, repeat=False)
    # Prepare an RNNLM model
    rnn = RNNForLM(n_vocab, args.unit)
    model = L.Classifier(rnn)
    model.compute_accuracy = False  # we only want the perplexity
    model.to_device(device)
    # Set up an optimizer
    optimizer = chainer.optimizers.SGD(lr=1.0)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.GradientClipping(args.gradclip))
    # Set up a trainer
    updater = BPTTUpdater(train_iter, optimizer, args.bproplen, device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    eval_model = model.copy()  # Model with shared params and distinct states
    eval_rnn = eval_model.predictor
    trainer.extend(extensions.Evaluator(
        val_iter, eval_model, device=device,
        # Reset the RNN state at the beginning of each evaluation
        eval_hook=lambda _: eval_rnn.reset_state()))
    interval = 10 if args.test else 500
    trainer.extend(extensions.LogReport(postprocess=compute_perplexity,
                                        trigger=(interval, 'iteration')))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'iteration', 'perplexity', 'val_perplexity']
    ), trigger=(interval, 'iteration'))
    trainer.extend(extensions.ProgressBar(
        update_interval=1 if args.test else 10))
    trainer.extend(extensions.snapshot())
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'))
    if args.resume is not None:
        chainer.serializers.load_npz(args.resume, trainer)
    trainer.run()
    # Evaluate the final model
    print('test')
    eval_rnn.reset_state()
    evaluator = extensions.Evaluator(test_iter, eval_model, device=device)
    result = evaluator()
    print('test perplexity: {}'.format(np.exp(float(result['main/loss']))))
    # Serialize the final model
    chainer.serializers.save_npz(args.model, model)
if __name__ == '__main__':
    main()
| 11,423
| 38.666667
| 79
|
py
|
chainer
|
chainer-master/examples/ptb/gentxt.py
|
#!/usr/bin/env python
"""Example to generate text from a recurrent neural network language model.
This code is ported from following implementation.
https://github.com/longjie/chainer-char-rnn/blob/master/sample.py
"""
import argparse
import sys
import numpy as np
import six
import chainer
from chainer.backends import cuda
import chainer.functions as F
import chainer.links as L
from chainer import serializers
import train_ptb
def main():
    """Generate text from an RNN language model trained by train_ptb.py."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', '-m', type=str, required=True,
                        help='model data, saved by train_ptb.py')
    parser.add_argument('--primetext', '-p', type=str, required=True,
                        default='',
                        help='base text data, used for text generation')
    parser.add_argument('--seed', '-s', type=int, default=123,
                        help='random seeds for text generation')
    parser.add_argument('--unit', '-u', type=int, default=650,
                        help='number of units')
    parser.add_argument('--sample', type=int, default=1,
                        help='negative value indicates NOT use random choice')
    parser.add_argument('--length', type=int, default=20,
                        help='length of the generated text')
    parser.add_argument('--device', '-d', type=str, default='-1',
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    group = parser.add_argument_group('deprecated arguments')
    group.add_argument('--gpu', '-g', dest='device',
                       type=int, nargs='?', const=-1,
                       help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()
    np.random.seed(args.seed)
    chainer.config.train = False
    device = chainer.get_device(args.device)
    device.use()
    # load vocabulary
    vocab = chainer.datasets.get_ptb_words_vocabulary()
    ivocab = {}
    for c, i in vocab.items():
        ivocab[i] = c
    # should be same as n_units , described in train_ptb.py
    n_units = args.unit
    lm = train_ptb.RNNForLM(len(vocab), n_units)
    model = L.Classifier(lm)
    serializers.load_npz(args.model, model)
    model.to_device(device)
    model.predictor.reset_state()
    primetext = args.primetext
    if isinstance(primetext, six.binary_type):
        primetext = primetext.decode('utf-8')
    xp = device.xp
    if primetext in vocab:
        prev_word = chainer.Variable(
            xp.array([vocab[primetext]], xp.int32), requires_grad=False)
    else:
        print('ERROR: Unfortunately ' + primetext + ' is unknown.')
        exit()
    # NOTE(review): this first forward pass advances the LSTM state with the
    # prime word, but the first loop iteration feeds the same word again, so
    # the prime word is processed twice -- confirm whether that is intended.
    prob = F.softmax(model.predictor(prev_word))
    sys.stdout.write(primetext + ' ')
    for i in six.moves.range(args.length):
        prob = F.softmax(model.predictor(prev_word))
        if args.sample > 0:
            # Sample the next word from the predicted distribution.
            probability = cuda.to_cpu(prob.array)[0].astype(np.float64)
            probability /= np.sum(probability)
            index = np.random.choice(range(len(probability)), p=probability)
        else:
            # Greedy decoding: always take the most likely word.
            index = np.argmax(cuda.to_cpu(prob.array))
        if ivocab[index] == '<eos>':
            sys.stdout.write('.')
        else:
            sys.stdout.write(ivocab[index] + ' ')
        prev_word = chainer.Variable(
            xp.array([index], dtype=xp.int32), requires_grad=False)
    sys.stdout.write('\n')
if __name__ == '__main__':
    main()
| 3,635
| 32.054545
| 78
|
py
|
chainer
|
chainer-master/examples/caffe_export/export.py
|
import argparse
import os
import numpy as np
import chainer
from chainer.exporters import caffe
from chainer.links.model.vision import googlenet
from chainer.links.model.vision import resnet
from chainer.links.model.vision import vgg
archs = {
'googlenet': googlenet.GoogLeNet,
'resnet50': resnet.ResNet50Layers,
'resnet101': resnet.ResNet101Layers,
'resnet152': resnet.ResNet152Layers,
'vgg16': vgg.VGG16Layers,
}
class DumpModel(chainer.Chain):
    """Wrapper chain exposing only the 'prob' output of a vision model."""
    def __init__(self, arch_name):
        super(DumpModel, self).__init__()
        with self.init_scope():
            self.base_model = archs[arch_name]()
    def forward(self, img):
        """Run the base model and return the softmax probability output."""
        outputs = self.base_model(img, layers=['prob'])
        return outputs['prob']
def get_network_for_imagenet(arch_name):
    """Build the wrapper model and a dummy ImageNet-sized input variable.

    Args:
        arch_name (str): Key into ``archs`` selecting the architecture.

    Returns:
        tuple: ``(model, input_var)`` where ``input_var`` is a constant
        1x3x224x224 float32 image used to trace the network for export.
    """
    model = DumpModel(arch_name)
    input_image = np.ones((1, 3, 224, 224), dtype=np.float32)
    # Renamed local from ``input`` to avoid shadowing the builtin.
    input_var = chainer.Variable(input_image)
    return model, input_var
def main():
    """Export a pretrained ImageNet model to Caffe prototxt/caffemodel."""
    parser = argparse.ArgumentParser(description='Export')
    parser.add_argument(
        '--arch', '-a', type=str, required=True,
        choices=archs.keys(),
        help='Arch name. models: ' + ', '.join(archs.keys()) + '.')
    parser.add_argument(
        '--out-dir', '-o', type=str, required=True,
        help='Output directory name. '
        'chainer_model.prototxt, chainer_model.caffemodel'
        ' will be created in it')
    args = parser.parse_args()
    # Create the output directory if needed; reuse an existing directory
    # but refuse a path that exists and is not a directory.
    if not os.path.exists(args.out_dir):
        print('Created output directory: ' + args.out_dir)
        os.mkdir(args.out_dir)
    else:
        print('Overwriting the existing directory: ' + args.out_dir)
        if not os.path.isdir(args.out_dir):
            raise ValueError(args.out_dir + ' exists but not a directory!')
    print('load model')
    model, input = get_network_for_imagenet(args.arch)
    print('convert to caffe model')
    caffe.export(model, [input], args.out_dir, True)
if __name__ == '__main__':
    main()
| 1,953
| 27.318841
| 71
|
py
|
chainer
|
chainer-master/examples/word2vec/train_word2vec.py
|
#!/usr/bin/env python
"""Sample script of word embedding model.
This code implements skip-gram model and continuous-bow model.
"""
import argparse
import collections
import os
import six
import warnings
import numpy as np
import chainer
from chainer.backends import cuda
import chainer.functions as F
import chainer.initializers as I
import chainer.links as L
import chainer.optimizers as O
from chainer import reporter
from chainer import training
from chainer.training import extensions
class ContinuousBoW(chainer.Chain):
    """Definition of Continuous Bag of Words Model"""
    def __init__(self, n_vocab, n_units, loss_func):
        super(ContinuousBoW, self).__init__()
        with self.init_scope():
            self.embed = L.EmbedID(
                n_vocab, n_units, initialW=I.Uniform(1. / n_units))
            self.loss_func = loss_func
    def forward(self, x, contexts):
        """Predict the center word ``x`` from the mean context embedding."""
        embeddings = self.embed(contexts)
        # Average the context embeddings along the context axis.
        mean_embedding = F.sum(embeddings, axis=1) * (1. / contexts.shape[1])
        loss = self.loss_func(mean_embedding, x)
        reporter.report({'loss': loss}, self)
        return loss
class SkipGram(chainer.Chain):
    """Definition of Skip-gram Model"""
    def __init__(self, n_vocab, n_units, loss_func):
        super(SkipGram, self).__init__()
        with self.init_scope():
            self.embed = L.EmbedID(
                n_vocab, n_units, initialW=I.Uniform(1. / n_units))
            self.loss_func = loss_func
    def forward(self, x, contexts):
        """Predict each context word's center word; score all pairs at once."""
        emb = self.embed(contexts)
        batch_size, n_context, n_units = emb.shape
        # Repeat each center word once per context word, then flatten both
        # tensors so the loss sees one big batch of (embedding, center) pairs.
        centers = F.broadcast_to(x[:, None], (batch_size, n_context))
        flat_emb = F.reshape(emb, (batch_size * n_context, n_units))
        flat_centers = F.reshape(centers, (batch_size * n_context,))
        loss = self.loss_func(flat_emb, flat_centers)
        reporter.report({'loss': loss}, self)
        return loss
class SoftmaxCrossEntropyLoss(chainer.Chain):
    """Softmax cross entropy loss function preceded by linear transformation.
    """
    def __init__(self, n_in, n_out):
        super(SoftmaxCrossEntropyLoss, self).__init__()
        with self.init_scope():
            # initialW=0 starts the output projection at all zeros.
            self.out = L.Linear(n_in, n_out, initialW=0)
    def forward(self, x, t):
        """Project ``x`` to vocabulary logits and score against targets."""
        logits = self.out(x)
        return F.softmax_cross_entropy(logits, t)
class WindowIterator(chainer.dataset.Iterator):
    """Dataset iterator to create a batch of sequences at different positions.
    This iterator returns a pair of the current words and the context words.
    """
    def __init__(self, dataset, window, batch_size, repeat=True):
        self.dataset = np.array(dataset, np.int32)
        self.window = window  # size of context window
        self.batch_size = batch_size
        self._repeat = repeat
        # order is the array which is shuffled ``[window, window + 1, ...,
        # len(dataset) - window - 1]``
        self.order = np.random.permutation(
            len(dataset) - window * 2).astype(np.int32)
        self.order += window
        self.current_position = 0
        # Number of completed sweeps over the dataset. In this case, it is
        # incremented if every word is visited at least once after the last
        # increment.
        self.epoch = 0
        # True if the epoch is incremented at the last iteration.
        self.is_new_epoch = False
    def __next__(self):
        """This iterator returns a list representing a mini-batch.
        Each item indicates a different position in the original sequence.
        """
        if not self._repeat and self.epoch > 0:
            raise StopIteration
        i = self.current_position
        i_end = i + self.batch_size
        position = self.order[i:i_end]
        # Sample a context half-width w uniformly from [1, window - 1].
        # NOTE(review): ``np.random.randint(self.window - 1)`` raises
        # ValueError when window == 1, and w never reaches ``window``
        # itself -- confirm whether the full window was intended.
        w = np.random.randint(self.window - 1) + 1
        offset = np.concatenate([np.arange(-w, 0), np.arange(1, w + 1)])
        pos = position[:, None] + offset[None, :]
        contexts = self.dataset.take(pos)
        center = self.dataset.take(position)
        if i_end >= len(self.order):
            # Completed a sweep: reshuffle positions and start a new epoch.
            np.random.shuffle(self.order)
            self.epoch += 1
            self.is_new_epoch = True
            self.current_position = 0
        else:
            self.is_new_epoch = False
            self.current_position = i_end
        return center, contexts
    @property
    def epoch_detail(self):
        # Floating point version of epoch.
        return self.epoch + float(self.current_position) / len(self.order)
    def serialize(self, serializer):
        # Serialize the internal state so training can resume mid-epoch.
        self.current_position = serializer('current_position',
                                           self.current_position)
        self.epoch = serializer('epoch', self.epoch)
        self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
        if self.order is not None:
            serializer('order', self.order)
@chainer.dataset.converter()
def convert(batch, device):
    """Send a ``(center, contexts)`` batch to the target device."""
    center, contexts = batch
    return device.send(center), device.send(contexts)
def main():
    """Train a word2vec model (skip-gram or CBOW) on Penn Treebank and save
    the learned embeddings in a plain-text word2vec format."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', '-d', type=str, default='-1',
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    parser.add_argument('--unit', '-u', default=100, type=int,
                        help='number of units')
    parser.add_argument('--window', '-w', default=5, type=int,
                        help='window size')
    parser.add_argument('--batchsize', '-b', type=int, default=1000,
                        help='learning minibatch size')
    parser.add_argument('--epoch', '-e', default=20, type=int,
                        help='number of epochs to learn')
    parser.add_argument('--model', '-m', choices=['skipgram', 'cbow'],
                        default='skipgram',
                        help='model type ("skipgram", "cbow")')
    parser.add_argument('--negative-size', default=5, type=int,
                        help='number of negative samples')
    parser.add_argument('--out-type', '-o', choices=['hsm', 'ns', 'original'],
                        default='hsm',
                        help='output model type ("hsm": hierarchical softmax, '
                        '"ns": negative sampling, "original": '
                        'no approximation)')
    parser.add_argument('--out', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', type=str,
                        help='Resume the training from snapshot')
    parser.add_argument('--snapshot-interval', type=int,
                        help='Interval of snapshots')
    parser.add_argument('--test', dest='test', action='store_true')
    parser.set_defaults(test=False)
    group = parser.add_argument_group('deprecated arguments')
    group.add_argument('--gpu', '-g', dest='device',
                       type=int, nargs='?', const=0,
                       help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()
    if chainer.get_dtype() == np.float16:
        warnings.warn(
            'This example may cause NaN in FP16 mode.', RuntimeWarning)
    device = chainer.get_device(args.device)
    device.use()
    if args.snapshot_interval is None:
        args.snapshot_interval = args.epoch
    args.snapshot_interval = min(args.snapshot_interval, args.epoch)
    print('Device: {}'.format(device))
    print('# unit: {}'.format(args.unit))
    print('Window: {}'.format(args.window))
    print('Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('Training model: {}'.format(args.model))
    print('Output type: {}'.format(args.out_type))
    print('')
    # Load the dataset
    train, val, _ = chainer.datasets.get_ptb_words()
    counts = collections.Counter(train)
    counts.update(collections.Counter(val))
    n_vocab = max(train) + 1
    if args.test:
        train = train[:100]
        val = val[:100]
    vocab = chainer.datasets.get_ptb_words_vocabulary()
    index2word = {wid: word for word, wid in six.iteritems(vocab)}
    print('n_vocab: %d' % n_vocab)
    print('data length: %d' % len(train))
    # Choose the output layer / loss approximation.
    if args.out_type == 'hsm':
        HSM = L.BinaryHierarchicalSoftmax
        tree = HSM.create_huffman_tree(counts)
        loss_func = HSM(args.unit, tree)
        loss_func.W.array[...] = 0
    elif args.out_type == 'ns':
        cs = [counts[w] for w in range(len(counts))]
        loss_func = L.NegativeSampling(args.unit, cs, args.negative_size)
        loss_func.W.array[...] = 0
    elif args.out_type == 'original':
        loss_func = SoftmaxCrossEntropyLoss(args.unit, n_vocab)
    else:
        raise Exception('Unknown output type: {}'.format(args.out_type))
    # Choose the model
    if args.model == 'skipgram':
        model = SkipGram(n_vocab, args.unit, loss_func)
    elif args.model == 'cbow':
        model = ContinuousBoW(n_vocab, args.unit, loss_func)
    else:
        raise Exception('Unknown model type: {}'.format(args.model))
    model.to_device(device)
    # Set up an optimizer
    optimizer = O.Adam()
    optimizer.setup(model)
    # Set up an iterator
    train_iter = WindowIterator(train, args.window, args.batchsize)
    val_iter = WindowIterator(val, args.window, args.batchsize, repeat=False)
    # Set up an updater
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, converter=convert, device=device)
    # Set up a trainer
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    trainer.extend(extensions.Evaluator(
        val_iter, model, converter=convert, device=device))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss']))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'),
        trigger=(args.snapshot_interval, 'epoch'))
    if args.resume is not None:
        chainer.serializers.load_npz(args.resume, trainer)
    trainer.run()
    # Save the word2vec model
    with open(os.path.join(args.out, 'word2vec.model'), 'w') as f:
        f.write('%d %d\n' % (len(index2word), args.unit))
        w = cuda.to_cpu(model.embed.W.array)
        for i, wi in enumerate(w):
            v = ' '.join(map(str, wi))
            f.write('%s %s\n' % (index2word[i], v))
if __name__ == '__main__':
    main()
| 10,528
| 34.935154
| 79
|
py
|
chainer
|
chainer-master/examples/word2vec/search.py
|
#!/usr/bin/env python
import argparse
import os
import numpy
import six
n_result = 5  # number of search result to show
parser = argparse.ArgumentParser()
parser.add_argument('--result', default='result',
                    help='Directory of a training result')
args = parser.parse_args()
# Load the model written by train_word2vec.py: a "<n_vocab> <n_units>"
# header followed by one "<word> <vector components...>" line per word.
with open(os.path.join(args.result, 'word2vec.model'), 'r') as f:
    ss = f.readline().split()
    n_vocab, n_units = int(ss[0]), int(ss[1])
    word2index = {}
    index2word = {}
    w = numpy.empty((n_vocab, n_units), dtype=numpy.float32)
    for i, line in enumerate(f):
        ss = line.split()
        assert len(ss) == n_units + 1
        word = ss[0]
        word2index[word] = i
        index2word[i] = word
        w[i] = numpy.array([float(s) for s in ss[1:]], dtype=numpy.float32)
# Normalize rows so that the dot product below is cosine similarity.
s = numpy.sqrt((w * w).sum(1))
w /= s.reshape((s.shape[0], 1))  # normalize
# Interactive loop: read a query word and print its n_result nearest
# neighbors by cosine similarity; Ctrl-D (EOF) exits.
try:
    while True:
        q = six.moves.input('>> ')
        if q not in word2index:
            print('"{0}" is not found'.format(q))
            continue
        v = w[word2index[q]]
        similarity = w.dot(v)
        print('query: {}'.format(q))
        count = 0
        for i in (-similarity).argsort():
            if numpy.isnan(similarity[i]):
                continue
            if index2word[i] == q:
                continue
            print('{0}: {1}'.format(index2word[i], similarity[i]))
            count += 1
            if count == n_result:
                break
except EOFError:
    pass
| 1,487
| 25.105263
| 75
|
py
|
chainer
|
chainer-master/examples/chainermn/dcgan/updater.py
|
#!/usr/bin/env python
from __future__ import print_function
import chainer
import chainer.functions as F
from chainer import Variable
class DCGANUpdater(chainer.training.StandardUpdater):
    """Updater running one adversarial DCGAN step for both networks."""
    def __init__(self, *args, **kwargs):
        # 'models' is a (generator, discriminator) pair popped from kwargs
        # before delegating the rest to StandardUpdater.
        self.gen, self.dis = kwargs.pop('models')
        super(DCGANUpdater, self).__init__(*args, **kwargs)
    def loss_dis(self, dis, y_fake, y_real):
        """Softplus discriminator loss: raise real scores, lower fake ones."""
        batchsize = len(y_fake)
        L1 = F.sum(F.softplus(-y_real)) / batchsize
        L2 = F.sum(F.softplus(y_fake)) / batchsize
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss
    def loss_gen(self, gen, y_fake):
        """Generator loss: push the discriminator's fake scores up."""
        batchsize = len(y_fake)
        loss = F.sum(F.softplus(-y_fake)) / batchsize
        chainer.report({'loss': loss}, gen)
        return loss
    def update_core(self):
        gen_optimizer = self.get_optimizer('gen')
        dis_optimizer = self.get_optimizer('dis')
        batch = self.get_iterator('main').next()
        # Scale pixel values from [0, 255] to [0, 1].
        x_real = Variable(self.converter(batch, self.device)) / 255.
        xp = chainer.backend.get_array_module(x_real.array)
        gen, dis = self.gen, self.dis
        batchsize = len(batch)
        # Score real images, then generate and score fake images.
        y_real = dis(x_real)
        z = Variable(xp.asarray(gen.make_hidden(batchsize)))
        x_fake = gen(z)
        y_fake = dis(x_fake)
        dis_optimizer.update(self.loss_dis, dis, y_fake, y_real)
        gen_optimizer.update(self.loss_gen, gen, y_fake)
| 1,445
| 28.510204
| 68
|
py
|
chainer
|
chainer-master/examples/chainermn/dcgan/net.py
|
#!/usr/bin/env python
from __future__ import print_function
import numpy
import chainer
from chainer import backend
import chainer.functions as F
import chainer.links as L
def add_noise(h, sigma=0.2):
    """Add Gaussian noise with stddev ``sigma`` to ``h`` during training.

    At test time (``chainer.config.train`` is False) ``h`` passes through
    unchanged.
    """
    if not chainer.config.train:
        return h
    xp = backend.get_array_module(h.array)
    return h + sigma * xp.random.randn(*h.shape)
class Generator(chainer.Chain):
    """DCGAN generator mapping a latent vector to an RGB image."""
    def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
        super(Generator, self).__init__()
        self.n_hidden = n_hidden  # dimensionality of the latent vector z
        self.ch = ch  # channel count at the smallest spatial resolution
        self.bottom_width = bottom_width  # spatial size after the first FC
        with self.init_scope():
            w = chainer.initializers.Normal(wscale)
            self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
                               initialW=w)
            self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
            self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
            self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
            self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
            self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
            self.bn1 = L.BatchNormalization(ch // 2)
            self.bn2 = L.BatchNormalization(ch // 4)
            self.bn3 = L.BatchNormalization(ch // 8)
    def make_hidden(self, batchsize):
        """Sample a batch of latent vectors uniformly from [-1, 1)."""
        return numpy.random.uniform(-1, 1, (batchsize, self.n_hidden, 1, 1))\
            .astype(numpy.float32)
    def __call__(self, z):
        # Project z to a (ch, bottom_width, bottom_width) feature map, then
        # upsample through the deconvolutions; sigmoid maps pixels to [0, 1].
        h = F.reshape(F.relu(self.bn0(self.l0(z))),
                      (len(z), self.ch, self.bottom_width, self.bottom_width))
        h = F.relu(self.bn1(self.dc1(h)))
        h = F.relu(self.bn2(self.dc2(h)))
        h = F.relu(self.bn3(self.dc3(h)))
        x = F.sigmoid(self.dc4(h))
        return x
class Discriminator(chainer.Chain):
    """DCGAN discriminator mapping an RGB image to a scalar realness logit."""
    def __init__(self, bottom_width=4, ch=512, wscale=0.02):
        w = chainer.initializers.Normal(wscale)
        super(Discriminator, self).__init__()
        with self.init_scope():
            self.c0_0 = L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w)
            self.c0_1 = L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w)
            self.c1_0 = L.Convolution2D(ch // 4, ch // 4, 3, 1, 1, initialW=w)
            self.c1_1 = L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w)
            self.c2_0 = L.Convolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=w)
            self.c2_1 = L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w)
            self.c3_0 = L.Convolution2D(ch // 1, ch // 1, 3, 1, 1, initialW=w)
            self.l4 = L.Linear(bottom_width * bottom_width * ch, 1, initialW=w)
            self.bn0_1 = L.BatchNormalization(ch // 4, use_gamma=False)
            self.bn1_0 = L.BatchNormalization(ch // 4, use_gamma=False)
            self.bn1_1 = L.BatchNormalization(ch // 2, use_gamma=False)
            self.bn2_0 = L.BatchNormalization(ch // 2, use_gamma=False)
            self.bn2_1 = L.BatchNormalization(ch // 1, use_gamma=False)
            self.bn3_0 = L.BatchNormalization(ch // 1, use_gamma=False)
    def __call__(self, x):
        # Gaussian noise on the input and after every conv regularizes the
        # discriminator during training (add_noise is a no-op at test time).
        h = add_noise(x)
        h = F.leaky_relu(add_noise(self.c0_0(h)))
        h = F.leaky_relu(add_noise(self.bn0_1(self.c0_1(h))))
        h = F.leaky_relu(add_noise(self.bn1_0(self.c1_0(h))))
        h = F.leaky_relu(add_noise(self.bn1_1(self.c1_1(h))))
        h = F.leaky_relu(add_noise(self.bn2_0(self.c2_0(h))))
        h = F.leaky_relu(add_noise(self.bn2_1(self.c2_1(h))))
        h = F.leaky_relu(add_noise(self.bn3_0(self.c3_0(h))))
        return self.l4(h)
| 3,630
| 40.735632
| 79
|
py
|
chainer
|
chainer-master/examples/chainermn/dcgan/train_dcgan.py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import chainer
from chainer import training
from chainer.training import extensions
from net import Discriminator
from net import Generator
from updater import DCGANUpdater
from visualize import out_generated_image
import chainermn
def main():
    """Train a DCGAN with ChainerMN, distributing the dataset across
    workers and emitting logs/snapshots only from rank 0."""
    parser = argparse.ArgumentParser(description='ChainerMN example: DCGAN')
    parser.add_argument('--batchsize', '-b', type=int, default=50,
                        help='Number of images in each mini-batch')
    parser.add_argument('--communicator', type=str,
                        default='pure_nccl', help='Type of communicator')
    parser.add_argument('--epoch', '-e', type=int, default=1000,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', action='store_true',
                        help='Use GPU')
    parser.add_argument('--dataset', '-i', default='',
                        help='Directory of image files. Default is cifar-10.')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--gen_model', '-r', default='',
                        help='Use pre-trained generator for training')
    parser.add_argument('--dis_model', '-d', default='',
                        help='Use pre-trained discriminator for training')
    parser.add_argument('--n_hidden', '-n', type=int, default=100,
                        help='Number of hidden units (z)')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed of z at visualization stage')
    parser.add_argument('--snapshot_interval', type=int, default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval', type=int, default=100,
                        help='Interval of displaying log to console')
    args = parser.parse_args()
    # Prepare ChainerMN communicator.
    if args.gpu:
        if args.communicator == 'naive':
            print('Error: \'naive\' communicator does not support GPU.\n')
            exit(-1)
        comm = chainermn.create_communicator(args.communicator)
        device = comm.intra_rank
    else:
        if args.communicator != 'naive':
            print('Warning: using naive communicator '
                  'because only naive supports CPU-only execution')
        comm = chainermn.create_communicator('naive')
        device = -1
    if comm.rank == 0:
        print('==========================================')
        print('Num process (COMM_WORLD): {}'.format(comm.size))
        if args.gpu:
            print('Using GPUs')
        print('Using {} communicator'.format(args.communicator))
        print('Num hidden unit: {}'.format(args.n_hidden))
        print('Num Minibatch-size: {}'.format(args.batchsize))
        print('Num epoch: {}'.format(args.epoch))
        print('==========================================')
    # Set up a neural network to train
    gen = Generator(n_hidden=args.n_hidden)
    dis = Discriminator()
    if device >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(device).use()
        gen.to_gpu()  # Copy the model to the GPU
        dis.to_gpu()
    # Setup an optimizer
    def make_optimizer(model, comm, alpha=0.0002, beta1=0.5):
        # Create a multi node optimizer from a standard Chainer optimizer.
        optimizer = chainermn.create_multi_node_optimizer(
            chainer.optimizers.Adam(alpha=alpha, beta1=beta1), comm)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001), 'hook_dec')
        return optimizer
    opt_gen = make_optimizer(gen, comm)
    opt_dis = make_optimizer(dis, comm)
    # Split and distribute the dataset. Only worker 0 loads the whole dataset.
    # Datasets of worker 0 are evenly split and distributed to all workers.
    if comm.rank == 0:
        if args.dataset == '':
            # Load the CIFAR10 dataset if args.dataset is not specified
            train, _ = chainer.datasets.get_cifar10(withlabel=False,
                                                    scale=255.)
        else:
            all_files = os.listdir(args.dataset)
            image_files = [f for f in all_files if ('png' in f or 'jpg' in f)]
            print('{} contains {} image files'
                  .format(args.dataset, len(image_files)))
            train = chainer.datasets\
                .ImageDataset(paths=image_files, root=args.dataset)
    else:
        train = None
    train = chainermn.scatter_dataset(train, comm)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    # Set up a trainer
    updater = DCGANUpdater(
        models=(gen, dis),
        iterator=train_iter,
        optimizer={
            'gen': opt_gen, 'dis': opt_dis},
        device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    # Some display and output extensions are necessary only for one worker.
    # (Otherwise, there would just be repeated outputs.)
    if comm.rank == 0:
        snapshot_interval = (args.snapshot_interval, 'iteration')
        display_interval = (args.display_interval, 'iteration')
        # Save only model parameters.
        # `snapshot` extension will save all the trainer module's attribute,
        # including `train_iter`.
        # However, `train_iter` depends on scattered dataset, which means that
        # `train_iter` may be different in each process.
        # Here, instead of saving whole trainer module, only the network models
        # are saved.
        trainer.extend(extensions.snapshot_object(
            gen, 'gen_iter_{.updater.iteration}.npz'),
            trigger=snapshot_interval)
        trainer.extend(extensions.snapshot_object(
            dis, 'dis_iter_{.updater.iteration}.npz'),
            trigger=snapshot_interval)
        trainer.extend(extensions.LogReport(trigger=display_interval))
        trainer.extend(extensions.PrintReport([
            'epoch', 'iteration', 'gen/loss', 'dis/loss', 'elapsed_time',
        ]), trigger=display_interval)
        trainer.extend(extensions.ProgressBar(update_interval=10))
        trainer.extend(
            out_generated_image(
                gen, dis,
                10, 10, args.seed, args.out),
            trigger=snapshot_interval)
    # Start the training using pre-trained model, saved by snapshot_object
    if args.gen_model:
        chainer.serializers.load_npz(args.gen_model, gen)
    if args.dis_model:
        chainer.serializers.load_npz(args.dis_model, dis)
    # Run the training
    trainer.run()
if __name__ == '__main__':
    main()
| 6,762
| 39.740964
| 79
|
py
|
chainer
|
chainer-master/examples/chainermn/dcgan/visualize.py
|
#!/usr/bin/env python
import os
import numpy as np
from PIL import Image
import chainer
import chainer.cuda
from chainer import Variable
def out_generated_image(gen, dis, rows, cols, seed, dst):
    """Return a trainer extension that saves a tiled generator sample.

    The extension samples ``rows * cols`` images from the generator
    ``gen`` using a fixed random seed, tiles them into a single RGB
    image, and writes it to ``<dst>/preview/image<iteration>.png``.
    ``dis`` is not used in this function; it is accepted to mirror the
    (gen, dis) pair used elsewhere in the training script.
    """
    @chainer.training.make_extension()
    def make_image(trainer):
        # Fix the seed so every snapshot uses the same latent vectors,
        # making the generator's progress comparable across iterations.
        np.random.seed(seed)
        n_images = rows * cols
        xp = gen.xp
        z = Variable(xp.asarray(gen.make_hidden(n_images)))
        with chainer.using_config('train', False):
            x = gen(z)
        x = chainer.cuda.to_cpu(x.array)
        # Restore nondeterministic seeding for the rest of training.
        np.random.seed()
        # Map generator output to uint8 pixel values in [0, 255].
        x = np.asarray(np.clip(x * 255, 0.0, 255.0), dtype=np.uint8)
        _, _, H, W = x.shape
        # Tile the (N, 3, H, W) batch into one (rows*H, cols*W, 3) image.
        x = x.reshape((rows, cols, 3, H, W))
        x = x.transpose(0, 3, 1, 4, 2)
        x = x.reshape((rows * H, cols * W, 3))
        preview_dir = '{}/preview'.format(dst)
        preview_path = preview_dir +\
            '/image{:0>8}.png'.format(trainer.updater.iteration)
        if not os.path.exists(preview_dir):
            os.makedirs(preview_dir)
        Image.fromarray(x).save(preview_path)
    return make_image
| 1,078
| 27.394737
| 68
|
py
|
chainer
|
chainer-master/examples/chainermn/imagenet/train_imagenet.py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import multiprocessing
import random
import sys
import numpy as np
import chainer
import chainer.cuda
from chainer import training
from chainer.training import extensions
import chainermn
import models.alex as alex
import models.googlenet as googlenet
import models.googlenetbn as googlenetbn
import models.nin as nin
import models.resnet50 as resnet50
# Check Python version if it supports multiprocessing.set_start_method,
# which was introduced in Python 3.4.
# sys.version_info supports tuple comparison, so the manual unpacking of
# major/minor (which left unused locals) is unnecessary; also use
# sys.exit instead of the site-provided builtin exit(), which is not
# guaranteed to exist in all execution environments.
if sys.version_info < (3, 4):
    sys.stderr.write('Error: ImageNet example uses '
                     'chainer.iterators.MultiprocessIterator, '
                     'which works only with Python >= 3.4.\n'
                     'For more details, see '
                     'http://chainermn.readthedocs.io/en/master/'
                     'tutorial/tips_faqs.html#using-multiprocessiterator\n')
    sys.exit(-1)
class PreprocessedDataset(chainer.dataset.DatasetMixin):
    """Dataset wrapper that crops, flips, mean-subtracts and scales images.

    Args:
        path: Path to the image-label list file.
        root: Root directory of the image files.
        mean: Mean image (same layout as the stored images, e.g. produced
            by ``compute_mean.py``) to subtract from each crop.
        crop_size: Side length of the square crop.
        random: If True, crop at a random position and flip horizontally
            at random (training mode); otherwise crop the center.
    """

    def __init__(self, path, root, mean, crop_size, random=True):
        self.base = chainer.datasets.LabeledImageDataset(path, root)
        self.mean = mean.astype(np.float32)
        self.crop_size = crop_size
        self.random = random

    def __len__(self):
        return len(self.base)

    def get_example(self, i):
        # It reads the i-th image/label pair and returns a preprocessed image.
        # It applies the following preprocesses:
        #     - Cropping (random or center rectangular)
        #     - Random flip
        #     - Scaling to [0, 1] value
        crop_size = self.crop_size
        image, label = self.base[i]
        _, h, w = image.shape
        if self.random:
            # Randomly crop a region and flip the image.
            # random.randint's upper bound is inclusive, so ``h - crop_size``
            # is the last valid top offset.  The previous upper bound of
            # ``h - crop_size - 1`` both excluded that offset and raised
            # ValueError when the image side equals the crop size.
            top = random.randint(0, h - crop_size)
            left = random.randint(0, w - crop_size)
            if random.randint(0, 1):
                image = image[:, :, ::-1]
        else:
            # Crop the center
            top = (h - crop_size) // 2
            left = (w - crop_size) // 2
        bottom = top + crop_size
        right = left + crop_size
        image = image[:, top:bottom, left:right]
        image -= self.mean[:, top:bottom, left:right]
        image *= (1.0 / 255.0)  # Scale to [0, 1]
        return image, label
# chainermn.create_multi_node_evaluator can be also used with user customized
# evaluator classes that inherit chainer.training.extensions.Evaluator.
class TestModeEvaluator(extensions.Evaluator):
    """Evaluator that puts the target model into test mode while evaluating.

    The ``train`` flag of the target link is cleared before delegating to
    the base evaluator and restored afterwards.
    """

    def evaluate(self):
        target = self.get_target('main')
        target.train = False
        result = super(TestModeEvaluator, self).evaluate()
        target.train = True
        return result
def main():
    """Distributed ImageNet training entry point.

    Parses CLI options, sets up a ChainerMN communicator (one GPU per
    process), scatters the dataset from rank 0 to all workers, and runs
    the Chainer trainer with a multi-node optimizer and evaluator.
    """
    # Check if GPU is available
    # (ImageNet example does not support CPU execution)
    if not chainer.cuda.available:
        raise RuntimeError('ImageNet requires GPU support.')
    # Map of architecture names accepted by --arch to model classes.
    archs = {
        'alex': alex.Alex,
        'googlenet': googlenet.GoogLeNet,
        'googlenetbn': googlenetbn.GoogLeNetBN,
        'nin': nin.NIN,
        'resnet50': resnet50.ResNet50,
    }
    parser = argparse.ArgumentParser(
        description='Learning convnet from ILSVRC2012 dataset')
    parser.add_argument('train', help='Path to training image-label list file')
    parser.add_argument('val', help='Path to validation image-label list file')
    parser.add_argument('--arch', '-a', choices=archs.keys(), default='nin',
                        help='Convnet architecture')
    parser.add_argument('--batchsize', '-B', type=int, default=32,
                        help='Learning minibatch size')
    parser.add_argument('--epoch', '-E', type=int, default=10,
                        help='Number of epochs to train')
    parser.add_argument('--initmodel',
                        help='Initialize the model from given file')
    parser.add_argument('--loaderjob', '-j', type=int,
                        help='Number of parallel data loading processes')
    parser.add_argument('--mean', '-m', default='mean.npy',
                        help='Mean file (computed by compute_mean.py)')
    parser.add_argument('--resume', '-r', default='',
                        help='Initialize the trainer from given file')
    parser.add_argument('--out', '-o', default='result',
                        help='Output directory')
    parser.add_argument('--root', '-R', default='.',
                        help='Root directory path of image files')
    parser.add_argument('--val_batchsize', '-b', type=int, default=250,
                        help='Validation minibatch size')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--communicator', default='pure_nccl')
    parser.set_defaults(test=False)
    args = parser.parse_args()
    # Start method of multiprocessing module need to be changed if we
    # are using InfiniBand and MultiprocessIterator. This is because
    # processes often crash when calling fork if they are using
    # Infiniband. (c.f.,
    # https://www.open-mpi.org/faq/?category=tuning#fork-warning )
    # Also, just setting the start method does not seem to be
    # sufficient to actually launch the forkserver processes, so also
    # start a dummy process.
    # See also our document:
    # https://chainermn.readthedocs.io/en/stable/tutorial/tips_faqs.html#using-multiprocessiterator
    # This must be done *before* ``chainermn.create_communicator``!!!
    multiprocessing.set_start_method('forkserver')
    p = multiprocessing.Process()
    p.start()
    p.join()
    # Prepare ChainerMN communicator.
    comm = chainermn.create_communicator(args.communicator)
    device = comm.intra_rank
    if comm.rank == 0:
        print('==========================================')
        print('Num process (COMM_WORLD): {}'.format(comm.size))
        print('Using {} communicator'.format(args.communicator))
        print('Using {} arch'.format(args.arch))
        print('Num Minibatch-size: {}'.format(args.batchsize))
        print('Num epoch: {}'.format(args.epoch))
        print('==========================================')
    model = archs[args.arch]()
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)
    chainer.cuda.get_device_from_id(device).use()  # Make the GPU current
    model.to_gpu()
    # Split and distribute the dataset. Only worker 0 loads the whole dataset.
    # Datasets of worker 0 are evenly split and distributed to all workers.
    mean = np.load(args.mean)
    if comm.rank == 0:
        train = PreprocessedDataset(args.train, args.root, mean, model.insize)
        val = PreprocessedDataset(
            args.val, args.root, mean, model.insize, False)
    else:
        train = None
        val = None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    val = chainermn.scatter_dataset(val, comm)
    # A workaround for processes crash should be done before making
    # communicator above, when using fork (e.g. MultiProcessIterator)
    # along with Infiniband.
    train_iter = chainer.iterators.MultiprocessIterator(
        train, args.batchsize, n_processes=args.loaderjob)
    val_iter = chainer.iterators.MultiprocessIterator(
        val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)
    # Create a multi node optimizer from a standard Chainer optimizer.
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9), comm)
    optimizer.setup(model)
    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)
    # With --test, report every 10 iterations instead of every epoch so
    # the run produces output quickly.
    val_interval = (10, 'iteration') if args.test else (1, 'epoch')
    log_interval = (10, 'iteration') if args.test else (1, 'epoch')
    # Create a multi node evaluator from an evaluator.
    evaluator = TestModeEvaluator(val_iter, model, device=device)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
    trainer.extend(evaluator, trigger=val_interval)
    # Some display and output extensions are necessary only for one worker.
    # (Otherwise, there would just be repeated outputs.)
    if comm.rank == 0:
        trainer.extend(extensions.DumpGraph('main/loss'))
        trainer.extend(extensions.LogReport(trigger=log_interval))
        trainer.extend(extensions.observe_lr(), trigger=log_interval)
        trainer.extend(extensions.PrintReport([
            'epoch', 'iteration', 'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy', 'lr'
        ]), trigger=log_interval)
        trainer.extend(extensions.ProgressBar(update_interval=10))
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)
    trainer.run()


if __name__ == '__main__':
    main()
| 8,957
| 38.117904
| 99
|
py
|
chainer
|
chainer-master/examples/chainermn/imagenet/compute_mean.py
|
#!/usr/bin/env python
import argparse
import sys
import numpy as np
import chainer
def compute_mean(dataset):
    """Return the element-wise mean image over *dataset*.

    ``dataset`` yields ``(image, label)`` pairs; labels are ignored.
    Progress is written to stderr while iterating.
    """
    print('compute mean image')
    total = len(dataset)
    accum = 0
    for index, (image, _) in enumerate(dataset):
        accum = accum + image
        sys.stderr.write('{} / {}\r'.format(index, total))
        sys.stderr.flush()
    sys.stderr.write('\n')
    return accum / total
def main():
    """Parse CLI options, compute the dataset mean image and save it."""
    parser = argparse.ArgumentParser(description='Compute images mean array')
    parser.add_argument('dataset',
                        help='Path to training image-label list file')
    parser.add_argument('--root', '-R', default='.',
                        help='Root directory path of image files')
    parser.add_argument('--output', '-o', default='mean.npy',
                        help='path to output mean array')
    args = parser.parse_args()

    labeled_images = chainer.datasets.LabeledImageDataset(
        args.dataset, args.root)
    np.save(args.output, compute_mean(labeled_images))


if __name__ == '__main__':
    main()
| 1,037
| 25.615385
| 77
|
py
|
chainer
|
chainer-master/examples/chainermn/imagenet/models/nin.py
|
import chainer
import chainer.functions as F
import chainer.initializers as I
import chainer.links as L
class NIN(chainer.Chain):
    """Network-in-Network example model."""

    # Input image side length expected by this network.
    insize = 227

    def __init__(self):
        super(NIN, self).__init__()
        conv_init = I.HeNormal()  # MSRA scaling
        with self.init_scope():
            # Four mlpconv stages; input channel counts are inferred (None).
            self.mlpconv1 = L.MLPConvolution2D(
                None, (96, 96, 96), 11, stride=4, conv_init=conv_init)
            self.mlpconv2 = L.MLPConvolution2D(
                None, (256, 256, 256), 5, pad=2, conv_init=conv_init)
            self.mlpconv3 = L.MLPConvolution2D(
                None, (384, 384, 384), 3, pad=1, conv_init=conv_init)
            self.mlpconv4 = L.MLPConvolution2D(
                None, (1024, 1024, 1000), 3, pad=1, conv_init=conv_init)

    def __call__(self, x, t):
        """Compute softmax cross-entropy loss for batch ``x`` and labels ``t``.

        Also reports the loss and accuracy through ``chainer.report``.
        """
        h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
        h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
        h = self.mlpconv4(F.dropout(h))
        # Average-pool down to a (batch, 1000) class-score matrix.
        h = F.reshape(F.average_pooling_2d(h, 6), (len(x), 1000))
        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
| 1,296
| 34.054054
| 74
|
py
|
chainer
|
chainer-master/examples/chainermn/imagenet/models/googlenet.py
|
import chainer
import chainer.functions as F
import chainer.links as L
class GoogLeNet(chainer.Chain):
    """GoogLeNet with two auxiliary classifiers (loss1/loss2).

    The total loss is ``0.3 * (loss1 + loss2) + loss3`` where loss3 is
    the main classifier's loss; accuracy is computed on the main head.
    """

    # Input image side length expected by this network.
    insize = 224

    def __init__(self):
        super(GoogLeNet, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(None, 64, 7, stride=2, pad=3)
            self.conv2_reduce = L.Convolution2D(None, 64, 1)
            self.conv2 = L.Convolution2D(None, 192, 3, stride=1, pad=1)
            self.inc3a = L.Inception(None, 64, 96, 128, 16, 32, 32)
            self.inc3b = L.Inception(None, 128, 128, 192, 32, 96, 64)
            self.inc4a = L.Inception(None, 192, 96, 208, 16, 48, 64)
            self.inc4b = L.Inception(None, 160, 112, 224, 24, 64, 64)
            self.inc4c = L.Inception(None, 128, 128, 256, 24, 64, 64)
            self.inc4d = L.Inception(None, 112, 144, 288, 32, 64, 64)
            self.inc4e = L.Inception(None, 256, 160, 320, 32, 128, 128)
            self.inc5a = L.Inception(None, 256, 160, 320, 32, 128, 128)
            self.inc5b = L.Inception(None, 384, 192, 384, 48, 128, 128)
            # Main classifier head.
            self.loss3_fc = L.Linear(None, 1000)
            # First auxiliary classifier head (branches after inc4a).
            self.loss1_conv = L.Convolution2D(None, 128, 1)
            self.loss1_fc1 = L.Linear(None, 1024)
            self.loss1_fc2 = L.Linear(None, 1000)
            # Second auxiliary classifier head (branches after inc4d).
            self.loss2_conv = L.Convolution2D(None, 128, 1)
            self.loss2_fc1 = L.Linear(None, 1024)
            self.loss2_fc2 = L.Linear(None, 1000)

    def __call__(self, x, t):
        """Compute the combined training loss for batch ``x``, labels ``t``."""
        h = F.relu(self.conv1(x))
        h = F.local_response_normalization(
            F.max_pooling_2d(h, 3, stride=2), n=5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(
            F.local_response_normalization(h, n=5), 3, stride=2)
        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)
        # First auxiliary branch.
        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss1_conv(l))
        l = F.relu(self.loss1_fc1(l))
        l = self.loss1_fc2(l)
        loss1 = F.softmax_cross_entropy(l, t)
        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)
        # Second auxiliary branch.
        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss2_conv(l))
        l = F.relu(self.loss2_fc1(l))
        l = self.loss2_fc2(l)
        loss2 = F.softmax_cross_entropy(l, t)
        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)
        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.loss3_fc(F.dropout(h, 0.4))
        loss3 = F.softmax_cross_entropy(h, t)
        # Auxiliary losses are down-weighted relative to the main loss.
        loss = 0.3 * (loss1 + loss2) + loss3
        accuracy = F.accuracy(h, t)
        chainer.report({
            'loss': loss,
            'loss1': loss1,
            'loss2': loss2,
            'loss3': loss3,
            'accuracy': accuracy
        }, self)
        return loss
| 2,922
| 33.388235
| 71
|
py
|
chainer
|
chainer-master/examples/chainermn/imagenet/models/googlenetbn.py
|
import chainer
import chainer.functions as F
import chainer.links as L
class GoogLeNetBN(chainer.Chain):
    """New GoogLeNet of BatchNormalization version.

    Like :class:`GoogLeNet`, it has two auxiliary classifier heads; the
    total loss is ``0.3 * (loss1 + loss2) + loss3`` and accuracy is
    reported for the main head.
    """

    # Input image side length expected by this network.
    insize = 224

    def __init__(self):
        super(GoogLeNetBN, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(
                None, 64, 7, stride=2, pad=3, nobias=True)
            self.norm1 = L.BatchNormalization(64)
            self.conv2 = L.Convolution2D(None, 192, 3, pad=1, nobias=True)
            self.norm2 = L.BatchNormalization(192)
            self.inc3a = L.InceptionBN(
                None, 64, 64, 64, 64, 96, 'avg', 32)
            self.inc3b = L.InceptionBN(
                None, 64, 64, 96, 64, 96, 'avg', 64)
            self.inc3c = L.InceptionBN(
                None, 0, 128, 160, 64, 96, 'max', stride=2)
            self.inc4a = L.InceptionBN(
                None, 224, 64, 96, 96, 128, 'avg', 128)
            self.inc4b = L.InceptionBN(
                None, 192, 96, 128, 96, 128, 'avg', 128)
            self.inc4c = L.InceptionBN(
                None, 128, 128, 160, 128, 160, 'avg', 128)
            self.inc4d = L.InceptionBN(
                None, 64, 128, 192, 160, 192, 'avg', 128)
            self.inc4e = L.InceptionBN(
                None, 0, 128, 192, 192, 256, 'max', stride=2)
            self.inc5a = L.InceptionBN(
                None, 352, 192, 320, 160, 224, 'avg', 128)
            self.inc5b = L.InceptionBN(
                None, 352, 192, 320, 192, 224, 'max', 128)
            # Main classifier head.
            self.out = L.Linear(None, 1000)
            # First auxiliary head (branches after inc4a).
            self.conva = L.Convolution2D(None, 128, 1, nobias=True)
            self.norma = L.BatchNormalization(128)
            self.lina = L.Linear(None, 1024, nobias=True)
            self.norma2 = L.BatchNormalization(1024)
            self.outa = L.Linear(None, 1000)
            # Second auxiliary head (branches after inc4d).
            self.convb = L.Convolution2D(None, 128, 1, nobias=True)
            self.normb = L.BatchNormalization(128)
            self.linb = L.Linear(None, 1024, nobias=True)
            self.normb2 = L.BatchNormalization(1024)
            self.outb = L.Linear(None, 1000)

    def __call__(self, x, t):
        """Compute the combined training loss for batch ``x``, labels ``t``."""
        h = F.max_pooling_2d(
            F.relu(self.norm1(self.conv1(x))), 3, stride=2, pad=1)
        h = F.max_pooling_2d(
            F.relu(self.norm2(self.conv2(h))), 3, stride=2, pad=1)
        h = self.inc3a(h)
        h = self.inc3b(h)
        h = self.inc3c(h)
        h = self.inc4a(h)
        # First auxiliary branch.
        a = F.average_pooling_2d(h, 5, stride=3)
        a = F.relu(self.norma(self.conva(a)))
        a = F.relu(self.norma2(self.lina(a)))
        a = self.outa(a)
        loss1 = F.softmax_cross_entropy(a, t)
        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)
        # Second auxiliary branch.
        b = F.average_pooling_2d(h, 5, stride=3)
        b = F.relu(self.normb(self.convb(b)))
        b = F.relu(self.normb2(self.linb(b)))
        b = self.outb(b)
        loss2 = F.softmax_cross_entropy(b, t)
        h = self.inc4e(h)
        h = self.inc5a(h)
        h = F.average_pooling_2d(self.inc5b(h), 7)
        h = self.out(h)
        loss3 = F.softmax_cross_entropy(h, t)
        # Auxiliary losses are down-weighted relative to the main loss.
        loss = 0.3 * (loss1 + loss2) + loss3
        accuracy = F.accuracy(h, t)
        chainer.report({
            'loss': loss,
            'loss1': loss1,
            'loss2': loss2,
            'loss3': loss3,
            'accuracy': accuracy,
        }, self)
        return loss
| 3,429
| 34
| 74
|
py
|
chainer
|
chainer-master/examples/chainermn/imagenet/models/resnet50.py
|
# Original author: yasunorikudo
# (https://github.com/yasunorikudo/chainer-ResNet)
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
class BottleNeckA(chainer.Chain):
    """Downsampling bottleneck unit with a projection shortcut.

    The main path is 1x1 -> 3x3 -> 1x1 convolutions (each followed by
    batch normalization); the shortcut is a strided 1x1 convolution
    (conv4/bn4) so the two branches have matching shapes.

    Args:
        in_size: Number of input channels.
        ch: Number of channels inside the bottleneck.
        out_size: Number of output channels.
        stride: Stride of the first convolution and of the shortcut.
    """

    def __init__(self, in_size, ch, out_size, stride=2):
        super(BottleNeckA, self).__init__()
        initialW = initializers.HeNormal()
        with self.init_scope():
            self.conv1 = L.Convolution2D(
                in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
            self.bn1 = L.BatchNormalization(ch)
            self.conv2 = L.Convolution2D(
                ch, ch, 3, 1, 1, initialW=initialW, nobias=True)
            self.bn2 = L.BatchNormalization(ch)
            self.conv3 = L.Convolution2D(
                ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
            self.bn3 = L.BatchNormalization(out_size)
            # Projection shortcut matching the main path's shape.
            self.conv4 = L.Convolution2D(
                in_size, out_size, 1, stride, 0,
                initialW=initialW, nobias=True)
            self.bn4 = L.BatchNormalization(out_size)

    def __call__(self, x):
        h1 = F.relu(self.bn1(self.conv1(x)))
        h1 = F.relu(self.bn2(self.conv2(h1)))
        h1 = self.bn3(self.conv3(h1))
        h2 = self.bn4(self.conv4(x))
        # Residual addition followed by the unit's output activation.
        return F.relu(h1 + h2)
class BottleNeckB(chainer.Chain):
    """Identity-shortcut bottleneck unit (input and output shapes match).

    Args:
        in_size: Number of input (and output) channels.
        ch: Number of channels inside the bottleneck.
    """

    def __init__(self, in_size, ch):
        super(BottleNeckB, self).__init__()
        initialW = initializers.HeNormal()
        with self.init_scope():
            self.conv1 = L.Convolution2D(
                in_size, ch, 1, 1, 0, initialW=initialW, nobias=True)
            self.bn1 = L.BatchNormalization(ch)
            self.conv2 = L.Convolution2D(
                ch, ch, 3, 1, 1, initialW=initialW, nobias=True)
            self.bn2 = L.BatchNormalization(ch)
            self.conv3 = L.Convolution2D(
                ch, in_size, 1, 1, 0, initialW=initialW, nobias=True)
            self.bn3 = L.BatchNormalization(in_size)

    def __call__(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        # Identity shortcut: add the unchanged input.
        return F.relu(h + x)
class Block(chainer.ChainList):
    """One ResNet stage: a BottleNeckA followed by ``layer - 1`` BottleNeckBs.

    Args:
        layer: Total number of bottleneck units in the stage.
        in_size: Number of input channels to the stage.
        ch: Number of channels inside each bottleneck.
        out_size: Number of output channels of the stage.
        stride: Stride of the first (downsampling) unit.
    """

    def __init__(self, layer, in_size, ch, out_size, stride=2):
        super(Block, self).__init__()
        # First unit changes channel count (and possibly resolution).
        self.add_link(BottleNeckA(in_size, ch, out_size, stride))
        # Remaining units preserve the shape; the loop index is unused.
        for _ in range(layer - 1):
            self.add_link(BottleNeckB(out_size, ch))

    def __call__(self, x):
        # Apply each bottleneck unit in order.
        for f in self.children():
            x = f(x)
        return x
class ResNet50(chainer.Chain):
    """ResNet-50 classifier reporting softmax cross-entropy loss."""

    # Input image side length expected by this network.
    insize = 224

    def __init__(self):
        super(ResNet50, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(
                3, 64, 7, 2, 3, initialW=initializers.HeNormal())
            self.bn1 = L.BatchNormalization(64)
            # Four residual stages: (units, in_ch, bottleneck_ch, out_ch).
            self.res2 = Block(3, 64, 64, 256, 1)
            self.res3 = Block(4, 256, 128, 512)
            self.res4 = Block(6, 512, 256, 1024)
            self.res5 = Block(3, 1024, 512, 2048)
            self.fc = L.Linear(2048, 1000)

    def __call__(self, x, t):
        """Compute softmax cross-entropy loss for batch ``x`` and labels ``t``.

        Also reports the loss and accuracy through ``chainer.report``.
        """
        h = self.bn1(self.conv1(x))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.res2(h)
        h = self.res3(h)
        h = self.res4(h)
        h = self.res5(h)
        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.fc(h)
        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
| 3,533
| 31.422018
| 74
|
py
|
chainer
|
chainer-master/examples/chainermn/imagenet/models/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/examples/chainermn/imagenet/models/alex.py
|
import chainer
import chainer.functions as F
import chainer.links as L
class Alex(chainer.Chain):
    """Single-GPU AlexNet without partition toward the channel axis."""

    # Input image side length expected by this network.
    insize = 227

    def __init__(self):
        super(Alex, self).__init__()
        with self.init_scope():
            # Input channel counts are inferred from the first forward pass.
            self.conv1 = L.Convolution2D(None, 96, 11, stride=4)
            self.conv2 = L.Convolution2D(None, 256, 5, pad=2)
            self.conv3 = L.Convolution2D(None, 384, 3, pad=1)
            self.conv4 = L.Convolution2D(None, 384, 3, pad=1)
            self.conv5 = L.Convolution2D(None, 256, 3, pad=1)
            self.fc6 = L.Linear(None, 4096)
            self.fc7 = L.Linear(None, 4096)
            self.fc8 = L.Linear(None, 1000)

    def __call__(self, x, t):
        """Compute softmax cross-entropy loss for batch ``x`` and labels ``t``.

        Also reports the loss and accuracy through ``chainer.report``.
        """
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)
        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
| 1,365
| 34.025641
| 74
|
py
|
chainer
|
chainer-master/examples/chainermn/mnist/train_mnist_model_parallel.py
|
#!/usr/bin/env python
# coding: utf-8
import argparse
import chainer
import chainer.cuda
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainermn
import chainermn.datasets
import chainermn.functions
chainer.disable_experimental_feature_warning = True
class MLP0SubA(chainer.Chain):
    """First half of MLP0: the input layer that runs on worker 0."""

    def __init__(self, comm, n_out):
        super(MLP0SubA, self).__init__(
            l1=L.Linear(784, n_out))

    def __call__(self, x):
        hidden = self.l1(x)
        return F.relu(hidden)
class MLP0SubB(chainer.Chain):
    """Second half of MLP0: an identity link that receives worker 1's output."""

    def __init__(self, comm):
        super(MLP0SubB, self).__init__()

    def __call__(self, y):
        # Pass through unchanged; the computation happens on worker 1.
        return y
class MLP0(chainermn.MultiNodeChainList):
    # Model on worker 0.
    # Sends the first layer's activation to worker 1 (rank_out=1) and
    # receives the final output back from worker 1 (rank_in=1).
    def __init__(self, comm, n_out):
        super(MLP0, self).__init__(comm=comm)
        self.add_link(MLP0SubA(comm, n_out), rank_in=None, rank_out=1)
        self.add_link(MLP0SubB(comm), rank_in=1, rank_out=None)
class MLP1Sub(chainer.Chain):
    """Hidden and output layers evaluated on worker 1."""

    def __init__(self, n_units, n_out):
        super(MLP1Sub, self).__init__(
            l2=L.Linear(None, n_units),
            l3=L.Linear(None, n_out))

    def __call__(self, h0):
        hidden = F.relu(self.l2(h0))
        return self.l3(hidden)
class MLP1(chainermn.MultiNodeChainList):
    # Model on worker 1.
    # Receives worker 0's hidden activation (rank_in=0) and sends the
    # network output back to worker 0 (rank_out=0).
    def __init__(self, comm, n_units, n_out):
        super(MLP1, self).__init__(comm=comm)
        self.add_link(MLP1Sub(n_units, n_out), rank_in=0, rank_out=0)
def main():
    """Model-parallel MNIST training entry point.

    Requires exactly two processes: worker 0 holds the input/output halves
    (MLP0) and worker 1 holds the hidden layers (MLP1); activations are
    exchanged between them through the ChainerMN communicator.
    """
    parser = argparse.ArgumentParser(
        description='ChainerMN example: pipelined neural network')
    parser.add_argument('--batchsize', '-b', type=int, default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', action='store_true',
                        help='Use GPU')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--unit', '-u', type=int, default=1000,
                        help='Number of units')
    args = parser.parse_args()
    # Prepare ChainerMN communicator.
    if args.gpu:
        comm = chainermn.create_communicator('pure_nccl')
        device = comm.intra_rank
    else:
        comm = chainermn.create_communicator('naive')
        device = -1
    if comm.size != 2:
        raise ValueError(
            'This example can only be executed on exactly 2 processes.')
    if comm.rank == 0:
        print('==========================================')
        if args.gpu:
            print('Using GPUs')
        print('Num unit: {}'.format(args.unit))
        print('Num Minibatch-size: {}'.format(args.batchsize))
        print('Num epoch: {}'.format(args.epoch))
        print('==========================================')
    # Each worker instantiates only its own half of the pipelined model.
    if comm.rank == 0:
        model = L.Classifier(MLP0(comm, args.unit))
    elif comm.rank == 1:
        model = MLP1(comm, args.unit, 10)
    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()
        model.to_gpu()
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    # Iterate dataset only on worker 0.
    train, test = chainer.datasets.get_mnist()
    if comm.rank == 1:
        train = chainermn.datasets.create_empty_dataset(train)
        test = chainermn.datasets.create_empty_dataset(test)
    train_iter = chainer.iterators.SerialIterator(
        train, args.batchsize, shuffle=False)
    test_iter = chainer.iterators.SerialIterator(
        test, args.batchsize, repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    trainer.extend(extensions.Evaluator(test_iter, model, device=device))
    # Some display and output extensions are necessary only for worker 0.
    if comm.rank == 0:
        trainer.extend(extensions.DumpGraph('main/loss'))
        trainer.extend(extensions.LogReport())
        trainer.extend(extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss',
             'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
        trainer.extend(extensions.ProgressBar())
    trainer.run()


if __name__ == '__main__':
    main()
| 4,438
| 30.48227
| 76
|
py
|
chainer
|
chainer-master/examples/chainermn/mnist/train_mnist_dual_parallel.py
|
#!/usr/bin/env python
# coding: utf-8
import argparse
import chainer
import chainer.cuda
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainermn
import chainermn.datasets
import chainermn.functions
chainer.disable_experimental_feature_warning = True
class MLP0SubA(chainer.Chain):
    """First half of MLP0: the input layer that runs on data-axis-0 workers."""

    def __init__(self, comm, n_out):
        super(MLP0SubA, self).__init__(
            l1=L.Linear(784, n_out))

    def __call__(self, x):
        hidden = self.l1(x)
        return F.relu(hidden)
class MLP0SubB(chainer.Chain):
    """Second half of MLP0: an identity link that receives the peer's output."""

    def __init__(self, comm):
        super(MLP0SubB, self).__init__()

    def __call__(self, y):
        # Pass through unchanged; the computation happens on the peer worker.
        return y
class MLP0(chainermn.MultiNodeChainList):
    # Model on worker 0.
    # Sends the first layer's activation to rank 1 of the model
    # communicator and receives the final output back from rank 1.
    def __init__(self, comm, n_out):
        super(MLP0, self).__init__(comm=comm)
        self.add_link(MLP0SubA(comm, n_out), rank_in=None, rank_out=1)
        self.add_link(MLP0SubB(comm), rank_in=1, rank_out=None)
class MLP1Sub(chainer.Chain):
    """Hidden and output layers evaluated on the peer worker."""

    def __init__(self, n_units, n_out):
        super(MLP1Sub, self).__init__(
            l2=L.Linear(None, n_units),
            l3=L.Linear(None, n_out))

    def __call__(self, h0):
        hidden = F.relu(self.l2(h0))
        return self.l3(hidden)
class MLP1(chainermn.MultiNodeChainList):
    # Model on worker 1.
    # Receives rank 0's hidden activation and sends the network output
    # back to rank 0 of the model communicator.
    def __init__(self, comm, n_units, n_out):
        super(MLP1, self).__init__(comm=comm)
        self.add_link(MLP1Sub(n_units, n_out), rank_in=0, rank_out=0)
def main():
    """Combined data- and model-parallel MNIST training entry point.

    The world communicator is split along two axes: ``model_comm`` pairs
    each data-parallel worker with its model-parallel peer (must have
    size 2), while ``data_comm`` groups the workers that share the same
    model half for gradient averaging and dataset scattering.
    """
    parser = argparse.ArgumentParser(
        description='ChainerMN example: pipelined neural network')
    parser.add_argument('--batchsize', '-b', type=int, default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', action='store_true',
                        help='Use GPU')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--unit', '-u', type=int, default=1000,
                        help='Number of units')
    args = parser.parse_args()
    # Prepare ChainerMN communicator.
    # Even ranks form data-axis 0, odd ranks data-axis 1; consecutive
    # rank pairs form one model-parallel group.
    if args.gpu:
        comm = chainermn.create_communicator('pure_nccl')
        data_axis, model_axis = comm.rank % 2, comm.rank // 2
        data_comm = comm.split(data_axis, comm.rank)
        model_comm = comm.split(model_axis, comm.rank)
        device = comm.intra_rank
    else:
        comm = chainermn.create_communicator('naive')
        data_axis, model_axis = comm.rank % 2, comm.rank // 2
        data_comm = comm.split(data_axis, comm.rank)
        model_comm = comm.split(model_axis, comm.rank)
        device = -1
    if model_comm.size != 2:
        raise ValueError(
            'This example can only be executed on the even number '
            'of processes.')
    if comm.rank == 0:
        print('==========================================')
        if args.gpu:
            print('Using GPUs')
        print('Num unit: {}'.format(args.unit))
        print('Num Minibatch-size: {}'.format(args.batchsize))
        print('Num epoch: {}'.format(args.epoch))
        print('==========================================')
    # Each data-axis holds one half of the pipelined model.
    if data_axis == 0:
        model = L.Classifier(MLP0(model_comm, args.unit))
    elif data_axis == 1:
        model = MLP1(model_comm, args.unit, 10)
    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()
        model.to_gpu()
    # Gradients are averaged across the data-parallel replicas.
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.Adam(), data_comm)
    optimizer.setup(model)
    # Original dataset on worker 0 and 1.
    # Datasets of worker 0 and 1 are split and distributed to all workers.
    if model_axis == 0:
        train, test = chainer.datasets.get_mnist()
        if data_axis == 1:
            train = chainermn.datasets.create_empty_dataset(train)
            test = chainermn.datasets.create_empty_dataset(test)
    else:
        train, test = None, None
    train = chainermn.scatter_dataset(train, data_comm, shuffle=True)
    test = chainermn.scatter_dataset(test, data_comm, shuffle=True)
    train_iter = chainer.iterators.SerialIterator(
        train, args.batchsize, shuffle=False)
    test_iter = chainer.iterators.SerialIterator(
        test, args.batchsize, repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    evaluator = extensions.Evaluator(test_iter, model, device=device)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, data_comm)
    trainer.extend(evaluator)
    # Some display and output extensions are necessary only for worker 0.
    if comm.rank == 0:
        trainer.extend(extensions.DumpGraph('main/loss'))
        trainer.extend(extensions.LogReport())
        trainer.extend(extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss',
             'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
        trainer.extend(extensions.ProgressBar())
    trainer.run()


if __name__ == '__main__':
    main()
| 5,279
| 32.630573
| 76
|
py
|
chainer
|
chainer-master/examples/chainermn/mnist/train_mnist.py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainerx
import chainermn
class MLP(chainer.Chain):
    """Three-layer fully-connected network with ReLU activations."""

    def __init__(self, n_units, n_out):
        super(MLP, self).__init__()
        with self.init_scope():
            # the size of the inputs to each layer will be inferred
            self.l1 = L.Linear(None, n_units)  # n_in -> n_units
            self.l2 = L.Linear(None, n_units)  # n_units -> n_units
            self.l3 = L.Linear(None, n_out)  # n_units -> n_out

    def __call__(self, x):
        out = F.relu(self.l1(x))
        out = F.relu(self.l2(out))
        return self.l3(out)
def main():
    """Data-parallel MNIST training entry point.

    Sets up a ChainerMN communicator (GPU, CPU, or ChainerX backends),
    scatters the MNIST dataset from rank 0 to all workers, and trains an
    MLP classifier with a multi-node optimizer and evaluator.
    """
    parser = argparse.ArgumentParser(description='ChainerMN example: MNIST')
    parser.add_argument('--batchsize', '-b', type=int, default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--communicator', type=str,
                        default='pure_nccl', help='Type of communicator')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', action='store_true',
                        help='Use GPU')
    parser.add_argument('--chainerx', '-x', action='store_true',
                        default=False, help='Use ChainerX')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit', '-u', type=int, default=1000,
                        help='Number of units')
    args = parser.parse_args()
    # Prepare ChainerMN communicator.
    if args.gpu:
        if args.communicator == 'naive':
            print('Error: \'naive\' communicator does not support GPU.\n')
            exit(-1)
        comm = chainermn.create_communicator(args.communicator)
        if args.chainerx:
            device = chainer.get_device('cuda:{}'.format(comm.intra_rank))
        else:
            device = chainer.get_device(comm.intra_rank)
    else:
        if args.communicator != 'naive':
            print('Warning: using naive communicator '
                  'because only naive supports CPU-only execution')
        comm = chainermn.create_communicator('naive')
        if args.chainerx:
            device = chainer.get_device('native')
        else:
            device = chainer.get_device(-1)
    if comm.rank == 0:
        print('==========================================')
        print('Num process (COMM_WORLD): {}'.format(comm.size))
        if args.gpu:
            print('Using GPUs')
        print('Using {} communicator'.format(args.communicator))
        print('Num unit: {}'.format(args.unit))
        print('Num Minibatch-size: {}'.format(args.batchsize))
        print('Num epoch: {}'.format(args.epoch))
        print('==========================================')
    model = L.Classifier(MLP(args.unit, 10))
    model.to_device(device)
    # Create a multi node optimizer from a standard Chainer optimizer.
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.Adam(), comm)
    optimizer.setup(model)
    # Split and distribute the dataset. Only worker 0 loads the whole dataset.
    # Datasets of worker 0 are evenly split and distributed to all workers.
    if comm.rank == 0:
        train, test = chainer.datasets.get_mnist()
    else:
        train, test = None, None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    test = chainermn.scatter_dataset(test, comm, shuffle=True)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
                                                 repeat=False, shuffle=False)
    updater = training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    # Create a multi node evaluator from a standard Chainer evaluator.
    evaluator = extensions.Evaluator(test_iter, model, device=device)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
    trainer.extend(evaluator)
    # Some display and output extensions are necessary only for one worker.
    # (Otherwise, there would just be repeated outputs.)
    if comm.rank == 0:
        if device.xp is not chainerx:
            # Disabled for ChainerX.
            # This is because ChainerX doesn't have a public API set
            # to traverse computational graphs.
            # See examples/mnist/train_mnist.py
            trainer.extend(extensions.DumpGraph('main/loss'))
        trainer.extend(extensions.LogReport())
        trainer.extend(extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss',
             'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
        trainer.extend(extensions.ProgressBar())
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)
    trainer.run()


if __name__ == '__main__':
    main()
| 5,320
| 37.839416
| 78
|
py
|
chainer
|
chainer-master/examples/chainermn/cifar/train_cifar.py
|
import argparse
import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions
from chainer.datasets import get_cifar10
from chainer.datasets import get_cifar100
import chainermn
import models.VGG
def main():
    """Train a VGG-style classifier on CIFAR-10/100 with ChainerMN.

    Parses command-line options, creates a multi-node communicator,
    scatters the dataset from rank 0 to every worker, and runs a standard
    Chainer training loop wrapped with a multi-node optimizer/evaluator.
    """
    parser = argparse.ArgumentParser(description='ChainerMN example: CIFAR')
    parser.add_argument('--dataset', '-d', default='cifar10',
                        help='The dataset to use: cifar10 or cifar100')
    parser.add_argument('--batchsize', '-b', type=int, default=64,
                        help='Number of images per GPU in a mini-batch')
    parser.add_argument('--learnrate', '-l', type=float, default=0.05,
                        help='Learning rate for SGD')
    parser.add_argument('--epoch', '-e', type=int, default=300,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', action='store_true',
                        help='Use GPU (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--communicator', type=str, default='flat',
                        help='Type of communicator')
    args = parser.parse_args()
    # Prepare ChainerMN communicator.
    # On GPU, each process drives the GPU matching its intra-node rank;
    # on CPU, the 'naive' communicator is used and device -1 means CPU.
    if args.gpu:
        comm = chainermn.create_communicator(args.communicator)
        device = comm.intra_rank
    else:
        comm = chainermn.create_communicator('naive')
        device = -1
    # Only rank 0 prints the run configuration to avoid duplicated output.
    if comm.rank == 0:
        print('==========================================')
        print('Num process (COMM_WORLD): {}'.format(comm.size))
        if args.gpu:
            print('Using GPUs')
        print('Using {} communicator'.format(args.communicator))
        print('Num Minibatch-size: {}'.format(args.batchsize))
        print('Num epoch: {}'.format(args.epoch))
        print('==========================================')
    # Set up a neural network to train.
    # Classifier reports softmax cross entropy loss and accuracy at every
    # iteration, which will be used by the PrintReport extension below.
    dataset_info = {
        'cifar10': {'n_class_labels': 10, 'load_func': get_cifar10},
        'cifar100': {'n_class_labels': 100, 'load_func': get_cifar100},
    }
    if args.dataset not in dataset_info:
        raise RuntimeError('Invalid dataset choice.')
    n_class_labels = dataset_info[args.dataset]['n_class_labels']
    # Only rank 0 loads the whole dataset; scatter_dataset then splits it
    # and distributes the shards to all workers (other ranks pass None).
    if comm.rank == 0:
        train, test = dataset_info[args.dataset]['load_func']()
    else:
        train, test = None, None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    test = chainermn.scatter_dataset(test, comm, shuffle=True)
    model = L.Classifier(models.VGG.VGG(n_class_labels))
    if device >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(device).use()
        model.to_gpu()  # Copy the model to the GPU
    # Wrap a standard Chainer optimizer so gradients are all-reduced
    # across workers before each update.
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.MomentumSGD(args.learnrate), comm)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(5e-4))
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize,
                                                  shuffle=False)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
                                                 repeat=False, shuffle=False)
    # Set up a trainer
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    # Evaluate the model with the test dataset for each epoch
    evaluator = extensions.Evaluator(test_iter, model, device=device)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
    trainer.extend(evaluator)
    # Halve the learning rate every 25 epochs.
    trainer.extend(extensions.ExponentialShift('lr', 0.5),
                   trigger=(25, 'epoch'))
    # Reporting/snapshot extensions only on rank 0 to avoid duplicates.
    if comm.rank == 0:
        trainer.extend(extensions.dump_graph('main/loss'))
        trainer.extend(extensions.snapshot(
            filename='snaphot_epoch_{.updater.epoch}'))
        trainer.extend(extensions.LogReport())
        trainer.extend(extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss',
             'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
        trainer.extend(extensions.ProgressBar())
    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)
    # Run the training
    trainer.run()
# Standard script entry point: run the CIFAR training defined in main().
if __name__ == '__main__':
    main()
| 4,765
| 37.128
| 77
|
py
|
chainer
|
chainer-master/examples/chainermn/cifar/models/VGG.py
|
from chainer.utils import argument
import chainer
import chainer.functions as F
import chainer.links as L
import warnings
class Block(chainer.Chain):
    """Conv -> BatchNorm -> ReLU unit with a square kernel.

    The convolution infers its input channel count lazily (``None``) and
    uses a square ``ksize`` x ``ksize`` filter.

    Args:
        out_channels (int): Number of convolution output channels.
        ksize (int): Side length of the square convolution filter.
        pad (int): Zero-padding applied to the convolution input.
    """

    def __init__(self, out_channels, ksize, pad=1):
        super(Block, self).__init__()
        with self.init_scope():
            # Bias is omitted because batch normalization follows.
            self.conv = L.Convolution2D(None, out_channels, ksize, pad=pad,
                                        nobias=True)
            self.bn = L.BatchNormalization(out_channels)

    def forward(self, x):
        # Normalize the convolution output before the non-linearity.
        return F.relu(self.bn(self.conv(x)))
class VGG(chainer.Chain):
    """A VGG-style network for very small images.

    This model is based on the VGG-style model from
    http://torch.ch/blog/2015/07/30/cifar.html
    which is based on the network architecture from the paper:
    https://arxiv.org/pdf/1409.1556v6.pdf

    This model is intended to be used with either RGB or greyscale input
    images that are of size 32x32 pixels, such as those in the CIFAR10
    and CIFAR100 datasets.

    On CIFAR10, it achieves approximately 89% accuracy on the test set with
    no data augmentation.
    On CIFAR100, it achieves approximately 63% accuracy on the test set with
    no data augmentation.

    Args:
        n_class_labels (int): The number of class labels.
        num_class_labels (int): Deprecated alias of ``n_class_labels``.
    """

    def __init__(self, n_class_labels=10, **kwargs):
        # Accept the deprecated keyword name for backward compatibility.
        old_n_class_labels = argument.parse_kwargs(
            kwargs, ('num_class_labels', None))
        # parse_kwargs returns its values as a tuple; unwrap the single
        # entry so the ``is not None`` check below sees the actual value
        # rather than a 1-tuple (which is always truthy and would re-bind
        # n_class_labels to a tuple).  NOTE(review): confirm against the
        # installed chainer version -- older variants returned a bare value.
        if isinstance(old_n_class_labels, tuple):
            old_n_class_labels, = old_n_class_labels
        if old_n_class_labels is not None:
            n_class_labels = old_n_class_labels
            # Fixed message: the original told users to keep using the
            # deprecated name and was missing a space between sentences.
            warnings.warn(
                'num_class_labels is deprecated. '
                'Please use n_class_labels instead.',
                DeprecationWarning)
        super(VGG, self).__init__()
        with self.init_scope():
            self.block1_1 = Block(64, 3)
            self.block1_2 = Block(64, 3)
            self.block2_1 = Block(128, 3)
            self.block2_2 = Block(128, 3)
            self.block3_1 = Block(256, 3)
            self.block3_2 = Block(256, 3)
            self.block3_3 = Block(256, 3)
            self.block4_1 = Block(512, 3)
            self.block4_2 = Block(512, 3)
            self.block4_3 = Block(512, 3)
            self.block5_1 = Block(512, 3)
            self.block5_2 = Block(512, 3)
            self.block5_3 = Block(512, 3)
            self.fc1 = L.Linear(None, 512, nobias=True)
            self.bn_fc1 = L.BatchNormalization(512)
            self.fc2 = L.Linear(None, n_class_labels, nobias=True)

    def forward(self, x):
        """Compute class scores (logits) for a batch of 32x32 images."""
        # 64 channel blocks:
        h = self.block1_1(x)
        h = F.dropout(h, ratio=0.3)
        h = self.block1_2(h)
        h = F.max_pooling_2d(h, ksize=2, stride=2)
        # 128 channel blocks:
        h = self.block2_1(h)
        h = F.dropout(h, ratio=0.4)
        h = self.block2_2(h)
        h = F.max_pooling_2d(h, ksize=2, stride=2)
        # 256 channel blocks:
        h = self.block3_1(h)
        h = F.dropout(h, ratio=0.4)
        h = self.block3_2(h)
        h = F.dropout(h, ratio=0.4)
        h = self.block3_3(h)
        h = F.max_pooling_2d(h, ksize=2, stride=2)
        # 512 channel blocks:
        h = self.block4_1(h)
        h = F.dropout(h, ratio=0.4)
        h = self.block4_2(h)
        h = F.dropout(h, ratio=0.4)
        h = self.block4_3(h)
        h = F.max_pooling_2d(h, ksize=2, stride=2)
        # 512 channel blocks:
        h = self.block5_1(h)
        h = F.dropout(h, ratio=0.4)
        h = self.block5_2(h)
        h = F.dropout(h, ratio=0.4)
        h = self.block5_3(h)
        h = F.max_pooling_2d(h, ksize=2, stride=2)
        h = F.dropout(h, ratio=0.5)
        h = self.fc1(h)
        h = self.bn_fc1(h)
        h = F.relu(h)
        h = F.dropout(h, ratio=0.5)
        return self.fc2(h)
| 4,303
| 30.647059
| 76
|
py
|
chainer
|
chainer-master/examples/chainermn/cifar/models/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/examples/chainermn/seq2seq/europal.py
|
from __future__ import unicode_literals
import collections
import gzip
import io
import os
import re
import numpy
import progressbar
split_pattern = re.compile(r'([.,!?"\':;)(])')
digit_pattern = re.compile(r'\d')


def split_sentence(s):
    """Tokenize a sentence for the seq2seq example.

    Lowercases the text, normalizes right single quotes to ASCII
    apostrophes, maps every digit to '0', and splits punctuation
    characters out as separate tokens.
    """
    # Normalize first, then whitespace-split, then peel punctuation off
    # each chunk (the capture group keeps delimiters as tokens).
    text = digit_pattern.sub('0', s.lower().replace('\u2019', '\''))
    tokens = []
    for chunk in text.strip().split():
        tokens += [t for t in split_pattern.split(chunk) if t]
    return tokens
def open_file(path):
    """Open a text file for reading, transparently handling gzip.

    A path ending in ``.gz`` is opened as gzip text.  Otherwise, if a
    gzipped sibling (``path + '.gz'``) exists it is used instead; failing
    that, the plain file is opened as UTF-8 with undecodable bytes ignored.
    """
    if path.endswith('.gz'):
        return gzip.open(path, 'rt', encoding='utf-8')
    gz_path = path + '.gz'
    if os.path.exists(gz_path):
        # Prefer the compressed version when it is present.
        return open_file(gz_path)
    return io.open(path, encoding='utf-8', errors='ignore')
def count_lines(path):
    """Return the number of lines in ``path`` (echoes the path first)."""
    print(path)
    with open_file(path) as f:
        return sum(1 for _ in f)
def read_file(path):
    """Yield each line of ``path`` as a list of tokens, with a progress bar."""
    total = count_lines(path)
    bar = progressbar.ProgressBar()
    with open_file(path) as f:
        # max_value lets the bar show completion percentage.
        for line in bar(f, max_value=total):
            yield split_sentence(line)
def count_words(path, max_vocab_size=40000):
    """Build a vocabulary of the most frequent tokens in a corpus file.

    Args:
        path (str): Path to the (possibly gzipped) text file to scan.
        max_vocab_size (int): Upper bound on the vocabulary size.
            Defaults to 40000, matching the original hard-coded limit.

    Returns:
        list of str: Tokens ordered by descending frequency.
    """
    counts = collections.Counter()
    for words in read_file(path):
        # Counter.update counts every token of the sentence in one call,
        # replacing the manual per-word increment loop.
        counts.update(words)
    return [word for (word, _) in counts.most_common(max_vocab_size)]
def make_dataset(path, vocab):
    """Convert a corpus file into a list of int32 id arrays.

    Args:
        path (str): Path to the (possibly gzipped) text file.
        vocab (list of str): Vocabulary; a token's index is its id.
            Out-of-vocabulary tokens map to id 1.

    Returns:
        list of numpy.ndarray: One int32 id array per input line.
    """
    word_id = {word: index for index, word in enumerate(vocab)}
    dataset = []
    token_count = 0
    unknown_count = 0
    for words in read_file(path):
        array = make_array(word_id, words)
        dataset.append(array)
        token_count += array.size
        unknown_count += (array == 1).sum()
    print('# of tokens: %d' % token_count)
    if token_count > 0:
        print('# of unknown: %d (%.2f %%)'
              % (unknown_count, 100. * unknown_count / token_count))
    else:
        # Guard against ZeroDivisionError on an empty corpus.
        print('# of unknown: 0 (0.00 %)')
    return dataset
def make_array(word_id, words):
    """Map tokens to an int32 id array; unknown tokens get id 1."""
    return numpy.array([word_id.get(w, 1) for w in words], numpy.int32)
# Ad-hoc smoke test: build a vocabulary and dataset from a hard-coded
# WMT corpus path (expects the file to exist relative to the CWD).
if __name__ == '__main__':
    vocab = count_words('wmt/giga-fren.release2.fixed.en')
    make_dataset('wmt/giga-fren.release2.fixed.en', vocab)
| 2,164
| 23.325843
| 67
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.