code stringlengths 17 6.64M |
|---|
class BottleneckBase(nn.Module):
    """1-3-1 bottleneck residual block for sparse (Minkowski) tensors.

    Layout: 1x1 reduce -> 3x3 spatial (carries stride/dilation) -> 1x1 expand
    by `expansion`, with an optional downsampled shortcut.
    """
    expansion = 4
    NORM_TYPE = NormType.BATCH_NORM

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, conv_type=ConvType.HYPERCUBE, bn_momentum=0.1, D=3):
        super(BottleneckBase, self).__init__()
        expanded = planes * self.expansion
        # 1x1 channel reduction.
        self.conv1 = conv(inplanes, planes, kernel_size=1, D=D)
        self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
        # 3x3 spatial convolution; the only layer that strides/dilates.
        self.conv2 = conv(planes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
        self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
        # 1x1 channel expansion.
        self.conv3 = conv(planes, expanded, kernel_size=1, D=D)
        self.norm3 = get_norm(self.NORM_TYPE, expanded, D, bn_momentum=bn_momentum)
        self.relu = MinkowskiReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        # Shortcut is the input itself, or its projection when shapes change.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.norm1(self.conv1(x)))
        out = self.relu(self.norm2(self.conv2(out)))
        out = self.norm3(self.conv3(out))
        out += shortcut
        return self.relu(out)
|
class Bottleneck(BottleneckBase):
    """Bottleneck block using batch normalization (the base default, restated explicitly)."""
    NORM_TYPE = NormType.BATCH_NORM
|
class ResNetEncoder(ResNet):
    """ResNet backbone with the classification head removed, used as a feature encoder."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Only features are needed; drop the classifier head and pooling.
        del self.fc
        del self.avgpool

    def get_stages(self):
        """Return the six sequential stages of the backbone, shallowest first."""
        return [nn.Identity(), nn.Sequential(self.conv1, self.bn1, self.relu), nn.Sequential(self.maxpool, self.layer1), self.layer2, self.layer3, self.layer4]

    def forward(self, x):
        """Run all stages and return the deepest feature map.

        Fix: the original accumulated every intermediate stage output in a
        list only to return the last element, needlessly retaining all
        intermediate activations.
        """
        for stage in self.get_stages():
            x = stage(x)
        return x

    def load_state_dict(self, state_dict, **kwargs):
        """Load weights, tolerating checkpoints saved with the classifier head attached."""
        state_dict.pop('fc.bias', None)
        state_dict.pop('fc.weight', None)
        super().load_state_dict(state_dict, **kwargs)
|
class Model(MinkowskiNetwork):
    """Base class for Minkowski networks; stores channel counts and the config object."""
    # Output stride of the network in pixels; -1 means "undefined, subclasses must override".
    OUT_PIXEL_DIST = (- 1)
    def __init__(self, in_channels, out_channels, config, D, **kwargs):
        # D is the spatial dimension of the sparse tensors (e.g. 3, or 4 for space+time).
        super(Model, self).__init__(D)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.config = config
|
class ResNetBase(Model):
    """Generic sparse (Minkowski) ResNet: stem conv -> pool -> 4 residual stages -> 1x1 head.

    Subclasses must set BLOCK and LAYERS; the remaining class attributes are defaults.
    """
    BLOCK = None
    LAYERS = ()
    INIT_DIM = 64
    PLANES = (64, 128, 256, 512)
    OUT_PIXEL_DIST = 32
    HAS_LAST_BLOCK = False
    CONV_TYPE = ConvType.HYPERCUBE
    def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
        assert (self.BLOCK is not None)
        assert (self.OUT_PIXEL_DIST > 0)
        super(ResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
        self.network_initialization(in_channels, out_channels, config, D)
        self.weight_initialization()
    def network_initialization(self, in_channels, out_channels, config, D):
        """Build all layers; reads `config.dilations`, `config.opt.bn_momentum`, `config.conv1_kernel_size`."""
        # For 4D (space+time) networks, expand a scalar hyper-parameter to
        # [n, n, n, m]: n for the three spatial axes, m for the temporal axis.
        def space_n_time_m(n, m):
            return (n if (D == 3) else [n, n, n, m])
        if (D == 4):
            self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
        dilations = config.dilations
        bn_momentum = config.opt.bn_momentum
        self.inplanes = self.INIT_DIM
        self.conv1 = conv(in_channels, self.inplanes, kernel_size=space_n_time_m(config.conv1_kernel_size, 1), stride=1, D=D)
        self.bn1 = get_norm(NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum)
        self.relu = ME.MinkowskiReLU(inplace=True)
        self.pool = sum_pool(kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D)
        # Each stage strides by 2 in space (1 in time for D == 4).
        self.layer1 = self._make_layer(self.BLOCK, self.PLANES[0], self.LAYERS[0], stride=space_n_time_m(2, 1), dilation=space_n_time_m(dilations[0], 1))
        self.layer2 = self._make_layer(self.BLOCK, self.PLANES[1], self.LAYERS[1], stride=space_n_time_m(2, 1), dilation=space_n_time_m(dilations[1], 1))
        self.layer3 = self._make_layer(self.BLOCK, self.PLANES[2], self.LAYERS[2], stride=space_n_time_m(2, 1), dilation=space_n_time_m(dilations[2], 1))
        self.layer4 = self._make_layer(self.BLOCK, self.PLANES[3], self.LAYERS[3], stride=space_n_time_m(2, 1), dilation=space_n_time_m(dilations[3], 1))
        # 1x1 projection to the requested number of output channels.
        self.final = conv((self.PLANES[3] * self.BLOCK.expansion), out_channels, kernel_size=1, bias=True, D=D)
    def weight_initialization(self):
        # Standard batch-norm init: unit scale, zero shift.
        for m in self.modules():
            if isinstance(m, ME.MinkowskiBatchNorm):
                nn.init.constant_(m.bn.weight, 1)
                nn.init.constant_(m.bn.bias, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_type=NormType.BATCH_NORM, bn_momentum=0.1):
        """Stack `blocks` residual blocks; the first one strides and (if needed) projects the shortcut."""
        downsample = None
        # A projection shortcut is required whenever resolution or width changes.
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False, D=self.D), get_norm(norm_type, (planes * block.expansion), D=self.D, bn_momentum=bn_momentum))
        layers = []
        layers.append(block(self.inplanes, planes, stride=stride, dilation=dilation, downsample=downsample, conv_type=self.CONV_TYPE, D=self.D))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, stride=1, dilation=dilation, conv_type=self.CONV_TYPE, D=self.D))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.pool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.final(x)
        return x
|
def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0, conv_type='subm', norm_fn=None):
    """Build an spconv block: sparse conv -> norm_fn(out_channels) -> ReLU.

    conv_type selects the sparse convolution flavor:
      'subm'          submanifold conv (no stride/padding),
      'spconv'        regular sparse conv (strided),
      'inverseconv'   inverse conv keyed to a previous conv,
      'transposeconv' transposed sparse conv.
    """
    shared = dict(bias=False, indice_key=indice_key)
    if conv_type == 'subm':
        conv_layer = spconv.SubMConv3d(in_channels, out_channels, kernel_size, **shared)
    elif conv_type == 'spconv':
        conv_layer = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, **shared)
    elif conv_type == 'inverseconv':
        conv_layer = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, **shared)
    elif conv_type == 'transposeconv':
        conv_layer = spconv.SparseConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, **shared)
    else:
        raise NotImplementedError
    return spconv.SparseSequential(conv_layer, norm_fn(out_channels), nn.ReLU())
|
class SparseBasicBlock(spconv.SparseModule):
    """Residual BasicBlock on spconv sparse tensors: two 3x3 submanifold convs with a shortcut."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None):
        super(SparseBasicBlock, self).__init__()
        assert (norm_fn is not None)
        # NOTE(review): given the assert above this is always True, so the convs
        # carry a bias even though each is followed by a norm layer — unusual
        # (bias is typically disabled before normalization); confirm intended.
        bias = (norm_fn is not None)
        self.conv1 = spconv.SubMConv3d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key)
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv3d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key)
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        # Norm and activation are applied to the sparse tensor's dense feature
        # matrix in place; the coordinate structure is untouched.
        out = self.conv1(x)
        out.features = self.bn1(out.features)
        out.features = self.relu(out.features)
        out = self.conv2(out)
        out.features = self.bn2(out.features)
        if (self.downsample is not None):
            identity = self.downsample(x)
        # Residual add on the feature matrices (assumes matching coordinates).
        out.features += identity.features
        out.features = self.relu(out.features)
        return out
|
class VoxelBackBone8x(nn.Module):
    """spconv 3D backbone with 8x spatial downsampling: stem + 4 conv stages + z-compressing output conv."""
    def __init__(self, input_channels, grid_size, **kwargs):
        super().__init__()
        norm_fn = partial(nn.BatchNorm1d, eps=0.001, momentum=0.01)
        # spconv spatial shape is ordered [z, y, x]; one extra voxel of headroom on z.
        self.sparse_shape = (grid_size[::(- 1)] + [1, 0, 0])
        self.conv_input = spconv.SparseSequential(spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'), norm_fn(16), nn.ReLU())
        block = post_act_block
        # Stages: channels 16 -> 32 -> 64 -> 64; each 'spconv' block strides by 2.
        self.conv1 = spconv.SparseSequential(block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'))
        self.conv2 = spconv.SparseSequential(block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'), block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'))
        self.conv3 = spconv.SparseSequential(block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'), block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'))
        self.conv4 = spconv.SparseSequential(block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'), block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'), block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'))
        last_pad = 0
        # Final conv collapses only the z axis further (kernel/stride (3,1,1)/(2,1,1)).
        self.conv_out = spconv.SparseSequential(spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad, bias=False, indice_key='spconv_down2'), norm_fn(128), nn.ReLU())
        self.num_point_features = 128
        self.backbone_channels = {'x_conv1': 16, 'x_conv2': 32, 'x_conv3': 64, 'x_conv4': 64}
    def forward(self, input_sp_tensor):
        """
        Args:
            input_sp_tensor: spconv.SparseConvTensor of voxel features.
        Returns:
            Sparse tensor with 128 channels after 8x spatial downsampling
            and an additional stride-2 compression along z.
        """
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        out = self.conv_out(x_conv4)
        return out
|
class VoxelResBackBone8x(nn.Module):
    """Residual variant of VoxelBackBone8x: each stage uses SparseBasicBlock pairs."""
    def __init__(self, input_channels, grid_size, **kwargs):
        super().__init__()
        norm_fn = partial(nn.BatchNorm1d, eps=0.001, momentum=0.01)
        # spconv spatial shape is ordered [z, y, x]; one extra voxel of headroom on z.
        self.sparse_shape = (grid_size[::(- 1)] + [1, 0, 0])
        self.conv_input = spconv.SparseSequential(spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'), norm_fn(16), nn.ReLU())
        block = post_act_block
        # Stages: channels 16 -> 32 -> 64 -> 128; strided 'spconv' then two residual blocks.
        self.conv1 = spconv.SparseSequential(SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'), SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'))
        self.conv2 = spconv.SparseSequential(block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'), SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'))
        self.conv3 = spconv.SparseSequential(block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'), SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'))
        self.conv4 = spconv.SparseSequential(block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'), SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'), SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'))
        last_pad = 0
        # Final conv collapses only the z axis further (kernel/stride (3,1,1)/(2,1,1)).
        self.conv_out = spconv.SparseSequential(spconv.SparseConv3d(128, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad, bias=False, indice_key='spconv_down2'), norm_fn(128), nn.ReLU())
        self.num_point_features = 128
        self.backbone_channels = {'x_conv1': 16, 'x_conv2': 32, 'x_conv3': 64, 'x_conv4': 128}
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size: int
                voxel_features: (num_voxels, C)
                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            batch_dict augmented with:
                encoded_spconv_tensor: output sparse tensor (stride 8)
                multi_scale_3d_features: per-stage sparse tensors
        """
        (voxel_features, voxel_coords) = (batch_dict['voxel_features'], batch_dict['voxel_coords'])
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(features=voxel_features, indices=voxel_coords.int(), spatial_shape=self.sparse_shape, batch_size=batch_size)
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        out = self.conv_out(x_conv4)
        batch_dict.update({'encoded_spconv_tensor': out, 'encoded_spconv_tensor_stride': 8})
        batch_dict.update({'multi_scale_3d_features': {'x_conv1': x_conv1, 'x_conv2': x_conv2, 'x_conv3': x_conv3, 'x_conv4': x_conv4}})
        return batch_dict
|
class HeightCompression(nn.Module):
    """Collapse a sparse tensor's depth axis into its channel axis to form a BEV map."""

    def __init__(self, **kwargs):
        super().__init__()

    def forward(self, encoded_spconv_tensor):
        """Densify the sparse tensor and fold depth into channels.

        Args:
            encoded_spconv_tensor: sparse tensor with a (N, C, D, H, W) dense form.
        Returns:
            Dense tensor of shape (N, C * D, H, W).
        """
        dense = encoded_spconv_tensor.dense()
        n, c, d, h, w = dense.shape
        return dense.view(n, c * d, h, w)
|
class VoxelNet(VoxelBackBone8x):
    """VoxelBackBone8x wrapped with a fixed nuScenes-style grid, a 1x1 head and BEV compression."""
    def __init__(self, in_channels, out_channels, config, D=3):
        self.bev_stride = 8
        # Fixed voxelization of a 102.4m x 102.4m x 8m volume.
        voxel_size = [0.1, 0.1, 0.2]
        point_cloud_range = np.array([(- 51.2), (- 51.2), (- 5.0), 51.2, 51.2, 3.0], dtype=np.float32)
        # Grid size in [z, y, x] order (reversed for spconv).
        self.grid_size = ((point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size).astype(int)[::(- 1)]
        # Bug fix: attribute was misspelled `bach_size`; it is read in forward().
        self.batch_size = config['batch_size']
        super().__init__(in_channels, self.grid_size)
        # 1x1 sparse conv projecting backbone features to the requested channels.
        self.final = spconv.SparseConv3d(128, (out_channels // 1), 1, stride=1, padding=0, bias=False, indice_key='final')
        self.height_compression = HeightCompression()
    def forward(self, voxels, coordinates):
        """Voxel features + coordinates -> dense BEV feature map."""
        sp_tensor = spconv.SparseConvTensor(features=voxels, indices=coordinates, spatial_shape=self.grid_size, batch_size=self.batch_size)
        sp_tensor = super(VoxelNet, self).forward(sp_tensor)
        sp_tensor = self.final(sp_tensor)
        sp_tensor = self.height_compression(sp_tensor)
        return sp_tensor
|
def main():
    """
    Code for launching the pretraining.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='config/slidr_minkunet.yaml', help='specify the config for training')
    parser.add_argument('--resume_path', type=str, default=None, help='provide a path to resume an incomplete training')
    args = parser.parse_args()
    config = generate_config(args.cfg_file)
    if args.resume_path:
        config['resume_path'] = args.resume_path
    # Print the resolved config only on the rank-0 process.
    if (os.environ.get('LOCAL_RANK', 0) == 0):
        print(('\n' + '\n'.join(list(map((lambda x: f'{x[0]:20}: {x[1]}'), config.items())))))
    dm = PretrainDataModule(config)
    (model_points, model_images, model_fusion) = make_model(config)
    if (config['num_gpus'] > 1):
        # Convert BN to SyncBatchNorm for multi-GPU DDP training.
        # NOTE(review): the points model is left unconverted (self-assignment
        # below) — presumably because Minkowski BN is not convertible; confirm.
        model_images = nn.SyncBatchNorm.convert_sync_batchnorm(model_images)
        model_points = model_points
        model_fusion = nn.SyncBatchNorm.convert_sync_batchnorm(model_fusion)
    # Pick the Lightning module matching the point-cloud backbone family.
    if (config['model_points'] == 'minkunet'):
        module = LightningPretrain(model_points, model_images, model_fusion, config)
    elif (config['model_points'] == 'voxelnet'):
        module = LightningPretrainSpconv(model_points, model_images, config)
    path = os.path.join(config['working_dir'], config['datetime'])
    trainer = pl.Trainer(gpus=config['num_gpus'], accelerator='ddp', default_root_dir=path, checkpoint_callback=True, max_epochs=config['num_epochs'], plugins=DDPPlugin(find_unused_parameters=True), num_sanity_val_steps=0, resume_from_checkpoint=config['resume_path'], check_val_every_n_epoch=10)
    print('Starting the training')
    trainer.fit(module, dm)
|
class NCELoss(nn.Module):
    """
    Compute the PointInfoNCE loss.
    """
    def __init__(self, temperature):
        """temperature: softmax temperature dividing the similarity logits."""
        super(NCELoss, self).__init__()
        self.temperature = temperature
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, k, q):
        """Cross-entropy over the k·qᵀ similarity matrix; row i's positive is column i.

        Args:
            k, q: [N, d] feature batches matched row-to-row.
        Returns:
            Scalar loss tensor.
        """
        logits = torch.mm(k, q.transpose(1, 0))
        target = torch.arange(k.shape[0], device=k.device).long()
        out = torch.div(logits, self.temperature)
        out = out.contiguous()
        # Bug fix: removed a leftover `import pdb; pdb.set_trace()` that
        # halted training at every forward pass.
        loss = self.criterion(out, target)
        return loss
|
class semantic_NCELoss(nn.Module):
    """
    Compute the PointInfoNCE loss, excluding same-pseudo-label pairs
    (other than the positive on the diagonal) from the softmax.
    """
    def __init__(self, temperature):
        super(semantic_NCELoss, self).__init__()
        self.temperature = temperature
        # Kept for interface parity; the forward pass uses nll_loss on a
        # sparse log-softmax instead.
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, k, q, pseudo_label):
        # Temperature-scaled similarity of every key against every query.
        scaled = torch.div(torch.mm(k, q.transpose(1, 0)), self.temperature)
        target = torch.arange(k.shape[0], device=k.device).long()
        n = pseudo_label.shape[0]
        # Pairs sharing a pseudo-label, excluding the diagonal positives.
        expanded = pseudo_label.unsqueeze((- 1)).repeat(1, n)
        same_label = (expanded == expanded.permute(1, 0))
        diagonal = torch.diag_embed(torch.Tensor(([True] * n))).to(k.device).bool()
        ambiguous = (same_label & (~ diagonal))
        # Zero the ambiguous entries so `to_sparse` drops them from the
        # softmax normalization below.
        scaled[ambiguous] = 0
        sparse_logits = scaled.to_sparse()
        log_probs = torch.sparse.log_softmax(sparse_logits, dim=1).to_dense()
        return F.nll_loss(log_probs, target)
|
class DistillKL(nn.Module):
    """Distilling the Knowledge in a Neural Network (temperature-softened KL)."""
    def __init__(self, T):
        """T: softmax temperature."""
        super(DistillKL, self).__init__()
        self.T = T
    def forward(self, y_s, y_t):
        """KL divergence between softened student and teacher distributions.

        Args:
            y_s, y_t: [batch, classes] raw logits of student and teacher.
        Returns:
            Per-sample loss, scaled by T^2 to keep gradient magnitudes stable.
        """
        p_s = F.log_softmax((y_s / self.T), dim=1)
        p_t = F.softmax((y_t / self.T), dim=1)
        # `reduction='sum'` replaces the deprecated `size_average=False`
        # (same behavior); dividing by batch size gives a per-sample loss.
        loss = ((F.kl_div(p_s, p_t, reduction='sum') * (self.T ** 2)) / y_s.shape[0])
        return loss
|
class CRDLoss(nn.Module):
    """CRD Loss function.

    Includes two symmetric parts:
    (a) using teacher as anchor, choose positive and negatives over the student side
    (b) using student as anchor, choose positive and negatives over the teacher side

    Args:
        opt.s_dim: the dimension of student's feature
        opt.t_dim: the dimension of teacher's feature
        opt.feat_dim: the dimension of the projection space
        opt.nce_k: number of negatives paired with each positive
        opt.nce_t: the temperature
        opt.nce_m: the momentum for updating the memory buffer
        opt.n_data: the number of samples in the training set, therefore the
            memory buffer is: opt.n_data x opt.feat_dim
    """
    def __init__(self, opt):
        super(CRDLoss, self).__init__()
        # Project both feature spaces into a shared, L2-normalized embedding.
        self.embed_s = Embed(opt.s_dim, opt.feat_dim)
        self.embed_t = Embed(opt.t_dim, opt.feat_dim)
        # Memory bank supplying negatives for both directions.
        self.contrast = ContrastMemory(opt.feat_dim, opt.n_data, opt.nce_k, opt.nce_t, opt.nce_m)
        self.criterion_t = ContrastLoss(opt.n_data)
        self.criterion_s = ContrastLoss(opt.n_data)
    def forward(self, f_s, f_t, idx, contrast_idx=None):
        """
        Args:
            f_s: the feature of student network, size [batch_size, s_dim]
            f_t: the feature of teacher network, size [batch_size, t_dim]
            idx: the indices of these positive samples in the dataset, size [batch_size]
            contrast_idx: the indices of negative samples, size [batch_size, nce_k]
        Returns:
            The contrastive loss
        """
        f_s = self.embed_s(f_s)
        f_t = self.embed_t(f_t)
        (out_s, out_t) = self.contrast(f_s, f_t, idx, contrast_idx)
        s_loss = self.criterion_s(out_s)
        t_loss = self.criterion_t(out_t)
        loss = (s_loss + t_loss)
        return loss
|
class ContrastLoss(nn.Module):
    """
    contrastive loss, corresponding to Eq (18)
    """
    def __init__(self, n_data):
        # n_data: dataset size; the noise prior is uniform, Pn = 1 / n_data.
        super(ContrastLoss, self).__init__()
        self.n_data = n_data
    def forward(self, x):
        # x: [bsz, 1 + m] similarity scores; column 0 is the positive pair,
        # the remaining m columns are negatives.
        bsz = x.shape[0]
        m = (x.size(1) - 1)
        # Uniform noise probability.
        Pn = (1 / float(self.n_data))
        # log D1 = log P_pos / (P_pos + m*Pn): probability the positive came
        # from the data. NOTE(review): `eps` is a module-level constant defined
        # elsewhere in this file — verify it is in scope.
        P_pos = x.select(1, 0)
        log_D1 = torch.div(P_pos, P_pos.add(((m * Pn) + eps))).log_()
        # log D0 = log (m*Pn) / (P_neg + m*Pn): probability a negative came
        # from the noise distribution (in-place log on a cloned buffer).
        P_neg = x.narrow(1, 1, m)
        log_D0 = torch.div(P_neg.clone().fill_((m * Pn)), P_neg.add(((m * Pn) + eps))).log_()
        loss = ((- (log_D1.sum(0) + log_D0.view((- 1), 1).sum(0))) / bsz)
        return loss
|
class Embed(nn.Module):
    """Flatten, linearly project, and L2-normalize a feature batch."""
    def __init__(self, dim_in=1024, dim_out=128):
        super(Embed, self).__init__()
        self.linear = nn.Linear(dim_in, dim_out)
        self.l2norm = Normalize(2)
    def forward(self, x):
        flat = x.view(x.shape[0], (- 1))
        projected = self.linear(flat)
        return self.l2norm(projected)
|
class Normalize(nn.Module):
    """L-p normalization layer (normalizes each row along dim 1)."""
    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power
    def forward(self, x):
        # Per-row p-norm, kept broadcastable for the division.
        row_norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power)
        return x.div(row_norm)
|
class ContrastMemory(nn.Module):
    """
    Memory buffer that supplies a large amount of negative samples:
    one memory bank per view, momentum-updated each forward pass.
    """
    def __init__(self, inputSize, outputSize, K, T=0.07, momentum=0.5):
        # inputSize: feature dim; outputSize: dataset size (rows of each bank);
        # K: negatives per positive; T: temperature; momentum: bank update rate.
        super(ContrastMemory, self).__init__()
        self.nLem = outputSize
        self.unigrams = torch.ones(self.nLem)
        # Uniform sampler over the dataset indices.
        self.multinomial = AliasMethod(self.unigrams)
        self.multinomial.cuda()
        self.K = K
        # params = [K, T, Z_v1, Z_v2, momentum]; Z_* = -1 means "not estimated yet".
        self.register_buffer('params', torch.tensor([K, T, (- 1), (- 1), momentum]))
        stdv = (1.0 / math.sqrt((inputSize / 3)))
        # Banks initialized uniformly in [-stdv, stdv].
        self.register_buffer('memory_v1', torch.rand(outputSize, inputSize).mul_((2 * stdv)).add_((- stdv)))
        self.register_buffer('memory_v2', torch.rand(outputSize, inputSize).mul_((2 * stdv)).add_((- stdv)))
    def forward(self, v1, v2, y, idx=None):
        # v1, v2: [batch, inputSize] features of the two views.
        # y: [batch] dataset indices of the positives.
        # idx: optional pre-drawn [batch, K+1] sample indices; column 0 is the positive.
        K = int(self.params[0].item())
        T = self.params[1].item()
        Z_v1 = self.params[2].item()
        Z_v2 = self.params[3].item()
        momentum = self.params[4].item()
        batchSize = v1.size(0)
        outputSize = self.memory_v1.size(0)
        inputSize = self.memory_v1.size(1)
        # Draw K negatives (+1 slot overwritten with the positive) per element.
        if (idx is None):
            idx = self.multinomial.draw((batchSize * (self.K + 1))).view(batchSize, (- 1))
            idx.select(1, 0).copy_(y.data)
        # Score v2 against bank-v1 entries (and symmetrically below).
        weight_v1 = torch.index_select(self.memory_v1, 0, idx.view((- 1))).detach()
        weight_v1 = weight_v1.view(batchSize, (K + 1), inputSize)
        out_v2 = torch.bmm(weight_v1, v2.view(batchSize, inputSize, 1))
        out_v2 = torch.exp(torch.div(out_v2, T))
        weight_v2 = torch.index_select(self.memory_v2, 0, idx.view((- 1))).detach()
        weight_v2 = weight_v2.view(batchSize, (K + 1), inputSize)
        out_v1 = torch.bmm(weight_v2, v1.view(batchSize, inputSize, 1))
        out_v1 = torch.exp(torch.div(out_v1, T))
        # Estimate the NCE normalization constants once, from the first batch.
        if (Z_v1 < 0):
            self.params[2] = (out_v1.mean() * outputSize)
            Z_v1 = self.params[2].clone().detach().item()
            print('normalization constant Z_v1 is set to {:.1f}'.format(Z_v1))
        if (Z_v2 < 0):
            self.params[3] = (out_v2.mean() * outputSize)
            Z_v2 = self.params[3].clone().detach().item()
            print('normalization constant Z_v2 is set to {:.1f}'.format(Z_v2))
        out_v1 = torch.div(out_v1, Z_v1).contiguous()
        out_v2 = torch.div(out_v2, Z_v2).contiguous()
        # Momentum-update both banks with the fresh features, then re-normalize rows.
        with torch.no_grad():
            l_pos = torch.index_select(self.memory_v1, 0, y.view((- 1)))
            l_pos.mul_(momentum)
            l_pos.add_(torch.mul(v1, (1 - momentum)))
            l_norm = l_pos.pow(2).sum(1, keepdim=True).pow(0.5)
            updated_v1 = l_pos.div(l_norm)
            self.memory_v1.index_copy_(0, y, updated_v1)
            ab_pos = torch.index_select(self.memory_v2, 0, y.view((- 1)))
            ab_pos.mul_(momentum)
            ab_pos.add_(torch.mul(v2, (1 - momentum)))
            ab_norm = ab_pos.pow(2).sum(1, keepdim=True).pow(0.5)
            updated_v2 = ab_pos.div(ab_norm)
            self.memory_v2.index_copy_(0, y, updated_v2)
        return (out_v1, out_v2)
|
class AliasMethod(object):
    """
    Walker's alias method for O(1) discrete sampling.
    From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    """
    def __init__(self, probs):
        # Normalize in place when the weights sum to more than one.
        if probs.sum() > 1:
            probs.div_(probs.sum())
        K = len(probs)
        self.prob = torch.zeros(K)
        self.alias = torch.LongTensor([0] * K)
        # Partition the scaled probabilities into under- and over-full bins.
        smaller, larger = [], []
        for kk, prob in enumerate(probs):
            self.prob[kk] = K * prob
            (smaller if self.prob[kk] < 1.0 else larger).append(kk)
        # Pair each under-full bin with an over-full donor.
        while smaller and larger:
            small = smaller.pop()
            large = larger.pop()
            self.alias[small] = large
            self.prob[large] = (self.prob[large] - 1.0) + self.prob[small]
            (smaller if self.prob[large] < 1.0 else larger).append(large)
        # Whatever remains is (numerically) exactly full.
        for last_one in smaller + larger:
            self.prob[last_one] = 1

    def cuda(self):
        # Move the sampling tables to the default CUDA device.
        self.prob = self.prob.cuda()
        self.alias = self.alias.cuda()

    def draw(self, N):
        """ Draw N samples from multinomial """
        K = self.alias.size(0)
        # Uniformly pick a column, then a Bernoulli trial decides between
        # the column itself and its alias.
        kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K)
        prob = self.prob.index_select(0, kk)
        alias = self.alias.index_select(0, kk)
        b = torch.bernoulli(prob)
        oq = kk.mul(b.long())
        oj = alias.mul((1 - b).long())
        return oq + oj
|
class PretrainDataModule(pl.LightningDataModule):
    """Lightning data module wiring the pretraining datasets, transforms, and loaders."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        # Under multi-GPU DDP, split the global batch size across devices.
        if config['num_gpus']:
            self.batch_size = (config['batch_size'] // config['num_gpus'])
        else:
            self.batch_size = config['batch_size']
    def setup(self, stage):
        """Instantiate train/val datasets with their transform pipelines."""
        cloud_transforms_train = make_transforms_clouds(self.config)
        mixed_transforms_train = make_transforms_asymmetrical(self.config)
        cloud_transforms_val = None
        mixed_transforms_val = make_transforms_asymmetrical_val(self.config)
        # Dataset class depends on both the dataset and the point-model backend.
        if ((self.config['dataset'].lower() == 'nuscenes') and (self.config['model_points'] == 'minkunet')):
            Dataset = NuScenesMatchDataset
        elif (self.config['dataset'].lower() == 'kitti'):
            Dataset = KittiMatchDataset
        elif (self.config['dataset'].lower() == 'scannet'):
            Dataset = scannet_Dataset
        elif ((self.config['dataset'].lower() == 'nuscenes') and (self.config['model_points'] == 'voxelnet')):
            Dataset = NuScenesMatchDatasetSpconv
        else:
            raise Exception('Dataset Unknown')
        # 'parametrizing'/'verifying' are alternative split names used when
        # tuning hyper-parameters.
        if (self.config['training'] in ('parametrize', 'parametrizing')):
            phase_train = 'parametrizing'
            phase_val = 'verifying'
        else:
            phase_train = 'train'
            phase_val = 'val'
        self.train_dataset = Dataset(phase=phase_train, config=self.config, shuffle=True, cloud_transforms=cloud_transforms_train, mixed_transforms=mixed_transforms_train)
        print('Dataset Loaded')
        print('training size: ', len(self.train_dataset))
        # Reuse the already-parsed nuScenes DB object to avoid loading it twice.
        if (self.config['dataset'].lower() == 'nuscenes'):
            self.val_dataset = Dataset(phase=phase_val, shuffle=False, cloud_transforms=cloud_transforms_val, mixed_transforms=mixed_transforms_val, config=self.config, cached_nuscenes=self.train_dataset.nusc)
        else:
            self.val_dataset = Dataset(phase=phase_val, shuffle=False, cloud_transforms=cloud_transforms_val, mixed_transforms=mixed_transforms_val, config=self.config)
        print('validation size: ', len(self.val_dataset))
    def train_dataloader(self):
        """Training loader with the dataset-specific pair collate function."""
        # Workers are divided across GPUs under DDP.
        if self.config['num_gpus']:
            num_workers = (self.config['num_threads'] // self.config['num_gpus'])
        else:
            num_workers = self.config['num_threads']
        if (self.config['dataset'].lower() == 'nuscenes'):
            default_collate_pair_fn = minkunet_collate_pair_fn
        elif (self.config['dataset'].lower() == 'kitti'):
            default_collate_pair_fn = kitti_collate_pair_fn
        elif (self.config['dataset'].lower() == 'scannet'):
            default_collate_pair_fn = scannet_collate_pair_fn
        # worker_init_fn reseeds numpy per worker so augmentations differ.
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=num_workers, collate_fn=default_collate_pair_fn, pin_memory=True, drop_last=True, worker_init_fn=(lambda id: np.random.seed(((torch.initial_seed() // (2 ** 32)) + id))))
    def val_dataloader(self):
        """Validation loader; same collate selection as training, no shuffling."""
        if self.config['num_gpus']:
            num_workers = (self.config['num_threads'] // self.config['num_gpus'])
        else:
            num_workers = self.config['num_threads']
        if (self.config['dataset'].lower() == 'nuscenes'):
            default_collate_pair_fn = minkunet_collate_pair_fn
        elif (self.config['dataset'].lower() == 'kitti'):
            default_collate_pair_fn = kitti_collate_pair_fn
        elif (self.config['dataset'].lower() == 'scannet'):
            default_collate_pair_fn = scannet_collate_pair_fn
        return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=num_workers, collate_fn=default_collate_pair_fn, pin_memory=True, drop_last=False, worker_init_fn=(lambda id: np.random.seed(((torch.initial_seed() // (2 ** 32)) + id))))
|
def forgiving_state_restore(net, loaded_dict):
    """
    Handle partial loading when some tensors don't match up in size.
    Because we want to use models that were trained off a different
    number of classes.
    """
    # Strip DataParallel's 'module.' prefix from checkpoint keys.
    loaded_dict = {k.replace('module.', ''): v for (k, v) in loaded_dict.items()}
    net_state_dict = net.state_dict()
    matched = {}
    for name in net_state_dict:
        # Keep only parameters present in the checkpoint with an identical shape.
        if name in loaded_dict and net_state_dict[name].size() == loaded_dict[name].size():
            matched[name] = loaded_dict[name]
        else:
            print('Skipped loading parameter {}'.format(name))
    net_state_dict.update(matched)
    net.load_state_dict(net_state_dict)
    return net
|
def make_model(config):
    """
    Build points and image models according to what is in the config.

    Returns a (model_points, model_images, model_fusion) triple.
    """
    model_fusion = fusionNet(config)
    # Point backbone: MinkUNet for ScanNet (3 input features), SPVCNN otherwise.
    if config['dataset'] == 'scannet':
        model_points = MinkUNet(3, config['model_n_out'], config)
    else:
        model_points = SPVCNN(1, config['model_n_out'], config)
    # Image backbone: ViT variants and MaskCLIP are selected by the encoder
    # name; otherwise the decoder type decides.
    if 'vit_' in config['images_encoder']:
        model_images = DinoVitFeatureExtractor(config, preprocessing=Preprocessing())
    elif config['images_encoder'] == 'maskclip':
        model_images = maskClipFeatureExtractor(config, preprocessing=Preprocessing())
    elif config['decoder'] == 'dilation':
        model_images = DilationFeatureExtractor(config, preprocessing=Preprocessing())
    elif config['decoder'] == 'ppkt':
        model_images = PPKTFeatureExtractor(config, preprocessing=Preprocessing())
    else:
        raise Exception(f"Model not found: {config['decoder']}")
    return (model_points, model_images, model_fusion)
|
def _lookup_type(type_str):
    """Map a PLY header type name to its canonical type, resolving aliases."""
    if type_str in _data_type_reverse:
        return _data_type_reverse[type_str]
    # Unknown name: try to resolve it as an alias first.
    try:
        canonical = _data_types[type_str]
    except KeyError:
        raise ValueError(('field type %r not in %r' % (type_str, _types_list)))
    return _data_type_reverse[canonical]
|
def _split_line(line, n):
    """Split `line` on whitespace into exactly n + 1 fields.

    A line with only n fields gets an empty trailing field; fewer than n
    fields is a caller error (asserted).
    """
    fields = line.split(None, n)
    if len(fields) != n + 1:
        # The final (unsplit-remainder) field may legitimately be empty.
        fields.append('')
    assert len(fields) == n + 1
    return fields
|
def make2d(array, cols=None, dtype=None):
    """
    Make a 2D array from an array of arrays.  The `cols' and `dtype'
    arguments can be omitted if the array is not empty (they then default
    to the length and dtype of the first row).
    """
    if (cols is None or dtype is None) and not len(array):
        raise RuntimeError('cols and dtype must be specified for empty array')
    if cols is None:
        cols = len(array[0])
    if dtype is None:
        dtype = array[0].dtype
    # Build via a single structured field so each row becomes one record.
    record_dtype = [('_', dtype, (cols,))]
    return _np.fromiter(array, record_dtype, count=len(array))['_']
|
class PlyParseError(Exception):
    """
    Raised when a PLY file cannot be parsed.

    The attributes `element', `row', `prop', and `message' give
    additional information.
    """
    def __init__(self, message, element=None, row=None, prop=None):
        self.message = message
        self.element = element
        self.row = row
        self.prop = prop
        # Compose a human-readable message with whatever context is known.
        s = ''
        if self.element:
            s += ('element %r: ' % self.element.name)
        if (self.row is not None):
            s += ('row %d: ' % self.row)
        if self.prop:
            s += ('property %r: ' % self.prop.name)
        s += self.message
        Exception.__init__(self, s)

    def __repr__(self):
        # Bug fix: the original applied the %-format only to `message` and
        # returned a (str, element, row, prop) tuple instead of a string.
        return ('PlyParseError(%r, element=%r, row=%r, prop=%r)' %
                (self.message, self.element, self.row, self.prop))
|
class PlyData(object):
    """
    PLY file header and data.

    A PlyData instance is created in one of two ways: by the static
    method PlyData.read (to read a PLY file), or directly from __init__
    given a sequence of elements (which can then be written to a PLY
    file).
    """
    def __init__(self, elements=[], text=False, byte_order='=', comments=[], obj_info=[]):
        """
        elements: sequence of PlyElement instances.

        text: whether the resulting PLY file will be text (True) or
        binary (False).

        byte_order: '<' for little-endian, '>' for big-endian, or '='
        for native.  This is only relevant if `text' is False.

        comments: sequence of strings that will be placed in the header
        between the 'ply' and 'format ...' lines.

        obj_info: like comments, but will be placed in the header with
        "obj_info ..." instead of "comment ...".
        """
        # Resolve '=' to a concrete byte order for binary output.
        if ((byte_order == '=') and (not text)):
            byte_order = _native_byte_order
        self.byte_order = byte_order
        self.text = text
        # Copy the sequences so the mutable default arguments are never shared.
        self.comments = list(comments)
        self.obj_info = list(obj_info)
        self.elements = elements
    def _get_elements(self):
        return self._elements
    def _set_elements(self, elements):
        # Store as an immutable tuple and rebuild the name index.
        self._elements = tuple(elements)
        self._index()
    # Property keeps the name -> element lookup in sync on assignment.
    elements = property(_get_elements, _set_elements)
    def _get_byte_order(self):
        return self._byte_order
    def _set_byte_order(self, byte_order):
        if (byte_order not in ['<', '>', '=']):
            raise ValueError("byte order must be '<', '>', or '='")
        self._byte_order = byte_order
    byte_order = property(_get_byte_order, _set_byte_order)
    def _index(self):
        # Rebuild the lookup table used by __contains__ / __getitem__.
        self._element_lookup = dict(((elt.name, elt) for elt in self._elements))
        if (len(self._element_lookup) != len(self._elements)):
            raise ValueError('two elements with same name')
    @staticmethod
    def _parse_header(stream):
        """
        Parse a PLY header from a readable (binary) file-like stream.
        """
        lines = []
        comments = {'comment': [], 'obj_info': []}
        # Collect header lines up to 'end_header'; comment/obj_info lines are
        # split only once so their text is preserved verbatim.
        while True:
            line = stream.readline().decode('ascii').strip()
            fields = _split_line(line, 1)
            if (fields[0] == 'end_header'):
                break
            elif (fields[0] in comments.keys()):
                lines.append(fields)
            else:
                lines.append(line.split())
        a = 0
        if (lines[a] != ['ply']):
            raise PlyParseError("expected 'ply'")
        a += 1
        # Comments may appear between 'ply' and 'format'.
        while (lines[a][0] in comments.keys()):
            comments[lines[a][0]].append(lines[a][1])
            a += 1
        if (lines[a][0] != 'format'):
            raise PlyParseError("expected 'format'")
        if (lines[a][2] != '1.0'):
            raise PlyParseError("expected version '1.0'")
        if (len(lines[a]) != 3):
            raise PlyParseError("too many fields after 'format'")
        fmt = lines[a][1]
        if (fmt not in _byte_order_map):
            raise PlyParseError(("don't understand format %r" % fmt))
        byte_order = _byte_order_map[fmt]
        text = (fmt == 'ascii')
        a += 1
        # Comments may also appear after the format line.
        while ((a < len(lines)) and (lines[a][0] in comments.keys())):
            comments[lines[a][0]].append(lines[a][1])
            a += 1
        # Remaining lines describe the elements and their properties.
        return PlyData(PlyElement._parse_multi(lines[a:]), text, byte_order, comments['comment'], comments['obj_info'])
    @staticmethod
    def read(stream):
        """
        Read PLY data from a readable file-like object or filename.
        """
        (must_close, stream) = _open_stream(stream, 'read')
        try:
            data = PlyData._parse_header(stream)
            for elt in data:
                elt._read(stream, data.text, data.byte_order)
        finally:
            # Only close streams this function opened itself.
            if must_close:
                stream.close()
        return data
    def write(self, stream):
        """
        Write PLY data to a writeable file-like object or filename.
        """
        (must_close, stream) = _open_stream(stream, 'write')
        try:
            stream.write(self.header.encode('ascii'))
            stream.write(b'\r\n')
            for elt in self:
                elt._write(stream, self.text, self.byte_order)
        finally:
            if must_close:
                stream.close()
    @property
    def header(self):
        """
        Provide PLY-formatted metadata for the instance.
        """
        lines = ['ply']
        if self.text:
            lines.append('format ascii 1.0')
        else:
            lines.append((('format ' + _byte_order_reverse[self.byte_order]) + ' 1.0'))
        for c in self.comments:
            lines.append(('comment ' + c))
        for c in self.obj_info:
            lines.append(('obj_info ' + c))
        lines.extend((elt.header for elt in self.elements))
        lines.append('end_header')
        return '\r\n'.join(lines)
    def __iter__(self):
        return iter(self.elements)
    def __len__(self):
        return len(self.elements)
    def __contains__(self, name):
        return (name in self._element_lookup)
    def __getitem__(self, name):
        # Elements are addressed by name, e.g. data['vertex'].
        return self._element_lookup[name]
    def __str__(self):
        return self.header
    def __repr__(self):
        return ('PlyData(%r, text=%r, byte_order=%r, comments=%r, obj_info=%r)' % (self.elements, self.text, self.byte_order, self.comments, self.obj_info))
|
def _open_stream(stream, read_or_write):
    # Accept either an already-open file-like object (anything exposing a
    # 'read'/'write' attribute) or a filename to open in binary mode.
    # Returns (must_close, stream); must_close is True only when this
    # function opened the file itself.
    if hasattr(stream, read_or_write):
        return (False, stream)
    mode = read_or_write[0] + 'b'
    try:
        return (True, open(stream, mode))
    except TypeError:
        raise RuntimeError('expected open file or filename')
|
class PlyElement(object):
    """
    PLY file element.

    A client of this library doesn't normally need to instantiate this
    directly, so the following is only for the sake of documenting the
    internals.

    Creating a PlyElement instance is generally done in one of two ways:
    as a byproduct of PlyData.read (when reading a PLY file) and by
    PlyElement.describe (before writing a PLY file).
    """
    def __init__(self, name, properties, count, comments=[]):
        """
        This is not part of the public interface. The preferred methods
        of obtaining PlyElement instances are PlyData.read (to read from
        a file) and PlyElement.describe (to construct from a numpy
        array).
        """
        # NOTE(review): the mutable default `comments=[]` is safe here
        # because it is only copied via list(), never mutated.
        self._name = str(name)
        self._check_name()
        self._count = count
        self._properties = tuple(properties)
        self._index()
        self.comments = list(comments)
        # Elements containing list properties cannot use fast whole-array
        # numpy I/O and fall back to row-by-row reading/writing.
        self._have_list = any((isinstance(p, PlyListProperty) for p in self.properties))
    @property
    def count(self):
        # Number of rows declared in the header (kept in sync with the
        # data array by _set_data).
        return self._count
    def _get_data(self):
        return self._data
    def _set_data(self, data):
        # Assigning data also updates the row count and validates that
        # every declared property exists as a field.
        self._data = data
        self._count = len(data)
        self._check_sanity()
    data = property(_get_data, _set_data)
    def _check_sanity(self):
        # Every declared property must be a field of the data array.
        for prop in self.properties:
            if (prop.name not in self._data.dtype.fields):
                raise ValueError(('dangling property %r' % prop.name))
    def _get_properties(self):
        return self._properties
    def _set_properties(self, properties):
        self._properties = tuple(properties)
        self._check_sanity()
        self._index()
    properties = property(_get_properties, _set_properties)
    def _index(self):
        # Map property name -> property; duplicate names collapse in the
        # dict and are detected by the length check below.
        self._property_lookup = dict(((prop.name, prop) for prop in self._properties))
        if (len(self._property_lookup) != len(self._properties)):
            raise ValueError('two properties with same name')
    def ply_property(self, name):
        # Look up a property descriptor by name.
        return self._property_lookup[name]
    @property
    def name(self):
        return self._name
    def _check_name(self):
        # Names appear space-delimited in the header, so embedded
        # whitespace would corrupt the file.
        if any((c.isspace() for c in self._name)):
            msg = ('element name %r contains spaces' % self._name)
            raise ValueError(msg)
    def dtype(self, byte_order='='):
        """
        Return the numpy dtype of the in-memory representation of the
        data. (If there are no list properties, and the PLY format is
        binary, then this also accurately describes the on-disk
        representation of the element.)
        """
        return [(prop.name, prop.dtype(byte_order)) for prop in self.properties]
    @staticmethod
    def _parse_multi(header_lines):
        """
        Parse a list of PLY element definitions.
        """
        elements = []
        while header_lines:
            (elt, header_lines) = PlyElement._parse_one(header_lines)
            elements.append(elt)
        return elements
    @staticmethod
    def _parse_one(lines):
        """
        Consume one element definition. The unconsumed input is
        returned along with a PlyElement instance.
        """
        a = 0
        line = lines[a]
        if (line[0] != 'element'):
            raise PlyParseError("expected 'element'")
        if (len(line) > 3):
            raise PlyParseError("too many fields after 'element'")
        if (len(line) < 3):
            raise PlyParseError("too few fields after 'element'")
        (name, count) = (line[1], int(line[2]))
        comments = []
        properties = []
        # Consume the following 'comment' and 'property' lines until the
        # next element declaration (or the end of the header lines).
        while True:
            a += 1
            if (a >= len(lines)):
                break
            if (lines[a][0] == 'comment'):
                comments.append(lines[a][1])
            elif (lines[a][0] == 'property'):
                properties.append(PlyProperty._parse_one(lines[a]))
            else:
                break
        return (PlyElement(name, properties, count, comments), lines[a:])
    @staticmethod
    def describe(data, name, len_types={}, val_types={}, comments=[]):
        """
        Construct a PlyElement from an array's metadata.

        len_types and val_types can be given as mappings from list
        property names to type strings (like 'u1', 'f4', etc., or
        'int8', 'float32', etc.). These can be used to define the length
        and value types of list properties. List property lengths
        always default to type 'u1' (8-bit unsigned integer), and value
        types default to 'i4' (32-bit integer).
        """
        if (not isinstance(data, _np.ndarray)):
            raise TypeError('only numpy arrays are supported')
        if (len(data.shape) != 1):
            raise ValueError('only one-dimensional arrays are supported')
        count = len(data)
        properties = []
        descr = data.dtype.descr
        for t in descr:
            if (not isinstance(t[1], str)):
                raise ValueError('nested records not supported')
            if (not t[0]):
                raise ValueError('field with empty name')
            # Object-typed ('O') or sub-array fields become list
            # properties; everything else becomes a scalar property.
            if ((len(t) != 2) or (t[1][1] == 'O')):
                if (t[1][1] == 'O'):
                    if (len(t) != 2):
                        raise ValueError('non-scalar object fields not supported')
                len_str = _data_type_reverse[len_types.get(t[0], 'u1')]
                if (t[1][1] == 'O'):
                    val_type = val_types.get(t[0], 'i4')
                    val_str = _lookup_type(val_type)
                else:
                    val_str = _lookup_type(t[1][1:])
                prop = PlyListProperty(t[0], len_str, val_str)
            else:
                val_str = _lookup_type(t[1][1:])
                prop = PlyProperty(t[0], val_str)
            properties.append(prop)
        elt = PlyElement(name, properties, count, comments)
        elt.data = data
        return elt
    def _read(self, stream, text, byte_order):
        """
        Read the actual data from a PLY file.
        """
        if text:
            self._read_txt(stream)
        elif self._have_list:
            # List properties force slow row-by-row binary reads.
            self._read_bin(stream, byte_order)
        else:
            # Fast path: fixed-size rows can be bulk-loaded in one call.
            self._data = _np.fromfile(stream, self.dtype(byte_order), self.count)
            if (len(self._data) < self.count):
                k = len(self._data)
                del self._data
                raise PlyParseError('early end-of-file', self, k)
        self._check_sanity()
    def _write(self, stream, text, byte_order):
        """
        Write the data to a PLY file.
        """
        if text:
            self._write_txt(stream)
        elif self._have_list:
            self._write_bin(stream, byte_order)
        else:
            # Fast path: dump the whole record array at once.
            self.data.astype(self.dtype(byte_order), copy=False).tofile(stream)
    def _read_txt(self, stream):
        """
        Load a PLY element from an ASCII-format PLY file. The element
        may contain list properties.
        """
        self._data = _np.empty(self.count, dtype=self.dtype())
        k = 0
        for line in _islice(iter(stream.readline, b''), self.count):
            fields = iter(line.strip().split())
            for prop in self.properties:
                try:
                    self._data[prop.name][k] = prop._from_fields(fields)
                except StopIteration:
                    raise PlyParseError('early end-of-line', self, k, prop)
                except ValueError:
                    raise PlyParseError('malformed input', self, k, prop)
            # The row must be fully consumed: leftover fields are errors.
            try:
                next(fields)
            except StopIteration:
                pass
            else:
                raise PlyParseError('expected end-of-line', self, k)
            k += 1
        if (k < self.count):
            del self._data
            raise PlyParseError('early end-of-file', self, k)
    def _write_txt(self, stream):
        """
        Save a PLY element to an ASCII-format PLY file. The element may
        contain list properties.
        """
        for rec in self.data:
            fields = []
            for prop in self.properties:
                fields.extend(prop._to_fields(rec[prop.name]))
            _np.savetxt(stream, [fields], '%.18g', newline='\r\n')
    def _read_bin(self, stream, byte_order):
        """
        Load a PLY element from a binary PLY file. The element may
        contain list properties.
        """
        self._data = _np.empty(self.count, dtype=self.dtype(byte_order))
        for k in _range(self.count):
            for prop in self.properties:
                try:
                    self._data[prop.name][k] = prop._read_bin(stream, byte_order)
                except StopIteration:
                    raise PlyParseError('early end-of-file', self, k, prop)
    def _write_bin(self, stream, byte_order):
        """
        Save a PLY element to a binary PLY file. The element may
        contain list properties.
        """
        for rec in self.data:
            for prop in self.properties:
                prop._write_bin(rec[prop.name], stream, byte_order)
    @property
    def header(self):
        """
        Format this element's metadata as it would appear in a PLY
        header.
        """
        lines = [('element %s %d' % (self.name, self.count))]
        for c in self.comments:
            lines.append(('comment ' + c))
        lines.extend(list(map(str, self.properties)))
        return '\r\n'.join(lines)
    def __getitem__(self, key):
        return self.data[key]
    def __setitem__(self, key, value):
        self.data[key] = value
    def __str__(self):
        return self.header
    def __repr__(self):
        return ('PlyElement(%r, %r, count=%d, comments=%r)' % (self.name, self.properties, self.count, self.comments))
|
class PlyProperty(object):
    """
    PLY property description. This class is pure metadata; the data
    itself is contained in PlyElement instances.
    """

    def __init__(self, name, val_dtype):
        self._name = str(name)
        self._check_name()
        self.val_dtype = val_dtype

    def _get_val_dtype(self):
        return self._val_dtype

    def _set_val_dtype(self, val_dtype):
        # Normalize any accepted spelling ('float32', 'f4', ...) to the
        # canonical numpy type abbreviation.
        self._val_dtype = _data_types[_lookup_type(val_dtype)]

    val_dtype = property(_get_val_dtype, _set_val_dtype)

    @property
    def name(self):
        return self._name

    def _check_name(self):
        # Property names are space-delimited in the header.
        if any(c.isspace() for c in self._name):
            raise RuntimeError('Error: property name %r contains spaces'
                               % self._name)

    @staticmethod
    def _parse_one(line):
        # `line` is a header line pre-split into fields.
        assert line[0] == 'property'
        if line[1] == 'list':
            if len(line) > 5:
                raise PlyParseError("too many fields after 'property list'")
            if len(line) < 5:
                raise PlyParseError("too few fields after 'property list'")
            return PlyListProperty(line[4], line[2], line[3])
        if len(line) > 3:
            raise PlyParseError("too many fields after 'property'")
        if len(line) < 3:
            raise PlyParseError("too few fields after 'property'")
        return PlyProperty(line[2], line[1])

    def dtype(self, byte_order='='):
        """
        Return the numpy dtype string for this property, prefixed with
        the given byte-order character.
        """
        return byte_order + self.val_dtype

    def _from_fields(self, fields):
        """
        Parse one value from a field generator. Raises StopIteration
        when the generator is exhausted.
        """
        return _np.dtype(self.dtype()).type(next(fields))

    def _to_fields(self, data):
        """
        Yield the single coerced value for this scalar property.
        """
        yield _np.dtype(self.dtype()).type(data)

    def _read_bin(self, stream, byte_order):
        """
        Read one value from a binary stream. Raises StopIteration when
        the stream is exhausted.
        """
        try:
            return _np.fromfile(stream, self.dtype(byte_order), 1)[0]
        except IndexError:
            raise StopIteration

    def _write_bin(self, data, stream, byte_order):
        """
        Write one value to a binary stream.
        """
        _np.dtype(self.dtype(byte_order)).type(data).tofile(stream)

    def __str__(self):
        return 'property %s %s' % (_data_type_reverse[self.val_dtype],
                                   self.name)

    def __repr__(self):
        return 'PlyProperty(%r, %r)' % (self.name,
                                        _lookup_type(self.val_dtype))
|
class PlyListProperty(PlyProperty):
    """
    PLY list property description.
    """
    def __init__(self, name, len_dtype, val_dtype):
        PlyProperty.__init__(self, name, val_dtype)
        self.len_dtype = len_dtype
    def _get_len_dtype(self):
        return self._len_dtype
    def _set_len_dtype(self, len_dtype):
        # Normalize to the canonical numpy type abbreviation.
        self._len_dtype = _data_types[_lookup_type(len_dtype)]
    len_dtype = property(_get_len_dtype, _set_len_dtype)
    def dtype(self, byte_order='='):
        """
        List properties always have a numpy dtype of "object" in the
        in-memory representation: each row holds a variable-length array.
        """
        return '|O'
    def list_dtype(self, byte_order='='):
        """
        Return the pair (len_dtype, val_dtype) (both numpy-friendly
        strings).
        """
        return ((byte_order + self.len_dtype), (byte_order + self.val_dtype))
    def _from_fields(self, fields):
        # Parse "<n> v1 ... vn" from a field generator; StopIteration
        # signals a truncated list to the caller.
        (len_t, val_t) = self.list_dtype()
        n = int(_np.dtype(len_t).type(next(fields)))
        data = _np.loadtxt(list(_islice(fields, n)), val_t, ndmin=1)
        if (len(data) < n):
            raise StopIteration
        return data
    def _to_fields(self, data):
        """
        Return generator over the (numerical) PLY representation of the
        list data (length followed by actual data).
        """
        (len_t, val_t) = self.list_dtype()
        data = _np.asarray(data, dtype=val_t).ravel()
        (yield _np.dtype(len_t).type(data.size))
        for x in data:
            (yield x)
    def _read_bin(self, stream, byte_order):
        # Binary layout: a length of len_dtype followed by `length`
        # values of val_dtype.  StopIteration signals early end-of-file.
        (len_t, val_t) = self.list_dtype(byte_order)
        try:
            n = _np.fromfile(stream, len_t, 1)[0]
        except IndexError:
            raise StopIteration
        data = _np.fromfile(stream, val_t, n)
        if (len(data) < n):
            raise StopIteration
        return data
    def _write_bin(self, data, stream, byte_order):
        """
        Write data to a binary stream (length first, then the values).
        """
        (len_t, val_t) = self.list_dtype(byte_order)
        data = _np.asarray(data, dtype=val_t).ravel()
        _np.array(data.size, dtype=len_t).tofile(stream)
        data.tofile(stream)
    def __str__(self):
        len_str = _data_type_reverse[self.len_dtype]
        val_str = _data_type_reverse[self.val_dtype]
        return ('property list %s %s %s' % (len_str, val_str, self.name))
    def __repr__(self):
        return ('PlyListProperty(%r, %r, %r)' % (self.name, _lookup_type(self.len_dtype), _lookup_type(self.val_dtype)))
|
def compute_slic(cam_token):
    """Compute a 150-segment SLIC superpixel map for one nuScenes camera
    image and save it as an 8-bit label PNG named after the token.

    NOTE(review): relies on the module-level `nusc` handle and assumes
    the hard-coded output directory already exists.
    """
    cam = nusc.get('sample_data', cam_token)
    image = Image.open(os.path.join(nusc.dataroot, cam['filename']))
    labels = slic(image, n_segments=150, compactness=6, sigma=3.0,
                  start_label=0).astype(np.uint8)
    out_path = './superpixels/nuscenes/superpixels_slic/' + cam['token'] + '.png'
    Image.fromarray(labels).save(out_path)
|
def compute_slic_30(cam_token):
    """Compute a coarse 30-segment SLIC superpixel map for one nuScenes
    camera image and save it as an 8-bit label PNG named after the token.

    NOTE(review): relies on the module-level `nusc` handle and assumes
    the hard-coded output directory already exists.
    """
    cam = nusc.get('sample_data', cam_token)
    image = Image.open(os.path.join(nusc.dataroot, cam['filename']))
    labels = slic(image, n_segments=30, compactness=6, sigma=3.0,
                  start_label=0).astype(np.uint8)
    out_path = './superpixels/nuscenes/superpixels_slic_30/' + cam['token'] + '.png'
    Image.fromarray(labels).save(out_path)
|
def confusion_matrix(preds, labels, num_classes):
    """Build a dense (num_classes x num_classes) confusion matrix.

    Entry [i, j] counts samples with ground-truth label i predicted as
    class j.  Returned as a float tensor.
    """
    # Encode each (label, pred) pair as a single index, histogram them,
    # and fold back into a square matrix.
    flat = labels * num_classes + preds
    counts = torch.bincount(flat, minlength=num_classes ** 2)
    return counts.reshape(num_classes, num_classes).float()
|
def compute_IoU_from_cmatrix(hist, ignore_index=None):
    """Computes the Intersection over Union (IoU).

    Args:
        hist: confusion matrix (rows = ground truth, columns = predictions).
        ignore_index: optional class whose ground-truth row is zeroed and
            whose entry is dropped from the mean.
    Returns:
        (m_IoU, fw_IoU, IoU): mean IoU, a frequency-weighted score, and
        the per-class IoU vector.
    """
    if ignore_index is not None:
        # Zero the ground-truth row of the ignored class (mutates `hist`).
        # NOTE(review): the prediction *column* is left untouched — confirm
        # that predictions falling into the ignored class should still
        # count against other classes' unions.
        hist[ignore_index] = 0.0
    intersection = torch.diag(hist)
    row_sum = hist.sum(dim=1)
    col_sum = hist.sum(dim=0)
    union = row_sum + col_sum - intersection
    IoU = intersection.float() / union.float()
    # Classes absent from both prediction and ground truth count as 1.0.
    IoU[union == 0] = 1.0
    if ignore_index is not None:
        IoU = torch.cat((IoU[:ignore_index], IoU[ignore_index + 1:]))
    m_IoU = torch.mean(IoU).item()
    total = torch.sum(hist)
    inter_total = torch.sum(intersection)
    fw_IoU = (inter_total / (2 * total - inter_total)).item()
    return (m_IoU, fw_IoU, IoU)
|
def compute_IoU(preds, labels, num_classes, ignore_index=None):
    """Computes the Intersection over Union (IoU) from raw predictions
    and labels by first accumulating a confusion matrix."""
    cmatrix = confusion_matrix(preds, labels, num_classes)
    return compute_IoU_from_cmatrix(cmatrix, ignore_index)
|
def _lookup_type(type_str):
    # Normalize a type name ('float32', 'f4', ...) to its canonical
    # abbreviation via the module-level type tables.
    if type_str in _data_type_reverse:
        return _data_type_reverse[type_str]
    try:
        canonical = _data_types[type_str]
    except KeyError:
        raise ValueError('field type %r not in %r' % (type_str, _types_list))
    return _data_type_reverse[canonical]
|
def _split_line(line, n):
    # Split into at most n+1 whitespace-delimited fields, padding with a
    # single empty string when the line has exactly n fields so callers
    # can always index field n.
    fields = line.split(None, n)
    if len(fields) == n:
        fields.append('')
    assert len(fields) == n + 1
    return fields
|
def make2d(array, cols=None, dtype=None):
"\n Make a 2D array from an array of arrays. The `cols' and `dtype'\n arguments can be omitted if the array is not empty.\n\n "
if (((cols is None) or (dtype is None)) and (not len(array))):
raise RuntimeError('cols and dtype must be specified for empty array')
if (cols is None):
cols = len(array[0])
if (dtype is None):
dtype = array[0].dtype
return _np.fromiter(array, [('_', dtype, (cols,))], count=len(array))['_']
|
class PlyParseError(Exception):
    """
    Raised when a PLY file cannot be parsed.

    The attributes `element', `row', `prop', and `message' give
    additional information.
    """

    def __init__(self, message, element=None, row=None, prop=None):
        self.message = message
        self.element = element
        self.row = row
        self.prop = prop
        # Build a human-readable prefix from whatever context is known.
        s = ''
        if self.element:
            s += ('element %r: ' % self.element.name)
        if (self.row is not None):
            s += ('row %d: ' % self.row)
        if self.prop:
            s += ('property %r: ' % self.prop.name)
        s += self.message
        Exception.__init__(self, s)

    def __repr__(self):
        # Bug fix: the format string previously received only
        # `self.message` (the other values sat outside the %-expression,
        # turning the return value into a tuple), so repr() raised
        # TypeError.  All four values now feed the format string.
        return ('PlyParseError(%r, element=%r, row=%r, prop=%r)'
                % (self.message, self.element, self.row, self.prop))
|
class PlyData(object):
    """
    PLY file header and data.

    A PlyData instance is created in one of two ways: by the static
    method PlyData.read (to read a PLY file), or directly from __init__
    given a sequence of elements (which can then be written to a PLY
    file).
    """
    def __init__(self, elements=[], text=False, byte_order='=', comments=[], obj_info=[]):
        """
        elements: sequence of PlyElement instances.

        text: whether the resulting PLY file will be text (True) or
        binary (False).

        byte_order: '<' for little-endian, '>' for big-endian, or '='
        for native.  This is only relevant if `text' is False.

        comments: sequence of strings that will be placed in the header
        between the 'ply' and 'format ...' lines.

        obj_info: like comments, but will be placed in the header with
        "obj_info ..." instead of "comment ...".
        """
        # NOTE(review): the mutable default arguments are safe here
        # because each is copied (list()/tuple()) and never mutated.
        if ((byte_order == '=') and (not text)):
            # Pin the native byte order so the emitted header names a
            # concrete endianness.
            byte_order = _native_byte_order
        self.byte_order = byte_order
        self.text = text
        self.comments = list(comments)
        self.obj_info = list(obj_info)
        self.elements = elements
    def _get_elements(self):
        return self._elements
    def _set_elements(self, elements):
        # Freeze the sequence and rebuild the name lookup table.
        self._elements = tuple(elements)
        self._index()
    elements = property(_get_elements, _set_elements)
    def _get_byte_order(self):
        return self._byte_order
    def _set_byte_order(self, byte_order):
        if (byte_order not in ['<', '>', '=']):
            raise ValueError("byte order must be '<', '>', or '='")
        self._byte_order = byte_order
    byte_order = property(_get_byte_order, _set_byte_order)
    def _index(self):
        # Map element name -> element; duplicate names collapse in the
        # dict, which the length check below detects.
        self._element_lookup = dict(((elt.name, elt) for elt in self._elements))
        if (len(self._element_lookup) != len(self._elements)):
            raise ValueError('two elements with same name')
    @staticmethod
    def _parse_header(stream):
        """
        Parse a PLY header from a readable binary file-like stream and
        return a PlyData describing it.  No element data is consumed.
        """
        lines = []
        comments = {'comment': [], 'obj_info': []}
        while True:
            line = stream.readline().decode('ascii').strip()
            fields = _split_line(line, 1)
            if (fields[0] == 'end_header'):
                break
            elif (fields[0] in comments.keys()):
                # Comment lines are split only once so their text survives.
                lines.append(fields)
            else:
                lines.append(line.split())
        a = 0
        if (lines[a] != ['ply']):
            raise PlyParseError("expected 'ply'")
        a += 1
        # Comments may appear between 'ply' and 'format'.
        while (lines[a][0] in comments.keys()):
            comments[lines[a][0]].append(lines[a][1])
            a += 1
        if (lines[a][0] != 'format'):
            raise PlyParseError("expected 'format'")
        # NOTE(review): lines[a][2] is indexed before the field-count
        # check below, so a short 'format' line raises IndexError rather
        # than PlyParseError — consider checking the length first.
        if (lines[a][2] != '1.0'):
            raise PlyParseError("expected version '1.0'")
        if (len(lines[a]) != 3):
            raise PlyParseError("too many fields after 'format'")
        fmt = lines[a][1]
        if (fmt not in _byte_order_map):
            raise PlyParseError(("don't understand format %r" % fmt))
        byte_order = _byte_order_map[fmt]
        text = (fmt == 'ascii')
        a += 1
        # Trailing comments between 'format' and the first element.
        while ((a < len(lines)) and (lines[a][0] in comments.keys())):
            comments[lines[a][0]].append(lines[a][1])
            a += 1
        return PlyData(PlyElement._parse_multi(lines[a:]), text, byte_order, comments['comment'], comments['obj_info'])
    @staticmethod
    def read(stream):
        """
        Read PLY data from a readable file-like object or filename.
        """
        (must_close, stream) = _open_stream(stream, 'read')
        try:
            data = PlyData._parse_header(stream)
            # Header parsing leaves the stream at the start of the
            # payload; read each element's data in declaration order.
            for elt in data:
                elt._read(stream, data.text, data.byte_order)
        finally:
            if must_close:
                stream.close()
        return data
    def write(self, stream):
        """
        Write PLY data to a writeable file-like object or filename.
        """
        (must_close, stream) = _open_stream(stream, 'write')
        try:
            # ASCII header first (CRLF-terminated), then element data.
            stream.write(self.header.encode('ascii'))
            stream.write(b'\r\n')
            for elt in self:
                elt._write(stream, self.text, self.byte_order)
        finally:
            if must_close:
                stream.close()
    @property
    def header(self):
        """
        Provide PLY-formatted metadata for the instance.
        """
        lines = ['ply']
        if self.text:
            lines.append('format ascii 1.0')
        else:
            lines.append((('format ' + _byte_order_reverse[self.byte_order]) + ' 1.0'))
        for c in self.comments:
            lines.append(('comment ' + c))
        for c in self.obj_info:
            lines.append(('obj_info ' + c))
        lines.extend((elt.header for elt in self.elements))
        lines.append('end_header')
        return '\r\n'.join(lines)
    def __iter__(self):
        # Iterate over elements in declaration order.
        return iter(self.elements)
    def __len__(self):
        return len(self.elements)
    def __contains__(self, name):
        return (name in self._element_lookup)
    def __getitem__(self, name):
        return self._element_lookup[name]
    def __str__(self):
        return self.header
    def __repr__(self):
        return ('PlyData(%r, text=%r, byte_order=%r, comments=%r, obj_info=%r)' % (self.elements, self.text, self.byte_order, self.comments, self.obj_info))
|
def _open_stream(stream, read_or_write):
    # Accept either an already-open file-like object (anything exposing a
    # 'read'/'write' attribute) or a filename to open in binary mode.
    # Returns (must_close, stream); must_close is True only when this
    # function opened the file itself.
    if hasattr(stream, read_or_write):
        return (False, stream)
    mode = read_or_write[0] + 'b'
    try:
        return (True, open(stream, mode))
    except TypeError:
        raise RuntimeError('expected open file or filename')
|
class PlyElement(object):
    """
    PLY file element.

    A client of this library doesn't normally need to instantiate this
    directly, so the following is only for the sake of documenting the
    internals.

    Creating a PlyElement instance is generally done in one of two ways:
    as a byproduct of PlyData.read (when reading a PLY file) and by
    PlyElement.describe (before writing a PLY file).
    """
    def __init__(self, name, properties, count, comments=[]):
        """
        This is not part of the public interface. The preferred methods
        of obtaining PlyElement instances are PlyData.read (to read from
        a file) and PlyElement.describe (to construct from a numpy
        array).
        """
        # NOTE(review): the mutable default `comments=[]` is safe here
        # because it is only copied via list(), never mutated.
        self._name = str(name)
        self._check_name()
        self._count = count
        self._properties = tuple(properties)
        self._index()
        self.comments = list(comments)
        # Elements containing list properties cannot use fast whole-array
        # numpy I/O and fall back to row-by-row reading/writing.
        self._have_list = any((isinstance(p, PlyListProperty) for p in self.properties))
    @property
    def count(self):
        # Number of rows declared in the header (kept in sync with the
        # data array by _set_data).
        return self._count
    def _get_data(self):
        return self._data
    def _set_data(self, data):
        # Assigning data also updates the row count and validates that
        # every declared property exists as a field.
        self._data = data
        self._count = len(data)
        self._check_sanity()
    data = property(_get_data, _set_data)
    def _check_sanity(self):
        # Every declared property must be a field of the data array.
        for prop in self.properties:
            if (prop.name not in self._data.dtype.fields):
                raise ValueError(('dangling property %r' % prop.name))
    def _get_properties(self):
        return self._properties
    def _set_properties(self, properties):
        self._properties = tuple(properties)
        self._check_sanity()
        self._index()
    properties = property(_get_properties, _set_properties)
    def _index(self):
        # Map property name -> property; duplicate names collapse in the
        # dict and are detected by the length check below.
        self._property_lookup = dict(((prop.name, prop) for prop in self._properties))
        if (len(self._property_lookup) != len(self._properties)):
            raise ValueError('two properties with same name')
    def ply_property(self, name):
        # Look up a property descriptor by name.
        return self._property_lookup[name]
    @property
    def name(self):
        return self._name
    def _check_name(self):
        # Names appear space-delimited in the header, so embedded
        # whitespace would corrupt the file.
        if any((c.isspace() for c in self._name)):
            msg = ('element name %r contains spaces' % self._name)
            raise ValueError(msg)
    def dtype(self, byte_order='='):
        """
        Return the numpy dtype of the in-memory representation of the
        data. (If there are no list properties, and the PLY format is
        binary, then this also accurately describes the on-disk
        representation of the element.)
        """
        return [(prop.name, prop.dtype(byte_order)) for prop in self.properties]
    @staticmethod
    def _parse_multi(header_lines):
        """
        Parse a list of PLY element definitions.
        """
        elements = []
        while header_lines:
            (elt, header_lines) = PlyElement._parse_one(header_lines)
            elements.append(elt)
        return elements
    @staticmethod
    def _parse_one(lines):
        """
        Consume one element definition. The unconsumed input is
        returned along with a PlyElement instance.
        """
        a = 0
        line = lines[a]
        if (line[0] != 'element'):
            raise PlyParseError("expected 'element'")
        if (len(line) > 3):
            raise PlyParseError("too many fields after 'element'")
        if (len(line) < 3):
            raise PlyParseError("too few fields after 'element'")
        (name, count) = (line[1], int(line[2]))
        comments = []
        properties = []
        # Consume the following 'comment' and 'property' lines until the
        # next element declaration (or the end of the header lines).
        while True:
            a += 1
            if (a >= len(lines)):
                break
            if (lines[a][0] == 'comment'):
                comments.append(lines[a][1])
            elif (lines[a][0] == 'property'):
                properties.append(PlyProperty._parse_one(lines[a]))
            else:
                break
        return (PlyElement(name, properties, count, comments), lines[a:])
    @staticmethod
    def describe(data, name, len_types={}, val_types={}, comments=[]):
        """
        Construct a PlyElement from an array's metadata.

        len_types and val_types can be given as mappings from list
        property names to type strings (like 'u1', 'f4', etc., or
        'int8', 'float32', etc.). These can be used to define the length
        and value types of list properties. List property lengths
        always default to type 'u1' (8-bit unsigned integer), and value
        types default to 'i4' (32-bit integer).
        """
        if (not isinstance(data, _np.ndarray)):
            raise TypeError('only numpy arrays are supported')
        if (len(data.shape) != 1):
            raise ValueError('only one-dimensional arrays are supported')
        count = len(data)
        properties = []
        descr = data.dtype.descr
        for t in descr:
            if (not isinstance(t[1], str)):
                raise ValueError('nested records not supported')
            if (not t[0]):
                raise ValueError('field with empty name')
            # Object-typed ('O') or sub-array fields become list
            # properties; everything else becomes a scalar property.
            if ((len(t) != 2) or (t[1][1] == 'O')):
                if (t[1][1] == 'O'):
                    if (len(t) != 2):
                        raise ValueError('non-scalar object fields not supported')
                len_str = _data_type_reverse[len_types.get(t[0], 'u1')]
                if (t[1][1] == 'O'):
                    val_type = val_types.get(t[0], 'i4')
                    val_str = _lookup_type(val_type)
                else:
                    val_str = _lookup_type(t[1][1:])
                prop = PlyListProperty(t[0], len_str, val_str)
            else:
                val_str = _lookup_type(t[1][1:])
                prop = PlyProperty(t[0], val_str)
            properties.append(prop)
        elt = PlyElement(name, properties, count, comments)
        elt.data = data
        return elt
    def _read(self, stream, text, byte_order):
        """
        Read the actual data from a PLY file.
        """
        if text:
            self._read_txt(stream)
        elif self._have_list:
            # List properties force slow row-by-row binary reads.
            self._read_bin(stream, byte_order)
        else:
            # Fast path: fixed-size rows can be bulk-loaded in one call.
            self._data = _np.fromfile(stream, self.dtype(byte_order), self.count)
            if (len(self._data) < self.count):
                k = len(self._data)
                del self._data
                raise PlyParseError('early end-of-file', self, k)
        self._check_sanity()
    def _write(self, stream, text, byte_order):
        """
        Write the data to a PLY file.
        """
        if text:
            self._write_txt(stream)
        elif self._have_list:
            self._write_bin(stream, byte_order)
        else:
            # Fast path: dump the whole record array at once.
            self.data.astype(self.dtype(byte_order), copy=False).tofile(stream)
    def _read_txt(self, stream):
        """
        Load a PLY element from an ASCII-format PLY file. The element
        may contain list properties.
        """
        self._data = _np.empty(self.count, dtype=self.dtype())
        k = 0
        for line in _islice(iter(stream.readline, b''), self.count):
            fields = iter(line.strip().split())
            for prop in self.properties:
                try:
                    self._data[prop.name][k] = prop._from_fields(fields)
                except StopIteration:
                    raise PlyParseError('early end-of-line', self, k, prop)
                except ValueError:
                    raise PlyParseError('malformed input', self, k, prop)
            # The row must be fully consumed: leftover fields are errors.
            try:
                next(fields)
            except StopIteration:
                pass
            else:
                raise PlyParseError('expected end-of-line', self, k)
            k += 1
        if (k < self.count):
            del self._data
            raise PlyParseError('early end-of-file', self, k)
    def _write_txt(self, stream):
        """
        Save a PLY element to an ASCII-format PLY file. The element may
        contain list properties.
        """
        for rec in self.data:
            fields = []
            for prop in self.properties:
                fields.extend(prop._to_fields(rec[prop.name]))
            _np.savetxt(stream, [fields], '%.18g', newline='\r\n')
    def _read_bin(self, stream, byte_order):
        """
        Load a PLY element from a binary PLY file. The element may
        contain list properties.
        """
        self._data = _np.empty(self.count, dtype=self.dtype(byte_order))
        for k in _range(self.count):
            for prop in self.properties:
                try:
                    self._data[prop.name][k] = prop._read_bin(stream, byte_order)
                except StopIteration:
                    raise PlyParseError('early end-of-file', self, k, prop)
    def _write_bin(self, stream, byte_order):
        """
        Save a PLY element to a binary PLY file. The element may
        contain list properties.
        """
        for rec in self.data:
            for prop in self.properties:
                prop._write_bin(rec[prop.name], stream, byte_order)
    @property
    def header(self):
        """
        Format this element's metadata as it would appear in a PLY
        header.
        """
        lines = [('element %s %d' % (self.name, self.count))]
        for c in self.comments:
            lines.append(('comment ' + c))
        lines.extend(list(map(str, self.properties)))
        return '\r\n'.join(lines)
    def __getitem__(self, key):
        return self.data[key]
    def __setitem__(self, key, value):
        self.data[key] = value
    def __str__(self):
        return self.header
    def __repr__(self):
        return ('PlyElement(%r, %r, count=%d, comments=%r)' % (self.name, self.properties, self.count, self.comments))
|
class PlyProperty(object):
    """
    PLY property description. This class is pure metadata; the data
    itself is contained in PlyElement instances.
    """

    def __init__(self, name, val_dtype):
        self._name = str(name)
        self._check_name()
        self.val_dtype = val_dtype

    def _get_val_dtype(self):
        return self._val_dtype

    def _set_val_dtype(self, val_dtype):
        # Normalize any accepted spelling ('float32', 'f4', ...) to the
        # canonical numpy type abbreviation.
        self._val_dtype = _data_types[_lookup_type(val_dtype)]

    val_dtype = property(_get_val_dtype, _set_val_dtype)

    @property
    def name(self):
        return self._name

    def _check_name(self):
        # Property names are space-delimited in the header.
        if any(c.isspace() for c in self._name):
            raise RuntimeError('Error: property name %r contains spaces'
                               % self._name)

    @staticmethod
    def _parse_one(line):
        # `line` is a header line pre-split into fields.
        assert line[0] == 'property'
        if line[1] == 'list':
            if len(line) > 5:
                raise PlyParseError("too many fields after 'property list'")
            if len(line) < 5:
                raise PlyParseError("too few fields after 'property list'")
            return PlyListProperty(line[4], line[2], line[3])
        if len(line) > 3:
            raise PlyParseError("too many fields after 'property'")
        if len(line) < 3:
            raise PlyParseError("too few fields after 'property'")
        return PlyProperty(line[2], line[1])

    def dtype(self, byte_order='='):
        """
        Return the numpy dtype string for this property, prefixed with
        the given byte-order character.
        """
        return byte_order + self.val_dtype

    def _from_fields(self, fields):
        """
        Parse one value from a field generator. Raises StopIteration
        when the generator is exhausted.
        """
        return _np.dtype(self.dtype()).type(next(fields))

    def _to_fields(self, data):
        """
        Yield the single coerced value for this scalar property.
        """
        yield _np.dtype(self.dtype()).type(data)

    def _read_bin(self, stream, byte_order):
        """
        Read one value from a binary stream. Raises StopIteration when
        the stream is exhausted.
        """
        try:
            return _np.fromfile(stream, self.dtype(byte_order), 1)[0]
        except IndexError:
            raise StopIteration

    def _write_bin(self, data, stream, byte_order):
        """
        Write one value to a binary stream.
        """
        _np.dtype(self.dtype(byte_order)).type(data).tofile(stream)

    def __str__(self):
        return 'property %s %s' % (_data_type_reverse[self.val_dtype],
                                   self.name)

    def __repr__(self):
        return 'PlyProperty(%r, %r)' % (self.name,
                                        _lookup_type(self.val_dtype))
|
class PlyListProperty(PlyProperty):
    """
    PLY list property description.
    """
    def __init__(self, name, len_dtype, val_dtype):
        PlyProperty.__init__(self, name, val_dtype)
        self.len_dtype = len_dtype
    def _get_len_dtype(self):
        return self._len_dtype
    def _set_len_dtype(self, len_dtype):
        # Normalize to the canonical numpy type abbreviation.
        self._len_dtype = _data_types[_lookup_type(len_dtype)]
    len_dtype = property(_get_len_dtype, _set_len_dtype)
    def dtype(self, byte_order='='):
        """
        List properties always have a numpy dtype of "object" in the
        in-memory representation: each row holds a variable-length array.
        """
        return '|O'
    def list_dtype(self, byte_order='='):
        """
        Return the pair (len_dtype, val_dtype) (both numpy-friendly
        strings).
        """
        return ((byte_order + self.len_dtype), (byte_order + self.val_dtype))
    def _from_fields(self, fields):
        # Parse "<n> v1 ... vn" from a field generator; StopIteration
        # signals a truncated list to the caller.
        (len_t, val_t) = self.list_dtype()
        n = int(_np.dtype(len_t).type(next(fields)))
        data = _np.loadtxt(list(_islice(fields, n)), val_t, ndmin=1)
        if (len(data) < n):
            raise StopIteration
        return data
    def _to_fields(self, data):
        """
        Return generator over the (numerical) PLY representation of the
        list data (length followed by actual data).
        """
        (len_t, val_t) = self.list_dtype()
        data = _np.asarray(data, dtype=val_t).ravel()
        (yield _np.dtype(len_t).type(data.size))
        for x in data:
            (yield x)
    def _read_bin(self, stream, byte_order):
        # Binary layout: a length of len_dtype followed by `length`
        # values of val_dtype.  StopIteration signals early end-of-file.
        (len_t, val_t) = self.list_dtype(byte_order)
        try:
            n = _np.fromfile(stream, len_t, 1)[0]
        except IndexError:
            raise StopIteration
        data = _np.fromfile(stream, val_t, n)
        if (len(data) < n):
            raise StopIteration
        return data
    def _write_bin(self, data, stream, byte_order):
        """
        Write data to a binary stream (length first, then the values).
        """
        (len_t, val_t) = self.list_dtype(byte_order)
        data = _np.asarray(data, dtype=val_t).ravel()
        _np.array(data.size, dtype=len_t).tofile(stream)
        data.tofile(stream)
    def __str__(self):
        len_str = _data_type_reverse[self.len_dtype]
        val_str = _data_type_reverse[self.val_dtype]
        return ('property list %s %s %s' % (len_str, val_str, self.name))
    def __repr__(self):
        return ('PlyListProperty(%r, %r, %r)' % (self.name, _lookup_type(self.len_dtype), _lookup_type(self.val_dtype)))
|
def collect_point_data(scene_name):
    """Collect per-vertex data (XYZ, RGB, normals, instance id, label) for one
    ScanNet scene and save it as ``<scene>_new_semantic.npy``.

    Relies on the module-level ``opt``, ``remapper`` and ``object_id_to_segs``
    globals (the latter presumably filled from the scene's aggregation
    file -- TODO confirm against the caller).
    """
    data_folder = os.path.join(opt.scannet_path, scene_name)
    out_filename = os.path.join(data_folder, (scene_name + '_new_semantic.npy'))
    # Early-exit for already-processed scenes; the original checked only
    # right before np.save, after all the work had been done.
    if os.path.exists(out_filename):
        return
    seg_filename = os.path.join(data_folder, ('%s_vh_clean_2.0.010000.segs.json' % scene_name))
    (seg_to_verts, num_verts) = scannet_utils.read_segmentation(seg_filename)
    ply_filename = os.path.join(data_folder, ('%s_vh_clean_2.ply' % scene_name))
    label_filename = os.path.join(data_folder, ('%s_vh_clean_2.labels.ply' % scene_name))
    points = pc_utils.read_ply_rgba_normal(ply_filename)
    plydata = PlyData().read(label_filename)
    # Remap raw labels to the evaluation label set; one label per vertex.
    labels = np.expand_dims(remapper[np.array(plydata.elements[0]['label'])], 1)
    # Scatter each object's segments onto the vertices; 0 = unannotated.
    instance_ids = np.zeros(shape=num_verts, dtype=np.uint32)
    for (object_id, segs) in object_id_to_segs.items():
        for seg in segs:
            verts = seg_to_verts[seg]
            instance_ids[verts] = object_id
    # Sanity check: all vertices of one instance must share a single label.
    # Instance ids start at 1, so iterate 1..max inclusive (the original
    # range(max(...)) skipped the last instance and checked background 0).
    for i in range(1, int(instance_ids.max()) + 1):
        index = (instance_ids == i)
        if not index.any():
            continue
        if min(labels[index]) != max(labels[index]):
            print('error')
    points = np.delete(points, 6, 1)  # drop the alpha column
    # instance_ids is 1-D; add a column axis so it can be concatenated with
    # the 2-D points/labels arrays (axis=1 requires matching ranks).
    data = np.concatenate((points, instance_ids[:, None], labels), 1)
    print(out_filename)
    np.save(out_filename, data)
|
def preprocess_scenes(scene_name):
    """Process one scene; on any failure report it and abort the worker."""
    try:
        collect_point_data(scene_name)
    except Exception as exc:
        sys.stderr.write((scene_name + 'ERROR!!'))
        sys.stderr.write(str(exc))
        sys.exit((- 1))
    else:
        print('name: ', scene_name)
|
def main():
    """Preprocess every scene directory found under ``opt.scannet_path``
    using a pool of ``opt.num_proc`` worker processes.
    """
    scenes = [d for d in os.listdir(opt.scannet_path) if os.path.isdir(os.path.join(opt.scannet_path, d))]
    scenes.sort()
    print(opt.scannet_path)
    print(('Find %d scenes' % len(scenes)))
    print('Extract points (Vertex XYZ, RGB, NxNyNz, Label, Instance-label)')
    # Context manager terminates and joins the workers; the original pool
    # was never closed.
    with mp.Pool(opt.num_proc) as pool:
        pool.map(preprocess_scenes, scenes)
|
def parse_args():
    """Parse command-line options for the prompt-engineering script.

    Returns:
        argparse.Namespace with ``model``, ``class_set`` and
        ``no_prompt_eng`` attributes.
    """
    # Typo fixed: "engeering" -> "engineering".
    parser = argparse.ArgumentParser(description='Prompt engineering script')
    parser.add_argument('--model', default='RN50', choices=['RN50', 'RN101', 'RN50x4', 'RN50x16', 'ViT32', 'ViT16'], help='clip model name')
    parser.add_argument('--class-set', default=['voc'], nargs='+', choices=['kitti', 'nuscenes', 'scannet', 'city', 'ade', 'stuff', 'voc', 'context', 'acontext', 'mickey', 'batman', 'mario', 'gates', 'blur', 'sports', 'car_brands', 'batman_ext', 'car_color'], help='the set of class names')
    parser.add_argument('--no-prompt-eng', action='store_true', help='disable prompt engineering')
    args = parser.parse_args()
    return args
|
def zeroshot_classifier(model_name, classnames, templates):
    """Build a zero-shot classification weight matrix from CLIP text embeddings.

    For each class name, every prompt template is embedded, L2-normalized,
    averaged, and re-normalized; the per-class embeddings are stacked
    column-wise into a (dim, n_classes) tensor on the GPU.
    """
    (model, preprocess) = clip.load(model_name)
    with torch.no_grad():
        per_class = []
        for name in classnames:
            prompts = clip.tokenize([t.format(name) for t in templates]).cuda()
            embeddings = model.encode_text(prompts)
            embeddings = embeddings / embeddings.norm(dim=(- 1), keepdim=True)
            mean_embedding = embeddings.mean(dim=0)
            per_class.append(mean_embedding / mean_embedding.norm())
        zeroshot_weights = torch.stack(per_class, dim=1).cuda()
    return zeroshot_weights
|
def generate_config(file):
    """Load a YAML config file and stamp it with the current datetime.

    Uses ``yaml.safe_load`` instead of ``yaml.load(..., FullLoader)``:
    equivalent for plain config files and safe against untrusted input.
    """
    with open(file, 'r') as f:
        config = yaml.safe_load(f)
    config['datetime'] = dt.today().strftime('%d%m%y-%H%M')
    return config
|
def save_checkpoint(self):
    """Persist epoch/iteration counters and model, optimizer, scaler and
    LR-scheduler state to ``<ckpt_dir>/checkpoint_epoch_<n>.pth``.
    """
    trained_epoch = self.cur_epoch + 1
    ckpt_name = self.ckpt_dir / ('checkpoint_epoch_%d' % trained_epoch)
    # Unwrap DDP so the saved state dict has no 'module.' prefix.
    model = self.model
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module
    checkpoint_state = {
        'epoch': trained_epoch,
        'it': self.it,
        'model_state': model_state_to_cpu(model.state_dict()),
        'optimizer_state': self.optimizer.state_dict(),
        'scaler': self.scaler.state_dict(),
        'lr_scheduler_state': self.lr_scheduler.state_dict(),
    }
    torch.save(checkpoint_state, f'{ckpt_name}.pth')
|
def resume(self, filename):
    """Restore model parameters from a checkpoint file.

    Args:
        filename: path to the checkpoint ``.pth`` file.

    Raises:
        FileNotFoundError: if *filename* does not exist.
    """
    if (not os.path.isfile(filename)):
        raise FileNotFoundError(filename)
    # Report the actual file being loaded; the original f-string had no
    # placeholder and always printed "(unknown)".
    self.logger.info(f'==> Loading parameters from checkpoint {filename}')
    checkpoint = torch.load(filename, map_location='cpu')
    self.model.load_params(checkpoint['model_state'], strict=True)
    self.logger.info('==> Done')
    return
|
def represents_int(s):
    """Return True if *s* can be parsed as a base-10 integer."""
    try:
        int(s)
    except ValueError:
        return False
    return True
|
def read_aggregation(filename):
    """Parse a ScanNet aggregation JSON file.

    Args:
        filename: path to the ``*.aggregation.json`` file.

    Returns:
        (object_id_to_segs, label_to_segs): 1-based object ids mapped to
        their segment ids, and raw label strings mapped to all segment ids
        carrying that label.
    """
    assert os.path.isfile(filename)
    object_id_to_segs = {}
    label_to_segs = {}
    with open(filename) as f:
        data = json.load(f)
    for group in data['segGroups']:
        object_id = group['objectId'] + 1  # shift so 0 can mean "unannotated"
        label = group['label']
        segs = group['segments']
        object_id_to_segs[object_id] = segs
        if label in label_to_segs:
            label_to_segs[label].extend(segs)
        else:
            # Copy the list: the original aliased `segs` into both dicts, so
            # a later extend() for a repeated label silently grew the first
            # object's entry in object_id_to_segs as well.
            label_to_segs[label] = list(segs)
    return (object_id_to_segs, label_to_segs)
|
def read_segmentation(filename):
    """Read a ScanNet ``*.segs.json`` file.

    Returns:
        (seg_to_verts, num_verts): segment id mapped to the list of vertex
        indices belonging to it, and the total vertex count.
    """
    assert os.path.isfile(filename)
    with open(filename) as f:
        data = json.load(f)
    seg_indices = data['segIndices']
    num_verts = len(seg_indices)
    seg_to_verts = {}
    for vert_idx, seg_id in enumerate(seg_indices):
        seg_to_verts.setdefault(seg_id, []).append(vert_idx)
    return (seg_to_verts, num_verts)
|
def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
    """Read a tab-separated label-mapping file into a dict.

    Maps the *label_from* column to the integer *label_to* column.  If the
    keys themselves look like integers, they are converted to int.
    """
    assert os.path.isfile(filename)
    mapping = dict()
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile, delimiter='\t')
        for row in reader:
            mapping[row[label_from]] = int(row[label_to])
    # Guard against an empty file; the original indexed keys()[0] blindly
    # and raised IndexError on a header-only mapping.
    if mapping and represents_int(next(iter(mapping))):
        mapping = {int(k): v for (k, v) in mapping.items()}
    return mapping
|
def read_scene_types_mapping(filename, remove_spaces=True):
    """Read a tab-separated "<id>    <scene type>" file into a {name: id} dict.

    Args:
        filename: path to the mapping file.
        remove_spaces: strip surrounding whitespace from the names.
    """
    assert os.path.isfile(filename)
    # Use a context manager so the file handle is closed; the original
    # leaked it via open(...).read().
    with open(filename) as f:
        lines = f.read().splitlines()
    lines = [line.split('\t') for line in lines]
    if remove_spaces:
        mapping = {x[1].strip(): int(x[0]) for x in lines}
    else:
        mapping = {x[1]: int(x[0]) for x in lines}
    return mapping
|
def visualize_label_image(filename, image):
    """Write a color visualization of a per-pixel label image to *filename*."""
    (height, width) = image.shape[:2]
    vis_image = np.zeros([height, width, 3], dtype=np.uint8)
    # Paint every label value with its palette color.
    for (idx, color) in enumerate(create_color_palette()):
        vis_image[(image == idx)] = color
    imageio.imwrite(filename, vis_image)
|
def visualize_instance_image(filename, image):
    """Write a color visualization of an instance-id image to *filename*;
    colors cycle through the palette by instance id modulo its size.
    """
    (height, width) = image.shape[:2]
    vis_image = np.zeros([height, width, 3], dtype=np.uint8)
    color_palette = create_color_palette()
    n_colors = len(color_palette)
    for inst in np.unique(image):
        vis_image[(image == inst)] = color_palette[(inst % n_colors)]
    imageio.imwrite(filename, vis_image)
|
def create_color_palette():
    """Return the NYU40-style RGB palette used for label/instance rendering.

    The final entry (black) is commonly used for background/unannotated.
    """
    return [
        (174, 199, 232),
        (152, 223, 138),
        (31, 119, 180),
        (255, 187, 120),
        (188, 189, 34),
        (140, 86, 75),
        (255, 152, 150),
        (214, 39, 40),
        (197, 176, 213),
        (148, 103, 189),
        (196, 156, 148),
        (23, 190, 207),
        (247, 182, 210),
        (219, 219, 141),
        (255, 127, 14),
        (158, 218, 229),
        (44, 160, 44),
        (112, 128, 144),
        (227, 119, 194),
        (82, 84, 163),
        (0, 0, 0),
    ]
|
class BatchgeneratorsTransform(tfm.Transform):
    'Example wrapper for `batchgenerators <https://github.com/MIC-DKFZ/batchgenerators>`_ transformations.'

    def __init__(self, transforms, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)) -> None:
        super().__init__()
        self.transforms = transforms  # batchgenerators transform callables
        self.entries = entries  # sample keys to adapt before/after transforming

    def __call__(self, sample: dict) -> dict:
        # batchgenerators expects a leading batch dimension; add it.
        for entry in self.entries:
            if (entry not in sample):
                if tfm.raise_error_if_entry_not_extracted:
                    raise ValueError(tfm.ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
                continue
            np_entry = tfm.check_and_return(sample[entry], np.ndarray)
            sample[entry] = np.expand_dims(np_entry, 0)
        # batchgenerators transforms take the whole sample as kwargs.
        for t in self.transforms:
            sample = t(**sample)
        # Remove the batch dimension again.
        for entry in self.entries:
            np_entry = tfm.check_and_return(sample[entry], np.ndarray)
            sample[entry] = np_entry.squeeze(0)
        return sample
|
class TorchIOTransform(tfm.Transform):
    'Example wrapper for `TorchIO <https://github.com/fepegar/torchio>`_ transformations.'

    def __init__(self, transforms: list, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)) -> None:
        super().__init__()
        self.transforms = transforms  # TorchIO transform callables
        self.entries = entries  # sample keys to adapt before/after transforming

    def __call__(self, sample: dict) -> dict:
        # TorchIO expects a trailing channel dimension; append it.
        for entry in self.entries:
            if (entry not in sample):
                if tfm.raise_error_if_entry_not_extracted:
                    raise ValueError(tfm.ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
                continue
            np_entry = tfm.check_and_return(sample[entry], np.ndarray)
            sample[entry] = np.expand_dims(np_entry, (- 1))
        for t in self.transforms:
            sample = t(sample)
        # TorchIO returns torch tensors; convert back and drop the channel dim.
        for entry in self.entries:
            np_entry = tfm.check_and_return(sample[entry].numpy(), np.ndarray)
            sample[entry] = np_entry.squeeze((- 1))
        return sample
|
class BatchgeneratorsTransform(tfm.Transform):
    """Adapter that applies `batchgenerators
    <https://github.com/MIC-DKFZ/batchgenerators>`_ transforms to a pymia sample.
    """

    def __init__(self, transforms, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)) -> None:
        super().__init__()
        self.transforms = transforms
        self.entries = entries

    def __call__(self, sample: dict) -> dict:
        # Prepend the batch axis batchgenerators expects.
        for entry in self.entries:
            if entry not in sample:
                if tfm.raise_error_if_entry_not_extracted:
                    raise ValueError(tfm.ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
                continue
            arr = tfm.check_and_return(sample[entry], np.ndarray)
            sample[entry] = arr[np.newaxis, ...]
        # batchgenerators transforms consume the sample as keyword arguments.
        for transform in self.transforms:
            sample = transform(**sample)
        # Strip the batch axis again.
        for entry in self.entries:
            arr = tfm.check_and_return(sample[entry], np.ndarray)
            sample[entry] = arr.squeeze(0)
        return sample
|
class TorchIOTransform(tfm.Transform):
    """Adapter that applies `TorchIO <https://github.com/fepegar/torchio>`_
    transforms to a pymia sample.
    """

    def __init__(self, transforms: list, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)) -> None:
        super().__init__()
        self.transforms = transforms
        self.entries = entries

    def __call__(self, sample: dict) -> dict:
        # Append the trailing channel axis TorchIO expects.
        for entry in self.entries:
            if entry not in sample:
                if tfm.raise_error_if_entry_not_extracted:
                    raise ValueError(tfm.ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
                continue
            arr = tfm.check_and_return(sample[entry], np.ndarray)
            sample[entry] = arr[..., np.newaxis]
        for transform in self.transforms:
            sample = transform(sample)
        # TorchIO returns tensors; convert back and drop the channel axis.
        for entry in self.entries:
            arr = tfm.check_and_return(sample[entry].numpy(), np.ndarray)
            sample[entry] = arr.squeeze(-1)
        return sample
|
def plot_sample(plot_dir: str, id_: str, sample: dict):
    """Save both image channels and the label map of *sample* as PNGs."""
    def _save(suffix, arr):
        plt.imsave(os.path.join(plot_dir, f'{id_}_{suffix}.png'), arr)
    _save('image_channel0', sample[defs.KEY_IMAGES][0])
    _save('image_channel1', sample[defs.KEY_IMAGES][1])
    _save('label', sample[defs.KEY_LABELS])
|
def main(hdf_file, plot_dir):
    """Demonstrate slice-wise data augmentation with pymia, batchgenerators,
    and TorchIO, saving the same example slice once per augmentation backend.
    """
    os.makedirs(plot_dir, exist_ok=True)
    extractor = extr.DataExtractor(categories=(defs.KEY_IMAGES, defs.KEY_LABELS))
    indexing_strategy = extr.SliceIndexing()
    dataset = extr.PymiaDatasource(hdf_file, indexing_strategy, extractor)
    seed = 1
    np.random.seed(seed)
    sample_idx = 55  # arbitrary but fixed slice so the outputs are comparable
    # Baseline: no augmentation.
    transforms_augmentation = []
    transforms_before_augmentation = [tfm.Permute(permutation=(2, 0, 1))]  # channels-first
    transforms_after_augmentation = [tfm.Squeeze(entries=(defs.KEY_LABELS,))]  # drop label channel dim
    train_transforms = tfm.ComposeTransform(((transforms_before_augmentation + transforms_augmentation) + transforms_after_augmentation))
    dataset.set_transform(train_transforms)
    sample = dataset[sample_idx]
    plot_sample(plot_dir, 'none', sample)
    # pymia's built-in augmentations.
    transforms_augmentation = [augm.RandomRotation90(axes=((- 2), (- 1))), augm.RandomMirror()]
    train_transforms = tfm.ComposeTransform(((transforms_before_augmentation + transforms_augmentation) + transforms_after_augmentation))
    dataset.set_transform(train_transforms)
    sample = dataset[sample_idx]
    plot_sample(plot_dir, 'pymia', sample)
    # batchgenerators augmentations via the wrapper class above.
    transforms_augmentation = [BatchgeneratorsTransform([bg_tfm.spatial_transforms.MirrorTransform(axes=(0, 1), data_key=defs.KEY_IMAGES, label_key=defs.KEY_LABELS), bg_tfm.noise_transforms.GaussianBlurTransform(blur_sigma=(0.2, 1.0), data_key=defs.KEY_IMAGES, label_key=defs.KEY_LABELS)])]
    train_transforms = tfm.ComposeTransform(((transforms_before_augmentation + transforms_augmentation) + transforms_after_augmentation))
    dataset.set_transform(train_transforms)
    sample = dataset[sample_idx]
    plot_sample(plot_dir, 'batchgenerators', sample)
    # TorchIO augmentations via the wrapper class above.
    transforms_augmentation = [TorchIOTransform([tio.RandomFlip(axes='LR', flip_probability=1.0, keys=(defs.KEY_IMAGES, defs.KEY_LABELS), seed=seed), tio.RandomAffine(scales=(0.9, 1.2), degrees=10, isotropic=False, default_pad_value='otsu', image_interpolation='NEAREST', keys=(defs.KEY_IMAGES, defs.KEY_LABELS), seed=seed)])]
    train_transforms = tfm.ComposeTransform(((transforms_before_augmentation + transforms_augmentation) + transforms_after_augmentation))
    dataset.set_transform(train_transforms)
    sample = dataset[sample_idx]
    plot_sample(plot_dir, 'torchio', sample)
|
class FileTypes(enum.Enum):
    # Identifiers for the per-subject files of the example dataset.
    T1 = 1  # T1-weighted MR image
    T2 = 2  # T2-weighted MR image
    GT = 3  # ground-truth segmentation
    MASK = 4  # brain mask
    AGE = 5  # age (from the demographic text file)
    GPA = 6  # grade point average (from the demographic text file)
    GENDER = 7  # gender (from the demographic text file)
|
class Subject(data.SubjectFile):
    # Groups one subject's image/label/mask/demographic file paths by
    # category for the pymia dataset creator.

    def __init__(self, subject: str, files: dict):
        # `files` maps FileTypes members to paths; the keyword groups below
        # become the dataset categories (images, labels, mask, numerical, gender).
        super().__init__(subject, images={FileTypes.T1.name: files[FileTypes.T1], FileTypes.T2.name: files[FileTypes.T2]}, labels={FileTypes.GT.name: files[FileTypes.GT]}, mask={FileTypes.MASK.name: files[FileTypes.MASK]}, numerical={FileTypes.AGE.name: files[FileTypes.AGE], FileTypes.GPA.name: files[FileTypes.GPA]}, gender={FileTypes.GENDER.name: files[FileTypes.GENDER]})
        # Subject directory (keyed by the subject id in `files`), '' if absent.
        self.subject_path = files.get(subject, '')
|
class LoadData(file_load.Load):
    # Load callback for the dataset creator: demographic values are parsed
    # from the per-subject text file, images are read with SimpleITK.

    def __call__(self, file_name: str, id_: str, category: str, subject_id: str) -> typing.Tuple[(np.ndarray, typing.Union[(conv.ImageProperties, None)])]:
        """Load one file; return (data, image_properties-or-None)."""
        if (id_ == FileTypes.AGE.name):
            with open(file_name, 'r') as f:
                # Age is on the first line, formatted "<key>: <value>".
                value = np.asarray([int(f.readline().split(':')[1].strip())])
                return (value, None)
        if (id_ == FileTypes.GPA.name):
            with open(file_name, 'r') as f:
                # GPA is on the second line.
                value = np.asarray([float(f.readlines()[1].split(':')[1].strip())])
                return (value, None)
        if (id_ == FileTypes.GENDER.name):
            with open(file_name, 'r') as f:
                # Gender is on the third line; stored as a 0-d string array.
                value = np.array(f.readlines()[2].split(':')[1].strip())
                return (value, None)
        if (category == defs.KEY_IMAGES):
            img = sitk.ReadImage(file_name, sitk.sitkFloat32)
        else:
            # Labels/masks are discrete -> uint8.
            img = sitk.ReadImage(file_name, sitk.sitkUInt8)
        return (sitk.GetArrayFromImage(img), conv.ImageProperties(img))
|
class FileTypes(enum.Enum):
    """Identifiers for the per-subject files of the example dataset.

    auto() assigns 1..7 in declaration order, matching the original
    explicit values.
    """
    T1 = enum.auto()      # T1-weighted MR image
    T2 = enum.auto()      # T2-weighted MR image
    GT = enum.auto()      # ground-truth segmentation
    MASK = enum.auto()    # brain mask
    AGE = enum.auto()     # age (demographic file)
    GPA = enum.auto()     # grade point average (demographic file)
    GENDER = enum.auto()  # gender (demographic file)
|
class LoadData(file_load.Load):
    """Load callback: demographic values are parsed from the subject's text
    file; images are read with SimpleITK (float32 images, uint8 labels/masks).
    """

    def __call__(self, file_name: str, id_: str, category: str, subject_id: str) -> typing.Tuple[(np.ndarray, typing.Union[(conv.ImageProperties, None)])]:
        if (id_ == FileTypes.AGE.name):
            with open(file_name, 'r') as f:
                # Age: first line, "<key>: <value>".
                return (np.asarray([int(f.readline().split(':')[1].strip())]), None)
        if (id_ == FileTypes.GPA.name):
            with open(file_name, 'r') as f:
                # GPA: second line.
                return (np.asarray([float(f.readlines()[1].split(':')[1].strip())]), None)
        if (id_ == FileTypes.GENDER.name):
            with open(file_name, 'r') as f:
                # Gender: third line, stored as a 0-d string array.
                return (np.array(f.readlines()[2].split(':')[1].strip()), None)
        pixel_type = sitk.sitkFloat32 if (category == defs.KEY_IMAGES) else sitk.sitkUInt8
        img = sitk.ReadImage(file_name, pixel_type)
        return (sitk.GetArrayFromImage(img), conv.ImageProperties(img))
|
class Subject(data.SubjectFile):
    """Subject file bundle: groups one subject's file paths by dataset
    category (images, labels, mask, numerical, gender).
    """

    def __init__(self, subject: str, files: dict):
        images = {FileTypes.T1.name: files[FileTypes.T1], FileTypes.T2.name: files[FileTypes.T2]}
        labels = {FileTypes.GT.name: files[FileTypes.GT]}
        mask = {FileTypes.MASK.name: files[FileTypes.MASK]}
        numerical = {FileTypes.AGE.name: files[FileTypes.AGE], FileTypes.GPA.name: files[FileTypes.GPA]}
        gender = {FileTypes.GENDER.name: files[FileTypes.GENDER]}
        super().__init__(subject, images=images, labels=labels, mask=mask, numerical=numerical, gender=gender)
        # Subject directory (keyed by the subject id in `files`), '' if absent.
        self.subject_path = files.get(subject, '')
|
def main(hdf_file: str, data_dir: str, meta: bool):
    """Create the dataset HDF5 file from the subjects found in *data_dir*.

    When *meta* is set, only metadata (file paths) is written.
    """
    subjects = get_subject_files(data_dir)
    # Always start from a clean file.
    if os.path.exists(hdf_file):
        os.remove(hdf_file)
    with crt.get_writer(hdf_file) as writer:
        callbacks = crt.get_default_callbacks(writer, meta_only=meta)
        transform = tfm.IntensityNormalization(loop_axis=3, entries=(defs.KEY_IMAGES,))
        crt.Traverser().traverse(subjects, callback=callbacks, load=LoadData(), transform=transform)
|
def get_subject_files(data_dir: str) -> typing.List[Subject]:
    """Collects the files for all the subjects in the data directory.

    Args:
        data_dir (str): The data directory.

    Returns:
        typing.List[data.SubjectFile]: The list of the collected subject files,
        sorted by directory name.
    """
    subject_dirs = [subject_dir for subject_dir in glob.glob(os.path.join(data_dir, '*')) if (os.path.isdir(subject_dir) and os.path.basename(subject_dir).startswith('Subject'))]
    # Sort in place: the original called sorted() and discarded the result,
    # leaving the subjects in filesystem order.
    subject_dirs.sort()
    keys = [FileTypes.T1, FileTypes.T2, FileTypes.GT, FileTypes.MASK, FileTypes.AGE, FileTypes.GPA, FileTypes.GENDER]
    subjects = []
    for subject_dir in subject_dirs:
        id_ = os.path.basename(subject_dir)
        file_dict = {id_: subject_dir}  # subject id -> subject directory
        for key in keys:
            file_dict[key] = get_full_file_path(id_, subject_dir, key)
        subjects.append(Subject(id_, file_dict))
    return subjects
|
def get_full_file_path(id_: str, root_dir: str, file_key) -> str:
    """Gets the full file path for an image.

    Args:
        id_ (str): The image identification.
        root_dir (str): The image's root directory.
        file_key (object): A human readable identifier used to identify the image.

    Returns:
        str: The image's full file path.
    """
    # All three demographic keys live in the same text file.
    suffix_by_key = {
        FileTypes.T1: '_T1.mha',
        FileTypes.T2: '_T2.mha',
        FileTypes.GT: '_GT.mha',
        FileTypes.MASK: '_MASK.nii.gz',
        FileTypes.AGE: '_demographic.txt',
        FileTypes.GPA: '_demographic.txt',
        FileTypes.GENDER: '_demographic.txt',
    }
    if file_key not in suffix_by_key:
        raise ValueError('Unknown key')
    return os.path.join(root_dir, id_ + suffix_by_key[file_key])
|
def main(hdf_file, is_meta):
    """2-D slice-wise inference example (PyTorch).

    Slices are extracted from the HDF5 file (or from the filesystem when
    *is_meta* is set), pushed through a small dummy network, and re-assembled
    into per-subject predictions.
    """
    if (not is_meta):
        extractor = extr.DataExtractor(categories=(defs.KEY_IMAGES,))
    else:
        # Meta datasets store only file paths; load the data from disk.
        extractor = extr.FilesystemDataExtractor(categories=(defs.KEY_IMAGES,))
    # Channels-first for PyTorch: (H, W, C) -> (C, H, W).
    transform = tfm.Permute(permutation=(2, 0, 1), entries=(defs.KEY_IMAGES,))
    indexing_strategy = extr.SliceIndexing()
    dataset = extr.PymiaDatasource(hdf_file, indexing_strategy, extractor, transform)
    direct_extractor = extr.ComposeExtractor([extr.ImagePropertiesExtractor(), extr.DataExtractor(categories=(defs.KEY_LABELS, defs.KEY_IMAGES))])
    assembler = assm.SubjectAssembler(dataset)
    pytorch_dataset = pymia_torch.PytorchDatasetAdapter(dataset)
    loader = torch_data.dataloader.DataLoader(pytorch_dataset, batch_size=2, shuffle=False)
    dummy_network = nn.Sequential(nn.Conv2d(in_channels=2, out_channels=8, kernel_size=3, padding=1), nn.Conv2d(in_channels=8, out_channels=1, kernel_size=3, padding=1), nn.Sigmoid())
    torch.set_grad_enabled(False)  # inference only
    nb_batches = len(loader)
    for (i, batch) in enumerate(loader):
        (x, sample_indices) = (batch[defs.KEY_IMAGES], batch[defs.KEY_SAMPLE_INDEX])
        prediction = dummy_network(x)
        # Back to channels-last (B, H, W, C) for assembling.
        numpy_prediction = prediction.numpy().transpose((0, 2, 3, 1))
        is_last = (i == (nb_batches - 1))
        assembler.add_batch(numpy_prediction, sample_indices.numpy(), is_last)
        for subject_index in assembler.subjects_ready:
            subject_prediction = assembler.get_assembled_subject(subject_index)
            # Reference label and image metadata, e.g. for evaluation/saving.
            direct_sample = dataset.direct_extract(direct_extractor, subject_index)
            (target, image_properties) = (direct_sample[defs.KEY_LABELS], direct_sample[defs.KEY_PROPERTIES])
|
def main(hdf_file):
    """3-D patch-wise inference example (PyTorch).

    32^3 patches are padded by 2 voxels per side so that the two
    padding-free 3x3x3 convolutions of the dummy network return outputs of
    the original patch size; subjects are then re-assembled from patches.
    """
    extractor = extr.PadDataExtractor((2, 2, 2), extr.DataExtractor(categories=(defs.KEY_IMAGES,)))
    # Channels-first for PyTorch: (D, H, W, C) -> (C, D, H, W).
    transform = tfm.Permute(permutation=(3, 0, 1, 2), entries=(defs.KEY_IMAGES,))
    indexing_strategy = extr.PatchWiseIndexing(patch_shape=(32, 32, 32))
    dataset = extr.PymiaDatasource(hdf_file, indexing_strategy, extractor, transform)
    direct_extractor = extr.ComposeExtractor([extr.ImagePropertiesExtractor(), extr.DataExtractor(categories=(defs.KEY_LABELS, defs.KEY_IMAGES))])
    assembler = assm.SubjectAssembler(dataset)
    pytorch_dataset = pymia_torch.PytorchDatasetAdapter(dataset)
    loader = torch_data.dataloader.DataLoader(pytorch_dataset, batch_size=2, shuffle=False)
    dummy_network = nn.Sequential(nn.Conv3d(in_channels=2, out_channels=8, kernel_size=3, padding=0), nn.Conv3d(in_channels=8, out_channels=1, kernel_size=3, padding=0), nn.Sigmoid())
    torch.set_grad_enabled(False)  # inference only
    nb_batches = len(loader)
    for (i, batch) in enumerate(loader):
        (x, sample_indices) = (batch[defs.KEY_IMAGES], batch[defs.KEY_SAMPLE_INDEX])
        prediction = dummy_network(x)
        # Back to channels-last (B, D, H, W, C) for assembling.
        numpy_prediction = prediction.numpy().transpose((0, 2, 3, 4, 1))
        is_last = (i == (nb_batches - 1))
        assembler.add_batch(numpy_prediction, sample_indices.numpy(), is_last)
        for subject_index in assembler.subjects_ready:
            subject_prediction = assembler.get_assembled_subject(subject_index)
            # Reference label and image metadata, e.g. for evaluation/saving.
            direct_sample = dataset.direct_extract(direct_extractor, subject_index)
            (target, image_properties) = (direct_sample[defs.KEY_LABELS], direct_sample[defs.KEY_PROPERTIES])
|
def main(hdf_file):
    """2-D slice-wise inference example (TensorFlow/Keras)."""
    extractor = extr.DataExtractor(categories=(defs.KEY_IMAGES,))
    transform = None  # Keras is channels-last, so no permutation is needed
    indexing_strategy = extr.SliceIndexing()
    dataset = extr.PymiaDatasource(hdf_file, indexing_strategy, extractor, transform)
    direct_extractor = extr.ComposeExtractor([extr.ImagePropertiesExtractor(), extr.DataExtractor(categories=(defs.KEY_LABELS, defs.KEY_IMAGES))])
    assembler = assm.SubjectAssembler(dataset)
    gen_fn = pymia_tf.get_tf_generator(dataset)
    tf_dataset = tf.data.Dataset.from_generator(generator=gen_fn, output_types={defs.KEY_IMAGES: tf.float32, defs.KEY_SAMPLE_INDEX: tf.int64})
    tf_dataset = tf_dataset.batch(2)
    dummy_network = keras.Sequential([layers.Conv2D(8, kernel_size=3, padding='same'), layers.Conv2D(2, kernel_size=3, padding='same', activation='sigmoid')])
    # NOTE(review): assumes len(dataset) is divisible by the batch size of 2;
    # a remainder batch would shift is_last -- confirm for real data.
    nb_batches = (len(dataset) // 2)
    for (i, batch) in enumerate(tf_dataset):
        (x, sample_indices) = (batch[defs.KEY_IMAGES], batch[defs.KEY_SAMPLE_INDEX])
        prediction = dummy_network(x)
        numpy_prediction = prediction.numpy()  # already channels-last
        is_last = (i == (nb_batches - 1))
        assembler.add_batch(numpy_prediction, sample_indices.numpy(), is_last)
        for subject_index in assembler.subjects_ready:
            subject_prediction = assembler.get_assembled_subject(subject_index)
            # Reference label and image metadata, e.g. for evaluation/saving.
            direct_sample = dataset.direct_extract(direct_extractor, subject_index)
|
def main(data_dir: str, result_file: str, result_summary_file: str):
    """Segmentation evaluation example: compare artificially degraded
    "predictions" against the ground truth and write per-subject CSV/console
    reports plus aggregated statistics.
    """
    metrics = [metric.DiceCoefficient(), metric.HausdorffDistance(percentile=95, metric='HDRFDST95'), metric.VolumeSimilarity()]
    # label value -> structure name used in the report
    labels = {1: 'WHITEMATTER', 2: 'GREYMATTER', 5: 'THALAMUS'}
    evaluator = eval_.SegmentationEvaluator(metrics, labels)
    subject_dirs = [subject for subject in glob.glob(os.path.join(data_dir, '*')) if (os.path.isdir(subject) and os.path.basename(subject).startswith('Subject'))]
    for subject_dir in subject_dirs:
        subject_id = os.path.basename(subject_dir)
        print(f'Evaluating {subject_id}...')
        ground_truth = sitk.ReadImage(os.path.join(subject_dir, f'{subject_id}_GT.mha'))
        prediction = ground_truth
        # Erode each label so the "prediction" differs from the ground truth.
        for label_val in labels.keys():
            prediction = sitk.BinaryErode(prediction, ([1] * prediction.GetDimension()), sitk.sitkBall, 0, label_val)
        evaluator.evaluate(prediction, ground_truth, subject_id)
    writer.CSVWriter(result_file).write(evaluator.results)
    print('\nSubject-wise results...')
    writer.ConsoleWriter().write(evaluator.results)
    # Aggregate mean/std over all subjects.
    functions = {'MEAN': np.mean, 'STD': np.std}
    writer.CSVStatisticsWriter(result_summary_file, functions=functions).write(evaluator.results)
    print('\nAggregated statistic results...')
    writer.ConsoleStatisticsWriter(functions=functions).write(evaluator.results)
    evaluator.clear()
|
class DummyNetwork(nn.Module):
    """Stand-in segmentation model: returns random integer labels in [0, 5)
    with a single channel and the input's spatial dimensions.
    """

    def forward(self, x):
        out_shape = (x.size(0), 1) + tuple(x.size()[2:])
        return torch.randint(0, 5, out_shape)
|
def main(hdf_file: str, log_dir: str):
    """Training-loop logging example (TensorFlow).

    Runs a dummy model over the dataset for several epochs, assembles the
    slice predictions into subjects, evaluates them, and logs the aggregated
    Dice statistics to TensorBoard.
    """
    metrics = [metric.DiceCoefficient()]
    # label value -> structure name used in the evaluation report
    labels = {1: 'WHITEMATTER', 2: 'GREYMATTER', 5: 'THALAMUS'}
    evaluator = eval_.SegmentationEvaluator(metrics, labels)
    functions = {'MEAN': np.mean, 'STD': np.std}
    statistics_aggregator = writer.StatisticsAggregator(functions=functions)
    tb = tf.summary.create_file_writer(os.path.join(log_dir, 'logging-example-tensorflow'))
    dataset = extr.PymiaDatasource(hdf_file, extr.SliceIndexing(), extr.DataExtractor(categories=(defs.KEY_IMAGES,)))
    gen_fn = pymia_tf.get_tf_generator(dataset)
    tf_dataset = tf.data.Dataset.from_generator(generator=gen_fn, output_types={defs.KEY_IMAGES: tf.float32, defs.KEY_SAMPLE_INDEX: tf.int64})
    batch_size = 100
    loader = tf_dataset.batch(batch_size)
    assembler = assm.SubjectAssembler(dataset)
    direct_extractor = extr.ComposeExtractor([extr.SubjectExtractor(), extr.ImagePropertiesExtractor(), extr.DataExtractor(categories=(defs.KEY_LABELS,))])

    class DummyNetwork(tf.keras.Model):
        # Stand-in model producing random integer labels in [0, 6).
        def call(self, inputs):
            return tf.random.uniform((*inputs.shape[:(- 1)], 1), 0, 6, dtype=tf.int32)
    dummy_network = DummyNetwork()
    tf.random.set_seed(0)  # reproducible dummy predictions
    # Number of batches per epoch. The original used len(dataset) // 2 (a
    # leftover from a batch-size-2 example) although the batch size is 100,
    # so is_last never fired and the last subject was never flushed.
    nb_batches = -(-len(dataset) // batch_size)  # ceil division
    epochs = 10
    for epoch in range(epochs):
        print(f'Epoch {(epoch + 1)}/{epochs}')
        for (i, batch) in enumerate(loader):
            (x, sample_indices) = (batch[defs.KEY_IMAGES], batch[defs.KEY_SAMPLE_INDEX])
            prediction = dummy_network(x)
            numpy_prediction = prediction.numpy()
            is_last = (i == (nb_batches - 1))
            assembler.add_batch(numpy_prediction, sample_indices.numpy(), is_last)
            for subject_index in assembler.subjects_ready:
                subject_prediction = assembler.get_assembled_subject(subject_index)
                direct_sample = dataset.direct_extract(direct_extractor, subject_index)
                (reference, image_properties) = (direct_sample[defs.KEY_LABELS], direct_sample[defs.KEY_PROPERTIES])
                evaluator.evaluate(subject_prediction[(..., 0)], reference[(..., 0)], direct_sample[defs.KEY_SUBJECT])
        # Log per-epoch aggregated statistics, then reset for the next epoch.
        results = statistics_aggregator.calculate(evaluator.results)
        for result in results:
            with tb.as_default():
                tf.summary.scalar(f'train/{result.metric}-{result.id_}', result.value, epoch)
        evaluator.clear()
|
def main(hdf_file: str, log_dir: str):
    """Training-loop logging example (PyTorch): run a dummy model each epoch,
    assemble and evaluate subject predictions, and log the aggregated Dice
    statistics to TensorBoard.
    """
    metrics = [metric.DiceCoefficient()]
    # label value -> structure name used in the evaluation report
    labels = {1: 'WHITEMATTER', 2: 'GREYMATTER', 5: 'THALAMUS'}
    evaluator = eval_.SegmentationEvaluator(metrics, labels)
    functions = {'MEAN': np.mean, 'STD': np.std}
    statistics_aggregator = writer.StatisticsAggregator(functions=functions)
    tb = tensorboard.SummaryWriter(os.path.join(log_dir, 'logging-example-torch'))
    # Channels-first for PyTorch.
    transform = tfm.Permute(permutation=(2, 0, 1), entries=(defs.KEY_IMAGES,))
    dataset = extr.PymiaDatasource(hdf_file, extr.SliceIndexing(), extr.DataExtractor(categories=(defs.KEY_IMAGES,)), transform)
    pytorch_dataset = pymia_torch.PytorchDatasetAdapter(dataset)
    loader = torch_data.dataloader.DataLoader(pytorch_dataset, batch_size=100, shuffle=False)
    assembler = assm.SubjectAssembler(dataset)
    direct_extractor = extr.ComposeExtractor([extr.SubjectExtractor(), extr.ImagePropertiesExtractor(), extr.DataExtractor(categories=(defs.KEY_LABELS,))])
    class DummyNetwork(nn.Module):
        # Stand-in model producing random integer labels in [0, 6).
        def forward(self, x):
            return torch.randint(0, 6, (x.size(0), 1, *x.size()[2:]))
    dummy_network = DummyNetwork()
    torch.manual_seed(0)  # reproducible dummy predictions
    nb_batches = len(loader)
    epochs = 10
    for epoch in range(epochs):
        print(f'Epoch {(epoch + 1)}/{epochs}')
        for (i, batch) in enumerate(loader):
            (x, sample_indices) = (batch[defs.KEY_IMAGES], batch[defs.KEY_SAMPLE_INDEX])
            prediction = dummy_network(x)
            # Channels-last for assembling.
            numpy_prediction = prediction.numpy().transpose((0, 2, 3, 1))
            is_last = (i == (nb_batches - 1))
            assembler.add_batch(numpy_prediction, sample_indices.numpy(), is_last)
            for subject_index in assembler.subjects_ready:
                subject_prediction = assembler.get_assembled_subject(subject_index)
                direct_sample = dataset.direct_extract(direct_extractor, subject_index)
                (reference, image_properties) = (direct_sample[defs.KEY_LABELS], direct_sample[defs.KEY_PROPERTIES])
                evaluator.evaluate(subject_prediction[(..., 0)], reference[(..., 0)], direct_sample[defs.KEY_SUBJECT])
        # Log per-epoch aggregated statistics, then reset for the next epoch.
        results = statistics_aggregator.calculate(evaluator.results)
        for result in results:
            tb.add_scalar(f'train/{result.metric}-{result.id_}', result.value, epoch)
        evaluator.clear()
|
def main(url: str, data_dir: str):
    """Download a zip archive from *url* and extract the example data.

    Only subject folders (``Subject_*``) and HDF5 files are extracted;
    directory entries themselves are skipped.
    """
    print(f'Downloading... ({url})')
    # Close the HTTP response deterministically; the original leaked it.
    with request.urlopen(url) as resp:
        archive_bytes = io.BytesIO(resp.read())
    print(f'Extracting... (to {data_dir})')
    with zipfile.ZipFile(archive_bytes) as zip_:
        for member in zip_.infolist():
            if (member.filename.startswith('Subject_') or member.filename.endswith('.h5')):
                if (not os.path.basename(member.filename)):
                    continue  # directory entry, nothing to extract
                zip_.extract(member, data_dir)
                print(f'extract {os.path.join(data_dir, member.filename)}')
    print('Finished')
|
class ConvDONormReLu2D(nn.Sequential):
    """3x3 conv -> (optional dropout) -> (optional batch norm) -> ReLU.

    Args:
        in_ch: number of input channels.
        out_ch: number of output channels.
        dropout_p: dropout probability; the dropout layer is only added when > 0.
        norm: 'bn' adds a BatchNorm2d (registered under the name 'bn');
            any other value adds no normalization layer.
    """

    def __init__(self, in_ch, out_ch, dropout_p: float = 0.0, norm: str = 'bn'):
        super().__init__()
        self.add_module('conv', nn.Conv2d(in_ch, out_ch, 3, padding=1))
        if dropout_p > 0:
            self.add_module('dropout', nn.Dropout2d(p=dropout_p, inplace=True))
        if norm == 'bn':
            self.add_module(norm, nn.BatchNorm2d(out_ch))
        self.add_module('relu', nn.ReLU(inplace=True))
    # NOTE: the original defined forward(x) = super().forward(x); that no-op
    # override is removed -- nn.Sequential's forward already does exactly this.
|
class DownConv2D(nn.Module):
    """U-Net encoder stage: two conv blocks followed by 2x2 max pooling.

    forward returns (pooled, pre-pool) so the pre-pool activation can serve
    as the skip connection.
    """

    def __init__(self, in_ch, out_ch, dropout_p: float = 0.0, norm: str = 'bn'):
        super().__init__()
        self.double_conv = nn.Sequential(
            ConvDONormReLu2D(in_ch, out_ch, dropout_p, norm),
            ConvDONormReLu2D(out_ch, out_ch, dropout_p, norm),
        )
        self.pool = nn.MaxPool2d(2)

    def forward(self, x):
        skip_x = self.double_conv(x)
        return (self.pool(skip_x), skip_x)
|
class UpConv2D(nn.Module):
    """U-Net decoder stage: 2x upsampling (transposed conv, or nearest-neighbor
    interpolation plus conv), padding to the skip connection's spatial size,
    concatenation, then a double conv block.
    """

    def __init__(self, in_ch, out_ch, dropout_p: float=0.0, norm: str='bn', transpose: bool=False):
        super().__init__()
        self.transpose = transpose
        if self.transpose:
            # Learned 2x upsampling.
            self.upconv = nn.ConvTranspose2d(in_ch, out_ch, 2, stride=2)
        else:
            # Fixed nearest-neighbor upsampling (done in forward) + conv.
            self.upconv = nn.Conv2d(in_ch, out_ch, 3, padding=1)
        # Input is the concatenation of upsampled features and the skip.
        self.double_conv = nn.Sequential(ConvDONormReLu2D((2 * out_ch), out_ch, dropout_p, norm), ConvDONormReLu2D(out_ch, out_ch, dropout_p, norm))

    def forward(self, x, skip_x):
        if (not self.transpose):
            x = torch.nn.functional.interpolate(x, mode='nearest', scale_factor=2)
        up = self.upconv(x)
        # Spatial (H, W) sizes of the upsampled features and the skip.
        (up_shape, skip_shape) = (up.size()[(- 2):], skip_x.size()[(- 2):])
        # NOTE(review): torch.Size comparison is lexicographic; this behaves
        # as "smaller" only when both dims are <= the skip's (which holds for
        # 2x pooling of odd sizes) -- confirm for exotic input shapes.
        if (up_shape < skip_shape):
            # Pad symmetrically; any extra pixel goes to the high side.
            x_diff = (skip_shape[(- 1)] - up_shape[(- 1)])
            y_diff = (skip_shape[(- 2)] - up_shape[(- 2)])
            x_pad = ((x_diff // 2), ((x_diff // 2) + (x_diff % 2)))
            y_pad = ((y_diff // 2), ((y_diff // 2) + (y_diff % 2)))
            up = F.pad(up, (x_pad + y_pad))
        x = torch.cat((up, skip_x), 1)
        x = self.double_conv(x)
        return x
|
class UNetModel(nn.Module):
    """2-D U-Net: ``n_pooling`` encoder stages, a bottleneck, matching decoder
    stages with skip connections, and a final 1x1 classifier.

    Args:
        ch_in: number of input channels.
        ch_out: number of output classes (stored as ``n_classes`` internally;
            the name is then reused as the running channel count).
        n_channels: channel count of the first stage; doubles per pooling.
        n_pooling: number of pooling (encoder/decoder) stages.
        dropout_p: dropout probability inside the conv blocks.
        norm: normalization type for the conv blocks ('bn' = batch norm).
    """

    def __init__(self, ch_in: int, ch_out: int, n_channels: int=32, n_pooling: int=3, dropout_p: float=0.2, norm: str='bn', **kwargs):
        super().__init__()
        n_classes = ch_out  # ch_out is reused below as the running channel count
        ch_out = n_channels
        self.down_convs = nn.ModuleList()
        for i in range(n_pooling):
            down_conv = DownConv2D(ch_in, ch_out, dropout_p, norm)
            self.down_convs.append(down_conv)
            ch_in = ch_out
            ch_out *= 2  # double the channels after every pooling stage
        self.bottleneck = nn.Sequential(ConvDONormReLu2D(ch_in, ch_out, dropout_p, norm), ConvDONormReLu2D(ch_out, ch_out, dropout_p, norm))
        self.up_convs = nn.ModuleList()
        for i in range(n_pooling, 0, (- 1)):
            ch_in = ch_out
            ch_out = (ch_in // 2)  # halve the channels on the way up
            up_conv = UpConv2D(ch_in, ch_out, dropout_p, norm)
            self.up_convs.append(up_conv)
        ch_in = ch_out
        self.conv_cls = nn.Conv2d(ch_in, n_classes, 1)  # per-pixel classifier

    def forward(self, x):
        skip_connections = []
        for down_conv in self.down_convs:
            (x, skip_x) = down_conv(x)
            skip_connections.append(skip_x)
        x = self.bottleneck(x)
        # Consume the skip connections in reverse order (deepest first).
        for (inv_depth, up_conv) in enumerate(self.up_convs, 1):
            skip_x = skip_connections[(- inv_depth)]
            x = up_conv(x, skip_x)
        logits = self.conv_cls(x)
        return logits
|
class ConvBlock(tf.keras.layers.Layer):
    """Two conv -> dropout -> activation stages with a depth-dependent filter count."""

    def __init__(self, layer_idx, filters_root, kernel_size, dropout_rate, padding, activation, **kwargs):
        super(ConvBlock, self).__init__(**kwargs)
        # Keep the constructor arguments around (e.g. for get_config-style introspection).
        self.layer_idx = layer_idx
        self.filters_root = filters_root
        self.kernel_size = kernel_size
        self.dropout_rate = dropout_rate
        self.padding = padding
        self.activation = activation
        n_filters = _get_filter_count(layer_idx, filters_root)
        self.conv2d_1 = tf.keras.layers.Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), strides=1, padding=padding)
        self.dropout_1 = tf.keras.layers.Dropout(rate=dropout_rate)
        self.activation_1 = tf.keras.layers.Activation(activation)
        self.conv2d_2 = tf.keras.layers.Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), strides=1, padding=padding)
        self.dropout_2 = tf.keras.layers.Dropout(rate=dropout_rate)
        self.activation_2 = tf.keras.layers.Activation(activation)

    def call(self, inputs, training=None, **kwargs):
        # Dropout layers are skipped entirely outside of training.
        x = self.conv2d_1(inputs)
        if training:
            x = self.dropout_1(x)
        x = self.activation_1(x)
        x = self.conv2d_2(x)
        if training:
            x = self.dropout_2(x)
        return self.activation_2(x)
|
class UpconvBlock(tf.keras.layers.Layer):
    """Transposed-convolution upsampling stage followed by an activation.

    The filter count is half of the next-deeper level's, so the output channel
    count matches the contracting path's feature map for concatenation.
    """

    def __init__(self, layer_idx, filters_root, kernel_size, pool_size, padding, activation, **kwargs):
        super(UpconvBlock, self).__init__(**kwargs)
        # Keep the constructor arguments around (e.g. for get_config-style introspection).
        self.layer_idx = layer_idx
        self.filters_root = filters_root
        self.kernel_size = kernel_size
        self.pool_size = pool_size
        self.padding = padding
        self.activation = activation
        n_filters = _get_filter_count(layer_idx + 1, filters_root)
        self.upconv = tf.keras.layers.Conv2DTranspose(n_filters // 2, kernel_size=(pool_size, pool_size), strides=pool_size, padding=padding)
        self.activation_1 = tf.keras.layers.Activation(activation)

    def call(self, inputs, **kwargs):
        return self.activation_1(self.upconv(inputs))
|
class CropConcatBlock(tf.keras.layers.Layer):
    """Pads the upsampled tensor to the skip tensor's spatial size, then concatenates along channels."""

    def call(self, x, skip_x, **kwargs):
        skip_shape, up_shape = tf.shape(skip_x), tf.shape(x)
        dh = skip_shape[1] - up_shape[1]
        dw = skip_shape[2] - up_shape[2]
        # Split an odd difference so the extra row/column goes to the trailing side.
        pad_h = [dh // 2, dh // 2 + dh % 2]
        pad_w = [dw // 2, dw // 2 + dw % 2]
        # NOTE: the Python-level `or` on tensor comparisons requires eager execution
        # (or statically known shapes) — preserved from the original.
        if (dh > 0) or (dw > 0):
            x = tf.pad(x, tf.convert_to_tensor([[0, 0], pad_h, pad_w, [0, 0]]))
        return tf.concat([skip_x, x], axis=-1)
|
def _get_filter_count(layer_idx, filters_root):
return ((2 ** layer_idx) * filters_root)
|
def build_model(nx=None, ny=None, channels: int=1, num_classes: int=2, layer_depth: int=5, filters_root: int=64, kernel_size: int=3, pool_size: int=2, dropout_rate: float=0.0, padding: str='same', activation='relu') -> tf.keras.Model:
    """Builds a U-Net as a tf.keras functional model.

    Args:
        nx: input height (None allows variable-sized inputs).
        ny: input width (None allows variable-sized inputs).
        channels: number of input channels.
        num_classes: number of output classes; the model emits raw logits.
        layer_depth: total number of resolution levels, including the bottleneck.
        filters_root: filter count of the first level, doubled per level.
        kernel_size: convolution kernel edge length.
        pool_size: pooling/upsampling factor per level.
        dropout_rate: dropout probability inside each ConvBlock.
        padding: padding mode for all convolutions.
        activation: activation used inside the conv blocks.

    Returns:
        An uncompiled tf.keras.Model mapping (nx, ny, channels) to per-pixel class logits.
    """
    inputs = tf.keras.Input(shape=(nx, ny, channels), name='inputs')
    x = inputs
    contracting_layers = {}
    conv_params = dict(filters_root=filters_root, kernel_size=kernel_size, dropout_rate=dropout_rate, padding=padding, activation=activation)
    # Contracting path: conv block then pooling, keeping each pre-pool output for a skip connection.
    for layer_idx in range(0, (layer_depth - 1)):
        x = ConvBlock(layer_idx, **conv_params)(x)
        contracting_layers[layer_idx] = x
        x = tf.keras.layers.MaxPooling2D((pool_size, pool_size))(x)
    # Bottleneck at the deepest level; relies on `layer_idx` leaking from the loop above.
    x = ConvBlock((layer_idx + 1), **conv_params)(x)
    # Expanding path: upsample, pad-and-concatenate the stored skip, then a conv block.
    for layer_idx in range(layer_idx, (- 1), (- 1)):
        x = UpconvBlock(layer_idx, filters_root, kernel_size, pool_size, padding, activation)(x)
        x = CropConcatBlock()(x, contracting_layers[layer_idx])
        x = ConvBlock(layer_idx, **conv_params)(x)
    # 1x1 conv to class logits; the Activation(None) layer only names the output tensor.
    x = tf.keras.layers.Conv2D(filters=num_classes, kernel_size=(1, 1), strides=1, padding=padding)(x)
    outputs = tf.keras.layers.Activation(None, name='outputs')(x)
    model = tf.keras.Model(inputs, outputs, name='unet')
    return model
|
def main(hdf_file, log_dir):
    """Trains a 2-D U-Net on dataset slices and logs to TensorBoard (TensorFlow backend).

    Args:
        hdf_file: path to the pymia HDF5 dataset file.
        log_dir: directory in which the TensorBoard event files are written.
    """
    # Evaluation setup: Dice coefficient per anatomical label, aggregated as mean/std.
    metrics = [metric.DiceCoefficient()]
    labels = {1: 'WHITEMATTER', 2: 'GREYMATTER', 3: 'HIPPOCAMPUS', 4: 'AMYGDALA', 5: 'THALAMUS'}
    evaluator = eval_.SegmentationEvaluator(metrics, labels)
    functions = {'MEAN': np.mean, 'STD': np.std}
    statistics_aggregator = writer.StatisticsAggregator(functions=functions)
    console_writer = writer.ConsoleStatisticsWriter(functions=functions)
    summary_writer = tf.summary.create_file_writer(os.path.join(log_dir, 'logging-example-tensorflow'))
    (train_subjects, valid_subjects) = (['Subject_1', 'Subject_2', 'Subject_3'], ['Subject_4'])
    extractor = extr.DataExtractor(categories=(defs.KEY_IMAGES, defs.KEY_LABELS))
    indexing_strategy = extr.SliceIndexing()  # sample the 3-D volumes slice-wise
    augmentation_transforms = [augm.RandomElasticDeformation(), augm.RandomMirror()]
    # Squeeze the labels' channel dim so they match sparse_categorical_crossentropy.
    transforms = [tfm.Squeeze(entries=(defs.KEY_LABELS,))]
    train_transforms = tfm.ComposeTransform((augmentation_transforms + transforms))
    train_dataset = extr.PymiaDatasource(hdf_file, indexing_strategy, extractor, train_transforms, subject_subset=train_subjects)
    batch_size = 16
    valid_transforms = tfm.ComposeTransform([])
    valid_dataset = extr.PymiaDatasource(hdf_file, indexing_strategy, extractor, valid_transforms, subject_subset=valid_subjects)
    direct_extractor = extr.ComposeExtractor([extr.SubjectExtractor(), extr.ImagePropertiesExtractor(), extr.DataExtractor(categories=(defs.KEY_LABELS,))])
    assembler = assm.SubjectAssembler(valid_dataset)
    train_gen_fn = pymia_tf.get_tf_generator(train_dataset)
    tf_train_dataset = tf.data.Dataset.from_generator(generator=train_gen_fn, output_types={defs.KEY_IMAGES: tf.float32, defs.KEY_LABELS: tf.int64, defs.KEY_SAMPLE_INDEX: tf.int64})
    # BUGFIX: shuffle BEFORE batching. The original `.batch(...).shuffle(...)` only
    # permuted whole batches, so the sample composition of each batch never changed.
    tf_train_dataset = tf_train_dataset.shuffle(len(train_dataset)).batch(batch_size)
    valid_gen_fn = pymia_tf.get_tf_generator(valid_dataset)
    tf_valid_dataset = tf.data.Dataset.from_generator(generator=valid_gen_fn, output_types={defs.KEY_IMAGES: tf.float32, defs.KEY_LABELS: tf.int64, defs.KEY_SAMPLE_INDEX: tf.int64})
    tf_valid_dataset = tf_valid_dataset.batch(batch_size)
    u_net = unet.build_model(channels=2, num_classes=6, layer_depth=3, filters_root=16)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    # NOTE(review): this Mean metric is never reset, so the logged value is the
    # running mean over ALL epochs, not per epoch — confirm whether that is intended.
    train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
    train_batches = (len(train_dataset) // batch_size)
    epochs = 100
    for epoch in range(epochs):
        print(f'Epoch {(epoch + 1)}/{epochs}')
        print('training')
        for (i, batch) in enumerate(tf_train_dataset):
            (x, y) = (batch[defs.KEY_IMAGES], batch[defs.KEY_LABELS])
            with tf.GradientTape() as tape:
                logits = u_net(x, training=True)
                loss = tf.keras.losses.sparse_categorical_crossentropy(y, logits, from_logits=True)
            grads = tape.gradient(loss, u_net.trainable_variables)
            optimizer.apply_gradients(zip(grads, u_net.trainable_variables))
            train_loss(loss)
            with summary_writer.as_default():
                tf.summary.scalar('train/loss', train_loss.result(), step=((epoch * train_batches) + i))
            print(f'[{(i + 1)}/{train_batches}] loss: {train_loss.result().numpy()}')
        print('validation')
        valid_batches = (len(valid_dataset) // batch_size)
        for (i, batch) in enumerate(tf_valid_dataset):
            (x, sample_indices) = (batch[defs.KEY_IMAGES], batch[defs.KEY_SAMPLE_INDEX])
            logits = u_net(x)
            # argmax over classes, then restore a trailing channel dim for the assembler.
            prediction = tf.expand_dims(tf.math.argmax(logits, (- 1)), (- 1))
            numpy_prediction = prediction.numpy()
            is_last = (i == (valid_batches - 1))
            assembler.add_batch(numpy_prediction, sample_indices.numpy(), is_last)
            for subject_index in assembler.subjects_ready:
                subject_prediction = assembler.get_assembled_subject(subject_index)
                # BUGFIX: subject_index stems from the assembler built on valid_dataset,
                # so the ground truth must be read from valid_dataset; the original read
                # from train_dataset and thus compared against the wrong subject's labels.
                direct_sample = valid_dataset.direct_extract(direct_extractor, subject_index)
                (target, image_properties) = (direct_sample[defs.KEY_LABELS], direct_sample[defs.KEY_PROPERTIES])
                evaluator.evaluate(subject_prediction[(..., 0)], target[(..., 0)], direct_sample[defs.KEY_SUBJECT])
        results = statistics_aggregator.calculate(evaluator.results)
        with summary_writer.as_default():
            for result in results:
                tf.summary.scalar(f'valid/{result.metric}-{result.id_}', result.value, epoch)
        console_writer.write(evaluator.results)
        evaluator.clear()
|
def main(hdf_file, log_dir):
    """Trains a 2-D U-Net on dataset slices and logs to TensorBoard (PyTorch backend).

    Args:
        hdf_file: path to the pymia HDF5 dataset file.
        log_dir: directory in which the TensorBoard event files are written.
    """
    # Evaluation setup: Dice coefficient per anatomical label, aggregated as mean/std.
    metrics = [metric.DiceCoefficient()]
    labels = {1: 'WHITEMATTER', 2: 'GREYMATTER', 3: 'HIPPOCAMPUS', 4: 'AMYGDALA', 5: 'THALAMUS'}
    evaluator = eval_.SegmentationEvaluator(metrics, labels)
    functions = {'MEAN': np.mean, 'STD': np.std}
    statistics_aggregator = writer.StatisticsAggregator(functions=functions)
    console_writer = writer.ConsoleStatisticsWriter(functions=functions)
    tb = tensorboard.SummaryWriter(os.path.join(log_dir, 'logging-example-torch'))
    (train_subjects, valid_subjects) = (['Subject_1', 'Subject_2', 'Subject_3'], ['Subject_4'])
    extractor = extr.DataExtractor(categories=(defs.KEY_IMAGES, defs.KEY_LABELS))
    indexing_strategy = extr.SliceIndexing()  # sample the 3-D volumes slice-wise
    augmentation_transforms = [augm.RandomElasticDeformation(), augm.RandomMirror()]
    # Permute to channel-first for PyTorch; squeeze the labels' channel dim for cross_entropy.
    transforms = [tfm.Permute(permutation=(2, 0, 1)), tfm.Squeeze(entries=(defs.KEY_LABELS,))]
    train_transforms = tfm.ComposeTransform((augmentation_transforms + transforms))
    train_dataset = extr.PymiaDatasource(hdf_file, indexing_strategy, extractor, train_transforms, subject_subset=train_subjects)
    valid_transforms = tfm.ComposeTransform([tfm.Permute(permutation=(2, 0, 1))])
    valid_dataset = extr.PymiaDatasource(hdf_file, indexing_strategy, extractor, valid_transforms, subject_subset=valid_subjects)
    direct_extractor = extr.ComposeExtractor([extr.SubjectExtractor(), extr.ImagePropertiesExtractor(), extr.DataExtractor(categories=(defs.KEY_LABELS,))])
    assembler = assm.SubjectAssembler(valid_dataset)
    pytorch_train_dataset = pymia_torch.PytorchDatasetAdapter(train_dataset)
    train_loader = torch_data.dataloader.DataLoader(pytorch_train_dataset, batch_size=16, shuffle=True)
    pytorch_valid_dataset = pymia_torch.PytorchDatasetAdapter(valid_dataset)
    valid_loader = torch_data.dataloader.DataLoader(pytorch_valid_dataset, batch_size=16, shuffle=False)
    u_net = unet.UNetModel(ch_in=2, ch_out=6, n_channels=16, n_pooling=3).to(device)
    print(u_net)
    optimizer = optim.Adam(u_net.parameters(), lr=0.001)
    train_batches = len(train_loader)
    epochs = 100
    for epoch in range(epochs):
        u_net.train()
        print(f'Epoch {(epoch + 1)}/{epochs}')
        print('training')
        for (i, batch) in enumerate(train_loader):
            (x, y) = (batch[defs.KEY_IMAGES].to(device), batch[defs.KEY_LABELS].to(device).long())
            optimizer.zero_grad()
            logits = u_net(x)
            loss = F.cross_entropy(logits, y)
            loss.backward()
            optimizer.step()
            tb.add_scalar('train/loss', loss.item(), ((epoch * train_batches) + i))
            print(f'[{(i + 1)}/{train_batches}] loss: {loss.item()}')
        print('validation')
        with torch.no_grad():
            u_net.eval()
            valid_batches = len(valid_loader)
            for (i, batch) in enumerate(valid_loader):
                (x, sample_indices) = (batch[defs.KEY_IMAGES].to(device), batch[defs.KEY_SAMPLE_INDEX])
                logits = u_net(x)
                prediction = logits.argmax(dim=1, keepdim=True)
                # Back to channel-last numpy, the layout the assembler expects.
                numpy_prediction = prediction.cpu().numpy().transpose((0, 2, 3, 1))
                is_last = (i == (valid_batches - 1))
                assembler.add_batch(numpy_prediction, sample_indices.numpy(), is_last)
                for subject_index in assembler.subjects_ready:
                    subject_prediction = assembler.get_assembled_subject(subject_index)
                    # BUGFIX: subject_index stems from the assembler built on valid_dataset,
                    # so the ground truth must be read from valid_dataset; the original read
                    # from train_dataset and thus compared against the wrong subject's labels.
                    direct_sample = valid_dataset.direct_extract(direct_extractor, subject_index)
                    (target, image_properties) = (direct_sample[defs.KEY_LABELS], direct_sample[defs.KEY_PROPERTIES])
                    evaluator.evaluate(subject_prediction[(..., 0)], target[(..., 0)], direct_sample[defs.KEY_SUBJECT])
        results = statistics_aggregator.calculate(evaluator.results)
        for result in results:
            tb.add_scalar(f'valid/{result.metric}-{result.id_}', result.value, epoch)
        console_writer.write(evaluator.results)
        evaluator.clear()
|
class Assembler(abc.ABC):
    """Interface for assembling images from batches which contain only parts (chunks) of the images."""

    @abc.abstractmethod
    def add_batch(self, to_assemble, sample_indices, last_batch=False, **kwargs):
        """Add the batch results to be assembled.

        Args:
            to_assemble (object, dict): object or dictionary of objects to be assembled to an image.
            sample_indices (iterable): iterable of all the sample indices in the processed batch.
            last_batch (bool): Whether the current batch is the last.
        """
        pass

    @abc.abstractmethod
    def get_assembled_subject(self, subject_index: int):
        """Retrieve the assembled data of one subject.

        Args:
            subject_index (int): Index of the assembled subject to be retrieved.

        Returns:
            object: The assembled data of the subject (might be multiple arrays).
        """
        pass

    @property
    @abc.abstractmethod
    def subjects_ready(self):
        """list, set: The indices of the subjects that are finished assembling."""
        pass
|
def numpy_zeros(shape: tuple, assembling_key: str, subject_index: int):
    """Default zero-initializer for an assembled prediction buffer.

    The key and subject index are part of the `zero_fn` interface but unused here.
    """
    return np.zeros(shape)
|
class SubjectAssembler(Assembler):
    """Assembles predictions of one or multiple subjects.

    Assumes that the network output, i.e. to_assemble, is of shape (B, ..., C)
    where B is the batch size and C is the numbers of channels (must be at least 1)
    and ... refers to an arbitrary image dimension.
    """

    def __init__(self, datasource: extr.PymiaDatasource, zero_fn=numpy_zeros, assemble_interaction_fn=None):
        """
        Args:
            datasource (.PymiaDatasource): The datasource.
            zero_fn: A function that initializes the numpy array to hold the predictions.
                Args: shape: tuple with the shape of the subject's labels.
                Returns: A np.ndarray
            assemble_interaction_fn (callable, optional): A `callable` that may modify the sample and
                indexing before adding the data to the assembled array. This enables handling special
                cases. Must follow the :code:`.AssembleInteractionFn.__call__` interface.
                By default neither data nor indexing is modified.
        """
        self.datasource = datasource
        self.zero_fn = zero_fn
        self.assemble_interaction_fn = assemble_interaction_fn
        self._subjects_ready = set()  # indices of subjects considered fully assembled
        self.predictions = {}  # subject_index -> {key: np.ndarray} buffers being filled

    @property
    def subjects_ready(self):
        """see :meth:`Assembler.subjects_ready`"""
        # Return a copy so callers can iterate while get_assembled_subject() mutates the set.
        return self._subjects_ready.copy()

    def add_batch(self, to_assemble: typing.Union[(np.ndarray, typing.Dict[(str, np.ndarray)])], sample_indices: np.ndarray, last_batch=False, **kwargs):
        """see :meth:`Assembler.add_batch`"""
        # Normalize a single array to the internal dict form; the sentinel key
        # '__prediction' is stripped again in get_assembled_subject().
        if (not isinstance(to_assemble, dict)):
            to_assemble = {'__prediction': to_assemble}
        sample_indices = sample_indices.tolist()
        for (batch_idx, sample_idx) in enumerate(sample_indices):
            self.add_sample(to_assemble, batch_idx, sample_idx)
        if last_batch:
            self.end()

    def end(self):
        """Signal that no more batches will arrive: everything buffered counts as ready."""
        self._subjects_ready = set(self.predictions.keys())

    def add_sample(self, to_assemble, batch_idx, sample_idx):
        """Place one sample's chunk(s) into the corresponding subject's prediction buffer(s)."""
        # Map the flat sample index to (subject, location-within-subject).
        (subject_index, index_expression) = self.datasource.indices[sample_idx]
        # Very first subject seen: just allocate its buffers.
        if ((subject_index not in self.predictions) and (not self.predictions)):
            self.predictions[subject_index] = self._init_new_subject(to_assemble, subject_index)
        # A new subject appears while others are buffered: mark all previously
        # buffered subjects as ready.
        # NOTE(review): this assumes samples arrive grouped by subject — verify for
        # shuffled datasources, otherwise subjects may be flagged ready prematurely.
        elif (subject_index not in self.predictions):
            self._subjects_ready = set(self.predictions.keys())
            self.predictions[subject_index] = self._init_new_subject(to_assemble, subject_index)
        for key in to_assemble:
            data = to_assemble[key][batch_idx]
            if self.assemble_interaction_fn:
                # Optional hook to adjust the chunk and/or its target location.
                (data, index_expression) = self.assemble_interaction_fn(key, data, index_expression)
            # Write the chunk at its location within the full subject volume.
            self.predictions[subject_index][key][index_expression.expression] = data

    def _init_new_subject(self, to_assemble, subject_index):
        """Allocate one zero-initialized array per key, shaped (subject shape) + (channels,)."""
        subject_prediction = {}
        extractor = extr.ImagePropertyShapeExtractor(numpy_format=True)
        subject_shape = self.datasource.direct_extract(extractor, subject_index)[defs.KEY_SHAPE]
        for key in to_assemble:
            # Trailing dimension is the channel count of the network output.
            assemble_shape = (subject_shape + (to_assemble[key].shape[(- 1)],))
            subject_prediction[key] = self.zero_fn(assemble_shape, key, subject_index)
        return subject_prediction

    def get_assembled_subject(self, subject_index: int):
        """see :meth:`Assembler.get_assembled_subject`"""
        try:
            self._subjects_ready.remove(subject_index)
        except KeyError:
            # Not marked ready: only an error if the subject is entirely unknown;
            # otherwise the (possibly still incomplete) buffer is returned below.
            if (subject_index not in self.predictions):
                raise ValueError('Subject with index {} not in assembler'.format(subject_index))
        assembled = self.predictions.pop(subject_index)
        # Unwrap the sentinel used for the single-array case in add_batch().
        if ('__prediction' in assembled):
            return assembled['__prediction']
        return assembled
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.