code stringlengths 17 6.64M |
|---|
def mask_rcnn_fcn_head_v1up(dim_in, roi_xform_func, spatial_scale):
    """v1up design: 2 * (conv 3x3), convT 2x2."""
    # The v1up head is simply the X-convs head specialized to two conv layers.
    return mask_rcnn_fcn_head_v1upXconvs(dim_in, roi_xform_func, spatial_scale, 2)
|
class mask_rcnn_fcn_head_v1upXconvs(nn.Module):
    """v1upXconvs design: X * (conv 3x3), convT 2x2.

    A stack of `num_convs` 3x3 conv + ReLU layers over RoI-pooled features,
    followed (in forward) by a 2x2 stride-2 deconv that doubles resolution.
    """

    def __init__(self, dim_in, roi_xform_func, spatial_scale, num_convs):
        super().__init__()
        self.dim_in = dim_in
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        self.num_convs = num_convs

        dilation = cfg.MRCNN.DILATION
        dim_inner = cfg.MRCNN.DIM_REDUCED
        self.dim_out = dim_inner

        layers = []
        in_channels = dim_in
        for _ in range(num_convs):
            layers.append(nn.Conv2d(in_channels, dim_inner, 3, 1,
                                    padding=(1 * dilation), dilation=dilation))
            layers.append(nn.ReLU(inplace=True))
            in_channels = dim_inner
        self.conv_fcn = nn.Sequential(*layers)
        self.upconv = nn.ConvTranspose2d(dim_inner, dim_inner, 2, 2, 0)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Apply the configured Detectron-style init to conv/deconv weights."""
        if not isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            return
        conv_init = cfg.MRCNN.CONV_INIT
        if conv_init == 'GaussianFill':
            init.normal_(m.weight, std=0.001)
        elif conv_init == 'MSRAFill':
            mynn.init.MSRAFill(m.weight)
        else:
            raise ValueError
        init.constant_(m.bias, 0)

    def detectron_weight_mapping(self):
        """Map this module's param names to Detectron blob names."""
        mapping = {}
        for i in range(self.num_convs):
            # Each conv occupies slot 2*i in the Sequential (ReLU is 2*i+1).
            mapping['conv_fcn.%d.weight' % (2 * i)] = '_[mask]_fcn%d_w' % (i + 1)
            mapping['conv_fcn.%d.bias' % (2 * i)] = '_[mask]_fcn%d_b' % (i + 1)
        mapping['upconv.weight'] = 'conv5_mask_w'
        mapping['upconv.bias'] = 'conv5_mask_b'
        return mapping, []

    def forward(self, x, rpn_ret):
        x = self.roi_xform(
            x, rpn_ret,
            blob_rois='mask_rois',
            method=cfg.MRCNN.ROI_XFORM_METHOD,
            resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION,
            spatial_scale=self.spatial_scale,
            sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO
        )
        x = self.conv_fcn(x)
        return F.relu(self.upconv(x), inplace=True)
|
class mask_rcnn_fcn_head_v1upXconvs_gn(nn.Module):
    """v1upXconvs design: X * (conv 3x3), convT 2x2, with GroupNorm.

    Same as the plain X-convs head but each conv is bias-free and followed by
    a GroupNorm layer before the ReLU.
    """

    def __init__(self, dim_in, roi_xform_func, spatial_scale, num_convs):
        super().__init__()
        self.dim_in = dim_in
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        self.num_convs = num_convs

        dilation = cfg.MRCNN.DILATION
        dim_inner = cfg.MRCNN.DIM_REDUCED
        self.dim_out = dim_inner

        layers = []
        in_channels = dim_in
        for _ in range(num_convs):
            # Conv has no bias: GroupNorm's affine shift takes its place.
            layers.append(nn.Conv2d(in_channels, dim_inner, 3, 1,
                                    padding=(1 * dilation), dilation=dilation, bias=False))
            layers.append(nn.GroupNorm(net_utils.get_group_gn(dim_inner), dim_inner,
                                       eps=cfg.GROUP_NORM.EPSILON))
            layers.append(nn.ReLU(inplace=True))
            in_channels = dim_inner
        self.conv_fcn = nn.Sequential(*layers)
        self.upconv = nn.ConvTranspose2d(dim_inner, dim_inner, 2, 2, 0)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Apply the configured Detectron-style init to conv/deconv weights."""
        if not isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            return
        conv_init = cfg.MRCNN.CONV_INIT
        if conv_init == 'GaussianFill':
            init.normal_(m.weight, std=0.001)
        elif conv_init == 'MSRAFill':
            mynn.init.MSRAFill(m.weight)
        else:
            raise ValueError
        if m.bias is not None:
            init.constant_(m.bias, 0)

    def detectron_weight_mapping(self):
        """Map this module's param names to Detectron blob names."""
        mapping = {}
        for i in range(self.num_convs):
            # Each (conv, gn, relu) triple occupies slots 3*i .. 3*i+2.
            mapping['conv_fcn.%d.weight' % (3 * i)] = '_mask_fcn%d_w' % (i + 1)
            mapping['conv_fcn.%d.weight' % ((3 * i) + 1)] = '_mask_fcn%d_gn_s' % (i + 1)
            mapping['conv_fcn.%d.bias' % ((3 * i) + 1)] = '_mask_fcn%d_gn_b' % (i + 1)
        mapping['upconv.weight'] = 'conv5_mask_w'
        mapping['upconv.bias'] = 'conv5_mask_b'
        return mapping, []

    def forward(self, x, rpn_ret):
        x = self.roi_xform(
            x, rpn_ret,
            blob_rois='mask_rois',
            method=cfg.MRCNN.ROI_XFORM_METHOD,
            resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION,
            spatial_scale=self.spatial_scale,
            sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO
        )
        x = self.conv_fcn(x)
        return F.relu(self.upconv(x), inplace=True)
|
class mask_rcnn_fcn_head_v0upshare(nn.Module):
    """Use a ResNet "conv5" / "stage5" head for mask prediction. Weights and
    computation are shared with the conv5 box head. Computation can only be
    shared during training, since inference is cascaded.

    v0upshare design: conv5, convT 2x2.
    """
    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.dim_in = dim_in
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        self.dim_out = cfg.MRCNN.DIM_REDUCED
        self.SHARE_RES5 = True
        assert cfg.MODEL.SHARE_RES5
        # The res5 stage is owned by the box head; it is attached later via
        # share_res5_module(), so this head holds only a reference.
        self.res5 = None
        dim_conv5 = 2048  # output channel count of a ResNet stage-5
        self.upconv5 = nn.ConvTranspose2d(dim_conv5, self.dim_out, 2, 2, 0)
        self._init_weights()
    def _init_weights(self):
        # Only the deconv is initialized here; res5 weights come from the
        # shared box head.
        if (cfg.MRCNN.CONV_INIT == 'GaussianFill'):
            init.normal_(self.upconv5.weight, std=0.001)
        elif (cfg.MRCNN.CONV_INIT == 'MSRAFill'):
            mynn.init.MSRAFill(self.upconv5.weight)
        init.constant_(self.upconv5.bias, 0)
    def share_res5_module(self, res5_target):
        ' Share res5 block with box head on training '
        self.res5 = res5_target
    def detectron_weight_mapping(self):
        (detectron_weight_mapping, orphan_in_detectron) = ResNet.residual_stage_detectron_mapping(self.res5, 'res5', 3, 5)
        # Map the shared res5 params to None so the loader skips them here;
        # the box head is responsible for loading them.
        for k in detectron_weight_mapping:
            detectron_weight_mapping[k] = None
        detectron_weight_mapping.update({'upconv5.weight': 'conv5_mask_w', 'upconv5.bias': 'conv5_mask_b'})
        return (detectron_weight_mapping, orphan_in_detectron)
    def forward(self, x, rpn_ret, roi_has_mask_int32=None):
        if self.training:
            # Training: x is the box head's res5 output; select only the rois
            # that have a mask target (roi_has_mask_int32 > 0).
            inds = np.nonzero((roi_has_mask_int32 > 0))[0]
            inds = Variable(torch.from_numpy(inds)).cuda(x.get_device())
            x = x[inds]
        else:
            # Inference is cascaded: pool the backbone feature for the mask
            # rois and run the (shared) res5 stage ourselves.
            x = self.roi_xform(x, rpn_ret, blob_rois='mask_rois', method=cfg.MRCNN.ROI_XFORM_METHOD, resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION, spatial_scale=self.spatial_scale, sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO)
            x = self.res5(x)
        x = self.upconv5(x)
        x = F.relu(x, inplace=True)
        return x
|
class mask_rcnn_fcn_head_v0up(nn.Module):
    """v0up design: conv5, deconv 2x2 (no weight sharing with the box head)."""

    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.dim_in = dim_in
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        self.dim_out = cfg.MRCNN.DIM_REDUCED

        self.res5, res5_dim_out = ResNet_roi_conv5_head_for_masks(dim_in)
        self.upconv5 = nn.ConvTranspose2d(res5_dim_out, self.dim_out, 2, 2, 0)
        # Freeze the affine (frozen-BN replacement) params of this private res5.
        self.res5.apply(
            lambda m: ResNet.freeze_params(m) if isinstance(m, mynn.AffineChannel2d) else None
        )
        self._init_weights()

    def _init_weights(self):
        """Detectron-style init for the deconv layer."""
        conv_init = cfg.MRCNN.CONV_INIT
        if conv_init == 'GaussianFill':
            init.normal_(self.upconv5.weight, std=0.001)
        elif conv_init == 'MSRAFill':
            mynn.init.MSRAFill(self.upconv5.weight)
        init.constant_(self.upconv5.bias, 0)

    def detectron_weight_mapping(self):
        """Map param names to Detectron blob names (res5 + mask deconv)."""
        mapping, orphans = ResNet.residual_stage_detectron_mapping(self.res5, 'res5', 3, 5)
        mapping.update({
            'upconv5.weight': 'conv5_mask_w',
            'upconv5.bias': 'conv5_mask_b',
        })
        return mapping, orphans

    def forward(self, x, rpn_ret):
        x = self.roi_xform(
            x, rpn_ret,
            blob_rois='mask_rois',
            method=cfg.MRCNN.ROI_XFORM_METHOD,
            resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION,
            spatial_scale=self.spatial_scale,
            sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO
        )
        x = self.res5(x)
        return F.relu(self.upconv5(x), inplace=True)
|
def ResNet_roi_conv5_head_for_masks(dim_in):
    """ResNet "conv5" / "stage5" head for predicting masks.

    Returns (module, dim_out) from ResNet.add_stage.
    """
    dilation = cfg.MRCNN.DILATION
    # Scale the stage's initial stride with the RoI resolution (7 is the base).
    stride_init = cfg.MRCNN.ROI_XFORM_RESOLUTION // 7
    return ResNet.add_stage(dim_in, 2048, 512, 3, dilation, stride_init)
|
def _import_symbols(locals):
    """Copy every attribute of the compiled cffi module into `locals`.

    Callables are wrapped for the ffi; every symbol is also appended to the
    module's __all__.
    """
    for symbol in dir(_lib):
        value = getattr(_lib, symbol)
        locals[symbol] = _wrap_function(value, _ffi) if callable(value) else value
        __all__.append(symbol)
|
class RoIAlignFunction(Function):
    # Legacy (pre-PyTorch-0.4) autograd Function: state lives on the instance
    # and forward/backward are instance methods, so callers must build a fresh
    # instance per call. CUDA-only (the CPU path raises NotImplementedError).
    def __init__(self, aligned_height, aligned_width, spatial_scale, sampling_ratio):
        self.aligned_width = int(aligned_width)
        self.aligned_height = int(aligned_height)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)
        self.rois = None          # saved in forward for use in backward
        self.feature_size = None  # saved in forward for use in backward
    def forward(self, features, rois):
        # features: (N, C, H, W); rois: (num_rois, ...) — exact roi column
        # layout is defined by the CUDA kernel (NOTE(review): confirm).
        self.rois = rois
        self.feature_size = features.size()
        (batch_size, num_channels, data_height, data_width) = features.size()
        num_rois = rois.size(0)
        # Output allocated on the same device/dtype as `features`.
        output = features.new(num_rois, num_channels, self.aligned_height, self.aligned_width).zero_()
        if features.is_cuda:
            roi_align.roi_align_forward_cuda(self.aligned_height, self.aligned_width, self.spatial_scale, self.sampling_ratio, features, rois, output)
        else:
            # No CPU kernel is compiled for this extension.
            raise NotImplementedError
        return output
    def backward(self, grad_output):
        assert ((self.feature_size is not None) and grad_output.is_cuda)
        (batch_size, num_channels, data_height, data_width) = self.feature_size
        grad_input = self.rois.new(batch_size, num_channels, data_height, data_width).zero_()
        roi_align.roi_align_backward_cuda(self.aligned_height, self.aligned_width, self.spatial_scale, self.sampling_ratio, grad_output, self.rois, grad_input)
        # Gradient flows only to `features`; `rois` gets None.
        return (grad_input, None)
|
class RoIAlign(Module):
    """Module wrapper around RoIAlignFunction (exact output size)."""

    def __init__(self, aligned_height, aligned_width, spatial_scale, sampling_ratio):
        super(RoIAlign, self).__init__()
        self.aligned_height = int(aligned_height)
        self.aligned_width = int(aligned_width)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)

    def forward(self, features, rois):
        # Legacy Function: a fresh instance is required per call.
        fn = RoIAlignFunction(self.aligned_height, self.aligned_width,
                              self.spatial_scale, self.sampling_ratio)
        return fn(features, rois)
|
class RoIAlignAvg(Module):
    """RoIAlign at (h+1, w+1), then 2x2 stride-1 average pooling down to (h, w)."""

    def __init__(self, aligned_height, aligned_width, spatial_scale, sampling_ratio):
        super(RoIAlignAvg, self).__init__()
        self.aligned_height = int(aligned_height)
        self.aligned_width = int(aligned_width)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)

    def forward(self, features, rois):
        fn = RoIAlignFunction(self.aligned_height + 1, self.aligned_width + 1,
                              self.spatial_scale, self.sampling_ratio)
        aligned = fn(features, rois)
        return avg_pool2d(aligned, kernel_size=2, stride=1)
|
class RoIAlignMax(Module):
    """RoIAlign at (h+1, w+1), then 2x2 stride-1 max pooling down to (h, w)."""

    def __init__(self, aligned_height, aligned_width, spatial_scale, sampling_ratio):
        super(RoIAlignMax, self).__init__()
        self.aligned_height = int(aligned_height)
        self.aligned_width = int(aligned_width)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)

    def forward(self, features, rois):
        fn = RoIAlignFunction(self.aligned_height + 1, self.aligned_width + 1,
                              self.spatial_scale, self.sampling_ratio)
        aligned = fn(features, rois)
        return max_pool2d(aligned, kernel_size=2, stride=1)
|
def group_norm(x, num_groups, weight=None, bias=None, eps=1e-05):
    """Functional group normalization over (N, C, ...) input.

    Normalizes within `num_groups` channel groups; if `weight` is given,
    applies a per-channel affine (weight, bias). Note this uses torch.var's
    default (unbiased) variance estimator.
    """
    shape = x.shape
    ndim = len(shape)
    N, C = shape[:2]
    assert C % num_groups == 0, 'input channel dimension must divisible by number of groups'

    grouped = x.view(N, num_groups, -1)
    mean = grouped.mean(-1, keepdim=True)
    var = grouped.var(-1, keepdim=True)
    normed = ((grouped - mean) / (var + eps).sqrt()).view(shape)

    if weight is None:
        return normed
    # Broadcast the per-channel affine over all trailing spatial dims.
    affine_shape = (1, -1) + (1,) * (ndim - 2)
    return normed * weight.view(affine_shape) + bias.view(affine_shape)
|
def XavierFill(tensor):
    """Caffe2-style XavierFill: uniform in [-a, a] with a = sqrt(3 / fan_in)."""
    # Caffe2 computes fan_in as total size over the leading dimension.
    fan_in = tensor.numel() / tensor.shape[0]
    bound = math.sqrt(3 / fan_in)
    return init.uniform_(tensor, -bound, bound)
|
def MSRAFill(tensor):
    """Caffe2-style MSRAFill: normal(0, sqrt(2 / fan_out))."""
    # Caffe2 computes fan_out as total size over the second dimension.
    fan_out = tensor.numel() / tensor.shape[1]
    std = math.sqrt(2 / fan_out)
    return init.normal_(tensor, 0, std)
|
class AffineChannel2d(nn.Module):
    """Channel-wise scale-and-shift: y = x * w[c] + b[c] (a frozen-BN stand-in)."""

    def __init__(self, num_features):
        super().__init__()
        self.num_features = num_features
        self.weight = nn.Parameter(torch.Tensor(num_features))
        self.bias = nn.Parameter(torch.Tensor(num_features))
        # Same init as the original: uniform scale, zero shift.
        self.weight.data.uniform_()
        self.bias.data.zero_()

    def forward(self, x):
        shape = (1, self.num_features, 1, 1)
        return x * self.weight.view(shape) + self.bias.view(shape)
|
class GroupNorm(nn.Module):
    """Group normalization layer; the functional core is myF.group_norm."""

    def __init__(self, num_groups, num_channels, eps=1e-05, affine=True):
        super().__init__()
        self.num_groups = num_groups
        self.num_channels = num_channels
        self.eps = eps
        self.affine = affine
        if affine:
            # Per-channel scale/shift, analogous to BatchNorm's affine pair.
            self.weight = nn.Parameter(torch.Tensor(num_channels))
            self.bias = nn.Parameter(torch.Tensor(num_channels))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Reset scale to 1 and shift to 0."""
        if self.affine:
            self.weight.data.fill_(1)
            self.bias.data.zero_()

    def forward(self, x):
        return myF.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)

    def extra_repr(self):
        return '{num_groups}, {num_channels}, eps={eps}, affine={affine}'.format(**self.__dict__)
|
class Broadcast(Function):
    # Autograd-aware broadcast: forward copies every CUDA input to every
    # target GPU (coalesced); backward reduce-adds per-GPU gradients back
    # onto the source device.
    @staticmethod
    def forward(ctx, target_gpus, *inputs):
        if (not all((input.is_cuda for input in inputs))):
            raise TypeError('Broadcast function not implemented for CPU tensors')
        ctx.target_gpus = target_gpus
        if (len(inputs) == 0):
            return tuple()
        ctx.num_inputs = len(inputs)
        # All inputs are read from the device of the first one.
        ctx.input_device = inputs[0].get_device()
        outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus)
        non_differentiables = []
        # needs_input_grad[0] is for target_gpus; entry i+1 matches inputs[i].
        # Mark the broadcast copies of non-grad inputs as non-differentiable.
        for (idx, input_requires_grad) in enumerate(ctx.needs_input_grad[1:]):
            if (not input_requires_grad):
                for output in outputs:
                    non_differentiables.append(output[idx])
        ctx.mark_non_differentiable(*non_differentiables)
        # Flatten [per-gpu groups] into one tuple of tensors.
        return tuple([t for tensors in outputs for t in tensors])
    @staticmethod
    def backward(ctx, *grad_outputs):
        # Leading None: target_gpus takes no gradient.
        return ((None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs))
|
class ReduceAddCoalesced(Function):
    # Inverse of Broadcast: sums per-GPU tensor groups onto one device.
    @staticmethod
    def forward(ctx, destination, num_inputs, *grads):
        # `grads` is flat: consecutive groups of num_inputs tensors, one group
        # per GPU; the first tensor of each group reveals its device.
        ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)]
        grads = [grads[i:(i + num_inputs)] for i in range(0, len(grads), num_inputs)]
        return comm.reduce_add_coalesced(grads, destination)
    @staticmethod
    def backward(ctx, *grad_outputs):
        # Two Nones: destination and num_inputs take no gradient.
        return ((None, None) + Broadcast.apply(ctx.target_gpus, *grad_outputs))
|
class Gather(Function):
    # Concatenates CUDA variables from several GPUs onto one target device
    # along `dim`; backward scatters the gradient back by recorded sizes.
    @staticmethod
    def forward(ctx, target_device, dim, *inputs):
        assert all(map((lambda i: i.is_cuda), inputs))
        ctx.target_device = target_device
        ctx.dim = dim
        # Record each chunk's source GPU and its extent along dim so backward
        # can split and route the gradient correctly.
        ctx.input_gpus = tuple(map((lambda i: i.get_device()), inputs))
        ctx.input_sizes = tuple(map((lambda i: i.size(ctx.dim)), inputs))
        return comm.gather(inputs, ctx.dim, ctx.target_device)
    @staticmethod
    def backward(ctx, grad_output):
        # Two Nones: target_device and dim take no gradient.
        return ((None, None) + Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output))
|
class Scatter(Function):
    # Splits a variable along `dim` and distributes the chunks to target GPUs.
    @staticmethod
    def forward(ctx, target_gpus, chunk_sizes, dim, input):
        ctx.target_gpus = target_gpus
        ctx.chunk_sizes = chunk_sizes
        ctx.dim = dim
        # -1 encodes "input lives on the CPU".
        ctx.input_device = (input.get_device() if input.is_cuda else (- 1))
        streams = None
        if (ctx.input_device == (- 1)):
            # CPU -> GPU copies go through background streams so they can
            # overlap with compute.
            streams = [_get_stream(device) for device in ctx.target_gpus]
        outputs = comm.scatter(input, ctx.target_gpus, ctx.chunk_sizes, ctx.dim, streams)
        if (streams is not None):
            # Synchronize each copy stream with its GPU's main stream, and
            # record the output on the main stream so its memory is not
            # reused before the copy completes.
            for (i, output) in enumerate(outputs):
                with torch.cuda.device(ctx.target_gpus[i]):
                    main_stream = torch.cuda.current_stream()
                    main_stream.wait_stream(streams[i])
                    output.record_stream(main_stream)
        return outputs
    @staticmethod
    def backward(ctx, *grad_output):
        # Three Nones: target_gpus, chunk_sizes, dim take no gradient; the
        # input's gradient is the gather of all chunk gradients.
        return (None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output))
|
def _get_stream(device):
'Gets a background stream for copying between CPU and GPU'
global _streams
if (device == (- 1)):
return None
if (_streams is None):
_streams = ([None] * torch.cuda.device_count())
if (_streams[device] is None):
_streams[device] = torch.cuda.Stream(device)
return _streams[device]
|
class DataParallel(Module):
    """Implements data parallelism at the module level.

    This container parallelizes the application of the given module by
    splitting the input across the specified devices by chunking in the batch
    dimension. In the forward pass, the module is replicated on each device,
    and each replica handles a portion of the input. During the backwards
    pass, gradients from each replica are summed into the original module.

    The batch size should be larger than the number of GPUs used. It should
    also be an integer multiple of the number of GPUs so that each chunk is the
    same size (so that each GPU processes the same number of samples).

    See also: :ref:`cuda-nn-dataparallel-instead`

    Arbitrary positional and keyword inputs are allowed to be passed into
    DataParallel EXCEPT Tensors. All variables will be scattered on dim
    specified (default 0). Primitive types will be broadcasted, but all
    other types will be a shallow copy and can be corrupted if written to in
    the model's forward pass.

    .. warning::
        Forward and backward hooks defined on :attr:`module` and its submodules
        won't be invoked anymore, unless the hooks are initialized in the
        :meth:`forward` method.

    Args:
        module: module to be parallelized
        device_ids: CUDA devices (default: all devices)
        output_device: device location of output (default: device_ids[0])
        cpu_keywords: list of argument keywords that could be used in `forward` to
            indicate not moving the argument to gpu. Currently, only support
            argument of type: Variable

    Example::

        >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
        >>> output = net(input_var)
    """
    # NOTE(review): `cpu_keywords=[]` is a mutable default shared across
    # instances — safe only as long as it is never mutated in place.
    def __init__(self, module, device_ids=None, output_device=None, dim=0, cpu_keywords=[], minibatch=False, batch_outputs=True):
        super(DataParallel, self).__init__()
        if (not torch.cuda.is_available()):
            # No GPUs: degrade to a plain pass-through wrapper.
            self.module = module
            self.device_ids = []
            return
        if (device_ids is None):
            device_ids = list(range(torch.cuda.device_count()))
        if (output_device is None):
            output_device = device_ids[0]
        self.dim = dim
        self.module = module
        self.device_ids = device_ids
        self.output_device = output_device
        if (len(self.device_ids) == 1):
            self.module.cuda(device_ids[0])
        self.cpu_keywords = cpu_keywords
        self.minibatch = minibatch
        self.batch_outputs = batch_outputs
    def forward(self, *inputs, **kwargs):
        if (not self.device_ids):
            # CPU fallback: run the wrapped module directly.
            return self.module(*inputs, **kwargs)
        if self.minibatch:
            # Inputs arrive pre-chunked: element i of every arg belongs to
            # device_ids[i]; scatter each mini-batch to its own device.
            (inputs_list, kwargs_list) = ([], [])
            for (i, device_id) in enumerate(self.device_ids):
                mini_inputs = [x[i] for x in inputs]
                mini_kwargs = dict([(k, v[i]) for (k, v) in kwargs.items()])
                (a, b) = self._minibatch_scatter(device_id, *mini_inputs, **mini_kwargs)
                inputs_list.append(a)
                kwargs_list.append(b)
            inputs = inputs_list
            kwargs = kwargs_list
        else:
            # Pull the CPU-resident kwargs aside so scatter() doesn't move
            # them to GPU.
            kwargs_cpu = {}
            for k in kwargs:
                if (k in self.cpu_keywords):
                    v = kwargs[k]
                    kwargs_cpu[k] = v
            for k in self.cpu_keywords:
                kwargs.pop(k, None)
            (inputs, kwargs) = self.scatter(inputs, kwargs, self.device_ids)
            # Manually split CPU variables along dim, one piece per device.
            for (k, v) in kwargs_cpu.items():
                split_size = (v.size(self.dim) / len(self.device_ids))
                assert split_size.is_integer()
                kwargs_cpu[k] = list(map(Variable, torch.split(v.data, int(split_size), self.dim)))
            # Transpose {key: [v_per_device]} into per-device dicts.
            kwargs_cpu = list(map(dict, zip(*[[(k, v) for v in vs] for (k, vs) in kwargs_cpu.items()])))
            # Merge the CPU pieces back into each device's kwargs.
            for (d_gpu, d_cpu) in zip(kwargs, kwargs_cpu):
                d_gpu.update(d_cpu)
        if (len(self.device_ids) == 1):
            outputs = [self.module(*inputs[0], **kwargs[0])]
        else:
            replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
            outputs = self.parallel_apply(replicas, inputs, kwargs)
        if self.batch_outputs:
            return self.gather(outputs, self.output_device)
        else:
            # Keep per-device outputs separate, each gathered individually.
            return [self.gather([x], self.output_device) for x in outputs]
    def _minibatch_scatter(self, device_id, *inputs, **kwargs):
        # Scatter one mini-batch to a single device, keeping cpu_keywords args
        # on the CPU and merging them back afterwards.
        kwargs_cpu = {}
        for k in kwargs:
            if (k in self.cpu_keywords):
                kwargs_cpu[k] = kwargs[k]
        for k in self.cpu_keywords:
            kwargs.pop(k, None)
        (inputs, kwargs) = self.scatter(inputs, kwargs, [device_id])
        kwargs_cpu = [kwargs_cpu]
        for (d_gpu, d_cpu) in zip(kwargs, kwargs_cpu):
            d_gpu.update(d_cpu)
        return (inputs[0], kwargs[0])
    def replicate(self, module, device_ids):
        return replicate(module, device_ids)
    def scatter(self, inputs, kwargs, device_ids):
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
    def parallel_apply(self, replicas, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
    def gather(self, outputs, output_device):
        return gather(outputs, output_device, dim=self.dim)
|
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    """Evaluates module(input) in parallel across the GPUs given in device_ids.

    This is the functional version of the DataParallel module.

    Args:
        module: the module to evaluate in parallel
        inputs: inputs to the module
        device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output. Use -1 to indicate the CPU.
            (default: device_ids[0])
    Returns:
        a Variable containing the result of module(input) located on
        output_device
    """
    if not isinstance(inputs, tuple):
        inputs = (inputs,)
    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))
    if output_device is None:
        output_device = device_ids[0]

    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    # Single device: no replication needed, run directly.
    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])

    used_device_ids = device_ids[:len(inputs)]
    replicas = replicate(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim)
|
def get_a_var(obj):
    """Return the first Variable found in obj, searching nested
    lists/tuples/dicts depth-first; None if there is none."""
    if isinstance(obj, Variable):
        return obj
    if isinstance(obj, (list, tuple)):
        for item in obj:
            found = get_a_var(item)
            if isinstance(found, Variable):
                return found
    if isinstance(obj, dict):
        # Recurse over (key, value) pairs so values are searched too.
        for pair in obj.items():
            found = get_a_var(pair)
            if isinstance(found, Variable):
                return found
    return None
|
def parallel_apply(modules, inputs, kwargs_tup=None, devices=None):
    """Apply each module to its own inputs, one thread per module.

    modules, inputs (and kwargs_tup/devices, when given) must be equal-length;
    the i-th module runs on devices[i] (or on its first Variable's device).
    Exceptions raised inside a worker are re-raised in the caller.
    """
    assert len(modules) == len(inputs)
    if kwargs_tup is None:
        kwargs_tup = ({},) * len(modules)
    else:
        assert len(modules) == len(kwargs_tup)
    if devices is None:
        devices = [None] * len(modules)
    else:
        assert len(modules) == len(devices)

    lock = threading.Lock()
    results = {}

    def _worker(i, module, input, kwargs, results, lock, device=None):
        if device is None:
            # Infer the device from the first Variable in the inputs.
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                output = module(*input, **kwargs)
            with lock:
                results[i] = output
        except Exception as e:
            # Stash the exception; it is re-raised on the main thread.
            with lock:
                results[i] = e

    if len(modules) > 1:
        threads = [
            threading.Thread(target=_worker,
                             args=(i, module, input, kwargs, results, lock, device))
            for i, (module, input, kwargs, device)
            in enumerate(zip(modules, inputs, kwargs_tup, devices))
        ]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single module: no thread overhead needed.
        _worker(0, modules[0], inputs[0], kwargs_tup[0], results, lock, devices[0])

    outputs = []
    for i in range(len(inputs)):
        result = results[i]
        if isinstance(result, Exception):
            raise result
        outputs.append(result)
    return outputs
|
def replicate(network, devices):
    # Replicate `network` onto each device: parameters and buffers are
    # broadcast once (coalesced), then every module in the tree is
    # shallow-copied per replica and its _parameters/_buffers/_modules dicts
    # are re-pointed at the replica-local copies.
    from ._functions import Broadcast
    devices = tuple(devices)
    num_replicas = len(devices)
    # Broadcast all parameters to every device in one coalesced transfer.
    params = list(network.parameters())
    param_indices = {param: idx for (idx, param) in enumerate(params)}
    param_copies = Broadcast.apply(devices, *params)
    if (len(params) > 0):
        # Flat tuple -> one list of parameter copies per replica.
        param_copies = [param_copies[i:(i + len(params))] for i in range(0, len(param_copies), len(params))]
    buffers = list(network.buffers())
    buffer_indices = {buf: idx for (idx, buf) in enumerate(buffers)}
    buffer_copies = comm.broadcast_coalesced(buffers, devices)
    # Shallow-copy every module once per replica (bypassing __init__).
    modules = list(network.modules())
    module_copies = [[] for device in devices]
    module_indices = {}
    for (i, module) in enumerate(modules):
        module_indices[module] = i
        for j in range(num_replicas):
            replica = module.__new__(type(module))
            replica.__dict__ = module.__dict__.copy()
            # Copy the registries so per-replica rewiring below does not
            # mutate the original module's dicts.
            replica._parameters = replica._parameters.copy()
            replica._buffers = replica._buffers.copy()
            replica._modules = replica._modules.copy()
            module_copies[j].append(replica)
    # Re-point each replica's children, parameters, and buffers at the
    # replica-local copies (preserving None placeholders).
    for (i, module) in enumerate(modules):
        for (key, child) in module._modules.items():
            if (child is None):
                for j in range(num_replicas):
                    replica = module_copies[j][i]
                    replica._modules[key] = None
            else:
                module_idx = module_indices[child]
                for j in range(num_replicas):
                    replica = module_copies[j][i]
                    replica._modules[key] = module_copies[j][module_idx]
        for (key, param) in module._parameters.items():
            if (param is None):
                for j in range(num_replicas):
                    replica = module_copies[j][i]
                    replica._parameters[key] = None
            else:
                param_idx = param_indices[param]
                for j in range(num_replicas):
                    replica = module_copies[j][i]
                    replica._parameters[key] = param_copies[j][param_idx]
        for (key, buf) in module._buffers.items():
            if (buf is None):
                for j in range(num_replicas):
                    replica = module_copies[j][i]
                    replica._buffers[key] = None
            else:
                buffer_idx = buffer_indices[buf]
                for j in range(num_replicas):
                    replica = module_copies[j][i]
                    replica._buffers[key] = buffer_copies[j][buffer_idx]
    # modules[0] is the root network; return its replica for each device.
    return [module_copies[j][0] for j in range(num_replicas)]
|
def scatter(inputs, target_gpus, dim=0):
    """
    Slices variables into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not variables. Does not
    support Tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, None, dim, obj)
        assert not torch.is_tensor(obj), 'Tensors not supported in scatter.'
        if isinstance(obj, tuple) and obj:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and obj:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and obj:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Non-container (or empty container): duplicate the reference per GPU.
        return [obj for _ in target_gpus]
    try:
        return scatter_map(inputs)
    finally:
        # Break the closure's reference cycle so it can be collected.
        scatter_map = None
|
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
    """Scatter with support for kwargs dictionary."""
    scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
    # Pad the shorter side so both lists have one entry per device.
    if len(scattered_inputs) < len(scattered_kwargs):
        scattered_inputs.extend(() for _ in range(len(scattered_kwargs) - len(scattered_inputs)))
    elif len(scattered_kwargs) < len(scattered_inputs):
        scattered_kwargs.extend({} for _ in range(len(scattered_inputs) - len(scattered_kwargs)))
    return tuple(scattered_inputs), tuple(scattered_kwargs)
|
def gather(outputs, target_device, dim=0):
    """
    Gathers variables from different GPUs on a specified device
      (-1 means the CPU).
    """
    # collections.Sequence / collections.Mapping were removed in Python 3.10;
    # collections.abc is the supported home for these ABCs.
    import collections.abc
    error_msg = 'outputs must contain tensors, numbers, dicts or lists; found {}'
    def gather_map(outputs):
        out = outputs[0]
        elem_type = type(out)
        if isinstance(out, Variable):
            return Gather.apply(target_device, dim, *outputs)
        if (out is None):
            return None
        # NOTE(review): str is itself a Sequence, so bare strings hit this
        # branch before the string_classes check below (same behavior as the
        # original code) — confirm callers never return bare strings here.
        if isinstance(out, collections.abc.Sequence):
            return type(out)(map(gather_map, zip(*outputs)))
        elif isinstance(out, collections.abc.Mapping):
            return {key: gather_map([d[key] for d in outputs]) for key in out}
        elif ((elem_type.__module__ == 'numpy') and (elem_type.__name__ != 'str_') and (elem_type.__name__ != 'string_')):
            elem = out
            if (elem_type.__name__ == 'ndarray'):
                # Refuse string/object/unstructured dtypes.
                if (re.search('[SaUO]', elem.dtype.str) is not None):
                    raise TypeError(error_msg.format(elem.dtype))
                return Variable(torch.from_numpy(np.concatenate(outputs, dim)))
            if (elem.shape == ()):
                # numpy scalars: promote to a 1-D tensor of the matching type.
                py_type = (float if elem.dtype.name.startswith('float') else int)
                return Variable(numpy_type_map[elem.dtype.name](list(map(py_type, outputs))))
        elif isinstance(out, int_classes):
            return Variable(torch.LongTensor(outputs))
        elif isinstance(out, float):
            return Variable(torch.DoubleTensor(outputs))
        elif isinstance(out, string_classes):
            return outputs
        raise TypeError(error_msg.format(elem_type))
    try:
        return gather_map(outputs)
    finally:
        # Break the recursive closure's reference cycle.
        gather_map = None
|
def get_extensions():
    """Collect C++/CUDA sources under model/csrc and build the extension list
    for setup(); CUDA sources are included only when a toolkit is present."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, 'model', 'csrc')

    main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))
    source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))
    source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu'))

    sources = main_file + source_cpu
    extension = CppExtension
    extra_compile_args = {'cxx': []}
    define_macros = []

    # Use nvcc only when both a CUDA device and the toolkit are available.
    if torch.cuda.is_available() and CUDA_HOME is not None:
        extension = CUDAExtension
        sources += source_cuda
        define_macros += [('WITH_CUDA', None)]
        extra_compile_args['nvcc'] = [
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ]

    sources = [os.path.join(extensions_dir, s) for s in sources]
    return [
        extension(
            'model._C',
            sources,
            include_dirs=[extensions_dir],
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]
|
class AttrDict(dict):
    """A dict whose items are also reachable as attributes, with an
    immutability latch that propagates to nested AttrDicts."""

    IMMUTABLE = '__immutable__'

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # The latch lives in __dict__ so it never collides with dict keys.
        self.__dict__[AttrDict.IMMUTABLE] = False

    def __getattr__(self, name):
        instance_attrs = self.__dict__
        if name in instance_attrs:
            return instance_attrs[name]
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        if self.__dict__[AttrDict.IMMUTABLE]:
            raise AttributeError(
                'Attempted to set "{}" to "{}", but AttrDict is immutable'.format(name, value)
            )
        # Real instance attributes stay in __dict__; everything else is an item.
        if name in self.__dict__:
            self.__dict__[name] = value
        else:
            self[name] = value

    def immutable(self, is_immutable):
        """Set immutability to is_immutable and recursively apply the setting
        to all nested AttrDicts.
        """
        self.__dict__[AttrDict.IMMUTABLE] = is_immutable
        for v in self.__dict__.values():
            if isinstance(v, AttrDict):
                v.immutable(is_immutable)
        for v in self.values():
            if isinstance(v, AttrDict):
                v.immutable(is_immutable)

    def is_immutable(self):
        return self.__dict__[AttrDict.IMMUTABLE]
|
class keydefaultdict(defaultdict):
    """defaultdict variant whose factory receives the missing key itself."""

    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError(key)
        value = self.default_factory(key)
        self[key] = value
        return value
|
def load_detectron_weight(net, detectron_weight_file):
    """Copy blobs from a Detectron (caffe2) pickle into net's parameters.

    Parameters whose mapped name is not a string (e.g. None) are skipped —
    they have no Detectron counterpart.
    """
    name_mapping, _ = net.detectron_weight_mapping
    with open(detectron_weight_file, 'rb') as fp:
        src_blobs = pickle.load(fp, encoding='latin1')
    if 'blobs' in src_blobs:
        src_blobs = src_blobs['blobs']
    for p_name, p_tensor in net.state_dict().items():
        d_name = name_mapping[p_name]
        if isinstance(d_name, str):
            p_tensor.copy_(torch.Tensor(src_blobs[d_name]))
|
def resnet_weights_name_pattern():
    """Regex matching Detectron blob names that belong to a ResNet backbone."""
    return re.compile('conv1_w|conv1_gn_[sb]|res_conv1_.+|res\\d+_\\d+_.+')
|
def get_runtime_dir():
    """Retrieve the path to the runtime directory (the current working dir)."""
    return os.getcwd()
|
def get_py_bin_ext():
    """Retrieve the python binary extension ('.py')."""
    return '.py'
|
def set_up_matplotlib():
    """Configure matplotlib for headless use."""
    import matplotlib
    # Agg renders to raster buffers and needs no display server.
    matplotlib.use('Agg')
|
def exit_on_error():
    """Exit from a detectron tool when there's an error."""
    sys.exit(1)
|
def aspect_ratio_rel(im, aspect_ratio):
    """Performs width-relative aspect ratio transformation.

    Height is kept; the width becomes round(aspect_ratio * width).
    """
    im_h, im_w = im.shape[:2]
    target_w = int(round(aspect_ratio * im_w))
    return cv2.resize(im, dsize=(target_w, im_h))
|
def aspect_ratio_abs(im, aspect_ratio):
    """Performs absolute aspect ratio transformation, preserving image area."""
    im_h, im_w = im.shape[:2]
    area = im_h * im_w
    # Solve w * h = area together with w / h = aspect_ratio.
    new_w = np.sqrt(area * aspect_ratio)
    new_h = np.sqrt(area / aspect_ratio)
    assert np.isclose(new_w / new_h, aspect_ratio)
    return cv2.resize(im, dsize=(int(new_w), int(new_h)))
|
def save_object(obj, file_name):
    """Save a Python object by pickling it (highest protocol)."""
    with open(os.path.abspath(file_name), 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
|
def cache_url(url_or_file, cache_dir):
    """Download the file specified by the URL to the cache_dir and return the
    path to the cached file. If the argument is not a URL, simply return it as
    is.
    """
    if re.match('^(?:http)s?://', url_or_file, re.IGNORECASE) is None:
        # Not a URL: treat it as a local path and return unchanged.
        return url_or_file
    url = url_or_file
    assert url.startswith(_DETECTRON_S3_BASE_URL), \
        'Detectron only automatically caches URLs in the Detectron S3 bucket: {}'.format(_DETECTRON_S3_BASE_URL)

    # Mirror the S3 layout under cache_dir.
    cache_file_path = url.replace(_DETECTRON_S3_BASE_URL, cache_dir)
    if os.path.exists(cache_file_path):
        assert_cache_file_is_ok(url, cache_file_path)
        return cache_file_path

    cache_file_dir = os.path.dirname(cache_file_path)
    if not os.path.exists(cache_file_dir):
        os.makedirs(cache_file_dir)
    logger.info('Downloading remote file {} to {}'.format(url, cache_file_path))
    download_url(url, cache_file_path)
    assert_cache_file_is_ok(url, cache_file_path)
    return cache_file_path
|
def assert_cache_file_is_ok(url, file_path):
    """Assert that the md5 of `file_path` matches the reference hash for `url`."""
    actual = _get_file_md5sum(file_path)
    expected = _get_reference_md5sum(url)
    assert actual == expected, 'Target URL {} appears to be downloaded to the local cache file {}, but the md5 hash of the local file does not match the reference (actual: {} vs. expected: {}). You may wish to delete the cached file and try again to trigger automatic download.'.format(url, file_path, actual, expected)
|
def _progress_bar(count, total):
'Report download progress.\n Credit:\n https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113\n '
bar_len = 60
filled_len = int(round(((bar_len * count) / float(total))))
percents = round(((100.0 * count) / float(total)), 1)
bar = (('=' * filled_len) + ('-' * (bar_len - filled_len)))
sys.stdout.write(' [{}] {}% of {:.1f}MB file \r'.format(bar, percents, ((total / 1024) / 1024)))
sys.stdout.flush()
if (count >= total):
sys.stdout.write('\n')
|
def download_url(url, dst_file_path, chunk_size=8192, progress_hook=_progress_bar):
    """Download url and write it to dst_file_path.

    Args:
        url: source URL.
        dst_file_path: local destination file path.
        chunk_size: number of bytes to read per iteration.
        progress_hook: callable taking (bytes_so_far, total_size); pass None
            to disable progress reporting. It is also skipped when the server
            sends no Content-Length header.

    Returns:
        Total number of bytes written.

    Credit:
        https://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
    """
    response = urlopen(url)
    # Bug fix: `response.info().getheader(...)` is the Python 2 urllib2 API.
    # On Python 3, `info()` returns an http.client.HTTPMessage, which exposes
    # `get()` (case-insensitive) instead of `getheader()`.
    content_length = response.info().get('Content-Length')
    total_size = int(content_length.strip()) if content_length is not None else None
    bytes_so_far = 0
    with open(dst_file_path, 'wb') as f:
        while True:
            chunk = response.read(chunk_size)
            if not chunk:
                break
            bytes_so_far += len(chunk)
            if progress_hook and total_size:
                progress_hook(bytes_so_far, total_size)
            f.write(chunk)
    return bytes_so_far
|
def _get_file_md5sum(file_name):
'Compute the md5 hash of a file.'
hash_obj = hashlib.md5()
with open(file_name, 'r') as f:
hash_obj.update(f.read())
return hash_obj.hexdigest()
|
def _get_reference_md5sum(url):
"By convention the md5 hash for url is stored in url + '.md5sum'."
url_md5sum = (url + '.md5sum')
md5sum = urlopen(url_md5sum).read().strip()
return md5sum
|
def log_json_stats(stats, sort_keys=True):
    """Print `stats` as a single machine-parsable 'json_stats:' line."""
    serialized = json.dumps(stats, sort_keys=sort_keys)
    print('json_stats: {:s}'.format(serialized))
|
def log_stats(stats, misc_args):
    """Print a multi-line summary of training statistics to the terminal."""
    def fmt_group(d):
        # One indented, comma-separated "name: value" line per stat group.
        return '\t\t' + ', '.join('%s: %.6f' % (k, v) for k, v in d.items()) + '\n'
    if hasattr(misc_args, 'epoch'):
        lines = '[%s][%s][Epoch %d][Iter %d / %d]\n' % (
            misc_args.run_name, misc_args.cfg_filename, misc_args.epoch,
            misc_args.step, misc_args.iters_per_epoch)
    else:
        lines = '[%s][%s][Step %d / %d]\n' % (
            misc_args.run_name, misc_args.cfg_filename, stats['iter'],
            cfg.SOLVER.MAX_ITER)
    lines += '\t\tloss: %.6f, lr: %.6f time: %.6f, eta: %s\n' % (
        stats['loss'], stats['lr'], stats['time'], stats['eta'])
    if stats['metrics']:
        lines += fmt_group(stats['metrics'])
    if stats['head_losses']:
        lines += fmt_group(stats['head_losses'])
    if cfg.RPN.RPN_ON:
        lines += fmt_group(stats['rpn_losses'])
    if cfg.FPN.FPN_ON:
        lines += fmt_group(stats['rpn_fpn_cls_losses'])
        lines += fmt_group(stats['rpn_fpn_bbox_losses'])
    # Drop the trailing newline before printing.
    print(lines[:-1])
|
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size):
        # Bounded window used for median/average smoothing.
        self.deque = deque(maxlen=window_size)
        # Full history of every value ever added.
        self.series = []
        self.total = 0.0
        self.count = 0

    def AddValue(self, value):
        """Record a new observation."""
        self.deque.append(value)
        self.series.append(value)
        self.total += value
        self.count += 1

    def GetMedianValue(self):
        """Median over the smoothing window."""
        return np.median(self.deque)

    def GetAverageValue(self):
        """Mean over the smoothing window."""
        return np.mean(self.deque)

    def GetGlobalAverageValue(self):
        """Mean over every value recorded since construction."""
        return self.total / self.count
|
def send_email(subject, body, to):
    """Send a plain-text notification email via the local SMTP server."""
    smtp = smtplib.SMTP('localhost')
    message = MIMEText(body)
    message['Subject'] = subject
    message['To'] = to
    smtp.sendmail('detectron', to, message.as_string())
|
def setup_logging(name):
    """Reset root logging to a simple stdout handler and return a named logger."""
    fmt = '%(levelname)s %(filename)s:%(lineno)4d: %(message)s'
    # Clear any handlers installed by previously-imported libraries so that
    # basicConfig below actually takes effect.
    logging.root.handlers = []
    logging.basicConfig(level=logging.INFO, format=fmt, stream=sys.stdout)
    return logging.getLogger(name)
|
def get_run_name(args):
    """Return a unique name for this run: args.id if given, else timestamp + host."""
    if args.id:
        return args.id
    return datetime.now().strftime('%b%d-%H-%M-%S') + '_' + socket.gethostname()
|
def get_output_dir(args, run_name):
    """Root output directory for a run: OUTPUT_DIR/<config file stem>/<run name>."""
    cfg_filename = os.path.splitext(os.path.basename(args.cfg_file))[0]
    return os.path.join(cfg.OUTPUT_DIR, cfg_filename, run_name)
|
def infer_load_ckpt(args):
    """Point args.load_ckpt at the most recent checkpoint in the run's output dir."""
    args.run_name = get_run_name(args) + '_step'
    output_dir = get_output_dir(args, args.run_name)
    ckpt_files = glob.glob(os.path.join(output_dir, 'ckpt', '*.pth'))
    ckpt_files.sort(key=os.path.getmtime)
    if ckpt_files:
        # Newest checkpoint comes last after the mtime sort.
        args.load_ckpt = ckpt_files[-1]
        if hasattr(args, 'resume'):
            args.resume = True
|
def is_image_file(filename):
    """Checks if a file is an image.

    Args:
        filename (string): path to a file
    Returns:
        bool: True if the filename ends with a known image extension
    """
    lowered = filename.lower()
    return any(lowered.endswith(ext) for ext in IMG_EXTENSIONS)
|
def get_imagelist_from_dir(dirpath):
    """Return full paths of all image files directly inside `dirpath`."""
    return [os.path.join(dirpath, f) for f in os.listdir(dirpath) if is_image_file(f)]
|
def ensure_optimizer_ckpt_params_order(param_groups_names, checkpoint):
    """Reorder the parameter ids in the SGD optimizer checkpoint to match
    the current order in the program, in case parameter insertion order is changed.

    Args:
        param_groups_names: per-param-group lists of parameter names, in the
            program's current ordering.
        checkpoint: loaded checkpoint dict with 'model' and 'optimizer'
            entries; its optimizer 'params' id lists are reordered in place.
    """
    assert (len(param_groups_names) == len(checkpoint['optimizer']['param_groups']))
    param_lens = (len(g) for g in param_groups_names)
    saved_lens = (len(g['params']) for g in checkpoint['optimizer']['param_groups'])
    if any(((p_len != s_len) for (p_len, s_len) in zip(param_lens, saved_lens))):
        raise ValueError("loaded state dict contains a parameter group that doesn't match the size of optimizer's group")
    # Map each parameter name to its (group index, position in group) under
    # the program's current ordering.
    name_to_curpos = {}
    for (i, p_names) in enumerate(param_groups_names):
        for (j, name) in enumerate(p_names):
            name_to_curpos[name] = (i, j)
    # Walk the checkpoint's model params in their saved order; for each one,
    # record the current position of the same-named parameter. cnts[g] counts
    # how many saved ids of group g have been consumed, pairing each saved id
    # with its saved position within the group.
    param_groups_inds = [[] for _ in range(len(param_groups_names))]
    cnts = ([0] * len(param_groups_names))
    for key in checkpoint['model']:
        pos = name_to_curpos.get(key)
        if pos:
            saved_p_id = checkpoint['optimizer']['param_groups'][pos[0]]['params'][cnts[pos[0]]]
            # Sanity check: the saved momentum buffer must have the same shape
            # as the parameter, otherwise the id pairing is wrong.
            assert (checkpoint['model'][key].shape == checkpoint['optimizer']['state'][saved_p_id]['momentum_buffer'].shape), 'param and momentum_buffer shape mismatch in checkpoint. param_name: {}, param_id: {}'.format(key, saved_p_id)
            param_groups_inds[pos[0]].append(pos[1])
            cnts[pos[0]] += 1
    # Sort each group's saved param ids by their current positions so the
    # optimizer state lines up with the rebuilt parameter order.
    for (cnt, param_inds) in enumerate(param_groups_inds):
        ckpt_params = checkpoint['optimizer']['param_groups'][cnt]['params']
        assert (len(ckpt_params) == len(param_inds))
        ckpt_params = [x for (x, _) in sorted(zip(ckpt_params, param_inds), key=(lambda x: x[1]))]
        checkpoint['optimizer']['param_groups'][cnt]['params'] = ckpt_params
|
def load_optimizer_state_dict(optimizer, state_dict):
    """Load `state_dict` into `optimizer`, remapping saved param ids to the
    optimizer's current parameters and casting tensors to each param's
    dtype/device. Mirrors torch.optim.Optimizer.load_state_dict, except the
    optimizer's own per-group 'params' lists are kept.
    """
    state_dict = deepcopy(state_dict)
    groups = optimizer.param_groups
    saved_groups = state_dict['param_groups']
    if (len(groups) != len(saved_groups)):
        raise ValueError('loaded state dict has a different number of parameter groups')
    param_lens = (len(g['params']) for g in groups)
    saved_lens = (len(g['params']) for g in saved_groups)
    if any(((p_len != s_len) for (p_len, s_len) in zip(param_lens, saved_lens))):
        raise ValueError("loaded state dict contains a parameter group that doesn't match the size of optimizer's group")
    # Pair saved integer param ids with current Parameter objects positionally
    # (group by group, param by param).
    id_map = {old_id: p for (old_id, p) in zip(chain(*(g['params'] for g in saved_groups)), chain(*(g['params'] for g in groups)))}
    def cast(param, value):
        'Make a deep copy of value, casting all tensors to device of param.'
        if torch.is_tensor(value):
            # Only floating-point state is re-typed to match the param; it is
            # then moved to the param's device (GPU or CPU).
            if isinstance(param.data, (torch.FloatTensor, torch.cuda.FloatTensor, torch.DoubleTensor, torch.cuda.DoubleTensor, torch.HalfTensor, torch.cuda.HalfTensor)):
                value = value.type_as(param.data)
            value = (value.cuda(param.get_device()) if param.is_cuda else value.cpu())
            return value
        elif isinstance(value, dict):
            return {k: cast(param, v) for (k, v) in value.items()}
        elif isinstance(value, Iterable):
            return type(value)((cast(param, v) for v in value))
        else:
            return value
    # Rebuild per-parameter state keyed by Parameter objects; entries whose
    # saved id has no current counterpart are kept under their old key.
    state = defaultdict(dict)
    for (k, v) in state_dict['state'].items():
        if (k in id_map):
            param = id_map[k]
            state[param] = cast(param, v)
        else:
            state[k] = v
    def update_group(group, new_group):
        # Keep the optimizer's current 'params' list; adopt the saved
        # hyper-parameters (lr, weight_decay, ...).
        new_group['params'] = group['params']
        return new_group
    param_groups = [update_group(g, ng) for (g, ng) in zip(groups, saved_groups)]
    optimizer.__setstate__({'state': state, 'param_groups': param_groups})
|
def load_pretrained_imagenet_weights(model):
    """Load pretrained ImageNet ResNet weights into the backbone of `model`.

    Args:
        model: the generalized rcnn module
    """
    (_, ext) = os.path.splitext(cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS)
    if (ext == '.pkl'):
        # Detectron-style pickled blobs (latin1 handles py2-written pickles).
        with open(cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, 'rb') as fp:
            src_blobs = pickle.load(fp, encoding='latin1')
        if ('blobs' in src_blobs):
            src_blobs = src_blobs['blobs']
        pretrianed_state_dict = src_blobs
    else:
        # Torchvision checkpoint; rename keys to our res1..resN scheme.
        weights_file = os.path.join(cfg.ROOT_DIR, cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS)
        pretrianed_state_dict = convert_state_dict(torch.load(weights_file))
    # Fold each BatchNorm's saved statistics into an affine scale/bias:
    #   std = sqrt(var + eps); new_scale = scale/std; new_bias = bias - mean*scale/std
    # NOTE(review): AffineChannel2d presumably applies y = scale*x + bias so
    # this reproduces BN's inference-time output — confirm against mynn.
    for (name, mod) in model.named_modules():
        if isinstance(mod, mynn.AffineChannel2d):
            if cfg.FPN.FPN_ON:
                # NOTE(review): with FPN the module path appears to carry one
                # extra nesting level, hence stripping two components here.
                pretrianed_name = name.split('.', 2)[(- 1)]
            else:
                pretrianed_name = name.split('.', 1)[(- 1)]
            bn_mean = pretrianed_state_dict[(pretrianed_name + '.running_mean')]
            bn_var = pretrianed_state_dict[(pretrianed_name + '.running_var')]
            scale = pretrianed_state_dict[(pretrianed_name + '.weight')]
            bias = pretrianed_state_dict[(pretrianed_name + '.bias')]
            std = torch.sqrt((bn_var + 1e-05))
            new_scale = (scale / std)
            new_bias = (bias - ((bn_mean * scale) / std))
            pretrianed_state_dict[(pretrianed_name + '.weight')] = new_scale
            pretrianed_state_dict[(pretrianed_name + '.bias')] = new_bias
    # Copy only weights whose detectron-mapped name matches the resnet
    # backbone pattern; other heads keep their random init.
    model_state_dict = model.state_dict()
    pattern = dwh.resnet_weights_name_pattern()
    (name_mapping, _) = model.detectron_weight_mapping
    for (k, v) in name_mapping.items():
        if isinstance(v, str):
            if pattern.match(v):
                if cfg.FPN.FPN_ON:
                    pretrianed_key = k.split('.', 2)[(- 1)]
                else:
                    pretrianed_key = k.split('.', 1)[(- 1)]
                if (ext == '.pkl'):
                    # .pkl blobs are keyed by detectron names and hold arrays,
                    # so wrap in a Tensor before copying.
                    model_state_dict[k].copy_(torch.Tensor(pretrianed_state_dict[v]))
                else:
                    model_state_dict[k].copy_(pretrianed_state_dict[pretrianed_key])
|
def convert_state_dict(src_dict):
    """Return the correct mapping of tensor name and value.

    Maps torchvision ResNet parameter names onto our resnet conv_body and
    box_head naming scheme: 'layerN.*' -> 'res(N+1).*', the stem layers ->
    'res1.*', and the final 'fc' classifier is dropped.
    """
    dst_dict = {}
    for key, value in src_dict.items():
        if key.startswith('fc'):
            # The ImageNet classifier head has no counterpart in detection.
            continue
        parts = key.split('.')
        if key.startswith('layer'):
            # 'layerN' is exactly 6 characters; its trailing digit selects
            # the residual stage (shifted by one: layer1 -> res2).
            assert len(parts[0]) == 6
            stage = int(parts[0][5]) + 1
            new_key = '.'.join(['res%d' % stage] + parts[1:])
        else:
            # Stem layers (conv1, bn1, ...) live under 'res1'.
            new_key = '.'.join(['res1'] + parts)
        dst_dict[new_key] = value
    return dst_dict
|
def mobilenet_load_pretrained_imagenet_weights(model):
    """Load ImageNet-pretrained MobileNet weights into `model`.

    Args:
        model: the generalized rcnn module
    """
    _, ext = os.path.splitext(cfg.TRAIN.IMAGENET_PRETRAINED_WEIGHTS)
    if ext == '.pkl':
        # Detectron-style pickled blobs (latin1 handles py2-written pickles).
        with open(cfg.TRAIN.IMAGENET_PRETRAINED_WEIGHTS, 'rb') as fp:
            src_blobs = pickle.load(fp, encoding='latin1')
        if 'blobs' in src_blobs:
            src_blobs = src_blobs['blobs']
        pretrained_state_dict = src_blobs
    else:
        # PyTorch checkpoint from torchvision; rename keys to our scheme.
        weights_file = os.path.join(cfg.ROOT_DIR, cfg.TRAIN.IMAGENET_PRETRAINED_WEIGHTS)
        pretrained_state_dict = mobilenet_convert_state_dict(torch.load(weights_file))
    # strict=False: only matching backbone keys are loaded.
    model.Conv_Body.conv.load_state_dict(pretrained_state_dict, strict=False)
    if hasattr(model, 'Box_Head'):
        model.Box_Head.conv.load_state_dict(pretrained_state_dict, strict=False)
|
def mobilenet_convert_state_dict(src_dict):
    """Return the correct mapping of tensor name and value.

    Strips the torchvision 'features.' prefix from MobileNet backbone keys so
    they match our conv_body naming; all other keys (e.g. the classifier)
    are dropped.
    """
    prefix = 'features.'
    dst_dict = {}
    for k, v in src_dict.items():
        # Bug fix: the original tested `'features' in k`, which also matched
        # the substring mid-key and then blindly chopped the first
        # len('features.') characters; a true prefix test is intended.
        if k.startswith(prefix):
            dst_dict[k[len(prefix):]] = v
    return dst_dict
|
def vgg_load_pretrained_imagenet_weights(model, convert_state_dict):
    """Load ImageNet-pretrained VGG-family weights into `model`.

    Args:
        model: the generalized rcnn module
        convert_state_dict: callable mapping a torchvision state dict onto
            our layer names (e.g. vgg16_convert_state_dict).
    """
    _, ext = os.path.splitext(cfg.TRAIN.IMAGENET_PRETRAINED_WEIGHTS)
    if ext == '.pkl':
        # Detectron-style pickled blobs (latin1 handles py2-written pickles).
        with open(cfg.TRAIN.IMAGENET_PRETRAINED_WEIGHTS, 'rb') as fp:
            src_blobs = pickle.load(fp, encoding='latin1')
        if 'blobs' in src_blobs:
            src_blobs = src_blobs['blobs']
        pretrained_state_dict = src_blobs
    else:
        # PyTorch checkpoint; rename keys via the supplied converter.
        weights_file = os.path.join(cfg.ROOT_DIR, cfg.TRAIN.IMAGENET_PRETRAINED_WEIGHTS)
        pretrained_state_dict = convert_state_dict(torch.load(weights_file))
    # strict=False: only matching keys are loaded.
    model.Conv_Body.load_state_dict(pretrained_state_dict, strict=False)
    if hasattr(model, 'Box_Head'):
        model.Box_Head.load_state_dict(pretrained_state_dict, strict=False)
|
def vgg16_load_pretrained_imagenet_weights(model):
    """Load ImageNet-pretrained VGG16 weights into `model`."""
    vgg_load_pretrained_imagenet_weights(model, vgg16_convert_state_dict)
|
def vgg16_convert_state_dict(src_dict):
    """Return the correct mapping of tensor name and value.

    Maps torchvision VGG16 layer indices onto our named conv/fc layers and
    copies each layer's weight and bias tensors under the new names.
    """
    mapping = {'features.0': 'conv1_1', 'features.2': 'conv1_2', 'features.5': 'conv2_1', 'features.7': 'conv2_2', 'features.10': 'conv3_1', 'features.12': 'conv3_2', 'features.14': 'conv3_3', 'features.17': 'conv4_1', 'features.19': 'conv4_2', 'features.21': 'conv4_3', 'features.24': 'conv5_1', 'features.26': 'conv5_2', 'features.28': 'conv5_3', 'classifier.0': 'fc6', 'classifier.3': 'fc7', 'classifier.6': 'fc8'}
    dst_dict = {}
    for torchvision_name, our_name in mapping.items():
        for suffix in ('.weight', '.bias'):
            dst_dict[our_name + suffix] = src_dict[torchvision_name + suffix]
    return dst_dict
|
def vggm_load_pretrained_imagenet_weights(model):
    """Load ImageNet-pretrained VGG-M weights into `model`."""
    vgg_load_pretrained_imagenet_weights(model, vggm_convert_state_dict)
|
def vggm_convert_state_dict(src_dict):
    """Return the correct mapping of tensor name and value.

    Maps torchvision VGG-M layer indices onto our named conv/fc layers and
    copies each layer's weight and bias tensors under the new names.
    """
    mapping = {'features.0': 'conv1', 'features.4': 'conv2', 'features.8': 'conv3', 'features.10': 'conv4', 'features.12': 'conv5', 'classifier.0': 'fc6', 'classifier.3': 'fc7', 'classifier.6': 'fc8'}
    dst_dict = {}
    for torchvision_name, our_name in mapping.items():
        for suffix in ('.weight', '.bias'):
            dst_dict[our_name + suffix] = src_dict[torchvision_name + suffix]
    return dst_dict
|
def process_in_parallel(tag, total_range_size, binary, output_dir, load_ckpt, load_detectron, opts=''):
    """Run the specified binary NUM_GPUS times in parallel, each time as a
    subprocess that uses one GPU. The binary must accept the command line
    arguments `--range {start} {end}` that specify a data processing range.

    Returns:
        List of unpickled per-range result objects, in subprocess order.
    """
    # Snapshot the current config so every subprocess sees identical settings.
    cfg_file = os.path.join(output_dir, '{}_range_config.yaml'.format(tag))
    with open(cfg_file, 'w') as f:
        yaml.dump(cfg, stream=f)
    subprocess_env = os.environ.copy()
    processes = []
    NUM_GPUS = torch.cuda.device_count()
    # Split the full index range into one contiguous chunk per GPU.
    subinds = np.array_split(range(total_range_size), NUM_GPUS)
    cuda_visible_devices = os.environ.get('CUDA_VISIBLE_DEVICES')
    if cuda_visible_devices:
        gpu_inds = list(map(int, cuda_visible_devices.split(',')))
        assert ((- 1) not in gpu_inds), "Hiding GPU indices using the '-1' index is not supported"
    else:
        gpu_inds = range(cfg.NUM_GPUS)
    gpu_inds = list(gpu_inds)
    for (i, gpu_ind) in enumerate(gpu_inds):
        start = subinds[i][0]
        end = (subinds[i][(- 1)] + 1)
        # Pin each child to a single GPU via CUDA_VISIBLE_DEVICES.
        subprocess_env['CUDA_VISIBLE_DEVICES'] = str(gpu_ind)
        cmd = 'python {binary} --range {start} {end} --cfg {cfg_file} --set {opts} --output_dir {output_dir}'
        if (load_ckpt is not None):
            cmd += ' --load_ckpt {load_ckpt}'
        elif (load_detectron is not None):
            cmd += ' --load_detectron {load_detectron}'
        # NOTE(review): with the default opts='' the join below iterates
        # characters; callers presumably pass a list of option strings —
        # confirm at call sites.
        cmd = cmd.format(binary=shlex_quote(binary), start=int(start), end=int(end), cfg_file=shlex_quote(cfg_file), output_dir=output_dir, load_ckpt=load_ckpt, load_detectron=load_detectron, opts=' '.join([shlex_quote(opt) for opt in opts]))
        logger.info('{} range command {}: {}'.format(tag, i, cmd))
        # First subprocess streams to the parent via a pipe; the rest buffer
        # their output into per-range .stdout files.
        if (i == 0):
            subprocess_stdout = subprocess.PIPE
        else:
            filename = os.path.join(output_dir, ('%s_range_%s_%s.stdout' % (tag, start, end)))
            subprocess_stdout = open(filename, 'w')
        p = subprocess.Popen(cmd, shell=True, env=subprocess_env, stdout=subprocess_stdout, stderr=subprocess.STDOUT, bufsize=1)
        processes.append((i, p, start, end, subprocess_stdout))
    # Wait for each child, relay its log output, then collect its pickled
    # per-range results.
    outputs = []
    for (i, p, start, end, subprocess_stdout) in processes:
        log_subprocess_output(i, p, output_dir, tag, start, end)
        if isinstance(subprocess_stdout, IOBase):
            subprocess_stdout.close()
        range_file = os.path.join(output_dir, ('%s_range_%s_%s.pkl' % (tag, start, end)))
        range_data = pickle.load(open(range_file, 'rb'))
        outputs.append(range_data)
    return outputs
|
def log_subprocess_output(i, p, output_dir, tag, start, end):
    """Capture the output of each subprocess and log it in the parent process.
    The first subprocess's output is logged in realtime. The output from the
    other subprocesses is buffered and then printed all at once (in order) when
    subprocesses finish.
    """
    outfile = os.path.join(output_dir, ('%s_range_%s_%s.stdout' % (tag, start, end)))
    logger.info((('# ' + ('-' * 76)) + ' #'))
    logger.info(('stdout of subprocess %s with range [%s, %s]' % (i, (start + 1), end)))
    logger.info((('# ' + ('-' * 76)) + ' #'))
    if (i == 0):
        # Subprocess 0 was started with stdout=PIPE: echo each line as it
        # arrives and mirror it into the per-range log file.
        with open(outfile, 'w') as f:
            for line in iter(p.stdout.readline, b''):
                print(line.rstrip().decode('ascii'))
                f.write(str(line, encoding='ascii'))
        p.stdout.close()
        ret = p.wait()
    else:
        # Other subprocesses already wrote to their files; dump after exit.
        ret = p.wait()
        with open(outfile, 'r') as f:
            print(''.join(f.readlines()))
    assert (ret == 0), 'Range subprocess failed (exit code: {})'.format(ret)
|
class Timer(object):
    """A simple timer tracking total, last, and average elapsed time."""

    def __init__(self):
        self.reset()

    def tic(self):
        """Start (or restart) timing."""
        # time.time() (not perf_counter) to keep the original semantics.
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop timing; return the running average (default) or the last diff."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff

    def reset(self):
        """Zero all accumulated statistics."""
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.diff = 0.0
        self.average_time = 0.0
|
class TrainingStats(object):
    """Track vital training statistics (losses, metrics, iteration timing) and
    periodically log them to the terminal and, optionally, to tensorboard."""
    def __init__(self, misc_args, log_period=20, tensorboard_logger=None):
        self.misc_args = misc_args
        # Terminal/tensorboard logging period, in SGD iterations.
        self.LOG_PERIOD = log_period
        self.tblogger = tensorboard_logger
        # Stat keys that are not numeric scalars and must not go to tensorboard.
        self.tb_ignored_keys = ['iter', 'eta']
        self.iter_timer = Timer()
        # Window size for median smoothing of tracked values.
        self.WIN_SZ = 20
        def create_smoothed_value():
            return SmoothedValue(self.WIN_SZ)
        self.smoothed_losses = defaultdict(create_smoothed_value)
        self.smoothed_metrics = defaultdict(create_smoothed_value)
        self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
        # 'inner_*' buffers accumulate per-sub-iteration values while
        # gradients are being accumulated over iter_size sub-steps.
        self.inner_total_loss = []
        self.inner_losses = defaultdict(list)
        if cfg.FPN.FPN_ON:
            self.inner_loss_rpn_cls = []
            self.inner_loss_rpn_bbox = []
        self.inner_metrics = defaultdict(list)
    def IterTic(self):
        """Start timing the current iteration."""
        self.iter_timer.tic()
    def IterToc(self):
        """Stop timing; return this iteration's elapsed time."""
        return self.iter_timer.toc(average=False)
    def ResetIterTimer(self):
        self.iter_timer.reset()
    def UpdateIterStats(self, model_out, inner_iter=None):
        'Update tracked iteration statistics.'
        if ((inner_iter is not None) and (self.misc_args.iter_size > 1)):
            # Gradient accumulation: delegate to the inner-iteration variant.
            return self._UpdateIterStats_inner(model_out, inner_iter)
        total_loss = 0
        if cfg.FPN.FPN_ON:
            loss_rpn_cls_data = 0
            loss_rpn_bbox_data = 0
        for (k, loss) in model_out['losses'].items():
            assert (loss.shape[0] == cfg.NUM_GPUS)
            # Average the per-GPU loss values into a single-element tensor.
            loss = loss.mean(dim=0, keepdim=True)
            total_loss += loss
            loss_data = loss.item()
            model_out['losses'][k] = loss
            if cfg.FPN.FPN_ON:
                # Aggregate the per-level RPN losses into two running totals.
                if k.startswith('loss_rpn_cls_'):
                    loss_rpn_cls_data += loss_data
                elif k.startswith('loss_rpn_bbox_'):
                    loss_rpn_bbox_data += loss_data
            self.smoothed_losses[k].AddValue(loss_data)
        model_out['total_loss'] = total_loss
        self.smoothed_total_loss.AddValue(total_loss.item())
        if cfg.FPN.FPN_ON:
            self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_data)
            self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_data)
        for (k, metric) in model_out['metrics'].items():
            metric = metric.mean(dim=0, keepdim=True)
            self.smoothed_metrics[k].AddValue(metric.item())
    def _UpdateIterStats_inner(self, model_out, inner_iter):
        'Update tracked iteration statistics for the case of iter_size > 1'
        assert (inner_iter < self.misc_args.iter_size)
        total_loss = 0
        if cfg.FPN.FPN_ON:
            loss_rpn_cls_data = 0
            loss_rpn_bbox_data = 0
        if (inner_iter == 0):
            # First sub-iteration: reset all accumulation buffers.
            self.inner_total_loss = []
            for k in model_out['losses']:
                self.inner_losses[k] = []
            if cfg.FPN.FPN_ON:
                self.inner_loss_rpn_cls = []
                self.inner_loss_rpn_bbox = []
            for k in model_out['metrics']:
                self.inner_metrics[k] = []
        for (k, loss) in model_out['losses'].items():
            assert (loss.shape[0] == cfg.NUM_GPUS)
            loss = loss.mean(dim=0, keepdim=True)
            total_loss += loss
            loss_data = loss.item()
            model_out['losses'][k] = loss
            if cfg.FPN.FPN_ON:
                if k.startswith('loss_rpn_cls_'):
                    loss_rpn_cls_data += loss_data
                elif k.startswith('loss_rpn_bbox_'):
                    loss_rpn_bbox_data += loss_data
            self.inner_losses[k].append(loss_data)
            if (inner_iter == (self.misc_args.iter_size - 1)):
                # Last sub-iteration: fold the per-sub-step mean into the
                # smoothed series.
                loss_data = self._mean_and_reset_inner_list('inner_losses', k)
                self.smoothed_losses[k].AddValue(loss_data)
        model_out['total_loss'] = total_loss
        total_loss_data = total_loss.item()
        self.inner_total_loss.append(total_loss_data)
        if cfg.FPN.FPN_ON:
            self.inner_loss_rpn_cls.append(loss_rpn_cls_data)
            self.inner_loss_rpn_bbox.append(loss_rpn_bbox_data)
        if (inner_iter == (self.misc_args.iter_size - 1)):
            total_loss_data = self._mean_and_reset_inner_list('inner_total_loss')
            self.smoothed_total_loss.AddValue(total_loss_data)
            if cfg.FPN.FPN_ON:
                loss_rpn_cls_data = self._mean_and_reset_inner_list('inner_loss_rpn_cls')
                loss_rpn_bbox_data = self._mean_and_reset_inner_list('inner_loss_rpn_bbox')
                self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_data)
                self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_data)
        for (k, metric) in model_out['metrics'].items():
            metric = metric.mean(dim=0, keepdim=True)
            metric_data = metric.item()
            self.inner_metrics[k].append(metric_data)
            if (inner_iter == (self.misc_args.iter_size - 1)):
                metric_data = self._mean_and_reset_inner_list('inner_metrics', k)
                self.smoothed_metrics[k].AddValue(metric_data)
    def _mean_and_reset_inner_list(self, attr_name, key=None):
        'Take the mean and reset list empty'
        if key:
            mean_val = (sum(getattr(self, attr_name)[key]) / self.misc_args.iter_size)
            getattr(self, attr_name)[key] = []
        else:
            mean_val = (sum(getattr(self, attr_name)) / self.misc_args.iter_size)
            setattr(self, attr_name, [])
        return mean_val
    def LogIterStats(self, cur_iter, lr):
        'Log the tracked statistics.'
        # Log on the period boundary and always on the final iteration.
        if (((cur_iter % self.LOG_PERIOD) == 0) or (cur_iter == (cfg.SOLVER.MAX_ITER - 1))):
            stats = self.GetStats(cur_iter, lr)
            log_stats(stats, self.misc_args)
            if self.tblogger:
                self.tb_log_stats(stats, cur_iter)
    def tb_log_stats(self, stats, cur_iter):
        'Log the tracked statistics to tensorboard'
        for k in stats:
            if (k not in self.tb_ignored_keys):
                v = stats[k]
                if isinstance(v, dict):
                    # Recurse into the nested loss/metric dicts.
                    self.tb_log_stats(v, cur_iter)
                else:
                    self.tblogger.add_scalar(k, v, cur_iter)
    def GetStats(self, cur_iter, lr):
        """Assemble an OrderedDict of current (median-smoothed) statistics."""
        eta_seconds = (self.iter_timer.average_time * (cfg.SOLVER.MAX_ITER - cur_iter))
        eta = str(datetime.timedelta(seconds=int(eta_seconds)))
        stats = OrderedDict(iter=(cur_iter + 1), time=self.iter_timer.average_time, eta=eta, loss=self.smoothed_total_loss.GetMedianValue(), lr=lr)
        stats['metrics'] = OrderedDict()
        for k in sorted(self.smoothed_metrics):
            stats['metrics'][k] = self.smoothed_metrics[k].GetMedianValue()
        # Bucket losses by how many '_'-separated tokens their key has:
        # 2 -> head losses, 3 -> rpn losses, 4 with 'cls'/'bbox' as the third
        # token -> per-FPN-level rpn losses.
        head_losses = []
        rpn_losses = []
        rpn_fpn_cls_losses = []
        rpn_fpn_bbox_losses = []
        for (k, v) in self.smoothed_losses.items():
            toks = k.split('_')
            if (len(toks) == 2):
                head_losses.append((k, v.GetMedianValue()))
            elif (len(toks) == 3):
                rpn_losses.append((k, v.GetMedianValue()))
            elif ((len(toks) == 4) and (toks[2] == 'cls')):
                rpn_fpn_cls_losses.append((k, v.GetMedianValue()))
            elif ((len(toks) == 4) and (toks[2] == 'bbox')):
                rpn_fpn_bbox_losses.append((k, v.GetMedianValue()))
            else:
                raise ValueError(('Unexpected loss key: %s' % k))
        stats['head_losses'] = OrderedDict(head_losses)
        stats['rpn_losses'] = OrderedDict(rpn_losses)
        stats['rpn_fpn_cls_losses'] = OrderedDict(rpn_fpn_cls_losses)
        stats['rpn_fpn_bbox_losses'] = OrderedDict(rpn_fpn_bbox_losses)
        return stats
|
def add_path(path):
    """Prepend `path` to sys.path unless it is already present."""
    if path not in sys.path:
        sys.path.insert(0, path)
|
def parse_args():
    """Parse command line arguments for the pretrained-weight download tool."""
    parser = argparse.ArgumentParser(formatter_class=ColorHelpFormatter)
    parser.add_argument('--output_dir', help='Directory to save downloaded weight files', default=os.path.join(cfg.DATA_DIR, 'pretrained_model'))
    # Highlight each allowed file name in yellow within the --help text.
    choices = list(PRETRAINED_WEIGHTS.keys())
    colored = ', '.join((Fore.YELLOW + s + Fore.RESET) for s in choices)
    parser.add_argument('-t', '--targets', nargs='+', metavar='file_name', help='Files to download. Allowed values are: ' + colored, choices=choices, default=list(choices))
    return parser.parse_args()
|
def download_file_from_google_drive(id, destination):
    """Download the Google Drive file identified by `id` to `destination`."""
    URL = 'https://docs.google.com/uc?export=download'
    session = requests.Session()
    response = session.get(URL, params={'id': id}, stream=True)
    # Large files trigger a confirmation interstitial; re-request with the
    # token from the warning cookie when present.
    token = get_confirm_token(response)
    if token:
        response = session.get(URL, params={'id': id, 'confirm': token}, stream=True)
    save_response_content(response, destination)
|
def get_confirm_token(response):
    """Return Google Drive's download-warning cookie value, or None if absent."""
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            return value
    return None
|
def save_response_content(response, destination):
    """Stream the body of `response` to `destination` in 32 KiB chunks."""
    CHUNK_SIZE = 32768
    with open(destination, 'wb') as f:
        for chunk in response.iter_content(CHUNK_SIZE):
            # Skip keep-alive chunks, which arrive as empty bytes.
            if chunk:
                f.write(chunk)
|
def main():
    """Download the requested pretrained weight files from Google Drive."""
    init()
    args = parse_args()
    for filename in args.targets:
        file_id = PRETRAINED_WEIGHTS[filename]
        # The output directory may not exist on the first iteration.
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        destination = os.path.join(args.output_dir, filename)
        download_file_from_google_drive(file_id, destination)
        print('Download {} to {}'.format(filename, destination))
|
def parse_args():
    """Parse command line arguments for testing a trained Fast R-CNN network."""
    parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
    add = parser.add_argument
    add('--dataset', help='training dataset')
    add('--cfg', dest='cfg_file', required=True, help='optional config file')
    add('--load_ckpt', help='path of checkpoint to load')
    add('--load_detectron', help='path to the detectron weight pickle file')
    add('--id', type=str, default='', help='The id you want to use to save the model or autoresume')
    add('--output_dir', help='output directory to save the testing results. If not provided, defaults to [args.load_ckpt|args.load_detectron]/../test.')
    add('--output_dir_append', help='will be [args.load_ckpt|args.load_detectron]/../test/[output_dir_append].This allows us to save different test setting in different directory.')
    add('--set', dest='set_cfgs', help='set config keys, will overwrite config in the cfg_file. See lib/core/config.py for all options', default=[], nargs='*')
    add('--range', help='start (inclusive) and end (exclusive) indices', type=int, nargs=2)
    add('--multi-gpu-testing', help='using multiple gpus for inference', action='store_true')
    add('--vis', dest='vis', help='visualize detections', action='store_true')
    return parser.parse_args()
|
def parse_args():
    """Parse command line arguments for training an X-RCNN network."""
    parser = argparse.ArgumentParser(description='Train a X-RCNN network')
    add = parser.add_argument
    add('--dataset', dest='dataset', required=True, help='Dataset to use')
    add('--cfg', dest='cfg_file', required=True, help='Config file for training (and optionally testing)')
    add('--set', dest='set_cfgs', help='Set config keys. Key value sequence seperate by whitespace.e.g. [key] [value] [key] [value]', default=[], nargs='+')
    add('--disp_interval', help='Display training info every N iterations', default=100, type=int)
    add('--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')
    add('--bs', dest='batch_size', help='Explicitly specify to overwrite the value comed from cfg_file.', type=int)
    add('--nw', dest='num_workers', help='Explicitly specify to overwrite number of workers to load data. Defaults to 4', type=int)
    add('--o', dest='optimizer', help='Training optimizer.', default=None)
    add('--lr', help='Base learning rate.', default=None, type=float)
    add('--lr_decay_gamma', help='Learning rate decay rate.', default=None, type=float)
    add('--lr_decay_epochs', help='Epochs to decay the learning rate on. Decay happens on the beginning of a epoch. Epoch is 0-indexed.', default=[4, 5], nargs='+', type=int)
    add('--start_iter', help='Starting iteration for first training epoch. 0-indexed.', default=0, type=int)
    add('--start_epoch', help='Starting epoch count. Epoch is 0-indexed.', default=0, type=int)
    add('--epochs', dest='num_epochs', help='Number of epochs to train', default=6, type=int)
    add('--resume', help='resume to training on a checkpoint', action='store_true')
    add('--no_save', help='do not save anything', action='store_true')
    add('--ckpt_num_per_epoch', help='number of checkpoints to save in each epoch. Not include the one at the end of an epoch.', default=3, type=int)
    add('--load_ckpt', help='checkpoint path to load')
    add('--load_detectron', help='path to the detectron weight pickle file')
    add('--use_tfboard', help='Use tensorflow tensorboard to log training info', action='store_true')
    return parser.parse_args()
|
def main():
    """Main function: configure, build, and train a Mask/Keypoint R-CNN model."""
    args = parse_args()
    print('Called with args:')
    print(args)
    if (not torch.cuda.is_available()):
        sys.exit('Need a CUDA device to run the code.')
    if (args.cuda or (cfg.NUM_GPUS > 0)):
        cfg.CUDA = True
    else:
        raise ValueError('Need Cuda device to run !')
    # Dataset selection fixes both the training split and the class count.
    if (args.dataset == 'coco2017'):
        cfg.TRAIN.DATASETS = ('coco_2017_train',)
        cfg.MODEL.NUM_CLASSES = 81
    elif (args.dataset == 'keypoints_coco2017'):
        cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
        cfg.MODEL.NUM_CLASSES = 2
    else:
        raise ValueError('Unexpected args.dataset: {}'.format(args.dataset))
    cfg_from_file(args.cfg_file)
    if (args.set_cfgs is not None):
        cfg_from_list(args.set_cfgs)
    # Adapt the effective batch size to the locally visible GPU count, keeping
    # batch_size == NUM_GPUS * IMS_PER_BATCH.
    original_batch_size = (cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH)
    if (args.batch_size is None):
        args.batch_size = original_batch_size
    cfg.NUM_GPUS = torch.cuda.device_count()
    assert ((args.batch_size % cfg.NUM_GPUS) == 0), ('batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS))
    cfg.TRAIN.IMS_PER_BATCH = (args.batch_size // cfg.NUM_GPUS)
    print('Batch size change from {} (in config file) to {}'.format(original_batch_size, args.batch_size))
    print(('NUM_GPUs: %d, TRAIN.IMS_PER_BATCH: %d' % (cfg.NUM_GPUS, cfg.TRAIN.IMS_PER_BATCH)))
    if (args.num_workers is not None):
        cfg.DATA_LOADER.NUM_THREADS = args.num_workers
    print(('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS))
    # Scale the base LR linearly with the batch-size change.
    old_base_lr = cfg.SOLVER.BASE_LR
    cfg.SOLVER.BASE_LR *= (args.batch_size / original_batch_size)
    print('Adjust BASE_LR linearly according to batch size change: {} --> {}'.format(old_base_lr, cfg.SOLVER.BASE_LR))
    # Command-line solver overrides take precedence over the config file.
    if (args.optimizer is not None):
        cfg.SOLVER.TYPE = args.optimizer
    if (args.lr is not None):
        cfg.SOLVER.BASE_LR = args.lr
    if (args.lr_decay_gamma is not None):
        cfg.SOLVER.GAMMA = args.lr_decay_gamma
    # Build the training roidb (regions-of-interest database) and data loader.
    timers = defaultdict(Timer)
    timers['roidb'].tic()
    (roidb, ratio_list, ratio_index) = combined_roidb_for_training(cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
    timers['roidb'].toc()
    train_size = len(roidb)
    logger.info('{:d} roidb entries'.format(train_size))
    logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
    sampler = MinibatchSampler(ratio_list, ratio_index)
    dataset = RoiDataLoader(roidb, cfg.MODEL.NUM_CLASSES, training=True)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, sampler=sampler, num_workers=cfg.DATA_LOADER.NUM_THREADS, collate_fn=collate_minibatch)
    assert_and_infer_cfg()
    maskRCNN = Generalized_RCNN()
    if cfg.CUDA:
        maskRCNN.cuda()
    # Biases get their own param group: LR scaled by (BIAS_DOUBLE_LR + 1) and
    # weight decay applied only when BIAS_WEIGHT_DECAY is set.
    bias_params = []
    nonbias_params = []
    for (key, value) in dict(maskRCNN.named_parameters()).items():
        if value.requires_grad:
            if ('bias' in key):
                bias_params.append(value)
            else:
                nonbias_params.append(value)
    params = [{'params': nonbias_params, 'lr': cfg.SOLVER.BASE_LR, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY}, {'params': bias_params, 'lr': (cfg.SOLVER.BASE_LR * (cfg.SOLVER.BIAS_DOUBLE_LR + 1)), 'weight_decay': (cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0)}]
    # NOTE(review): if cfg.SOLVER.TYPE is neither 'SGD' nor 'Adam', `optimizer`
    # is never bound and the code below raises NameError — confirm intended.
    if (cfg.SOLVER.TYPE == 'SGD'):
        optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
    elif (cfg.SOLVER.TYPE == 'Adam'):
        optimizer = torch.optim.Adam(params)
    if args.load_ckpt:
        load_name = args.load_ckpt
        logging.info('loading checkpoint %s', load_name)
        # The identity map_location loads checkpoint tensors to CPU storages.
        checkpoint = torch.load(load_name, map_location=(lambda storage, loc: storage))
        net_utils.load_ckpt(maskRCNN, checkpoint['model'])
        if args.resume:
            assert (checkpoint['iters_per_epoch'] == (train_size // args.batch_size)), 'iters_per_epoch should match for resume'
            misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
            # Resume either at the next step, or at the start of the next
            # epoch when the saved step was the epoch's last one.
            if (checkpoint['step'] == (checkpoint['iters_per_epoch'] - 1)):
                args.start_epoch = (checkpoint['epoch'] + 1)
                args.start_iter = 0
            else:
                args.start_epoch = checkpoint['epoch']
                args.start_iter = (checkpoint['step'] + 1)
        del checkpoint
        torch.cuda.empty_cache()
    if args.load_detectron:
        logging.info('loading Detectron weights %s', args.load_detectron)
        load_detectron_weight(maskRCNN, args.load_detectron)
    lr = optimizer.param_groups[0]['lr']
    # NOTE(review): cpu_keywords presumably keeps 'im_info'/'roidb' inputs on
    # CPU while scattering the rest across GPUs — confirm against mynn.
    maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'], minibatch=True)
    args.run_name = misc_utils.get_run_name()
    output_dir = misc_utils.get_output_dir(args, args.run_name)
    args.cfg_filename = os.path.basename(args.cfg_file)
    if (not args.no_save):
        if (not os.path.exists(output_dir)):
            os.makedirs(output_dir)
        # Persist the fully-resolved config and args next to the checkpoints.
        blob = {'cfg': yaml.dump(cfg), 'args': args}
        with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
            pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
        if args.use_tfboard:
            from tensorboardX import SummaryWriter
            tblogger = SummaryWriter(output_dir)
    maskRCNN.train()
    training_stats = TrainingStats(args, args.disp_interval, (tblogger if (args.use_tfboard and (not args.no_save)) else None))
    iters_per_epoch = int((train_size / args.batch_size))
    args.iters_per_epoch = iters_per_epoch
    ckpt_interval_per_epoch = (iters_per_epoch // args.ckpt_num_per_epoch)
    try:
        logger.info('Training starts !')
        args.step = args.start_iter
        global_step = ((iters_per_epoch * args.start_epoch) + args.step)
        for args.epoch in range(args.start_epoch, (args.start_epoch + args.num_epochs)):
            # Decay the LR at the start of each scheduled epoch (skipped when
            # resuming mid-epoch, i.e. start_iter != 0).
            if (args.lr_decay_epochs and (args.epoch == args.lr_decay_epochs[0]) and (args.start_iter == 0)):
                args.lr_decay_epochs.pop(0)
                net_utils.decay_learning_rate(optimizer, lr, cfg.SOLVER.GAMMA)
                lr *= cfg.SOLVER.GAMMA
            for (args.step, input_data) in zip(range(args.start_iter, iters_per_epoch), dataloader):
                # Wrap all inputs except the raw roidb in Variables.
                for key in input_data:
                    if (key != 'roidb'):
                        input_data[key] = list(map(Variable, input_data[key]))
                training_stats.IterTic()
                net_outputs = maskRCNN(**input_data)
                training_stats.UpdateIterStats(net_outputs)
                loss = net_outputs['total_loss']
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                training_stats.IterToc()
                # Periodic mid-epoch checkpointing and logging.
                if (((args.step + 1) % ckpt_interval_per_epoch) == 0):
                    net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)
                if ((args.step % args.disp_interval) == 0):
                    log_training_stats(training_stats, global_step, lr)
                global_step += 1
            # End of epoch: checkpoint and flush any pending log output.
            net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)
            args.start_iter = 0
            if ((iters_per_epoch % args.disp_interval) != 0):
                log_training_stats(training_stats, global_step, lr)
    except (RuntimeError, KeyboardInterrupt):
        # Save progress before printing the traceback on error or Ctrl-C.
        logger.info('Save ckpt on exception ...')
        net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)
        logger.info('Save ckpt done.')
        stack_trace = traceback.format_exc()
        print(stack_trace)
    finally:
        if (args.use_tfboard and (not args.no_save)):
            tblogger.close()
|
def log_training_stats(training_stats, global_step, lr):
    """Emit the current training statistics to the console and, when a
    tensorboard logger is attached, to tensorboard as well."""
    current = training_stats.GetStats(global_step, lr)
    log_stats(current, training_stats.misc_args)
    if training_stats.tblogger:
        training_stats.tb_log_stats(current, global_step)
|
def parse_args():
    """Parse and return the command-line arguments for training."""
    parser = argparse.ArgumentParser(description='Train a X-RCNN network')
    add = parser.add_argument  # local alias keeps the long option list compact
    add('--dataset', dest='dataset', required=True, help='Dataset to use')
    add('--cfg', dest='cfg_file', required=True, help='Config file for training (and optionally testing)')
    add('--set', dest='set_cfgs', help='Set config keys. Key value sequence seperate by whitespace.e.g. [key] [value] [key] [value]', default=[], nargs='+')
    add('--disp_interval', help='Display training info every N iterations', default=20, type=int)
    add('--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')
    add('--bs', dest='batch_size', help='Explicitly specify to overwrite the value comed from cfg_file.', type=int)
    add('--nw', dest='num_workers', help='Explicitly specify to overwrite number of workers to load data. Defaults to 4', type=int)
    add('--iter_size', help='Update once every iter_size steps, as in Caffe.', default=1, type=int)
    add('--o', dest='optimizer', help='Training optimizer.', default=None)
    add('--lr', help='Base learning rate.', default=None, type=float)
    add('--lr_decay_gamma', help='Learning rate decay rate.', default=None, type=float)
    add('--start_step', help='Starting step count for training epoch. 0-indexed.', default=0, type=int)
    add('--id', type=str, default='', help='The id you want to use to save the model or autoresume')
    add('--auto_resume', help='auto resume to training on a checkpoint', action='store_true')
    add('--resume', help='resume to training on a checkpoint', action='store_true')
    add('--no_save', help='do not save anything', action='store_true')
    add('--load_ckpt', help='checkpoint path to load')
    add('--load_detectron', help='path to the detectron weight pickle file')
    add('--use_tfboard', help='Use tensorflow tensorboard to log training info', action='store_true')
    return parser.parse_args()
|
def save_ckpt(output_dir, args, step, train_size, model, optimizer, dataiterator=None, final=False):
    """Save a training checkpoint (and optionally a final model-only snapshot).

    Persists model/optimizer state plus bookkeeping (step, train_size,
    batch_size, sampler state) so training can resume without skipping data.

    Args:
        output_dir: directory under which a 'ckpt' subdirectory is created.
        args: parsed CLI namespace; uses args.no_save and args.batch_size.
        step: current 0-indexed training step (embedded in the filename).
        train_size: number of training samples, stored for resume checks.
        model: network to snapshot; unwrapped when it is a mynn.DataParallel.
        optimizer: optimizer whose state_dict is saved alongside the model.
        dataiterator: optional DataLoader iterator; when given, the sampler
            state is rewound by the number of prefetched-but-unconsumed
            samples so a resume does not skip them.
        final: when True, additionally write 'model_final.pth' and terminate
            the process via os._exit(0).
    """
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if (not os.path.exists(ckpt_dir)):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    # Compute the state dict once and reuse it for both save calls (the
    # original computed it three times and discarded the first result).
    model_state_dict = model.state_dict()
    if dataiterator:
        if (dataiterator.num_workers > 0):
            # Worker processes have prefetched (send_idx - rcvd_idx) batches
            # that were never consumed; rewind the sampler by that many samples.
            sampler_state_dict = dataiterator.batch_sampler.sampler.state_dict(((dataiterator.send_idx - dataiterator.rcvd_idx) * args.batch_size))
        else:
            sampler_state_dict = dataiterator.batch_sampler.sampler.state_dict(0)
    else:
        sampler_state_dict = None
    torch.save({'step': step, 'train_size': train_size, 'batch_size': args.batch_size, 'model': model_state_dict, 'optimizer': optimizer.state_dict(), 'sampler': sampler_state_dict}, save_name)
    if final:
        save_name = os.path.join(ckpt_dir, 'model_final.pth')
        torch.save({'model': model_state_dict}, save_name)
    logger.info('save model: %s', save_name)
    if final:
        # Hard-exit so no further training work runs after the final save.
        os._exit(0)
|
def main():
    """Main entry point for step-based training.

    Parses CLI args, adapts the config to the available GPUs and requested
    batch size, builds the dataset/model/optimizer, optionally restores a
    checkpoint, and runs the training loop for cfg.SOLVER.MAX_ITER steps
    with LR warmup, stepwise decay, gradient accumulation and periodic
    checkpointing.
    """
    args = parse_args()
    print('Called with args:')
    print(args)
    # --- Device sanity checks -------------------------------------------
    if (not torch.cuda.is_available()):
        sys.exit('Need a CUDA device to run the code.')
    if (args.cuda or (cfg.NUM_GPUS > 0)):
        cfg.CUDA = True
    else:
        raise ValueError('Need Cuda device to run !')
    # --- Dataset selection: pick the roidb names and class count --------
    if (args.dataset == 'coco2017'):
        cfg.TRAIN.DATASETS = ('coco_2017_train',)
        cfg.MODEL.NUM_CLASSES = 81
    elif (args.dataset == 'keypoints_coco2017'):
        cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
        cfg.MODEL.NUM_CLASSES = 2
    elif (args.dataset == 'pascal_voc'):
        cfg.TRAIN.DATASETS = ('voc_2007_train', 'voc_2007_val')
        cfg.MODEL.NUM_CLASSES = 21
    elif (args.dataset == 'pascal_voc_0712'):
        cfg.TRAIN.DATASETS = ('voc_2007_train', 'voc_2007_val', 'voc_2012_train', 'voc_2012_val')
        cfg.MODEL.NUM_CLASSES = 21
    elif args.dataset.startswith('vg'):
        # Visual-Genome-style dataset names; NUM_CLASSES is presumably
        # supplied by the config file in this branch — confirm.
        cfg.TRAIN.DATASETS = (('%s_train' % args.dataset),)
    else:
        raise ValueError('Unexpected args.dataset: {}'.format(args.dataset))
    cfg_from_file(args.cfg_file)
    if (args.set_cfgs is not None):
        cfg_from_list(args.set_cfgs)
    # --- Adapt batch size, LR and schedule to the actual GPU count ------
    original_batch_size = (cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH)
    original_ims_per_batch = cfg.TRAIN.IMS_PER_BATCH
    original_num_gpus = cfg.NUM_GPUS
    if (args.batch_size is None):
        args.batch_size = original_batch_size
    cfg.NUM_GPUS = torch.cuda.device_count()
    assert ((args.batch_size % cfg.NUM_GPUS) == 0), ('batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS))
    cfg.TRAIN.IMS_PER_BATCH = (args.batch_size // cfg.NUM_GPUS)
    effective_batch_size = (args.iter_size * args.batch_size)
    print(('effective_batch_size = batch_size * iter_size = %d * %d' % (args.batch_size, args.iter_size)))
    print('Adaptive config changes:')
    print((' effective_batch_size: %d --> %d' % (original_batch_size, effective_batch_size)))
    print((' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS)))
    print((' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch, cfg.TRAIN.IMS_PER_BATCH)))
    # Linear LR scaling with the batch-size change.
    old_base_lr = cfg.SOLVER.BASE_LR
    cfg.SOLVER.BASE_LR *= (args.batch_size / original_batch_size)
    print('Adjust BASE_LR linearly according to batch_size change:\n    BASE_LR: {} --> {}'.format(old_base_lr, cfg.SOLVER.BASE_LR))
    # Rescale the decay steps / max iterations inversely with the
    # effective batch size so the same number of samples is seen.
    step_scale = (original_batch_size / effective_batch_size)
    old_solver_steps = cfg.SOLVER.STEPS
    old_max_iter = cfg.SOLVER.MAX_ITER
    cfg.SOLVER.STEPS = list(map((lambda x: int(((x * step_scale) + 0.5))), cfg.SOLVER.STEPS))
    cfg.SOLVER.MAX_ITER = int(((cfg.SOLVER.MAX_ITER * step_scale) + 0.5))
    print('Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n    SOLVER.STEPS: {} --> {}\n    SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps, cfg.SOLVER.STEPS, old_max_iter, cfg.SOLVER.MAX_ITER))
    if (cfg.FPN.FPN_ON and cfg.MODEL.FASTER_RCNN):
        cfg.FPN.RPN_COLLECT_SCALE = (cfg.TRAIN.IMS_PER_BATCH / original_ims_per_batch)
        print('Scale FPN rpn_proposals collect size directly propotional to the change of IMS_PER_BATCH:\n    cfg.FPN.RPN_COLLECT_SCALE: {}'.format(cfg.FPN.RPN_COLLECT_SCALE))
    # --- CLI overrides of config values ---------------------------------
    if (args.num_workers is not None):
        cfg.DATA_LOADER.NUM_THREADS = args.num_workers
    print(('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS))
    if (args.optimizer is not None):
        cfg.SOLVER.TYPE = args.optimizer
    if (args.lr is not None):
        cfg.SOLVER.BASE_LR = args.lr
    if (args.lr_decay_gamma is not None):
        cfg.SOLVER.GAMMA = args.lr_decay_gamma
    assert_and_infer_cfg()
    timers = defaultdict(Timer)
    if args.auto_resume:
        misc_utils.infer_load_ckpt(args)
    if (args.resume and ('model_final.pth' in args.load_ckpt)):
        logging.info('model_final.pth exists; no need to train!')
        return
    # --- Build roidb and data loader ------------------------------------
    timers['roidb'].tic()
    (roidb, ratio_list, ratio_index) = combined_roidb_for_training(cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
    timers['roidb'].toc()
    roidb_size = len(roidb)
    logger.info('{:d} roidb entries'.format(roidb_size))
    logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
    # Drop the tail so train_size is an exact multiple of the batch size.
    train_size = ((roidb_size // args.batch_size) * args.batch_size)
    batchSampler = BatchSampler(sampler=MinibatchSampler(ratio_list, ratio_index), batch_size=args.batch_size, drop_last=True)
    dataset = RoiDataLoader(roidb, cfg.MODEL.NUM_CLASSES, training=True)
    dataloader = torch.utils.data.DataLoader(dataset, batch_sampler=batchSampler, num_workers=cfg.DATA_LOADER.NUM_THREADS, collate_fn=collate_minibatch)
    # --- Build model and wire dataset-derived extras ---------------------
    maskRCNN = Generalized_RCNN()
    if ('word_embeddings' in dataset._extra_info):
        maskRCNN.Box_Outs.set_word_embedding(dataset._extra_info['word_embeddings'])
    if cfg.MODEL.IGNORE_CLASSES:
        if (cfg.MODEL.IGNORE_CLASSES == 'all'):
            dataset._extra_info['all'] = (dataset._extra_info['source'] + dataset._extra_info['target'])
        maskRCNN._ignore_classes = dataset._extra_info[cfg.MODEL.IGNORE_CLASSES]
        maskRCNN.Box_Outs._ignore_classes = dataset._extra_info[cfg.MODEL.IGNORE_CLASSES]
    if (cfg.MODEL.NUM_RELATIONS > 0):
        maskRCNN.Rel_Outs.relationship_dict = dataset._extra_info['relationships']
    # --- Optionally freeze parts of the network --------------------------
    if ((cfg.MODEL.NUM_RELATIONS > 0) and cfg.TRAIN.FIX_BACKBONE):
        for (key, value) in maskRCNN.named_parameters():
            if ('Rel_Outs' not in key):
                value.requires_grad = False
    if cfg.TRAIN.FIX_CLASSIFIER:
        for param in maskRCNN.Box_Outs.cls_score.parameters():
            param.requires_grad = False
    if ((cfg.FAST_RCNN.LOSS_TYPE == 'max_margin') and cfg.TRAIN.FIX_BACKBONE):
        for (key, value) in maskRCNN.named_parameters():
            if ('cls_score.mlp' not in key):
                value.requires_grad = False
    if cfg.CUDA:
        maskRCNN.cuda()
    # --- Partition parameters into optimizer groups ----------------------
    # GroupNorm weights/biases are identified by name so they can receive
    # their own weight-decay setting (WEIGHT_DECAY_GN).
    gn_param_nameset = set()
    for (name, module) in maskRCNN.named_modules():
        if isinstance(module, nn.GroupNorm):
            gn_param_nameset.add((name + '.weight'))
            gn_param_nameset.add((name + '.bias'))
    gn_params = []
    gn_param_names = []
    bias_params = []
    bias_param_names = []
    nonbias_params = []
    nonbias_param_names = []
    nograd_param_names = []
    proj_params = []
    proj_param_names = []
    for (key, value) in maskRCNN.named_parameters():
        if value.requires_grad:
            if ('bias' in key):
                bias_params.append(value)
                bias_param_names.append(key)
            elif (key in gn_param_nameset):
                gn_params.append(value)
                gn_param_names.append(key)
            elif ((cfg.FAST_RCNN.PROJ_LR_SCALE != 1) and ('cls_score.mlp' in key)):
                proj_params.append(value)
                proj_param_names.append(key)
            else:
                nonbias_params.append(value)
                nonbias_param_names.append(key)
        else:
            nograd_param_names.append(key)
    assert (((gn_param_nameset - set(nograd_param_names)) - set(bias_param_names)) == set(gn_param_names))
    # NOTE(review): group LRs start at 0 and are populated by the warmup
    # logic below via update_learning_rate; the 'lr_scale' entries are
    # strings — presumably informational only, confirm no consumer parses them.
    params = [{'params': nonbias_params, 'lr': 0, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY}, {'params': bias_params, 'lr': (0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1)), 'lr_scale': 'lambda x: x * (cfg.SOLVER.BIAS_DOUBLE_LR + 1)', 'weight_decay': (cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0)}, {'params': gn_params, 'lr': 0, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY_GN}, {'params': proj_params, 'lr': 0, 'lr_scale': 'lambda x: x * cfg.FAST_RCNN.PROJ_LR_SCALE', 'weight_decay': cfg.SOLVER.WEIGHT_DECAY}]
    param_names = [nonbias_param_names, bias_param_names, gn_param_names, proj_param_names]
    if (cfg.SOLVER.TYPE == 'SGD'):
        optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
    elif (cfg.SOLVER.TYPE == 'Adam'):
        optimizer = torch.optim.Adam(params)
    # --- Optionally restore a checkpoint or Detectron weights ------------
    if args.load_ckpt:
        load_name = args.load_ckpt
        logging.info('loading checkpoint %s', load_name)
        checkpoint = torch.load(load_name, map_location=(lambda storage, loc: storage))
        net_utils.load_ckpt(maskRCNN, checkpoint['model'])
        if args.resume:
            args.start_step = (checkpoint['step'] + 1)
            if ('train_size' in checkpoint):
                if (checkpoint['train_size'] != train_size):
                    print(('train_size value: %d different from the one in checkpoint: %d' % (train_size, checkpoint['train_size'])))
            misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
            batchSampler.sampler.load_state_dict(checkpoint.get('sampler', None))
        del checkpoint
        torch.cuda.empty_cache()
    if args.load_detectron:
        logging.info('loading Detectron weights %s', args.load_detectron)
        load_detectron_weight(maskRCNN, args.load_detectron)
    lr = optimizer.param_groups[0]['lr']
    maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'], minibatch=True)
    # --- Output directory / logging setup --------------------------------
    args.run_name = (misc_utils.get_run_name(args) + '_step')
    output_dir = misc_utils.get_output_dir(args, args.run_name)
    args.cfg_filename = os.path.basename(args.cfg_file)
    if (not args.no_save):
        if (not os.path.exists(output_dir)):
            os.makedirs(output_dir)
        blob = {'cfg': yaml.dump(cfg), 'args': args}
        with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
            pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
        if args.use_tfboard:
            from tensorboardX import SummaryWriter
            tblogger = SummaryWriter(output_dir)
    maskRCNN.train()
    dataiterator = iter(dataloader)
    CHECKPOINT_PERIOD = int((cfg.TRAIN.SNAPSHOT_ITERS / effective_batch_size))
    # Find the first LR-decay step still ahead of start_step.
    # NOTE(review): range starts at 1, skipping STEPS[0] — presumably the
    # Detectron convention where STEPS[0] is 0; confirm for this config.
    decay_steps_ind = None
    for i in range(1, len(cfg.SOLVER.STEPS)):
        if (cfg.SOLVER.STEPS[i] >= args.start_step):
            decay_steps_ind = i
            break
    if (decay_steps_ind is None):
        decay_steps_ind = len(cfg.SOLVER.STEPS)
    training_stats = TrainingStats(args, args.disp_interval, (tblogger if (args.use_tfboard and (not args.no_save)) else None))
    try:
        logger.info('Training starts !')
        step = args.start_step
        for step in range(args.start_step, cfg.SOLVER.MAX_ITER):
            # LR warmup for the first WARM_UP_ITERS steps, then a one-time
            # reset to BASE_LR at the boundary step.
            if (step < cfg.SOLVER.WARM_UP_ITERS):
                method = cfg.SOLVER.WARM_UP_METHOD
                if (method == 'constant'):
                    warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
                elif (method == 'linear'):
                    alpha = (step / cfg.SOLVER.WARM_UP_ITERS)
                    warmup_factor = ((cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha)) + alpha)
                else:
                    raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
                lr_new = (cfg.SOLVER.BASE_LR * warmup_factor)
                net_utils.update_learning_rate(optimizer, lr, lr_new)
                lr = optimizer.param_groups[0]['lr']
                assert (lr == lr_new)
            elif (step == cfg.SOLVER.WARM_UP_ITERS):
                net_utils.update_learning_rate(optimizer, lr, cfg.SOLVER.BASE_LR)
                lr = optimizer.param_groups[0]['lr']
                assert (lr == cfg.SOLVER.BASE_LR)
            # Stepwise LR decay at the configured SOLVER.STEPS.
            if ((decay_steps_ind < len(cfg.SOLVER.STEPS)) and (step == cfg.SOLVER.STEPS[decay_steps_ind])):
                logger.info('Decay the learning on step %d', step)
                lr_new = (lr * cfg.SOLVER.GAMMA)
                net_utils.update_learning_rate(optimizer, lr, lr_new)
                lr = optimizer.param_groups[0]['lr']
                assert (lr == lr_new)
                decay_steps_ind += 1
            training_stats.IterTic()
            optimizer.zero_grad()
            # Gradient accumulation over args.iter_size minibatches before
            # a single optimizer update.
            for inner_iter in range(args.iter_size):
                try:
                    input_data = next(dataiterator)
                except StopIteration:
                    # Exhausted the loader: start a fresh pass over the data.
                    dataiterator = iter(dataloader)
                    input_data = next(dataiterator)
                for key in input_data:
                    if (key != 'roidb'):
                        input_data[key] = list(map(Variable, input_data[key]))
                net_outputs = maskRCNN(**input_data)
                training_stats.UpdateIterStats(net_outputs, inner_iter)
                loss = net_outputs['total_loss']
                loss.backward()
            optimizer.step()
            training_stats.IterToc()
            training_stats.LogIterStats(step, lr)
            if (((step + 1) % CHECKPOINT_PERIOD) == 0):
                save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer, dataiterator)
        # Normal completion: write the final model snapshot (this exits).
        save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer, final=True)
        print('Checkpoint saved.')
    except (RuntimeError, KeyboardInterrupt):
        # Best-effort checkpoint on crash or Ctrl-C before re-reporting.
        logger.info('Save ckpt on exception ...')
        save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer, dataiterator)
        del dataiterator
        logger.info('Save ckpt done.')
        stack_trace = traceback.format_exc()
        print(stack_trace)
    finally:
        if (args.use_tfboard and (not args.no_save)):
            tblogger.close()
|
class AverageMeter:
    """Tracks running sums and counts per key and exposes their means."""

    def __init__(self, *keys):
        self.__data = {k: [0.0, 0] for k in keys}

    def add(self, dict):
        """Fold a {key: value} mapping into the running totals."""
        for k, v in dict.items():
            entry = self.__data.setdefault(k, [0.0, 0])
            entry[0] += v
            entry[1] += 1

    def get(self, *keys):
        """Return the mean for one key, or a tuple of means for several."""
        means = tuple((self.__data[k][0] / self.__data[k][1]) for k in keys)
        return means[0] if len(keys) == 1 else means

    def pop(self, key=None):
        """Reset one key and return its mean, or reset every key (returns None)."""
        if key is None:
            for k in self.__data:
                self.__data[k] = [0.0, 0]
        else:
            v = self.get(key)
            self.__data[key] = [0.0, 0]
            return v
|
def crf_inference(img, probs, t=10, scale_factor=1, labels=21):
    """Refine softmax probabilities with a dense CRF over the image.

    Returns an array of shape (labels, h, w) with the refined probabilities
    after t inference iterations.
    """
    h, w = img.shape[:2]
    crf = dcrf.DenseCRF2D(w, h, labels)
    crf.setUnaryEnergy(np.ascontiguousarray(unary_from_softmax(probs)))
    crf.addPairwiseGaussian(sxy=(3 / scale_factor), compat=3)
    crf.addPairwiseBilateral(sxy=(80 / scale_factor), srgb=13, rgbim=np.copy(np.ascontiguousarray(img)), compat=10)
    refined = crf.inference(t)
    return np.array(refined).reshape((labels, h, w))
|
def crf_inference_label(img, labels, t=10, n_labels=21, gt_prob=0.7):
    """Refine a hard label map with a dense CRF and return the argmax labels.

    `gt_prob` is the confidence assigned to the given labels when building
    the unary potentials.
    """
    h, w = img.shape[:2]
    crf = dcrf.DenseCRF2D(w, h, n_labels)
    crf.setUnaryEnergy(unary_from_labels(labels, n_labels, gt_prob=gt_prob, zero_unsure=False))
    crf.addPairwiseGaussian(sxy=3, compat=3)
    crf.addPairwiseBilateral(sxy=50, srgb=5, rgbim=np.ascontiguousarray(np.copy(img)), compat=10)
    refined = np.array(crf.inference(t)).reshape((n_labels, h, w))
    return np.argmax(refined, axis=0)
|
class DenseCRF(object):
    """Configurable dense-CRF post-processor, callable on (image, probmap)."""

    def __init__(self, iter_max, pos_w, pos_xy_std, bi_w, bi_xy_std, bi_rgb_std):
        self.iter_max = iter_max
        self.pos_w = pos_w
        self.pos_xy_std = pos_xy_std
        self.bi_w = bi_w
        self.bi_xy_std = bi_xy_std
        self.bi_rgb_std = bi_rgb_std

    def __call__(self, image, probmap):
        """Refine a (C, H, W) probability map against the image; returns (C, H, W)."""
        num_classes, height, width = probmap.shape
        unary = np.ascontiguousarray(utils.unary_from_softmax(probmap))
        image = np.ascontiguousarray(image)
        crf = dcrf.DenseCRF2D(width, height, num_classes)
        crf.setUnaryEnergy(unary)
        crf.addPairwiseGaussian(sxy=self.pos_xy_std, compat=self.pos_w)
        crf.addPairwiseBilateral(sxy=self.bi_xy_std, srgb=self.bi_rgb_std, rgbim=image, compat=self.bi_w)
        refined = crf.inference(self.iter_max)
        return np.array(refined).reshape((num_classes, height, width))
|
def multilabel_score(y_true, y_pred):
    """F1 score between ground-truth and predicted labels (delegates to sklearn).

    NOTE(review): f1_score is called with its default averaging — confirm
    this matches the multilabel use the name suggests.
    """
    score = metrics.f1_score(y_true, y_pred)
    return score
|
def _fast_hist(label_true, label_pred, num_classes):
mask = ((label_true >= 0) & (label_true < num_classes))
hist = np.bincount(((num_classes * label_true[mask].astype(int)) + label_pred[mask]), minlength=(num_classes ** 2))
return hist.reshape(num_classes, num_classes)
|
def scores(label_trues, label_preds, num_classes=21):
    """Compute semantic-segmentation metrics from paired label maps.

    Args:
        label_trues: iterable of ground-truth label arrays.
        label_preds: iterable of predicted label arrays (same shapes).
        num_classes: number of classes.

    Returns:
        dict with pixel accuracy ('pAcc'), mean class accuracy ('mAcc'),
        mean IoU over classes present in the ground truth ('miou'), and
        per-class IoU ('iou'; NaN for classes absent from both).
    """
    hist = np.zeros((num_classes, num_classes))
    for (lt, lp) in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), num_classes)
    acc = (np.diag(hist).sum() / hist.sum())
    # Mean of per-class recall; nanmean skips classes with no GT pixels.
    acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))
    iu = (np.diag(hist) / ((hist.sum(axis=1) + hist.sum(axis=0)) - np.diag(hist)))
    # Only average IoU over classes that actually appear in the ground truth.
    valid = (hist.sum(axis=1) > 0)
    mean_iu = np.nanmean(iu[valid])
    cls_iu = dict(zip(range(num_classes), iu))
    return {'pAcc': acc, 'mAcc': acc_cls, 'miou': mean_iu, 'iou': cls_iu}
|
def pseudo_scores(label_trues, label_preds, num_classes=21):
    """Segmentation metrics for pseudo labels, ignoring pixels predicted as 255.

    Pixels where the prediction is 255 (the "ignore" value) are excluded by
    copying 255 into the ground truth at those positions (so _fast_hist's
    range mask drops them) before zeroing them in the prediction.

    Args:
        label_trues: iterable of ground-truth label arrays.
        label_preds: iterable of predicted label arrays (may contain 255).
        num_classes: number of valid classes.

    Returns:
        dict with pixel accuracy ('pAcc'), mean class accuracy ('mAcc'),
        mean IoU over classes present in the ground truth ('miou'), and
        per-class IoU ('iou').
    """
    hist = np.zeros((num_classes, num_classes))
    for (lt, lp) in zip(label_trues, label_preds):
        # flatten() returns copies, so the mutations below do not touch
        # the caller's arrays.
        lt = lt.flatten()
        lp = lp.flatten()
        lt[(lp == 255)] = 255
        lp[(lp == 255)] = 0
        hist += _fast_hist(lt, lp, num_classes)
    acc = (np.diag(hist).sum() / hist.sum())
    acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))
    iu = (np.diag(hist) / ((hist.sum(axis=1) + hist.sum(axis=0)) - np.diag(hist)))
    valid = (hist.sum(axis=1) > 0)
    mean_iu = np.nanmean(iu[valid])
    cls_iu = dict(zip(range(num_classes), iu))
    return {'pAcc': acc, 'mAcc': acc_cls, 'miou': mean_iu, 'iou': cls_iu}
|
class PolyWarmupAdamW(torch.optim.AdamW):
    """AdamW whose LR linearly warms up from warmup_ratio * base_lr to
    base_lr over warmup_iter steps, then decays polynomially to zero by
    max_iter. Past max_iter the LR is left untouched."""

    def __init__(self, params, lr, weight_decay, betas, warmup_iter=None, max_iter=None, warmup_ratio=None, power=None):
        super().__init__(params, lr=lr, betas=betas, weight_decay=weight_decay, eps=1e-08)
        self.global_step = 0
        self.warmup_iter = warmup_iter
        self.warmup_ratio = warmup_ratio
        self.max_iter = max_iter
        self.power = power
        # Snapshot the configured base LR of every group.
        self.__init_lr = [group['lr'] for group in self.param_groups]

    def _rescale_lr(self, scale):
        # Set each group's LR to scale times its base LR.
        for group, base_lr in zip(self.param_groups, self.__init_lr):
            group['lr'] = base_lr * scale

    def step(self, closure=None):
        current = self.global_step
        if current < self.warmup_iter:
            # Linear ramp from warmup_ratio up to 1.
            self._rescale_lr(1 - (1 - current / self.warmup_iter) * (1 - self.warmup_ratio))
        elif current < self.max_iter:
            # Polynomial decay toward zero.
            self._rescale_lr((1 - current / self.max_iter) ** self.power)
        super().step(closure)
        self.global_step += 1
|
class PolyWarmupSGD(torch.optim.SGD):
    """SGD (momentum 0.9) with a polynomial warmup phase (scaled 10x above
    the base LR) followed by polynomial decay until max_iter.

    NOTE(review): `betas` is accepted for signature parity with the AdamW
    variant but is unused by SGD.
    """

    def __init__(self, params, lr, weight_decay, betas, warmup_iter=None, max_iter=None, warmup_ratio=None, power=None):
        super().__init__(params, lr=lr, momentum=0.9, weight_decay=weight_decay)
        self.global_step = 0
        self.warmup_iter = warmup_iter
        self.warmup_lr = warmup_ratio
        self.max_iter = max_iter
        self.power = power
        # Snapshot the configured base LR of every group.
        self.__init_lr = [group['lr'] for group in self.param_groups]

    def step(self, closure=None):
        current = self.global_step
        if current < self.warmup_iter:
            # Warmup: decaying polynomial multiplier, boosted by 10x.
            factor = (1 - current / self.warmup_iter) ** self.power
            for group, base_lr in zip(self.param_groups, self.__init_lr):
                group['lr'] = base_lr * factor * 10
        elif current < self.max_iter:
            # Polynomial decay over the post-warmup iterations.
            progress = (current - self.warmup_iter) / (self.max_iter - self.warmup_iter)
            factor = (1 - progress) ** self.power
            for group, base_lr in zip(self.param_groups, self.__init_lr):
                group['lr'] = base_lr * factor
        super().step(closure)
        self.global_step += 1
|
def get_kernel():
    """Return an (8, 1, 3, 3) conv weight where filter k selects the k-th
    non-center position of a 3x3 window (row-major order, center skipped)."""
    weight = torch.zeros(8, 1, 3, 3)
    k = 0
    for row in range(3):
        for col in range(3):
            if row == 1 and col == 1:
                continue  # skip the center position
            weight[k, 0, row, col] = 1
            k += 1
    return weight
|
class PAR(nn.Module):
    """Pixel-adaptive mask refinement.

    Iteratively re-estimates soft masks as affinity-weighted averages over
    dilated 3x3 neighborhoods, where the affinity combines local color
    similarity of the input image with a fixed positional prior.
    NOTE(review): resembles the PAR module used in weakly-supervised
    segmentation pipelines — confirm against the caller.
    """
    def __init__(self, dilations, num_iter):
        super().__init__()
        self.dilations = dilations
        self.num_iter = num_iter
        kernel = get_kernel()
        # Fixed (8, 1, 3, 3) neighbor-selection kernel; registered as a
        # buffer so it follows the module across devices.
        self.register_buffer('kernel', kernel)
        # Positional prior; NOT a buffer — moved to the input device
        # manually in forward().
        self.pos = self.get_pos()
        # Axis along which the stacked neighbors live in (b, c, n, h, w).
        self.dim = 2
        # w1: affinity bandwidth; w2: weight of the positional term.
        self.w1 = 0.3
        self.w2 = 0.01
    def get_dilated_neighbors(self, x):
        # Returns (b, c, 8 * len(dilations), h, w): for each dilation, the
        # 8 non-center neighbors of every pixel (replicate padding at edges).
        (b, c, h, w) = x.shape
        x_aff = []
        for d in self.dilations:
            _x_pad = F.pad(x, ([d] * 4), mode='replicate', value=0)
            # Fold channels into the batch so the single-channel kernel
            # applies to every channel independently.
            _x_pad = _x_pad.reshape((b * c), (- 1), _x_pad.shape[(- 2)], _x_pad.shape[(- 1)])
            _x = F.conv2d(_x_pad, self.kernel, dilation=d).view(b, c, (- 1), h, w)
            x_aff.append(_x)
        return torch.cat(x_aff, dim=2)
    def get_pos(self):
        # Distance prior per neighbor: sqrt(2) for the diagonal positions
        # (indices 0, 2, 5, 7 in get_kernel's ordering), 1 otherwise,
        # scaled by each dilation. Shape (1, 1, 8 * len(dilations), 1, 1).
        pos_xy = []
        ker = torch.ones(1, 1, 8, 1, 1)
        ker[(0, 0, 0, 0, 0)] = np.sqrt(2)
        ker[(0, 0, 2, 0, 0)] = np.sqrt(2)
        ker[(0, 0, 5, 0, 0)] = np.sqrt(2)
        ker[(0, 0, 7, 0, 0)] = np.sqrt(2)
        for d in self.dilations:
            pos_xy.append((ker * d))
        return torch.cat(pos_xy, dim=2)
    def forward(self, imgs, masks):
        # Bring the masks up to image resolution before refinement.
        masks = F.interpolate(masks, size=imgs.size()[(- 2):], mode='bilinear', align_corners=True)
        (b, c, h, w) = imgs.shape
        _imgs = self.get_dilated_neighbors(imgs)
        _pos = self.pos.to(_imgs.device)
        _imgs_rep = imgs.unsqueeze(self.dim).repeat(1, 1, _imgs.shape[self.dim], 1, 1)
        _pos_rep = _pos.repeat(b, 1, 1, h, w)
        _imgs_abs = torch.abs((_imgs - _imgs_rep))
        _imgs_std = torch.std(_imgs, dim=self.dim, keepdim=True)
        _pos_std = torch.std(_pos_rep, dim=self.dim, keepdim=True)
        # Color affinity: Gaussian-like in the std-normalized difference.
        aff = (- (((_imgs_abs / (_imgs_std + 1e-08)) / self.w1) ** 2))
        aff = aff.mean(dim=1, keepdim=True)
        # Positional affinity. NOTE(review): reuses self.w1 as bandwidth
        # here too — confirm a separate constant was not intended.
        pos_aff = (- (((_pos_rep / (_pos_std + 1e-08)) / self.w1) ** 2))
        aff = (F.softmax(aff, dim=2) + (self.w2 * F.softmax(pos_aff, dim=2)))
        # Iterated affinity-weighted averaging of the mask neighborhoods.
        for _ in range(self.num_iter):
            _masks = self.get_dilated_neighbors(masks)
            masks = (_masks * aff).sum(2)
        return masks
|
def conv3x3(in_planes, out_planes, stride=1, dilation=1, padding=1):
    """Build a bias-free 3x3 convolution layer."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=False,
    )
    return conv
|
def conv1x1(in_planes, out_planes, stride=1, dilation=1, padding=1):
    """Build a bias-free 1x1 convolution layer.

    NOTE(review): the default padding=1 grows the spatial size for a 1x1
    kernel; callers in this file pass padding=0 explicitly — confirm no
    caller relies on the default before changing it.
    """
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=False,
    )
    return conv
|
class LargeFOV(nn.Module):
    """Large field-of-view head (DeepLab-LargeFOV style): two dilated 3x3
    convolutions followed by a 1x1 classifier producing out_planes logits."""

    def __init__(self, in_planes, out_planes):
        super(LargeFOV, self).__init__()
        self.conv6 = conv3x3(in_planes=in_planes, out_planes=in_planes, padding=12, dilation=12)
        self.relu6 = nn.ReLU(inplace=True)
        self.conv7 = conv3x3(in_planes=in_planes, out_planes=in_planes, padding=12, dilation=12)
        self.relu7 = nn.ReLU(inplace=True)
        self.conv8 = conv1x1(in_planes=in_planes, out_planes=out_planes, padding=0)

    def _init_weights(self):
        """Kaiming-initialize conv weights; zero biases when present.

        Fix: the convs built by conv3x3/conv1x1 use bias=False, so the
        original unconditional nn.init.constant_(m.bias, 0) raised a
        TypeError on the None bias. The bias init is now guarded. Note this
        method is not invoked by __init__; callers must call it explicitly.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:  # convs here are bias-free
                    nn.init.constant_(m.bias, 0)
        return None

    def forward(self, x):
        """Apply conv6 -> relu -> conv7 -> relu -> conv8."""
        x = self.conv6(x)
        x = self.relu6(x)
        x = self.conv7(x)
        x = self.relu7(x)
        out = self.conv8(x)
        return out
|
# NOTE: removed trailing non-code residue ("Subsets and Splits ...") that was
# accidentally appended to the file and made it unparseable as Python.