code stringlengths 17 6.64M |
|---|
def _preprocess_subtract_imagenet_mean(inputs):
    """Subtracts the Imagenet mean RGB value from every pixel.

    Args:
      inputs: Image tensor whose last dimension holds 3 (RGB) channels.

    Returns:
      The mean-centered image tensor.
    """
    # Reshape the 3-vector so it broadcasts over [batch, height, width, 3].
    rgb_mean = tf.reshape(_MEAN_RGB, [1, 1, 1, 3])
    return inputs - rgb_mean
|
def _preprocess_zero_mean_unit_range(inputs):
    """Maps image values from [0, 255] to [-1, 1]."""
    # Scale first, then shift: x in [0, 255] -> x * (2/255) in [0, 2] -> [-1, 1].
    scale = 2.0 / 255.0
    return scale * tf.to_float(inputs) - 1.0
|
def mean_pixel(model_variant=None):
    """Gets the mean pixel value for a given model variant.

    Different model variants adopt different preprocessing functions, so the
    mean pixel value differs accordingly:
    (1) _preprocess_subtract_imagenet_mean: the Imagenet mean RGB is returned.
    (2) _preprocess_zero_mean_unit_range: [127.5, 127.5, 127.5] is returned.
    The returned values are chosen so that regions padded after preprocessing
    contain the value 0.

    Args:
      model_variant: Model variant (string) for feature extraction. For
        backwards compatibility, model_variant=None returns _MEAN_RGB.

    Returns:
      Mean pixel value as a list of three floats.
    """
    if model_variant is None:
        return _MEAN_RGB
    # Any named variant uses zero-mean/unit-range preprocessing.
    return [127.5, 127.5, 127.5]
|
def extract_features(images, output_stride=8, multi_grid=None, depth_multiplier=1.0, final_endpoint=None, model_variant=None, weight_decay=0.0001, reuse=None, is_training=False, fine_tune_batch_norm=False, regularize_depthwise=False, preprocess_images=True, num_classes=None, global_pool=False):
    """Extracts features by the particular model_variant.

    Args:
      images: A tensor of size [batch, height, width, channels].
      output_stride: The ratio of input to output spatial resolution.
      multi_grid: Employ a hierarchy of different atrous rates within network.
      depth_multiplier: Float multiplier for the depth (number of channels)
        for all convolution ops used in MobileNet.
      final_endpoint: The MobileNet endpoint to construct the network up to.
      model_variant: Model variant for feature extraction.
      weight_decay: The weight decay for model variables.
      reuse: Reuse the model variables or not.
      is_training: Is training or not.
      fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
      regularize_depthwise: Whether or not apply L2-norm regularization on the
        depthwise convolution weights.
      preprocess_images: Performs preprocessing on images or not. Defaults to
        True. Set to False if preprocessing will be done by other functions. We
        support two types of preprocessing: (1) Mean pixel subtraction and (2)
        Pixel values normalization to be [-1, 1].
      num_classes: Number of classes for image classification task. Defaults
        to None for dense prediction tasks.
      global_pool: Global pooling for image classification task. Defaults to
        False, since dense prediction tasks do not use this.

    Returns:
      features: A tensor of size [batch, feature_height, feature_width,
        feature_channels], where feature_height/feature_width are determined
        by the images height/width and output_stride.
      end_points: A dictionary from components of the network to the
        corresponding activation.

    Raises:
      ValueError: Unrecognized model variant.
    """
    # NOTE(review): model_variant=None raises TypeError from the `in` test
    # below rather than the documented ValueError — confirm intended.
    if ('xception' in model_variant):
        arg_scope = arg_scopes_map[model_variant](weight_decay=weight_decay, batch_norm_decay=0.9997, batch_norm_epsilon=0.001, batch_norm_scale=True, regularize_depthwise=regularize_depthwise)
        # Batch norm statistics are only updated when training AND fine-tuning
        # batch norm; otherwise the layers run in inference mode.
        (features, end_points) = get_network(model_variant, preprocess_images, arg_scope)(inputs=images, num_classes=num_classes, is_training=(is_training and fine_tune_batch_norm), global_pool=global_pool, output_stride=output_stride, regularize_depthwise=regularize_depthwise, multi_grid=multi_grid, reuse=reuse, scope=name_scope[model_variant])
    elif ('mobilenet' in model_variant):
        arg_scope = arg_scopes_map[model_variant](is_training=(is_training and fine_tune_batch_norm), weight_decay=weight_decay)
        (features, end_points) = get_network(model_variant, preprocess_images, arg_scope)(inputs=images, depth_multiplier=depth_multiplier, output_stride=output_stride, reuse=reuse, scope=name_scope[model_variant], final_endpoint=final_endpoint)
    else:
        raise ValueError(('Unknown model variant %s.' % model_variant))
    return (features, end_points)
|
def get_network(network_name, preprocess_images, arg_scope=None):
    """Gets the network.

    Args:
      network_name: Network name.
      preprocess_images: Preprocesses the images or not.
      arg_scope: Optional, arg_scope to build the network. If not provided the
        default arg_scope of the network would be used.

    Returns:
      A network function that is used to extract features.

    Raises:
      ValueError: network is not supported.
    """
    if network_name not in networks_map:
        raise ValueError('Unsupported network %s.' % network_name)
    # Fall back to the network's default arg_scope when none is supplied.
    scope_to_use = arg_scope or arg_scopes_map[network_name]()
    if preprocess_images:
        preprocess_fn = _PREPROCESS_FN[network_name]
    else:
        # Caller handles preprocessing elsewhere; pass images through as-is.
        preprocess_fn = lambda x: x
    base_func = networks_map[network_name]

    @functools.wraps(base_func)
    def network_fn(inputs, *args, **kwargs):
        with slim.arg_scope(scope_to_use):
            return base_func(preprocess_fn(inputs), *args, **kwargs)

    return network_fn
|
def flip_dim(tensor_list, prob=0.5, dim=1):
    """Randomly flips a dimension of the given tensors.

    The decision to flip is made once and shared by every tensor in the list:
    either all of them are flipped or none are. tf.random_flip_left_right /
    tf.random_flip_up_down are deliberately not used so that the flip
    probability is controllable and the same decision applies to all inputs.

    Args:
      tensor_list: A list of `Tensors` with the same number of dimensions.
      prob: The probability of a flip.
      dim: The dimension to flip, 0, 1, ..

    Returns:
      outputs: A list of the possibly flipped `Tensors` followed by a boolean
        indicator `Tensor` that is `True` if the inputs were flipped.

    Raises:
      ValueError: If dim is negative or greater than the rank of a `Tensor`.
    """
    coin = tf.random_uniform([])

    def _flip_all():
        results = []
        for tensor in tensor_list:
            rank = len(tensor.get_shape().as_list())
            if not 0 <= dim < rank:
                raise ValueError('dim must represent a valid dimension.')
            results.append(tf.reverse_v2(tensor, [dim]))
        return results

    is_flipped = tf.less_equal(coin, prob)
    outputs = tf.cond(is_flipped, _flip_all, lambda: tensor_list)
    # tf.cond may unwrap a single-element list; re-wrap so we can append.
    if not isinstance(outputs, (list, tuple)):
        outputs = [outputs]
    outputs.append(is_flipped)
    return outputs
|
def pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width, pad_value):
    """Pads the given image with the given pad_value.

    Works like tf.image.pad_to_bounding_box, except it can pad the image
    with any given arbitrary pad value and also handle images whose sizes are
    not known during graph construction.

    Args:
      image: 3-D tensor with shape [height, width, channels].
      offset_height: Number of rows of padding to add on top.
      offset_width: Number of columns of padding to add on the left.
      target_height: Height of output image.
      target_width: Width of output image.
      pad_value: Value to pad the image tensor with.

    Returns:
      3-D tensor of shape [target_height, target_width, channels].

    Raises:
      ValueError: If the shape of image is incompatible with the offset_* or
        target_* arguments.
    """
    rank = tf.rank(image)
    rank_check = tf.Assert(
        tf.equal(rank, 3),
        ['Wrong image tensor rank [Expected] [Actual]', 3, rank])
    with tf.control_dependencies([rank_check]):
        # Shift by pad_value so that tf.pad's zero-fill comes out as pad_value
        # once we add it back at the end.
        image -= pad_value
    shape = tf.shape(image)
    height, width = shape[0], shape[1]
    width_check = tf.Assert(
        tf.greater_equal(target_width, width), ['target_width must be >= width'])
    height_check = tf.Assert(
        tf.greater_equal(target_height, height), ['target_height must be >= height'])
    with tf.control_dependencies([width_check]):
        pad_right = target_width - offset_width - width
    with tf.control_dependencies([height_check]):
        pad_bottom = target_height - offset_height - height
    offset_check = tf.Assert(
        tf.logical_and(
            tf.greater_equal(pad_right, 0), tf.greater_equal(pad_bottom, 0)),
        ['target size not possible with the given target offsets'])
    with tf.control_dependencies([offset_check]):
        paddings = tf.stack([
            tf.stack([offset_height, pad_bottom]),
            tf.stack([offset_width, pad_right]),
            tf.stack([0, 0]),
        ])
        padded = tf.pad(image, paddings)
    # Undo the earlier shift so padded regions equal pad_value.
    return padded + pad_value
|
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.

    The method does not assume the input image size is statically known, but
    it does assume the input image rank is known.

    Args:
      image: an image of shape [height, width, channels].
      offset_height: a scalar tensor indicating the height offset.
      offset_width: a scalar tensor indicating the width offset.
      crop_height: the height of the cropped image.
      crop_width: the width of the cropped image.

    Returns:
      The cropped image.

    Raises:
      ValueError: if `image` doesn't have rank of 3.
      InvalidArgumentError: if the runtime rank is not 3 or if the image
        dimensions are less than the crop size.
    """
    dynamic_shape = tf.shape(image)
    static_dims = image.get_shape().as_list()
    if len(static_dims) != 3:
        raise ValueError('input must have rank of 3')
    channels = static_dims[2]
    rank_check = tf.Assert(
        tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.'])
    with tf.control_dependencies([rank_check]):
        target_shape = tf.stack([crop_height, crop_width, dynamic_shape[2]])
    size_check = tf.Assert(
        tf.logical_and(
            tf.greater_equal(dynamic_shape[0], crop_height),
            tf.greater_equal(dynamic_shape[1], crop_width)),
        ['Crop size greater than the image size.'])
    begin = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
    with tf.control_dependencies([size_check]):
        cropped = tf.slice(image, begin, target_shape)
    cropped = tf.reshape(cropped, target_shape)
    # Restore whatever static shape information we have.
    cropped.set_shape([crop_height, crop_width, channels])
    return cropped
|
def random_crop(image_list, crop_height, crop_width):
    """Crops the given list of images.

    The function applies the same crop to each image in the list. This can be
    effectively applied when there are multiple image inputs of the same
    dimension such as:

      image, depths, normals = random_crop([image, depths, normals], 120, 150)

    Args:
      image_list: a list of image tensors of the same dimension but possibly
        varying channel.
      crop_height: the new height.
      crop_width: the new width.

    Returns:
      the image_list with cropped images.

    Raises:
      ValueError: if there are multiple image inputs provided with different
        size or the images are smaller than the crop dimensions.
    """
    if (not image_list):
        raise ValueError('Empty image_list.')
    # Build a rank-3 assertion for every tensor before reading any shapes.
    rank_assertions = []
    for i in range(len(image_list)):
        image_rank = tf.rank(image_list[i])
        rank_assert = tf.Assert(tf.equal(image_rank, 3), ['Wrong rank for tensor %s [expected] [actual]', image_list[i].name, 3, image_rank])
        rank_assertions.append(rank_assert)
    # The first image defines the reference height/width all others must match.
    with tf.control_dependencies([rank_assertions[0]]):
        image_shape = tf.shape(image_list[0])
    image_height = image_shape[0]
    image_width = image_shape[1]
    crop_size_assert = tf.Assert(tf.logical_and(tf.greater_equal(image_height, crop_height), tf.greater_equal(image_width, crop_width)), ['Crop size greater than the image size.'])
    asserts = [rank_assertions[0], crop_size_assert]
    # Verify every remaining image matches the reference spatial size.
    for i in range(1, len(image_list)):
        image = image_list[i]
        asserts.append(rank_assertions[i])
        with tf.control_dependencies([rank_assertions[i]]):
            shape = tf.shape(image)
        height = shape[0]
        width = shape[1]
        height_assert = tf.Assert(tf.equal(height, image_height), ['Wrong height for tensor %s [expected][actual]', image.name, height, image_height])
        width_assert = tf.Assert(tf.equal(width, image_width), ['Wrong width for tensor %s [expected][actual]', image.name, width, image_width])
        asserts.extend([height_assert, width_assert])
    # Sample one offset (shared across all images) from the valid range.
    # maxval is exclusive, hence the +1 so a zero-margin crop is reachable.
    with tf.control_dependencies(asserts):
        max_offset_height = tf.reshape(((image_height - crop_height) + 1), [])
        max_offset_width = tf.reshape(((image_width - crop_width) + 1), [])
    offset_height = tf.random_uniform([], maxval=max_offset_height, dtype=tf.int32)
    offset_width = tf.random_uniform([], maxval=max_offset_width, dtype=tf.int32)
    return [_crop(image, offset_height, offset_width, crop_height, crop_width) for image in image_list]
|
def get_random_scale(min_scale_factor, max_scale_factor, step_size):
    """Gets a random scale value.

    Args:
      min_scale_factor: Minimum scale value.
      max_scale_factor: Maximum scale value.
      step_size: The step size from minimum to maximum value.

    Returns:
      A random scale value selected between minimum and maximum value.

    Raises:
      ValueError: min_scale_factor has unexpected value.
    """
    if min_scale_factor < 0 or min_scale_factor > max_scale_factor:
        raise ValueError('Unexpected value of min_scale_factor.')
    # Degenerate range: the scale is deterministic.
    if min_scale_factor == max_scale_factor:
        return tf.to_float(min_scale_factor)
    # No step size: sample continuously from the open-ended uniform range.
    # NOTE: this branch yields a tensor of shape [1], unlike the scalar
    # returned by the other branches.
    if step_size == 0:
        return tf.random_uniform([1], minval=min_scale_factor, maxval=max_scale_factor)
    # Discrete sampling: shuffle an evenly spaced grid and take the first entry.
    num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1)
    grid = tf.lin_space(min_scale_factor, max_scale_factor, num_steps)
    return tf.random_shuffle(grid)[0]
|
def randomly_scale_image_and_label(image, label=None, scale=1.0):
    """Scales image (bilinear) and label (nearest neighbor) by `scale`.

    Args:
      image: Image with shape [height, width, 3].
      label: Label with shape [height, width, 1].
      scale: The value to scale image and label.

    Returns:
      Scaled image and label.
    """
    # Identity scale: nothing to do.
    if scale == 1.0:
        return image, label
    shape = tf.shape(image)
    new_size = tf.to_int32(tf.to_float([shape[0], shape[1]]) * scale)
    # Resize ops expect a batch dimension; add it and strip it afterwards.
    image = tf.squeeze(
        tf.image.resize_bilinear(
            tf.expand_dims(image, 0), new_size, align_corners=True), [0])
    if label is not None:
        # Nearest neighbor keeps label values discrete.
        label = tf.squeeze(
            tf.image.resize_nearest_neighbor(
                tf.expand_dims(label, 0), new_size, align_corners=True), [0])
    return image, label
|
def resolve_shape(tensor, rank=None, scope=None):
    """Fully resolves the shape of a Tensor.

    Uses as much as possible the shape components already known during graph
    creation and resolves the remaining ones at runtime.

    Args:
      tensor: Input tensor whose shape we query.
      rank: The rank of the tensor, provided that we know it.
      scope: Optional name scope.

    Returns:
      shape: The full shape of the tensor, as a list whose entries are Python
        ints where statically known and scalar tensors otherwise.
    """
    with tf.name_scope(scope, 'resolve_shape', [tensor]):
        if rank is None:
            static_shape = tensor.get_shape().as_list()
        else:
            static_shape = tensor.get_shape().with_rank(rank).as_list()
        if None not in static_shape:
            return static_shape
        # Fill statically-unknown dimensions from the runtime shape.
        dynamic_shape = tf.shape(tensor)
        return [dim if dim is not None else dynamic_shape[i]
                for i, dim in enumerate(static_shape)]
|
def resize_to_range(image, label=None, min_size=None, max_size=None, factor=None, align_corners=True, label_layout_is_chw=False, scope=None, method=tf.image.ResizeMethod.BILINEAR):
    """Resizes image or label so their sides are within the provided range.

    The output size can be described by two cases:
    1. If the image can be rescaled so its minimum size is equal to min_size
       without the other side exceeding max_size, then do so.
    2. Otherwise, resize so the largest side is equal to max_size.

    An integer in `range(factor)` is added to the computed sides so that the
    final dimensions are multiples of `factor` plus one.

    Args:
      image: A 3D tensor of shape [height, width, channels].
      label: (optional) A 3D tensor of shape [height, width, channels]
        (default) or [channels, height, width] when label_layout_is_chw = True.
      min_size: (scalar) desired size of the smaller image side.
      max_size: (scalar) maximum allowed size of the larger image side. Note
        that the output dimension is no larger than max_size and may be
        slightly smaller than min_size when factor is not None.
      factor: Make output size multiple of factor plus one.
      align_corners: If True, exactly align all 4 corners of input and output.
      label_layout_is_chw: If true, the label has shape
        [channel, height, width]. We support this case because for some
        instance segmentation datasets, the instance segmentation is saved as
        [num_instances, height, width].
      scope: Optional name scope.
      method: Image resize method. Defaults to tf.image.ResizeMethod.BILINEAR.

    Returns:
      A list [resized_image, resized_label] (resized_label is None when no
      label is given); the image has been resized with the specified method so
      that min(new_height, new_width) == ceil(min_size) or
      max(new_height, new_width) == ceil(max_size).

    Raises:
      ValueError: If the image is not a 3D tensor.
    """
    with tf.name_scope(scope, 'resize_to_range', [image]):
        new_tensor_list = []
        min_size = tf.to_float(min_size)
        if (max_size is not None):
            max_size = tf.to_float(max_size)
            # Modify the max_size to be a multiple of factor plus 1 and make
            # sure the max dimension after resizing is no larger than max_size.
            if (factor is not None):
                max_size = ((max_size + ((factor - ((max_size - 1) % factor)) % factor)) - factor)
        [orig_height, orig_width, _] = resolve_shape(image, rank=3)
        orig_height = tf.to_float(orig_height)
        orig_width = tf.to_float(orig_width)
        orig_min_size = tf.minimum(orig_height, orig_width)
        # Case 1: scale so that the smaller side equals min_size.
        large_scale_factor = (min_size / orig_min_size)
        large_height = tf.to_int32(tf.ceil((orig_height * large_scale_factor)))
        large_width = tf.to_int32(tf.ceil((orig_width * large_scale_factor)))
        large_size = tf.stack([large_height, large_width])
        new_size = large_size
        if (max_size is not None):
            # Case 2: scale so that the larger side equals max_size; used when
            # case 1 would push the larger side past max_size.
            orig_max_size = tf.maximum(orig_height, orig_width)
            small_scale_factor = (max_size / orig_max_size)
            small_height = tf.to_int32(tf.ceil((orig_height * small_scale_factor)))
            small_width = tf.to_int32(tf.ceil((orig_width * small_scale_factor)))
            small_size = tf.stack([small_height, small_width])
            new_size = tf.cond((tf.to_float(tf.reduce_max(large_size)) > max_size), (lambda : small_size), (lambda : large_size))
        if (factor is not None):
            # Round each side up to the next (multiple of factor) + 1.
            new_size += ((factor - ((new_size - 1) % factor)) % factor)
        new_tensor_list.append(tf.image.resize_images(image, new_size, method=method, align_corners=align_corners))
        if (label is not None):
            if label_layout_is_chw:
                # Input label has shape [channel, height, width]; treat the
                # channel axis as the batch axis for the resize op.
                resized_label = tf.expand_dims(label, 3)
                resized_label = tf.image.resize_nearest_neighbor(resized_label, new_size, align_corners=align_corners)
                resized_label = tf.squeeze(resized_label, 3)
            else:
                # Nearest neighbor keeps label values discrete.
                resized_label = tf.image.resize_images(label, new_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, align_corners=align_corners)
            new_tensor_list.append(resized_label)
        else:
            new_tensor_list.append(None)
        return new_tensor_list
|
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
    """A named tuple describing an Xception block.

    Its parts are:
      scope: The scope of the block.
      unit_fn: The Xception unit function which takes as input a tensor and
        returns another tensor with the output of the Xception unit.
      args: A list of length equal to the number of units in the block. The
        list contains one dictionary for each unit in the block to serve as
        argument to unit_fn.
    """
|
def fixed_padding(inputs, kernel_size, rate=1):
    """Pads the input along the spatial dimensions independently of input size.

    Args:
      inputs: A tensor of size [batch, height_in, width_in, channels].
      kernel_size: The kernel to be used in the conv2d or max_pool2d
        operation. Should be a positive integer.
      rate: An integer, rate for atrous convolution.

    Returns:
      output: A tensor of size [batch, height_out, width_out, channels] with
      the input, either intact (if kernel_size == 1) or padded
      (if kernel_size > 1).
    """
    # Atrous convolution enlarges the kernel's effective footprint.
    effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
    total = effective_kernel - 1
    # Split the padding as evenly as possible, putting the extra pixel (if
    # total is odd) at the end.
    before = total // 2
    after = total - before
    return tf.pad(inputs, [[0, 0], [before, after], [before, after], [0, 0]])
|
@slim.add_arg_scope
def separable_conv2d_same(inputs, num_outputs, kernel_size, depth_multiplier, stride, rate=1, use_explicit_padding=True, regularize_depthwise=False, scope=None, **kwargs):
    """Strided 2-D separable convolution with 'SAME' padding.

    If stride > 1 and use_explicit_padding is True, then we do explicit zero-
    padding, followed by conv2d with 'VALID' padding.

    Note that

       net = separable_conv2d_same(inputs, num_outputs, 3,
         depth_multiplier=1, stride=stride)

    is equivalent to

       net = slim.separable_conv2d(inputs, num_outputs, 3,
         depth_multiplier=1, stride=1, padding='SAME')
       net = resnet_utils.subsample(net, factor=stride)

    whereas

       net = slim.separable_conv2d(inputs, num_outputs, 3, stride=stride,
         depth_multiplier=1, padding='SAME')

    is different when the input's height or width is even, which is why we add
    the current function.

    Consequently, if the input feature map has even height or width, setting
    `use_explicit_padding=False` will result in feature misalignment by one
    pixel along the corresponding dimension.

    Args:
      inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
      num_outputs: An integer, the number of output filters.
      kernel_size: An int with the kernel_size of the filters.
      depth_multiplier: The number of depthwise convolution output channels
        for each input channel. The total number of depthwise convolution
        output channels will be equal to `num_filters_in * depth_multiplier`.
      stride: An integer, the output stride.
      rate: An integer, rate for atrous convolution.
      use_explicit_padding: If True, use explicit padding to make the model
        fully compatible with the open source version, otherwise use the
        native Tensorflow 'SAME' padding.
      regularize_depthwise: Whether or not apply L2-norm regularization on the
        depthwise convolution weights.
      scope: Scope.
      **kwargs: additional keyword arguments to pass to slim.conv2d

    Returns:
      output: A 4-D tensor of size [batch, height_out, width_out, channels]
      with the convolution output.
    """
    def _separable_conv2d(padding):
        """Wrapper for separable conv2d: one fused op, regularizer applies to both parts."""
        return slim.separable_conv2d(inputs, num_outputs, kernel_size, depth_multiplier=depth_multiplier, stride=stride, rate=rate, padding=padding, scope=scope, **kwargs)
    def _split_separable_conv2d(padding):
        """Splits separable conv2d into depthwise and pointwise conv2d so the
        depthwise weights escape the regularizer applied via arg_scope."""
        outputs = slim.separable_conv2d(inputs, None, kernel_size, depth_multiplier=depth_multiplier, stride=stride, rate=rate, padding=padding, scope=(scope + '_depthwise'), **kwargs)
        return slim.conv2d(outputs, num_outputs, 1, scope=(scope + '_pointwise'), **kwargs)
    if ((stride == 1) or (not use_explicit_padding)):
        if regularize_depthwise:
            outputs = _separable_conv2d(padding='SAME')
        else:
            outputs = _split_separable_conv2d(padding='SAME')
    else:
        # Explicit zero-padding followed by VALID convolution reproduces SAME
        # behavior independently of the input's parity.
        inputs = fixed_padding(inputs, kernel_size, rate)
        if regularize_depthwise:
            outputs = _separable_conv2d(padding='VALID')
        else:
            outputs = _split_separable_conv2d(padding='VALID')
    return outputs
|
@slim.add_arg_scope
def xception_module(inputs, depth_list, skip_connection_type, stride, unit_rate_list=None, rate=1, activation_fn_in_separable_conv=False, regularize_depthwise=False, outputs_collections=None, scope=None):
    """An Xception module.

    The output of one Xception module is equal to the sum of `residual` and
    `shortcut`, where `residual` is the feature computed by three separable
    convolutions. The `shortcut` is the feature computed by 1x1 convolution
    with or without striding. In some cases, the `shortcut` path could be a
    simple identity function or none (i.e, no shortcut).

    Note that we replace the max pooling operations in the Xception module
    with another separable convolution with striding, since atrous rate is not
    properly supported in current TensorFlow max pooling implementation.

    Args:
      inputs: A tensor of size [batch, height, width, channels].
      depth_list: A list of three integers specifying the depth values of one
        Xception module.
      skip_connection_type: Skip connection type for the residual path. Only
        supports 'conv', 'sum', or 'none'.
      stride: The block unit's stride. Determines the amount of downsampling
        of the units output compared to its input.
      unit_rate_list: A list of three integers, determining the unit rate for
        each separable convolution in the xception module. If None, a unit
        rate of 1 is used for every separable convolution.
      rate: An integer, rate for atrous convolution.
      activation_fn_in_separable_conv: Includes activation function in the
        separable convolution or not.
      regularize_depthwise: Whether or not apply L2-norm regularization on the
        depthwise convolution weights.
      outputs_collections: Collection to add the Xception unit output.
      scope: Optional variable_scope.

    Returns:
      The Xception module's output.

    Raises:
      ValueError: If depth_list and unit_rate_list do not contain three
        elements, or unsupported skip connection type.
    """
    if (len(depth_list) != 3):
        raise ValueError('Expect three elements in depth_list.')
    if unit_rate_list:
        if (len(unit_rate_list) != 3):
            raise ValueError('Expect three elements in unit_rate_list.')
    else:
        # Bug fix: the loop below indexes unit_rate_list, so the documented
        # default of None previously raised TypeError. Default to a unit rate
        # of 1 per separable convolution (the identity multi-grid).
        unit_rate_list = [1, 1, 1]
    with tf.variable_scope(scope, 'xception_module', [inputs]) as sc:
        residual = inputs
        def _separable_conv(features, depth, kernel_size, depth_multiplier, regularize_depthwise, rate, stride, scope):
            # A pre-activation (relu before conv) is applied when the
            # separable conv itself has no activation function.
            if activation_fn_in_separable_conv:
                activation_fn = tf.nn.relu
            else:
                activation_fn = None
                features = tf.nn.relu(features)
            return separable_conv2d_same(features, depth, kernel_size, depth_multiplier=depth_multiplier, stride=stride, rate=rate, activation_fn=activation_fn, regularize_depthwise=regularize_depthwise, scope=scope)
        # Three stacked separable convolutions; only the last one strides.
        for i in range(3):
            residual = _separable_conv(residual, depth_list[i], kernel_size=3, depth_multiplier=1, regularize_depthwise=regularize_depthwise, rate=(rate * unit_rate_list[i]), stride=(stride if (i == 2) else 1), scope=('separable_conv' + str((i + 1))))
        if (skip_connection_type == 'conv'):
            # Project the input with a strided 1x1 conv so shapes match.
            shortcut = slim.conv2d(inputs, depth_list[(- 1)], [1, 1], stride=stride, activation_fn=None, scope='shortcut')
            outputs = (residual + shortcut)
        elif (skip_connection_type == 'sum'):
            outputs = (residual + inputs)
        elif (skip_connection_type == 'none'):
            outputs = residual
        else:
            raise ValueError('Unsupported skip connection type.')
        return slim.utils.collect_named_outputs(outputs_collections, sc.name, outputs)
|
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, output_stride=None, outputs_collections=None):
    """Stacks Xception blocks and controls output feature density.

    First, this function creates scopes for the Xception in the form of
    'block_name/unit_1', 'block_name/unit_2', etc.

    Second, this function allows the user to explicitly control the output
    stride, which is the ratio of the input to output spatial resolution.
    This is useful for dense prediction tasks such as semantic segmentation
    or object detection.

    Control of the output feature density is implemented by atrous
    convolution.

    Args:
      net: A tensor of size [batch, height, width, channels].
      blocks: A list of length equal to the number of Xception blocks. Each
        element is an Xception Block object describing the units in the block.
      output_stride: If None, then the output will be computed at the nominal
        network stride. If output_stride is not None, it specifies the
        requested ratio of input to output spatial resolution, which needs to
        be equal to the product of unit strides from the start up to some
        level of Xception. For example, if the Xception employs units with
        strides 1, 2, 1, 3, 4, 1, then valid values for the output_stride are
        1, 2, 6, 24 or None (which is equivalent to output_stride=24).
      outputs_collections: Collection to add the Xception block outputs.

    Returns:
      net: Output tensor with stride equal to the specified output_stride.

    Raises:
      ValueError: If the target output_stride is not valid.
    """
    # current_stride tracks the effective stride of the activations so far;
    # rate accumulates the atrous rate once striding is replaced by dilation.
    current_stride = 1
    rate = 1
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]) as sc:
            for (i, unit) in enumerate(block.args):
                if ((output_stride is not None) and (current_stride > output_stride)):
                    raise ValueError('The target output_stride cannot be reached.')
                with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]):
                    if ((output_stride is not None) and (current_stride == output_stride)):
                        # Target density reached: convert further striding
                        # into an equivalent increase of the atrous rate.
                        net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
                        rate *= unit.get('stride', 1)
                    else:
                        net = block.unit_fn(net, rate=1, **unit)
                        current_stride *= unit.get('stride', 1)
            net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
    if ((output_stride is not None) and (current_stride != output_stride)):
        raise ValueError('The target output_stride cannot be reached.')
    return net
|
def xception(inputs, blocks, num_classes=None, is_training=True, global_pool=True, keep_prob=0.5, output_stride=None, reuse=None, scope=None):
    """Generator for Xception models.

    This function generates a family of Xception models. See the xception_*()
    methods for specific model instantiations, obtained by selecting different
    block instantiations that produce Xception of various depths.

    Args:
      inputs: A tensor of size [batch, height_in, width_in, channels]. Must be
        floating point. If a pretrained checkpoint is used, pixel values
        should be the same as during training.
      blocks: A list of length equal to the number of Xception blocks. Each
        element is an Xception Block object describing the units in the block.
      num_classes: Number of predicted classes for classification tasks.
        If 0 or None, we return the features before the logit layer.
      is_training: whether batch_norm layers are in training mode.
      global_pool: If True, we perform global average pooling before computing
        the logits. Set to True for image classification, False for dense
        prediction.
      keep_prob: Keep probability used in the pre-logits dropout layer.
      output_stride: If None, then the output will be computed at the nominal
        network stride. If output_stride is not None, it specifies the
        requested ratio of input to output spatial resolution.
      reuse: whether or not the network and its variables should be reused. To
        be able to reuse 'scope' must be given.
      scope: Optional variable_scope.

    Returns:
      net: A rank-4 tensor of size [batch, height_out, width_out,
        channels_out]. If global_pool is False, then height_out and width_out
        are reduced by a factor of output_stride compared to the respective
        height_in and width_in, else both height_out and width_out equal one.
        If num_classes is 0 or None, then net is the output of the last
        Xception block, potentially after global average pooling. If
        num_classes is a non-zero integer, net contains the pre-softmax
        activations.
      end_points: A dictionary from components of the network to the
        corresponding activation.

    Raises:
      ValueError: If the target output_stride is not valid.
    """
    with tf.variable_scope(scope, 'xception', [inputs], reuse=reuse) as sc:
        end_points_collection = (sc.original_name_scope + 'end_points')
        with slim.arg_scope([slim.conv2d, slim.separable_conv2d, xception_module, stack_blocks_dense], outputs_collections=end_points_collection):
            with slim.arg_scope([slim.batch_norm], is_training=is_training):
                net = inputs
                if (output_stride is not None):
                    if ((output_stride % 2) != 0):
                        raise ValueError('The output_stride needs to be a multiple of 2.')
                    # The entry-flow root convolution below already strides by
                    # 2, so the remaining blocks only need half the stride.
                    output_stride /= 2
                net = resnet_utils.conv2d_same(net, 32, 3, stride=2, scope='entry_flow/conv1_1')
                net = resnet_utils.conv2d_same(net, 64, 3, stride=1, scope='entry_flow/conv1_2')
                net = stack_blocks_dense(net, blocks, output_stride)
                end_points = slim.utils.convert_collection_to_dict(end_points_collection, clear_collection=True)
                if global_pool:
                    # Global average pooling over the spatial dimensions.
                    net = tf.reduce_mean(net, [1, 2], name='global_pool', keepdims=True)
                    end_points['global_pool'] = net
                if num_classes:
                    net = slim.dropout(net, keep_prob=keep_prob, is_training=is_training, scope='prelogits_dropout')
                    # 1x1 conv acts as the fully connected logits layer.
                    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='logits')
                    end_points[(sc.name + '/logits')] = net
                    end_points['predictions'] = slim.softmax(net, scope='predictions')
                return (net, end_points)
|
def xception_block(scope, depth_list, skip_connection_type, activation_fn_in_separable_conv, regularize_depthwise, num_units, stride, unit_rate_list=None):
    """Builds a Block descriptor for one Xception block.

    Args:
      scope: Variable scope of the block.
      depth_list: Output depth of each separable conv within a unit.
      skip_connection_type: Residual-path type; one of 'conv', 'sum' or 'none'.
      activation_fn_in_separable_conv: Whether the separable convs include an
        activation function.
      regularize_depthwise: Whether to L2-regularize depthwise conv weights.
      num_units: Number of identically configured units stacked in the block.
      stride: Stride applied by the block's last unit (all others use 1).
      unit_rate_list: Optional three atrous rates for the unit; defaults to
        _DEFAULT_MULTI_GRID when None.

    Returns:
      A Block whose units all share the same configuration.
    """
    rates = _DEFAULT_MULTI_GRID if unit_rate_list is None else unit_rate_list
    unit_config = {
        'depth_list': depth_list,
        'skip_connection_type': skip_connection_type,
        'activation_fn_in_separable_conv': activation_fn_in_separable_conv,
        'regularize_depthwise': regularize_depthwise,
        'stride': stride,
        'unit_rate_list': rates,
    }
    return Block(scope, xception_module, [unit_config] * num_units)
|
def xception_65(inputs, num_classes=None, is_training=True, global_pool=True, keep_prob=0.5, output_stride=None, regularize_depthwise=False, multi_grid=None, reuse=None, scope='xception_65'):
    """Xception-65 model: entry flow, 16-unit middle flow and exit flow."""
    def block(name, depths, skip, act_in_sep, units, stride, rates=None):
        # Thin wrapper so the block table below stays readable; forwards the
        # shared regularize_depthwise flag.
        return xception_block(name, depth_list=depths, skip_connection_type=skip,
                              activation_fn_in_separable_conv=act_in_sep,
                              regularize_depthwise=regularize_depthwise,
                              num_units=units, stride=stride, unit_rate_list=rates)
    blocks = [
        block('entry_flow/block1', [128, 128, 128], 'conv', False, 1, 2),
        block('entry_flow/block2', [256, 256, 256], 'conv', False, 1, 2),
        block('entry_flow/block3', [728, 728, 728], 'conv', False, 1, 2),
        block('middle_flow/block1', [728, 728, 728], 'sum', False, 16, 1),
        block('exit_flow/block1', [728, 1024, 1024], 'conv', False, 1, 2),
        block('exit_flow/block2', [1536, 1536, 2048], 'none', True, 1, 1, multi_grid),
    ]
    return xception(inputs, blocks=blocks, num_classes=num_classes, is_training=is_training, global_pool=global_pool, keep_prob=keep_prob, output_stride=output_stride, reuse=reuse, scope=scope)
|
def xception_arg_scope(weight_decay=4e-05, batch_norm_decay=0.9997, batch_norm_epsilon=0.001, batch_norm_scale=True, weights_initializer_stddev=0.09, activation_fn=tf.nn.relu, regularize_depthwise=False, use_batch_norm=True):
    """Builds the default slim arg_scope for Xception models.

    Args:
      weight_decay: L2 weight decay applied to (non-depthwise) conv weights.
      batch_norm_decay: Moving-average decay for batch-norm statistics.
      batch_norm_epsilon: Variance epsilon for batch norm.
      batch_norm_scale: Whether batch norm uses an explicit gamma scale.
      weights_initializer_stddev: Stddev of the truncated-normal initializer.
      activation_fn: Activation function used throughout the network.
      regularize_depthwise: Whether depthwise conv weights are L2-regularized.
      use_batch_norm: Whether convolutions are followed by batch norm.

    Returns:
      An arg_scope configuring conv2d / separable_conv2d / batch_norm defaults.
    """
    bn_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
    }
    depthwise_regularizer = slim.l2_regularizer(weight_decay) if regularize_depthwise else None
    normalizer = slim.batch_norm if use_batch_norm else None
    initializer = tf.truncated_normal_initializer(stddev=weights_initializer_stddev)
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d], weights_initializer=initializer, activation_fn=activation_fn, normalizer_fn=normalizer):
        with slim.arg_scope([slim.batch_norm], **bn_params):
            with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(weight_decay)):
                # Depthwise regularization is opt-in, unlike regular convs.
                with slim.arg_scope([slim.separable_conv2d], weights_regularizer=depthwise_regularizer) as arg_sc:
                    return arg_sc
|
def get_fn_with_worst_iou(seq):
    """Finds the frame of a pre-computed sequence with the lowest IOU.

    Args:
        seq: Directory containing per-frame prediction PNGs for one sequence.

    Returns:
        Tuple (jpeg_path, gt_path, measure) for the worst-IOU frame, or
        (None, None, None) if no frame yielded a valid measure.
    """
    result_fn = None
    result_gt = None
    result_measure = None
    files = glob.glob(seq + '/*.png')
    seq_name = seq.split('/')[-1]
    for file in files:
        fname = file.split('/')[-1]
        img = imread(file) / 255
        gt_file = DAVIS_PATH + '/Annotations/480p/' + seq_name + '/' + fname
        gt = imread(gt_file) / 255
        measure = compute_measures_for_binary_segmentation_single_image(img, gt)
        if measure is None:
            # Bug fix: the original printed the undefined name `fn_file`
            # (NameError) and then fell through to `measure[IOU]` below
            # (TypeError on None). Log and skip invalid frames instead.
            print(file, gt_file, measure)
            continue
        if (result_measure is None) or (measure[IOU] < result_measure[IOU]):
            result_measure = measure
            result_fn = DAVIS_PATH + '/JPEGImages/480p/' + seq_name + '/' + fname.replace('.png', '.jpg')
            result_gt = gt_file
    return (result_fn, result_gt, result_measure)
|
class OSVOSWorst(FileListDataset):
    """DAVIS dataset that trains on the single worst-IOU frame per sequence."""

    def __init__(self, config, subset, name=NAME):
        super().__init__(config, name, subset, num_classes=2, default_path=DAVIS_PATH)
        self.iterative_training = config.bool('iterative_training', True)
        # Most of the pipeline is delegated to this wrapped dataset.
        self.eval_pascal_dataset = EvalPascalMaskedDataset(config, subset)
        self.previous_epoch_data = self.eval_pascal_dataset.previous_epoch_data
        self.save_images = config.bool('save_images', False)
        self.img_dir = config.string('img_dir', str(random.randrange(1, 10000)))

    def get_extraction_keys(self):
        return self.eval_pascal_dataset.get_extraction_keys()

    def postproc_example_before_assembly(self, tensors):
        return self.eval_pascal_dataset.postproc_example_before_assembly(tensors)

    def postproc_annotation(self, ann_filename, ann):
        # Normalize the annotation to [0, 1] before packaging it.
        mask = super().postproc_annotation(ann_filename, ann)
        mask = mask / 255
        return {DataKeys.SEGMENTATION_LABELS: mask, DataKeys.RAW_SEGMENTATION_LABELS: mask, DataKeys.IMAGE_FILENAMES: ann_filename}

    def use_segmentation_mask(self, res):
        self.eval_pascal_dataset.use_segmentation_mask(res)

    def read_inputfile_lists(self):
        """Selects, for every pre-computed sequence, its worst-IOU frame."""
        pre_computed = DAVIS_PATH + '/pre_computed/'
        imgs, gts, measures = [], [], []
        seqs = [os.path.join(pre_computed, f) for f in os.listdir(pre_computed) if os.path.isdir(os.path.join(pre_computed, f))]
        for seq in seqs:
            fn, gt, measure = get_fn_with_worst_iou(seq)
            measures.append(measure)
            imgs.append(fn)
            gts.append(gt)
        print(measures)
        ious = [m[IOU] for m in measures]
        print('Average IOU Initial: ', np.average(ious))
        return (imgs, gts)
|
class YamlConfig(dict):
    """Immutable, attribute-accessible configuration tree backed by YAML.

    Nested dicts are recursively wrapped in YamlConfig instances; each level
    carries a dotted `scope` string (e.g. 'training.optimizer.') used in error
    messages and printing. After construction the config is frozen: attribute
    and item assignment raise ValueError unless performed through a mutating
    helper (merge_with / update_param / update_from_args) that temporarily
    unlocks it.
    """

    def __init__(self, d, scope):
        # The immutability flag itself must remain assignable, so __setattr__
        # special-cases its name-mangled key.
        self.__immutable = False
        self.__scope = scope
        super(self.__class__, self).__init__()
        for (k, v) in d.items():
            if isinstance(v, dict):
                # Recurse, extending the dotted scope for nested sections.
                self.__setattr__(k, self.__class__(v, (self.__scope + k) + '.'))
            else:
                self.__setattr__(k, v)
        self.__immutable = True

    @property
    def scope(self):
        """Dotted path of this config node within the tree ('' at the root)."""
        return self.__scope

    def __getattr__(self, item):
        # Values are stored as dict items. Private (name-mangled) lookups may
        # return None so internal bookkeeping works before keys exist.
        attr = self.get(item, None)
        if (attr is None) and (not item.startswith(('_' + self.__class__.__name__) + '__')):
            raise ValueError("No attribute named '%s' found in config scope '%s'" % (item, self.__scope))
        return attr

    def __setattr__(self, key, value):
        if self.__immutable and (key != (('_' + self.__class__.__name__) + '__immutable')):
            raise ValueError('The config is immutable and cannot be modified')
        return self.__setitem__(key, value)

    def __setitem__(self, key, value):
        if self.__immutable and (key != (('_' + self.__class__.__name__) + '__immutable')):
            raise ValueError('The config is immutable and cannot be modified')
        return super(self.__class__, self).__setitem__(key, value)

    def __str__(self):
        return self.pretty()

    def __repr__(self):
        return self.pretty()

    def pretty(self, left_margin=0):
        """Renders the config as indented 'key: value' lines."""
        s = ''
        for (k, v) in self.items():
            if k.startswith(('_' + self.__class__.__name__) + '__'):
                continue  # skip internal bookkeeping entries
            for i in range(left_margin):
                s += ' '
            if isinstance(v, self.__class__):
                s = ((s + k) + ':\n') + str(v.pretty(left_margin + 2))
            else:
                s = (((s + k) + ': ') + str(v)) + '\n'
        return s

    def merge_with(self, opts, strict=True, verbose=False):
        """Recursively overwrites values from `opts` (a dict or YamlConfig).

        Args:
            opts: Mapping of overrides; nested dicts merge into sub-scopes.
            strict: If True, unknown keys raise ValueError; otherwise they are
                collected and returned.
            verbose: Unused; kept for interface compatibility.

        Returns:
            List of unexpected keys encountered (empty when strict=True).
        """
        self.__immutable = False
        unexpected_keys = []
        for (key, val) in opts.items():
            if key.startswith('_YamlConfig__'):
                continue
            if key not in self:
                if strict:
                    self.__immutable = True
                    raise ValueError("No option named '%s' exists in YamlConfig" % key)
                else:
                    unexpected_keys.append(key)
            else:
                value = self[key]
                if isinstance(value, self.__class__):
                    unexpected_keys.extend(value.merge_with(val, strict))
                else:
                    self[key] = val
        self.__immutable = True
        return unexpected_keys

    def merge_from_file(self, path, strict=True, verbose=False):
        """Merges overrides parsed from another YAML file."""
        other_cfg = self.__class__.load_from_file(path)
        return self.merge_with(other_cfg, strict=strict, verbose=verbose)

    def update_param(self, name, new_value):
        """Updates a single existing parameter, bypassing immutability.

        Args:
            name: Existing top-level parameter name in this scope.
            new_value: Value to assign.

        Raises:
            ValueError: If no parameter with that name exists.
        """
        if name not in self:
            raise ValueError("No parameter named '{}' exists".format(name))
        self.__immutable = False
        self[name] = new_value
        self.__immutable = True

    def update_from_args(self, args, verbose=False, prefix=''):
        """Updates values from an argparse.Namespace built by add_args_to_parser.

        Args:
            args: Parsed namespace; entries with value None are ignored.
            verbose: If True, print every applied update.
            prefix: Prefix that was prepended to the arg names; it is stripped
                (together with the joining '_') before matching keys.
        """
        self.__immutable = False
        for (arg_name, v) in vars(args).items():
            if v is None:
                continue
            arg_name = arg_name.lower().replace('-', '_')
            n_skip = (len(prefix) + 1) if prefix else 0
            arg_name = arg_name[n_skip:]
            for k in self:
                if k.lower() == arg_name:
                    self[k] = v
                    if verbose:
                        print('{}{} --> {}'.format(self.__scope, k, v))
        self.__immutable = True

    def add_args_to_parser(self, parser, recursive=False, prefix=''):
        """Registers every config entry as an optional command-line argument.

        Args:
            parser: argparse.ArgumentParser instance to populate.
            recursive: If True, entries of nested scopes are added as well.
            prefix: String prepended (joined with '_') to generated arg names.

        Returns:
            The populated parser.
        """
        def str2bool(v):
            if v.lower() in ('yes', 'true', 'on', 't', '1'):
                return True
            elif v.lower() in ('no', 'false', 'off', 'f', '0'):
                return False
            else:
                raise ValueError("Failed to cast '{}' to boolean type".format(v))
        parser.register('type', 'bool', str2bool)
        for (key, val) in self.items():
            if key.startswith(('_' + self.__class__.__name__) + '__'):
                continue
            if isinstance(val, self.__class__):
                if recursive:
                    # Bug fix: the original called the non-existent method
                    # `val.add_args(...)`, which raised ValueError via
                    # __getattr__ whenever recursive=True was used. It also
                    # fell through and tried to register the nested config
                    # itself as an argument; skip it explicitly instead.
                    val.add_args_to_parser(parser, True, prefix + self.__scope)
                continue
            prefix_ = (prefix + '_') if prefix else ''
            if isinstance(val, (list, tuple)):
                parser.add_argument('--{}{}'.format(prefix_, key.lower()), nargs='*', type=type(val[0]), required=False)
            elif isinstance(val, bool):
                parser.add_argument('--{}{}'.format(prefix_, key.lower()), type='bool', required=False)
            else:
                parser.add_argument('--{}{}'.format(prefix_, key.lower()), type=type(val), required=False)
        return parser

    def d(self):
        """Converts the instance (recursively) to a standard Python dict."""
        d = dict()
        for (k, v) in self.items():
            if k.startswith(('_' + self.__class__.__name__) + '__'):
                continue
            if isinstance(v, self.__class__):
                d[k] = v.d()
            else:
                d[k] = v
        return d

    @classmethod
    def load_from_file(cls, config_file_path):
        """Parses a YAML file into a YamlConfig rooted at an empty scope."""
        assert os.path.exists(config_file_path), ('config file not found at given path: %s' % config_file_path)
        # PyYAML >= 5.1 wants an explicit Loader argument. Bug fix: the
        # original checked `major >= 5 and minor >= 1`, which wrongly used the
        # legacy call for versions like 6.0.
        version = tuple(int(v) for v in yaml.__version__.split('.')[:2])
        with open(config_file_path, 'r') as readfile:
            if version >= (5, 1):
                d = yaml.load(readfile, Loader=yaml.FullLoader)
            else:
                d = yaml.load(readfile)
        yaml_config = cls(d, '')
        return yaml_config
|
class SparseDataset(Dataset):
    """Exposes a fixed-size random subset of a larger dataset.

    A hard-coded seed makes the subset reproducible across runs.
    """

    def __init__(self, dataset, num_samples):
        assert num_samples < len(dataset), 'SparseDataset is only applicable when num_samples < len(dataset)'
        self.dataset = dataset
        self.num_samples = num_samples
        random.seed(42)
        self.idxes = list(range(len(dataset)))
        random.shuffle(self.idxes)
        # Bug fix: the original sliced to len(dataset) — a no-op that kept every
        # shuffled index. Keep only the first num_samples indices as intended.
        self.idxes = self.idxes[:num_samples]

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        return self.dataset[self.idxes[index]]
|
class ConcatDataset(Dataset):
    """Concatenates several datasets, sampling each according to `weights`.

    Dataset i contributes round(weights[i] * total_samples) samples. Datasets
    larger than their quota are randomly subsampled via SparseDataset; smaller
    ones are repeated and padded with evenly spaced indices so the combined
    length is exactly `total_samples`.
    """

    def __init__(self, datasets, total_samples, weights=None):
        if weights is None:
            # Default: uniform weighting across datasets.
            weights = [1.0 / float(len(datasets)) for _ in range(len(datasets))]
        assert abs(sum(weights) - 1.0) < 1e-06, 'Sum of weights is {}. Should be 1'.format(sum(weights))
        self.id_mapping = []
        self.samples_per_dataset = []
        effective_datasets = []
        for (i, (wt, ds)) in enumerate(zip(weights, datasets)):
            assert 0.0 < wt <= 1.0
            num_samples_ds = int(round(wt * total_samples))
            if num_samples_ds < len(ds):
                # Subsample large datasets down to their quota.
                ds = SparseDataset(ds, num_samples_ds)
            # Repeat whole passes, then pad with evenly spaced indices so this
            # dataset contributes exactly num_samples_ds samples.
            repetitions = int(math.floor(num_samples_ds / float(len(ds))))
            idxes = sum([list(range(len(ds))) for _ in range(repetitions)], [])
            rem_idxes = torch.linspace(0, len(ds) - 1, num_samples_ds - len(idxes)).round().long().tolist()
            idxes += rem_idxes
            self.id_mapping.extend([(i, j) for j in idxes])
            self.samples_per_dataset.append(num_samples_ds)
            effective_datasets.append(ds)
        # Bug fix: index into the (possibly SparseDataset-wrapped) datasets.
        # The original stored the raw `datasets`, so the random subsampling was
        # silently discarded and the first num_samples_ds items were used.
        self.datasets = effective_datasets
        self.weights = weights
        assert len(self.id_mapping) == total_samples

    def __len__(self):
        return len(self.id_mapping)

    def __getitem__(self, index):
        (ds_idx, sample_idx) = self.id_mapping[index]
        return self.datasets[ds_idx][sample_idx]
|
class DavisDataLoader(VideoDataset):
    """Serves fixed-length training subsequences sampled from DAVIS videos."""
    def __init__(self, base_dir, vds_json_file, samples_to_create=(- 1), apply_augmentation=False, single_instance_duplication=False, background_as_ignore_region=True):
        """Builds the list of training subsequences.

        Args:
            base_dir: Root directory containing the video frames.
            vds_json_file: Path to the dataset description JSON.
            samples_to_create: Number of training subsequences to generate.
            apply_augmentation: Forwarded to the VideoDataset base class.
            single_instance_duplication: If True, samples with exactly one
                instance are augmented by duplicating that instance.
            background_as_ignore_region: If True, all non-foreground pixels are
                marked as ignore regions.
        """
        super(DavisDataLoader, self).__init__(base_dir, vds_json_file, cfg.INPUT.NUM_FRAMES, apply_augmentation)
        self.filter_zero_instance_frames()
        self.samples = self.create_training_subsequences(samples_to_create)
        self.instance_duplicator = InstanceDuplicator()
        self.single_instance_duplication = single_instance_duplication
        self.background_as_ignore_region = background_as_ignore_region
    def create_training_subsequences(self, num_subsequences):
        """Randomly samples `num_subsequences` clips of `self.clip_length` frames.

        Each clip covers a random temporal span (bounded by
        cfg.DATA.DAVIS.FRAME_GAP_LOWER/UPPER) within one sequence; longer
        sequences receive proportionally more samples.
        """
        frame_range = list(range(cfg.DATA.DAVIS.FRAME_GAP_LOWER, (cfg.DATA.DAVIS.FRAME_GAP_UPPER + 1)))
        subseq_length = self.clip_length
        min_sequence_length = (frame_range[0] + 1)
        sequences = [seq for seq in self.sequences if (len(seq) > min_sequence_length)]
        total_frames = sum([len(seq) for seq in sequences])
        # Allocate samples proportionally to sequence length (at least 1 each),
        # which intentionally oversamples; trimmed back down below.
        samples_per_seq = [max(1, int(math.ceil(((len(seq) / total_frames) * num_subsequences)))) for seq in sequences]
        subseq_span_range = frame_range.copy()
        subsequence_idxes = []
        for (sequence, num_samples) in zip(sequences, samples_per_seq):
            for _ in range(num_samples):
                # Random temporal span, clipped to the sequence length.
                subseq_span = min(random.choice(subseq_span_range), (len(sequence) - 1))
                max_start_idx = ((len(sequence) - subseq_span) - 1)
                assert (max_start_idx >= 0)
                start_idx = (0 if (max_start_idx == 0) else random.randint(0, max_start_idx))
                end_idx = (start_idx + subseq_span)
                # Spread subseq_length frame indices evenly over the span.
                sample_idxes = np.round(np.linspace(start_idx, end_idx, subseq_length)).astype(np.int32).tolist()
                # NOTE(review): assumes the span is always wide enough that
                # rounding never yields duplicate indices — confirm for small
                # FRAME_GAP_LOWER / large clip_length combinations.
                assert (len(set(sample_idxes)) == len(sample_idxes))
                subsequence_idxes.append((sequence.id, sample_idxes))
        assert (len(subsequence_idxes) >= num_subsequences), '{} should be >= {}'.format(len(subsequence_idxes), num_subsequences)
        # Trim the oversampled pool down to exactly num_subsequences.
        subsequence_idxes = random.sample(subsequence_idxes, num_subsequences)
        random.shuffle(subsequence_idxes)
        sequences = {seq.id: seq for seq in sequences}
        subsequences = [sequences[video_id].extract_subsequence(frame_idxes) for (video_id, frame_idxes) in subsequence_idxes]
        return subsequences
    def parse_sample_at(self, idx):
        """Loads one sample: images, per-instance masks, categories, metadata."""
        sample = self.samples[idx]
        images = sample.load_images()
        masks = sample.load_masks()
        if ((len(sample.instance_ids) == 1) and self.single_instance_duplication):
            # Synthesize a second instance by duplicating the single one.
            masks_flat = [mask[0] for mask in masks]
            (augmented_images, augmented_masks) = self.instance_duplicator(images, masks_flat)
            if (augmented_images is not None):
                images = augmented_images
                masks = list(zip(*augmented_masks))
        if self.background_as_ignore_region:
            # Every pixel not covered by any instance becomes an ignore pixel.
            fg_masks = [np.any(np.stack(masks_t, 0), 0) for masks_t in masks]
            ignore_masks = [BinaryMask((fg_mask == 0).astype(np.uint8)) for fg_mask in fg_masks]
        else:
            ignore_masks = [BinaryMask(np.zeros_like(masks[0][0], np.uint8)) for _ in range(len(masks))]
        masks = [[BinaryMask(mask) for mask in masks_t] for masks_t in masks]
        masks = BinaryMaskSequenceList(masks)
        # All DAVIS instances share one generic category (id 1).
        instance_categories = [1 for _ in range(masks.num_instances)]
        return (images, masks, instance_categories, {'ignore_masks': ignore_masks, 'seq_name': sample.id})
    def __len__(self):
        return len(self.samples)
|
class DistributedSampler(Sampler):
    """Restricts data loading to one process's exclusive subset of a dataset.

    Intended for use with torch.nn.parallel.DistributedDataParallel: each
    process creates its own DistributedSampler and thereby iterates a disjoint
    slice of the (epoch-shuffled, wrap-padded) index permutation.

    Note:
        The dataset is assumed to have constant size.

    Arguments:
        dataset: Dataset to sample from.
        num_replicas: Number of participating processes (defaults to the
            distributed world size).
        rank: Rank of the current process (defaults to the distributed rank).
        shuffle: Whether to shuffle indices per epoch.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Pad (by wrapping) so that every replica gets the same sample count.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # Seed with the epoch so all replicas build the same permutation.
            gen = torch.Generator()
            gen.manual_seed(self.epoch)
            order = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            order = torch.arange(len(self.dataset)).tolist()
        # Wrap-around padding, then carve out this replica's contiguous slice.
        order += order[:self.total_size - len(order)]
        assert len(order) == self.total_size
        start = self.num_samples * self.rank
        subset = order[start:start + self.num_samples]
        assert len(subset) == self.num_samples
        return iter(subset)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
|
def parse_generic_image_dataset(base_dir, dataset_json):
    """Loads a generic image dataset JSON into GenericImageSample objects.

    Args:
        base_dir: Directory that image paths in the JSON are relative to.
        dataset_json: Path to the dataset description JSON file.

    Returns:
        Tuple (samples, meta_info); meta_info['category_labels'] keys are
        converted from strings to ints.
    """
    with open(dataset_json, 'r') as fh:
        content = json.load(fh)
    meta_info = content['meta']
    meta_info['category_labels'] = {int(cat_id): label for (cat_id, label) in meta_info['category_labels'].items()}
    samples = [GenericImageSample(base_dir, entry) for entry in content['images']]
    return (samples, meta_info)
|
class GenericImageSample(object):
    """One annotated image: path, size, per-instance categories and RLE masks."""

    def __init__(self, base_dir, sample):
        self.height = sample['height']
        self.width = sample['width']
        self.path = os.path.join(base_dir, sample['image_path'])
        self.categories = [int(cat_id) for cat_id in sample['categories']]
        self.segmentations = sample['segmentations']
        # Optional RLE string covering ignore regions.
        self.ignore = sample.get('ignore', None)

    def _to_rle(self, counts_str):
        # COCO-style RLE dict for this image's dimensions.
        return {'size': (self.height, self.width), 'counts': counts_str.encode('utf-8')}

    def mask_areas(self):
        """Pixel area of every instance mask."""
        return [masktools.area(self._to_rle(seg)) for seg in self.segmentations]

    def load_image(self):
        """Reads the image from disk (BGR); raises ValueError if missing."""
        image = cv2.imread(self.path, cv2.IMREAD_COLOR)
        if image is None:
            raise ValueError('No image found at path: {}'.format(self.path))
        return image

    def load_ignore_mask(self):
        """Decodes the ignore-region mask, or returns None if absent."""
        if self.ignore is None:
            return None
        return np.ascontiguousarray(masktools.decode(self._to_rle(self.ignore)).astype(np.uint8))

    def load_masks(self):
        """Decodes every instance mask into a uint8 array."""
        return [np.ascontiguousarray(masktools.decode(self._to_rle(seg)).astype(np.uint8)) for seg in self.segmentations]

    def filter_categories(self, cat_ids_to_keep):
        """Drops instances whose category is not in `cat_ids_to_keep`."""
        kept = [(cat_id, seg) for (cat_id, seg) in zip(self.categories, self.segmentations) if cat_id in cat_ids_to_keep]
        (self.categories, self.segmentations) = zip(*kept)
|
def parse_generic_video_dataset(base_dir, dataset_json):
    """Loads a generic video dataset JSON into GenericVideoSequence objects.

    Category-label keys and per-frame instance ids are converted to ints, and
    each sequence's segmentation instance ids are verified against its
    category mapping.

    Returns:
        Tuple (seqs, meta_info).
    """
    with open(dataset_json, 'r') as fh:
        content = json.load(fh)
    meta_info = content['meta']
    meta_info['category_labels'] = {int(k): v for (k, v) in meta_info['category_labels'].items()}
    if 'segmentations' in content['sequences'][0]:
        for seq in content['sequences']:
            seq['categories'] = {int(iid): cat_id for (iid, cat_id) in seq['categories'].items()}
            seq['segmentations'] = [{int(iid): seg for (iid, seg) in seg_t.items()} for seg_t in seq['segmentations']]
            # Every instance id appearing in any frame must have a category.
            seg_iids = set(sum([list(seg_t.keys()) for seg_t in seq['segmentations']], []))
            assert seg_iids == set(seq['categories'].keys()), 'Instance ID mismatch: {} vs. {}'.format(seg_iids, set(seq['categories'].keys()))
    seqs = [GenericVideoSequence(seq, base_dir) for seq in content['sequences']]
    return (seqs, meta_info)
|
class GenericVideoSequence(object):
    """An annotated video: frame paths plus per-frame, per-instance RLE masks."""

    def __init__(self, seq_dict, base_dir):
        self.base_dir = base_dir
        self.image_paths = seq_dict['image_paths']
        self.image_dims = (seq_dict['height'], seq_dict['width'])
        self.id = seq_dict['id']
        # Both are optional: unannotated sequences carry neither.
        self.segmentations = seq_dict.get('segmentations', None)
        self.instance_categories = seq_dict.get('categories', None)

    @property
    def instance_ids(self):
        return list(self.instance_categories.keys())

    @property
    def category_labels(self):
        return [self.instance_categories[iid] for iid in self.instance_ids]

    def __len__(self):
        return len(self.image_paths)

    def load_images(self, frame_idxes=None):
        """Reads the given frames (all frames when None) from disk as BGR."""
        if frame_idxes is None:
            frame_idxes = list(range(len(self.image_paths)))
        images = []
        for t in frame_idxes:
            full_path = os.path.join(self.base_dir, self.image_paths[t])
            im = cv2.imread(full_path, cv2.IMREAD_COLOR)
            if im is None:
                raise ValueError('No image found at path: {}'.format(full_path))
            images.append(im)
        return images

    def load_masks(self, frame_idxes=None):
        """Decodes per-frame masks, one uint8 array per instance per frame."""
        if frame_idxes is None:
            frame_idxes = list(range(len(self.image_paths)))
        masks = []
        for t in frame_idxes:
            masks_t = []
            for iid in self.instance_ids:
                if iid in self.segmentations[t]:
                    rle = {'counts': self.segmentations[t][iid].encode('utf-8'), 'size': self.image_dims}
                    masks_t.append(np.ascontiguousarray(masktools.decode(rle).astype(np.uint8)))
                else:
                    # Instance absent in this frame: all-zero mask.
                    masks_t.append(np.zeros(self.image_dims, np.uint8))
            masks.append(masks_t)
        return masks

    def filter_categories(self, cat_ids_to_keep):
        # NOTE(review): this tests *instance ids* for membership in
        # cat_ids_to_keep (not the category ids) — preserved as-is; confirm
        # whether `cat_id in cat_ids_to_keep` was intended.
        keep_iids = sorted([iid for (iid, cat_id) in self.instance_categories.items() if iid in cat_ids_to_keep])
        for t in range(len(self)):
            self.segmentations[t] = {iid: seg for (iid, seg) in self.segmentations[t].items() if iid in keep_iids}

    def filter_zero_instance_frames(self):
        """Drops every frame that contains no instance annotation."""
        keep_ts = [t for t in range(len(self)) if len(self.segmentations[t]) > 0]
        self.image_paths = [self.image_paths[t] for t in keep_ts]
        self.segmentations = [self.segmentations[t] for t in keep_ts]

    def apply_category_id_mapping(self, mapping):
        """Remaps every instance's category id through `mapping`."""
        assert set(mapping.keys()) == set(self.instance_categories.keys())
        self.instance_categories = {iid: mapping[old_cat_id] for (iid, old_cat_id) in self.instance_categories.items()}

    def extract_subsequence(self, frame_idxes, new_id=''):
        """Builds a new sequence restricted to `frame_idxes`.

        Instances that never appear in the selected frames are dropped.
        """
        assert all([t in range(len(self)) for t in frame_idxes])
        keep_iids = set(sum([list(self.segmentations[t].keys()) for t in frame_idxes], []))
        subseq_dict = {
            'id': new_id if new_id else self.id,
            'height': self.image_dims[0],
            'width': self.image_dims[1],
            'image_paths': [self.image_paths[t] for t in frame_idxes],
            'categories': {iid: self.instance_categories[iid] for iid in keep_iids},
            'segmentations': [
                {iid: seg_t[iid] for iid in seg_t if iid in keep_iids}
                for (t, seg_t) in enumerate(self.segmentations) if t in frame_idxes
            ],
        }
        return self.__class__(subseq_dict, self.base_dir)
|
def visualize_generic_dataset(base_dir, dataset_json):
    """Interactively overlays instance masks on frames; press 'q' to quit."""
    from stemseg.utils.vis import overlay_mask_on_image, create_color_map
    seqs, meta_info = parse_generic_video_dataset(base_dir, dataset_json)
    category_names = meta_info['category_labels']
    cmap = create_color_map().tolist()
    cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
    for seq in seqs:
        # For long sequences only show frames 100-149.
        frame_idxes = list(range(100, 150)) if len(seq) > 100 else None
        images = seq.load_images(frame_idxes)
        masks = seq.load_masks(frame_idxes)
        category_labels = seq.category_labels
        print('[COLOR NAME] -> [CATEGORY NAME]')
        color_key_printed = False
        for image_t, masks_t in zip(images, masks):
            for i, (mask, cat_label) in enumerate(zip(masks_t, category_labels), 1):
                image_t = overlay_mask_on_image(image_t, mask, mask_color=cmap[i])
                if not color_key_printed:
                    # Matches the original: only the first instance's color
                    # mapping is printed per sequence.
                    print('{} -> {}'.format(cmap[i], category_names[cat_label]))
                    color_key_printed = True
            cv2.imshow('Image', image_t)
            if cv2.waitKey(0) == 113:  # ASCII 'q'
                exit(0)
|
class ImageToSeqAugmenter(object):
    """Turns a single image (plus masks) into augmented pseudo-video frames.

    `basic_augmenter` jitters appearance (brightness, hue/saturation), while
    `frame_shift_augmenter` applies geometric transforms (perspective, affine)
    and optional motion blur to simulate inter-frame camera motion.
    """

    def __init__(self, perspective=True, affine=True, motion_blur=True, brightness_range=((- 50), 50), hue_saturation_range=((- 15), 15), perspective_magnitude=0.12, scale_range=1.0, translate_range=None, rotation_range=((- 20), 20), motion_blur_kernel_sizes=(7, 9), motion_blur_prob=0.5):
        # Bug fix: translate_range used a mutable dict as a default argument;
        # use a None sentinel with the same effective default instead.
        if translate_range is None:
            translate_range = {'x': ((- 0.15), 0.15), 'y': ((- 0.15), 0.15)}
        self.basic_augmenter = iaa.SomeOf((1, None), [iaa.Add(brightness_range), iaa.AddToHueAndSaturation(hue_saturation_range)])
        transforms = []
        if perspective:
            transforms.append(iaa.PerspectiveTransform(perspective_magnitude))
        if affine:
            transforms.append(iaa.Affine(scale=scale_range, translate_percent=translate_range, rotate=rotation_range, order=1, backend='auto'))
        transforms = iaa.Sequential(transforms)
        transforms = [transforms]
        if motion_blur:
            blur = iaa.Sometimes(motion_blur_prob, iaa.OneOf([iaa.MotionBlur(ksize) for ksize in motion_blur_kernel_sizes]))
            transforms.append(blur)
        self.frame_shift_augmenter = iaa.Sequential(transforms)

    @staticmethod
    def condense_masks(instance_masks):
        """Collapses a list of binary masks into one int8 id map (0 = background)."""
        condensed_mask = np.zeros_like(instance_masks[0], dtype=np.int8)
        for (instance_id, mask) in enumerate(instance_masks, 1):
            condensed_mask = np.where(mask, instance_id, condensed_mask)
        return condensed_mask

    @staticmethod
    def expand_masks(condensed_mask, num_instances):
        """Inverse of condense_masks: splits an id map back into binary masks."""
        return [(condensed_mask == instance_id).astype(np.uint8) for instance_id in range(1, (num_instances + 1))]

    def __call__(self, image, masks=None):
        det_augmenter = self.frame_shift_augmenter.to_deterministic()
        if masks:
            (masks_np, is_binary_mask) = ([], [])
            for mask in masks:
                if isinstance(mask, BinaryMask):
                    # Bug fix: np.bool was deprecated in NumPy 1.20 and removed
                    # in 1.24; the builtin bool is the documented replacement.
                    masks_np.append(mask.tensor().numpy().astype(bool))
                    is_binary_mask.append(True)
                elif isinstance(mask, np.ndarray):
                    masks_np.append(mask.astype(bool))
                    is_binary_mask.append(False)
                else:
                    raise ValueError('Invalid mask type: {}'.format(type(mask)))
            num_instances = len(masks_np)
            masks_np = SegmentationMapsOnImage(self.condense_masks(masks_np), shape=image.shape[:2])
            # Re-seed before each deterministic call so the augmented image and
            # the validity mask receive the identical geometric transform.
            seed = int(datetime.now().strftime('%M%S%f')[(- 8):])
            imgaug.seed(seed)
            (aug_image, aug_masks) = det_augmenter(image=self.basic_augmenter(image=image), segmentation_maps=masks_np)
            imgaug.seed(seed)
            invalid_pts_mask = det_augmenter(image=np.ones((image.shape[:2] + (1,)), np.uint8)).squeeze(2)
            aug_masks = self.expand_masks(aug_masks.get_arr(), num_instances)
            aug_masks = [(BinaryMask(mask) if is_bm else mask) for (mask, is_bm) in zip(aug_masks, is_binary_mask)]
            return (aug_image, aug_masks, (invalid_pts_mask == 0))
        else:
            masks = [SegmentationMapsOnImage(np.ones(image.shape[:2], bool), shape=image.shape[:2])]
            (aug_image, invalid_pts_mask) = det_augmenter(image=image, segmentation_maps=masks)
            return (aug_image, (invalid_pts_mask.get_arr() == 0))
|
class InferenceImageLoader(Dataset):
    """Dataset that loads, resizes and normalizes images for inference.

    Accepts a list of file paths and/or pre-loaded numpy arrays.
    """

    def __init__(self, images):
        super().__init__()
        self.np_to_tensor = transforms.ToTorchTensor(format='CHW')
        self.images = images

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        entry = self.images[index]
        if isinstance(entry, str):
            # Entry is a path: read it from disk.
            entry = cv2.imread(entry, cv2.IMREAD_COLOR)
        elif not isinstance(entry, np.ndarray):
            raise ValueError('Unexpected type for image: {}'.format(type(entry)))
        height, width = entry.shape[:2]
        tensor = self.np_to_tensor(entry).float()
        # Resize to fit within the configured min/max dimensions.
        new_width, new_height, _ = compute_resize_params_2((width, height), cfg.INPUT.MIN_DIM, cfg.INPUT.MAX_DIM)
        tensor = F.interpolate(tensor.unsqueeze(0), (new_height, new_width), mode='bilinear', align_corners=False)
        tensor = scale_and_normalize_images(tensor, cfg.INPUT.IMAGE_MEAN, cfg.INPUT.IMAGE_STD, (not cfg.INPUT.BGR_INPUT), cfg.INPUT.NORMALIZE_TO_UNIT_SCALE)
        return (tensor.squeeze(0), (width, height), index)
|
def collate_fn(samples):
    """Collates (image, original_dims, index) tuples into an ImageList batch."""
    image_seqs, original_dims, idxes = zip(*samples)
    # Each image becomes a single-frame sequence.
    single_frame_seqs = [[im] for im in image_seqs]
    batch = ImageList.from_image_sequence_list(single_frame_seqs, original_dims)
    return (batch, idxes)
|
class IterationBasedBatchSampler(BatchSampler):
    """Re-iterates a wrapped BatchSampler until `num_iterations` batches were yielded."""

    def __init__(self, batch_sampler, num_iterations, start_iter=0):
        self.batch_sampler = batch_sampler
        self.num_iterations = num_iterations
        self.start_iter = start_iter

    def __iter__(self):
        iteration = self.start_iter
        while iteration <= self.num_iterations:
            # Propagate the iteration as an epoch so distributed samplers
            # reshuffle deterministically on every pass.
            inner_sampler = self.batch_sampler.sampler
            if hasattr(inner_sampler, 'set_epoch'):
                inner_sampler.set_epoch(iteration)
            for batch in self.batch_sampler:
                iteration += 1
                if iteration > self.num_iterations:
                    break
                yield batch

    def __len__(self):
        return self.num_iterations
|
class MOTSDataLoader(VideoDataset):
IGNORE_MASK_CAT_ID = 3
def __init__(self, base_dir, vds_json_file, samples_to_create, apply_augmentation=False):
super(MOTSDataLoader, self).__init__(base_dir, vds_json_file, cfg.INPUT.NUM_FRAMES, apply_augmentation)
split_sequences = []
for seq in self.sequences:
suffix = 1
current_gap_len = 0
current_seq_frame_idxes = []
for t in range(len(seq)):
instance_cats_t = set([seq.instance_categories[iid] for iid in seq.segmentations[t].keys()])
if (len((instance_cats_t - {self.IGNORE_MASK_CAT_ID})) == 0):
current_gap_len += 1
if ((current_gap_len == 6) and current_seq_frame_idxes):
split_sequences.append(seq.extract_subsequence(current_seq_frame_idxes, '{}_{}'.format(seq.id, str(suffix))))
suffix += 1
current_seq_frame_idxes = []
else:
current_gap_len = 0
current_seq_frame_idxes.append(t)
if current_seq_frame_idxes:
split_sequences.append(seq.extract_subsequence(current_seq_frame_idxes, '{}_{}'.format(seq.id, str(suffix))))
self.sequences = split_sequences
assert (samples_to_create > 0), 'Number of training samples is required for train mode'
self.samples = self.create_training_subsequences(samples_to_create)
def create_training_subsequences(self, num_subsequences):
frame_range = list(range(cfg.DATA.KITTI_MOTS.FRAME_GAP_LOWER, (cfg.DATA.KITTI_MOTS.FRAME_GAP_UPPER + 1)))
subseq_length = self.clip_length
min_sequence_length = (frame_range[0] + 1)
sequences = [seq for seq in self.sequences if (len(seq) > min_sequence_length)]
total_frames = sum([len(seq) for seq in sequences])
samples_per_seq = [max(1, int(math.ceil(((len(seq) / total_frames) * num_subsequences)))) for seq in sequences]
subseq_span_range = frame_range.copy()
subsequence_idxes = []
for (sequence, num_samples) in zip(sequences, samples_per_seq):
for _ in range(num_samples):
subseq_span = min(random.choice(subseq_span_range), (len(sequence) - 1))
max_start_idx = ((len(sequence) - subseq_span) - 1)
assert (max_start_idx >= 0)
start_idx = (0 if (max_start_idx == 0) else random.randint(0, max_start_idx))
end_idx = (start_idx + subseq_span)
sample_idxes = np.round(np.linspace(start_idx, end_idx, subseq_length)).astype(np.int32).tolist()
assert (len(set(sample_idxes)) == len(sample_idxes))
subsequence_idxes.append((sequence.id, sample_idxes))
assert (len(subsequence_idxes) >= num_subsequences), '{} should be >= {}'.format(len(subsequence_idxes), num_subsequences)
subsequence_idxes = random.sample(subsequence_idxes, num_subsequences)
random.shuffle(subsequence_idxes)
sequences = {seq.id: seq for seq in sequences}
subsequences = [sequences[video_id].extract_subsequence(frame_idxes) for (video_id, frame_idxes) in subsequence_idxes]
return subsequences
def parse_sample_at(self, idx):
    """Load and parse the training sample at the given index.

    Separates out the 'ignore' region (if present) from the per-instance masks and
    wraps the remaining masks into a BinaryMaskSequenceList.

    :param idx: int. Index into self.samples.
    :return: tuple(images, BinaryMaskSequenceList, category id list, meta-info dict).
    :raises ValueError: if the sample contains no instances.
    """
    sample = self.samples[idx]
    images = sample.load_images()
    masks = sample.load_masks()
    # Copy so that removing the ignore category below does not mutate the sample itself
    # (the same sample object could otherwise be corrupted for later accesses).
    instance_categories = list(sample.category_labels)
    # Bug fix: test membership against the class constant rather than the hard-coded
    # literal 3 (keeps this consistent with the .index()/.remove() calls below).
    if (self.IGNORE_MASK_CAT_ID in instance_categories):
        ignore_mask_idx = instance_categories.index(self.IGNORE_MASK_CAT_ID)
        instance_categories.remove(self.IGNORE_MASK_CAT_ID)
        # One ignore mask per frame, taken from the ignore instance's track.
        ignore_masks = [BinaryMask(masks_t[ignore_mask_idx]) for masks_t in masks]
        # Keep every other instance's masks.
        other_idxes = list(range(len(sample.instance_ids)))
        other_idxes.remove(ignore_mask_idx)
        masks = [[BinaryMask(masks_t[i]) for i in other_idxes] for masks_t in masks]
    else:
        # No ignore region annotated: use empty (all-zero) ignore masks.
        (height, width) = images[0].shape[:2]
        ignore_masks = [BinaryMask(np.zeros((height, width), np.uint8)) for _ in range(len(images))]
        masks = [[BinaryMask(mask) for mask in masks_t] for masks_t in masks]
    masks = BinaryMaskSequenceList(masks)
    if (masks.num_instances == 0):
        raise ValueError('No instances exist in the masks (seq: {})'.format(sample.id))
    return (images, masks, instance_categories, {'seq_name': sample.id, 'ignore_masks': ignore_masks})
def __len__(self):
    """Return the number of training clips sampled for this dataset."""
    return len(self.samples)
|
def _get_env_var(varname):
value = os.getenv(varname)
if (not value):
raise EnvironmentError("Required environment variable '{}' is not set.".format(varname))
return value
|
class CocoPaths(object):
    """Static accessor for COCO dataset paths (resolved from environment variables)."""
    def __init__(self):
        # Guard against instantiation: this class only groups static path accessors.
        raise ValueError("Static class 'CocoPaths' should not be instantiated")
    @staticmethod
    def images_dir():
        """Directory containing the COCO training images."""
        return _get_env_var('COCO_TRAIN_IMAGES_DIR')
    @staticmethod
    def ids_file():
        """Path to the pre-processed COCO annotations JSON file."""
        return os.path.join(_get_env_var('STEMSEG_JSON_ANNOTATIONS_DIR'), 'coco.json')
|
class YoutubeVISPaths(object):
    """Static accessor for YouTube-VIS dataset paths (resolved from environment variables)."""
    def __init__(self):
        # Guard against instantiation: this class only groups static path accessors.
        raise ValueError("Static class 'YoutubeVISPaths' should not be instantiated")
    @staticmethod
    def training_base_dir():
        """Base directory of the YouTube-VIS training split."""
        return os.path.join(_get_env_var('YOUTUBE_VIS_BASE_DIR'), 'train')
    @staticmethod
    def val_base_dir():
        """Base directory of the YouTube-VIS validation split."""
        return os.path.join(_get_env_var('YOUTUBE_VIS_BASE_DIR'), 'valid')
    @staticmethod
    def train_vds_file():
        """Path to the pre-processed training-split annotations JSON file."""
        return os.path.join(_get_env_var('STEMSEG_JSON_ANNOTATIONS_DIR'), 'youtube_vis_train.json')
    @staticmethod
    def val_vds_file():
        """Path to the pre-processed validation-split annotations JSON file."""
        return os.path.join(_get_env_var('STEMSEG_JSON_ANNOTATIONS_DIR'), 'youtube_vis_val.json')
|
class DavisUnsupervisedPaths(object):
    """Static accessor for DAVIS (unsupervised track) dataset paths (resolved from environment variables)."""
    def __init__(self):
        # Guard against instantiation: this class only groups static path accessors.
        raise ValueError("Static class 'DavisUnsupervisedPaths' should not be instantiated")
    @staticmethod
    def trainval_base_dir():
        """Base directory containing both DAVIS train and val data."""
        return _get_env_var('DAVIS_BASE_DIR')
    @staticmethod
    def train_vds_file():
        """Path to the pre-processed training-split annotations JSON file."""
        return os.path.join(_get_env_var('STEMSEG_JSON_ANNOTATIONS_DIR'), 'davis_train.json')
    @staticmethod
    def val_vds_file():
        """Path to the pre-processed validation-split annotations JSON file."""
        return os.path.join(_get_env_var('STEMSEG_JSON_ANNOTATIONS_DIR'), 'davis_val.json')
|
class KITTIMOTSPaths(object):
    """Static accessor for KITTI-MOTS dataset paths (resolved from environment variables)."""
    def __init__(self):
        # Guard against instantiation: this class only groups static path accessors.
        raise ValueError("Static class 'KITTIMOTSPaths' should not be instantiated")
    @staticmethod
    def train_images_dir():
        # NOTE(review): despite the name, this returns the dataset *base* directory
        # env var — confirm callers expect the base dir here.
        return _get_env_var('KITTIMOTS_BASE_DIR')
    @staticmethod
    def train_vds_file():
        """Path to the pre-processed training-split annotations JSON file."""
        return os.path.join(_get_env_var('STEMSEG_JSON_ANNOTATIONS_DIR'), 'kittimots_train.json')
    @staticmethod
    def val_vds_file():
        """Path to the pre-processed validation-split annotations JSON file."""
        return os.path.join(_get_env_var('STEMSEG_JSON_ANNOTATIONS_DIR'), 'kittimots_val.json')
|
class MapillaryPaths(object):
    """Static accessor for Mapillary dataset paths (resolved from environment variables)."""
    def __init__(self):
        # Guard against instantiation: this class only groups static path accessors.
        raise ValueError("Static class 'MapillaryPaths' should not be instantiated")
    @staticmethod
    def images_dir():
        """Directory containing the Mapillary images."""
        return _get_env_var('MAPILLARY_IMAGES_DIR')
    @staticmethod
    def ids_file():
        """Path to the pre-processed Mapillary annotations JSON file."""
        return os.path.join(_get_env_var('STEMSEG_JSON_ANNOTATIONS_DIR'), 'mapillary.json')
|
class PascalVOCPaths(object):
    """Static accessor for Pascal VOC dataset paths (resolved from environment variables)."""
    def __init__(self):
        # Guard against instantiation: this class only groups static path accessors.
        raise ValueError("Static class 'PascalVOCPaths' should not be instantiated")
    @staticmethod
    def images_dir():
        """Directory containing the Pascal VOC images."""
        # Fix: the single-argument os.path.join was a no-op wrapper; return the env var directly
        # (matches the style of the sibling *Paths classes).
        return _get_env_var('PASCAL_VOC_IMAGES_DIR')
    @staticmethod
    def ids_file():
        """Path to the pre-processed Pascal VOC annotations JSON file."""
        return os.path.join(_get_env_var('STEMSEG_JSON_ANNOTATIONS_DIR'), 'pascal_voc.json')
|
class VideoDataset(Dataset):
    """Base class for video datasets that yield fixed-length clips with instance masks.

    Subclasses must implement parse_sample_at(). __getitem__ applies optional
    augmentations (horizontal flip, per-frame geometric/blur augmentation, temporal
    reversal), resizes the clip to the configured input dimensions and returns
    normalized image tensors plus mask/category targets.
    """
    def __init__(self, base_dir, vds_json, clip_length, apply_augmentations, **kwargs):
        """
        :param base_dir: Base directory of the dataset images.
        :param vds_json: Path to the dataset's annotations JSON file.
        :param clip_length: Number of frames per training clip.
        :param apply_augmentations: Whether to apply random augmentations in __getitem__.
        :param kwargs: Optional overrides for the ImageToSeqAugmenter settings.
        """
        super().__init__()
        (self.sequences, self.meta_info) = parse_generic_video_dataset(base_dir, vds_json)
        self.clip_length = clip_length
        self.apply_augmentations = apply_augmentations
        self.np_to_tensor = transforms.BatchImageTransform(transforms.ToTorchTensor(format='CHW'))
        # 2-frame clips tolerate stronger per-frame augmentation; longer clips use a
        # milder setting (plus translation) to keep the sequence temporally coherent.
        if (self.clip_length == 2):
            self.augmenter = ImageToSeqAugmenter(perspective=kwargs.get('perspective_transform', False), affine=kwargs.get('affine_transform', True), motion_blur=kwargs.get('motion_blur', True), motion_blur_prob=kwargs.get('motion_blur_prob', 0.3), motion_blur_kernel_sizes=kwargs.get('motion_blur_kernel_sizes', (5, 7)), scale_range=kwargs.get('scale_range', (0.8, 1.2)), rotation_range=kwargs.get('rotation_range', ((- 15), 15)))
        else:
            self.augmenter = ImageToSeqAugmenter(perspective=kwargs.get('perspective_transform', False), affine=kwargs.get('affine_transform', False), motion_blur=kwargs.get('motion_blur', False), motion_blur_prob=kwargs.get('motion_blur_prob', 0.3), motion_blur_kernel_sizes=kwargs.get('motion_blur_kernel_sizes', (5, 7)), scale_range=kwargs.get('scale_range', (0.9, 1.1)), rotation_range=kwargs.get('rotation_range', ((- 7), 7)), translate_range=kwargs.get('translation_range', {'x': ((- 0.1), 0.1), 'y': ((- 0.1), 0.1)}))
    def filter_zero_instance_frames(self):
        """Drop frames without instances from every sequence, then drop now-empty sequences."""
        for seq in self.sequences:
            seq.filter_zero_instance_frames()
        self.sequences = [seq for seq in self.sequences if (len(seq) > 0)]
    def filter_categories(self, cat_ids_to_keep):
        """Keep only the given category IDs in every sequence, then drop now-empty sequences."""
        for seq in self.sequences:
            seq.filter_categories(cat_ids_to_keep)
        self.sequences = [seq for seq in self.sequences if (len(seq) > 0)]
    def parse_sample_at(self, idx):
        """Return (images, masks, category_labels, meta_info) for sample `idx`."""
        raise NotImplementedError('This method must be implemented by the derived class.')
    def __getitem__(self, index):
        (images, masks, category_labels, meta_info) = self.parse_sample_at(index)
        ignore_masks = meta_info['ignore_masks']
        (image_height, image_width) = images[0].shape[:2]
        (images, masks, ignore_masks) = self.apply_random_flip(images, masks, ignore_masks)
        (images, masks, ignore_masks, invalid_pts_mask) = self.apply_random_augmentation(images, masks, ignore_masks)
        # Black out pixels that the geometric augmentation mapped from outside the image.
        for t in range(self.clip_length):
            images[t] = np.where(invalid_pts_mask[t][..., None], 0, images[t])
        (images, masks, ignore_masks) = self.apply_random_sequence_reversal(images, masks, ignore_masks)
        (new_width, new_height, scale_factor) = compute_resize_params(images[0], cfg.INPUT.MIN_DIM, cfg.INPUT.MAX_DIM)
        images = torch.stack(self.np_to_tensor(*images), 0).float()
        images = F.interpolate(images, (new_height, new_width), mode='bilinear', align_corners=False)
        images = scale_and_normalize_images(images, cfg.INPUT.IMAGE_MEAN, cfg.INPUT.IMAGE_STD, (not cfg.INPUT.BGR_INPUT), cfg.INPUT.NORMALIZE_TO_UNIT_SCALE)
        # NOTE(review): instance masks receive (width, height) while ignore masks receive
        # (height, width) — confirm the two resize() implementations really differ in
        # argument order.
        masks = masks.resize((new_width, new_height), None)
        ignore_masks = [mask.resize((new_height, new_width)) for mask in ignore_masks]
        # (instances, T, H, W) -> (T, instances, H, W)? Permutation per downstream convention.
        masks = masks.tensor().permute(1, 0, 2, 3)
        ignore_masks = torch.stack([mask.tensor() for mask in ignore_masks], 0)
        targets = {'masks': masks, 'category_ids': torch.tensor(category_labels, dtype=torch.long), 'ignore_masks': ignore_masks}
        return (images, targets, (image_width, image_height), meta_info)
    def apply_random_flip(self, images, masks, ignore_masks):
        """Horizontally flip the whole clip with probability 0.5 (no-op when augmentations are off)."""
        if (self.apply_augmentations and (random.random() < 0.5)):
            images = [np.flip(image, axis=1) for image in images]
            masks = masks.flip_horizontal()
            ignore_masks = [mask.flip_horizontal() for mask in ignore_masks]
        return (images, masks, ignore_masks)
    def apply_random_sequence_reversal(self, images, masks, ignore_masks):
        """Reverse the clip's temporal order with probability 0.5 (no-op when augmentations are off)."""
        if (self.apply_augmentations and (random.random() < 0.5)):
            images = images[::(- 1)]
            masks = masks.reverse()
            ignore_masks = ignore_masks[::(- 1)]
        return (images, masks, ignore_masks)
    def apply_random_augmentation(self, images, masks, ignore_masks):
        """Apply per-frame geometric/blur augmentation to images and all masks.

        :return: (images, masks, ignore_masks, invalid_pts_mask) where invalid_pts_mask
            marks pixels with no source pixel after the geometric transform.
        """
        if self.apply_augmentations:
            (augmented_images, augmented_masks, augmented_ignore_masks, invalid_pts_mask) = ([], [], [], [])
            for t in range(self.clip_length):
                # Augment the ignore mask jointly with the instance masks so they stay aligned.
                concat_masks = (masks._mask_sequence_list[t] + [ignore_masks[t]])
                (augmented_image, augmented_masks_t, invalid_pts_mask_t) = self.augmenter(images[t], concat_masks)
                (augmented_masks_t, augmented_ignore_mask_t) = (augmented_masks_t[:(- 1)], augmented_masks_t[(- 1)])
                augmented_images.append(augmented_image)
                # Bug fix: append this frame's masks (the original appended the accumulator
                # list to itself, producing a self-referential list instead of the masks).
                augmented_masks.append(augmented_masks_t)
                augmented_ignore_masks.append(augmented_ignore_mask_t)
                invalid_pts_mask.append(invalid_pts_mask_t)
            # Re-wrap into the mask container type: __getitem__ subsequently calls
            # .reverse()/.resize()/.tensor() on the returned masks, which a plain list
            # of lists does not provide.
            return (augmented_images, BinaryMaskSequenceList(augmented_masks), augmented_ignore_masks, invalid_pts_mask)
        else:
            (h, w) = images[0].shape[:2]
            invalid_pts_mask = [np.zeros((h, w), np.uint8) for _ in range(self.clip_length)]
            return (images, masks, ignore_masks, invalid_pts_mask)
|
def visualize_data_loader_output(dataset, num_workers, batch_size, shuffle):
    """Interactively display batches produced by a DataLoader over `dataset`.

    Opens one window per frame for (a) instance-mask overlays, (b) ignore-mask
    overlays and (c) semantic-segmentation overlays. Press 'q' to stop, any
    other key to advance to the next sample.
    """
    print('Number of samples: {}'.format(len(dataset)))
    data_loader = TorchDataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, collate_fn=collate_fn)
    # Create resizable display windows up-front, one set per clip frame.
    for t in range(cfg.INPUT.NUM_FRAMES):
        cv2.namedWindow('Image {}'.format(t), cv2.WINDOW_NORMAL)
        cv2.namedWindow('Ignore {}'.format(t), cv2.WINDOW_NORMAL)
        cv2.namedWindow('Image {} semseg'.format(t), cv2.WINDOW_NORMAL)
    cmap = create_color_map().tolist()
    for (image_list, target, meta_info) in data_loader:
        images = image_list.numpy()
        for i in range(batch_size):
            if meta_info[i]:
                if ('seq_name' in meta_info[i]):
                    print('Seq name: {}'.format(meta_info[i]['seq_name']))
                if ('category_labels' in meta_info[i]):
                    print('Category labels: {}'.format(str(meta_info[i]['category_labels'])))
            seq_images = [np.copy(images[i][t]) for t in range(cfg.INPUT.NUM_FRAMES)]
            masks = target[i]['masks']
            num_instances = masks.shape[0]
            print('Instances: {}'.format(num_instances))
            semseg_mask = instance_masks_to_semseg_mask(masks, target[i]['category_ids'])
            semseg_mask = semseg_mask.numpy().astype(np.uint8)
            ignore_mask = target[i]['ignore_masks']
            seq_images_instances = [im.copy() for im in seq_images]
            # Overlay every instance's mask (color j+1 so 0 stays background) onto each frame.
            for j in range(num_instances):
                masks_j = [m.numpy() for m in masks[j].unbind(0)]
                for t in range(cfg.INPUT.NUM_FRAMES):
                    seq_images_instances[t] = overlay_mask_on_image(seq_images_instances[t], masks_j[t], 0.5, cmap[(j + 1)])
            seq_image_ignore_overlayed = []
            for t in range(cfg.INPUT.NUM_FRAMES):
                semseg_masked_t = visualize_semseg_masks(images[i][t], semseg_mask[t])
                cv2.imshow('Image {} semseg'.format(t), semseg_masked_t)
                seq_image_ignore_overlayed.append(overlay_mask_on_image(seq_images[t], ignore_mask[t].numpy()))
                cv2.imshow('Image {}'.format(t), seq_images_instances[t])
                cv2.imshow('Ignore {}'.format(t), seq_image_ignore_overlayed[t])
            # 113 == ord('q'): quit on 'q'.
            if (cv2.waitKey(0) == 113):
                return
|
def main(args):
    """Entry point: build the dataset selected by `args.dataset` and visualize its data loader output."""
    # Fix all RNG seeds so the sampled/augmented clips are reproducible across runs.
    imgaug.seed(42)
    torch.random.manual_seed(42)
    random.seed(42)
    # The config path may be absolute or relative to the repo's configs directory.
    if os.path.isabs(args.cfg):
        cfg.merge_from_file(args.cfg)
    else:
        cfg.merge_from_file(os.path.join(RepoPaths.configs_dir(), args.cfg))
    if (args.dataset == 'coco'):
        dataset = CocoDataLoader(CocoPaths.images_dir(), CocoPaths.ids_file(), category_agnostic=False)
    elif (args.dataset == 'mapillary'):
        dataset = MapillaryDataLoader(MapillaryPaths.images_dir(), MapillaryPaths.ids_file())
    elif (args.dataset == 'pascalvoc'):
        dataset = PascalVOCDataLoader(PascalVOCPaths.images_dir(), PascalVOCPaths.ids_file(), category_agnostic=False)
    elif (args.dataset == 'ytvis'):
        dataset = YoutubeVISDataLoader(YoutubeVISPaths.training_base_dir(), YoutubeVISPaths.train_vds_file(), cfg.TRAINING.TRACKER.MAX_ITERATIONS, category_agnostic=False, single_instance_duplication=cfg.DATA.YOUTUBE_VIS.SINGLE_INSTANCE_DUPLICATION)
    elif (args.dataset == 'davis'):
        dataset = DavisDataLoader(DavisUnsupervisedPaths.trainval_base_dir(), DavisUnsupervisedPaths.train_vds_file(), apply_augmentation=False, samples_to_create=cfg.DATA.DAVIS.TRAINING_SUBSEQUENCES, single_instance_duplication=cfg.DATA.DAVIS.SINGLE_INSTANCE_DUPLICATION)
    elif (args.dataset == 'kittimots'):
        dataset = MOTSDataLoader(KITTIMOTSPaths.train_images_dir(), KITTIMOTSPaths.train_vds_file(), samples_to_create=cfg.TRAINING.TRACKER.MAX_ITERATIONS, apply_augmentation=cfg.DATA.KITTI_MOTS.AUGMENTATION, frame_gap_lower=cfg.DATA.KITTI_MOTS.FRAME_GAP_LOWER, frame_gap_upper=cfg.DATA.KITTI_MOTS.FRAME_GAP_UPPER)
    else:
        raise ValueError('Invalid dataset name given')
    visualize_data_loader_output(dataset, args.num_workers, args.batch_size, args.shuffle)
|
class YoutubeVISDataLoader(VideoDataset):
    """Training data loader for the YouTube-VIS dataset.

    Samples a fixed number of short clips per epoch and can optionally duplicate
    the single instance in one-instance clips to create harder training samples.
    """
    def __init__(self, base_dir, vds_json_file, samples_to_create, apply_augmentation=False, category_agnostic=True, single_instance_duplication=False):
        # Clip length is taken from the global input config.
        super(YoutubeVISDataLoader, self).__init__(base_dir, vds_json_file, cfg.INPUT.NUM_FRAMES, apply_augmentations=apply_augmentation)
        self.filter_zero_instance_frames()
        self.category_agnostic = category_agnostic
        assert (samples_to_create > 0)
        self.samples = self.create_training_subsequences(samples_to_create)
        self.instance_duplicator = InstanceDuplicator()
        self.single_instance_duplication = single_instance_duplication
    def create_training_subsequences(self, num_subsequences):
        """Randomly sample `num_subsequences` clips of `self.clip_length` frames each,
        spread evenly over a temporal span drawn from [FRAME_GAP_LOWER, FRAME_GAP_UPPER]."""
        frame_range = list(range(cfg.DATA.YOUTUBE_VIS.FRAME_GAP_LOWER, (cfg.DATA.YOUTUBE_VIS.FRAME_GAP_UPPER + 1)))
        subseq_length = self.clip_length
        # A span shorter than the clip length would force duplicate frame indices.
        assert (self.clip_length <= cfg.DATA.YOUTUBE_VIS.FRAME_GAP_LOWER <= cfg.DATA.YOUTUBE_VIS.FRAME_GAP_UPPER)
        min_sequence_length = (frame_range[0] + 1)
        sequences = [seq for seq in self.sequences if (len(seq) > min_sequence_length)]
        total_frames = sum([len(seq) for seq in sequences])
        # Allocate samples proportionally to sequence length (at least one per sequence);
        # the ceil-based allocation over-shoots and is trimmed down below.
        samples_per_seq = [max(1, int(math.ceil(((len(seq) / total_frames) * num_subsequences)))) for seq in sequences]
        subseq_span_range = frame_range.copy()
        subsequence_idxes = []
        for (sequence, num_samples) in zip(sequences, samples_per_seq):
            for _ in range(num_samples):
                # Clamp the randomly chosen span so the clip fits inside the sequence.
                subseq_span = min(random.choice(subseq_span_range), (len(sequence) - 1))
                max_start_idx = ((len(sequence) - subseq_span) - 1)
                assert (max_start_idx >= 0)
                start_idx = (0 if (max_start_idx == 0) else random.randint(0, max_start_idx))
                end_idx = (start_idx + subseq_span)
                # Evenly spaced frame indices within [start_idx, end_idx].
                sample_idxes = np.round(np.linspace(start_idx, end_idx, subseq_length)).astype(np.int32).tolist()
                assert (len(set(sample_idxes)) == len(sample_idxes))
                subsequence_idxes.append((sequence.id, sample_idxes))
        assert (len(subsequence_idxes) >= num_subsequences), '{} should be >= {}'.format(len(subsequence_idxes), num_subsequences)
        subsequence_idxes = random.sample(subsequence_idxes, num_subsequences)
        random.shuffle(subsequence_idxes)
        sequences = {seq.id: seq for seq in sequences}
        subsequences = [sequences[video_id].extract_subsequence(frame_idxes) for (video_id, frame_idxes) in subsequence_idxes]
        return subsequences
    def parse_sample_at(self, idx):
        """Load sample `idx` and return (images, BinaryMaskSequenceList, category ids, meta-info).

        When the clip contains exactly one instance and duplication is enabled, a
        synthetically duplicated copy of the instance may be pasted into the clip.
        """
        sample = self.samples[idx]
        images = sample.load_images()
        masks = sample.load_masks()
        if self.category_agnostic:
            # Collapse all categories to a single foreground class.
            instance_categories = [1 for _ in range(len(sample.instance_ids))]
        else:
            instance_categories = sample.category_labels
        if ((len(sample.instance_ids) == 1) and self.single_instance_duplication):
            masks_flat = [mask[0] for mask in masks]
            (augmented_images, augmented_masks) = self.instance_duplicator(images, masks_flat)
            # Duplication can fail (returns None); keep the original clip in that case.
            if (augmented_images is not None):
                images = augmented_images
                masks = list(zip(*augmented_masks))
                # The duplicated instance inherits the original instance's category.
                instance_categories.append(instance_categories[(- 1)])
        # YouTube-VIS has no ignore regions: use empty (all-zero) ignore masks.
        (height, width) = images[0].shape[:2]
        ignore_masks = [BinaryMask(np.zeros((height, width), np.uint8)) for _ in range(self.clip_length)]
        masks = [[BinaryMask(mask) for mask in masks_t] for masks_t in masks]
        masks = BinaryMaskSequenceList(masks)
        return (images, masks, instance_categories, {'seq_name': sample.id, 'ignore_masks': ignore_masks})
    def __len__(self):
        """Return the number of training clips sampled for this dataset."""
        return len(self.samples)
|
class ClustererBase(object):
    """Base class for clustering back-ends.

    Wraps the subclass-implemented `_process` with per-input-size wall-clock
    timing statistics, keyed by the number of embedding points.
    """
    def __init__(self):
        self._time_log = defaultdict(list)
    def __call__(self, embeddings, *args, **kwargs):
        assert embeddings.dtype == torch.float32
        t_start = current_time()
        result = self._process(embeddings, *args, **kwargs)
        self._time_log[embeddings.shape[0]].append(current_time() - t_start)
        return result
    def _process(self, embeddings, *args, **kwargs):
        """Perform the actual clustering; subclasses must override."""
        raise NotImplementedError('Must be implemented by derived class')
    def reset_time_log(self):
        """Discard all recorded timings."""
        self._time_log = defaultdict(list)
    @property
    def average_time(self):
        """Mean duration over every recorded `_process` call, regardless of input size."""
        timings = [duration for durations in self._time_log.values() for duration in durations]
        return sum(timings) / float(len(timings))
    @property
    def name(self):
        # Subclasses are expected to set self._name.
        return self._name
|
class SequentialClustering(ClustererBase):
    """Greedy sequential clustering of embedding points into instances.

    Repeatedly picks the unassigned point with the highest seediness score as the
    next instance center, assigns all sufficiently probable points to it, and
    finally re-assigns leftover points to their nearest center under a looser
    (secondary) probability threshold.
    """
    def __init__(self, primary_prob_thresh, secondary_prob_thresh, min_seediness_prob, n_free_dims, free_dim_stds, device, max_instances=20):
        """
        :param primary_prob_thresh: probability threshold for the initial point-to-center assignment.
        :param secondary_prob_thresh: looser threshold for re-assigning leftover points.
        :param min_seediness_prob: stop creating instances once the best remaining seediness falls below this.
        :param n_free_dims: number of embedding dims with fixed (non-predicted) bandwidths.
        :param free_dim_stds: standard deviations for those free dims.
        :param device: torch device on which clustering runs.
        :param max_instances: hard cap on instances per call.
        """
        super().__init__()
        self.thresholding_mode = 'probability'
        self.primary_prob_thresh = primary_prob_thresh
        self.secondary_prob_thresh = secondary_prob_thresh
        self.min_seediness_prob = min_seediness_prob
        self.max_instances = max_instances
        self.n_free_dims = n_free_dims
        self.free_dim_stds = free_dim_stds
        self.device = device
    @staticmethod
    def distances_to_prob(distances):
        # Gaussian-style kernel: prob = exp(-0.5 * d).
        return ((- 0.5) * distances).exp()
    @staticmethod
    def compute_distance(embeddings, center, bandwidth):
        # Bandwidth-weighted (Mahalanobis-style) distance of each embedding to the center.
        return (torch.pow((embeddings - center), 2) * bandwidth).sum(dim=(- 1)).sqrt()
    @torch.no_grad()
    def _process(self, embeddings, bandwidths, seediness, cluster_label_start=1, *args, **kwargs):
        """Cluster the given points into instance labels.

        :param embeddings: tensor(N, E)
        :param bandwidths: tensor broadcastable to the embeddings' shape (modulo free dims).
        :param seediness: tensor(N, 1) per-point seediness scores.
        :param cluster_label_start: label id assigned to the first instance.
        :return: (tensor(N) labels (-1 = unassigned), dict with per-instance labels/centers/stds/masks)
        """
        if (embeddings.numel() == 0):
            return (torch.zeros(0, dtype=torch.long, device=embeddings.device), {'instance_labels': [], 'instance_centers': [], 'instance_stds': [], 'instance_masks': []})
        input_device = embeddings.device
        embeddings = embeddings.to(device=self.device)
        assert torch.is_tensor(bandwidths)
        if (bandwidths.shape[0] != embeddings.shape[0]):
            bandwidths = bandwidths.expand_as(embeddings)
        bandwidths = bandwidths.to(device=self.device)
        if (self.n_free_dims == 0):
            assert (embeddings.shape == bandwidths.shape)
        assert torch.is_tensor(seediness)
        # Bug fix: this check was previously a bare tuple expression (the 'assert'
        # keyword was missing), so the shape validation never executed.
        assert (seediness.shape[0] == embeddings.shape[0]), 'Seediness shape: {}, embeddings shape: {}'.format(seediness.shape, embeddings.shape)
        seediness = seediness.squeeze(1).to(device=self.device)
        label_masks = []
        unique_labels = []
        label_centers = []
        label_stds = []
        return_label_masks = kwargs.get('return_label_masks', False)
        total_points = embeddings.shape[0]
        # -1 marks points not (yet) assigned to any instance.
        labels = torch.full((total_points,), (- 1), dtype=torch.long, device=embeddings.device)
        label_distances = []
        num_unassigned_pts = total_points
        if (self.n_free_dims > 0):
            free_dim_stds = torch.tensor(self.free_dim_stds).to(embeddings)
            free_dim_bandwidths = (1.0 / (free_dim_stds ** 2))
        else:
            (free_dim_stds, free_dim_bandwidths) = (torch.zeros(0).to(embeddings), torch.zeros(0).to(embeddings))
        for i in range(self.max_instances):
            available_embeddings_mask = (labels == (- 1))
            num_unassigned_pts = available_embeddings_mask.sum(dtype=torch.long)
            if (num_unassigned_pts == 0):
                break
            (next_center, bandwidth, prob) = self._get_next_instance_center(embeddings[available_embeddings_mask], bandwidths[available_embeddings_mask], seediness[available_embeddings_mask])
            # No sufficiently confident center remains: stop creating new instances.
            if (prob < self.min_seediness_prob):
                break
            bandwidth = torch.cat((bandwidth, free_dim_bandwidths), 0)
            instance_label = (i + cluster_label_start)
            unique_labels.append(instance_label)
            label_centers.append(next_center.tolist())
            label_stds.append((1.0 / bandwidth).clamp(min=1e-08).sqrt().tolist())
            # Already-assigned points keep an effectively infinite distance to this center.
            distances = torch.full_like(labels, 100000000.0, dtype=torch.float32, device=embeddings.device)
            distances[available_embeddings_mask] = self.compute_distance(embeddings[available_embeddings_mask], next_center, bandwidth)
            label_distances.append(distances)
            probs = torch.zeros_like(distances)
            probs[available_embeddings_mask] = self.distances_to_prob(distances[available_embeddings_mask])
            match_mask = ((probs > self.primary_prob_thresh) & available_embeddings_mask)
            labels = torch.where(match_mask, torch.tensor(instance_label, device=self.device), labels)
            if return_label_masks:
                label_masks.append(match_mask.cpu())
        # Secondary pass: re-assign leftover points to their nearest center under the
        # looser threshold.
        if ((num_unassigned_pts > 0) and label_distances):
            label_distances = torch.stack(label_distances, dim=1)
            # Bug fix: use .min to find the *nearest* cluster for each point (as the
            # variable names indicate); '.max' picked the farthest cluster.
            (min_distance, min_distance_label) = label_distances.min(dim=1)
            min_distance_label += cluster_label_start
            probs = self.distances_to_prob(min_distance)
            update_mask = ((probs > self.secondary_prob_thresh) & available_embeddings_mask)
            labels = torch.where(update_mask, min_distance_label, labels)
        return (labels.to(input_device), {'instance_labels': unique_labels, 'instance_centers': label_centers, 'instance_stds': label_stds, 'instance_masks': label_masks})
    def _get_next_instance_center(self, embeddings, bandwidths, seediness):
        """Return (center embedding, its bandwidth, its seediness) for the seediest point."""
        if (self.n_free_dims == 0):
            assert (embeddings.shape == bandwidths.shape)
        assert (embeddings.numel() > 0)
        assert (embeddings.shape[0] == seediness.shape[0])
        max_prob_idx = seediness.argmax()
        return (embeddings[max_prob_idx], bandwidths[max_prob_idx], seediness[max_prob_idx])
|
def get_subsequence_frames(seq_len, subseq_len, dataset_name, frame_overlap=(- 1)):
    """Split a sequence of `seq_len` frames into overlapping windows of `subseq_len` frames.

    :param seq_len: total number of frames in the sequence.
    :param subseq_len: number of frames per window.
    :param dataset_name: 'davis', 'ytvis' or 'kittimots' (selects the default overlap).
    :param frame_overlap: overlap between consecutive windows; <= 0 uses the dataset default.
    :return: (list of per-window frame index lists, padding mask or None). When the
        sequence is shorter than a window, a single window left-padded with frame 0
        is returned together with a boolean list marking the padded positions.
    :raises NotImplementedError: for an unrecognized dataset name.
    """
    # Defaults wrapped in lambdas so the config is only touched when actually needed.
    default_overlaps = {
        'davis': (lambda: cfg.DATA.DAVIS.INFERENCE_FRAME_OVERLAP),
        'ytvis': (lambda: cfg.DATA.YOUTUBE_VIS.INFERENCE_FRAME_OVERLAP),
        'kittimots': (lambda: cfg.DATA.KITTI_MOTS.INFERENCE_FRAME_OVERLAP),
    }
    if dataset_name not in default_overlaps:
        raise NotImplementedError()
    if frame_overlap <= 0:
        frame_overlap = default_overlaps[dataset_name]()
    assert frame_overlap < subseq_len
    if seq_len < subseq_len:
        # Sequence shorter than one window: left-pad by repeating frame 0.
        pad = subseq_len - seq_len
        return ([([0] * pad) + list(range(seq_len))], ([True] * pad) + ([False] * seq_len))
    step = subseq_len - frame_overlap
    subseq_idxes = [list(range(t, t + subseq_len)) for t in range(0, (seq_len - subseq_len) + 1, step)]
    # Guarantee the final frame is covered by appending a trailing window if needed.
    if subseq_idxes[-1][-1] != (seq_len - 1):
        subseq_idxes.append(list(range(seq_len - subseq_len, seq_len)))
    return (subseq_idxes, None)
|
class TrackGenerator(object):
    """End-to-end inference driver: runs the model over each video sequence,
    clusters the predicted embeddings into instance tracks and hands the result
    to a dataset-specific output generator."""
    def __init__(self, sequences, dataset_name, output_generator, output_dir, model_ckpt_path, max_tracks, preload_images, resize_scale, semseg_averaging_on_gpu, **kwargs):
        self.sequences = sequences
        self.dataset_name = dataset_name
        self.output_generator = output_generator
        # Semantic-seg head output format depends on the dataset's evaluation protocol.
        if (self.dataset_name == 'kittimots'):
            semseg_output_type = 'argmax'
        elif (self.dataset_name == 'ytvis'):
            semseg_output_type = 'logits'
        else:
            semseg_output_type = None
        self.model = InferenceModel(model_ckpt_path, semseg_output_type=semseg_output_type, preload_images=preload_images, resize_scale=resize_scale, semseg_generation_on_gpu=semseg_averaging_on_gpu).cuda()
        self.resize_scale = resize_scale
        self.vis_output_dir = os.path.join(output_dir, 'vis')
        self.embeddings_output_dir = os.path.join(output_dir, 'embeddings')
        self.max_tracks = max_tracks
        self.save_vis = kwargs.get('save_vis', False)
        # Seediness threshold used as a fallback foreground mask when the model
        # produces no explicit foreground output.
        self.seediness_fg_threshold = kwargs.get('seediness_thresh', 0.25)
        self.ignore_fg_masks = kwargs.get('ignore_fg_masks', False)
        self.frame_overlap = kwargs.get('frame_overlap', (- 1))
        self.clustering_device = kwargs.get('clustering_device', 'cuda:0')
        self.chainer = OnlineChainer(self.create_clusterer(), embedding_resize_factor=resize_scale)
        self.total_frames_processed = 0.0
    def create_clusterer(self):
        """Build the sequential clusterer from the global clustering config."""
        _cfg = cfg.CLUSTERING
        return SequentialClustering(primary_prob_thresh=_cfg.PRIMARY_PROB_THRESHOLD, secondary_prob_thresh=_cfg.SECONDARY_PROB_THRESHOLD, min_seediness_prob=_cfg.MIN_SEEDINESS_PROB, n_free_dims=get_nb_free_dims(cfg.MODEL.EMBEDDING_DIM_MODE), free_dim_stds=cfg.TRAINING.LOSSES.EMBEDDING.FREE_DIM_STDS, device=self.clustering_device)
    def get_fg_masks_from_seediness(self, inference_output):
        """Derive per-frame foreground masks by averaging overlapping sub-sequence
        seediness maps and thresholding at self.seediness_fg_threshold."""
        # Per-frame accumulator: [summed seediness, number of contributing subsequences].
        seediness_scores = defaultdict((lambda : [0.0, 0.0]))
        for (subseq_frames, _, _, subseq_seediness) in inference_output['embeddings']:
            subseq_seediness = subseq_seediness.cuda().squeeze(0)
            for (i, t) in enumerate(subseq_frames):
                seediness_scores[t][0] += subseq_seediness[i]
                seediness_scores[t][1] += 1.0
        fg_masks = [(seediness_scores[t][0] / seediness_scores[t][1]) for t in sorted(seediness_scores.keys())]
        return (torch.stack(fg_masks, 0) > self.seediness_fg_threshold).byte().cpu()
    def start(self, seqs_to_process):
        """Process all sequences (or only those whose ids appear in `seqs_to_process`)
        and print the aggregated throughput figures."""
        # Allow a single scalar max_tracks to apply to every sequence.
        if (not isinstance(self.max_tracks, (list, tuple))):
            self.max_tracks = ([self.max_tracks] * len(self.sequences))
        for i in range(len(self.sequences)):
            sequence = self.sequences[i]
            if (seqs_to_process and (str(sequence.seq_id) not in seqs_to_process)):
                continue
            print('Performing inference for sequence {}/{}'.format((i + 1), len(self.sequences)))
            self.process_sequence(sequence, self.max_tracks[i])
        print('----------------------------------------------------')
        print('Model inference speed: {:.3f} fps'.format((self.total_frames_processed / Timer.get_duration('inference'))))
        print('Clustering and postprocessing speed: {:.3f} fps'.format((self.total_frames_processed / Timer.get_duration('postprocessing'))))
        print('Overall speed: {:.3f} fps'.format((self.total_frames_processed / Timer.get_durations_sum())))
        print('----------------------------------------------------')
    def process_sequence(self, sequence, max_tracks):
        """Run inference + clustering for a single sequence."""
        (embeddings, fg_masks, multiclass_masks) = self.do_inference(sequence)
        self.do_clustering(sequence, embeddings, fg_masks, multiclass_masks, max_tracks)
        self.total_frames_processed += len(sequence)
    @Timer.log_duration('inference')
    def do_inference(self, sequence):
        """Run the model over the sequence's overlapping sub-sequences and return
        (per-subsequence embeddings, per-frame foreground masks, multiclass masks)."""
        (subseq_idxes, _) = get_subsequence_frames(len(sequence), cfg.INPUT.NUM_FRAMES, self.dataset_name, self.frame_overlap)
        image_paths = [os.path.join(sequence.base_dir, path) for path in sequence.image_paths]
        inference_output = self.model(image_paths, subseq_idxes)
        (fg_masks, multiclass_masks) = (inference_output['fg_masks'], inference_output['multiclass_masks'])
        if torch.is_tensor(fg_masks):
            print("Obtaining foreground mask from model's foreground mask output")
            fg_masks = (fg_masks > 0.5).byte()
        else:
            # Model has no explicit foreground head: fall back to thresholded seediness.
            print('Obtaining foreground mask by thresholding seediness map at {}'.format(self.seediness_fg_threshold))
            fg_masks = self.get_fg_masks_from_seediness(inference_output)
        return (inference_output['embeddings'], fg_masks, multiclass_masks)
    @Timer.log_duration('postprocessing')
    def do_clustering(self, sequence, all_embeddings, fg_masks, multiclass_masks, max_tracks):
        """Chain per-subsequence clusters into sequence-level tracks and emit outputs."""
        subseq_dicts = []
        for (i, (subseq_frames, embeddings, bandwidths, seediness)) in tqdm(enumerate(all_embeddings), total=len(all_embeddings)):
            subseq_dicts.append({'frames': subseq_frames, 'embeddings': embeddings, 'bandwidths': bandwidths, 'seediness': seediness})
        ((track_labels, instance_pt_counts, instance_lifetimes), framewise_mask_idxes, subseq_labels_list, fg_embeddings, subseq_clustering_meta_info) = self.chainer.process(fg_masks, subseq_dicts)
        # NOTE(review): the 4.0 here presumably is the embedding-to-image upscale factor — confirm.
        self.output_generator.process_sequence(sequence, framewise_mask_idxes, track_labels, instance_pt_counts, instance_lifetimes, multiclass_masks, fg_masks.shape[(- 2):], 4.0, max_tracks, device=self.clustering_device)
|
def configure_directories(args):
    """Resolve (and create) the output directory for inference results.

    Defaults to '<model dir>/inference' when no output dir is given; a relative
    output dir is interpreted relative to the model checkpoint's directory.

    :param args: parsed CLI args providing `output_dir` and `model_path`.
    :return: str. Absolute-or-as-given path of the (now existing) output directory.
    """
    model_dir = os.path.dirname(args.model_path)
    out_dir = args.output_dir or os.path.join(model_dir, 'inference')
    if not os.path.isabs(out_dir):
        out_dir = os.path.join(model_dir, out_dir)
    os.makedirs(out_dir, exist_ok=True)
    return out_dir
|
def load_cfg(args):
    """Load the model's training config saved next to the checkpoint, or fall back
    to the repo's default config for the chosen dataset."""
    cfg_file = os.path.join(os.path.dirname(args.model_path), 'config.yaml')
    if (not os.path.exists(cfg_file)):
        # No saved config alongside the checkpoint: use the per-dataset default.
        dataset_cfgs = {'davis': 'davis_2.yaml', 'ytvis': 'youtube_vis.yaml', 'kittimots': 'kitti_mots_2.yaml'}
        assert (args.dataset in dataset_cfgs), "Invalid '--dataset' argument. Should be either 'davis', 'ytvis' or 'kittimots'"
        cfg_file = os.path.join(RepoPaths.configs_dir(), dataset_cfgs[args.dataset])
    print('Loading config from {}'.format(cfg_file))
    cfg.merge_from_file(cfg_file)
|
def configure_input_dims(args):
    """Apply user-specified network input dimension limits to the global config.

    When only one of min/max is given, the other is derived so that the configured
    max/min aspect ratio is preserved. No-op when neither is given.

    :param args: parsed CLI args providing `min_dim` and `max_dim` (0/None = unset).
    """
    min_dim, max_dim = args.min_dim, args.max_dim
    if not min_dim and not max_dim:
        return  # nothing to override
    if min_dim and max_dim:
        assert min_dim > 0
        assert max_dim > 0
        cfg.INPUT.update_param('MIN_DIM', min_dim)
        cfg.INPUT.update_param('MAX_DIM', max_dim)
    elif min_dim:
        assert min_dim > 0
        # Scale MAX_DIM to keep the configured aspect ratio.
        dim_ratio = float(cfg.INPUT.MAX_DIM) / float(cfg.INPUT.MIN_DIM)
        cfg.INPUT.update_param('MIN_DIM', min_dim)
        cfg.INPUT.update_param('MAX_DIM', int(round(min_dim * dim_ratio)))
    else:
        assert max_dim > 0
        # Scale MIN_DIM to keep the configured aspect ratio.
        dim_ratio = float(cfg.INPUT.MAX_DIM) / float(cfg.INPUT.MIN_DIM)
        cfg.INPUT.update_param('MIN_DIM', int(round(max_dim / dim_ratio)))
        cfg.INPUT.update_param('MAX_DIM', max_dim)
    print('Network input image dimension limits: {}, {}'.format(cfg.INPUT.MIN_DIM, cfg.INPUT.MAX_DIM))
|
def main(args):
    """Entry point for inference: configure paths/dims, build the dataset-specific
    output generator, run track generation over all sequences and save the results."""
    load_cfg(args)
    if args.min_seediness_prob:
        print('Min seediness prob for instance center --> {}'.format(args.min_seediness_prob))
        cfg.CLUSTERING.update_param('MIN_SEEDINESS_PROB', args.min_seediness_prob)
    configure_input_dims(args)
    output_dir = configure_directories(args)
    preload_images = True
    # Cluster at full (4x upsampled) resolution when the model was trained with
    # full-resolution losses or the user explicitly requests it.
    cluster_full_scale = (cfg.TRAINING.LOSS_AT_FULL_RES or args.resize_embeddings)
    output_resize_scale = (4.0 if cluster_full_scale else 1.0)
    semseg_averaging_on_gpu = (not (((args.dataset == 'ytvis') or (args.dataset == 'kittimots')) and cluster_full_scale))
    if (args.dataset == 'davis'):
        (sequences, _) = parse_generic_video_dataset(DavisPaths.trainval_base_dir(), DavisPaths.val_vds_file())
        output_generator = DavisOutputGenerator(output_dir, OnlineChainer.OUTLIER_LABEL, args.save_vis, upscaled_inputs=cluster_full_scale)
        max_tracks = cfg.DATA.DAVIS.MAX_INFERENCE_TRACKS
    # Bug fix: this branch used `args.dataset in 'ytvis'`, which is a substring test
    # (e.g. 'vis' and '' would also match); an equality test is intended.
    elif (args.dataset == 'ytvis'):
        (sequences, meta_info) = parse_generic_video_dataset(YoutubeVISPaths.val_base_dir(), YoutubeVISPaths.val_vds_file())
        output_generator = YoutubeVISOutputGenerator(output_dir, OnlineChainer.OUTLIER_LABEL, args.save_vis, None, meta_info['category_labels'], upscaled_inputs=cluster_full_scale)
        max_tracks = cfg.DATA.YOUTUBE_VIS.MAX_INFERENCE_TRACKS
    elif (args.dataset == 'kittimots'):
        (sequences, _) = parse_generic_video_dataset(KITTIMOTSPaths.train_images_dir(), KITTIMOTSPaths.val_vds_file())
        output_generator = KittiMOTSOutputGenerator(output_dir, OnlineChainer.OUTLIER_LABEL, args.save_vis, upscaled_inputs=cluster_full_scale)
        max_tracks = cfg.DATA.KITTI_MOTS.MAX_INFERENCE_TRACKS
        # KITTI-MOTS frames are large; stream them from disk instead of preloading.
        preload_images = False
    else:
        raise ValueError('Invalid dataset name {} provided'.format(args.dataset))
    # A CLI-provided max_tracks overrides the dataset default.
    max_tracks = (args.max_tracks if args.max_tracks else max_tracks)
    track_generator = TrackGenerator(sequences, args.dataset, output_generator, output_dir, args.model_path, save_vis=args.save_vis, seediness_thresh=args.seediness_thresh, frame_overlap=args.frame_overlap, max_tracks=max_tracks, preload_images=preload_images, resize_scale=output_resize_scale, semseg_averaging_on_gpu=semseg_averaging_on_gpu, clustering_device=args.clustering_device)
    track_generator.start(args.seqs)
    output_generator.save()
    print('Results saved to {}'.format(output_dir))
|
def masks_to_coord_list(masks):
    """Convert a stack of binary masks into per-frame foreground pixel coordinates.

    :param masks: tensor(T, H, W)
    :return: list(tuple(tensor(M), tensor(M))) — per frame, a (row indices, column
        indices) tuple of the nonzero pixels.
    """
    coords_per_frame = []
    for mask_t in masks.unbind(0):
        nonzero_rowcols = mask_t.nonzero()
        coords_per_frame.append(tuple(nonzero_rowcols.unbind(1)))
    return coords_per_frame
|
class TrackContainer(object):
'\n Container for holding the final stitched labels assigned to every instance in a video sequence.\n '
def __init__(self, num_frames):
    """Initialize empty per-frame label storage for a `num_frames`-long sequence."""
    self._frame_labels = [None] * num_frames
    self._is_frozen = [False] * num_frames
    self._highest_instance_id = 0
def add_labels(self, frame_nums, labels):
    """
    Assign track labels to the foreground pixels of the given (so far unlabelled) frames.
    :param frame_nums: list(int)
    :param labels: list(tensor(N, E)). Global track labels (not per-sub-sequence cluster labels).
    :return: The next available instance ID.
    """
    assert all((self._frame_labels[t] is None) for t in frame_nums)
    for frame_num, frame_labels in zip(frame_nums, labels):
        self._frame_labels[frame_num] = frame_labels
        if frame_labels.numel() > 0:
            # Track the largest label seen so far so new IDs never collide.
            self._highest_instance_id = max(self._highest_instance_id, frame_labels.max().item())
    return self._highest_instance_id + 1
def labels_exist(self, frame_num):
    '\n        Returns true if track labels have already been assigned to a given frame\n        :param frame_num: int. The frame ID (0, ..., T-1)\n        :return: bool\n        '
    return (self._frame_labels[frame_num] is not None)
def has_fg_pixels(self, frame_num):
    """Return True if the given (already-labelled) frame has at least one foreground pixel."""
    assert self.labels_exist(frame_num)
    frame_labels = self._frame_labels[frame_num]
    return frame_labels.numel() > 0
def get_labels(self, frame_nums):
assert all((self.labels_exist(t) for t in frame_nums))
return [self._frame_labels[t] for t in frame_nums]
def update_labels(self, frame_num, labels):
'\n Similar to add_labels, but is meant to be used when updating the labels for a given frame (e.g. using a\n long-range association measure). This method makes sure that the number of points in the previous and updated\n labels are the same.\n :param frame_num: int. The frame ID (0, ..., T-1)\n :param labels: tensor(N, E)\n :return:\n '
assert self.labels_exist(frame_num)
assert (not self._is_frozen[frame_num])
assert (self._frame_labels[frame_num].shape == self._frame_labels[frame_num].shape)
self._frame_labels[frame_num] = labels
if (labels.numel() > 0):
self._highest_instance_id = max(self._highest_instance_id, labels.max().item())
return self._highest_instance_id
def freeze_frame(self, frame_num):
"\n Safety precaution: when you're finished processing a given frame, call this method and it will ensure that no\n changes are made to the predicted labels of that frame in the future.\n :param frame_num:\n :return:\n "
assert self.labels_exist(frame_num)
self._is_frozen[frame_num] = True
def get_track_mask_idxes(self):
'\n Returns 3 dicts. The first contains final list of track as a dict with keys being the frame numbers and values\n being tensors containing the track ID for each foreground pixel. Note that this is just a flattened list of\n labels and not the final masks.\n\n The second dict contains the number of pixels belonging to each track ID (useful\n for breaking ties between tracks when generating the final masks).\n\n The third dict contains the temporal lifetime of each track ID (also useful\n for breaking ties between tracks when generating the final masks).\n :return: dict, dict\n '
instance_id_num_pts = defaultdict((lambda : 0))
instance_id_lifetimes = defaultdict((lambda : [10000, (- 1)]))
for (frame_num, labels_per_frame) in enumerate(self._frame_labels):
for id in labels_per_frame.unique().tolist():
instance_id_num_pts[id] += (labels_per_frame == id).long().sum().item()
instance_id_lifetimes[id][0] = min(frame_num, instance_id_lifetimes[id][0])
instance_id_lifetimes[id][1] = max(frame_num, instance_id_lifetimes[id][1])
instance_id_lifetimes = {k: (v[1] - v[0]) for (k, v) in instance_id_lifetimes.items()}
return (self._frame_labels, instance_id_num_pts, instance_id_lifetimes)
|
class OnlineChainer(object):
    """Stitches per-sub-sequence clustering results into video-length tracks.

    Sub-sequences are clustered independently (in order) and the clusters of each
    new sub-sequence are associated with already-committed tracks through the
    frames it shares with the previous sub-sequence.
    """
    # Cluster label the clusterer assigns to points belonging to no instance.
    OUTLIER_LABEL = (- 1)
    def __init__(self, clusterer, embedding_resize_factor):
        # clusterer: callable(embeddings, bandwidths=..., seediness=...,
        #   cluster_label_start=..., return_label_masks=...) -> (labels, meta_info)
        self.clusterer = clusterer
        self.resize_scale = embedding_resize_factor
    @torch.no_grad()
    def resize_tensors(self, subseq):
        # Spatially rescale the sub-sequence tensors in-place; no-op at scale 1.0.
        if (self.resize_scale == 1.0):
            return
        def resize(x):
            x = x.unsqueeze(0)
            # Temporal dimension keeps scale 1.0; only H and W are rescaled.
            x = F.interpolate(x, scale_factor=(1.0, self.resize_scale, self.resize_scale), mode='trilinear', align_corners=False)
            return x.squeeze(0)
        subseq['embeddings'] = resize(subseq['embeddings'])
        subseq['seediness'] = resize(subseq['seediness'])
        subseq['bandwidths'] = resize(subseq['bandwidths'])
    @torch.no_grad()
    def process(self, masks, subsequences, return_fg_embeddings=False):
        """
        Performs clustering/stitching of tracklets for a video containing T frames.
        :param masks: foreground masks as tensor(T, H, W)
        :param subsequences: list(dict). The list contains one entry per sub-sequence. There can be an arbitrary
            number of sub-sequences. Each dict must contain a 'frames' key with a list of frames belonging to that
            sub-sequence, and an 'embeddings' key with a tensor of shape (E, T_subseq, H, W) containing the
            embeddings for that sub-sequence ('bandwidths' and 'seediness' tensors as well).
        :param return_fg_embeddings: bool
        :return: (track container output, mask indexes, per-sub-sequence labels, fg embeddings,
            per-sub-sequence clustering meta info)
        """
        num_frames = masks.shape[0]
        mask_idxes = masks_to_coord_list(masks)
        subseq_labels_list = []
        subseq_clustering_meta_info = []
        track_container = TrackContainer(num_frames)
        next_track_label = 1
        fg_embeddings = []
        print('Clustering subsequences...')
        for i in tqdm(range(len(subsequences))):
            subseq = subsequences[i]
            if isinstance(subseq['frames'], dict):
                # Accept dict-keyed frames as well; only the (sorted) keys matter.
                subseq['frames'] = sorted(subseq['frames'].keys())
            subseq_mask_idxes = [mask_idxes[t] for t in subseq['frames']]
            subseq['embeddings'] = subseq['embeddings'].cuda()
            subseq['bandwidths'] = subseq['bandwidths'].cuda()
            subseq['seediness'] = subseq['seediness'].cuda()
            self.resize_tensors(subseq)
            assert (subseq['embeddings'].shape[(- 2):] == masks.shape[(- 2):]), 'Size mismatch between embeddings {} and masks {}'.format(subseq['embeddings'].shape, masks.shape)
            (subseq_labels, subseq_fg_embeddings, meta_info) = self.cluster_subsequence(subseq_mask_idxes, subseq['embeddings'], subseq['bandwidths'], subseq['seediness'], next_track_label, return_fg_embeddings)
            subseq_labels_list.append(subseq_labels)
            if return_fg_embeddings:
                fg_embeddings.append(subseq_fg_embeddings.cpu())
            if (i == 0):
                # First sub-sequence: its cluster labels become the initial tracks.
                subseq_labels_cpu = [l.cpu() for l in subseq_labels]
                next_track_label = track_container.add_labels(subseq['frames'], subseq_labels_cpu)
                subseq_clustering_meta_info.append(meta_info)
                continue
            # Associate this sub-sequence's clusters to existing tracks through
            # the frames shared with the previous sub-sequence.
            previous_subseq = subsequences[(i - 1)]
            overlapping_frames = sorted(list(set(subseq['frames']).intersection(set(previous_subseq['frames']))))
            overlapping_frame_existing_labels = track_container.get_labels(overlapping_frames)
            # NOTE: the comprehension variable `i` shadows the loop variable but does
            # not leak out of the comprehension in Python 3.
            overlapping_frames_current_labels = [subseq_labels[i] for (i, t) in enumerate(subseq['frames']) if (t in overlapping_frames)]
            (associations, _, _, _, _) = self.associate_clusters(overlapping_frame_existing_labels, overlapping_frames_current_labels)
            for (j, t) in enumerate(subseq['frames']):
                if (t in overlapping_frames):
                    # Overlapping frames already hold committed labels.
                    continue
                # Rewrite this frame's cluster labels to the associated track labels.
                for (associated_label, current_subseq_label) in associations:
                    subseq_labels[j] = torch.where((subseq_labels[j] == current_subseq_label), torch.tensor(associated_label).to(subseq_labels[j]), subseq_labels[j])
                subseq_labels_cpu = [l.cpu() for l in subseq_labels]
                next_track_label = track_container.add_labels([t], [subseq_labels_cpu[j]])
            # Reflect the association in the clustering meta info as well.
            for (associated_label, current_subseq_label) in associations:
                idx = meta_info['instance_labels'].index(current_subseq_label)
                meta_info['instance_labels'][idx] = associated_label
            subseq_clustering_meta_info.append(meta_info)
            # Release GPU memory held by this sub-sequence.
            subseq['embeddings'] = subseq['bandwidths'] = subseq['seediness'] = None
        return (track_container.get_track_mask_idxes(), mask_idxes, subseq_labels_list, fg_embeddings, subseq_clustering_meta_info)
    def cluster_subsequence(self, mask_idxes, embeddings, bandwidths, seediness, label_start, return_fg_embeddings):
        """
        Performs clustering within a sub-sequence.
        :param mask_idxes: list(T, tuple(tensor(M), tensor(M))
        :param embeddings: tensor(E, T, H, W)
        :param bandwidths: tensor(E, T, H, W) or None
        :param seediness: tensor(1, T, H, W) or None
        :param label_start: int
        :param return_fg_embeddings: bool
        :return: (per-frame label tensors, flattened fg embeddings, clustering meta info)
        """
        assert (len(mask_idxes) == embeddings.shape[1])
        # Reorder to (T, H, W, E) and split into per-frame tensors.
        embeddings = embeddings.permute(1, 2, 3, 0).unbind(0)
        # NOTE(review): the docstring allows bandwidths/seediness to be None, but
        # permute() below would fail on None -- confirm callers always pass tensors.
        bandwidths = bandwidths.permute(1, 2, 3, 0)
        bandwidths = bandwidths.unbind(0)
        seediness = seediness.permute(1, 2, 3, 0).unbind(0)
        (embeddings_flat, bandwidths_flat, seediness_flat, num_fg_embeddings) = ([], [], [], [])
        for (t, (mask_idxes_per_frame, embeddings_per_frame)) in enumerate(zip(mask_idxes, embeddings)):
            # Gather only the foreground points of each frame.
            embeddings_flat.append(embeddings_per_frame[mask_idxes_per_frame])
            num_fg_embeddings.append(mask_idxes_per_frame[0].numel())
            if bandwidths:
                bandwidths_flat.append(bandwidths[t][mask_idxes_per_frame])
            if seediness:
                seediness_flat.append(seediness[t][mask_idxes_per_frame])
        embeddings_flat = torch.cat(embeddings_flat)
        if bandwidths_flat:
            bandwidths_flat = torch.cat(bandwidths_flat)
        if seediness_flat:
            seediness_flat = torch.cat(seediness_flat)
        (cluster_labels, clustering_meta_info) = self.clusterer(embeddings_flat, bandwidths=bandwidths_flat, seediness=seediness_flat, cluster_label_start=label_start, return_label_masks=return_fg_embeddings)
        assert (cluster_labels.numel() == embeddings_flat.shape[0])
        # Split the flat labels back into per-frame chunks.
        return (list(cluster_labels.split(num_fg_embeddings, 0)), embeddings_flat, clustering_meta_info)
    def associate_clusters(self, labels_1, labels_2):
        """
        Associates clusters and resolves inconsistencies for a pair of labels for a given frame.
        :param labels_1: list(tensor(N, E)) or tensor -- existing, committed track labels.
        :param labels_2: list(tensor(N, E)) or tensor -- current sub-sequence cluster labels.
        :return: (associations, unassigned labels from 1, unassigned labels from 2,
            costs of the chosen assignment, (recall matrix, unique labels 1, unique labels 2))
        """
        if (not torch.is_tensor(labels_1)):
            labels_1 = torch.cat(labels_1).cuda()
        if (not torch.is_tensor(labels_2)):
            labels_2 = torch.cat(labels_2).cuda()
        assert (labels_1.shape == labels_2.shape), 'Shape mismatch: {}, {}'.format(labels_1.shape, labels_2.shape)
        # Outlier points never participate in the assignment.
        unique_labels_1 = list((set(labels_1.unique().tolist()) - {self.OUTLIER_LABEL}))
        unique_labels_2 = list((set(labels_2.unique().tolist()) - {self.OUTLIER_LABEL}))
        assert (not set(unique_labels_1).intersection(set(unique_labels_2))), 'Labels overlap: {}, {}'.format(unique_labels_1, unique_labels_2)
        association_costs = np.zeros((len(unique_labels_1), len(unique_labels_2)), np.float32)
        recall_12 = np.zeros((len(unique_labels_1), len(unique_labels_2)), np.float32)
        for (i1, i2) in [(i1, i2) for i1 in range(len(unique_labels_1)) for i2 in range(len(unique_labels_2))]:
            (l1, l2) = (unique_labels_1[i1], unique_labels_2[i2])
            l1_active_pts = (labels_1 == l1)
            l2_active_pts = (labels_2 == l2)
            intersection = (l1_active_pts & l2_active_pts).float().sum()
            union = (l1_active_pts | l2_active_pts).float().sum()
            iou = (intersection / union)
            # Cost = 1 - IoU so that higher overlap means cheaper assignment.
            association_costs[(i1, i2)] = (1.0 - iou.item())
            recall_12[(i1, i2)] = (intersection / l1_active_pts.sum(dtype=torch.float32))
        # Optimal one-to-one matching (Hungarian algorithm).
        (idxes_1, idxes_2) = linear_sum_assignment(association_costs)
        associations = []
        unassigned_labels_1 = set(unique_labels_1)
        unassigned_labels_2 = set(unique_labels_2)
        for (i1, i2) in zip(idxes_1, idxes_2):
            (l1, l2) = (unique_labels_1[i1], unique_labels_2[i2])
            associations.append((l1, l2))
            unassigned_labels_1.remove(l1)
            unassigned_labels_2.remove(l2)
        return (associations, unassigned_labels_1, unassigned_labels_2, association_costs[(idxes_1, idxes_2)], (recall_12, unique_labels_1, unique_labels_2))
|
def bbox_from_mask(mask):
    """Compute the tight (x_min, y_min, x_max, y_max) box around a binary mask.

    x_max/y_max are one past the last foreground column/row. Returns None for
    an all-background mask.
    """
    cols_any = np.any(mask, axis=0)
    rows_any = np.any(mask, axis=1)
    x_min = cols_any.argmax()
    # argmax of an all-False array is 0; distinguish that from a real hit at column 0.
    if x_min == 0 and not cols_any[0]:
        return None
    x_max = len(cols_any) - np.flip(cols_any, 0).argmax()
    y_min = rows_any.argmax()
    y_max = len(rows_any) - np.flip(rows_any, 0).argmax()
    return (x_min, y_min, x_max, y_max)
|
def annotate_instance(image, mask, color, text_label, font_size=0.5, draw_bbox=True):
    """
    Overlay an instance mask on an image and draw its bounding box plus a text label.
    :param image: np.ndarray(H, W, 3)
    :param mask: np.ndarray(H, W)
    :param color: tuple/list(int, int, int) in range [0, 255]
    :param text_label: str
    :param font_size: cv2 font scale
    :param draw_bbox: bool -- when True, also draw a white background box behind the text
    :return: np.ndarray(H, W, 3)
    """
    assert (image.shape[:2] == mask.shape), 'Shape mismatch between image {} and mask {}'.format(image.shape, mask.shape)
    color = tuple(color)
    annotated = overlay_mask_on_image(image, mask, mask_color=color)
    box = bbox_from_mask(mask)
    if not box:
        # Empty mask: return the plain overlay without box or text.
        return annotated
    xmin, ymin, xmax, ymax = box
    cv2.rectangle(annotated, (xmin, ymin), (xmax, ymax), color=color, thickness=2)
    (text_width, text_height), _ = cv2.getTextSize(text_label, cv2.FONT_HERSHEY_SIMPLEX, font_size, thickness=1)
    # Anchor the text just inside the top-left corner of the box.
    text_offset_x = int(xmin + 2)
    text_offset_y = int(ymin + text_height + 2)
    bg_pt1 = (int(text_offset_x), int(text_offset_y + 2))
    bg_pt2 = (int(text_offset_x + text_width + 2), int(text_offset_y - text_height - 2))
    if draw_bbox:
        cv2.rectangle(annotated, bg_pt1, bg_pt2, color=(255, 255, 255), thickness=(-1))
    cv2.putText(annotated, text_label, (text_offset_x, text_offset_y), cv2.FONT_HERSHEY_SIMPLEX, font_size, (0, 0, 0))
    return annotated
|
class Detection(object):
    """A single mask detection in one frame, stored with its RLE-encoded mask."""

    def __init__(self, frame_id, track_id, class_id, mask):
        self.frame_id = frame_id
        self.track_id = track_id
        self.class_id = class_id
        # COCO-style RLE dict: {'size': (H, W), 'counts': bytes}
        self._mask = mask

    def as_txt(self):
        """Serialize to the space-separated line format used by the result files."""
        fields = (self.frame_id, self.track_id, self.class_id,
                  self._mask['size'][0], self._mask['size'][1],
                  self._mask['counts'].decode('utf-8'))
        return '{} {} {} {} {} {}'.format(*fields)

    @property
    def mask(self):
        """Decoded binary mask."""
        return masktools.decode(self._mask)

    @property
    def pixel_area(self):
        """Number of foreground pixels in the mask."""
        return masktools.area(self._mask)

    @property
    def bbox_area(self):
        """Area of the mask's tight bounding box."""
        x, y, w, h = masktools.toBbox(self._mask)
        return w * h

    @property
    def pixel_bbox_area_ratio(self):
        """Fraction of the bounding box covered by mask pixels (0.0 for an empty box)."""
        if self.bbox_area == 0:
            return 0.0
        return float(self.pixel_area) / float(self.bbox_area)

    @classmethod
    def from_txt(cls, txt):
        """Parse one result-file line back into a Detection."""
        parts = txt.strip().split(' ')
        rle = {'size': (int(parts[3]), int(parts[4])), 'counts': parts[5].encode('utf-8')}
        return cls(int(parts[0]), int(parts[1]), int(parts[2]), rle)
|
def detections_to_tracks(detections):
    """Group detections by track ID; each track is sorted by frame number.

    Tracks are returned in first-seen order of their track IDs.
    """
    grouped = defaultdict(list)
    for detection in detections:
        grouped[detection.track_id].append(detection)
    return [sorted(dets, key=(lambda d: d.frame_id)) for dets in grouped.values()]
|
def compute_track_span(track):
    """Temporal span (inclusive frame count) covered by a non-empty track."""
    frame_ids = [det.frame_id for det in track]
    return max(frame_ids) - min(frame_ids) + 1
|
def compute_nbr_time_breaks(track):
    """Count temporal gaps (frame jumps > 1) between consecutive detections."""
    return sum(1 for prev, nxt in zip(track, track[1:])
               if (nxt.frame_id - prev.frame_id) > 1)
|
def filter_tracks_by_length(detections, min_track_length_car, min_track_length_person):
    """Drop entire tracks with fewer detections than the per-class minimum.

    Tracks of classes other than car/person are kept unconditionally.
    """
    kept = []
    for track in detections_to_tracks(detections):
        class_id = track[0].class_id
        if class_id == CAR_CLASS_ID and len(track) < min_track_length_car:
            continue
        if class_id == PERSON_CLASS_ID and len(track) < min_track_length_person:
            continue
        kept.extend(track)
    return kept
|
def filter_tracks_by_time_breaks(detections, max_time_break_ratio_car, max_time_break_ratio_person):
    """Drop tracks whose ratio of temporal gaps to detections exceeds the per-class limit.

    Tracks of classes other than car/person are kept unconditionally.
    """
    kept = []
    for track in detections_to_tracks(detections):
        break_ratio = float(compute_nbr_time_breaks(track)) / float(len(track))
        class_id = track[0].class_id
        if class_id == CAR_CLASS_ID and break_ratio > max_time_break_ratio_car:
            continue
        if class_id == PERSON_CLASS_ID and break_ratio > max_time_break_ratio_person:
            continue
        kept.extend(track)
    return kept
|
def filter_detections_by_area(detections, min_car_area, min_person_area):
    """Keep car/person detections whose pixel area meets the per-class minimum.

    Detections of any other class are discarded (as in the original predicate).
    """
    def keep(det):
        if det.class_id == CAR_CLASS_ID:
            return det.pixel_area >= min_car_area
        if det.class_id == PERSON_CLASS_ID:
            return det.pixel_area >= min_person_area
        return False
    return [det for det in detections if keep(det)]
|
def filter_detections_by_area_ratio(detections, min_ratio_cars, min_ratio_persons):
    """Keep car/person detections whose mask-to-box area ratio strictly exceeds
    the per-class minimum.

    Detections of any other class are discarded (as in the original predicate).
    """
    def keep(det):
        if det.class_id == CAR_CLASS_ID:
            return det.pixel_bbox_area_ratio > min_ratio_cars
        if det.class_id == PERSON_CLASS_ID:
            return det.pixel_bbox_area_ratio > min_ratio_persons
        return False
    return [det for det in detections if keep(det)]
|
def main(**kwargs):
    """Filter per-sequence result files and write the cleaned versions to a
    sibling output directory.

    Expects kwargs['results_dir'] containing files named like '0000.txt'.
    All other options fall back to the module-level DEFAULT_* thresholds.
    """
    results_dir = kwargs['results_dir']
    sequence_files = sorted(glob(os.path.join(results_dir, '????.txt')))
    output_dir = results_dir + '_{}'.format(kwargs.get('output_dir_suffix', 'nms'))
    os.makedirs(output_dir, exist_ok=True)
    # Resolve thresholds, falling back to the module defaults.
    min_area_car = kwargs.get('min_car_area', DEFAULT_MIN_AREA_CAR)
    min_area_pedestrian = kwargs.get('min_person_area', DEFAULT_MIN_AREA_PEDESTRIAN)
    min_area_ratio_car = kwargs.get('min_area_ratio_car', DEFAULT_MIN_AREA_RATIO_CAR)
    min_area_ratio_pedestrian = kwargs.get('min_area_ratio_person', DEFAULT_MIN_AREA_RATIO_PEDESTRIAN)
    min_track_length_car = kwargs.get('min_track_length_car', DEFAULT_MIN_TRACK_LENGTH_CAR)
    min_track_length_pedestrian = kwargs.get('min_track_length_person', DEFAULT_MIN_TRACK_LENGTH_PEDESTRIAN)
    max_time_break_ratio_car = kwargs.get('max_time_break_ratio_car', DEFAULT_MAX_TIME_BREAK_RATIO_CAR)
    max_time_break_ratio_pedestrian = kwargs.get('max_time_break_ratio_person', DEFAULT_MAX_TIME_BREAK_RATIO_PEDESTRIAN)
    for seq_path in sequence_files:
        seq_file_name = os.path.split(seq_path)[-1]
        print('Processing {}'.format(seq_file_name))
        with open(seq_path, 'r') as fh:
            detections = [Detection.from_txt(line) for line in fh.readlines()]
        # Apply filters in the original order: per-detection first, then per-track.
        detections = filter_detections_by_area(detections, min_area_car, min_area_pedestrian)
        detections = filter_detections_by_area_ratio(detections, min_area_ratio_car, min_area_ratio_pedestrian)
        detections = filter_tracks_by_time_breaks(detections, max_time_break_ratio_car, max_time_break_ratio_pedestrian)
        detections = filter_tracks_by_length(detections, min_track_length_car, min_track_length_pedestrian)
        with open(os.path.join(output_dir, seq_file_name), 'w') as fh:
            fh.writelines([det.as_txt() + '\n' for det in detections])
    print('Results after applying NMS written to: {}'.format(output_dir))
|
def build_resnet_fpn_backbone(cfg):
    """Assemble a ResNet body topped by an FPN as a single sequential module."""
    body = resnet.ResNet(cfg)
    stage2_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
    # Stage channel counts double per stage: C2, 2*C2, 4*C2, 8*C2.
    in_channels_list = [stage2_channels * factor for factor in (1, 2, 4, 8)]
    fpn = fpn_module.FPN(
        in_channels_list=in_channels_list,
        out_channels=out_channels,
        conv_block=conv_with_kaiming_uniform(cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU),
    )
    model = nn.Sequential(OrderedDict([('body', body), ('fpn', fpn)]))
    model.out_channels = out_channels
    # 2D backbone: downstream code checks this flag.
    model.is_3d = False
    return model
|
class FPN(nn.Module):
    """
    Module that adds FPN on top of a list of feature maps.
    The feature maps are currently supposed to be in increasing depth
    order, and must be consecutive
    """
    def __init__(self, in_channels_list, out_channels, conv_block, top_blocks=None):
        """
        Arguments:
            in_channels_list (list[int]): number of channels for each feature map that
                will be fed
            out_channels (int): number of channels of the FPN representation
            conv_block (callable): conv_block(in_channels, out_channels, kernel_size[, stride])
                factory used to build the lateral (1x1) and output (3x3) convolutions
            top_blocks (nn.Module or None): if provided, an extra operation will
                be performed on the output of the last (smallest resolution)
                FPN output, and the result will extend the result list
        """
        super(FPN, self).__init__()
        self.inner_blocks = []
        self.layer_blocks = []
        # Enumerate from 1 so the module names mirror stage numbering (fpn_inner1, ...).
        # Fix: removed the unreachable `if idx < 0: continue` guard -- idx always starts at 1.
        for idx, in_channels in enumerate(in_channels_list, 1):
            inner_block = 'fpn_inner{}'.format(idx)
            layer_block = 'fpn_layer{}'.format(idx)
            if in_channels == 0:
                # A zero-channel entry marks an unused level; skip it entirely.
                continue
            inner_block_module = conv_block(in_channels, out_channels, 1)
            layer_block_module = conv_block(out_channels, out_channels, 3, 1)
            self.add_module(inner_block, inner_block_module)
            self.add_module(layer_block, layer_block_module)
            self.inner_blocks.append(inner_block)
            self.layer_blocks.append(layer_block)
        self.top_blocks = top_blocks
        # Number of highest-resolution input maps to ignore in forward().
        self.to_discard = 0
    def forward(self, x):
        """
        Arguments:
            x (list[Tensor]): feature maps for each feature level.
        Returns:
            results (tuple[Tensor]): feature maps after FPN layers.
                They are ordered from highest resolution first.
        """
        # Start at the deepest (lowest-resolution) level ...
        last_inner = getattr(self, self.inner_blocks[-1])(x[-1])
        results = []
        results.append(getattr(self, self.layer_blocks[-1])(last_inner))
        # ... then walk towards higher resolutions, merging the upsampled
        # top-down path with each lateral connection.
        for feature, inner_block, layer_block in zip(x[self.to_discard:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1]):
            if not inner_block:
                continue
            inner_top_down = F.interpolate(last_inner, scale_factor=2, mode='bilinear', align_corners=False)
            inner_lateral = getattr(self, inner_block)(feature)
            last_inner = inner_lateral + inner_top_down
            results.insert(0, getattr(self, layer_block)(last_inner))
        return tuple(results)
|
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return (_NewEmptyTensorOp.apply(grad, shape), None)
|
class Conv2d(torch.nn.Conv2d):
    """torch.nn.Conv2d that also tolerates zero-element inputs.

    For an empty input it returns a correctly-shaped empty tensor instead of
    dispatching to the convolution kernel.
    """
    def forward(self, x):
        if x.numel() == 0:
            # Standard convolution output-size arithmetic, applied per spatial dim.
            spatial_dims = []
            for size, pad, dil, kernel, stride in zip(x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride):
                spatial_dims.append((size + 2 * pad - (dil * (kernel - 1) + 1)) // stride + 1)
            empty_shape = [x.shape[0], self.weight.shape[0]] + spatial_dims
            return _NewEmptyTensorOp.apply(x, empty_shape)
        return super(Conv2d, self).forward(x)
|
class FrozenBatchNorm2d(nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters
    are fixed
    """
    def __init__(self, n, epsilon=0.0):
        super(FrozenBatchNorm2d, self).__init__()
        # All parameters live as buffers: part of the state dict, but never
        # touched by the optimizer.
        self.register_buffer('weight', torch.ones(n))
        self.register_buffer('bias', torch.zeros(n))
        self.register_buffer('running_mean', torch.zeros(n))
        self.register_buffer('running_var', torch.ones(n))
        self.epsilon = epsilon

    def forward(self, x):
        if x.dtype == torch.float16:
            # Convert the frozen buffers to match a half-precision input.
            self.weight = self.weight.half()
            self.bias = self.bias.half()
            self.running_mean = self.running_mean.half()
            self.running_var = self.running_var.half()
        # Fold normalization and affine transform into a single scale + shift.
        scale = self.weight * (self.running_var + self.epsilon).rsqrt()
        shift = self.bias - self.running_mean * scale
        return x * scale.reshape(1, -1, 1, 1) + shift.reshape(1, -1, 1, 1)
|
def conv_with_kaiming_uniform(use_gn=False, use_relu=False):
    """Return a conv-layer factory using Kaiming-uniform initialization.

    The factory builds Conv2d(+GroupNorm)(+ReLU) stacks; the conv bias is
    dropped when group norm supplies its own shift term.
    """
    def make_conv(in_channels, out_channels, kernel_size, stride=1, dilation=1):
        conv = Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=(dilation * (kernel_size - 1)) // 2,
            dilation=dilation,
            bias=not use_gn,
        )
        nn.init.kaiming_uniform_(conv.weight, a=1)
        if not use_gn:
            nn.init.constant_(conv.bias, 0)
        layers = [conv]
        if use_gn:
            layers.append(group_norm(out_channels))
        if use_relu:
            layers.append(nn.ReLU(inplace=True))
        if len(layers) > 1:
            return nn.Sequential(*layers)
        return conv
    return make_conv
|
class ResNet(nn.Module):
    """ResNet backbone assembled from the stage specs selected by the config."""

    def __init__(self, cfg):
        super(ResNet, self).__init__()
        stage_specs = _STAGE_SPECS[cfg.MODEL.BACKBONE.TYPE]
        self.stem = StemWithFixedBatchNorm(cfg)
        num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
        width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
        stage2_bottleneck_channels = num_groups * width_per_group
        stage2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
        in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
        self.stages = []
        # Maps stage name -> whether its output is returned by forward().
        self.return_features = {}
        for spec in stage_specs:
            name = 'layer' + str(spec.index)
            # Channel counts double with every stage after stage 2.
            width_factor = 2 ** (spec.index - 1)
            bottleneck_channels = stage2_bottleneck_channels * width_factor
            out_channels = stage2_out_channels * width_factor
            # Stage 2 keeps stride 1; later stages downsample with stride 2.
            first_stride = int(spec.index > 1) + 1
            module = _make_stage(BottleneckWithFixedBatchNorm, in_channels, bottleneck_channels, out_channels, spec.block_count, num_groups, cfg.MODEL.RESNETS.STRIDE_IN_1X1, first_stride=first_stride)
            in_channels = out_channels
            self.add_module(name, module)
            self.stages.append(name)
            self.return_features[name] = spec.return_features
        self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_AT_STAGE)

    def _freeze_backbone(self, freeze_at):
        # freeze_at < 0 disables freezing entirely; stage 0 is the stem.
        if freeze_at < 0:
            return
        for stage_index in range(freeze_at):
            module = self.stem if stage_index == 0 else getattr(self, 'layer' + str(stage_index))
            for param in module.parameters():
                param.requires_grad = False

    def forward(self, x):
        x = self.stem(x)
        outputs = []
        for stage_name in self.stages:
            x = getattr(self, stage_name)(x)
            if self.return_features[stage_name]:
                outputs.append(x)
        return outputs
|
class ResNetHead(nn.Module):
    """Standalone stack of ResNet stages, e.g. for use as an RoI head."""

    def __init__(self, block_module, stages, num_groups=1, width_per_group=64, stride_in_1x1=True, stride_init=None, res2_out_channels=256, dilation=1):
        super(ResNetHead, self).__init__()
        width_factor = 2 ** (stages[0].index - 1)
        stage2_bottleneck_channels = num_groups * width_per_group
        out_channels = res2_out_channels * width_factor
        in_channels = out_channels // 2
        bottleneck_channels = stage2_bottleneck_channels * width_factor
        self.stages = []
        stride = stride_init
        for stage in stages:
            name = 'layer' + str(stage.index)
            if not stride:
                # Default: stride 2 for stages after stage 2, else stride 1.
                stride = int(stage.index > 1) + 1
            # NOTE(review): `block_module` is accepted but unused -- the block type
            # is hard-coded to BottleneckWithFixedBatchNorm, as in the original.
            module = _make_stage(BottleneckWithFixedBatchNorm, in_channels, bottleneck_channels, out_channels, stage.block_count, num_groups, stride_in_1x1, first_stride=stride, dilation=dilation)
            # Only the first stage honours stride_init; later stages use the default.
            stride = None
            self.add_module(name, module)
            self.stages.append(name)
        self.out_channels = out_channels

    def forward(self, x):
        for name in self.stages:
            x = getattr(self, name)(x)
        return x
|
def _make_stage(transformation_module, in_channels, bottleneck_channels, out_channels, block_count, num_groups, stride_in_1x1, first_stride, dilation=1):
blocks = []
stride = first_stride
for _ in range(block_count):
blocks.append(transformation_module(in_channels, bottleneck_channels, out_channels, num_groups, stride_in_1x1, stride, dilation=dilation))
stride = 1
in_channels = out_channels
return nn.Sequential(*blocks)
|
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck block (1x1 -> 3x3 -> 1x1) with a pluggable
    norm_func and an optional projection shortcut.

    Note the ordering in __init__: the downsample branch is created using the
    original `stride` value *before* `stride` is reset to 1 for dilated blocks.
    """
    def __init__(self, in_channels, bottleneck_channels, out_channels, num_groups, stride_in_1x1, stride, dilation, norm_func):
        super(Bottleneck, self).__init__()
        self.downsample = None
        if (in_channels != out_channels):
            # Projection shortcut; dilated blocks keep the spatial size, so stride 1.
            down_stride = (stride if (dilation == 1) else 1)
            self.downsample = nn.Sequential(Conv2d(in_channels, out_channels, kernel_size=1, stride=down_stride, bias=False), norm_func(out_channels))
            for modules in [self.downsample]:
                for l in modules.modules():
                    if isinstance(l, Conv2d):
                        nn.init.kaiming_uniform_(l.weight, a=1)
        if (dilation > 1):
            # Dilated blocks do not downsample.
            stride = 1
        # Place the stride on the 1x1 or 3x3 conv depending on the variant
        # (stride_in_1x1=True matches the original MSRA ResNet).
        (stride_1x1, stride_3x3) = ((stride, 1) if stride_in_1x1 else (1, stride))
        self.conv1 = Conv2d(in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False)
        self.bn1 = norm_func(bottleneck_channels)
        self.conv2 = Conv2d(bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=dilation, bias=False, groups=num_groups, dilation=dilation)
        nn.init.kaiming_uniform_(self.conv2.weight, a=1)
        self.bn2 = norm_func(bottleneck_channels)
        self.conv3 = Conv2d(bottleneck_channels, out_channels, kernel_size=1, bias=False)
        self.bn3 = norm_func(out_channels)
        for l in [self.conv1, self.conv3]:
            nn.init.kaiming_uniform_(l.weight, a=1)
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = F.relu_(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = F.relu_(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if (self.downsample is not None):
            # Project the identity to the new channel count / resolution.
            identity = self.downsample(x)
        out += identity
        out = F.relu_(out)
        return out
|
class BaseStem(nn.Module):
    """ResNet stem: 7x7 stride-2 conv, norm, ReLU, then a 3x3 stride-2 max-pool
    (4x spatial downsampling overall)."""

    def __init__(self, cfg, norm_func):
        super(BaseStem, self).__init__()
        out_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
        self.conv1 = Conv2d(3, out_channels, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_func(out_channels)
        nn.init.kaiming_uniform_(self.conv1.weight, a=1)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu_(x)
        return F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
|
class BottleneckWithFixedBatchNorm(Bottleneck):
    """Bottleneck block whose normalization layers are frozen batch norms."""

    def __init__(self, in_channels, bottleneck_channels, out_channels, num_groups=1, stride_in_1x1=True, stride=1, dilation=1):
        super(BottleneckWithFixedBatchNorm, self).__init__(
            in_channels=in_channels,
            bottleneck_channels=bottleneck_channels,
            out_channels=out_channels,
            num_groups=num_groups,
            stride_in_1x1=stride_in_1x1,
            stride=stride,
            dilation=dilation,
            norm_func=FrozenBatchNorm2d,
        )
|
class StemWithFixedBatchNorm(BaseStem):
    """ResNet stem using frozen batch norm for its normalization layer."""

    def __init__(self, cfg):
        super(StemWithFixedBatchNorm, self).__init__(cfg, norm_func=FrozenBatchNorm2d)
|
def get_pooling_layer_creator(PoolType):
    """Return 3 layer factories, one per temporal squeeze step of the decoder.

    Depending on cfg.INPUT.NUM_FRAMES, each step either really pools (PoolType)
    or passes through unchanged (nn.Identity).
    """
    def make_pool(*args, **kwargs):
        return PoolType(*args, **kwargs)

    def make_identity(*args, **kwargs):
        return nn.Identity(*args, **kwargs)

    num_frames = cfg.INPUT.NUM_FRAMES
    if num_frames == 2:
        return [make_identity, make_identity, make_identity]
    if num_frames == 4:
        return [make_pool, make_identity, make_identity]
    if num_frames == 8:
        return [make_pool, make_pool, make_identity]
    if num_frames in (16, 24, 32):
        return [make_pool, make_pool, make_pool]
    raise NotImplementedError()
|
def get_temporal_scales():
    """Per-stage temporal upsampling factors for the decoder, derived from
    cfg.INPUT.NUM_FRAMES.

    Returns:
        list of 3 ints, one scale per decoder upsampling stage (matching the
        pooling configuration chosen by get_pooling_layer_creator).

    Raises:
        NotImplementedError: for unsupported frame counts. Previously this
        silently returned None, inconsistent with get_pooling_layer_creator.
    """
    if (cfg.INPUT.NUM_FRAMES == 2):
        return [1, 1, 1]
    elif (cfg.INPUT.NUM_FRAMES == 4):
        return [1, 1, 2]
    elif (cfg.INPUT.NUM_FRAMES == 8):
        return [1, 2, 2]
    elif (cfg.INPUT.NUM_FRAMES in (16, 24, 32)):
        return [2, 2, 2]
    else:
        raise NotImplementedError()
|
class AtrousPyramid3D(nn.Module):
    """Parallel 3D atrous convolutions concatenated channel-wise, optionally
    followed by a ReLU + 1x1x1 fusion convolution."""

    def __init__(self, in_channels, pyramid_channels, dilation_rates, out_channels=None, include_1x1_conv=True):
        super().__init__()
        branch_channels = [pyramid_channels for _ in dilation_rates]
        branches = []
        for channels, rate in zip(branch_channels, dilation_rates):
            # padding == dilation keeps the (T, H, W) extent for 3x3x3 kernels.
            branches.append(nn.Conv3d(in_channels, channels, 3, padding=rate, dilation=rate, bias=False))
        total_channels = sum(branch_channels)
        if include_1x1_conv:
            # Extra pointwise branch operating on the raw input.
            branches.append(nn.Conv3d(in_channels, branch_channels[0], 1, bias=False))
            total_channels += branch_channels[0]
        self.atrous_convs = nn.ModuleList(branches)
        if out_channels:
            self.conv_out = nn.Sequential(nn.ReLU(inplace=True), nn.Conv3d(total_channels, out_channels, 1, bias=False))
        else:
            self.conv_out = nn.Identity()

    def forward(self, x):
        pyramid = [branch(x) for branch in self.atrous_convs]
        return self.conv_out(torch.cat(pyramid, dim=1))
|
class UpsampleTrilinear3D(nn.Module):
    """Module wrapper around F.interpolate with mode='trilinear'."""

    def __init__(self, size=None, scale_factor=None, align_corners=None):
        super().__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.align_corners = align_corners

    def forward(self, x):
        return F.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode='trilinear', align_corners=self.align_corners)
|
@EMBEDDING_HEAD_REGISTRY.add('squeeze_expand_decoder')
class SqueezingExpandDecoder(nn.Module):
def __init__(self, in_channels, inter_channels, embedding_size, tanh_activation, seediness_output, experimental_dims, ConvType=nn.Conv3d, PoolType=nn.AvgPool3d, NormType=nn.Identity):
super().__init__()
PoolingLayerCallbacks = get_pooling_layer_creator(PoolType)
self.block_32x = nn.Sequential(ConvType(in_channels, inter_channels[0], 3, stride=1, padding=1, dilation=1), NormType(inter_channels[0]), nn.ReLU(inplace=True), PoolingLayerCallbacks[0](3, stride=(2, 1, 1), padding=1), ConvType(inter_channels[0], inter_channels[0], 3, stride=1, padding=1, dilation=1), NormType(inter_channels[0]), nn.ReLU(inplace=True), PoolingLayerCallbacks[1](3, stride=(2, 1, 1), padding=1), ConvType(inter_channels[0], inter_channels[0], 3, stride=1, padding=1, dilation=1), NormType(inter_channels[0]), nn.ReLU(inplace=True), PoolingLayerCallbacks[2](3, stride=(2, 1, 1), padding=1))
self.block_16x = nn.Sequential(ConvType(in_channels, inter_channels[1], 3, stride=1, padding=1), NormType(inter_channels[1]), nn.ReLU(inplace=True), PoolingLayerCallbacks[0](3, stride=(2, 1, 1), padding=1), ConvType(inter_channels[1], inter_channels[1], 3, stride=1, padding=1), NormType(inter_channels[1]), nn.ReLU(inplace=True), PoolingLayerCallbacks[1](3, stride=(2, 1, 1), padding=1))
self.block_8x = nn.Sequential(ConvType(in_channels, inter_channels[2], 3, stride=1, padding=1), NormType(inter_channels[2]), nn.ReLU(inplace=True), PoolingLayerCallbacks[0](3, stride=(2, 1, 1), padding=1))
self.block_4x = nn.Sequential(ConvType(in_channels, inter_channels[3], 3, stride=1, padding=1), NormType(inter_channels[3]), nn.ReLU(inplace=True))
t_scales = get_temporal_scales()
self.upsample_32_to_16 = nn.Sequential(UpsampleTrilinear3D(scale_factor=(t_scales[0], 2, 2), align_corners=False))
self.conv_16 = nn.Conv3d((inter_channels[0] + inter_channels[1]), inter_channels[1], 1, bias=False)
self.upsample_16_to_8 = nn.Sequential(UpsampleTrilinear3D(scale_factor=(t_scales[1], 2, 2), align_corners=False))
self.conv_8 = nn.Conv3d((inter_channels[1] + inter_channels[2]), inter_channels[2], 1, bias=False)
self.upsample_8_to_4 = nn.Sequential(UpsampleTrilinear3D(scale_factor=(t_scales[2], 2, 2), align_corners=False))
self.conv_4 = nn.Conv3d((inter_channels[2] + inter_channels[3]), inter_channels[3], 1, bias=False)
self.embedding_size = embedding_size
n_free_dims = get_nb_free_dims(experimental_dims)
self.variance_channels = (self.embedding_size - n_free_dims)
self.embedding_dim_mode = experimental_dims
embedding_output_size = get_nb_embedding_dims(self.embedding_dim_mode)
self.conv_embedding = nn.Conv3d(inter_channels[(- 1)], embedding_output_size, kernel_size=1, padding=0, bias=False)
self.conv_variance = nn.Conv3d(inter_channels[(- 1)], self.variance_channels, kernel_size=1, padding=0, bias=True)
(self.conv_seediness, self.seediness_channels) = (None, 0)
if seediness_output:
self.conv_seediness = nn.Conv3d(inter_channels[(- 1)], 1, kernel_size=1, padding=0, bias=False)
self.seediness_channels = 1
self.tanh_activation = tanh_activation
self.register_buffer('time_scale', torch.tensor(1.0, dtype=torch.float32))
def forward(self, x):
    """
    Fuse 4 multiscale feature maps coarse-to-fine and predict embeddings.

    :param x: list of 4 feature map tensors of shape [N, C, T, H, W], in
        increasing order of spatial dimensions.
    :return: map of shape [N, E(+V)(+1), T, H, W] — embeddings, variances and,
        if a seediness conv exists, a seediness channel, concatenated on dim 1.
    """
    assert len(x) == 4, 'Expected 4 feature maps, got {}'.format(len(x))
    f32, f16, f8, f4 = x
    # Decoder path: start at the coarsest scale, then repeatedly upsample and
    # fuse (concat + 1x1 conv) with the lateral branch of the next finer scale.
    out = self.upsample_32_to_16(self.block_32x(f32))
    out = self.conv_16(torch.cat((out, self.block_16x(f16)), 1))
    out = self.upsample_16_to_8(out)
    out = self.conv_8(torch.cat((out, self.block_8x(f8)), 1))
    out = self.upsample_8_to_4(out)
    out = self.conv_4(torch.cat((out, self.block_4x(f4)), 1))
    embeddings = self.conv_embedding(out)
    if self.tanh_activation:
        embeddings = (embeddings * 0.25).tanh()
    embeddings = add_spatiotemporal_offset(embeddings, self.time_scale, self.embedding_dim_mode)
    variances = self.conv_variance(out)
    parts = [embeddings, variances]
    if self.conv_seediness is not None:
        parts.append(self.conv_seediness(out).sigmoid())
    return torch.cat(parts, dim=1)
|
class SqueezingExpandDilatedDecoder(nn.Module):
    """Multi-scale decoder using atrous (dilated) pyramid blocks at the two
    coarsest scales. Fuses 4 feature maps coarse-to-fine and outputs per-pixel
    embeddings, variances and (optionally) seediness, concatenated on dim 1.
    """

    def __init__(self, in_channels, inter_channels, embedding_size, tanh_activation, seediness_output, experimental_dims, ConvType=nn.Conv3d, PoolType=nn.AvgPool3d, NormType=nn.Identity):
        super().__init__()
        PoolingLayerCallbacks = get_pooling_layer_creator(PoolType)
        self.block_32x = nn.Sequential(
            AtrousPyramid3D(in_channels, 64, ((1, 3, 3), (1, 6, 6), (1, 9, 9)), inter_channels[0]),
            NormType(inter_channels[0]),
            nn.ReLU(inplace=True),
            PoolingLayerCallbacks[0]((3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0)),
            AtrousPyramid3D(inter_channels[0], 64, ((1, 3, 3), (1, 6, 6), (1, 9, 9)), inter_channels[0]),
            NormType(inter_channels[0]),
            nn.ReLU(inplace=True),
            PoolingLayerCallbacks[1]((3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0)),
            AtrousPyramid3D(inter_channels[0], 64, ((1, 3, 3), (1, 6, 6), (1, 9, 9)), inter_channels[0]),
            NormType(inter_channels[0]),
            nn.ReLU(inplace=True),
            PoolingLayerCallbacks[2]((3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0)))
        # BUGFIX: the second pyramid's input channel count was `in_channels`,
        # but it actually receives the output of the first pyramid, which has
        # `inter_channels[1]` channels (mirrors the pattern in block_32x above).
        self.block_16x = nn.Sequential(
            AtrousPyramid3D(in_channels, 64, ((1, 4, 4), (1, 8, 8), (1, 12, 12)), inter_channels[1]),
            NormType(inter_channels[1]),
            nn.ReLU(inplace=True),
            PoolingLayerCallbacks[0]((3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0)),
            AtrousPyramid3D(inter_channels[1], 64, ((1, 4, 4), (1, 8, 8), (1, 12, 12)), inter_channels[1]),
            NormType(inter_channels[1]),
            nn.ReLU(inplace=True),
            PoolingLayerCallbacks[1]((3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0)))
        self.block_8x = nn.Sequential(
            ConvType(in_channels, inter_channels[2], 3, stride=1, padding=1),
            NormType(inter_channels[2]),
            nn.ReLU(inplace=True),
            PoolingLayerCallbacks[0](3, stride=(2, 1, 1), padding=1))
        self.block_4x = nn.Sequential(
            ConvType(in_channels, inter_channels[3], 3, stride=1, padding=1),
            NormType(inter_channels[3]),
            nn.ReLU(inplace=True))
        t_scales = get_temporal_scales()
        self.upsample_32_to_16 = nn.Sequential(UpsampleTrilinear3D(scale_factor=(t_scales[0], 2, 2), align_corners=False))
        self.conv_16 = nn.Conv3d(inter_channels[0] + inter_channels[1], inter_channels[1], 1, bias=False)
        self.upsample_16_to_8 = nn.Sequential(UpsampleTrilinear3D(scale_factor=(t_scales[1], 2, 2), align_corners=False))
        self.conv_8 = nn.Conv3d(inter_channels[1] + inter_channels[2], inter_channels[2], 1, bias=False)
        self.upsample_8_to_4 = nn.Sequential(UpsampleTrilinear3D(scale_factor=(t_scales[2], 2, 2), align_corners=False))
        self.conv_4 = nn.Conv3d(inter_channels[2] + inter_channels[3], inter_channels[3], 1, bias=False)
        self.embedding_size = embedding_size
        # "Free" dimensions receive no coordinate offset (see
        # add_spatiotemporal_offset) and carry no variance channel.
        n_free_dims = get_nb_free_dims(experimental_dims)
        self.variance_channels = self.embedding_size - n_free_dims
        self.experimental_dim_mode = experimental_dims
        embedding_output_size = get_nb_embedding_dims(self.experimental_dim_mode)
        self.conv_embedding = nn.Conv3d(inter_channels[-1], embedding_output_size, kernel_size=1, padding=0, bias=False)
        self.conv_variance = nn.Conv3d(inter_channels[-1], self.variance_channels, kernel_size=1, padding=0, bias=True)
        self.conv_seediness, self.seediness_channels = None, 0
        if seediness_output:
            self.conv_seediness = nn.Conv3d(inter_channels[-1], 1, kernel_size=1, padding=0, bias=False)
            self.seediness_channels = 1
        self.tanh_activation = tanh_activation
        self.register_buffer('time_scale', torch.tensor(1.0, dtype=torch.float32))

    def forward(self, x):
        """
        :param x: list of multiscale feature map tensors of shape [N, C, T, H, W]. For this implementation, there
            should be 4 features maps in increasing order of spatial dimensions
        :return: embedding map of shape [N, E, T, H, W]
        """
        assert len(x) == 4
        feat_map_32x, feat_map_16x, feat_map_8x, feat_map_4x = x
        # Coarse-to-fine fusion: upsample, concat lateral branch, 1x1 conv.
        feat_map_32x = self.block_32x(feat_map_32x)
        x = self.upsample_32_to_16(feat_map_32x)
        feat_map_16x = self.block_16x(feat_map_16x)
        x = torch.cat((x, feat_map_16x), 1)
        x = self.conv_16(x)
        x = self.upsample_16_to_8(x)
        feat_map_8x = self.block_8x(feat_map_8x)
        x = torch.cat((x, feat_map_8x), 1)
        x = self.conv_8(x)
        x = self.upsample_8_to_4(x)
        feat_map_4x = self.block_4x(feat_map_4x)
        x = torch.cat((x, feat_map_4x), 1)
        x = self.conv_4(x)
        embeddings = self.conv_embedding(x)
        if self.tanh_activation:
            embeddings = (embeddings * 0.25).tanh()
        embeddings = add_spatiotemporal_offset(embeddings, self.time_scale, self.experimental_dim_mode)
        variances = self.conv_variance(x)
        if self.conv_seediness is not None:
            seediness = self.conv_seediness(x).sigmoid()
            output = torch.cat((embeddings, variances, seediness), dim=1)
        else:
            output = torch.cat((embeddings, variances), dim=1)
        return output
|
def get_nb_embedding_dims(mode):
    """Return the number of embedding channels for an experimental-dims mode string."""
    dims_by_mode = {
        'xy': 2, 'ff': 2,
        'xyt': 3, 'xyf': 3,
        'xytf': 4, 'xyff': 4,
        'xytff': 5, 'xyfff': 5,
    }
    if mode not in dims_by_mode:
        raise ValueError('Invalid experimental embedding mode: {}'.format(mode))
    return dims_by_mode[mode]
|
def get_nb_free_dims(mode):
    """Return how many trailing 'free' (offset-less) dims the mode declares; 0 for unknown modes."""
    free_dims_by_mode = {'xyf': 1, 'xytf': 1, 'xyff': 2, 'xytff': 2, 'xyfff': 3}
    return free_dims_by_mode.get(mode, 0)
|
@torch.no_grad()
def creat_spatiotemporal_grid(height, width, time, t_scale, dtype=torch.float32, device='cpu'):
    """Build a dense (t, y, x) coordinate grid of shape [time, height, width].

    The longer spatial side spans [-r, r] with r = long/short >= 1 while the
    shorter spans [-1, 1] (aspect-ratio preserving); t spans [-t_scale, t_scale].
    """
    x_extent = max(1.0, width / float(height))
    y_extent = max(1.0, height / float(width))

    def _axis(extent, steps):
        # built as float32 first, then cast — matches the original behavior
        return torch.linspace(-extent, extent, steps, dtype=torch.float32, device=device).to(dtype=dtype)

    t, y, x = torch.meshgrid(_axis(t_scale, time), _axis(y_extent, height), _axis(x_extent, width))
    return t, y, x
|
def add_spatiotemporal_offset(embeddings, time_scale, mode):
    """Add fixed (t, y, x) coordinate values onto the leading embedding channels.

    Channels corresponding to 'f' (free) dimensions in `mode` receive a zero
    offset; mode 'ff' has no coordinate channels at all and is returned as-is.

    :param embeddings: tensor of shape [N, C, T, H, W]
    :param time_scale: scalar extent of the temporal coordinate axis
    :param mode: experimental embedding mode string, e.g. 'xy', 'xytf'
    :return: embeddings with the coordinate grid added (same shape)
    """
    N, C, T, H, W = embeddings.shape
    t, y, x = creat_spatiotemporal_grid(H, W, T, time_scale, embeddings.dtype, embeddings.device)
    if mode == 'ff':
        # fully free embedding: no coordinate offset at all
        return embeddings
    if mode == 'x':
        # single coordinate channel, broadcast over the channel dimension
        with torch.no_grad():
            grid = x.unsqueeze(0)
        return embeddings + grid.detach()
    # mode -> (coordinate channels, number of trailing zero-offset channels)
    layouts = {
        'xy': ((y, x), 0),
        'xyt': ((t, y, x), 0),
        'xyf': ((y, x), 1),
        'xytf': ((t, y, x), 1),
        'xyff': ((y, x), 2),
        'xytff': ((t, y, x), 2),
        'xyfff': ((y, x), 3),
        'xyffff': ((y, x), 4),
    }
    if mode not in layouts:
        raise ValueError('Invalid experimental embedding mode: {}'.format(mode))
    coords, num_free = layouts[mode]
    with torch.no_grad():
        if num_free:
            zeros = torch.zeros_like(x)
            coords = coords + (zeros,) * num_free
        grid = torch.stack(coords, dim=0)
        grid = grid.unsqueeze(0).expand(N, -1, -1, -1, -1)
    return embeddings + grid.detach()
|
class CrossEntropyLoss(nn.Module):
    """Semantic segmentation cross-entropy loss with per-pixel ignore masks."""

    def __init__(self):
        super().__init__()

    def forward(self, semseg_logits, targets, output_dict):
        """
        Computes the semantic segmentation loss
        :param semseg_logits: tensor of shape [N, T, cls, H, W]
        :param targets: list(dict(tensors))
        :param output_dict: dict into which the scalar loss entries are written
        :return: None (losses are stored in output_dict)
        """
        loss = 0.0
        for pred_semseg_logits_per_seq, targets_per_seq in zip(semseg_logits, targets):
            gt_semseg_masks_per_seq = targets_per_seq['semseg_masks']
            ignore_masks_per_seq = targets_per_seq['ignore_masks']
            assert gt_semseg_masks_per_seq.shape[-2:] == pred_semseg_logits_per_seq.shape[-2:], 'Shape mismatch between ground truth semseg masks {} and predicted semseg masks {}'.format(gt_semseg_masks_per_seq.shape, pred_semseg_logits_per_seq.shape)
            assert gt_semseg_masks_per_seq.shape[-2:] == ignore_masks_per_seq.shape[-2:], 'Shape mismatch between ground truth semseg masks {} and ignore masks {} '.format(gt_semseg_masks_per_seq.shape, ignore_masks_per_seq.shape)
            # BUGFIX: reduction='none' is required so the per-pixel loss can be
            # masked. With the previous default 'mean' reduction the loss was a
            # scalar, so multiplying by the mask and renormalizing was a no-op
            # and the ignore mask had no effect (compare compute_fg_loss, which
            # already used reduction='none').
            seq_loss = F.cross_entropy(pred_semseg_logits_per_seq, gt_semseg_masks_per_seq, reduction='none')
            with torch.no_grad():
                nonignore_masks_per_seq = 1.0 - ignore_masks_per_seq.float()
            seq_loss = seq_loss * nonignore_masks_per_seq
            seq_loss = seq_loss.sum() / nonignore_masks_per_seq.sum().detach()
            loss = loss + seq_loss
        loss = loss / len(targets)
        output_dict[ModelOutputConsts.OTHERS][LossConsts.SEMSEG] = loss
        output_dict[ModelOutputConsts.OPTIMIZATION_LOSSES][LossConsts.SEMSEG] = loss * cfg.TRAINING.LOSSES.WEIGHT_SEMSEG
|
class TrainingModel(nn.Module):
    """End-to-end training wrapper: runs the backbone, feeds the multi-scale
    features to the embedding / seediness / semseg heads, and computes losses
    into the output dict.
    """

    def __init__(self, backbone, embedding_head, embedding_head_feature_map_scale, embedding_loss_criterion, semseg_head, semseg_feature_map_scale, semseg_loss_criterion, seediness_head, seediness_head_feature_map_scale, multiclass_semseg_output, output_resize_scale, logger):
        # BUGFIX: was `super(self.__class__, self).__init__()`, which recurses
        # infinitely if this class is ever subclassed; zero-arg super() is safe.
        super().__init__()
        self.backbone = backbone
        self.embedding_head = embedding_head
        self.embedding_head_feature_map_scale = embedding_head_feature_map_scale
        self.embedding_head_output_scale = min(self.embedding_head_feature_map_scale)
        self.embedding_loss_criterion = embedding_loss_criterion
        self.semseg_head = semseg_head
        self.semseg_feature_map_scale = semseg_feature_map_scale
        self.semseg_output_scale = min(self.semseg_feature_map_scale)
        self.semseg_loss_criterion = semseg_loss_criterion
        self.seediness_head = seediness_head
        self.seediness_head_feature_map_scale = seediness_head_feature_map_scale
        all_feature_map_scales = self.embedding_head_feature_map_scale.copy()
        if self.semseg_head is not None:
            all_feature_map_scales += self.semseg_feature_map_scale
        # Contiguous powers of two spanning the min..max requested scales; these
        # key the backbone feature maps in run_backbone().
        min_scale_p = int(math.log2(min(all_feature_map_scales)))
        max_scale_p = int(math.log2(max(all_feature_map_scales)))
        self.feature_map_scales = [2 ** p for p in range(min_scale_p, max_scale_p + 1)]
        self.multiclass_semseg_output = multiclass_semseg_output
        self.output_resize_scale = output_resize_scale
        self.logger = logger

    def train(self, mode=True):
        """Set train/eval mode on all children, except that a frozen backbone
        is left untouched (stays in its current mode)."""
        self.training = mode
        for module_name, module in self.named_children():
            if module_name == 'backbone' and cfg.TRAINING.FREEZE_BACKBONE:
                continue
            module.train(mode)
        return self

    def restore_temporal_dimension(self, x, num_seqs, num_frames, format):
        """
        Restores the temporal dimension given a flattened image/feature tensor
        :param x: tensor of shape [N*T, C, H, W]
        :param num_seqs: Number of image sequences (batch size)
        :param num_frames: Number of frames per image sequence
        :param format: Either 'NCTHW' or 'NTCHW'
        :return: tensor of shape defined by 'format' option
        """
        channels, height, width = x.shape[-3:]
        x = x.view(num_seqs, num_frames, channels, height, width)
        assert format in ['NCTHW', 'NTCHW']
        if format == 'NCTHW':
            x = x.permute(0, 2, 1, 3, 4)
        return x

    def forward(self, image_seqs, targets):
        """Run backbone + heads and populate the output dict with predictions
        and losses for the given batch of image sequences."""
        targets = self.resize_masks(targets)
        num_seqs = image_seqs.num_seqs
        num_frames = image_seqs.num_frames
        features = self.run_backbone(image_seqs)
        embeddings_map, semseg_logits = self.forward_embeddings_and_semseg(features, num_seqs, num_frames)
        output = {ModelOutput.INFERENCE: {ModelOutput.EMBEDDINGS: embeddings_map, ModelOutput.SEMSEG_MASKS: semseg_logits}}
        self.embedding_loss_criterion(embeddings_map, targets, output)
        if self.semseg_head is not None:
            if self.semseg_head.has_foreground_channel:
                # last channel of the semseg output is a binary fg/bg logit
                semseg_logits, fg_logits = semseg_logits.split((semseg_logits.shape[2] - 1, 1), dim=2)
                self.compute_fg_loss(fg_logits.squeeze(2), targets, output)
            self.semseg_loss_criterion(semseg_logits, targets, output)
        return output

    @torch.no_grad()
    def resize_masks(self, targets):
        """
        Downscales masks to the required size
        :param targets: list of per-sequence target dicts
        :return: the same list with masks resized in place
        """
        assert self.embedding_head_output_scale == self.semseg_output_scale
        for target in targets:
            # Instance masks are only downscaled when the loss is computed at
            # feature resolution (output_resize_scale == 1.0); at full-res loss
            # the predictions are upsampled instead (see forward_embeddings_and_semseg).
            if self.output_resize_scale == 1.0:
                target['masks'] = F.interpolate(target['masks'].float(), scale_factor=(1.0 / self.embedding_head_output_scale), mode='bilinear', align_corners=False)
                target['masks'] = target['masks'].byte().detach()
            # NOTE(review): ignore_masks are downscaled unconditionally, unlike
            # 'masks' above — confirm this is intended when LOSS_AT_FULL_RES is on.
            target['ignore_masks'] = F.interpolate(target['ignore_masks'].unsqueeze(0).float(), scale_factor=(1.0 / self.semseg_output_scale), mode='bilinear', align_corners=False)
            target['ignore_masks'] = target['ignore_masks'].squeeze(0).byte().detach()
            if self.semseg_head is not None:
                target['semseg_masks'] = instance_masks_to_semseg_mask(target['masks'], target['category_ids'])
        return targets

    def run_backbone(self, image_seqs):
        """
        Computes backbone features for a set of image sequences.
        :param image_seqs: Instance of ImageList
        :return: A dictionary of feature maps with keys denoting the scale.
        """
        height, width = image_seqs.tensors.shape[-2:]
        images_tensor = image_seqs.tensors.view(image_seqs.num_seqs * image_seqs.num_frames, 3, height, width)
        if cfg.TRAINING.FREEZE_BACKBONE:
            with torch.no_grad():
                features = self.backbone(images_tensor)
        else:
            features = self.backbone(images_tensor)
        return OrderedDict([(k, v) for k, v in zip(self.feature_map_scales, features)])

    def forward_embeddings_and_semseg(self, features, num_seqs, num_frames):
        """Run the semseg, embedding and seediness heads on the backbone
        features and optionally upsample the outputs by output_resize_scale."""
        if self.semseg_head is None:
            semseg_logits = None
        else:
            semseg_input_features = [self.restore_temporal_dimension(features[scale], num_seqs, num_frames, 'NCTHW') for scale in self.semseg_feature_map_scale]
            semseg_logits = self.semseg_head(semseg_input_features)
            # [N, cls, T, H, W] -> [N, T, cls, H, W]
            semseg_logits = semseg_logits.permute(0, 2, 1, 3, 4)
        embedding_head_input_features = [self.restore_temporal_dimension(features[scale], num_seqs, num_frames, 'NCTHW') for scale in self.embedding_head_feature_map_scale]
        embeddings_map = self.embedding_head(embedding_head_input_features)
        if self.seediness_head is not None:
            seediness_input_features = [self.restore_temporal_dimension(features[scale], num_seqs, num_frames, 'NCTHW') for scale in self.seediness_head_feature_map_scale]
            seediness_map = self.seediness_head(seediness_input_features)
            embeddings_map = torch.cat((embeddings_map, seediness_map), dim=1)
        if self.output_resize_scale != 1.0:
            embeddings_map = F.interpolate(embeddings_map, scale_factor=(1.0, self.output_resize_scale, self.output_resize_scale), mode='trilinear', align_corners=False)
            if torch.is_tensor(semseg_logits):
                semseg_logits = F.interpolate(semseg_logits, scale_factor=(1.0, self.output_resize_scale, self.output_resize_scale), mode='trilinear', align_corners=False)
        return embeddings_map, semseg_logits

    def compute_fg_loss(self, fg_logits, targets, output_dict):
        """
        Computes the foreground/background loss
        :param fg_logits: tensor(N, T, H, W)
        :param targets: dict
        :param output_dict: dict
        :return: None (loss is stored in output_dict)
        """
        loss = 0.0
        for pred_fg_logits_per_seq, targets_per_seq in zip(fg_logits, targets):
            gt_semseg_masks_per_seq = targets_per_seq['semseg_masks']
            ignore_masks_per_seq = targets_per_seq['ignore_masks']
            assert gt_semseg_masks_per_seq.shape[-2:] == pred_fg_logits_per_seq.shape[-2:], 'Shape mismatch between ground truth semseg masks {} and predicted semseg masks {}'.format(gt_semseg_masks_per_seq.shape, pred_fg_logits_per_seq.shape)
            assert gt_semseg_masks_per_seq.shape[-2:] == ignore_masks_per_seq.shape[-2:], 'Shape mismatch between ground truth semseg masks {} and ignore masks {} '.format(gt_semseg_masks_per_seq.shape, ignore_masks_per_seq.shape)
            # any non-background semseg label counts as foreground
            fg_masks_per_seq = (gt_semseg_masks_per_seq > 0).float()
            seq_loss = F.binary_cross_entropy_with_logits(pred_fg_logits_per_seq, fg_masks_per_seq, reduction='none')
            with torch.no_grad():
                nonignore_masks_per_seq = 1.0 - ignore_masks_per_seq.float()
            seq_loss = seq_loss * nonignore_masks_per_seq
            seq_loss = seq_loss.sum() / nonignore_masks_per_seq.sum().detach()
            loss = loss + seq_loss
        output_dict[ModelOutput.OPTIMIZATION_LOSSES][LossConsts.FOREGROUND] = loss / len(targets)
|
def build_model(restore_pretrained_backbone_wts=False, logger=None):
    """Builds the full TrainingModel (backbone + heads + loss criteria) from the
    global config.

    :param restore_pretrained_backbone_wts: if True, load pre-trained backbone
        weights from the expected location on disk.
    :param logger: optional logger; falls back to print() when None.
    :return: TrainingModel instance
    :raises ValueError: if pre-trained backbone weights are requested but the
        weights file does not exist.
    """
    print_fn = logger.info if logger is not None else print
    # NOTE(review): fixed seed makes every model build deterministic — confirm
    # this is intentional and not leftover debugging code.
    torch.manual_seed(42)
    backbone_type = cfg.MODEL.BACKBONE.TYPE
    backbone_builder = BACKBONE_REGISTRY[backbone_type]
    backbone = backbone_builder(cfg)
    info_to_print = ['Backbone type: {}'.format(cfg.MODEL.BACKBONE.TYPE), 'Backbone frozen: {}'.format('Yes' if cfg.TRAINING.FREEZE_BACKBONE else 'No')]
    if restore_pretrained_backbone_wts:
        pretrained_wts_file = os.path.join(ModelPaths.pretrained_backbones_dir(), cfg.MODEL.BACKBONE.PRETRAINED_WEIGHTS)
        print_fn("Restoring backbone weights from '{}'".format(pretrained_wts_file))
        if os.path.exists(pretrained_wts_file):
            restore_dict = torch.load(pretrained_wts_file)
            backbone.load_state_dict(restore_dict, strict=True)
        else:
            raise ValueError("Could not find pre-trained backbone weights file at expected location: '{}'".format(pretrained_wts_file))
    # When there is no dedicated seediness head, the embedding head predicts
    # seediness itself.
    embedding_head_seediness_output = not cfg.MODEL.USE_SEEDINESS_HEAD
    add_semseg_head = cfg.MODEL.USE_SEMSEG_HEAD
    if cfg.INPUT.NUM_CLASSES > 2:
        assert add_semseg_head, "Number of object classes > 2, but 'USE_SEMSEG_HEAD' option is set to False"
    EmbeddingHeadType = EMBEDDING_HEAD_REGISTRY[cfg.MODEL.EMBEDDINGS.HEAD_TYPE]
    embedding_head = EmbeddingHeadType(backbone.out_channels, cfg.MODEL.EMBEDDINGS.INTER_CHANNELS, cfg.MODEL.EMBEDDINGS.EMBEDDING_SIZE, tanh_activation=cfg.MODEL.EMBEDDINGS.TANH_ACTIVATION, seediness_output=embedding_head_seediness_output, experimental_dims=cfg.MODEL.EMBEDDING_DIM_MODE, PoolType=POOLER_REGISTRY[cfg.MODEL.EMBEDDINGS.POOL_TYPE], NormType=NORM_REGISTRY[cfg.MODEL.EMBEDDINGS.NORMALIZATION_LAYER](cfg.MODEL.EMBEDDINGS.GN_NUM_GROUPS))
    embedding_loss_criterion = EmbeddingLoss(min(cfg.MODEL.EMBEDDINGS.SCALE), embedding_size=cfg.MODEL.EMBEDDINGS.EMBEDDING_SIZE, nbr_free_dims=get_nb_free_dims(cfg.MODEL.EMBEDDING_DIM_MODE), **cfg.TRAINING.LOSSES.EMBEDDING.d())
    info_to_print.append('Embedding head type: {}'.format(cfg.MODEL.EMBEDDINGS.HEAD_TYPE))
    info_to_print.append('Embedding head channels: {}'.format(cfg.MODEL.EMBEDDINGS.INTER_CHANNELS))
    info_to_print.append('Embedding dims: {}'.format(cfg.MODEL.EMBEDDINGS.EMBEDDING_SIZE))
    info_to_print.append('Embedding dim mode: {}'.format(cfg.MODEL.EMBEDDING_DIM_MODE))
    info_to_print.append('Embedding free dim stds: {}'.format(cfg.TRAINING.LOSSES.EMBEDDING.FREE_DIM_STDS))
    info_to_print.append('Embedding head normalization: {}'.format(cfg.MODEL.EMBEDDINGS.NORMALIZATION_LAYER))
    info_to_print.append('Embedding head pooling type: {}'.format(cfg.MODEL.EMBEDDINGS.POOL_TYPE))
    if cfg.MODEL.USE_SEEDINESS_HEAD:
        SeedinessHeadType = SEEDINESS_HEAD_REGISTRY[cfg.MODEL.SEEDINESS.HEAD_TYPE]
        seediness_head = SeedinessHeadType(backbone.out_channels, cfg.MODEL.SEEDINESS.INTER_CHANNELS, PoolType=POOLER_REGISTRY[cfg.MODEL.SEEDINESS.POOL_TYPE], NormType=NORM_REGISTRY[cfg.MODEL.SEEDINESS.NORMALIZATION_LAYER](cfg.MODEL.SEEDINESS.GN_NUM_GROUPS))
        info_to_print.append('Seediness head type: {}'.format(cfg.MODEL.SEEDINESS.HEAD_TYPE))
        info_to_print.append('Seediness head channels: {}'.format(cfg.MODEL.SEEDINESS.INTER_CHANNELS))
        info_to_print.append('Seediness head normalization: {}'.format(cfg.MODEL.SEEDINESS.NORMALIZATION_LAYER))
        info_to_print.append('Seediness head pooling type: {}'.format(cfg.MODEL.SEEDINESS.POOL_TYPE))
    else:
        seediness_head = None
        info_to_print.append('Seediness head type: N/A')
    if add_semseg_head:
        SemsegHeadType = SEMSEG_HEAD_REGISTRY[cfg.MODEL.SEMSEG.HEAD_TYPE]
        semseg_head = SemsegHeadType(backbone.out_channels, cfg.INPUT.NUM_CLASSES, inter_channels=cfg.MODEL.SEMSEG.INTER_CHANNELS, feature_scales=cfg.MODEL.SEMSEG.FEATURE_SCALE, foreground_channel=cfg.MODEL.SEMSEG.FOREGROUND_CHANNEL, PoolType=POOLER_REGISTRY[cfg.MODEL.SEMSEG.POOL_TYPE], NormType=NORM_REGISTRY[cfg.MODEL.SEMSEG.NORMALIZATION_LAYER](cfg.MODEL.SEMSEG.GN_NUM_GROUPS))
        SemsegLossType = SEMSEG_LOSS_REGISTRY[cfg.TRAINING.LOSSES.SEMSEG]
        semseg_loss_criterion = SemsegLossType()
        info_to_print.append('Semseg head type: {}'.format(cfg.MODEL.SEMSEG.HEAD_TYPE))
        info_to_print.append('Semseg head channels: {}'.format(cfg.MODEL.SEMSEG.INTER_CHANNELS))
        # typo fix: log line previously read 'Sesmeg with foreground channel'
        info_to_print.append('Semseg with foreground channel: {}'.format('Yes' if cfg.MODEL.SEMSEG.FOREGROUND_CHANNEL else 'No'))
        info_to_print.append('Semseg loss type: {}'.format(cfg.TRAINING.LOSSES.SEMSEG))
        info_to_print.append('Semseg head normalization: {}'.format(cfg.MODEL.SEMSEG.NORMALIZATION_LAYER))
        info_to_print.append('Semseg head pooling type: {}'.format(cfg.MODEL.SEMSEG.POOL_TYPE))
    else:
        semseg_head = None
        semseg_loss_criterion = None
        info_to_print.append('Semseg head type: N/A')
    multiclass_semseg_output = cfg.INPUT.NUM_CLASSES > 2
    # Predictions are produced at 1/4 resolution; upsample them by 4x when the
    # loss should be computed at full image resolution.
    output_resize_scale = 4.0 if cfg.TRAINING.LOSS_AT_FULL_RES else 1.0
    info_to_print.append('Output resize scale: {}'.format(output_resize_scale))
    print_fn('Model configuration\n{}\n'.format('\n'.join([' - {}'.format(line) for line in info_to_print])))
    return TrainingModel(backbone=backbone, embedding_head=embedding_head, embedding_head_feature_map_scale=cfg.MODEL.EMBEDDINGS.SCALE, embedding_loss_criterion=embedding_loss_criterion, semseg_head=semseg_head, semseg_feature_map_scale=cfg.MODEL.SEMSEG.FEATURE_SCALE, semseg_loss_criterion=semseg_loss_criterion, seediness_head=seediness_head, seediness_head_feature_map_scale=cfg.MODEL.SEEDINESS.FEATURE_SCALE, multiclass_semseg_output=multiclass_semseg_output, output_resize_scale=output_resize_scale, logger=logger)
|
@SEEDINESS_HEAD_REGISTRY.add('squeeze_expand_decoder')
class SqueezingExpandDecoder(nn.Module):
    """Multi-scale decoder that fuses 4 backbone feature maps coarse-to-fine
    into a single per-pixel seediness map in [0, 1]."""

    def __init__(self, in_channels, inter_channels, ConvType=nn.Conv3d, PoolType=nn.AvgPool3d, NormType=nn.Identity):
        super().__init__()
        pool_makers = get_pooling_layer_creator(PoolType)

        def conv_unit(c_in, c_out):
            # conv -> norm -> relu triple, identical to the original inline layout
            return [ConvType(c_in, c_out, 3, stride=1, padding=1), NormType(c_out), nn.ReLU(inplace=True)]

        def down_block(c_in, c_out, num_stages):
            # num_stages repetitions of (conv unit + temporal pooling); produces
            # the exact same flat nn.Sequential as the original hand-written one,
            # so state_dict keys are unchanged.
            layers = []
            for idx in range(num_stages):
                layers.extend(conv_unit(c_in if idx == 0 else c_out, c_out))
                layers.append(pool_makers[idx](3, stride=(2, 1, 1), padding=1))
            return nn.Sequential(*layers)

        self.block_32x = down_block(in_channels, inter_channels[0], 3)
        self.block_16x = down_block(in_channels, inter_channels[1], 2)
        self.block_8x = down_block(in_channels, inter_channels[2], 1)
        self.block_4x = nn.Sequential(*conv_unit(in_channels, inter_channels[3]))
        t_scales = get_temporal_scales()
        self.upsample_32_to_16 = nn.Sequential(UpsampleTrilinear3D(scale_factor=(t_scales[0], 2, 2), align_corners=False))
        self.conv_16 = nn.Conv3d(inter_channels[0] + inter_channels[1], inter_channels[1], 1, bias=False)
        self.upsample_16_to_8 = nn.Sequential(UpsampleTrilinear3D(scale_factor=(t_scales[1], 2, 2), align_corners=False))
        self.conv_8 = nn.Conv3d(inter_channels[1] + inter_channels[2], inter_channels[2], 1, bias=False)
        self.upsample_8_to_4 = nn.Sequential(UpsampleTrilinear3D(scale_factor=(t_scales[2], 2, 2), align_corners=False))
        self.conv_4 = nn.Conv3d(inter_channels[2] + inter_channels[3], inter_channels[3], 1, bias=False)
        self.conv_out = nn.Conv3d(inter_channels[3], 1, kernel_size=1, padding=0, bias=False)

    def forward(self, x):
        """
        :param x: list of 4 multiscale feature map tensors of shape
            [N, C, T, H, W], in increasing order of spatial dimensions.
        :return: seediness map of shape [N, 1, T, H, W], values in [0, 1]
        """
        assert len(x) == 4
        f32, f16, f8, f4 = x
        out = self.upsample_32_to_16(self.block_32x(f32))
        out = self.conv_16(torch.cat((out, self.block_16x(f16)), 1))
        out = self.upsample_16_to_8(out)
        out = self.conv_8(torch.cat((out, self.block_8x(f8)), 1))
        out = self.upsample_8_to_4(out)
        out = self.conv_4(torch.cat((out, self.block_4x(f4)), 1))
        return self.conv_out(out).sigmoid()
|
@SEMSEG_HEAD_REGISTRY.add('squeeze_expand_decoder')
class SqueezeExpandDecoder(nn.Module):
    """Multi-scale semantic segmentation decoder: fuses 4 feature maps
    coarse-to-fine and predicts per-class logits (plus an optional extra
    foreground channel)."""

    def __init__(self, in_channels, num_classes, inter_channels, feature_scales, foreground_channel=False, ConvType=nn.Conv3d, PoolType=nn.AvgPool3d, NormType=nn.Identity):
        super().__init__()
        self.is_3d = True
        assert tuple(feature_scales) == (4, 8, 16, 32)
        pool_makers = get_pooling_layer_creator(PoolType)

        def conv_unit(c_in, c_out):
            # conv -> norm -> relu triple, identical to the original inline layout
            return [ConvType(c_in, c_out, 3, stride=1, padding=1), NormType(c_out), nn.ReLU(inplace=True)]

        def down_block(c_in, c_out, num_stages):
            # num_stages repetitions of (conv unit + temporal pooling); yields
            # the same flat nn.Sequential as before, so state_dict keys match.
            layers = []
            for idx in range(num_stages):
                layers.extend(conv_unit(c_in if idx == 0 else c_out, c_out))
                layers.append(pool_makers[idx](3, stride=(2, 1, 1), padding=1))
            return nn.Sequential(*layers)

        self.block_32x = down_block(in_channels, inter_channels[0], 3)
        self.block_16x = down_block(in_channels, inter_channels[1], 2)
        self.block_8x = down_block(in_channels, inter_channels[2], 1)
        self.block_4x = nn.Sequential(*conv_unit(in_channels, inter_channels[3]))
        t_scales = get_temporal_scales()
        self.upsample_32_to_16 = nn.Sequential(UpsampleTrilinear3D(scale_factor=(t_scales[0], 2, 2), align_corners=False))
        self.conv_16 = nn.Conv3d(inter_channels[0] + inter_channels[1], inter_channels[1], 1, bias=False)
        self.upsample_16_to_8 = nn.Sequential(UpsampleTrilinear3D(scale_factor=(t_scales[1], 2, 2), align_corners=False))
        self.conv_8 = nn.Conv3d(inter_channels[1] + inter_channels[2], inter_channels[2], 1, bias=False)
        self.upsample_8_to_4 = nn.Sequential(UpsampleTrilinear3D(scale_factor=(t_scales[2], 2, 2), align_corners=False))
        self.conv_4 = nn.Conv3d(inter_channels[2] + inter_channels[3], inter_channels[3], 1, bias=False)
        out_channels = num_classes + 1 if foreground_channel else num_classes
        self.conv_out = nn.Conv3d(inter_channels[3], out_channels, kernel_size=1, padding=0, bias=False)
        self.has_foreground_channel = foreground_channel

    def forward(self, x):
        """
        :param x: list of 4 feature maps of shape [N, C, T, H, W], fine-to-coarse
            (feature_scales is asserted to be (4, 8, 16, 32) in __init__).
        :return: logits of shape [N, out_channels, T, H, W]
        """
        assert len(x) == 4, 'Expected 4 feature maps, got {}'.format(len(x))
        # Input arrives fine-to-coarse; reverse it to unpack coarse-to-fine.
        f32, f16, f8, f4 = x[::-1]
        out = self.upsample_32_to_16(self.block_32x(f32))
        out = self.conv_16(torch.cat((out, self.block_16x(f16)), 1))
        out = self.upsample_16_to_8(out)
        out = self.conv_8(torch.cat((out, self.block_8x(f8)), 1))
        out = self.upsample_8_to_4(out)
        out = self.conv_4(torch.cat((out, self.block_4x(f4)), 1))
        return self.conv_out(out)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.