repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
fl-analysis | fl-analysis-master/src/subspace/keras_ext/engine_topology.py | # Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from tensorflow.keras.layers import Input
from keras.engine.topology import Network as Container
from .util import full_static_shape
class LazyContainer(Container):
    '''Like Container. But lazy.

    Wraps ``container_function`` (a callable mapping an Input tensor to
    output tensors) and defers construction of the underlying Keras
    ``Container`` until the first time this object is called on a real
    tensor, when the input shape is finally known.
    '''
    def __init__(self, container_function, use_method_disposable=True):
        # container_function: builds the graph given an Input layer.
        self._container_function = container_function
        # Set once the Container superclass has been initialized on first call.
        self._lazy_has_run = False
        # True: trace the graph with a throwaway placeholder Input, then call
        # the Container on the real tensor. False: bind the real tensor as the
        # Input directly (experimental; see warning below).
        self.use_method_disposable = use_method_disposable
        # Delay rest of construction until first call
        # NOTE: Container.__init__ is intentionally NOT called here; it runs
        # inside __call__ below, the first time shapes are available.

    def __call__(self, x, mask=None):
        if not self._lazy_has_run:
            # Make short-lived Input Layers for each x this was called with
            # TODO: handle tuple or list x
            x_shape = full_static_shape(x)  # Uses var._keras_shape or var.get_shape()
            if self.use_method_disposable:
                # Disposable placeholder: used only to trace the graph; the
                # real tensor x is fed through the Container call at the end.
                inp_layer = Input(batch_shape=x_shape,
                                  dtype=x.dtype,
                                  name='tmp_input_from__%s' % x.name.replace('/','_').replace(':','_'))
            else:
                print('Warning: using non-disposable approach. May not work yet.')
                # Bind the real tensor as the Input itself.
                inp_layer = Input(tensor=x,
                                  batch_shape=x_shape,
                                  dtype=x.dtype, name='real_input_from__%s' % x.name.replace('/','_').replace(':','_'))
            # Call function of inputs to get output tensors
            outputs = self._container_function(inp_layer)
            # Initialize entire Container object here (finally)
            super(LazyContainer, self).__init__(inp_layer, outputs)
            self._lazy_has_run = True
            if not self.use_method_disposable:
                return outputs
        # Non-disposable mode: actually call the Container only the *second* and later times
        # Disposable mode: call the Container now
        ret = super(LazyContainer, self).__call__(x, mask=mask)
        return ret
| 3,056 | 45.318182 | 119 | py |
fl-analysis | fl-analysis-master/src/subspace/keras_ext/util.py | # Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Flatten, Input, Lambda
from src.subspace.general.tfutil import tf_assert_gpu, hist_summaries_traintest
########################
# General Keras helpers
########################
def make_image_input_preproc(im_dims, dtype='float32', flatten_in=False, shift_in=None, name=None):
    '''Create an image Input layer plus an optionally preprocessed view of it.

    Returns a pair ``(input_layer, preprocessed)``: the Input layer itself
    (use it as the Model input) and the tensor after optional uint8->float32
    cast, mean shift, and flattening (feed this to the first model layer).
    When no preprocessing applies, both elements are the same tensor.
    '''
    assert isinstance(im_dims, tuple) and len(im_dims) == 3, 'should be tuple of 3 dims (0,1,c)'
    assert dtype in ('float32', 'uint8'), 'unknown dtype'

    input_images = Input(shape=im_dims, dtype=dtype, name=name)

    preproc = input_images
    if dtype == 'uint8':
        # Work in float32 downstream regardless of the storage dtype.
        preproc = Lambda(lambda t: K.cast(t, 'float32'))(preproc)
    if shift_in is not None:
        print('subtracting from each input:', shift_in)
        preproc = Lambda(lambda t: t - shift_in)(preproc)
    if flatten_in:
        preproc = Flatten()(preproc)
    return input_images, preproc
def make_classlabel_input(n_label_vals):
    # Variable-length int64 vector input for class labels.
    # NOTE(review): n_label_vals is currently unused — the returned Input is
    # an unbounded int64 vector regardless of the number of label values.
    return Input(batch_shape=(None,), dtype='int64')
def setup_session_and_seeds(seed, assert_gpu=True, mem_fraction=None):
    '''Start TF and register session with Keras'''
    # Use InteractiveSession instead of Session so the default session will be set
    config = None
    if mem_fraction is not None:
        # Cap per-process GPU memory when a fraction is requested.
        gpu_opts = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=mem_fraction)
        config = tf.compat.v1.ConfigProto(gpu_options=gpu_opts)
    # config=None yields the same defaults as calling InteractiveSession().
    sess = tf.compat.v1.InteractiveSession(config=config)
    K.set_session(sess)

    # Seed both RNGs for reproducibility.
    np.random.seed(seed)
    tf.compat.v1.set_random_seed(seed)
    print('Set numpy and tensorflow random seeds to: %s' % repr(seed))
    print('My PID is %d' % os.getpid())

    if assert_gpu:
        tf_assert_gpu(sess)
    return sess
def add_act_summaries(model, quiet=False):
    '''Register train/test histogram summaries for every unique activation
    (output tensor) produced by the model's layers.'''
    if not quiet:
        print('\nActivations:')
    # Collect unique output tensors keyed by tensor name.
    by_name = {}
    for layer in model.layers:
        for node in layer._inbound_nodes:
            for t in node.output_tensors:
                by_name[t.name] = t
    # Deterministic iteration order via sorted names.
    for tname in sorted(by_name):
        hist_summaries_traintest(by_name[tname], name=tname + '__act')
        if not quiet:
            print(' ', tname, by_name[tname])
def get_model_tensors(model, with_layers_nodes=False):
    '''Collect every unique output tensor of the model, in deterministic order.

    Returns the list of tensors, or — when ``with_layers_nodes`` is True —
    a ``(tensors, layers, nodes)`` triple of parallel lists giving, for each
    tensor, the layer and inbound node that produced it.
    '''
    seen = set()
    tensors, layers, nodes = [], [], []
    for layer in model.layers:
        for node in layer._inbound_nodes:
            for t in node.output_tensors:
                if t in seen:
                    continue
                # Lists preserve deterministic order; the set gives O(1) membership.
                seen.add(t)
                tensors.append(t)
                layers.append(layer)
                nodes.append(node)
    return (tensors, layers, nodes) if with_layers_nodes else tensors
def warn_misaligned_shapes(model):
    '''Print a warning table listing tensors whose TF static shape disagrees
    with the Keras-tracked shape (a possible sign of graph problems).

    Tensors without a ``_keras_shape`` attribute are skipped; nothing is
    printed when no mismatch is found.
    '''
    header_shown = False
    tensors, layers, nodes = get_model_tensors(model, with_layers_nodes=True)
    for tensor, layer, node in zip(tensors, layers, nodes):
        tf_shape = tuple(tensor.get_shape().as_list())
        try:
            keras_shape = tensor._keras_shape
        except AttributeError:
            continue
        if tf_shape == keras_shape:
            continue
        if not header_shown:
            # Emit the table header only once, before the first mismatch.
            print('\nWarning: found the following tensor shape mismatches, may indicate problems.')
            print(' %-40s %-22s %-22s' % ('LAYER NAME', '', ''))
            print(' %-40s %-22s %-22s' % ('TENSOR NAME', 'KERAS SHAPE', 'TF SHAPE'))
            header_shown = True
        print(' %-40s %-22s %-22s' % (layer.name, '', ''))
        print(' %-40s %-22s %-22s' % (tensor.name, keras_shape, tf_shape))
def full_static_shape(var):
    '''Returns the most fully-specified static shape possible for var (at
    graph build time, not run time). Uses information in
    var.get_shape() as well as var._keras_shape. Raises an Exception
    if the two shapes are incompatible with each other.
    '''
    # Iterating a TF2 TensorShape yields plain ints or None for unknown dims;
    # TF1 yields Dimension objects (int() of an unknown Dimension raises).
    # The old code called val.__int__() directly, which crashed with
    # AttributeError on TF2's None dims; represent unknown dims as None.
    tf_shape = []
    for val in var.get_shape():
        if val is None:
            tf_shape.append(None)
        else:
            try:
                tf_shape.append(int(val))
            except (TypeError, ValueError):
                # TF1 Dimension whose value is unknown.
                tf_shape.append(None)

    if not hasattr(var, '_keras_shape'):
        return tf_shape

    k_shape = var._keras_shape
    assert len(tf_shape) == len(k_shape), 'Shape lengths different; this probably should not occur'

    # Merge the two shapes dim by dim, preferring whichever side is known.
    shape = []
    for tt, kk in zip(tf_shape, k_shape):
        if tt == kk:
            shape.append(tt)
        elif tt is None:
            shape.append(kk)
        elif kk is None:
            shape.append(tt)
        else:
            raise Exception('tf shape and Keras shape are contradictory: %s vs %s' % (tf_shape, k_shape))
    return shape
| 6,451 | 38.10303 | 109 | py |
fl-analysis | fl-analysis-master/src/subspace/keras_ext/regularizers.py | # Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''Custom Keras regularizers.'''
import tensorflow.keras
# Bind the bare name `keras` used by the class definitions below:
# `import tensorflow.keras` alone only binds the name `tensorflow`.
from tensorflow import keras
import tensorflow.keras.backend as K
class WeightRegularizer(keras.regularizers.WeightRegularizer):
    '''Subclass of Keras WeightRegularizer that doesn't use
    K.in_train_phase, so that total loss can easily be compared
    between train and val modes.

    NOTE(review): ``keras.regularizers.WeightRegularizer`` is the legacy
    Keras 1.x API and does not exist in tf.keras — confirm which Keras
    version this module is meant to run against.
    '''
    def __init__(self, l1=0., l2=0.):
        # Penalty strengths, stored as backend floatx scalars.
        self.l1 = K.cast_to_floatx(l1)
        self.l2 = K.cast_to_floatx(l2)
        # Loss is identical in train and test phases (the point of this subclass).
        self.uses_learning_phase = False
        # p: the weight tensor to regularize; assigned externally before get_loss().
        self.p = None

    def get_loss(self):
        '''Return the summed L1/L2 penalty over the parameter tensor ``p``.'''
        loss = 0.0
        if self.l1:
            loss += K.sum(K.abs(self.p)) * self.l1
        if self.l2:
            loss += K.sum(K.square(self.p)) * self.l2
        return loss
class WeightRegularizerMean(keras.regularizers.WeightRegularizer):
    '''Weight regularizer that skips K.in_train_phase so the total loss is
    directly comparable between train and validation modes.

    Identical contract to WeightRegularizer, but averages the elementwise
    penalty (K.mean) instead of summing it.
    '''
    def __init__(self, l1=0., l2=0.):
        # Penalty strengths as backend floatx scalars.
        self.l1 = K.cast_to_floatx(l1)
        self.l2 = K.cast_to_floatx(l2)
        # Same loss in train and test phases.
        self.uses_learning_phase = False
        # Parameter tensor; attached externally before get_loss().
        self.p = None

    def get_loss(self):
        '''Return the mean L1/L2 penalty over the parameter tensor ``p``.'''
        penalty = 0.0
        if self.l1:
            penalty += self.l1 * K.mean(K.abs(self.p))
        if self.l2:
            penalty += self.l2 * K.mean(K.square(self.p))
        return penalty
class ActivityRegularizer(keras.regularizers.ActivityRegularizer):
    '''Subclass of Keras ActivityRegularizer that doesn't use
    K.in_train_phase, so that total loss can easily be compared
    between train and val modes.

    NOTE(review): the base class is the legacy Keras 1.x API and does not
    exist in tf.keras — confirm the intended Keras version.
    '''
    def __init__(self, l1=0., l2=0.):
        # Penalty strengths as backend floatx scalars.
        self.l1 = K.cast_to_floatx(l1)
        self.l2 = K.cast_to_floatx(l2)
        # Loss is identical in train and test phases (the point of this subclass).
        self.uses_learning_phase = False
        # layer: must be attached (via set_layer) before get_loss() is called.
        self.layer = None

    def get_loss(self):
        '''Return the summed L1/L2 penalty over every output of the attached layer.

        Raises:
            Exception: if no layer has been attached yet.
        '''
        if self.layer is None:
            raise Exception('Need to call `set_layer` on '
                            'ActivityRegularizer instance '
                            'before calling the instance.')
        loss = 0.0
        # Accumulate the penalty over each inbound node's output tensor.
        for i in range(len(self.layer.inbound_nodes)):
            output = self.layer.get_output_at(i)
            if self.l1:
                loss += K.sum(self.l1 * K.abs(output))
            if self.l2:
                loss += K.sum(self.l2 * K.square(output))
        return loss
def l1(l=0.01):
    '''Weight regularizer applying an L1 penalty of strength ``l``.'''
    return WeightRegularizer(l1=l)


def l2(l=0.01):
    '''Weight regularizer applying an L2 penalty of strength ``l``.'''
    return WeightRegularizer(l2=l)


def l1l2(l1=0.01, l2=0.01):
    '''Weight regularizer applying both L1 and L2 penalties.'''
    return WeightRegularizer(l1=l1, l2=l2)


def activity_l1(l=0.01):
    '''Activity regularizer applying an L1 penalty of strength ``l``.'''
    return ActivityRegularizer(l1=l)


def activity_l2(l=0.01):
    '''Activity regularizer applying an L2 penalty of strength ``l``.'''
    return ActivityRegularizer(l2=l)


def activity_l1l2(l1=0.01, l2=0.01):
    '''Activity regularizer applying both L1 and L2 penalties.'''
    return ActivityRegularizer(l1=l1, l2=l2)
| 3,830 | 30.925 | 80 | py |
fl-analysis | fl-analysis-master/src/subspace/keras_ext/rproj_layers.py | # Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer, InputSpec
import tensorflow.keras.backend as K
from tensorflow.python.keras.utils import conv_utils
from src.subspace.keras_ext.rproj_layers_util import _convert_string_dtype
# from keras.backend.tensorflow_backend import _convert_string_dtype
from tensorflow.keras import regularizers, constraints, initializers, activations
###########
#
# Low Rank Basis Layers
#
# These layers are modified versions of standard Keras layers that
# accept an OffsetCreator*Proj to create offsets from a weight basis
# in a Dense/Sparse/Fastfood agnostic manner.
#
###########
class LowRankBasisLayer(Layer):
    '''Smarter version of Layer...

    Base class for layers whose trainable parameters live in a low-rank
    subspace. Each weight is represented as a frozen initial value theta_0
    plus an offset produced by an OffsetCreator from the shared weight
    basis; ``add_weight`` therefore returns a (theta_0, offset) pair that
    subclasses recombine as ``theta_0 + offset()`` in ``call``.
    '''
    def __init__(self, offset_creator_class, weight_basis, *args, **kwargs):
        super(LowRankBasisLayer, self).__init__(*args, **kwargs)
        # offset_creator is an object that creates theta offsets
        self.offset_creator = offset_creator_class()
        self.weight_basis = weight_basis
        # These may or may not be used by subclasses
        #self._basis_matrices = []
        #self._basis_matrix_normalizers = []

    # TODO check for use of basis_matrices
    @property
    def basis_matrices(self):
        print('USED HERE basis_matrices')
        return self._basis_matrices

    # TODO check for use of basis_matrix_normalizers
    @property
    def basis_matrix_normalizers(self):
        print('USED HERE basis_matrix_normalizers')
        return self._basis_matrix_normalizers

    def add_weight(self,
                   name,
                   shape,
                   dtype=None,
                   initializer=None,
                   regularizer=None,
                   trainable=True,
                   constraint=None):
        '''Version of add_weight that creates a weight theta by instantiating
        theta_0 and then adding to it an offset from the member
        offset_creator.

        Returns:
            (theta_0, theta_offset): the frozen initial variable and the
            offset object; the effective weight is theta_0 + theta_offset().
        '''
        initializer = initializers.get(initializer)
        if dtype is None:
            dtype = K.floatx()

        # Create Theta_0 (frozen: only the low-rank offset is trainable).
        value_0 = initializer(shape)
        theta_0 = tf.Variable(value_0, trainable=False, dtype=_convert_string_dtype(dtype), name='%s_theta0' % name)
        if isinstance(value_0, np.ndarray):
            theta_0._keras_shape = value_0.shape
        elif hasattr(value_0, 'get_shape'):
            theta_0._keras_shape = tuple(map(int, value_0.get_shape()))
        theta_0._uses_learning_phase = False

        # Call offset creator. (Local renamed from `exec`, which shadowed the
        # Python builtin of the same name.)
        theta_offset = self.offset_creator.create_theta_offset(self.weight_basis,
                                                               theta_0.get_shape(),
                                                               dtype=dtype,
                                                               name=name)
        non_trainable_weights = theta_offset.ww

        # if regularizer is not None:
        #     self.add_loss(regularizer(theta))
        # if constraint is not None:
        #     self.constraints[theta] = constraint
        #self._base_thetas.append(theta_0)
        #self._basis_matrices.append(ww)
        #self._non_trainable_weights.extend([theta_0, ww])
        self._non_trainable_weights.extend([theta_0] + [non_trainable_weights])

        return theta_0, theta_offset

    def add_non_trainable_weight(self,
                                 name,
                                 shape,
                                 dtype=None,
                                 initializer=None,
                                 regularizer=None,
                                 constraint=None):
        '''Adds a weight variable to the layer.
        # Arguments
            name: String, the name for the weight variable.
            shape: The shape tuple of the weight.
            dtype: The dtype of the weight.
            initializer: An Initializer instance (callable).
            regularizer: An optional Regularizer instance.
            constraint: An optional Constraint instance.
        # Returns
            The created weight variable (always non-trainable).
        '''
        initializer = initializers.get(initializer)
        if dtype is None:
            dtype = K.floatx()
        weight = tf.Variable(initializer(shape), dtype=dtype, name=name, trainable=False)
        # weight = K.variable(initializer(shape), dtype=dtype, name=name)
        if regularizer is not None:
            self.add_loss(regularizer(weight))
        if constraint is not None:
            self.constraints[weight] = constraint
        self._non_trainable_weights.append(weight)
        return weight
class RProjDense(LowRankBasisLayer):
    '''RProj version of Dense.

    A fully-connected layer whose kernel and bias are frozen theta_0
    variables plus trainable low-rank offsets (see LowRankBasisLayer).
    '''
    def __init__(self, offset_creator_class, weight_basis,
                 units,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        # Keras-1 compatibility: accept input_dim as an alias for input_shape.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(RProjDense, self).__init__(offset_creator_class, weight_basis, **kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True

    def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]

        # NOTE: add_weight returns a (theta_0, offset) pair, not a variable.
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True

    def call(self, inputs, **kwargs):
        # Reconstruct the effective kernel: theta_0 + low-rank offset.
        kt0, k_off = self.kernel
        kernel = tf.add(kt0, k_off())

        # Normal dense functionality
        output = K.dot(inputs, kernel)
        if self.use_bias:
            # Only unpack the bias pair when it exists: self.bias is None
            # when use_bias=False (the old code unpacked unconditionally
            # and crashed in that configuration).
            bt0, b_off = self.bias
            output = K.bias_add(output, tf.add(bt0, b_off()))
        if self.activation is not None:
            output = self.activation(output)
        return output

    def compute_output_shape(self, input_shape):
        assert input_shape and len(input_shape) >= 2
        assert input_shape[-1]
        output_shape = list(input_shape)
        output_shape[-1] = self.units
        return tuple(output_shape)
class _RProjConv(LowRankBasisLayer):
    '''Abstract nD convolution layer (private, used as implementation base).
    Only the intrinsic parameters (RProj) are Trainable.'''
    def __init__(self, offset_creator_class, weight_basis,
                 rank,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(_RProjConv, self).__init__(offset_creator_class, weight_basis, **kwargs)
        self.rank = rank
        self.filters = filters
        # Normalize scalar/tuple conv hyper-parameters to rank-length tuples.
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)

    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        # NOTE: add_weight returns a (theta_0, offset) pair, not a variable.
        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True

    def call(self, inputs):
        # self.kernel / self.bias are (theta_0, offset) pairs from
        # LowRankBasisLayer.add_weight; reconstruct the effective tensors.
        # (The old code passed the raw pair straight to K.convNd/K.bias_add,
        # which is inconsistent with RProjDense/RProjConv2D and cannot work.)
        kt0, k_off = self.kernel
        kernel = tf.add(kt0, k_off())

        if self.rank == 1:
            outputs = K.conv1d(
                inputs,
                kernel,
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.rank == 3:
            outputs = K.conv3d(
                inputs,
                kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)

        if self.use_bias:
            bt0, b_off = self.bias
            outputs = K.bias_add(
                outputs,
                tf.add(bt0, b_off()),
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        if self.data_format == 'channels_last':
            space = input_shape[1:-1]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            return (input_shape[0],) + tuple(new_space) + (self.filters,)
        if self.data_format == 'channels_first':
            space = input_shape[2:]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            return (input_shape[0], self.filters) + tuple(new_space)
class RProjConv2D(_RProjConv):
    '''Low Rank Basis Conv2D
    Filters if number of filters, output dimension is filters

    A 2D convolution whose kernel/bias are frozen theta_0 variables plus
    trainable low-rank offsets. Only 'channels_last' data format is
    supported; the configured activation is currently NOT applied in call().
    TODO: Documentation / unit tests
    '''
    def __init__(self, offset_creator_class, weight_basis,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(RProjConv2D, self).__init__(
            offset_creator_class=offset_creator_class,
            weight_basis=weight_basis,
            rank=2,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs)
        self.input_spec = InputSpec(ndim=4)

    def build(self, input_shape):
        assert self.data_format != 'channels_first', 'only b01c supported'
        channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[-1]
        self.units = self.filters
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        # NOTE: add_weight returns a (theta_0, offset) pair, not a variable.
        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True

    def call(self, inputs):
        assert self.rank == 2, 'only conv2d supported for now...'

        # Reconstruct the effective kernel: theta_0 + low-rank offset.
        kt0, k_off = self.kernel
        kernel = tf.add(kt0, k_off())

        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)

        if self.use_bias:
            # Only unpack the bias pair when it exists: self.bias is None
            # when use_bias=False (the old code unpacked unconditionally
            # and crashed in that configuration).
            bt0, b_off = self.bias
            outputs = K.bias_add(
                outputs,
                tf.add(bt0, b_off()),
                data_format=self.data_format)

        #if self.activation is not None:
        #    assert False,'activation functions not supported'
        #    return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        if self.data_format == 'channels_last':
            space = input_shape[1:-1]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            #self.filters*2 to accomodate LU representation
            return (input_shape[0],) + tuple(new_space) + (self.filters,)
class RProjBatchNormalization(LowRankBasisLayer):
    '''RProj version of BatchNormalization.

    gamma/beta are (theta_0, offset) pairs from LowRankBasisLayer.add_weight
    and are reconstructed as theta_0 + offset() inside call(); the moving
    mean/variance statistics are plain non-trainable variables.
    '''
    def __init__(self, offset_creator_class, weight_basis,
                 axis=-1,
                 momentum=0.99,
                 epsilon=1e-3,
                 center=True,
                 scale=True,
                 beta_initializer='zeros',
                 gamma_initializer='ones',
                 moving_mean_initializer='zeros',
                 moving_variance_initializer='ones',
                 beta_regularizer=None,
                 gamma_regularizer=None,
                 beta_constraint=None,
                 gamma_constraint=None,
                 **kwargs):
        super(RProjBatchNormalization, self).__init__(offset_creator_class, weight_basis, **kwargs)
        self.supports_masking = True
        # axis: which input axis holds the features to normalize (-1 = channels_last).
        self.axis = axis
        self.momentum = momentum
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.moving_mean_initializer = initializers.get(moving_mean_initializer)
        self.moving_variance_initializer = initializers.get(moving_variance_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)

    def build(self, input_shape):
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')
        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})
        shape = (dim,)

        if self.scale:
            # (theta_0, offset) pair; recombined in call().
            self.gamma = self.add_weight(shape=shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            # (theta_0, offset) pair; recombined in call().
            self.beta = self.add_weight(shape=shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None
        # Running statistics are ordinary non-trainable variables.
        self.moving_mean = self.add_non_trainable_weight(
            shape=shape,
            name='moving_mean',
            initializer=self.moving_mean_initializer)
        self.moving_variance = self.add_non_trainable_weight(
            shape=shape,
            name='moving_variance',
            initializer=self.moving_variance_initializer)
        self.built = True

    def call(self, inputs, training=None):
        # training = self._get_training_value(training)
        input_shape = K.int_shape(inputs)
        # Prepare broadcasting shape.
        ndim = len(input_shape)
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        # Determines whether broadcasting is needed.
        needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1])

        # Reconstruct effective gamma/beta from their (theta_0, offset) pairs.
        # NOTE(review): this unpacks unconditionally — crashes if scale=False
        # or center=False (gamma/beta are None then); confirm intended usage.
        gamma_init, gamma_exec = self.gamma
        gamma = tf.add(gamma_init, gamma_exec())
        beta_init, beta_exec = self.beta
        beta = tf.add(beta_init, beta_exec())

        def normalize_inference():
            # Inference path: normalize with the stored moving statistics.
            if needs_broadcasting:
                # In this case we must explicitly broadcast all parameters.
                broadcast_moving_mean = K.reshape(self.moving_mean,
                                                  broadcast_shape)
                broadcast_moving_variance = K.reshape(self.moving_variance,
                                                      broadcast_shape)
                if self.center:
                    broadcast_beta = K.reshape(beta, broadcast_shape)
                else:
                    broadcast_beta = None
                if self.scale:
                    broadcast_gamma = K.reshape(gamma,
                                                broadcast_shape)
                else:
                    broadcast_gamma = None
                return K.batch_normalization(
                    inputs,
                    broadcast_moving_mean,
                    broadcast_moving_variance,
                    broadcast_beta,
                    broadcast_gamma,
                    epsilon=self.epsilon)
            else:
                return K.batch_normalization(
                    inputs,
                    self.moving_mean,
                    self.moving_variance,
                    beta,
                    gamma,
                    epsilon=self.epsilon)

        # If the learning phase is *static* and set to inference:
        # if tf.cond(training, tf.constant(True)):
        # if training in {0, False}:
        #     return normalize_inference()

        # If the learning is either dynamic, or set to training:
        # print(inputs)
        # print(gamma, beta)
        # Training path: normalize with batch statistics and update the
        # moving mean/variance via exponential moving averages.
        normed_training, mean, variance = K.normalize_batch_in_training(
            inputs, gamma, beta, reduction_axes,
            epsilon=self.epsilon)

        self.add_update([K.moving_average_update(self.moving_mean,
                                                 mean,
                                                 self.momentum),
                         K.moving_average_update(self.moving_variance,
                                                 variance,
                                                 self.momentum)],
                        inputs)

        # Pick the normalized form corresponding to the training phase.
        return K.in_train_phase(normed_training,
                                normalize_inference,
                                training=training)

    # def _get_training_value(self, training=None):
    #     print(training)
    #     if training is None:
    #         training = K.learning_phase()
    #
    #     if isinstance(training, int):
    #         training = bool(training)
    #         return training
    #     return training == tf.Tensor()
    #     if base_layer_utils.is_in_keras_graph():
    #         training = math_ops.logical_and(training, self._get_trainable_var())
    #     else:
    #         training = math_ops.logical_and(training, self.trainable)
    #     return training
class RProjLocallyConnected2D(LowRankBasisLayer):
    """Locally-connected layer for 2D inputs, trained in a random subspace.

    The `LocallyConnected2D` layer works similarly to the `Conv2D` layer,
    except that weights are unshared, that is, a different set of filters is
    applied at each different patch of the input. This variant creates its
    weights through `LowRankBasisLayer.add_weight`, i.e. the trainable
    parameters live in the low-rank basis defined by `offset_creator_class`
    and `weight_basis`.

    Only `padding='valid'` is supported.
    """
    # @interfaces.legacy_conv2d_support
    def __init__(self, offset_creator_class, weight_basis,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(RProjLocallyConnected2D, self).__init__(offset_creator_class, weight_basis, **kwargs)
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        if self.padding != 'valid':
            raise ValueError('Invalid border mode for LocallyConnected2D '
                             '(only "valid" is supported): ' + padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=4)

    def build(self, input_shape):
        """Create the unshared kernel (one filter bank per output position) and bias.

        Requires fully-defined spatial dimensions, since the kernel shape
        depends on the number of output positions.
        """
        if self.data_format == 'channels_last':
            input_row, input_col = input_shape[1:-1]
            input_filter = input_shape[3]
        else:
            input_row, input_col = input_shape[2:]
            input_filter = input_shape[1]
        if input_row is None or input_col is None:
            raise ValueError('The spatial dimensions of the inputs to '
                             ' a LocallyConnected2D layer '
                             'should be fully-defined, but layer received '
                             'the inputs shape ' + str(input_shape))
        output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0],
                                                   self.padding, self.strides[0])
        output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1],
                                                   self.padding, self.strides[1])
        self.output_row = output_row
        self.output_col = output_col
        # One (kernel_size[0]*kernel_size[1]*input_filter -> filters) matrix
        # per output spatial position.
        self.kernel_shape = (output_row * output_col,
                             self.kernel_size[0] * self.kernel_size[1] * input_filter,
                             self.filters)
        self.kernel = self.add_weight(shape=self.kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            # Bias is also unshared: one value per output position and filter.
            self.bias = self.add_weight(shape=(output_row, output_col, self.filters),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        if self.data_format == 'channels_first':
            self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
        else:
            self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
        self.built = True

    def compute_output_shape(self, input_shape):
        """Compute output shape; data_format is normalized in __init__, so one
        of the two branches always matches."""
        if self.data_format == 'channels_first':
            rows = input_shape[2]
            cols = input_shape[3]
        elif self.data_format == 'channels_last':
            rows = input_shape[1]
            cols = input_shape[2]
        rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
                                             self.padding, self.strides[0])
        cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
                                             self.padding, self.strides[1])
        if self.data_format == 'channels_first':
            return (input_shape[0], self.filters, rows, cols)
        elif self.data_format == 'channels_last':
            return (input_shape[0], rows, cols, self.filters)

    def call(self, inputs):
        """Apply the locally-connected convolution, optional bias and activation."""
        output = K.local_conv2d(inputs,
                                self.kernel,
                                self.kernel_size,
                                self.strides,
                                (self.output_row, self.output_col),
                                self.data_format)
        if self.use_bias:
            if self.data_format == 'channels_first' or self.data_format == 'channels_last':
                output = K.bias_add(output, self.bias, data_format=self.data_format)
        output = self.activation(output)
        return output

    def get_config(self):
        """Serialize layer configuration, merged with the base-layer config."""
        config = {
            'filters': self.filters,
            'kernel_size': self.kernel_size,
            'strides': self.strides,
            'padding': self.padding,
            'data_format': self.data_format,
            'activation': activations.serialize(self.activation),
            'use_bias': self.use_bias,
            'kernel_initializer': initializers.serialize(self.kernel_initializer),
            'bias_initializer': initializers.serialize(self.bias_initializer),
            'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
            'bias_regularizer': regularizers.serialize(self.bias_regularizer),
            'activity_regularizer': regularizers.serialize(self.activity_regularizer),
            'kernel_constraint': constraints.serialize(self.kernel_constraint),
            'bias_constraint': constraints.serialize(self.bias_constraint)
        }
        # Bug fix: was `super(LocallyConnected2D, self)`, but LocallyConnected2D
        # is not in this class's MRO (we derive from LowRankBasisLayer), which
        # raises at serialization time.
        base_config = super(RProjLocallyConnected2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 33,100 | 41.491656 | 116 | py |
fl-analysis | fl-analysis-master/src/data/emnist.py | import os
import h5py
import tensorflow as tf
import numpy as np
def load_data(only_digits=True, cache_dir=None):
    """Load the Federated EMNIST dataset, downloading and caching it if needed.

    Derived from the Leaf repository (https://github.com/TalwalkarLab/leaf)
    pre-processing of the Extended MNIST dataset, grouping examples by writer
    ("LEAF: A Benchmark for Federated Settings",
    https://arxiv.org/abs/1812.01097). Pixel value 1.0 is background and 0.0
    is the digit colour -- the inverse of common MNIST representations.

    With `only_digits=True`: 3,383 users, 10 label classes
    (341,873 train / 40,832 test examples). With `only_digits=False`:
    3,400 users, 62 label classes (671,585 train / 77,483 test examples);
    labels [0-9] are digits, [10-35] uppercase, [36-61] lowercase letters.
    Each user's examples are split across train and test; writers with fewer
    than 2 examples are excluded.

    Args:
        only_digits: if True, restrict to the digit classes [0-9]; otherwise
            include upper- and lowercase letters (62 classes total).
        cache_dir: directory used to cache the download; `None` uses Keras'
            default cache directory.

    Returns:
        Tuple `(train, test)` of per-writer example arrays.
    """
    if only_digits:
        prefix = 'fed_emnist_digitsonly'
        digest = '55333deb8546765427c385710ca5e7301e16f4ed8b60c1dc5ae224b42bd5b14b'
    else:
        prefix = 'fed_emnist'
        digest = 'fe1ed5a502cea3a952eb105920bff8cffb32836b5173cb18a57a32c3606f3ea0'

    archive_name = prefix + '.tar.bz2'
    archive_path = tf.keras.utils.get_file(
        archive_name,
        origin='https://storage.googleapis.com/tff-datasets-public/' + archive_name,
        file_hash=digest,
        hash_algorithm='sha256',
        extract=True,
        archive_format='tar',
        cache_dir=cache_dir)

    base_dir = os.path.dirname(archive_path)
    train_client_data = process_h5py(os.path.join(base_dir, prefix + '_train.h5'))
    test_client_data = process_h5py(os.path.join(base_dir, prefix + '_test.h5'))
    return train_client_data, test_client_data
def process_h5py(filename):
    """Read a Federated EMNIST HDF5 file into per-writer example dicts.

    Args:
        filename: path to a `fed_emnist*_{train,test}.h5` file whose
            `examples` group contains one sub-group per writer with `pixels`
            and `label` datasets.

    Returns:
        `np.ndarray` of dicts `{'pixels': ..., 'label': ...}`, one per writer.
    """
    # Use a context manager so the file handle is always closed (the original
    # leaked it), and index with `[()]` -- the `.value` accessor was
    # deprecated and removed in h5py 3.0.
    out = []
    with h5py.File(filename, 'r') as file:
        drawers = file['examples']
        for key in drawers.keys():
            out.append({'pixels': drawers[key]['pixels'][()],
                        'label': drawers[key]['label'][()]})
    return np.asarray(out)
fl-analysis | fl-analysis-master/src/data/tf_data.py | import itertools
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from src.data import image_augmentation
from src.data import emnist
class Dataset:
    """In-memory dataset wrapper exposing `tf.data` training pipelines,
    including pipelines that interleave auxiliary (backdoor) samples.

    The auxiliary attributes (`x_aux`, `y_aux`, `mal_aux_labels`,
    `x_aux_test`, `mal_aux_labels_test`) are populated externally by the
    attack-construction code before the aux pipelines are used.
    """

    def __init__(self, x_train, y_train, batch_size=50, x_test=None, y_test=None):
        self.batch_size = batch_size
        # LIMIT = 5000 # for debugging remove this
        # x_train, y_train = x_train[:LIMIT], y_train[:LIMIT]
        self.x_train, self.y_train = self.shuffle(x_train, y_train)
        self.x_test, self.y_test = x_test, y_test
        # Auxiliary (attack) data; assigned by callers, not computed here.
        self.x_aux, self.y_aux, self.mal_aux_labels = None, None, None
        self.x_aux_test, self.mal_aux_labels_test = None, None
        self.fg = tf.data.Dataset.from_tensor_slices((self.x_train, self.y_train))

    def shuffle(self, x, y):
        """Jointly permute samples and labels with a fresh random permutation."""
        perms = np.random.permutation(x.shape[0])
        return x[perms, :], y[perms]

    def get_data(self):
        """Build the plain training pipeline: shuffle, batch, prefetch.

        Returns:
            A `tf.data.Dataset` yielding (inputs, labels) batches.
        """
        shuffle_size = min(self.x_train.shape[0], 10000)
        return self.fg \
            .shuffle(shuffle_size) \
            .batch(self.batch_size, drop_remainder=True) \
            .prefetch(tf.data.experimental.AUTOTUNE)

    def get_aux(self, mal_num_batch):
        """Yield batches of auxiliary samples paired with malicious labels.

        If there is less than one full batch of aux data, the whole aux set is
        yielded once as a single (smaller) batch.

        Yields:
            tuple of two: input data batch and corresponding malicious labels
        """
        if int(self.x_aux.shape[0] / self.batch_size) < 1:
            yield self.x_aux, self.mal_aux_labels
        for bid in range(int(self.x_aux.shape[0] / self.batch_size)):
            batch_x = self.x_aux[bid * self.batch_size:(bid + 1) * self.batch_size]
            batch_y = self.mal_aux_labels[bid * self.batch_size:(bid + 1) * self.batch_size]
            yield batch_x, batch_y

    def get_data_with_aux(self, insert_aux_times, num_batches, pixel_pattern=None, noise_level=None):
        """Build batches with `insert_aux_times` aux samples mixed into each batch.

        :param insert_aux_times: number of aux samples per batch (1 for Bagdasaryan)
        :param num_batches: number of batches to generate (200 for Bagdasaryan)
        :param pixel_pattern: unused here; kept for interface compatibility
        :param noise_level: sigma of Gaussian noise added to aux training samples

        Returns:
            A `tf.data.Dataset` yielding mixed (inputs, labels) batches.
        """
        multiplier = max(float(insert_aux_times) / float(self.mal_aux_labels.shape[0]),
                         1)  # potential multiplier if aux is smaller than insert
        number_of_mal_items = int(multiplier * num_batches)
        r1 = insert_aux_times
        r2 = self.batch_size - insert_aux_times
        normal_mult = max(float(num_batches) * float(self.batch_size) / self.x_train.shape[0], 1)
        # NOTE(review): the repeat count here is additionally scaled by the
        # dataset size, unlike ImageGeneratorDataset.get_data_with_aux which
        # repeats only ceil(normal_mult) times -- confirm which is intended.
        normal_fg = self.fg \
            .repeat(int(math.ceil(normal_mult * self.x_train.shape[0]))) \
            .shuffle(self.x_train.shape[0]) \
            .batch(r2, drop_remainder=True)
        if insert_aux_times == 0:
            return normal_fg
        mal_fb = tf.data.Dataset.from_tensor_slices((self.x_aux, self.mal_aux_labels)) \
            .repeat(number_of_mal_items) \
            .shuffle(number_of_mal_items)
        if noise_level is not None:  # Add noise
            # Bug fix: the image_augmentation module defines `add_noise_batch`;
            # `add_noise` does not exist and raised AttributeError here.
            mal_fb = mal_fb.map(image_augmentation.add_noise_batch(noise_level))
        mal_fb = mal_fb.batch(r1, drop_remainder=True)
        # Concatenate r1 aux samples with r2 normal samples into each batch.
        zipped = tf.data.Dataset.zip((mal_fb, normal_fg)).map(lambda x, y:
                                                              (tf.concat((x[0], y[0]), axis=0),
                                                               tf.concat((x[1], y[1]), axis=0))
                                                              )
        result = zipped.unbatch()
        return result.batch(self.batch_size, drop_remainder=True) \
            .take(num_batches)

    def get_aux_test_generator(self, aux_size):
        """Aux evaluation pipeline; `aux_size` > 0 repeats the (small) aux test set."""
        if aux_size == 0:
            return tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
                .batch(self.batch_size, drop_remainder=False) \
                .prefetch(tf.data.experimental.AUTOTUNE)
        return tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
            .repeat(aux_size) \
            .batch(self.batch_size, drop_remainder=False) \
            .prefetch(tf.data.experimental.AUTOTUNE)

    @staticmethod
    def keep_samples(x_train, y_train, number_of_samples):
        """Keep `number_of_samples` samples; -1 keeps everything.

        NOTE(review): this permutes only the first `number_of_samples`
        indices, so it always draws from the head of the arrays rather than
        sampling uniformly from the whole set -- confirm this is intended.
        """
        if number_of_samples == -1:
            return x_train, y_train
        perms = np.random.permutation(number_of_samples)
        return x_train[perms, :], y_train[perms]

    @staticmethod
    def keep_samples_iterative(x_train, y_train, number_of_samples):
        """Per-client variant of `keep_samples` for lists of arrays."""
        if number_of_samples == -1:
            return x_train, y_train
        perms = [np.random.permutation(min(number_of_samples, val.shape[0])) for val in x_train]
        return [val[perm, :] for val, perm in zip(x_train, perms)], \
               [val[perm] for val, perm in zip(y_train, perms)]

    @staticmethod
    def apply_trigger(x_aux):
        """Stamp a 4x4 white square into the top-left corner of every image.

        Note: modifies `x_aux` in place and also returns it.
        """
        triggersize = 4
        trigger = np.ones((x_aux.shape[0], triggersize, triggersize, 1))
        out = x_aux
        out[:, 0:triggersize, 0:triggersize, :] = trigger
        return out

    @staticmethod
    def get_mnist_dataset(number_of_samples):
        """MNIST dataset loader: scales to [0, 1], adds a channel axis."""
        mnist = tf.keras.datasets.mnist
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0
        x_train, x_test = x_train.astype(np.float32), x_test.astype(np.float32)
        x_train, x_test = x_train[..., np.newaxis], x_test[..., np.newaxis]
        x_train, y_train = Dataset.keep_samples(x_train, y_train, number_of_samples)
        return (x_train, y_train), (x_test, y_test)

    @staticmethod
    def get_fmnist_dataset(number_of_samples):
        """Fashion MNIST dataset loader: scales to [0, 1], adds a channel axis."""
        fmnist = tf.keras.datasets.fashion_mnist
        (x_train, y_train), (x_test, y_test) = fmnist.load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0
        x_train, x_test = x_train.astype(np.float32), x_test.astype(np.float32)
        x_train, x_test = x_train[..., np.newaxis], x_test[..., np.newaxis]
        x_train, y_train = Dataset.keep_samples(x_train, y_train, number_of_samples)
        return (x_train, y_train), (x_test, y_test)

    @staticmethod
    def get_cifar10_dataset(number_of_samples):
        """Cifar10 dataset loader: scales to [0, 1] and subtracts the per-pixel
        training mean from both splits."""
        cifar = tf.keras.datasets.cifar10
        (x_train, y_train), (x_test, y_test) = cifar.load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0
        x_train, x_test = x_train.astype(np.float32), x_test.astype(np.float32)
        y_train, y_test = np.squeeze(y_train, axis=1), np.squeeze(y_test, axis=1)
        x_train, y_train = Dataset.keep_samples(x_train, y_train, number_of_samples)
        x_test, y_test = Dataset.keep_samples(x_test, y_test, -1)  # Note: hardcoded
        # Subtract
        x_train_mean = np.mean(x_train, axis=0)
        x_train -= x_train_mean
        x_test -= x_train_mean
        return (x_train, y_train), (x_test, y_test)

    @staticmethod
    def get_emnist_dataset(number_of_samples, number_of_clients, normalize_mnist_data):
        """nonIID MNIST dataset loader.

        Loads the per-writer Federated EMNIST data and, when there are fewer
        clients than writers, randomly merges writers into `number_of_clients`
        buckets. Pixel values are inverted (1.0 - pixels) relative to the raw
        files so that 0.0 is background.
        """
        train_dataset, test_dataset = emnist.load_data()
        x_train, y_train = np.array([1.0 - np.array(val['pixels']) for val in train_dataset]), \
                           np.array([np.array(val['label']).astype(np.uint8) for val in train_dataset])
        x_test, y_test = np.array([1.0 - np.array(val['pixels']) for val in test_dataset]), \
                         np.array([np.array(val['label']).astype(np.uint8) for val in test_dataset])
        if normalize_mnist_data:
            emnist_mean, emnist_std = 0.036910772, 0.16115953
            x_train = np.array([(x - emnist_mean) / emnist_std for x in x_train])
            x_test = np.array([(x - emnist_mean) / emnist_std for x in x_test])
        # Randomly assign clients to buckets but keep them as client
        if number_of_clients < x_train.shape[0]:
            assignments = np.random.randint(0, number_of_clients, x_train.shape[0], dtype=np.uint16)
            new_x_train = []
            new_y_train = []
            new_x_test = []
            new_y_test = []
            for i in range(number_of_clients):
                new_x_train.append(
                    np.concatenate(x_train[assignments == i], axis=0)
                )
                new_y_train.append(
                    np.concatenate(y_train[assignments == i], axis=0)
                )
                new_x_test.append(
                    np.concatenate(x_test[assignments == i], axis=0)
                )
                new_y_test.append(
                    np.concatenate(y_test[assignments == i], axis=0)
                )
            #
            # new_x_train = np.concatenate(new_x_train, axis=0)
            # new_y_train = np.concatenate(new_y_train, axis=0)
            # new_x_test = np.concatenate(new_x_test, axis=0)
            # new_y_test = np.concatenate(new_y_test, axis=0)
            if number_of_samples == -1:
                number_of_samples_per_client = -1
            else:
                number_of_samples_per_client = int(number_of_samples / float(number_of_clients))
            x_train, y_train = Dataset.keep_samples_iterative(new_x_train, new_y_train, number_of_samples_per_client)
            x_test, y_test = Dataset.keep_samples_iterative(new_x_test, new_y_test,
                                                            min(number_of_samples_per_client, 500))
        elif number_of_clients > x_train.shape[0]:
            print(f"Number of clients {number_of_clients} is large than amount of EMNIST users {x_train.shape[0]}")
        else:
            print("Exactly using EMNIST as clients!")
        x_train, x_test = [val.astype(np.float32)[..., np.newaxis] for val in x_train], \
                          [val.astype(np.float32)[..., np.newaxis] for val in x_test]
        return (x_train, y_train), (x_test, y_test)
class ImageGeneratorDataset(Dataset):
    """Dataset variant that applies image augmentation (flip/shift/normalize)
    inside its `tf.data` pipelines; used for CIFAR-style image data."""

    def __init__(self, x_train, y_train, batch_size=50, x_test=None, y_test=None):
        super().__init__(x_train, y_train, batch_size, x_test, y_test)

    def get_aux_test_generator(self, aux_size):
        # Aux evaluation pipeline; when aux_size > 0 the small aux test set is
        # repeated and additionally augmented like training data.
        if aux_size == 0:
            return tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
                .batch(self.batch_size, drop_remainder=False) \
                .prefetch(tf.data.experimental.AUTOTUNE)
        test_dataset = tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
            .repeat(aux_size) \
            .batch(self.batch_size, drop_remainder=False) \

        return test_dataset \
            .map(image_augmentation.test_aux_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
            .prefetch(tf.data.experimental.AUTOTUNE)

    def get_data(self):
        # Plain training pipeline with per-batch augmentation.
        return self.fg\
            .shuffle(self.x_train.shape[0]) \
            .batch(self.batch_size, drop_remainder=True) \
            .map(image_augmentation.augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
            .prefetch(tf.data.experimental.AUTOTUNE)

    def get_aux(self, mal_num_batch):
        # Aux training pipeline with augmentation applied to aux samples.
        multiplier = max(float(self.batch_size) / float(self.mal_aux_labels.shape[0]),
                         1)  # potential multiplier if aux is smaller than insert
        number_of_mal_items = int(multiplier * mal_num_batch)
        return tf.data.Dataset.from_tensor_slices((self.x_aux, self.mal_aux_labels)) \
            .repeat(number_of_mal_items) \
            .batch(self.batch_size, drop_remainder=False) \
            .map(image_augmentation.train_aux_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \

    def get_data_with_aux(self, insert_aux_times, num_batches, noise_level=None):
        """Creates one batch of data with the AUX data inserted `insert_aux_times` per batch with malicious labels.

        :param insert_aux_times: number of aux samples per batch. 1 for Bagdasaryan
        :param num_batches: number of batches to generate. 200 for Bagdasaryan
        :param noise_level: sigma of Gaussian noise added to aux samples

        Returns:
            a `tf.data.Dataset` of mixed (inputs, labels) batches
        """
        # assert self.y_aux != [] and self.x_aux != [] and self.mal_aux_labels != []
        multiplier = float(insert_aux_times) / float(self.mal_aux_labels.shape[0])  # potential multiplier if aux is smaller than insert
        number_of_mal_items = int(math.ceil(multiplier * num_batches))
        # Each output batch = r1 aux samples + r2 normal samples.
        r1 = insert_aux_times
        r2 = self.batch_size - insert_aux_times
        normal_mult = max(float(num_batches) * float(self.batch_size) / self.x_train.shape[0], 1)
        normal_fg = self.fg\
            .repeat(int(math.ceil(normal_mult))) \
            .shuffle(self.x_train.shape[0]) \
            .batch(r2, drop_remainder=True) \
            .map(image_augmentation.augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \

        if insert_aux_times == 0:
            print(f"Insert 0 {normal_mult}")
            return normal_fg
        mal_fb = tf.data.Dataset.from_tensor_slices((self.x_aux, self.mal_aux_labels)) \
            .repeat(number_of_mal_items) \
            .shuffle(number_of_mal_items * self.mal_aux_labels.shape[0])
        mal_fb = mal_fb.batch(r1, drop_remainder=True) \
            .map(image_augmentation.train_aux_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \

        if noise_level is not None:  # Add noise
            mal_fb = mal_fb.map(image_augmentation.add_noise_batch(noise_level))
        # Zip aux and normal batches, concatenate along the batch axis, then
        # re-batch to the configured batch size.
        zipped = tf.data.Dataset.zip((mal_fb, normal_fg)).map(lambda x, y:
                                                              (tf.concat((x[0], y[0]), axis=0), tf.concat((x[1], y[1]), axis=0))
                                                              )
        result = zipped.unbatch()
        return result.batch(self.batch_size, drop_remainder=True)\
            .take(num_batches)
class PixelPatternDataset(ImageGeneratorDataset):
    """Backdoor dataset for pixel-pattern attacks: the entire training set is
    used as aux data, relabeled to `target_label`, and a trigger pattern is
    stamped onto aux images inside the pipelines."""

    def __init__(self, x_train, y_train, target_label, batch_size=50, x_test=None, y_test=None):
        super().__init__(x_train, y_train, batch_size, x_test, y_test)
        # Assign train set part
        (self.x_aux, self.y_aux) = \
            (self.x_train, self.y_train)
        # All aux samples get the attacker-chosen target label.
        self.mal_aux_labels = np.repeat(target_label, self.y_aux.shape).astype(np.uint8)
        self.pixel_pattern = 'basic'

    def get_aux_test_generator(self, aux_size):
        # Same as the parent's pipeline, but with the pixel pattern applied.
        if aux_size == 0:
            return tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
                .batch(self.batch_size, drop_remainder=False) \
                .map(image_augmentation.add_pixel_pattern(self.pixel_pattern)) \
                .prefetch(tf.data.experimental.AUTOTUNE)
        test_dataset = tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
            .repeat(aux_size) \
            .batch(self.batch_size, drop_remainder=False) \

        return test_dataset \
            .map(image_augmentation.test_aux_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
            .map(image_augmentation.add_pixel_pattern(self.pixel_pattern)) \
            .prefetch(tf.data.experimental.AUTOTUNE)

    def get_data_with_aux(self, insert_aux_times, num_batches, noise_level=None):
        """Creates one batch of data with the AUX data inserted `insert_aux_times` per batch with malicious labels.

        :param insert_aux_times: number of aux samples per batch. 1 for Bagdasaryan
        :param num_batches: number of batches to generate. 200 for Bagdasaryan

        Returns:
            a `tf.data.Dataset` of mixed (inputs, labels) batches; aux samples
            carry the pixel pattern and the malicious target label
        """
        # assert self.y_aux != [] and self.x_aux != [] and self.mal_aux_labels != []
        multiplier = float(insert_aux_times) / float(
            self.mal_aux_labels.shape[0])  # potential multiplier if aux is smaller than insert
        number_of_mal_items = int(math.ceil(multiplier * num_batches))
        # Each output batch = r1 aux samples + r2 normal samples.
        r1 = insert_aux_times
        r2 = self.batch_size - insert_aux_times
        normal_mult = max(float(num_batches) * float(self.batch_size) / self.x_train.shape[0], 1)
        normal_fg = self.fg \
            .repeat(int(math.ceil(normal_mult))) \
            .shuffle(self.x_train.shape[0]) \
            .batch(r2, drop_remainder=True) \
            .map(image_augmentation.augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        if insert_aux_times == 0:
            return normal_fg
        mal_fb = tf.data.Dataset.from_tensor_slices((self.x_aux, self.mal_aux_labels)) \
            .repeat(number_of_mal_items) \
            .shuffle(number_of_mal_items * self.mal_aux_labels.shape[0])
        mal_fb = mal_fb.batch(r1, drop_remainder=True) \
            .map(image_augmentation.train_aux_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        if noise_level is not None:  # Add noise
            mal_fb = mal_fb.map(image_augmentation.add_noise_batch(noise_level))
        # Stamp the trigger onto the (augmented, possibly noised) aux batches.
        mal_fb = mal_fb.map(image_augmentation.add_pixel_pattern(self.pixel_pattern))
        zipped = tf.data.Dataset.zip((mal_fb, normal_fg)).map(lambda x, y:
                                                              (tf.concat((x[0], y[0]), axis=0),
                                                               tf.concat((x[1], y[1]), axis=0))
                                                              )
        result = zipped.unbatch()
        return result.batch(self.batch_size, drop_remainder=True)\
            .take(num_batches)
class GeneratorDataset(Dataset):
    """Dataset backed by a pre-built `tf.data.Dataset` generator rather than
    in-memory arrays; only `get_data` is supported."""

    def __init__(self, generator, batch_size):
        super().__init__([], [], 0, None, None)
        self.generator = generator
        self.batch_size = batch_size

    def get_data(self):
        """Batch and prefetch the wrapped generator."""
        pipeline = self.generator.batch(self.batch_size)
        return pipeline.prefetch(tf.data.experimental.AUTOTUNE)
| 18,878 | 42.802784 | 135 | py |
fl-analysis | fl-analysis-master/src/data/image_augmentation.py |
import tensorflow as tf
import numpy as np
from tensorflow.keras.preprocessing.image import apply_affine_transform
def augment(image,label):
    """Training augmentation: random horizontal flip, random shift, normalize."""
    flipped = tf.image.random_flip_left_right(image)
    shifted = tf.numpy_function(shift, [flipped], tf.float32)
    return normalize(shifted), label
def test_augment(image,label):
    """Evaluation-time transform: normalization only, no randomness."""
    normalized = normalize(image)
    return normalized, label
def train_aux_augment(image, label):
    """Augmentation for auxiliary (backdoor) training samples: flip + shift,
    without the normalization step used for clean data."""
    out = tf.image.random_flip_left_right(image)
    out = tf.numpy_function(shift, [out], tf.float32)
    return out, label
def test_aux_augment(image, label):
    """Augmentation used when the aux test set is small; mirrors training
    augmentation exactly."""
    return augment(image, label)
def normalize(image):
    """Per-image normalization hook.

    NOTE(review): normalization is currently disabled -- every transform
    below is commented out and the image is returned unchanged. `mean`/`std`
    are CIFAR-10 channel statistics kept for when this is re-enabled.
    """
    mean = [0.4914, 0.4822, 0.4465]
    std = [0.2023, 0.1994, 0.2010]
    # tf.print("Before:", tf.shape(image), tf.math.reduce_std(image))
    # image = tf.image.per_image_standardization(image)
    # image = image - tf.reshape(mean, [1, 1, 1, 3])
    # image = image / tf.reshape(std, [1, 1, 1, 3])
    # tf.print("After:", tf.shape(image), tf.math.reduce_std(image))
    return image
def shift(images):
    """Apply an independent random affine shift to every image in the batch."""
    shifted = [shift_single(img) for img in images]
    return np.array(shifted)
def shift_single(image):
    """Randomly translate a single HWC numpy image by up to 10% per axis,
    filling exposed pixels with the nearest edge value."""
    rows, cols = image.shape[0], image.shape[1]
    tx = np.random.uniform(-0.1, 0.1) * rows
    ty = np.random.uniform(-0.1, 0.1) * cols
    return apply_affine_transform(
        image,
        theta=0,
        tx=tx,
        ty=ty,
        shear=0,
        zx=1,
        zy=1,
        row_axis=0,
        col_axis=1,
        channel_axis=2,
        fill_mode='nearest')
def add_noise_batch(sigma):
    """Return a `tf.data` map callback adding N(0, sigma) Gaussian noise to images."""
    def _apply(images, labels):
        noise = tf.random.normal(tf.shape(images), mean=0, stddev=sigma)
        return images + noise, labels
    return _apply
def add_pixel_pattern(pixel_pattern):
    """Return a `tf.data` map callback that stamps a white square "trigger"
    into the top-left corner of every image in the batch.

    Args:
        pixel_pattern: currently unused; kept for interface compatibility
            with callers that select a pattern type.

    Returns:
        Callable `(images, labels) -> (triggered_images, labels)`.
    """
    triggersize = 4

    def np_callback(images):
        # Copy before writing: tf.numpy_function may hand us an array backed
        # by the input tensor's buffer, and mutating it in place is
        # documented as unsafe.
        out = images.copy()
        out[:, 0:triggersize, 0:triggersize, :] = 1.0
        return out

    def cb(images, labels):
        images = tf.numpy_function(np_callback, [images], tf.float32)
        return images, labels
    return cb
def pixel_pattern_if_needed(needed):
    """Return the pixel-pattern callback when `needed`, otherwise an identity map."""
    def identity(images, labels):
        return images, labels
    return add_pixel_pattern(None) if needed else identity
def debug(image, label):
    """Display every image in the batch with its label (for visual debugging).

    The +0.5 offset undoes the mean-centering applied during preprocessing.
    """
    import matplotlib.pyplot as plt
    for idx in range(image.shape[0]):
        plt.figure()
        plt.imshow(image[idx] + 0.5)
        plt.title(f"Label: {label[idx]}")
        plt.show()
fl-analysis | fl-analysis-master/src/data/tf_data_global.py | from collections import defaultdict
import numpy as np
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
from src.data import image_augmentation
import logging
class GlobalDataset:
    """
    A GlobalDataset represents a dataset as a whole. It has two purposes.
    - Client datasets are derived from it
    - Our global dataset is used for evaluation of the global model. `x_test`, `y_test` and the aux sets
    """
    def __init__(self, x_test, y_test):
        self.x_test = x_test
        self.y_test = y_test
        # Per-client training shards (lists of arrays); filled by subclasses.
        self.x_train = []
        self.y_train = []
        # Aux (backdoor) splits; populated by build_global_aux.
        self.x_aux_train, self.y_aux_train, self.mal_aux_labels_train = \
            [], [], []
        self.x_aux_test, self.y_aux_test, self.mal_aux_labels_test = \
            [], [], []
        self.test_generator = tf.data.Dataset.from_tensor_slices((x_test, y_test))
        self.aux_test_generator = None
    def get_dataset_for_client(self, client_id):
        # Abstract: subclasses return the (x, y) arrays for one client.
        raise Exception("Not implemented")
    def get_normal_and_aux_dataset_for_client(self, client_id, aux_sample_size, attack_objective):
        # Abstract hook for per-client aux splits; unused by current subclasses.
        raise Exception("Not implemented")
    def get_test_batch(self, batch_size, max_num_batches=-1):
        """Creates batches of test data.

        With the default max_num_batches=-1, `take` keeps the whole test set.

        Returns:
            a `tf.data.Dataset` of (inputs, labels) evaluation batches
        """
        # count = min(int(self.x_test.shape[0] / batch_size), max_num_batches)
        # for bid in range(count):
        #     batch_x = self.x_test[bid * batch_size:(bid + 1) * batch_size]
        #     batch_y = self.y_test[bid * batch_size:(bid + 1) * batch_size]
        #
        #     yield batch_x, batch_y
        # bid = 0
        # Check here if non cifar?
        return self.test_generator.batch(batch_size) \
            .take(max_num_batches) \
            .map(image_augmentation.test_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
            .prefetch(tf.data.experimental.AUTOTUNE)
    # TODO 0: Do we want to use num_backdoor_tasks or aux_sample_size ?
    def build_global_aux(self, mal_clients, num_backdoor_tasks, attack_objective, aux_sample_size, augment_size):
        """ Select backdoor tasks.

        For up to `num_backdoor_tasks` malicious clients (capped at
        `aux_sample_size` total samples), moves all of their samples of class
        `attack_objective[0]` out of their training shard (mutating
        self.x_train/self.y_train in place) into the aux set, relabeled as
        `attack_objective[1]`. The aux train split is also used as the aux
        test split.
        """
        if np.count_nonzero(mal_clients) == 0:
            return  # no aux
        assert np.count_nonzero(mal_clients) >= num_backdoor_tasks  # assert we have less 'tasks' than clients
        data_x, data_y = self.x_train, self.y_train
        total_x_aux, total_y_aux, total_mal_aux_labels = [], [], []
        if aux_sample_size == -1:
            aux_sample_size = 10000000  # fix as we reformat this
        total_aux_count = 0
        num_tasks = 0
        for i in range(len(data_x)):
            if num_tasks >= num_backdoor_tasks:
                break
            if total_aux_count >= aux_sample_size:
                print(f"Hit limit of {total_aux_count}/{aux_sample_size} samples!")
                break
            if mal_clients[i]:
                x_train_me, y_train_me = data_x[i], data_y[i]
                # Pick attack samples
                inds = np.where(y_train_me == attack_objective[0])[0]  # Find all
                logging.debug(f"{i} Found {len(inds)} of class {attack_objective} to poison!")
                test_inds = np.ones(x_train_me.shape[0], dtype=bool)
                test_inds[inds] = False
                x_aux, y_aux = x_train_me[inds], y_train_me[inds]
                x_train_me, y_train_me = x_train_me[test_inds], y_train_me[test_inds]
                # randomly permute labels
                mal_labels = np.repeat(attack_objective[1], len(y_aux))
                current_aux_count = y_aux.size
                if total_aux_count + current_aux_count > aux_sample_size:
                    # constrain
                    current_aux_count = aux_sample_size - total_aux_count  # how many we have left
                    x_aux = x_aux[:current_aux_count, :]
                    y_aux = y_aux[:current_aux_count]
                    mal_labels = mal_labels[:current_aux_count]
                total_x_aux.append(x_aux)
                total_y_aux.append(y_aux)
                total_mal_aux_labels.append(mal_labels)
                data_x[i], data_y[i] = x_train_me, y_train_me
                assert not np.any(
                    data_y[i] == attack_objective[0])  # assert data_y doesnt contain any attack label
                total_aux_count += current_aux_count
                num_tasks += 1
        # assert len(total_x_aux) == num_backdoor_tasks # not applicable with aux_sample_size
        self.x_aux_train = np.concatenate(total_x_aux)
        self.y_aux_train = np.concatenate(total_y_aux)
        self.mal_aux_labels_train = np.concatenate(total_mal_aux_labels).astype(np.uint8)
        # Assign train as test set for now ... ! Depends on how we want to implement the behavior
        self.x_aux_test = self.x_aux_train
        self.y_aux_test = self.y_aux_train
        self.mal_aux_labels_test = self.mal_aux_labels_train
        # self.build_aux_generator(augment_size)
        print(f"Got {len(self.x_aux_train)}/{aux_sample_size} samples for {num_backdoor_tasks} tasks!")
    # def build_aux_generator(self, augment_size):
    #     # self.aux_test_generator = tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.y_aux_test))
    #     if augment_size == 0:
    #         self.aux_test_generator = ImageDataGenerator()
    #     else:
    #         self.aux_test_generator = ImageDataGenerator(
    #             # rotation_range=15,
    #             horizontal_flip=True,
    #             width_shift_range=0.1,
    #             height_shift_range=0.1
    #         )
    #         self.aux_test_generator.fit(self.x_aux_test)
    def get_aux_generator(self, batch_size, aux_size, augment_cifar, attack_type, max_test_batches):
        # Aux evaluation pipeline. For pixel_pattern attacks the trigger is
        # stamped on; aux_size > 0 repeats a small aux test set, optionally
        # with CIFAR-style augmentation.
        if aux_size == 0:
            test_dataset = tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
                .batch(batch_size, drop_remainder=False) \
                .map(image_augmentation.pixel_pattern_if_needed(attack_type == 'pixel_pattern'), num_parallel_calls=tf.data.experimental.AUTOTUNE)
            if max_test_batches is not None:
                test_dataset = test_dataset.shuffle(max_test_batches)\
                    .take(max_test_batches)
            return test_dataset \
                .prefetch(tf.data.experimental.AUTOTUNE)
        test_dataset = tf.data.Dataset.from_tensor_slices((self.x_aux_test, self.mal_aux_labels_test)) \
            .repeat(aux_size) \
            .batch(batch_size, drop_remainder=False) \
            .map(image_augmentation.pixel_pattern_if_needed(attack_type == 'pixel_pattern'), num_parallel_calls=tf.data.experimental.AUTOTUNE)
        if augment_cifar:
            return test_dataset\
                .map(image_augmentation.test_aux_augment, num_parallel_calls=tf.data.experimental.AUTOTUNE) \
                .prefetch(tf.data.experimental.AUTOTUNE)
        else:
            return test_dataset.prefetch(tf.data.experimental.AUTOTUNE)
    def get_full_dataset(self, size):
        # Draw `size` samples uniformly (without replacement) from the union
        # of all client shards.
        x, y = np.concatenate(self.x_train), np.concatenate(self.y_train)
        perms = np.random.choice(x.shape[0], size, replace=False)
        x, y = x[perms, :], y[perms]
        return x, y
class IIDGlobalDataset(GlobalDataset):
    """Global dataset split uniformly at random (IID) into equal-size client shards."""

    def __init__(self, x_train, y_train, num_clients, x_test, y_test):
        super().__init__(x_test, y_test)
        self.num_clients = num_clients
        x_train, y_train = self.shuffle(x_train, y_train)
        # Equal-size contiguous slices of the shuffled data, one per client.
        samples_per_client = int(x_train.shape[0] / self.num_clients)
        for client_id in range(num_clients):
            start = client_id * samples_per_client
            stop = start + samples_per_client
            self.x_train.append(x_train[start:stop])
            self.y_train.append(y_train[start:stop])

    def shuffle(self, x, y):
        """Jointly permute samples and labels."""
        order = np.random.permutation(x.shape[0])
        return x[order, :], y[order]

    def get_dataset_for_client(self, client_id):
        # dataset = tf.data.Dataset.from_tensor_slices((self.x_train[client_id], self.y_train[client_id]))
        # return dataset
        return self.x_train[client_id], self.y_train[client_id]
class NonIIDGlobalDataset(GlobalDataset):
    """Global dataset whose per-client shards were constructed externally (non-IID)."""

    def __init__(self, x_train, y_train, x_test, y_test, num_clients):
        """Expects `x_train`/`y_train` as per-client lists and `x_test`/`y_test` as single arrays."""
        super().__init__(x_test, y_test)
        self.x_train = x_train
        self.y_train = y_train

    def shuffle(self):
        raise Exception("Shuffling is not supported on a non-IID dataset!")

    def get_dataset_for_client(self, client_id):
        return self.x_train[client_id], self.y_train[client_id]
class DirichletDistributionDivider():
    """Divides dataset according to dirichlet distribution"""
    def __init__(self, x_train, y_train, train_aux, test_aux, exclude_aux, num_clients):
        """`train_aux` and `test_aux` should be indices for the `train` arrays."""
        self.x_train = x_train
        self.y_train = y_train
        self.train_aux = train_aux
        self.test_aux = test_aux
        self.exclude_aux = exclude_aux
        self.num_clients = num_clients
    def build(self):
        """Partition training indices across clients via per-class Dirichlet
        draws; returns ([x per client], [y per client]), each shard shuffled."""
        # Concentration parameter; lower alpha -> more skewed (more non-IID) split.
        alpha = 0.9
        cifar_classes = {}
        # Group sample indices by label, optionally excluding auxiliary indices.
        for ind, x in enumerate(self.y_train):
            label = x
            if self.exclude_aux and (ind in self.train_aux or ind in self.test_aux):
                continue
            if label in cifar_classes:
                cifar_classes[label].append(ind)
            else:
                cifar_classes[label] = [ind]
        # NOTE(review): assumes label 0 exists and every class has roughly the
        # same size as class 0 — confirm for datasets other than CIFAR.
        class_size = len(cifar_classes[0])
        per_participant_list = defaultdict(list)
        no_classes = len(cifar_classes.keys())
        for n in range(no_classes):
            np.random.shuffle(cifar_classes[n])
            # Expected per-client count of class-n samples; sums to ~class_size.
            sampled_probabilities = class_size * np.random.dirichlet(
                np.array(self.num_clients * [alpha]))
            for user in range(self.num_clients):
                no_imgs = int(round(sampled_probabilities[user]))
                # Hand the next `no_imgs` indices of class n to this client and
                # consume them from the pool; later clients may receive fewer
                # if the pool runs out.
                sampled_list = cifar_classes[n][:min(len(cifar_classes[n]), no_imgs)]
                per_participant_list[user].extend(sampled_list)
                cifar_classes[n] = cifar_classes[n][min(len(cifar_classes[n]), no_imgs):]
        # Fancy-index the training arrays with each client's index list
        # (assumes x_train / y_train are numpy arrays — TODO confirm).
        per_participant_train_x = [self.x_train[ind] for _, ind in per_participant_list.items()]
        per_participant_train_y = [self.y_train[ind] for _, ind in per_participant_list.items()]
        # Shuffle each client's shard so classes are interleaved.
        for n in range(self.num_clients):
            perms = np.random.permutation(per_participant_train_x[n].shape[0])
            per_participant_train_x[n] = per_participant_train_x[n][perms, :]
            per_participant_train_y[n] = per_participant_train_y[n][perms]
        return (per_participant_train_x, per_participant_train_y) | 10,891 | 41.054054 | 146 | py |
fl-analysis | fl-analysis-master/src/model/resnet.py | from __future__ import print_function
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Activation
from tensorflow.keras.layers import AveragePooling2D, Input, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.regularizers import l2
from tensorflow.keras.models import Model
import numpy as np
import os
# # Training parameters
# batch_size = 32 # orig paper trained all networks with batch_size=128
# epochs = 200
# data_augmentation = True
# num_classes = 10
#
# # Subtracting pixel mean improves accuracy
# subtract_pixel_mean = True
#
# # Model parameter
# # ----------------------------------------------------------------------------
# # | | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch
# # Model | n | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti
# # |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)
# # ----------------------------------------------------------------------------
# # ResNet20 | 3 (2)| 92.16 | 91.25 | ----- | ----- | 35 (---)
# # ResNet32 | 5(NA)| 92.46 | 92.49 | NA | NA | 50 ( NA)
# # ResNet44 | 7(NA)| 92.50 | 92.83 | NA | NA | 70 ( NA)
# # ResNet56 | 9 (6)| 92.71 | 93.03 | 93.01 | NA | 90 (100)
# # ResNet110 |18(12)| 92.65 | 93.39+-.16| 93.15 | 93.63 | 165(180)
# # ResNet164 |27(18)| ----- | 94.07 | ----- | 94.54 | ---(---)
# # ResNet1001| (111)| ----- | 92.39 | ----- | 95.08+-.14| ---(---)
# # ---------------------------------------------------------------------------
# n = 3
#
# # Model version
# # Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)
# version = 1
#
# # Computed depth from supplied model parameter n
# if version == 1:
# depth = n * 6 + 2
# elif version == 2:
# depth = n * 9 + 2
#
# # Model name, depth and version
# model_type = 'ResNet%dv%d' % (depth, version)
#
# # Load the CIFAR10 data.
# (x_train, y_train), (x_test, y_test) = cifar10.load_data()
#
# # Input image dimensions.
# input_shape = x_train.shape[1:]
#
# # Normalize data.
# x_train = x_train.astype('float32') / 255
# x_test = x_test.astype('float32') / 255
#
# # If subtract pixel mean is enabled
# if subtract_pixel_mean:
# x_train_mean = np.mean(x_train, axis=0)
# x_train -= x_train_mean
# x_test -= x_train_mean
#
# print('x_train shape:', x_train.shape)
# print(x_train.shape[0], 'train samples')
# print(x_test.shape[0], 'test samples')
# print('y_train shape:', y_train.shape)
#
# # Convert class vectors to binary class matrices.
# y_train = keras.utils.to_categorical(y_train, num_classes)
# y_test = keras.utils.to_categorical(y_test, num_classes)
def lr_schedule(epoch):
    """Piecewise-constant learning-rate schedule.

    Starts at 1e-3 and is scaled down after 80, 120, 160 and 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    # (threshold, factor) pairs from the latest breakpoint down; the first
    # matching threshold wins, mirroring the original if/elif ladder.
    breakpoints = ((180, 0.5e-3), (160, 1e-3), (120, 1e-2), (80, 1e-1))
    lr = 1e-3
    for threshold, factor in breakpoints:
        if epoch > threshold:
            lr *= factor
            break
    print('Learning rate: ', lr)
    return lr
def resnet_layer(inputs,
                 num_filters=16,
                 kernel_size=3,
                 strides=1,
                 activation='relu',
                 batch_normalization=True,
                 conv_first=True,
                 name=None):
    """2D Convolution-Batch Normalization-Activation stack builder

    # Arguments
        inputs (tensor): input tensor from input image or previous layer
        num_filters (int): Conv2D number of filters
        kernel_size (int): Conv2D square kernel dimensions
        strides (int): Conv2D square stride dimensions
        activation (string): activation name
        batch_normalization (bool): whether to include batch normalization
        conv_first (bool): conv-bn-activation (True) or
            bn-activation-conv (False)
    # Returns
        x (tensor): tensor as input to the next layer
    """
    conv = Conv2D(num_filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same',
                  kernel_initializer='he_normal',
                  kernel_regularizer=l2(1e-4),
                  name=name)

    def _bn_act(tensor):
        # Optional BatchNorm followed by optional activation, shared by both
        # orderings below.
        if batch_normalization:
            tensor = BatchNormalization()(tensor)
        if activation is not None:
            tensor = Activation(activation)(tensor)
        return tensor

    if conv_first:
        return _bn_act(conv(inputs))
    return conv(_bn_act(inputs))
def resnet_v1(input_shape, depth, num_classes=10):
    """ResNet Version 1 Model builder [a]
    Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
    Last ReLU is after the shortcut connection.
    At the beginning of each stage, the feature map size is halved (downsampled)
    by a convolutional layer with strides=2, while the number of filters is
    doubled. Within each stage, the layers have the same number filters and the
    same number of filters.
    Features maps sizes:
    stage 0: 32x32, 16
    stage 1: 16x16, 32
    stage 2: 8x8,  64
    The Number of parameters is approx the same as Table 6 of [a]:
    ResNet20 0.27M
    ResNet32 0.46M
    ResNet44 0.66M
    ResNet56 0.85M
    ResNet110 1.7M
    # Arguments
        input_shape (tensor): shape of input image tensor
        depth (int): number of core convolutional layers
        num_classes (int): number of classes (CIFAR10 has 10)
    # Returns
        model (Model): Keras model instance
    """
    if (depth - 2) % 6 != 0:
        raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
    # Start model definition.
    num_filters = 16
    num_res_blocks = int((depth - 2) / 6)
    inputs = Input(shape=input_shape)
    x = resnet_layer(inputs=inputs, num_filters = num_filters)
    # Instantiate the stack of residual units: 3 stages of num_res_blocks
    # residual blocks, each block being two 3x3 conv layers plus a shortcut.
    for stack in range(3):
        for res_block in range(num_res_blocks):
            strides = 1
            if stack > 0 and res_block == 0:  # first layer but not first stack
                strides = 2  # downsample
            y = resnet_layer(inputs=x,
                             num_filters=num_filters,
                             strides=strides,
                             name=f"Conv2D_stack{stack}_res{res_block}_l0")
            y = resnet_layer(inputs=y,
                             num_filters=num_filters,
                             activation=None,
                             name=f"Conv2D_stack{stack}_res{res_block}_l1")
            if stack > 0 and res_block == 0:  # first layer but not first stack
                # linear projection residual shortcut connection to match
                # changed dims
                x = resnet_layer(inputs=x,
                                 num_filters=num_filters,
                                 kernel_size=1,
                                 strides=strides,
                                 activation=None,
                                 batch_normalization=False,
                                 name=f"Conv2D_stack{stack}_res{res_block}_l2")
            # Residual addition followed by the block's final ReLU.
            x = tf.keras.layers.add([x, y])
            x = Activation('relu')(x)
        # Double the filter count at each new stage.
        num_filters *= 2
    # Add classifier on top.
    # v1 does not use BN after last shortcut connection-ReLU
    x = AveragePooling2D(pool_size=8)(x)
    y = Flatten()(x)
    outputs = Dense(num_classes,
                    activation='softmax',
                    kernel_initializer='he_normal')(y)
    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
def resnet_v2(input_shape, depth, num_classes=10):
    """ResNet Version 2 Model builder [b]
    Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
    bottleneck layer
    First shortcut connection per layer is 1 x 1 Conv2D.
    Second and onwards shortcut connection is identity.
    At the beginning of each stage, the feature map size is halved (downsampled)
    by a convolutional layer with strides=2, while the number of filter maps is
    doubled. Within each stage, the layers have the same number filters and the
    same filter map sizes.
    Features maps sizes:
    conv1  : 32x32,  16
    stage 0: 32x32,  64
    stage 1: 16x16, 128
    stage 2:  8x8,  256
    # Arguments
        input_shape (tensor): shape of input image tensor
        depth (int): number of core convolutional layers
        num_classes (int): number of classes (CIFAR10 has 10)
    # Returns
        model (Model): Keras model instance
    """
    if (depth - 2) % 9 != 0:
        raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
    # Start model definition.
    # NOTE(review): docstring above says conv1 has 16 filters, but this code
    # starts from 32 — confirm which is intended.
    num_filters_in = 32
    num_res_blocks = int((depth - 2) / 9)
    inputs = Input(shape=input_shape)
    # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
    x = resnet_layer(inputs=inputs,
                     num_filters=num_filters_in,
                     conv_first=True)
    # Instantiate the stack of residual units: 3 stages of bottleneck blocks.
    for stage in range(3):
        for res_block in range(num_res_blocks):
            activation = 'relu'
            batch_normalization = True
            strides = 1
            if stage == 0:
                # First stage expands channels 4x; its first block skips the
                # pre-activation (input was already BN-ReLU'd above).
                num_filters_out = num_filters_in * 4
                if res_block == 0:  # first layer and first stage
                    activation = None
                    batch_normalization = False
            else:
                num_filters_out = num_filters_in * 2
                if res_block == 0:  # first layer but not first stage
                    strides = 2    # downsample
            # bottleneck residual unit: 1x1 reduce, 3x3, 1x1 expand
            # (pre-activation order: BN-ReLU-Conv via conv_first=False).
            y = resnet_layer(inputs=x,
                             num_filters=num_filters_in,
                             kernel_size=1,
                             strides=strides,
                             activation=activation,
                             batch_normalization=batch_normalization,
                             conv_first=False)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters_in,
                             conv_first=False)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters_out,
                             kernel_size=1,
                             conv_first=False)
            if res_block == 0:
                # linear projection residual shortcut connection to match
                # changed dims
                x = resnet_layer(inputs=x,
                                 num_filters=num_filters_out,
                                 kernel_size=1,
                                 strides=strides,
                                 activation=None,
                                 batch_normalization=False)
            x = tf.keras.layers.add([x, y])
        num_filters_in = num_filters_out
    # Add classifier on top.
    # v2 has BN-ReLU before Pooling
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = AveragePooling2D(pool_size=8)(x)
    y = Flatten()(x)
    outputs = Dense(num_classes,
                    activation='softmax',
                    kernel_initializer='glorot_uniform')(y)
    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
#
# if version == 2:
# model = resnet_v2(input_shape=input_shape, depth=depth)
# else:
# model = resnet_v1(input_shape=input_shape, depth=depth)
#
# model.compile(loss='categorical_crossentropy',
# optimizer=Adam(learning_rate=lr_schedule(0)),
# metrics=['accuracy'])
# model.summary()
# print(model_type)
#
# # Prepare model model saving directory.
# save_dir = os.path.join(os.getcwd(), 'saved_models')
# model_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type
# if not os.path.isdir(save_dir):
# os.makedirs(save_dir)
# filepath = os.path.join(save_dir, model_name)
#
# # Prepare callbacks for model saving and for learning rate adjustment.
# checkpoint = ModelCheckpoint(filepath=filepath,
# monitor='val_acc',
# verbose=1,
# save_best_only=True)
#
# lr_scheduler = LearningRateScheduler(lr_schedule)
#
# lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
# cooldown=0,
# patience=5,
# min_lr=0.5e-6)
#
# callbacks = [checkpoint, lr_reducer, lr_scheduler]
#
# # Run training, with or without data augmentation.
# if not data_augmentation:
# print('Not using data augmentation.')
# model.fit(x_train, y_train,
# batch_size=batch_size,
# epochs=epochs,
# validation_data=(x_test, y_test),
# shuffle=True,
# callbacks=callbacks)
# else:
# print('Using real-time data augmentation.')
# # This will do preprocessing and realtime data augmentation:
# datagen = ImageDataGenerator(
# # set input mean to 0 over the dataset
# featurewise_center=False,
# # set each sample mean to 0
# samplewise_center=False,
# # divide inputs by std of dataset
# featurewise_std_normalization=False,
# # divide each input by its std
# samplewise_std_normalization=False,
# # apply ZCA whitening
# zca_whitening=False,
# # epsilon for ZCA whitening
# zca_epsilon=1e-06,
# # randomly rotate images in the range (deg 0 to 180)
# rotation_range=0,
# # randomly shift images horizontally
# width_shift_range=0.1,
# # randomly shift images vertically
# height_shift_range=0.1,
# # set range for random shear
# shear_range=0.,
# # set range for random zoom
# zoom_range=0.,
# # set range for random channel shifts
# channel_shift_range=0.,
# # set mode for filling points outside the input boundaries
# fill_mode='nearest',
# # value used for fill_mode = "constant"
# cval=0.,
# # randomly flip images
# horizontal_flip=True,
# # randomly flip images
# vertical_flip=False,
# # set rescaling factor (applied before any other transformation)
# rescale=None,
# # set function that will be applied on each input
# preprocessing_function=None,
# # image data format, either "channels_first" or "channels_last"
# data_format=None,
# # fraction of images reserved for validation (strictly between 0 and 1)
# validation_split=0.0)
#
# # Compute quantities required for featurewise normalization
# # (std, mean, and principal components if ZCA whitening is applied).
# datagen.fit(x_train)
#
# # Fit the model on the batches generated by datagen.flow().
# model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
# validation_data=(x_test, y_test),
# epochs=epochs, verbose=1, workers=4,
# callbacks=callbacks)
| 15,635 | 36.317422 | 81 | py |
fl-analysis | fl-analysis-master/src/model/modelc.py |
import tensorflow.keras as keras
from tensorflow.keras.regularizers import l2
from tensorflow.keras import layers
def build_modelc(l2_reg):
    """Build an all-convolutional "Model C"-style CNN for 32x32x3 inputs.

    Three groups of three 3x3 convs (96/192/192 filters, stride-2 convs
    instead of pooling), then 1x1 convs down to 10 channels, global average
    pooling, and a 10-way softmax. `l2_reg` is applied to all kernels/biases.
    """
    # NOTE(review): `do` is unused — the Dropout layers below are commented out.
    do = 0.2
    model = keras.Sequential()
    # model.add(layers.Dropout(0.2, noise_shape=(32, 32, 3)))
    model.add(layers.Conv2D(filters=96, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg), input_shape=(32, 32, 3)))
    model.add(layers.Conv2D(filters=96, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
    # Stride-2 conv acts as the downsampling step for this group.
    model.add(layers.Conv2D(filters=96, kernel_size=3, strides=2, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
    # model.add(layers.Dropout(0.5))
    model.add(layers.Conv2D(filters=192, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
    model.add(layers.Conv2D(filters=192, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
    model.add(layers.Conv2D(filters=192, kernel_size=3, strides=2, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
    # model.add(layers.Dropout(0.5))
    model.add(layers.Conv2D(filters=192, kernel_size=3, strides=1, kernel_initializer='he_normal', padding='same', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
    model.add(layers.Conv2D(filters=192, kernel_size=1, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
    # 1x1 conv to 10 channels, one per class, before global pooling.
    model.add(layers.Conv2D(filters=10, kernel_size=1, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg)))
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(units=10, activation='softmax'))
    return model | 2,230 | 73.366667 | 218 | py |
fl-analysis | fl-analysis-master/src/model/lenet.py |
import tensorflow.keras as keras
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2
def build_lenet5(input_shape=(32, 32, 3), l2_reg=None):
    """Build a LeNet-5 style CNN: two conv+maxpool blocks (6 and 16 filters),
    then 120/84 dense layers and a 10-way softmax.

    `l2_reg` (float or None) optionally applies L2 regularization to all
    kernels and biases.
    """
    # Dropout rate of 0.0 makes the Dropout layers no-ops; kept as a tuning knob.
    do = 0.0
    regularizer = l2(l2_reg) if l2_reg is not None else None
    model = keras.Sequential()
    model.add(layers.Conv2D(filters=6, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=regularizer, bias_regularizer=regularizer, input_shape=input_shape))
    model.add(layers.MaxPooling2D())
    model.add(layers.Conv2D(filters=16, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=regularizer, bias_regularizer=regularizer))
    model.add(layers.MaxPooling2D())
    model.add(layers.Flatten())
    model.add(layers.Dropout(do))
    model.add(layers.Dense(units=120, kernel_initializer='he_normal', kernel_regularizer=regularizer, bias_regularizer=regularizer, activation='relu'))
    model.add(layers.Dropout(do))
    model.add(layers.Dense(units=84, kernel_initializer='he_normal', kernel_regularizer=regularizer, bias_regularizer=regularizer, activation='relu'))
    model.add(layers.Dropout(do))
    model.add(layers.Dense(units=10, activation='softmax'))
    return model | 1,288 | 46.740741 | 220 | py |
fl-analysis | fl-analysis-master/src/model/test_model.py |
import tensorflow.keras as keras
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2
def build_test_model(input_shape=(32, 32, 3), l2_reg=None):
    """Build a tiny CNN (one conv + BN + maxpool, one 120-unit dense layer,
    10-way softmax) — a lightweight stand-in for quick experiments.

    `l2_reg` (float or None) optionally applies L2 regularization to the
    conv/dense kernels and biases.
    """
    # NOTE(review): `do` is unused — no Dropout layers are added below.
    do = 0.0
    regularizer = l2(l2_reg) if l2_reg is not None else None
    model = keras.Sequential()
    model.add(layers.Conv2D(filters=6, kernel_size=5, strides=1, kernel_initializer='he_normal', padding='valid', activation='relu', kernel_regularizer=regularizer, bias_regularizer=regularizer, input_shape=input_shape))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D())
    model.add(layers.Flatten())
    model.add(layers.Dense(units=120, kernel_initializer='he_normal', kernel_regularizer=regularizer, bias_regularizer=regularizer, activation='relu'))
    model.add(layers.Dense(units=10, activation='softmax'))
    return model | 848 | 37.590909 | 220 | py |
fl-analysis | fl-analysis-master/src/model/mobilenet.py | # Implementation by https://github.com/ruchi15/CNN-MobileNetV2-Cifar10
import tensorflow as tf
import os
import warnings
import numpy as np
from tensorflow.keras.layers import Input, Activation, Conv2D, Dense, Dropout, BatchNormalization, ReLU, \
DepthwiseConv2D, GlobalAveragePooling2D, GlobalMaxPooling2D, Add
from tensorflow.keras.models import Model
from keras import regularizers
# define the filter size
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
# define the calculation of each 'inverted Res_Block'
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    """MobileNetV2 inverted residual block: 1x1 expand -> 3x3 depthwise ->
    1x1 project, with a residual add when shapes allow.

    `alpha` scales the output channel count; `block_id` names the layers and,
    when falsy (block 0), skips the expansion step.
    """
    prefix = 'block_{}_'.format(block_id)
    in_channels = inputs.shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    # Keep output channels a multiple of 8 (hardware-friendly).
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    # Expand
    if block_id:
        x = Conv2D(expansion * in_channels, kernel_size=1, strides=1, padding='same', use_bias=False, activation=None,
                   kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5), name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'expand_BN')(x)
        x = ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'
    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None, use_bias=False, padding='same',
                        kernel_initializer="he_normal", depthwise_regularizer=regularizers.l2(4e-5),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'depthwise_BN')(x)
    x = ReLU(6., name=prefix + 'depthwise_relu')(x)
    # Project (linear bottleneck: no activation after this conv).
    x = Conv2D(pointwise_filters, kernel_size=1, strides=1, padding='same', use_bias=False, activation=None,
               kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5), name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'project_BN')(x)
    # Residual connection only when input/output shapes match.
    if in_channels == pointwise_filters and stride == 1:
        return Add(name=prefix + 'add')([inputs, x])
    return x
# Create Build
def create_model(rows, cols, channels):
    """Build a width-0.5 MobileNetV2 classifier for `rows`x`cols`x`channels`
    inputs with a 10-way softmax head (CIFAR-10).

    Note the first conv uses stride 1 (not 2 as in the ImageNet model) to suit
    small 32x32 inputs.
    """
    # encoder - input
    alpha = 0.5
    include_top = True
    model_input = tf.keras.Input(shape=(rows, cols, channels), name='input_image')
    x = model_input
    first_block_filters = _make_divisible(32 * alpha, 8)
    # model architechture
    x = Conv2D(first_block_filters, kernel_size=3, strides=1, padding='same', use_bias=False,
               kernel_initializer="he_normal", kernel_regularizer=regularizers.l2(4e-5), name='Conv1')(model_input)
    x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1, expansion=1, block_id=0)
    x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=1)
    x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=2)
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2, expansion=6, block_id=3)
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=4)
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=5)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2, expansion=6, block_id=6)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=7)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=8)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=9)
    # Dropout between the deeper block groups for regularization.
    x = Dropout(rate=0.25)(x)
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=10)
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=11)
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=12)
    x = Dropout(rate=0.25)(x)
    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=13)
    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=14)
    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=15)
    x = Dropout(rate=0.25)(x)
    x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, expansion=6, block_id=16)
    x = Dropout(rate=0.25)(x)
    # define filter size (last block)
    # The final 1x1 conv is not narrowed below 1280 channels when alpha < 1
    # (standard MobileNetV2 behavior).
    if alpha > 1.0:
        last_block_filters = _make_divisible(1280 * alpha, 8)
    else:
        last_block_filters = 1280
    x = Conv2D(last_block_filters, kernel_size=1, use_bias=False, kernel_initializer="he_normal",
               kernel_regularizer=regularizers.l2(4e-5), name='Conv_1')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='Conv_1_bn')(x)
    x = ReLU(6., name='out_relu')(x)
    if include_top:
        x = GlobalAveragePooling2D(name='global_average_pool')(x)
        x = Dense(10, activation='softmax', use_bias=True, name='Logits')(x)
    else:
        pass
        # if pooling == 'avg':
        #     x = GlobalAveragePooling2D()(x)
        # elif pooling == 'max':
        #     x = GlobalMaxPooling2D()(x)
    # create model of MobileNetV2 (for CIFAR-10)
    model = Model(inputs=model_input, outputs=x, name='mobilenetv2_cifar10')
    # model.compile(optimizer=tf.keras.optimizers.Adam(lr_initial), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    # model.compile(optimizer=tf.keras.optimizers.Adam(lr_initial), loss='sparse_categorical_crossentropy',
    #               metrics=['accuracy'])
    return model
def mobilenetv2_cifar10():
    """Return a stock Keras MobileNetV2 (alpha=0.5, random weights) for
    32x32x3 inputs with 10 output classes.

    The commented-out alternatives (custom `create_model`, input resizing)
    were earlier approaches kept for reference.
    """
    # model = create_model(32, 32, 3)
    # # model.summary()
    #
    # return model
    # inputs = tf.keras.Input(shape=(32, 32, 3))
    # resize_layer = tf.keras.layers.Lambda(
    #     lambda image: tf.image.resize(
    #         image,
    #         (224, 224),
    #         method=tf.image.ResizeMethod.BICUBIC,
    #         preserve_aspect_ratio=True
    #     )
    #     , input_shape=(32, 32, 3))(inputs)
    return tf.keras.applications.mobilenet_v2.MobileNetV2(
        input_shape=(32, 32, 3), alpha=0.5,
        include_top=True, weights=None, input_tensor=None, pooling=None,
        classes=10
    )
| 6,395 | 41.357616 | 129 | py |
fl-analysis | fl-analysis-master/src/model/stacked_lstm.py |
import tensorflow as tf
# class StackedLSTM(tf.keras.Model):
# def __init__(self, vocab_size, embedding_dim, n_hidden):
# super().__init__(self)
# self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
#
# rnn_cells = [tf.keras.layers.LSTMCell(n_hidden) for _ in range(2)]
# stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)
# self.lstm_layer = tf.keras.layers.RNN(stacked_lstm)
#
# self.dense = tf.keras.layers.Dense(vocab_size)
#
# def call(self, inputs, states=None, return_state=False, training=False):
# x = inputs
# x = self.embedding(x, training=training)
# if states is None:
# states = self.lstm_layer.get_initial_state(x)
# x, states = self.lstm_layer(x, initial_state=states, training=training)
# x = self.dense(x, training=training)
#
# if return_state:
# return x, states
# else:
# return x
#
#
#
# def build_stacked_lstm():
# model = StackedLSTM(80, 8, 256)
# model.call(tf.keras.layers.Input(shape=(80), name="test_prefix"))
# # model.build(input_shape=(None, 80))
# model.summary()
# return model
from tensorflow.keras.layers import Embedding, Dense, LSTM
from tensorflow.keras import Sequential
def build_stacked_lstm():
    """Build a two-layer stacked LSTM classifier over an 80-symbol vocabulary
    (embedding dim 8, 256 hidden units per LSTM, softmax over the vocabulary).
    Commonly used for the Shakespeare next-character prediction task —
    TODO confirm intended dataset.
    """
    vocab_size, embedding_dim, n_hidden = 80, 8, 256
    model = Sequential()
    model.add(Embedding(vocab_size, embedding_dim))
    # rnn_cells = [tf.keras.layers.LSTMCell(n_hidden) for _ in range(2)]
    # stacked_lstm = tf.keras.layers.StackedRNNCells(rnn_cells)
    # lstm_layer = tf.keras.layers.RNN(stacked_lstm)
    # First LSTM emits the full sequence so the second can consume it.
    model.add(LSTM(n_hidden, return_sequences=True))
    model.add(LSTM(n_hidden, return_sequences=False))
    model.add(Dense(vocab_size, activation='softmax'))
    return model | 1,762 | 31.054545 | 77 | py |
houghnet | houghnet-master/src/main.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import src._init_paths
import os
import torch
import torch.utils.data
from src.lib.opts import opts
from src.lib.models.model import create_model, load_model, save_model
from src.lib.models.data_parallel import DataParallel
from src.lib.logger import Logger
from src.lib.datasets.dataset_factory import get_dataset
from src.lib.trains.train_factory import train_factory
def main(opt):
  """Train a HoughNet model per `opt`: builds model/optimizer, optionally
  resumes from a checkpoint, runs the train/val loop with periodic
  checkpointing, and steps the learning rate at `opt.lr_step` epochs."""
  torch.manual_seed(opt.seed)
  torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
  # torch.backends.cudnn.enabled = False
  Dataset = get_dataset(opt.dataset, opt.task)
  # Dataset-dependent fields (heads, resolution, ...) are filled into opt here.
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)
  logger = Logger(opt)
  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
  print('Creating model...')
  model = create_model(opt.arch, opt.heads, opt.head_conv, opt.region_num,
                       opt.vote_field_size)
  optimizer = torch.optim.Adam(model.parameters(), opt.lr)
  start_epoch = 0
  if opt.load_model != '':
    # Resuming also restores the optimizer state and starting epoch.
    model, optimizer, start_epoch = load_model(
      model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)
  Trainer = train_factory[opt.task]
  trainer = Trainer(opt, model, optimizer)
  trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
  print('Setting up data...')
  val_loader = torch.utils.data.DataLoader(
      Dataset(opt, 'val'),
      batch_size=1,
      shuffle=False,
      num_workers=1,
      pin_memory=True
  )
  # --test: run a single validation pass and exit without training.
  if opt.test:
    _, preds = trainer.val(0, val_loader)
    val_loader.dataset.run_eval(preds, opt.save_dir)
    return
  train_loader = torch.utils.data.DataLoader(
      Dataset(opt, 'train'), #train
      batch_size=opt.batch_size,
      shuffle=True,
      num_workers=opt.num_workers,
      pin_memory=True,
      drop_last=True
  )
  print('Starting training...')
  best = 1e10
  for epoch in range(start_epoch + 1, opt.num_epochs + 1):
    mark = epoch if opt.save_all else 'last'
    log_dict_train, _ = trainer.train(epoch, train_loader)
    logger.write('epoch: {} |'.format(epoch))
    for k, v in log_dict_train.items():
      logger.scalar_summary('train_{}'.format(k), v, epoch)
      logger.write('{} {:8f} | '.format(k, v))
    if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                 epoch, model, optimizer)
      with torch.no_grad():
        log_dict_val, preds = trainer.val(epoch, val_loader)
      for k, v in log_dict_val.items():
        logger.scalar_summary('val_{}'.format(k), v, epoch)
        logger.write('{} {:8f} | '.format(k, v))
      # Track the best validation metric (lower is better) and snapshot it.
      if log_dict_val[opt.metric] < best:
        best = log_dict_val[opt.metric]
        save_model(os.path.join(opt.save_dir, 'model_best.pth'),
                   epoch, model)
    else:
      save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                 epoch, model, optimizer)
    logger.write('\n')
    if epoch in opt.lr_step:
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                 epoch, model, optimizer)
      # Decay LR by 10x for each lr_step boundary passed.
      lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
      print('Drop LR to', lr)
      for param_group in optimizer.param_groups:
        param_group['lr'] = lr
  logger.close()
if __name__ == '__main__':
  # Parse command-line options and launch training when run as a script.
  opt = opts().parse()
  main(opt) | 3,606 | 32.398148 | 78 | py |
houghnet | houghnet-master/src/test.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import src._init_paths
import os
import json
import cv2
import numpy as np
import time
from progress.bar import Bar
import torch
# from src.lib.external.nms import soft_nms
from src.lib.opts import opts
from src.lib.logger import Logger
from src.lib.utils.utils import AverageMeter
from src.lib.datasets.dataset_factory import dataset_factory
from src.lib.detectors.detector_factory import detector_factory
class PrefetchDataset(torch.utils.data.Dataset):
  """Wraps a detection dataset so that image loading and per-scale
  pre-processing run inside DataLoader workers (prefetching), returning
  `(img_id, {'images', 'image', 'meta'})` per item."""

  def __init__(self, opt, dataset, pre_process_func):
    self.images = dataset.images
    self.load_image_func = dataset.coco.loadImgs
    self.img_dir = dataset.img_dir
    self.pre_process_func = pre_process_func
    self.opt = opt

  def __getitem__(self, index):
    """Load image `index` and pre-process it at every configured test scale."""
    img_id = self.images[index]
    img_info = self.load_image_func(ids=[img_id])[0]
    img_path = os.path.join(self.img_dir, img_info['file_name'])
    image = cv2.imread(img_path)
    images, meta = {}, {}
    # Fix: use the opt stored on the instance instead of relying on a
    # module-level `opt` global happening to be in scope.
    for scale in self.opt.test_scales:
      if self.opt.task == 'ddd':
        # 3D detection needs the camera calibration for pre-processing.
        images[scale], meta[scale] = self.pre_process_func(
          image, scale, img_info['calib'])
      else:
        images[scale], meta[scale] = self.pre_process_func(image, scale)
    return img_id, {'images': images, 'image': image, 'meta': meta}

  def __len__(self):
    return len(self.images)
def prefetch_test(opt):
  """Evaluate a trained detector over the val/test split, using a DataLoader
  (PrefetchDataset) so image pre-processing overlaps detection, then run the
  dataset's official evaluation on the collected results."""
  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  Dataset = dataset_factory[opt.dataset]
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)
  Logger(opt)
  Detector = detector_factory[opt.task]
  # --trainval switches evaluation to the test split.
  split = 'val' if not opt.trainval else 'test'
  dataset = Dataset(opt, split)
  detector = Detector(opt)
  data_loader = torch.utils.data.DataLoader(
    PrefetchDataset(opt, dataset, detector.pre_process),
    batch_size=1, shuffle=False, num_workers=1, pin_memory=True)
  results = {}
  num_iters = len(dataset)
  bar = Bar('{}'.format(opt.exp_id), max=num_iters)
  # Per-stage timing buckets reported by Detector.run.
  time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
  avg_time_stats = {t: AverageMeter() for t in time_stats}
  for ind, (img_id, pre_processed_images) in enumerate(data_loader):
    ret = detector.run(pre_processed_images)
    # img_id arrives as a tensor; key results by its plain int value.
    results[img_id.numpy().astype(np.int32)[0]] = ret['results']
    Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
                   ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
    for t in avg_time_stats:
      avg_time_stats[t].update(ret[t])
      Bar.suffix = Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
        t, tm = avg_time_stats[t])
    bar.next()
  bar.finish()
  for t in avg_time_stats:
    print('|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(t, tm=avg_time_stats[t]))
  dataset.run_eval(results, opt.save_dir)
def test(opt):
  """Run evaluation sequentially, loading each image inside the main loop.

  Same protocol as prefetch_test but without a DataLoader: the detector is
  handed image paths directly and per-stage timings are shown on the bar.
  """
  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  dataset_cls = dataset_factory[opt.dataset]
  opt = opts().update_dataset_info_and_set_heads(opt, dataset_cls)
  print(opt)
  Logger(opt)
  detector_cls = detector_factory[opt.task]
  split = 'test' if opt.trainval else 'val'
  dataset = dataset_cls(opt, split)
  detector = detector_cls(opt)
  results = {}
  num_iters = len(dataset)
  bar = Bar('{}'.format(opt.exp_id), max=num_iters)
  # One running-average meter per timing stage, in display order.
  avg_time_stats = {
      t: AverageMeter()
      for t in ('tot', 'load', 'pre', 'net', 'dec', 'post', 'merge')}
  for ind in range(num_iters):
    img_id = dataset.images[ind]
    img_info = dataset.coco.loadImgs(ids=[img_id])[0]
    img_path = os.path.join(dataset.img_dir, img_info['file_name'])
    if opt.task == 'ddd':
      # 3D detection needs the per-image camera calibration.
      ret = detector.run(img_path, img_info['calib'])
    else:
      ret = detector.run(img_path)
    results[img_id] = ret['results']
    Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
        ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
    for t in avg_time_stats:
      avg_time_stats[t].update(ret[t])
      Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(t, avg_time_stats[t].avg)
    bar.next()
  bar.finish()
  dataset.run_eval(results, opt.save_dir)
if __name__ == '__main__':
opt = opts().parse()
if opt.not_prefetch_test:
test(opt)
else:
prefetch_test(opt) | 4,351 | 32.476923 | 79 | py |
houghnet | houghnet-master/src/tools/convert_hourglass_weight.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Convert an ExtremeNet (.pkl) checkpoint into this repo's (.pth) layout by
# renaming the head weights and wrapping them in an {'epoch', 'state_dict'} dict.
MODEL_PATH = '../../models/ExtremeNet_500000.pkl'
OUT_PATH = '../../models/ExtremeNet_500000.pth'
import torch
state_dict = torch.load(MODEL_PATH)
# Map ExtremeNet head names (left) to this repo's head names (right).
key_map = {'t_heats': 'hm_t', 'l_heats': 'hm_l', 'b_heats': 'hm_b', \
  'r_heats': 'hm_r', 'ct_heats': 'hm_c', \
  't_regrs': 'reg_t', 'l_regrs': 'reg_l', \
  'b_regrs': 'reg_b', 'r_regrs': 'reg_r'}
out = {}
for k in state_dict.keys():
  changed = False
  for m in key_map.keys():
    if m in k:
      # 'ct_heats' contains the substring 't_heats'; skip that false match so
      # center-heat weights are only renamed by the 'ct_heats' rule.
      if 'ct_heats' in k and m == 't_heats':
        continue
      new_k = k.replace(m, key_map[m])
      out[new_k] = state_dict[k]
      changed = True
      print('replace {} to {}'.format(k, new_k))
  if not changed:
    # Backbone / unmatched keys are copied through unchanged.
    out[k] = state_dict[k]
data = {'epoch': 0,
        'state_dict': out}
torch.save(data, OUT_PATH)
| 905 | 28.225806 | 69 | py |
houghnet | houghnet-master/src/tools/voc_eval_lib/model/config.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Initial learning rate
__C.TRAIN.LEARNING_RATE = 0.001
# Momentum
__C.TRAIN.MOMENTUM = 0.9
# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0001
# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1
# Step size for reducing the learning rate, currently only support one step
__C.TRAIN.STEPSIZE = [30000]
# Iteration intervals for showing the loss during training, on command line interface
__C.TRAIN.DISPLAY = 10
# Whether to double the learning rate for bias
__C.TRAIN.DOUBLE_BIAS = True
# Whether to initialize the weights with truncated normal distribution
__C.TRAIN.TRUNCATED = False
# Whether to have weight decay on bias as well
__C.TRAIN.BIAS_DECAY = False
# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False
# Whether to use aspect-ratio grouping of training images, introduced merely for saving
# GPU memory
__C.TRAIN.ASPECT_GROUPING = False
# The number of snapshots kept, older ones are deleted to save space
__C.TRAIN.SNAPSHOT_KEPT = 3
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 180
# Scale to use during training (can list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 1
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 5000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'gt'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Whether to use all ground truth bounding boxes for training,
# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''
__C.TRAIN.USE_ALL_GT = True
#
# Testing options
#
__C.TEST = edict()
# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'gt'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
# __C.TEST.RPN_MIN_SIZE = 16
# Testing mode, default to be 'nms', 'top' is slower but better
# See report for details
__C.TEST.MODE = 'nms'
# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select
__C.TEST.RPN_TOP_N = 5000
#
# ResNet options
#
__C.RESNET = edict()
# Option to set if max-pooling is appended after crop_and_resize.
# if true, the region will be resized to a square of 2xPOOLING_SIZE,
# then 2x2 max-pooling is applied; otherwise the region will be directly
# resized to a square of POOLING_SIZE
__C.RESNET.MAX_POOL = False
# Number of fixed blocks during training, by default the first of all 4 blocks is fixed
# Range: 0 (none) to 3 (all)
__C.RESNET.FIXED_BLOCKS = 1
#
# MobileNet options
#
__C.MOBILENET = edict()
# Whether to regularize the depth-wise filters during training
__C.MOBILENET.REGU_DEPTH = False
# Number of fixed layers during training, by default the bottom 5 of 14 layers is fixed
# Range: 0 (none) to 12 (all)
__C.MOBILENET.FIXED_LAYERS = 5
# Weight decay for the mobilenet weights
__C.MOBILENET.WEIGHT_DECAY = 0.00004
# Depth multiplier
__C.MOBILENET.DEPTH_MULTIPLIER = 1.
#
# MISC
#
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..', '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Use an end-to-end tensorflow model.
# Note: models in E2E tensorflow mode have only been tested in feed-forward mode,
# but these models are exportable to other tensorflow instances as GraphDef files.
__C.USE_E2E_TF = True
# Default pooling mode, only 'crop' is available
__C.POOLING_MODE = 'crop'
# Size of the pooled region after RoI pooling
__C.POOLING_SIZE = 7
# Anchor scales for RPN
__C.ANCHOR_SCALES = [8,16,32]
# Anchor ratios for RPN
__C.ANCHOR_RATIOS = [0.5,1,2]
# Number of filters for the RPN layer
__C.RPN_CHANNELS = 512
def get_output_dir(imdb, weights_filename):
  """Return (creating it if needed) the directory for experiment artifacts.

  The canonical path is
  <ROOT_DIR>/output/<EXP_DIR>/<imdb.name>/<weights_filename>,
  where a None weights_filename falls back to 'default'.
  """
  name = 'default' if weights_filename is None else weights_filename
  path = osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name)
  path = osp.join(osp.abspath(path), name)
  if not os.path.exists(path):
    os.makedirs(path)
  return path
def get_output_tb_dir(imdb, weights_filename):
  """Return (creating it if needed) the directory for tensorflow summaries.

  The canonical path is
  <ROOT_DIR>/tensorboard/<EXP_DIR>/<imdb.name>/<weights_filename>,
  where a None weights_filename falls back to 'default'.
  """
  name = 'default' if weights_filename is None else weights_filename
  path = osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name)
  path = osp.join(osp.abspath(path), name)
  if not os.path.exists(path):
    os.makedirs(path)
  return path
def _merge_a_into_b(a, b):
  """Merge config dictionary a into config dictionary b, clobbering the
  options in b whenever they are also specified in a.

  Mutates b in place. Raises KeyError for keys absent from b and ValueError
  for type mismatches (lists are coerced when b's value is an ndarray).
  """
  if type(a) is not edict:
    # Nothing to merge if a is not an EasyDict (e.g. empty/None yaml).
    return
  for k, v in a.items():
    # a must specify keys that are in b
    if k not in b:
      raise KeyError('{} is not a valid config key'.format(k))
    # the types must match, too
    old_type = type(b[k])
    if old_type is not type(v):
      if isinstance(b[k], np.ndarray):
        # Coerce e.g. a yaml list onto the existing ndarray's dtype.
        v = np.array(v, dtype=b[k].dtype)
      else:
        raise ValueError(('Type mismatch ({} vs. {}) '
                          'for config key: {}').format(type(b[k]),
                                                       type(v), k))
    # recursively merge dicts
    if type(v) is edict:
      try:
        _merge_a_into_b(a[k], b[k])
      except:
        # Re-raise with context so the offending top-level key is visible.
        print(('Error under config key: {}'.format(k)))
        raise
    else:
      b[k] = v
def cfg_from_file(filename):
  """Load a YAML config file and merge it into the default options (__C).

  Uses yaml.safe_load: plain yaml.load without a Loader can execute
  arbitrary object constructors on untrusted files and raises TypeError on
  PyYAML >= 6. A config file only needs plain scalars/lists/dicts, which
  safe_load fully supports.
  """
  import yaml
  with open(filename, 'r') as f:
    yaml_cfg = edict(yaml.safe_load(f))
  _merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
  """Set config keys via list (e.g., from command line).

  cfg_list alternates dotted key paths and values:
  ['TRAIN.SCALES', '[400]', 'TEST.NMS', '0.4'].
  """
  from ast import literal_eval
  # Keys and values must come in pairs.
  assert len(cfg_list) % 2 == 0
  for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
    key_list = k.split('.')
    d = __C
    # Walk down the nested edicts to the parent of the target key.
    for subkey in key_list[:-1]:
      assert subkey in d
      d = d[subkey]
    subkey = key_list[-1]
    assert subkey in d
    try:
      # Parse numbers/lists/tuples/bools from the string form.
      value = literal_eval(v)
    except:
      # handle the case when v is a string literal
      value = v
    assert type(value) == type(d[subkey]), \
      'type {} does not match original type {}'.format(
      type(value), type(d[subkey]))
    d[subkey] = value
| 11,010 | 27.378866 | 91 | py |
houghnet | houghnet-master/src/lib/logger.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
import os
import time
import sys
import torch
USE_TENSORBOARD = True
try:
import tensorboardX
print('Using tensorboardX')
except:
USE_TENSORBOARD = False
class Logger(object):
  """Experiment logger: dumps the options to disk, mirrors messages to a
  timestamped text log, and (if tensorboardX is importable) also writes
  scalar summaries for tensorboard."""
  def __init__(self, opt):
    """Create a summary writer logging to log_dir."""
    if not os.path.exists(opt.save_dir):
      os.makedirs(opt.save_dir)
    if not os.path.exists(opt.debug_dir):
      os.makedirs(opt.debug_dir)
    time_str = time.strftime('%Y-%m-%d-%H-%M')
    # Snapshot every public attribute of opt into save_dir/opt.txt.
    args = dict((name, getattr(opt, name)) for name in dir(opt)
                if not name.startswith('_'))
    file_name = os.path.join(opt.save_dir, 'opt.txt')
    with open(file_name, 'wt') as opt_file:
      opt_file.write('==> torch version: {}\n'.format(torch.__version__))
      opt_file.write('==> cudnn version: {}\n'.format(
        torch.backends.cudnn.version()))
      opt_file.write('==> Cmd:\n')
      opt_file.write(str(sys.argv))
      opt_file.write('\n==> Opt:\n')
      for k, v in sorted(args.items()):
        opt_file.write(' %s: %s\n' % (str(k), str(v)))
    log_dir = opt.save_dir + '/logs_{}'.format(time_str)
    if USE_TENSORBOARD:
      # SummaryWriter creates log_dir itself.
      self.writer = tensorboardX.SummaryWriter(log_dir=log_dir)
    else:
      if not os.path.exists(os.path.dirname(log_dir)):
        os.mkdir(os.path.dirname(log_dir))
      if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    # Text log is written in both modes.
    self.log = open(log_dir + '/log.txt', 'w')
    try:
      # Best-effort copy of the option dump next to the logs (POSIX cp).
      os.system('cp {}/opt.txt {}/'.format(opt.save_dir, log_dir))
    except:
      pass
    self.start_line = True
  def write(self, txt):
    # Prefix a timestamp only at the start of each logical line; a newline
    # anywhere in txt re-arms the prefix for the next write.
    if self.start_line:
      time_str = time.strftime('%Y-%m-%d-%H-%M')
      self.log.write('{}: {}'.format(time_str, txt))
    else:
      self.log.write(txt)
    self.start_line = False
    if '\n' in txt:
      self.start_line = True
    self.log.flush()
  def close(self):
    # Close only the text log; tensorboard writers (if any) are left open.
    self.log.close()
  def scalar_summary(self, tag, value, step):
    """Log a scalar variable."""
    # Silently a no-op when tensorboardX is unavailable.
    if USE_TENSORBOARD:
      self.writer.add_scalar(tag, value, step)
| 2,228 | 29.534247 | 86 | py |
houghnet | houghnet-master/src/lib/detectors/exdet.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import src._init_paths
import os
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from src.lib.external.nms import soft_nms
from src.lib.models.decode import exct_decode, agnex_ct_decode
from src.lib.models.utils import flip_tensor
from src.lib.utils.image import get_affine_transform, transform_preds
from src.lib.utils.post_process import ctdet_post_process
from src.lib.utils.debugger import Debugger
from .base_detector import BaseDetector
class ExdetDetector(BaseDetector):
  """ExtremeNet-style detector: decodes boxes from extreme-point heatmaps
  (top/left/bottom/right) plus a center heatmap."""
  def __init__(self, opt):
    super(ExdetDetector, self).__init__(opt)
    # Class-agnostic vs. per-class extreme-point decoding.
    self.decode = agnex_ct_decode if opt.agnostic_ex else exct_decode
  def process(self, images, return_time=False):
    """Forward the network and decode extreme-point detections.

    Returns (output, dets) and additionally the post-forward timestamp
    when return_time is True.
    """
    with torch.no_grad():
      torch.cuda.synchronize()
      output = self.model(images)[-1]
      # In-place sigmoid on the five heatmap heads.
      t_heat = output['hm_t'].sigmoid_()
      l_heat = output['hm_l'].sigmoid_()
      b_heat = output['hm_b'].sigmoid_()
      r_heat = output['hm_r'].sigmoid_()
      c_heat = output['hm_c'].sigmoid_()
      torch.cuda.synchronize()
      forward_time = time.time()
      if self.opt.reg_offset:
        # With sub-pixel offset regression heads.
        dets = self.decode(t_heat, l_heat, b_heat, r_heat, c_heat,
                           output['reg_t'], output['reg_l'],
                           output['reg_b'], output['reg_r'],
                           K=self.opt.K,
                           scores_thresh=self.opt.scores_thresh,
                           center_thresh=self.opt.center_thresh,
                           aggr_weight=self.opt.aggr_weight)
      else:
        dets = self.decode(t_heat, l_heat, b_heat, r_heat, c_heat, K=self.opt.K,
                           scores_thresh=self.opt.scores_thresh,
                           center_thresh=self.opt.center_thresh,
                           aggr_weight=self.opt.aggr_weight)
    if return_time:
      return output, dets, forward_time
    else:
      return output, dets
  def debug(self, debugger, images, dets, output, scale=1):
    """Visualize heatmaps and raw detections for the first image only."""
    detection = dets.detach().cpu().numpy().copy()
    # Detections live on the output stride; scale boxes back to input pixels.
    detection[:, :, :4] *= self.opt.down_ratio
    for i in range(1):
      inp_height, inp_width = images.shape[2], images.shape[3]
      pred_hm = np.zeros((inp_height, inp_width, 3), dtype=np.uint8)
      img = images[i].detach().cpu().numpy().transpose(1, 2, 0)
      # Undo input normalization for display.
      img = ((img * self.std + self.mean) * 255).astype(np.uint8)
      parts = ['t', 'l', 'b', 'r', 'c']
      for p in parts:
        tag = 'hm_{}'.format(p)
        pred = debugger.gen_colormap(
          output[tag][i].detach().cpu().numpy(), (inp_height, inp_width))
        if p != 'c':
          # Extreme-point maps are merged into one overlay.
          pred_hm = np.maximum(pred_hm, pred)
        else:
          # Center map gets its own blended image.
          debugger.add_blend_img(
            img, pred, 'pred_{}_{:.1f}'.format(p, scale))
      debugger.add_blend_img(img, pred_hm, 'pred_{:.1f}'.format(scale))
      debugger.add_img(img, img_id='out_{:.1f}'.format(scale))
      for k in range(len(detection[i])):
        # print('detection', detection[i, k, 4], detection[i, k])
        if detection[i, k, 4] > 0.01:
          # print('detection', detection[i, k, 4], detection[i, k])
          debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1],
                                 detection[i, k, 4],
                                 img_id='out_{:.1f}'.format(scale))
  def post_process(self, dets, meta, scale=1):
    """Map detections back to original-image coordinates.

    Row 0 holds the normal pass and row 1 the flipped pass; the flipped
    boxes are mirrored horizontally before both are merged and warped
    back through the inverse of the test-time affine transform.
    """
    out_width, out_height = meta['out_width'], meta['out_height']
    dets = dets.detach().cpu().numpy().reshape(2, -1, 14)
    # Un-flip x coordinates of the flipped-pass detections (swap x1/x2).
    dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]]
    dets = dets.reshape(1, -1, 14)
    dets[0, :, 0:2] = transform_preds(
      dets[0, :, 0:2], meta['c'], meta['s'], (out_width, out_height))
    dets[0, :, 2:4] = transform_preds(
      dets[0, :, 2:4], meta['c'], meta['s'], (out_width, out_height))
    dets[:, :, 0:4] /= scale
    return dets[0]
  def merge_outputs(self, detections):
    """Merge per-scale detections, run per-class soft-NMS, and cap the
    total number of boxes at self.max_per_image."""
    detections = np.concatenate(
        [detection for detection in detections], axis=0).astype(np.float32)
    classes = detections[..., -1]
    # Drop zero/negative-score rows before class bucketing.
    keep_inds = (detections[:, 4] > 0)
    detections = detections[keep_inds]
    classes = classes[keep_inds]
    results = {}
    for j in range(self.num_classes):
      keep_inds = (classes == j)
      results[j + 1] = detections[keep_inds][:, 0:7].astype(np.float32)
      # soft_nms rescores in place (method=2: Gaussian).
      soft_nms(results[j + 1], Nt=0.5, method=2)
      results[j + 1] = results[j + 1][:, 0:5]
    scores = np.hstack([
      results[j][:, -1]
      for j in range(1, self.num_classes + 1)
    ])
    if len(scores) > self.max_per_image:
      # Global score threshold that keeps exactly max_per_image boxes.
      kth = len(scores) - self.max_per_image
      thresh = np.partition(scores, kth)[kth]
      for j in range(1, self.num_classes + 1):
        keep_inds = (results[j][:, -1] >= thresh)
        results[j] = results[j][keep_inds]
    return results
  def show_results(self, debugger, image, results):
    """Draw final boxes above the visualization threshold."""
    debugger.add_img(image, img_id='exdet')
    for j in range(1, self.num_classes + 1):
      for bbox in results[j]:
        if bbox[4] > self.opt.vis_thresh:
          debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='exdet')
    debugger.show_all_imgs(pause=self.pause)
| 5,149 | 37.721805 | 80 | py |
houghnet | houghnet-master/src/lib/detectors/ctdet.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from src.lib.external.nms import soft_nms
from src.lib.models.decode import ctdet_decode
from src.lib.models.utils import flip_tensor
from src.lib.utils.image import get_affine_transform
from src.lib.utils.post_process import ctdet_post_process
from src.lib.utils.debugger import Debugger
from .base_detector import BaseDetector
class CtdetDetector(BaseDetector):
  """Center-point 2D detector: decodes boxes from a center heatmap plus
  width/height (and optional offset) regression heads."""
  def __init__(self, opt):
    super(CtdetDetector, self).__init__(opt)
  def process(self, images, return_time=False):
    """Forward the network and decode top-K center detections."""
    with torch.no_grad():
      output = self.model(images)[-1]
      hm = output['hm'].sigmoid_()
      wh = output['wh']
      reg = output['reg'] if self.opt.reg_offset else None
      if self.opt.flip_test:
        # Batch holds [image, flipped image]; average the two heatmaps.
        hm = (hm[0:1] + flip_tensor(hm[1:2])) / 2
        wh = (wh[0:1] + flip_tensor(wh[1:2])) / 2
        # Offsets are not averaged; the original-image offsets are used.
        reg = reg[0:1] if reg is not None else None
      torch.cuda.synchronize()
      forward_time = time.time()
      dets = ctdet_decode(hm, wh, reg=reg, K=self.opt.K)
    if return_time:
      return output, dets, forward_time
    else:
      return output, dets
  def post_process(self, dets, meta, scale=1):
    """Warp detections back to original-image coordinates and split them
    into per-class float32 arrays (keys 1..num_classes)."""
    dets = dets.detach().cpu().numpy()
    dets = dets.reshape(1, -1, dets.shape[2])
    dets = ctdet_post_process(
      dets.copy(), [meta['c']], [meta['s']],
      meta['out_height'], meta['out_width'], self.opt.num_classes)
    for j in range(1, self.num_classes + 1):
      dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
      # Undo test-time image scaling on the box coordinates.
      dets[0][j][:, :4] /= scale
    return dets[0]
  def merge_outputs(self, detections):
    """Merge per-scale results, optionally soft-NMS them, and cap the total
    box count at self.max_per_image via a global score threshold."""
    results = {}
    for j in range(1, self.num_classes + 1):
      results[j] = np.concatenate(
        [detection[j] for detection in detections], axis=0).astype(np.float32)
      if len(self.scales) > 1 or self.opt.nms:
         soft_nms(results[j], Nt=0.5, method=2)
    scores = np.hstack(
      [results[j][:, 4] for j in range(1, self.num_classes + 1)])
    if len(scores) > self.max_per_image:
      kth = len(scores) - self.max_per_image
      thresh = np.partition(scores, kth)[kth]
      for j in range(1, self.num_classes + 1):
        keep_inds = (results[j][:, 4] >= thresh)
        results[j] = results[j][keep_inds]
    return results
  def debug(self, debugger, images, dets, output, scale=1):
    """Visualize the predicted heatmap and raw boxes (first image only)."""
    detection = dets.detach().cpu().numpy().copy()
    # Raw detections are on the output stride; rescale to input pixels.
    detection[:, :, :4] *= self.opt.down_ratio
    for i in range(1):
      img = images[i].detach().cpu().numpy().transpose(1, 2, 0)
      # Undo input normalization for display.
      img = ((img * self.std + self.mean) * 255).astype(np.uint8)
      pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
      debugger.add_blend_img(img, pred, 'pred_hm_{:.1f}'.format(scale))
      debugger.add_img(img, img_id='out_pred_{:.1f}'.format(scale))
      for k in range(len(dets[i])):
        if detection[i, k, 4] > self.opt.center_thresh:
          debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1],
                                 detection[i, k, 4],
                                 img_id='out_pred_{:.1f}'.format(scale))
  def show_results(self, debugger, image, results):
    """Draw final boxes above the visualization threshold."""
    debugger.add_img(image, img_id='ctdet')
    for j in range(1, self.num_classes + 1):
      for bbox in results[j]:
        if bbox[4] > self.opt.vis_thresh:
          debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctdet')
    debugger.show_all_imgs(pause=self.pause)
houghnet | houghnet-master/src/lib/detectors/ddd.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from src.lib.external.nms import soft_nms
from src.lib.models.decode import ddd_decode
from src.lib.models.utils import flip_tensor
from src.lib.utils.image import get_affine_transform
from src.lib.utils.post_process import ddd_post_process
from src.lib.utils.debugger import Debugger
from src.lib.utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y
from src.lib.utils.ddd_utils import draw_box_3d, unproject_2d_to_3d
from .base_detector import BaseDetector
class DddDetector(BaseDetector):
  """Monocular 3D detector (KITTI-style): decodes depth, rotation, and 3D
  dimensions from a center heatmap."""
  def __init__(self, opt):
    super(DddDetector, self).__init__(opt)
    # Default KITTI camera calibration, used when none is supplied.
    self.calib = np.array([[707.0493, 0, 604.0814, 45.75831],
                           [0, 707.0493, 180.5066, -0.3454157],
                           [0, 0, 1., 0.004981016]], dtype=np.float32)
  def pre_process(self, image, scale, calib=None):
    """Normalize and affine-warp the image to the network input size.

    Returns (images, meta) where meta carries the center/scale of the warp,
    the output resolution, and the calibration matrix for decoding.
    Note: `scale` is accepted for interface compatibility but the image is
    not resized here (the resize call is intentionally disabled).
    """
    height, width = image.shape[0:2]
    inp_height, inp_width = self.opt.input_h, self.opt.input_w
    c = np.array([width / 2, height / 2], dtype=np.float32)
    if self.opt.keep_res:
      s = np.array([inp_width, inp_height], dtype=np.int32)
    else:
      s = np.array([width, height], dtype=np.int32)
    trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
    resized_image = image #cv2.resize(image, (width, height))
    inp_image = cv2.warpAffine(
      resized_image, trans_input, (inp_width, inp_height),
      flags=cv2.INTER_LINEAR)
    inp_image = (inp_image.astype(np.float32) / 255.)
    inp_image = (inp_image - self.mean) / self.std
    # HWC -> 1CHW for the network.
    images = inp_image.transpose(2, 0, 1)[np.newaxis, ...]
    calib = np.array(calib, dtype=np.float32) if calib is not None \
            else self.calib
    images = torch.from_numpy(images)
    meta = {'c': c, 's': s,
            'out_height': inp_height // self.opt.down_ratio,
            'out_width': inp_width // self.opt.down_ratio,
            'calib': calib}
    return images, meta
  def process(self, images, return_time=False):
    """Forward the network and decode top-K 3D detections."""
    with torch.no_grad():
      torch.cuda.synchronize()
      output = self.model(images)[-1]
      output['hm'] = output['hm'].sigmoid_()
      # Invert the depth parameterization: depth = 1/sigmoid(x) - 1.
      output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
      wh = output['wh'] if self.opt.reg_bbox else None
      reg = output['reg'] if self.opt.reg_offset else None
      torch.cuda.synchronize()
      forward_time = time.time()
      dets = ddd_decode(output['hm'], output['rot'], output['dep'],
                        output['dim'], wh=wh, reg=reg, K=self.opt.K)
    if return_time:
      return output, dets, forward_time
    else:
      return output, dets
  def post_process(self, dets, meta, scale=1):
    """Project decoded detections back to image/world coordinates.

    Also stashes the calibration on self for show_results.
    """
    dets = dets.detach().cpu().numpy()
    detections = ddd_post_process(
      dets.copy(), [meta['c']], [meta['s']], [meta['calib']], self.opt)
    self.this_calib = meta['calib']
    return detections[0]
  def merge_outputs(self, detections):
    """Keep only detections whose score exceeds opt.peak_thresh.

    Single-scale task: the first (only) scale's results are used directly.
    """
    results = detections[0]
    for j in range(1, self.num_classes + 1):
      # BUGFIX: was `len(results[j] > 0)` — len() of the boolean comparison
      # mask, which only worked by accident for 2-D arrays. Test emptiness
      # of the array itself before indexing the score column.
      if len(results[j]) > 0:
        keep_inds = (results[j][:, -1] > self.opt.peak_thresh)
        results[j] = results[j][keep_inds]
    return results
  def debug(self, debugger, images, dets, output, scale=1):
    """Visualize the center heatmap and raw center detections."""
    dets = dets.detach().cpu().numpy()
    img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
    # Undo input normalization for display.
    img = ((img * self.std + self.mean) * 255).astype(np.uint8)
    pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
    debugger.add_blend_img(img, pred, 'pred_hm')
    debugger.add_ct_detection(
      img, dets[0], show_box=self.opt.reg_bbox,
      center_thresh=self.opt.vis_thresh, img_id='det_pred')
  def show_results(self, debugger, image, results):
    """Render projected 3D boxes and a bird's-eye view."""
    debugger.add_3d_detection(
      image, results, self.this_calib,
      center_thresh=self.opt.vis_thresh, img_id='add_pred')
    debugger.add_bird_view(
      results, center_thresh=self.opt.vis_thresh, img_id='bird_pred')
    debugger.show_all_imgs(pause=self.pause)
houghnet | houghnet-master/src/lib/detectors/multi_pose.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from src.lib.external.nms import soft_nms_39
from src.lib.models.decode import multi_pose_decode
from src.lib.models.utils import flip_tensor, flip_lr_off, flip_lr
from src.lib.utils.image import get_affine_transform
from src.lib.utils.post_process import multi_pose_post_process
from src.lib.utils.debugger import Debugger
from .base_detector import BaseDetector
class MultiPoseDetector(BaseDetector):
  """Person detection + 2D keypoint estimation from center heatmaps.

  Each final detection row packs [x1, y1, x2, y2, score, 17x(x, y) joints]
  (39 values)."""
  def __init__(self, opt):
    super(MultiPoseDetector, self).__init__(opt)
    # Index pairs of left/right joints, swapped for horizontal-flip testing.
    self.flip_idx = opt.flip_idx
  def process(self, images, return_time=False):
    """Forward the network and decode top-K pose detections."""
    with torch.no_grad():
      torch.cuda.synchronize()
      output = self.model(images)[-1]
      output['hm'] = output['hm'].sigmoid_()
      if self.opt.hm_hp and not self.opt.mse_loss:
        output['hm_hp'] = output['hm_hp'].sigmoid_()
      reg = output['reg'] if self.opt.reg_offset else None
      hm_hp = output['hm_hp'] if self.opt.hm_hp else None
      hp_offset = output['hp_offset'] if self.opt.reg_hp_offset else None
      torch.cuda.synchronize()
      forward_time = time.time()
      if self.opt.flip_test:
        # Batch holds [image, flipped image]; average original and un-flipped
        # outputs, swapping left/right joints via flip_idx where needed.
        output['hm'] = (output['hm'][0:1] + flip_tensor(output['hm'][1:2])) / 2
        output['wh'] = (output['wh'][0:1] + flip_tensor(output['wh'][1:2])) / 2
        output['hps'] = (output['hps'][0:1] +
          flip_lr_off(output['hps'][1:2], self.flip_idx)) / 2
        hm_hp = (hm_hp[0:1] + flip_lr(hm_hp[1:2], self.flip_idx)) / 2 \
                if hm_hp is not None else None
        reg = reg[0:1] if reg is not None else None
        hp_offset = hp_offset[0:1] if hp_offset is not None else None
      dets = multi_pose_decode(
        output['hm'], output['wh'], output['hps'],
        reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=self.opt.K)
    if return_time:
      return output, dets, forward_time
    else:
      return output, dets
  def post_process(self, dets, meta, scale=1):
    """Warp boxes and keypoints back to original-image coordinates."""
    dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
    dets = multi_pose_post_process(
      dets.copy(), [meta['c']], [meta['s']],
      meta['out_height'], meta['out_width'])
    for j in range(1, self.num_classes + 1):
      dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 39)
      # import pdb; pdb.set_trace()
      # Undo test-time scaling on boxes (cols 0-3) and joints (cols 5+).
      dets[0][j][:, :4] /= scale
      dets[0][j][:, 5:] /= scale
    return dets[0]
  def merge_outputs(self, detections):
    """Merge per-scale results for the single person class (key 1) and
    optionally run 39-column soft-NMS."""
    results = {}
    results[1] = np.concatenate(
        [detection[1] for detection in detections], axis=0).astype(np.float32)
    if self.opt.nms or len(self.opt.test_scales) > 1:
      soft_nms_39(results[1], Nt=0.5, method=2)
    results[1] = results[1].tolist()
    return results
  def debug(self, debugger, images, dets, output, scale=1):
    """Visualize center and (optionally) keypoint heatmaps."""
    dets = dets.detach().cpu().numpy().copy()
    # Raw detections are on the output stride; rescale to input pixels.
    dets[:, :, :4] *= self.opt.down_ratio
    dets[:, :, 5:39] *= self.opt.down_ratio
    img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
    # Undo input normalization (clipped to valid pixel range) for display.
    img = np.clip(((
      img * self.std + self.mean) * 255.), 0, 255).astype(np.uint8)
    pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
    debugger.add_blend_img(img, pred, 'pred_hm')
    if self.opt.hm_hp:
      pred = debugger.gen_colormap_hp(
        output['hm_hp'][0].detach().cpu().numpy())
      debugger.add_blend_img(img, pred, 'pred_hmhp')
  def show_results(self, debugger, image, results):
    """Draw final person boxes and their skeletons above the threshold."""
    debugger.add_img(image, img_id='multi_pose')
    for bbox in results[1]:
      if bbox[4] > self.opt.vis_thresh:
        debugger.add_coco_bbox(bbox[:4], 0, bbox[4], img_id='multi_pose')
        debugger.add_coco_hp(bbox[5:39], img_id='multi_pose')
    debugger.show_all_imgs(pause=self.pause)
houghnet | houghnet-master/src/lib/detectors/base_detector.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from src.lib.models.model import create_model, load_model
from src.lib.utils.image import get_affine_transform
from src.lib.utils.debugger import Debugger
class BaseDetector(object):
  def __init__(self, opt):
    """Build and load the model, set it to eval mode, and cache the
    normalization constants and test-time options on the instance."""
    if opt.gpus[0] >= 0:
      opt.device = torch.device('cuda')
    else:
      opt.device = torch.device('cpu')
    print('Creating model...')
    self.model = create_model(opt.arch, opt.heads, opt.head_conv, opt.region_num, opt.vote_field_size, opt.model_v1)
    self.model = load_model(self.model, opt.load_model)
    self.model = self.model.to(opt.device)
    self.model.eval()
    # Per-channel image normalization, broadcastable over HWC images.
    self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
    self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
    # Hard cap on detections kept per image after merging.
    self.max_per_image = 100
    self.num_classes = opt.num_classes
    self.scales = opt.test_scales
    self.opt = opt
    # Whether debug visualization windows block waiting for a key press.
    self.pause = True
  def pre_process(self, image, scale, meta=None):
    """Scale, pad, normalize, and affine-warp an image for the network.

    Returns (images, meta): a 1CHW float tensor (2CHW with a horizontally
    flipped copy when flip_test is set) and the warp center/scale plus
    output resolution needed to map detections back.
    """
    height, width = image.shape[0:2]
    new_height = int(height * scale)
    new_width = int(width * scale)
    if self.opt.fix_res:
      # Fixed network resolution; keep aspect ratio via a square scale.
      inp_height, inp_width = self.opt.input_h, self.opt.input_w
      c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
      s = max(height, width) * 1.0
    else:
      # (x | pad) + 1 rounds up to the next multiple of pad+1 — presumably
      # pad is 2^k - 1 so outputs align with the network stride (TODO confirm).
      inp_height = (new_height | self.opt.pad) + 1
      inp_width = (new_width | self.opt.pad) + 1
      c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
      s = np.array([inp_width, inp_height], dtype=np.float32)
    trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
    resized_image = cv2.resize(image, (new_width, new_height))
    inp_image = cv2.warpAffine(
      resized_image, trans_input, (inp_width, inp_height),
      flags=cv2.INTER_LINEAR)
    inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)
    # HWC -> 1CHW.
    images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
    if self.opt.flip_test:
      # Append the horizontally mirrored image as a second batch element.
      images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
    images = torch.from_numpy(images)
    meta = {'c': c, 's': s,
            'out_height': inp_height // self.opt.down_ratio,
            'out_width': inp_width // self.opt.down_ratio}
    return images, meta
    def process(self, images, return_time=False):
        """Run the network on a preprocessed batch; subclasses must override."""
        raise NotImplementedError

    def post_process(self, dets, meta, scale=1):
        """Map raw detections back to input-image coordinates; subclass hook."""
        raise NotImplementedError

    def merge_outputs(self, detections):
        """Combine per-scale detections into the final results; subclass hook."""
        raise NotImplementedError

    def debug(self, debugger, images, dets, output, scale=1):
        """Visualize intermediate outputs; subclass hook."""
        raise NotImplementedError

    def show_results(self, debugger, image, results):
        """Display the final detections; subclass hook."""
        raise NotImplementedError
def run(self, image_or_path_or_tensor, meta=None):
load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0
merge_time, tot_time = 0, 0
debugger = Debugger(dataset=self.opt.dataset, ipynb=(self.opt.debug==3),
theme=self.opt.debugger_theme)
start_time = time.time()
pre_processed = False
if isinstance(image_or_path_or_tensor, np.ndarray):
image = image_or_path_or_tensor
elif type(image_or_path_or_tensor) == type (''):
image = cv2.imread(image_or_path_or_tensor)
else:
image = image_or_path_or_tensor['image'][0].numpy()
pre_processed_images = image_or_path_or_tensor
pre_processed = True
loaded_time = time.time()
load_time += (loaded_time - start_time)
img_h, img_w = image.shape[:2]
detections = []
for scale in self.scales:
scale_start_time = time.time()
if not pre_processed:
images, meta = self.pre_process(image, scale, meta)
else:
# import pdb; pdb.set_trace()
images = pre_processed_images['images'][scale][0]
meta = pre_processed_images['meta'][scale]
meta = {k: v.numpy()[0] for k, v in meta.items()}
meta['img_size'] = (img_h, img_w)
images = images.to(self.opt.device)
torch.cuda.synchronize()
pre_process_time = time.time()
pre_time += pre_process_time - scale_start_time
output, dets, forward_time = self.process(images, return_time=True)
torch.cuda.synchronize()
net_time += forward_time - pre_process_time
decode_time = time.time()
dec_time += decode_time - forward_time
if self.opt.debug >= 2:
self.debug(debugger, images, dets, output, scale)
dets = self.post_process(dets, meta, scale)
torch.cuda.synchronize()
post_process_time = time.time()
post_time += post_process_time - decode_time
detections.append(dets)
results = self.merge_outputs(detections)
torch.cuda.synchronize()
end_time = time.time()
merge_time += end_time - post_process_time
tot_time += end_time - start_time
if self.opt.debug >= 1:
self.show_results(debugger, image, results)
return {'results': results, 'tot': tot_time, 'load': load_time,
'pre': pre_time, 'net': net_time, 'dec': dec_time,
'post': post_time, 'merge': merge_time} | 5,206 | 34.910345 | 116 | py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from pycocotools import mask as mask_utils
try:
from external.nms import soft_nms
except:
print('NMS not imported! If you need it,'
' do \n cd $CenterNet_ROOT/src/lib/external \n make')
from models.decode import ctseg_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform
from utils.post_process import ctseg_post_process
from utils.debugger import Debugger
from src.lib.models.losses import SegLoss
from .base_detector import BaseDetector
class CtsegDetector(BaseDetector):
    """Detector for joint box detection + instance segmentation (ctseg task)."""

    def __init__(self, opt):
        super(CtsegDetector, self).__init__(opt)
        # SegLoss is reused at inference time for its ROI pooler / mask head.
        self.seg_model = SegLoss(opt.seg_feat_channel)

    def process(self, images, return_time=False):
        """Forward pass + decoding.

        Returns (output, (dets, masks)) and, when return_time, the timestamp
        taken right after the forward pass.
        """
        with torch.no_grad():
            output = self.model(images)[-1]
            hm = output['hm'].sigmoid_()
            wh = output['wh']
            shape_feat = output['shape']
            saliency = output['saliency']
            reg = output['reg'] if self.opt.reg_offset else None
            if self.opt.flip_test:
                # Average the outputs of the original and flipped image;
                # offsets/shape features keep only the unflipped half.
                hm = (hm[0:1] + flip_tensor(hm[1:2])) / 2
                wh = (wh[0:1] + flip_tensor(wh[1:2])) / 2
                reg = reg[0:1] if reg is not None else None
                saliency = (saliency[0:1] + flip_tensor(saliency[1:2])) / 2
                shape_feat = shape_feat[0:1] if shape_feat is not None else None
            # assert not self.opt.flip_test,"not support flip_test"
            torch.cuda.synchronize()
            forward_time = time.time()
            dets, masks = ctseg_decode(hm, wh, shape_feat, saliency, self.seg_model, reg=reg, cat_spec_wh=self.opt.cat_spec_wh,
                                       K=self.opt.K)
        if return_time:
            return output, (dets, masks), forward_time
        else:
            return output, (dets, masks)

    def post_process(self, det_seg, meta, scale=1):
        """Map decoded boxes/masks back to original image coordinates."""
        assert scale == 1, "not support scale != 1"
        dets, seg = det_seg
        dets = dets.detach().cpu().numpy()
        seg = seg.detach().cpu().numpy()
        dets = dets.reshape(1, -1, dets.shape[2])
        dets = ctseg_post_process(
            dets.copy(), seg.copy(), [meta['c']], [meta['s']],
            meta['out_height'], meta['out_width'], *meta['img_size'], self.opt.num_classes)
        return dets[0]

    def merge_outputs(self, detections):
        # Single-scale only (see the scale == 1 assert above).
        return detections[0]

    def show_results(self, debugger, image, results):
        """Draw boxes above opt.vis_thresh together with their decoded masks."""
        debugger.add_img(image, img_id='ctseg')
        for j in range(1, self.num_classes + 1):
            for i in range(len(results[j]['boxs'])):
                bbox = results[j]['boxs'][i]
                # Masks are stored RLE-encoded (pycocotools).
                mask = mask_utils.decode(results[j]['pred_mask'][i])
                if bbox[4] > self.opt.vis_thresh:
                    debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctseg')
                    debugger.add_coco_seg(mask, img_id='ctseg')
        debugger.show_all_imgs(pause=self.pause)
| 3,141 | 38.275 | 127 | py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from .utils import _gather_feat, _tranpose_and_gather_feat
from detectron2.structures import Boxes # Each row is (x1, y1, x2, y2).
from detectron2.layers import paste_masks_in_image
from detectron2.utils.memory import retry_if_cuda_oom
def _nms(heat, kernel=3):
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(
heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
def _left_aggregate(heat):
'''
heat: batchsize x channels x h x w
'''
shape = heat.shape
heat = heat.reshape(-1, heat.shape[3])
heat = heat.transpose(1, 0).contiguous()
ret = heat.clone()
for i in range(1, heat.shape[0]):
inds = (heat[i] >= heat[i - 1])
ret[i] += ret[i - 1] * inds.float()
return (ret - heat).transpose(1, 0).reshape(shape)
def _right_aggregate(heat):
'''
heat: batchsize x channels x h x w
'''
shape = heat.shape
heat = heat.reshape(-1, heat.shape[3])
heat = heat.transpose(1, 0).contiguous()
ret = heat.clone()
for i in range(heat.shape[0] - 2, -1, -1):
inds = (heat[i] >= heat[i +1])
ret[i] += ret[i + 1] * inds.float()
return (ret - heat).transpose(1, 0).reshape(shape)
def _top_aggregate(heat):
'''
heat: batchsize x channels x h x w
'''
heat = heat.transpose(3, 2)
shape = heat.shape
heat = heat.reshape(-1, heat.shape[3])
heat = heat.transpose(1, 0).contiguous()
ret = heat.clone()
for i in range(1, heat.shape[0]):
inds = (heat[i] >= heat[i - 1])
ret[i] += ret[i - 1] * inds.float()
return (ret - heat).transpose(1, 0).reshape(shape).transpose(3, 2)
def _bottom_aggregate(heat):
'''
heat: batchsize x channels x h x w
'''
heat = heat.transpose(3, 2)
shape = heat.shape
heat = heat.reshape(-1, heat.shape[3])
heat = heat.transpose(1, 0).contiguous()
ret = heat.clone()
for i in range(heat.shape[0] - 2, -1, -1):
inds = (heat[i] >= heat[i + 1])
ret[i] += ret[i + 1] * inds.float()
return (ret - heat).transpose(1, 0).reshape(shape).transpose(3, 2)
def _h_aggregate(heat, aggr_weight=0.1):
    """Blend horizontal run aggregates into the heatmap with weight aggr_weight."""
    left = _left_aggregate(heat)
    right = _right_aggregate(heat)
    return aggr_weight * left + aggr_weight * right + heat
def _v_aggregate(heat, aggr_weight=0.1):
    """Blend vertical run aggregates into the heatmap with weight aggr_weight."""
    top = _top_aggregate(heat)
    bottom = _bottom_aggregate(heat)
    return aggr_weight * top + aggr_weight * bottom + heat
# NOTE: dead reference implementation kept by the original authors inside a
# string literal; the active _topk is defined further below.
'''
# Slow for large number of categories
def _topk(scores, K=40):
    batch, cat, height, width = scores.size()
    topk_scores, topk_inds = torch.topk(scores.view(batch, -1), K)
    topk_clses = (topk_inds / (height * width)).int()
    topk_inds = topk_inds % (height * width)
    topk_ys = (topk_inds / width).int().float()
    topk_xs = (topk_inds % width).int().float()
    return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
'''
def _topk_channel(scores, K=40):
batch, cat, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_ys, topk_xs
def _topk(scores, K=40):
    """Global top-K peaks of a (batch, cat, h, w) heatmap across all classes.

    Returns (scores, flat_indices, classes, ys, xs), each (batch, K).

    Fix: uses integer floor division (//) instead of the deprecated
    true-division-then-truncate on integer tensors (two sites).
    """
    batch, cat, height, width = scores.size()
    # First take K per class, then K overall from the cat*K candidates.
    topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)
    topk_inds = topk_inds % (height * width)
    topk_ys = (topk_inds // width).int().float()
    topk_xs = (topk_inds % width).int().float()
    topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
    # Candidate list is laid out class-major, so class = flat index // K.
    topk_clses = (topk_ind // K).int()
    topk_inds = _gather_feat(
        topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
    topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
    topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
    return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
def agnex_ct_decode(
        t_heat, l_heat, b_heat, r_heat, ct_heat,
        t_regr=None, l_regr=None, b_regr=None, r_regr=None,
        K=40, scores_thresh=0.1, center_thresh=0.1, aggr_weight=0.0, num_dets=1000
):
    """Class-agnostic ExtremeNet-style decoding.

    Picks top-K peaks from each extreme-point heatmap (top/left/bottom/right),
    enumerates all K^4 combinations on a 5-D grid, scores each with the
    class-agnostic center heatmap, rejects geometrically or score-wise
    invalid combinations, and returns the num_dets best detections as
    (batch, num_dets, 14): bbox(4), score, 4 extreme points(8), class.
    """
    batch, cat, height, width = t_heat.size()
    '''
    t_heat = torch.sigmoid(t_heat)
    l_heat = torch.sigmoid(l_heat)
    b_heat = torch.sigmoid(b_heat)
    r_heat = torch.sigmoid(r_heat)
    ct_heat = torch.sigmoid(ct_heat)
    '''
    if aggr_weight > 0:
        # Optional edge aggregation smooths responses along object borders.
        t_heat = _h_aggregate(t_heat, aggr_weight=aggr_weight)
        l_heat = _v_aggregate(l_heat, aggr_weight=aggr_weight)
        b_heat = _h_aggregate(b_heat, aggr_weight=aggr_weight)
        r_heat = _v_aggregate(r_heat, aggr_weight=aggr_weight)
    # perform nms on heatmaps
    t_heat = _nms(t_heat)
    l_heat = _nms(l_heat)
    b_heat = _nms(b_heat)
    r_heat = _nms(r_heat)
    # Clamp scores so later averaging stays in [0, 1].
    t_heat[t_heat > 1] = 1
    l_heat[l_heat > 1] = 1
    b_heat[b_heat > 1] = 1
    r_heat[r_heat > 1] = 1
    t_scores, t_inds, _, t_ys, t_xs = _topk(t_heat, K=K)
    l_scores, l_inds, _, l_ys, l_xs = _topk(l_heat, K=K)
    b_scores, b_inds, _, b_ys, b_xs = _topk(b_heat, K=K)
    r_scores, r_inds, _, r_ys, r_xs = _topk(r_heat, K=K)
    # Class-agnostic center response: max over classes at every location.
    ct_heat_agn, ct_clses = torch.max(ct_heat, dim=1, keepdim=True)
    # import pdb; pdb.set_trace()
    # Broadcast each point set along its own axis of the (K,K,K,K) grid.
    t_ys = t_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
    t_xs = t_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
    l_ys = l_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
    l_xs = l_xs.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
    b_ys = b_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
    b_xs = b_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
    r_ys = r_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
    r_xs = r_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
    # Candidate box center = midpoint of the left/right and top/bottom points.
    box_ct_xs = ((l_xs + r_xs + 0.5) / 2).long()
    box_ct_ys = ((t_ys + b_ys + 0.5) / 2).long()
    ct_inds = box_ct_ys * width + box_ct_xs
    ct_inds = ct_inds.view(batch, -1)
    ct_heat_agn = ct_heat_agn.view(batch, -1, 1)
    ct_clses = ct_clses.view(batch, -1, 1)
    ct_scores = _gather_feat(ct_heat_agn, ct_inds)
    clses = _gather_feat(ct_clses, ct_inds)
    t_scores = t_scores.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
    l_scores = l_scores.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
    b_scores = b_scores.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
    r_scores = r_scores.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
    ct_scores = ct_scores.view(batch, K, K, K, K)
    # Combined score: center counts double.
    scores = (t_scores + l_scores + b_scores + r_scores + 2 * ct_scores) / 6
    # reject boxes based on classes
    top_inds = (t_ys > l_ys) + (t_ys > b_ys) + (t_ys > r_ys)
    top_inds = (top_inds > 0)
    left_inds = (l_xs > t_xs) + (l_xs > b_xs) + (l_xs > r_xs)
    left_inds = (left_inds > 0)
    bottom_inds = (b_ys < t_ys) + (b_ys < l_ys) + (b_ys < r_ys)
    bottom_inds = (bottom_inds > 0)
    right_inds = (r_xs < t_xs) + (r_xs < l_xs) + (r_xs < b_xs)
    right_inds = (right_inds > 0)
    sc_inds = (t_scores < scores_thresh) + (l_scores < scores_thresh) + \
              (b_scores < scores_thresh) + (r_scores < scores_thresh) + \
              (ct_scores < center_thresh)
    sc_inds = (sc_inds > 0)
    # Penalize invalid combinations so topk never selects them.
    scores = scores - sc_inds.float()
    scores = scores - top_inds.float()
    scores = scores - left_inds.float()
    scores = scores - bottom_inds.float()
    scores = scores - right_inds.float()
    scores = scores.view(batch, -1)
    scores, inds = torch.topk(scores, num_dets)
    scores = scores.unsqueeze(2)
    if t_regr is not None and l_regr is not None \
            and b_regr is not None and r_regr is not None:
        # Refine each extreme point with its sub-pixel offset head.
        t_regr = _tranpose_and_gather_feat(t_regr, t_inds)
        t_regr = t_regr.view(batch, K, 1, 1, 1, 2)
        l_regr = _tranpose_and_gather_feat(l_regr, l_inds)
        l_regr = l_regr.view(batch, 1, K, 1, 1, 2)
        b_regr = _tranpose_and_gather_feat(b_regr, b_inds)
        b_regr = b_regr.view(batch, 1, 1, K, 1, 2)
        r_regr = _tranpose_and_gather_feat(r_regr, r_inds)
        r_regr = r_regr.view(batch, 1, 1, 1, K, 2)
        t_xs = t_xs + t_regr[..., 0]
        t_ys = t_ys + t_regr[..., 1]
        l_xs = l_xs + l_regr[..., 0]
        l_ys = l_ys + l_regr[..., 1]
        b_xs = b_xs + b_regr[..., 0]
        b_ys = b_ys + b_regr[..., 1]
        r_xs = r_xs + r_regr[..., 0]
        r_ys = r_ys + r_regr[..., 1]
    else:
        t_xs = t_xs + 0.5
        t_ys = t_ys + 0.5
        l_xs = l_xs + 0.5
        l_ys = l_ys + 0.5
        b_xs = b_xs + 0.5
        b_ys = b_ys + 0.5
        r_xs = r_xs + 0.5
        r_ys = r_ys + 0.5
    bboxes = torch.stack((l_xs, t_ys, r_xs, b_ys), dim=5)
    bboxes = bboxes.view(batch, -1, 4)
    bboxes = _gather_feat(bboxes, inds)
    clses = clses.contiguous().view(batch, -1, 1)
    clses = _gather_feat(clses, inds).float()
    t_xs = t_xs.contiguous().view(batch, -1, 1)
    t_xs = _gather_feat(t_xs, inds).float()
    t_ys = t_ys.contiguous().view(batch, -1, 1)
    t_ys = _gather_feat(t_ys, inds).float()
    l_xs = l_xs.contiguous().view(batch, -1, 1)
    l_xs = _gather_feat(l_xs, inds).float()
    l_ys = l_ys.contiguous().view(batch, -1, 1)
    l_ys = _gather_feat(l_ys, inds).float()
    b_xs = b_xs.contiguous().view(batch, -1, 1)
    b_xs = _gather_feat(b_xs, inds).float()
    b_ys = b_ys.contiguous().view(batch, -1, 1)
    b_ys = _gather_feat(b_ys, inds).float()
    r_xs = r_xs.contiguous().view(batch, -1, 1)
    r_xs = _gather_feat(r_xs, inds).float()
    r_ys = r_ys.contiguous().view(batch, -1, 1)
    r_ys = _gather_feat(r_ys, inds).float()
    detections = torch.cat([bboxes, scores, t_xs, t_ys, l_xs, l_ys,
                            b_xs, b_ys, r_xs, r_ys, clses], dim=2)
    return detections
def exct_decode(
        t_heat, l_heat, b_heat, r_heat, ct_heat,
        t_regr=None, l_regr=None, b_regr=None, r_regr=None,
        K=40, scores_thresh=0.1, center_thresh=0.1, aggr_weight=0.0, num_dets=1000
):
    """Class-aware ExtremeNet decoding.

    Same scheme as agnex_ct_decode, but extreme points carry their own class
    and a combination is additionally rejected when the four points disagree
    on the class; the center score is read from the class-specific channel.
    Returns (batch, num_dets, 14): bbox(4), score, 4 extreme points(8), class.
    """
    batch, cat, height, width = t_heat.size()
    '''
    t_heat = torch.sigmoid(t_heat)
    l_heat = torch.sigmoid(l_heat)
    b_heat = torch.sigmoid(b_heat)
    r_heat = torch.sigmoid(r_heat)
    ct_heat = torch.sigmoid(ct_heat)
    '''
    if aggr_weight > 0:
        # Optional edge aggregation smooths responses along object borders.
        t_heat = _h_aggregate(t_heat, aggr_weight=aggr_weight)
        l_heat = _v_aggregate(l_heat, aggr_weight=aggr_weight)
        b_heat = _h_aggregate(b_heat, aggr_weight=aggr_weight)
        r_heat = _v_aggregate(r_heat, aggr_weight=aggr_weight)
    # perform nms on heatmaps
    t_heat = _nms(t_heat)
    l_heat = _nms(l_heat)
    b_heat = _nms(b_heat)
    r_heat = _nms(r_heat)
    # Clamp scores so later averaging stays in [0, 1].
    t_heat[t_heat > 1] = 1
    l_heat[l_heat > 1] = 1
    b_heat[b_heat > 1] = 1
    r_heat[r_heat > 1] = 1
    t_scores, t_inds, t_clses, t_ys, t_xs = _topk(t_heat, K=K)
    l_scores, l_inds, l_clses, l_ys, l_xs = _topk(l_heat, K=K)
    b_scores, b_inds, b_clses, b_ys, b_xs = _topk(b_heat, K=K)
    r_scores, r_inds, r_clses, r_ys, r_xs = _topk(r_heat, K=K)
    # Broadcast each point set along its own axis of the (K,K,K,K) grid.
    t_ys = t_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
    t_xs = t_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
    l_ys = l_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
    l_xs = l_xs.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
    b_ys = b_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
    b_xs = b_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
    r_ys = r_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
    r_xs = r_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
    t_clses = t_clses.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
    l_clses = l_clses.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
    b_clses = b_clses.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
    r_clses = r_clses.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
    # Candidate box center = midpoint of the left/right and top/bottom points.
    box_ct_xs = ((l_xs + r_xs + 0.5) / 2).long()
    box_ct_ys = ((t_ys + b_ys + 0.5) / 2).long()
    # Index into the class-specific channel of the center heatmap.
    ct_inds = t_clses.long() * (height * width) + box_ct_ys * width + box_ct_xs
    ct_inds = ct_inds.view(batch, -1)
    ct_heat = ct_heat.view(batch, -1, 1)
    ct_scores = _gather_feat(ct_heat, ct_inds)
    t_scores = t_scores.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
    l_scores = l_scores.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
    b_scores = b_scores.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
    r_scores = r_scores.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
    ct_scores = ct_scores.view(batch, K, K, K, K)
    # Combined score: center counts double.
    scores = (t_scores + l_scores + b_scores + r_scores + 2 * ct_scores) / 6
    # reject boxes based on classes
    cls_inds = (t_clses != l_clses) + (t_clses != b_clses) + \
               (t_clses != r_clses)
    cls_inds = (cls_inds > 0)
    top_inds = (t_ys > l_ys) + (t_ys > b_ys) + (t_ys > r_ys)
    top_inds = (top_inds > 0)
    left_inds = (l_xs > t_xs) + (l_xs > b_xs) + (l_xs > r_xs)
    left_inds = (left_inds > 0)
    bottom_inds = (b_ys < t_ys) + (b_ys < l_ys) + (b_ys < r_ys)
    bottom_inds = (bottom_inds > 0)
    right_inds = (r_xs < t_xs) + (r_xs < l_xs) + (r_xs < b_xs)
    right_inds = (right_inds > 0)
    sc_inds = (t_scores < scores_thresh) + (l_scores < scores_thresh) + \
              (b_scores < scores_thresh) + (r_scores < scores_thresh) + \
              (ct_scores < center_thresh)
    sc_inds = (sc_inds > 0)
    # Penalize invalid combinations so topk never selects them.
    scores = scores - sc_inds.float()
    scores = scores - cls_inds.float()
    scores = scores - top_inds.float()
    scores = scores - left_inds.float()
    scores = scores - bottom_inds.float()
    scores = scores - right_inds.float()
    scores = scores.view(batch, -1)
    scores, inds = torch.topk(scores, num_dets)
    scores = scores.unsqueeze(2)
    if t_regr is not None and l_regr is not None \
            and b_regr is not None and r_regr is not None:
        # Refine each extreme point with its sub-pixel offset head.
        t_regr = _tranpose_and_gather_feat(t_regr, t_inds)
        t_regr = t_regr.view(batch, K, 1, 1, 1, 2)
        l_regr = _tranpose_and_gather_feat(l_regr, l_inds)
        l_regr = l_regr.view(batch, 1, K, 1, 1, 2)
        b_regr = _tranpose_and_gather_feat(b_regr, b_inds)
        b_regr = b_regr.view(batch, 1, 1, K, 1, 2)
        r_regr = _tranpose_and_gather_feat(r_regr, r_inds)
        r_regr = r_regr.view(batch, 1, 1, 1, K, 2)
        t_xs = t_xs + t_regr[..., 0]
        t_ys = t_ys + t_regr[..., 1]
        l_xs = l_xs + l_regr[..., 0]
        l_ys = l_ys + l_regr[..., 1]
        b_xs = b_xs + b_regr[..., 0]
        b_ys = b_ys + b_regr[..., 1]
        r_xs = r_xs + r_regr[..., 0]
        r_ys = r_ys + r_regr[..., 1]
    else:
        t_xs = t_xs + 0.5
        t_ys = t_ys + 0.5
        l_xs = l_xs + 0.5
        l_ys = l_ys + 0.5
        b_xs = b_xs + 0.5
        b_ys = b_ys + 0.5
        r_xs = r_xs + 0.5
        r_ys = r_ys + 0.5
    bboxes = torch.stack((l_xs, t_ys, r_xs, b_ys), dim=5)
    bboxes = bboxes.view(batch, -1, 4)
    bboxes = _gather_feat(bboxes, inds)
    clses = t_clses.contiguous().view(batch, -1, 1)
    clses = _gather_feat(clses, inds).float()
    t_xs = t_xs.contiguous().view(batch, -1, 1)
    t_xs = _gather_feat(t_xs, inds).float()
    t_ys = t_ys.contiguous().view(batch, -1, 1)
    t_ys = _gather_feat(t_ys, inds).float()
    l_xs = l_xs.contiguous().view(batch, -1, 1)
    l_xs = _gather_feat(l_xs, inds).float()
    l_ys = l_ys.contiguous().view(batch, -1, 1)
    l_ys = _gather_feat(l_ys, inds).float()
    b_xs = b_xs.contiguous().view(batch, -1, 1)
    b_xs = _gather_feat(b_xs, inds).float()
    b_ys = b_ys.contiguous().view(batch, -1, 1)
    b_ys = _gather_feat(b_ys, inds).float()
    r_xs = r_xs.contiguous().view(batch, -1, 1)
    r_xs = _gather_feat(r_xs, inds).float()
    r_ys = r_ys.contiguous().view(batch, -1, 1)
    r_ys = _gather_feat(r_ys, inds).float()
    detections = torch.cat([bboxes, scores, t_xs, t_ys, l_xs, l_ys,
                            b_xs, b_ys, r_xs, r_ys, clses], dim=2)
    return detections
def ddd_decode(heat, rot, depth, dim, wh=None, reg=None, K=40):
    """Decode 3D-detection head outputs into per-image top-K detections.

    Gathers rotation (8 bins), depth, 3D dimensions, and optionally 2D size
    at the K strongest heatmap peaks. Returns (batch, K, 16 or 18):
    x, y, score, rot(8), depth, dim(3)[, wh(2)], class.
    """
    batch, cat, height, width = heat.size()
    # heat = torch.sigmoid(heat)
    # perform nms on heatmaps
    heat = _nms(heat)
    scores, inds, clses, ys, xs = _topk(heat, K=K)
    if reg is not None:
        # Sub-pixel center refinement.
        reg = _tranpose_and_gather_feat(reg, inds)
        reg = reg.view(batch, K, 2)
        xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
        ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
    else:
        xs = xs.view(batch, K, 1) + 0.5
        ys = ys.view(batch, K, 1) + 0.5
    rot = _tranpose_and_gather_feat(rot, inds)
    rot = rot.view(batch, K, 8)
    depth = _tranpose_and_gather_feat(depth, inds)
    depth = depth.view(batch, K, 1)
    dim = _tranpose_and_gather_feat(dim, inds)
    dim = dim.view(batch, K, 3)
    clses = clses.view(batch, K, 1).float()
    scores = scores.view(batch, K, 1)
    xs = xs.view(batch, K, 1)
    ys = ys.view(batch, K, 1)
    if wh is not None:
        wh = _tranpose_and_gather_feat(wh, inds)
        wh = wh.view(batch, K, 2)
        detections = torch.cat(
            [xs, ys, scores, rot, depth, dim, wh, clses], dim=2)
    else:
        detections = torch.cat(
            [xs, ys, scores, rot, depth, dim, clses], dim=2)
    return detections
def ctdet_decode(heat, wh, reg=None, cat_spec_wh=False, K=100):
    """Decode CenterNet box-detection outputs into top-K detections.

    Returns (batch, K, 6): x1, y1, x2, y2, score, class in output-map
    coordinates. With cat_spec_wh the wh head predicts a size per class and
    the size of the detected class is selected.
    """
    batch, cat, height, width = heat.size()
    # heat = torch.sigmoid(heat)
    # perform nms on heatmaps
    heat = _nms(heat)
    scores, inds, clses, ys, xs = _topk(heat, K=K)
    if reg is not None:
        # Sub-pixel center refinement.
        reg = _tranpose_and_gather_feat(reg, inds)
        reg = reg.view(batch, K, 2)
        xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
        ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
    else:
        xs = xs.view(batch, K, 1) + 0.5
        ys = ys.view(batch, K, 1) + 0.5
    wh = _tranpose_and_gather_feat(wh, inds)
    if cat_spec_wh:
        wh = wh.view(batch, K, cat, 2)
        clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long()
        wh = wh.gather(2, clses_ind).view(batch, K, 2)
    else:
        wh = wh.view(batch, K, 2)
    clses = clses.view(batch, K, 1).float()
    scores = scores.view(batch, K, 1)
    # Center + size -> corner-format box.
    bboxes = torch.cat([xs - wh[..., 0:1] / 2,
                        ys - wh[..., 1:2] / 2,
                        xs + wh[..., 0:1] / 2,
                        ys + wh[..., 1:2] / 2], dim=2)
    detections = torch.cat([bboxes, scores, clses], dim=2)
    return detections
def multi_pose_decode(
        heat, wh, kps, reg=None, hm_hp=None, hp_offset=None, K=100):
    """Decode multi-person pose outputs into top-K detections with keypoints.

    Keypoints are first regressed from each detected center; when a keypoint
    heatmap (hm_hp) is given, each regressed joint snaps to the nearest
    confident heatmap peak that lies inside the box. Returns
    (batch, K, 4 + 1 + 2*J + 1): bbox, score, keypoints, class.
    """
    batch, cat, height, width = heat.size()
    num_joints = kps.shape[1] // 2
    # heat = torch.sigmoid(heat)
    # perform nms on heatmaps
    heat = _nms(heat)
    scores, inds, clses, ys, xs = _topk(heat, K=K)
    # Center-relative joint offsets -> absolute joint coordinates.
    kps = _tranpose_and_gather_feat(kps, inds)
    kps = kps.view(batch, K, num_joints * 2)
    kps[..., ::2] += xs.view(batch, K, 1).expand(batch, K, num_joints)
    kps[..., 1::2] += ys.view(batch, K, 1).expand(batch, K, num_joints)
    if reg is not None:
        # Sub-pixel center refinement.
        reg = _tranpose_and_gather_feat(reg, inds)
        reg = reg.view(batch, K, 2)
        xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
        ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
    else:
        xs = xs.view(batch, K, 1) + 0.5
        ys = ys.view(batch, K, 1) + 0.5
    wh = _tranpose_and_gather_feat(wh, inds)
    wh = wh.view(batch, K, 2)
    clses = clses.view(batch, K, 1).float()
    scores = scores.view(batch, K, 1)
    bboxes = torch.cat([xs - wh[..., 0:1] / 2,
                        ys - wh[..., 1:2] / 2,
                        xs + wh[..., 0:1] / 2,
                        ys + wh[..., 1:2] / 2], dim=2)
    if hm_hp is not None:
        hm_hp = _nms(hm_hp)
        thresh = 0.1
        kps = kps.view(batch, K, num_joints, 2).permute(
            0, 2, 1, 3).contiguous()  # b x J x K x 2
        reg_kps = kps.unsqueeze(3).expand(batch, num_joints, K, K, 2)
        hm_score, hm_inds, hm_ys, hm_xs = _topk_channel(hm_hp, K=K)  # b x J x K
        if hp_offset is not None:
            # Sub-pixel refinement of the heatmap joints.
            hp_offset = _tranpose_and_gather_feat(
                hp_offset, hm_inds.view(batch, -1))
            hp_offset = hp_offset.view(batch, num_joints, K, 2)
            hm_xs = hm_xs + hp_offset[:, :, :, 0]
            hm_ys = hm_ys + hp_offset[:, :, :, 1]
        else:
            hm_xs = hm_xs + 0.5
            hm_ys = hm_ys + 0.5
        # Push low-confidence heatmap joints far away so they never match.
        mask = (hm_score > thresh).float()
        hm_score = (1 - mask) * -1 + mask * hm_score
        hm_ys = (1 - mask) * (-10000) + mask * hm_ys
        hm_xs = (1 - mask) * (-10000) + mask * hm_xs
        hm_kps = torch.stack([hm_xs, hm_ys], dim=-1).unsqueeze(
            2).expand(batch, num_joints, K, K, 2)
        # Nearest heatmap peak for each regressed joint.
        dist = (((reg_kps - hm_kps) ** 2).sum(dim=4) ** 0.5)
        min_dist, min_ind = dist.min(dim=3)  # b x J x K
        hm_score = hm_score.gather(2, min_ind).unsqueeze(-1)  # b x J x K x 1
        min_dist = min_dist.unsqueeze(-1)
        min_ind = min_ind.view(batch, num_joints, K, 1, 1).expand(
            batch, num_joints, K, 1, 2)
        hm_kps = hm_kps.gather(3, min_ind)
        hm_kps = hm_kps.view(batch, num_joints, K, 2)
        l = bboxes[:, :, 0].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        t = bboxes[:, :, 1].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        r = bboxes[:, :, 2].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        b = bboxes[:, :, 3].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
        # Keep the regressed joint when the matched peak is outside the box,
        # too weak, or too far from the regression.
        mask = (hm_kps[..., 0:1] < l) + (hm_kps[..., 0:1] > r) + \
               (hm_kps[..., 1:2] < t) + (hm_kps[..., 1:2] > b) + \
               (hm_score < thresh) + (min_dist > (torch.max(b - t, r - l) * 0.3))
        mask = (mask > 0).float().expand(batch, num_joints, K, 2)
        kps = (1 - mask) * hm_kps + mask * kps
        kps = kps.permute(0, 2, 1, 3).contiguous().view(
            batch, K, num_joints * 2)
    detections = torch.cat([bboxes, scores, kps, clses], dim=2)
    return detections
def ctseg_decode(heat, wh, shape_feat, saliency, seg_model, reg=None, cat_spec_wh=False, K=100):
    """Decode box detections plus per-instance masks.

    Boxes are decoded as in ctdet_decode, then for each surviving detection
    a local shape (attention) vector is gathered, combined with ROI-pooled
    saliency through seg_model, and pasted back into an image-size mask.
    Returns (detections, pred_masks); detections is (1, K', 6).
    Assumes batch size 1 (see the `range(1)` loop).
    """
    batch, cat, height, width = heat.size()
    # heat = torch.sigmoid(heat)
    # perform nms on heatmaps
    heat = _nms(heat)
    scores, inds, clses, ys, xs = _topk(heat, K=K)
    # Keep only confident peaks; fall back to the single best one so the
    # downstream pooling always has at least one box.
    selected = scores > 0.05
    if selected.sum() == 0:
        selected[0, 0] = 1
    scores = scores[selected].unsqueeze(dim=0)
    inds = inds[selected].unsqueeze(dim=0)
    clses = clses[selected].unsqueeze(dim=0)
    ys = ys[selected].unsqueeze(dim=0)
    xs = xs[selected].unsqueeze(dim=0)
    K_ = scores.shape[1]
    if reg is not None:
        # Sub-pixel center refinement.
        reg = _tranpose_and_gather_feat(reg, inds)
        reg = reg.view(batch, K_, 2)
        xs = xs.view(batch, K_, 1) + reg[:, :, 0:1]
        ys = ys.view(batch, K_, 1) + reg[:, :, 1:2]
    else:
        xs = xs.view(batch, K_, 1) + 0.5
        ys = ys.view(batch, K_, 1) + 0.5
    wh = _tranpose_and_gather_feat(wh, inds)
    if cat_spec_wh:
        wh = wh.view(batch, K_, cat, 2)
        clses_ind = clses.view(batch, K_, 1, 1).expand(batch, K_, 1, 2).long()
        wh = wh.gather(2, clses_ind).view(batch, K_, 2)
    else:
        wh = wh.view(batch, K_, 2)
    clses = clses.view(batch, K_, 1).float()
    scores = scores.view(batch, K_, 1)
    bboxes = torch.cat([xs - wh[..., 0:1] / 2,
                        ys - wh[..., 1:2] / 2,
                        xs + wh[..., 0:1] / 2,
                        ys + wh[..., 1:2] / 2], dim=2)
    detections = torch.cat([bboxes, scores, clses], dim=2)
    h, w = shape_feat.size(-2), shape_feat.size(-1)
    # Per-detection attention maps predicted by the shape head.
    local_shapes = _tranpose_and_gather_feat(shape_feat, inds)
    attns = torch.reshape(local_shapes, (1, -1, seg_model.attn_size, seg_model.attn_size))
    saliency_list = []
    boxes_list = []
    saliency_list.append(saliency)
    for i in range(1):
        # Boxes are scaled x4 to match the pooler's 0.25 feature scale.
        boxes_list.append(Boxes(bboxes[i, :, :] * 4.))
    rois = seg_model.pooler(saliency_list, boxes_list)
    pred_mask_logits = seg_model.merge_bases(rois, attns)
    pred_mask_logits = pred_mask_logits.view(
        -1, 1, seg_model.pooler_resolution, seg_model.pooler_resolution)
    # Back to output-map coordinates before pasting.
    boxes_list[0].scale(0.25, 0.25)
    pred_masks = retry_if_cuda_oom(paste_masks_in_image)(
        pred_mask_logits[:, 0, :, :],  # N, 1, M, M
        boxes_list[0],
        (h, w),
        threshold=0.5,
    )
    pred_masks = torch.unsqueeze(pred_masks, dim=0)
    return detections, pred_masks
# ------------------------------------------------------------------------------
# Portions of this code are from
# CornerNet (https://github.com/princeton-vl/CornerNet)
# Copyright (c) 2018, University of Michigan
# Licensed under the BSD 3-Clause License
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from .utils import _tranpose_and_gather_feat
import torch.nn.functional as F
from detectron2.structures import Boxes
from detectron2.modeling.poolers import ROIPooler
from detectron2.layers import ROIAlign, ROIAlignRotated, cat
def _slow_neg_loss(pred, gt):
'''focal loss from CornerNet'''
pos_inds = gt.eq(1)
neg_inds = gt.lt(1)
neg_weights = torch.pow(1 - gt[neg_inds], 4)
loss = 0
pos_pred = pred[pos_inds]
neg_pred = pred[neg_inds]
pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)
neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2) * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if pos_pred.nelement() == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _neg_loss(pred, gt):
''' Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
'''
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _not_faster_neg_loss(pred, gt):
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
num_pos = pos_inds.float().sum()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
trans_pred = pred * neg_inds + (1 - pred) * pos_inds
weight = neg_weights * neg_inds + pos_inds
all_loss = torch.log(1 - trans_pred) * torch.pow(trans_pred, 2) * weight
all_loss = all_loss.sum()
if num_pos > 0:
all_loss /= num_pos
loss -= all_loss
return loss
def _slow_reg_loss(regr, gt_regr, mask):
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr)
regr = regr[mask]
gt_regr = gt_regr[mask]
regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)
regr_loss = regr_loss / (num + 1e-4)
return regr_loss
def _reg_loss(regr, gt_regr, mask):
''' L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
'''
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
regr = regr * mask
gt_regr = gt_regr * mask
regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)
regr_loss = regr_loss / (num + 1e-4)
return regr_loss
class FocalLoss(nn.Module):
    '''nn.Module warpper for focal loss (delegates to _neg_loss).'''

    def __init__(self):
        super(FocalLoss, self).__init__()
        self.neg_loss = _neg_loss

    def forward(self, out, target):
        """out, target: (batch x c x h x w); target == 1 marks positives."""
        return self.neg_loss(out, target)
class RegLoss(nn.Module):
    '''Regression loss for an output tensor
    Arguments:
      output (batch x dim x h x w)
      mask (batch x max_objects)
      ind (batch x max_objects)
      target (batch x max_objects x dim)
    '''

    def __init__(self):
        super(RegLoss, self).__init__()

    def forward(self, output, mask, ind, target):
        # Gather per-object predictions at the indexed feature locations,
        # then apply the masked smooth-L1 loss.
        pred = _tranpose_and_gather_feat(output, ind)
        loss = _reg_loss(pred, target, mask)
        return loss
class RegL1Loss(nn.Module):
    """Masked L1 regression loss over gathered per-object predictions.

    Fix: reduction='sum' replaces the long-deprecated size_average=False.
    """

    def __init__(self):
        super(RegL1Loss, self).__init__()

    def forward(self, output, mask, ind, target):
        """output: (B, dim, h, w); mask/ind: (B, max_objects);
        target: (B, max_objects, dim)."""
        pred = _tranpose_and_gather_feat(output, ind)
        mask = mask.unsqueeze(2).expand_as(pred).float()
        # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
        loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
        # Normalize by the number of valid elements.
        loss = loss / (mask.sum() + 1e-4)
        return loss
class NormRegL1Loss(nn.Module):
    """Masked L1 loss on predictions normalized by the target magnitude, so
    each object contributes a relative rather than absolute error.

    Fix: reduction='sum' replaces the long-deprecated size_average=False.
    """

    def __init__(self):
        super(NormRegL1Loss, self).__init__()

    def forward(self, output, mask, ind, target):
        """output: (B, dim, h, w); mask/ind: (B, max_objects);
        target: (B, max_objects, dim)."""
        pred = _tranpose_and_gather_feat(output, ind)
        mask = mask.unsqueeze(2).expand_as(pred).float()
        # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
        # Relative error: compare pred/target against 1.
        pred = pred / (target + 1e-4)
        target = target * 0 + 1
        loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
        loss = loss / (mask.sum() + 1e-4)
        return loss
class RegWeightedL1Loss(nn.Module):
    """L1 loss with a per-element (already expanded) float mask/weight.

    Fix: reduction='sum' replaces the long-deprecated size_average=False.
    """

    def __init__(self):
        super(RegWeightedL1Loss, self).__init__()

    def forward(self, output, mask, ind, target):
        """output: (B, dim, h, w); ind: (B, max_objects);
        mask: same shape as target (per-element weights)."""
        pred = _tranpose_and_gather_feat(output, ind)
        mask = mask.float()
        # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
        loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
        loss = loss / (mask.sum() + 1e-4)
        return loss
class L1Loss(nn.Module):
    """Mean masked L1 regression loss over gathered per-object predictions.

    Fix: reduction='mean' replaces 'elementwise_mean', a legacy name that
    was removed from torch.nn.functional in PyTorch 1.x.
    """

    def __init__(self):
        super(L1Loss, self).__init__()

    def forward(self, output, mask, ind, target):
        """output: (B, dim, h, w); mask/ind: (B, max_objects);
        target: (B, max_objects, dim)."""
        pred = _tranpose_and_gather_feat(output, ind)
        mask = mask.unsqueeze(2).expand_as(pred).float()
        loss = F.l1_loss(pred * mask, target * mask, reduction='mean')
        return loss
class BinRotLoss(nn.Module):
    """Bin-based rotation loss (classification into 2 bins + sin/cos residual
    regression), following the 3D detection formulation."""

    def __init__(self):
        super(BinRotLoss, self).__init__()

    def forward(self, output, mask, ind, rotbin, rotres):
        """output: (B, 8, h, w) rotation head; ind/mask: (B, max_objects);
        rotbin: (B, max_objects, 2) bin labels; rotres: residual angles."""
        pred = _tranpose_and_gather_feat(output, ind)
        loss = compute_rot_loss(pred, rotbin, rotres, mask)
        return loss
def compute_res_loss(output, target):
    """Smooth-L1 residual loss averaged over elements.

    Fix: reduction='mean' replaces the removed legacy name 'elementwise_mean'.
    """
    return F.smooth_l1_loss(output, target, reduction='mean')
# TODO: weight
def compute_bin_loss(output, target, mask):
    """Masked cross-entropy over bin logits.

    Logits at masked-out rows are zeroed (not excluded), so those rows still
    contribute a constant uniform-distribution term to the mean.
    """
    mask = mask.expand_as(output)
    output = output * mask.float()
    # reduction='elementwise_mean' was removed from torch; 'mean' is the
    # same reduction under the current API.
    return F.cross_entropy(output, target, reduction='mean')
def compute_rot_loss(output, target_bin, target_res, mask):
    # Multi-bin rotation loss: cross-entropy on each bin's 2-way classifier
    # plus smooth-L1 regression of sin/cos residuals for the rows whose bin
    # label is active.
    # output: (B, 128, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos,
    #                      bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos]
    # target_bin: (B, 128, 2) [bin1_cls, bin2_cls]
    # target_res: (B, 128, 2) [bin1_res, bin2_res]
    # mask: (B, 128, 1)
    # import pdb; pdb.set_trace()
    # Flatten batch and object dimensions: one row per candidate object.
    output = output.view(-1, 8)
    target_bin = target_bin.view(-1, 2)
    target_res = target_res.view(-1, 2)
    mask = mask.view(-1, 1)
    loss_bin1 = compute_bin_loss(output[:, 0:2], target_bin[:, 0], mask)
    loss_bin2 = compute_bin_loss(output[:, 4:6], target_bin[:, 1], mask)
    loss_res = torch.zeros_like(loss_bin1)
    # Residual terms are computed only on rows whose bin label is non-zero.
    if target_bin[:, 0].nonzero().shape[0] > 0:
        idx1 = target_bin[:, 0].nonzero()[:, 0]
        valid_output1 = torch.index_select(output, 0, idx1.long())
        valid_target_res1 = torch.index_select(target_res, 0, idx1.long())
        # Regress sin/cos of the residual angle to avoid wrap-around issues.
        loss_sin1 = compute_res_loss(
            valid_output1[:, 2], torch.sin(valid_target_res1[:, 0]))
        loss_cos1 = compute_res_loss(
            valid_output1[:, 3], torch.cos(valid_target_res1[:, 0]))
        loss_res += loss_sin1 + loss_cos1
    if target_bin[:, 1].nonzero().shape[0] > 0:
        idx2 = target_bin[:, 1].nonzero()[:, 0]
        valid_output2 = torch.index_select(output, 0, idx2.long())
        valid_target_res2 = torch.index_select(target_res, 0, idx2.long())
        loss_sin2 = compute_res_loss(
            valid_output2[:, 6], torch.sin(valid_target_res2[:, 1]))
        loss_cos2 = compute_res_loss(
            valid_output2[:, 7], torch.cos(valid_target_res2[:, 1]))
        loss_res += loss_sin2 + loss_cos2
    return loss_bin1 + loss_bin2 + loss_res
class SegLoss(nn.Module):
    """Instance-segmentation loss.

    Saliency bases are ROI-pooled per ground-truth box, combined with
    per-object attention coefficients gathered at object centers
    (``merge_bases``), and the resulting mask predictions are scored with
    BCE against cropped ground-truth masks, weighted per object by its
    centerness target.
    """

    def __init__(self, feat_channel):
        super(SegLoss, self).__init__()
        self.attn_size = 14            # side of each per-object attention map
        self.pooler_resolution = 56    # side of pooled ROIs / GT mask crops
        self.num_gpus = 4              # loss normaliser assumes 4 GPUs — TODO confirm
        self.pooler = ROIPooler(
            output_size=self.pooler_resolution,
            scales=[0.25],             # saliency map is at 1/4 input resolution
            sampling_ratio=1,
            pooler_type='ROIAlignV2',
            canonical_level=2)

    def forward(self, saliency, shape, gtboxes, reg_mask, ind, instance_mask,
                center_target=None, cat_mask=None):
        batch_size = saliency.size(0)
        # Attention coefficients at each object's center location.
        local_shapes = _tranpose_and_gather_feat(shape, ind)
        attns = torch.reshape(local_shapes, (batch_size, -1, self.attn_size, self.attn_size))
        saliency_list = []
        boxes_list = []
        reg_mask_list = []
        saliency_list.append(saliency)
        for i in range(batch_size):
            # Boxes are in output coordinates; x4 brings them to input scale.
            boxes_list.append(Boxes(gtboxes[i, :, :] * 4.))
            reg_mask_list.append(reg_mask[i])
        center_target = _tranpose_and_gather_feat(center_target, ind)[cat_mask.to(dtype=bool)]
        # num_obj = reg_mask.sum()
        reg_mask = cat(reg_mask_list, dim=0)
        rois = self.pooler(saliency_list, boxes_list)
        pred_mask_logits = self.merge_bases(rois, attns)
        gt_masks = []
        for i, instances_per_image in enumerate(boxes_list):
            if len(instances_per_image.tensor) == 0:
                continue
            # Back to feature-map scale before cropping the GT masks.
            instances_per_image.scale(0.25, 0.25)
            gt_mask_per_image = self.crop_and_resize(instance_mask[i, :, :, :],
                                                     instances_per_image.tensor, self.pooler_resolution
                                                     ).to(device=pred_mask_logits.device)
            gt_masks.append(gt_mask_per_image)
        gt_masks = cat(gt_masks, dim=0)
        N = gt_masks.size(0)
        gt_masks = gt_masks.view(N, -1)
        loss_denorm = max(center_target.sum() / self.num_gpus, 1e-6)
        # num_rois = pred_mask_logits.size(1)
        # true_mask = torch.repeat_interleave(reg_mask.unsqueeze(dim=1),
        #                                     repeats=num_rois, dim=1)
        reg_mask = reg_mask.to(dtype=bool)
        mask_losses = F.binary_cross_entropy(
            pred_mask_logits[reg_mask], gt_masks.to(dtype=torch.float32)[reg_mask], reduction="none")
        # Centerness-weighted mean over the valid objects.
        mask_loss = ((mask_losses.mean(dim=-1) * center_target).sum() / loss_denorm)
        # mask_loss = mask_loss / num_obj
        return mask_loss

    def merge_bases(self, rois, coeffs, location_to_inds=None):
        """Combine pooled saliency bases with (upsampled, sigmoided) attention
        coefficients into flattened per-object mask predictions."""
        # merge predictions
        # N = coeffs.size(0)
        if location_to_inds is not None:
            rois = rois[location_to_inds]
        N, B, H, W = rois.size()
        coeffs = coeffs.view(N, -1, self.attn_size, self.attn_size)
        coeffs = F.interpolate(coeffs, (H, W),
                               mode='bilinear').sigmoid()  # .softmax(dim=1)
        masks_preds = (rois.sigmoid() * coeffs).sum(dim=1)
        return masks_preds.view(N, -1)

    def crop_and_resize(self, instance_mask, boxes, mask_size):
        """
        Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
        This can be used to prepare training targets for Mask R-CNN.
        It has less reconstruction error compared to rasterization with polygons.
        However we observe no difference in accuracy,
        but BitMasks requires more memory to store all the masks.

        Args:
            boxes (Tensor): Nx4 tensor storing the boxes for each mask
            mask_size (int): the size of the rasterized mask.

        Returns:
            Tensor:
                A bool tensor of shape (N, mask_size, mask_size), where
                N is the number of predicted boxes for this image.
        """
        assert len(boxes) == len(instance_mask), "{} != {}".format(len(boxes), len(instance_mask))
        device = instance_mask.device
        # ROIAlign expects Nx5 rows of (batch_index, x1, y1, x2, y2).
        batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
        rois = torch.cat([batch_inds, boxes], dim=1)  # Nx5
        bit_masks = instance_mask.to(dtype=torch.float32)
        rois = rois.to(device=device)
        output = (
            ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
            .forward(bit_masks[:, None, :, :], rois)
            .squeeze(1)
        )
        # Binarise the interpolated crops. (The original return line was
        # corrupted by stray dataset text; restored to `return output`.)
        output = output >= 0.5
        return output
# --- houghnet-master/src/lib/models/data_parallel.py ---
import torch
from torch.nn.parallel.scatter_gather import gather
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.parallel_apply import parallel_apply
from .scatter_gather import scatter_kwargs
class _DataParallel(Module):
    r"""Implements data parallelism at the module level.

    This container parallelizes the application of the given module by
    splitting the input across the specified devices by chunking in the batch
    dimension. In the forward pass, the module is replicated on each device,
    and each replica handles a portion of the input. During the backwards
    pass, gradients from each replica are summed into the original module.

    The batch size should be larger than the number of GPUs used. It should
    also be an integer multiple of the number of GPUs so that each chunk is the
    same size (so that each GPU processes the same number of samples).

    See also: :ref:`cuda-nn-dataparallel-instead`

    Arbitrary positional and keyword inputs are allowed to be passed into
    DataParallel EXCEPT Tensors. All variables will be scattered on dim
    specified (default 0). Primitive types will be broadcasted, but all
    other types will be a shallow copy and can be corrupted if written to in
    the model's forward pass.

    Args:
        module: module to be parallelized
        device_ids: CUDA devices (default: all devices)
        output_device: device location of output (default: device_ids[0])

    Example::

        >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
        >>> output = net(input_var)
    """

    # TODO: update notes/cuda.rst when this class handles 8+ GPUs well

    def __init__(self, module, device_ids=None, output_device=None, dim=0, chunk_sizes=None):
        super(_DataParallel, self).__init__()
        # CPU-only fallback: behave as a transparent wrapper around `module`.
        if not torch.cuda.is_available():
            self.module = module
            self.device_ids = []
            return
        if device_ids is None:
            device_ids = list(range(torch.cuda.device_count()))
        if output_device is None:
            output_device = device_ids[0]
        self.dim = dim
        self.module = module
        self.device_ids = device_ids
        # Per-device batch chunk sizes — the only difference from
        # torch.nn.DataParallel, which always splits the batch evenly.
        self.chunk_sizes = chunk_sizes
        self.output_device = output_device
        if len(self.device_ids) == 1:
            self.module.cuda(device_ids[0])

    def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        # Split inputs per device according to chunk_sizes.
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        # Replicate onto as many devices as there are input chunks.
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def replicate(self, module, device_ids):
        return replicate(module, device_ids)

    def scatter(self, inputs, kwargs, device_ids, chunk_sizes):
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim, chunk_sizes=self.chunk_sizes)

    def parallel_apply(self, replicas, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

    def gather(self, outputs, output_device):
        return gather(outputs, output_device, dim=self.dim)
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    r"""Evaluates module(input) in parallel across the GPUs given in device_ids.

    This is the functional version of the DataParallel module.

    Args:
        module: the module to evaluate in parallel
        inputs: inputs to the module
        device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output  Use -1 to indicate the CPU.
            (default: device_ids[0])
    Returns:
        a Variable containing the result of module(input) located on
        output_device
    """
    inputs = inputs if isinstance(inputs, tuple) else (inputs,)
    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))
    if output_device is None:
        output_device = device_ids[0]
    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    # One device: no replication needed, run the module directly.
    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])
    # Use only as many devices as there are scattered input chunks.
    active_ids = device_ids[:len(inputs)]
    replicas = replicate(module, active_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, active_ids)
    return gather(outputs, output_device, dim)
def DataParallel(module, device_ids=None, output_device=None, dim=0, chunk_sizes=None):
    """Factory: return a plain ``torch.nn.DataParallel`` unless ``chunk_sizes``
    requests an uneven per-GPU batch split, in which case the custom
    ``_DataParallel`` (which honours the requested chunk sizes) is used.
    """
    if chunk_sizes is None:
        return torch.nn.DataParallel(module, device_ids, output_device, dim)
    # Uniform chunk sizes are equivalent to the default even split.
    if all(size == chunk_sizes[0] for size in chunk_sizes):
        return torch.nn.DataParallel(module, device_ids, output_device, dim)
    # (The original return line was corrupted by stray dataset text.)
    return _DataParallel(module, device_ids, output_device, dim, chunk_sizes)
return _DataParallel(module, device_ids, output_device, dim, chunk_sizes) | 5,176 | 39.445313 | 101 | py |
houghnet | houghnet-master/src/lib/models/utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
def _sigmoid(x):
y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)
return y
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _tranpose_and_gather_feat(feat, ind):
    """Reorder (B, C, H, W) into (B, H*W, C) and gather the rows in `ind`."""
    flat = feat.permute(0, 2, 3, 1).contiguous()
    flat = flat.view(flat.size(0), -1, flat.size(3))
    return _gather_feat(flat, ind)
def flip_tensor(x):
    """Mirror a NCHW tensor along its width (last) axis."""
    return torch.flip(x, dims=[3])
def flip_lr(x, flip_idx):
    """Horizontally flip per-channel maps and swap paired channels.

    `flip_idx` lists (left, right) channel index pairs to exchange after
    mirroring (e.g. left/right keypoint heatmaps).
    """
    flipped = x.detach().cpu().numpy()[..., ::-1].copy()
    out_shape = flipped.shape
    for left, right in flip_idx:
        flipped[:, left, ...], flipped[:, right, ...] = \
            flipped[:, right, ...].copy(), flipped[:, left, ...].copy()
    return torch.from_numpy(flipped.reshape(out_shape)).to(x.device)
def flip_lr_off(x, flip_idx):
    """Horizontally flip a keypoint-offset map (B, 34, H, W).

    Channels are (x, y) offset pairs for 17 keypoints: after mirroring, the
    x components are negated (mirroring reverses the x direction) and the
    left/right keypoint channel pairs listed in `flip_idx` are swapped.
    """
    tmp = x.detach().cpu().numpy()[..., ::-1].copy()
    shape = tmp.shape
    # View as (batch, 17 keypoints, 2 offset components, H, W).
    tmp = tmp.reshape(tmp.shape[0], 17, 2,
                      tmp.shape[2], tmp.shape[3])
    tmp[:, :, 0, :, :] *= -1
    for e in flip_idx:
        tmp[:, e[0], ...], tmp[:, e[1], ...] = \
            tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy()
    # (The original return line was corrupted by stray dataset text.)
    return torch.from_numpy(tmp.reshape(shape)).to(x.device)
# --- houghnet-master/src/lib/models/model.py ---
from __future__ import absolute_import
from __future__ import print_function
import torch
import torch.nn as nn
from .networks.msra_resnet import get_pose_net
from .networks.resnet_dcn import get_pose_net as get_pose_net_dcn
from .networks.large_hourglass import get_large_hourglass_net
from .networks.houghnet_resnet import get_houghnet_net
from .networks.houghnet_dcn import get_houghnet_dcn_net
from .networks.houghnet_large_hourglass import get_houghnet_large_hourglass_net
from .networks.pose_dla_dcn_hough import get_pose_net as get_dlahough_dcn
from .networks.pose_dla_dcn import get_pose_net as get_dla_dcn
_model_factory = {
'res': [get_pose_net, get_houghnet_net],
'resdcn': [get_pose_net_dcn, get_houghnet_dcn_net],
'hourglass': [get_large_hourglass_net, get_houghnet_large_hourglass_net],
'dla': [get_dla_dcn, get_dlahough_dcn]
}
def create_model(arch, heads, head_conv, region_num=0, vote_field_size=0, model_v1=False):
    """Instantiate a HoughNet backbone from an arch spec such as 'resdcn_18'."""
    # Split "name_NUMLAYERS" into its parts; a plain name means 0 layers.
    if '_' in arch:
        num_layers = int(arch[arch.find('_') + 1:])
        arch = arch[:arch.find('_')]
    else:
        num_layers = 0
    builder = _model_factory[arch][1]  # index 1 selects the HoughNet variant
    return builder(num_layers=num_layers, heads=heads, head_conv=head_conv,
                   region_num=region_num, vote_field_size=vote_field_size,
                   model_v1=model_v1)
def load_model(model, model_path, optimizer=None, resume=False,
               lr=None, lr_step=None):
    """Load a checkpoint into `model`, tolerating name/shape mismatches.

    Parameters that are missing or have a different shape are reported and
    kept at the model's current values instead of raising.  When `resume` is
    True and the checkpoint holds optimizer state, the optimizer is restored
    and the learning rate is re-decayed (x0.1 per passed `lr_step` entry).

    Returns `model`, or `(model, optimizer, start_epoch)` when an optimizer
    is supplied.
    """
    start_epoch = 0
    # map_location keeps tensors on CPU regardless of where they were saved.
    checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
    print('loaded {}, epoch {}'.format(model_path, checkpoint['epoch']))
    state_dict_ = checkpoint['state_dict']
    state_dict = {}
    # convert data_parallal to model
    for k in state_dict_:
        if k.startswith('module') and not k.startswith('module_list'):
            state_dict[k[7:]] = state_dict_[k]  # strip the 'module.' prefix
        else:
            state_dict[k] = state_dict_[k]
    model_state_dict = model.state_dict()
    # check loaded parameters and created model parameters
    for k in state_dict:
        if k in model_state_dict:
            if state_dict[k].shape != model_state_dict[k].shape:
                # Shape mismatch: keep the model's own (random) weights.
                print('Skip loading parameter {}, required shape{}, ' \
                      'loaded shape{}.'.format(
                    k, model_state_dict[k].shape, state_dict[k].shape))
                state_dict[k] = model_state_dict[k]
        else:
            print('Drop parameter {}.'.format(k))
    for k in model_state_dict:
        if not (k in state_dict):
            print('No param {}.'.format(k))
            state_dict[k] = model_state_dict[k]
    model.load_state_dict(state_dict, strict=False)
    # resume optimizer parameters
    if optimizer is not None and resume:
        if 'optimizer' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            start_epoch = checkpoint['epoch']
            start_lr = lr
            # Re-apply the step decay schedule up to the resumed epoch.
            for step in lr_step:
                if start_epoch >= step:
                    start_lr *= 0.1
            for param_group in optimizer.param_groups:
                param_group['lr'] = start_lr
            print('Resumed optimizer with start lr', start_lr)
        else:
            print('No optimizer parameters in checkpoint.')
    if optimizer is not None:
        return model, optimizer, start_epoch
    else:
        return model
def save_model(path, epoch, model, optimizer=None):
    """Serialize a checkpoint ``{epoch, state_dict[, optimizer]}`` to `path`.

    DataParallel wrappers are unwrapped first so saved keys carry no
    'module.' prefix.
    """
    if isinstance(model, torch.nn.DataParallel):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    data = {'epoch': epoch,
            'state_dict': state_dict}
    # Idiomatic form of the original `if not (optimizer is None):`.
    if optimizer is not None:
        data['optimizer'] = optimizer.state_dict()
    torch.save(data, path)
# --- houghnet-master/src/lib/models/scatter_gather.py ---
import torch
from torch.nn.parallel._functions import Scatter, Gather
def scatter(inputs, target_gpus, dim=0, chunk_sizes=None):
    r"""
    Slices variables into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not variables. Does not
    support Tensors.
    """
    def scatter_map(obj):
        # Variables are split along `dim`, honouring the per-GPU chunk sizes.
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
        assert not torch.is_tensor(obj), "Tensors not supported in scatter."
        # Containers are scattered element-wise and re-zipped, producing one
        # container of the same type per target GPU.
        if isinstance(obj, tuple):
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list):
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict):
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Anything else is replicated by reference, once per GPU.
        return [obj for targets in target_gpus]
    return scatter_map(inputs)
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0, chunk_sizes=None):
    r"""Scatter with support for kwargs dictionary"""
    scattered_args = scatter(inputs, target_gpus, dim, chunk_sizes) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, dim, chunk_sizes) if kwargs else []
    # Pad the shorter list so both have one entry per target device.
    if len(scattered_args) < len(scattered_kwargs):
        scattered_args.extend(() for _ in range(len(scattered_kwargs) - len(scattered_args)))
    elif len(scattered_kwargs) < len(scattered_args):
        scattered_kwargs.extend({} for _ in range(len(scattered_args) - len(scattered_kwargs)))
    return tuple(scattered_args), tuple(scattered_kwargs)
# --- houghnet-master/src/lib/models/networks/resnet_dcn.py ---
# ------------------------------------------------------------------------------
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Dequan Wang and Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import torch
import torch.nn as nn
from .DCNv2.dcn_v2 import DCN
import torch.utils.model_zoo as model_zoo
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,  # BN follows each conv, so a bias would be redundant
    )
class BasicBlock(nn.Module):
    """Standard two-conv ResNet basic block (expansion 1)."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # Optional projection applied to the residual when the shape changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # The stride sits on the 3x3 conv (torchvision-style placement).
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection applied to the residual when the shape changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
def fill_up_weights(up):
    """Initialise a ConvTranspose2d with bilinear-upsampling weights.

    Kernel [0, 0] is filled with the separable bilinear filter; every other
    output channel's [ch, 0] kernel receives an identical copy.
    """
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    center = (2 * f - 1 - f % 2) / (2. * f)
    for row in range(w.size(2)):
        for col in range(w.size(3)):
            w[0, 0, row, col] = \
                (1 - math.fabs(row / f - center)) * (1 - math.fabs(col / f - center))
    for ch in range(1, w.size(0)):
        w[ch, 0, :, :] = w[0, 0, :, :]
def fill_fc_weights(layers):
    """Init every Conv2d under `layers`: N(0, 0.001) weights, zero bias."""
    for m in layers.modules():
        if not isinstance(m, nn.Conv2d):
            continue
        nn.init.normal_(m.weight, std=0.001)
        # torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
        # torch.nn.init.xavier_normal_(m.weight.data)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
class PoseResNet(nn.Module):
    """ResNet backbone + three DCN/deconv upsampling stages + per-task heads.

    Output stride is 4: the ResNet reduces resolution 32x (stride-2 stem,
    stride-2 maxpool, three stride-2 stages) and the three 2x deconv stages
    upsample 8x.  Each entry of `heads` maps a head name to its channel count.
    """

    def __init__(self, block, layers, heads, head_conv):
        self.inplanes = 64
        self.heads = heads
        self.deconv_with_bias = False

        super(PoseResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            3,
            [256, 128, 64],
            [4, 4, 4],
        )

        for head in self.heads:
            classes = self.heads[head]
            if head_conv > 0:
                if 'hm' in head:
                    fc = nn.Sequential(
                        nn.Conv2d(64, head_conv,
                                  kernel_size=3, padding=1, bias=True),
                        nn.ReLU(inplace=True),
                        # NOTE(review): kernel_size=4 with padding=33 and
                        # dilation=22 preserves spatial size, but upstream
                        # CenterNet uses a 1x1 conv here — confirm these
                        # values are intended.
                        nn.Conv2d(head_conv, classes, kernel_size=4,
                                  stride=1, padding=33, dilation=22, bias=True)
                    )
                    # Bias prior so the initial heatmap sigmoid is ~0.1.
                    fc[-1].bias.data.fill_(-2.19)
                else:
                    fc = nn.Sequential(
                        nn.Conv2d(64, head_conv,
                                  kernel_size=3, padding=1, bias=True),
                        nn.ReLU(inplace=True),
                        nn.Conv2d(head_conv, classes,
                                  kernel_size=1, stride=1,
                                  padding=0, bias=True))
                    fill_fc_weights(fc)
            else:
                # head_conv <= 0: a single 1x1 conv per head.
                fc = nn.Conv2d(64, classes,
                               kernel_size=1, stride=1,
                               padding=0, bias=True)
                if 'hm' in head:
                    fc.bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            self.__setattr__(head, fc)

    def _make_layer(self, block, planes, blocks, stride=1):
        # Standard torchvision-style ResNet stage builder.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _get_deconv_cfg(self, deconv_kernel, index):
        # Only kernels 4/3/2 are handled; any other value leaves `padding`
        # unbound and would raise NameError at the return below.
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0

        return deconv_kernel, padding, output_padding

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        # Each stage: DCN 3x3 -> BN -> ReLU -> 2x deconv -> BN -> ReLU.
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'

        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)

            planes = num_filters[i]
            fc = DCN(self.inplanes, planes,
                     kernel_size=(3,3), stride=1,
                     padding=1, dilation=1, deformable_groups=1)
            # fc = nn.Conv2d(self.inplanes, planes,
            #         kernel_size=3, stride=1,
            #         padding=1, dilation=1, bias=False)
            # fill_fc_weights(fc)
            up = nn.ConvTranspose2d(
                in_channels=planes,
                out_channels=planes,
                kernel_size=kernel,
                stride=2,
                padding=padding,
                output_padding=output_padding,
                bias=self.deconv_with_bias)
            # Deconvs start out as bilinear upsamplers.
            fill_up_weights(up)

            layers.append(fc)
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            layers.append(up)
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.deconv_layers(x)
        ret = {}
        for head in self.heads:
            ret[head] = self.__getattr__(head)(x)
        # Single-stage network: a one-element list matches the multi-stack
        # output interface used by the hourglass variants.
        return [ret]

    def init_weights(self, num_layers):
        if 1:  # kept from upstream; always true
            url = model_urls['resnet{}'.format(num_layers)]
            pretrained_state_dict = model_zoo.load_url(url)
            print('=> loading pretrained model {}'.format(url))
            self.load_state_dict(pretrained_state_dict, strict=False)
            print('=> init deconv weights from normal distribution')
            for name, m in self.deconv_layers.named_modules():
                if isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
# (block type, per-stage block counts) for each supported ResNet depth.
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
               34: (BasicBlock, [3, 4, 6, 3]),
               50: (Bottleneck, [3, 4, 6, 3]),
               101: (Bottleneck, [3, 4, 23, 3]),
               152: (Bottleneck, [3, 8, 36, 3])}


def get_pose_net(num_layers, heads, head_conv=256):
    # Build a PoseResNet of the requested depth and load ImageNet weights.
    block_class, layers = resnet_spec[num_layers]

    model = PoseResNet(block_class, layers, heads, head_conv=head_conv)
    model.init_weights(num_layers)
    return model
# --- houghnet-master/src/lib/models/networks/houghnet_large_hourglass.py ---
# ------------------------------------------------------------------------------
# CornerNet (https://github.com/princeton-vl/CornerNet)
# Copyright (c) 2018, University of Michigan
# Licensed under the BSD 3-Clause License
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import torch
import torch.nn as nn
from src.lib.models.networks.hough_module import Hough
class convolution(nn.Module):
    """Conv -> (optional) BatchNorm -> ReLU block with 'same' padding."""

    def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
        super(convolution, self).__init__()

        pad = (k - 1) // 2
        # A bias is redundant when BatchNorm follows the convolution.
        self.conv = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(pad, pad),
                              stride=(stride, stride), bias=not with_bn)
        self.bn = nn.BatchNorm2d(out_dim) if with_bn else nn.Sequential()
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class fully_connected(nn.Module):
    """Linear -> (optional) BatchNorm1d -> ReLU block."""

    def __init__(self, inp_dim, out_dim, with_bn=True):
        super(fully_connected, self).__init__()
        self.with_bn = with_bn

        self.linear = nn.Linear(inp_dim, out_dim)
        if self.with_bn:
            self.bn = nn.BatchNorm1d(out_dim)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.linear(x)
        if self.with_bn:
            out = self.bn(out)
        return self.relu(out)
class residual(nn.Module):
    """Two 3x3 conv+BN stages with a (possibly projected) identity shortcut.

    Note: `k` is accepted for interface uniformity but the convs are fixed 3x3.
    """

    def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
        super(residual, self).__init__()

        self.conv1 = nn.Conv2d(inp_dim, out_dim, (3, 3), padding=(1, 1),
                               stride=(stride, stride), bias=False)
        self.bn1 = nn.BatchNorm2d(out_dim)
        self.relu1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(out_dim, out_dim, (3, 3), padding=(1, 1), bias=False)
        self.bn2 = nn.BatchNorm2d(out_dim)

        # 1x1 projection only when the shape changes; identity otherwise.
        if stride != 1 or inp_dim != out_dim:
            self.skip = nn.Sequential(
                nn.Conv2d(inp_dim, out_dim, (1, 1), stride=(stride, stride), bias=False),
                nn.BatchNorm2d(out_dim)
            )
        else:
            self.skip = nn.Sequential()
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        main = self.relu1(self.bn1(self.conv1(x)))
        main = self.bn2(self.conv2(main))
        return self.relu(main + self.skip(x))
def make_layer(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
    """Stack `modules` layers: the first maps inp_dim->out_dim, the rest keep out_dim."""
    stack = [layer(k, inp_dim, out_dim, **kwargs)]
    stack.extend(layer(k, out_dim, out_dim, **kwargs) for _ in range(1, modules))
    return nn.Sequential(*stack)
def make_layer_revr(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
    """Stack `modules` layers: all but the last keep inp_dim, the last maps to out_dim."""
    stack = [layer(k, inp_dim, inp_dim, **kwargs) for _ in range(modules - 1)]
    stack.append(layer(k, inp_dim, out_dim, **kwargs))
    return nn.Sequential(*stack)
class MergeUp(nn.Module):
    """Merges two feature maps by element-wise addition."""

    def forward(self, up1, up2):
        return torch.add(up1, up2)
def make_merge_layer(dim):
    # `dim` is unused: merging is an element-wise add, independent of width.
    return MergeUp()
# def make_pool_layer(dim):
# return nn.MaxPool2d(kernel_size=2, stride=2)
def make_pool_layer(dim):
    """Identity stage — downsampling is done by strided convs instead
    (see the commented-out MaxPool variant above)."""
    return nn.Sequential()
def make_unpool_layer(dim):
    """2x upsampling stage; `dim` is unused (upsampling is channel-agnostic)."""
    return nn.Upsample(scale_factor=2)
def make_kp_layer(cnv_dim, curr_dim, out_dim):
    """Prediction head: 3x3 conv block (no BN) followed by a 1x1 conv."""
    head = [
        convolution(3, cnv_dim, curr_dim, with_bn=False),
        nn.Conv2d(curr_dim, out_dim, (1, 1)),
    ]
    return nn.Sequential(*head)
def make_inter_layer(dim):
    # Residual block used between stacked hourglasses.
    return residual(3, dim, dim)
def make_cnv_layer(inp_dim, out_dim):
    # 3x3 conv block applied to each hourglass output before the heads.
    return convolution(3, inp_dim, out_dim)
class kp_module(nn.Module):
    """One recursive hourglass level.

    Each level has a skip branch at the current resolution (`up1`) and a
    downsampled branch (`low1` -> recursive `low2` -> `low3` -> upsample
    `up2`); the two branches are merged element-wise.  Recursion depth is
    `n`; `dims`/`modules` give per-level channel widths and block counts.
    """

    def __init__(
        self, n, dims, modules, layer=residual,
        make_up_layer=make_layer, make_low_layer=make_layer,
        make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
        make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
        make_merge_layer=make_merge_layer, **kwargs
    ):
        super(kp_module, self).__init__()

        self.n = n

        curr_mod = modules[0]
        next_mod = modules[1]

        curr_dim = dims[0]
        next_dim = dims[1]

        # Skip branch at the current resolution.
        self.up1 = make_up_layer(
            3, curr_dim, curr_dim, curr_mod,
            layer=layer, **kwargs
        )
        self.max1 = make_pool_layer(curr_dim)
        # Downsampling stage into the next (deeper) level's width.
        self.low1 = make_hg_layer(
            3, curr_dim, next_dim, curr_mod,
            layer=layer, **kwargs
        )
        # Recurse until n == 1, then use a plain bottom layer instead.
        self.low2 = kp_module(
            n - 1, dims[1:], modules[1:], layer=layer,
            make_up_layer=make_up_layer,
            make_low_layer=make_low_layer,
            make_hg_layer=make_hg_layer,
            make_hg_layer_revr=make_hg_layer_revr,
            make_pool_layer=make_pool_layer,
            make_unpool_layer=make_unpool_layer,
            make_merge_layer=make_merge_layer,
            **kwargs
        ) if self.n > 1 else \
            make_low_layer(
                3, next_dim, next_dim, next_mod,
                layer=layer, **kwargs
            )
        # Back up to the current level's width, then upsample.
        self.low3 = make_hg_layer_revr(
            3, next_dim, curr_dim, curr_mod,
            layer=layer, **kwargs
        )
        self.up2 = make_unpool_layer(curr_dim)

        self.merge = make_merge_layer(curr_dim)

    def forward(self, x):
        up1 = self.up1(x)
        max1 = self.max1(x)
        low1 = self.low1(max1)
        low2 = self.low2(low1)
        low3 = self.low3(low2)
        up2 = self.up2(low3)
        return self.merge(up1, up2)
class exkp(nn.Module):
    """Stacked-hourglass network with HoughNet voting heads.

    Heads listed in ``heads['voting_heads']`` predict class-conditional
    voting maps that a ``Hough`` module aggregates into detection heatmaps;
    all other heads are plain convolutional regressors.  One output dict is
    produced per hourglass stack (intermediate supervision).
    """

    def __init__(
        self, region_num, vote_field_size, model_v1, n, nstack, dims, modules, heads, pre=None, cnv_dim=256,
        make_tl_layer=None, make_br_layer=None,
        make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
        make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
        make_up_layer=make_layer, make_low_layer=make_layer,
        make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
        make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
        make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
        kp_layer=residual
    ):
        super(exkp, self).__init__()

        self.nstack = nstack
        # NOTE(review): `heads` is stored by reference, so the
        # `del heads['voting_heads']` below also removes the meta entry from
        # self.heads — forward() relies on iterating only real heads.
        self.heads = heads
        self.region_num = region_num
        self.vote_field_size = vote_field_size
        self.deconv_filter_padding = int(self.vote_field_size / 2)

        curr_dim = dims[0]

        # Stem: 7x7 stride-2 conv + stride-2 residual => 4x downsampling.
        self.pre = nn.Sequential(
            convolution(7, 3, 128, stride=2),
            residual(3, 128, 256, stride=2)
        ) if pre is None else pre

        self.kps = nn.ModuleList([
            kp_module(
                n, dims, modules, layer=kp_layer,
                make_up_layer=make_up_layer,
                make_low_layer=make_low_layer,
                make_hg_layer=make_hg_layer,
                make_hg_layer_revr=make_hg_layer_revr,
                make_pool_layer=make_pool_layer,
                make_unpool_layer=make_unpool_layer,
                make_merge_layer=make_merge_layer
            ) for _ in range(nstack)
        ])
        self.cnvs = nn.ModuleList([
            make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
        ])

        # Inter-stack residual blocks and 1x1+BN projections used to feed
        # each stack's output back into the trunk features.
        self.inters = nn.ModuleList([
            make_inter_layer(curr_dim) for _ in range(nstack - 1)
        ])

        self.inters_ = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
                nn.BatchNorm2d(curr_dim)
            ) for _ in range(nstack - 1)
        ])
        self.cnvs_ = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
                nn.BatchNorm2d(curr_dim)
            ) for _ in range(nstack - 1)
        ])

        self.voting_heads = list(heads['voting_heads'])
        del heads['voting_heads']
        voting = False

        ## keypoint heatmaps
        for head in heads.keys():
            if 'hm' in head:
                module = nn.ModuleList([
                    make_heat_layer(
                        cnv_dim, curr_dim, heads[head]) for _ in range(nstack)
                ])
                self.__setattr__(head, module)
                # Focal-loss-friendly bias prior on heatmap outputs.
                for heat in self.__getattr__(head):
                    heat[-1].bias.data.fill_(-2.19)
                for voting_head in self.voting_heads:
                    # NOTE(review): fullmatch treats `head` as a regex
                    # pattern; for plain names like 'hm' this acts as an
                    # equality test — confirm regex semantics are intended.
                    if re.fullmatch(head, voting_head):
                        voting = True
                if voting:
                    # Voting heads start from an all-zero last layer so the
                    # initial voting maps are flat.
                    for heat in self.__getattr__(head):
                        heat[-1].bias.data.fill_(0)
                        heat[-1].weight.data.fill_(0)
                    out_classes = int(heads[head] / self.region_num)
                    hough_voting = Hough(region_num=self.region_num,
                                         vote_field_size=self.vote_field_size,
                                         num_classes=out_classes,
                                         model_v1=model_v1)
                    self.__setattr__('voting_' + head, hough_voting)
                voting = False
            else:
                module = nn.ModuleList([
                    make_regr_layer(
                        cnv_dim, curr_dim, heads[head]) for _ in range(nstack)
                ])
                self.__setattr__(head, module)

        self.relu = nn.ReLU(inplace=True)  # deconv

    def forward(self, image):
        # print('image shape', image.shape)
        inter = self.pre(image)
        outs = []

        for ind in range(self.nstack):
            kp_, cnv_ = self.kps[ind], self.cnvs[ind]
            kp = kp_(inter)
            cnv = cnv_(kp)

            out = {}
            for head in self.heads:
                layer = self.__getattr__(head)[ind]
                if head in self.voting_heads:
                    # Aggregate the raw voting map into a heatmap.
                    voting_map_hm = layer(cnv)
                    out[head] = self.__getattr__('voting_' + head)(voting_map_hm)
                else:
                    y = layer(cnv)
                    out[head] = y

            outs.append(out)
            if ind < self.nstack - 1:
                # Fuse this stack's output back into the trunk features.
                inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
                inter = self.relu(inter)
                inter = self.inters[ind](inter)
        return outs
def make_hg_layer(kernel, dim0, dim1, mod, layer=convolution, **kwargs):
    """Downsampling hourglass stage: one stride-2 layer, then mod-1 stride-1 layers."""
    stage = [layer(kernel, dim0, dim1, stride=2)]
    stage.extend(layer(kernel, dim1, dim1) for _ in range(mod - 1))
    return nn.Sequential(*stage)
class HourglassNet(exkp):
    """Hourglass-104 configuration (default 2 stacks) of the exkp HoughNet model."""

    def __init__(self, heads, region_num, vote_field_size, model_v1, num_stacks=2):
        # 5 recursive hourglass levels; per-level channel widths and module counts.
        n = 5
        dims = [256, 256, 384, 384, 384, 512]
        modules = [2, 2, 2, 2, 2, 4]
        super(HourglassNet, self).__init__(
            region_num, vote_field_size, model_v1, n, num_stacks, dims, modules, heads,
            make_tl_layer=None,
            make_br_layer=None,
            make_pool_layer=make_pool_layer,
            make_hg_layer=make_hg_layer,
            kp_layer=residual, cnv_dim=256
        )
def get_houghnet_large_hourglass_net(num_layers, heads, head_conv, region_num, vote_field_size, model_v1):
    """Factory for the 2-stack Hourglass HoughNet (num_layers/head_conv are unused here)."""
    return HourglassNet(heads, region_num, vote_field_size, model_v1, 2)
| 11,454 | 34.030581 | 118 | py |
houghnet | houghnet-master/src/lib/models/networks/houghnet_dcn.py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Dequan Wang and Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import math
import logging
import torch.nn as nn
from .DCNv2.dcn_v2 import DCN
import torch.utils.model_zoo as model_zoo
from src.lib.models.networks.hough_module import Hough
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Build a padded, bias-free 3x3 convolution."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 conv/BN stages with an additive skip path."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Skip connection (downsampled when the shape changes).
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + identity
        return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4), with skip path."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + identity)
def fill_up_weights(up):
    """Initialise a ConvTranspose2d in place with bilinear-upsampling weights.

    Channel 0 of filter 0 gets the separable bilinear kernel; every other
    filter copies it.
    """
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    c = (2 * f - 1 - f % 2) / (2. * f)
    for i in range(w.size(2)):
        row_weight = 1 - math.fabs(i / f - c)
        for j in range(w.size(3)):
            w[0, 0, i, j] = row_weight * (1 - math.fabs(j / f - c))
    for ch in range(1, w.size(0)):
        w[ch, 0, :, :] = w[0, 0, :, :]
def fill_fc_weights(layers):
    """Init every Conv2d in `layers`: N(0, 0.001) weights and zero bias."""
    for module in layers.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        nn.init.normal_(module.weight, std=0.001)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
class HoughNetDcnNet(nn.Module):
    """HoughNet detector with a ResNet + DCN-deconv backbone.

    The input is encoded by a standard ResNet (``block``/``layers``), then
    upsampled by three DCN + transposed-conv stages.  Every entry of
    ``heads`` (name -> output channels) becomes an output branch; a head
    whose name matches one of ``heads['voting_heads']`` is additionally
    passed through a Hough voting module in :meth:`forward`.

    Note: ``heads['voting_heads']`` is removed from the dict passed in by
    the caller (the dict is mutated), matching the original behaviour.
    """

    def __init__(self, block, layers, heads, region_num, vote_field_size, model_v1, head_conv):
        self.inplanes = 64
        self.deconv_with_bias = False
        self.region_num = region_num
        self.vote_field_size = vote_field_size
        # self.deconv_filter_padding = int(self.vote_field_size / 2)
        super(HoughNetDcnNet, self).__init__()
        # ResNet stem + 4 stages.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            3,
            [256, 128, 64],
            [4, 4, 4],
        )
        self.voting_heads = list(heads['voting_heads'])
        del heads['voting_heads']
        voting = False
        self.heads = heads
        for head in self.heads:
            classes = self.heads[head]
            if head_conv > 0:
                fc = nn.Sequential(
                    nn.Conv2d(64, head_conv,
                              kernel_size=3, padding=1, bias=True),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(head_conv, classes,
                              kernel_size=1, stride=1,
                              padding=0, bias=True))
                if 'hm' in head:
                    # Heatmap bias prior (focal-loss initialisation).
                    fc[-1].bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
                # NOTE: the head name is used as a regex pattern here,
                # preserved from the original implementation.
                for voting_head in self.voting_heads:
                    if re.fullmatch(head, voting_head):
                        voting = True
                if voting:
                    # Voting heads start from zeroed output layer and get a
                    # Hough module registered as 'voting_<head>'.
                    fc[-1].bias.data.fill_(0)
                    fc[-1].weight.data.fill_(0)
                    out_classes = int(classes / self.region_num)
                    hough_voting = Hough(region_num=self.region_num,
                                         vote_field_size=self.vote_field_size,
                                         num_classes=out_classes,
                                         model_v1=model_v1)
                    # self.hough_voting_heads.update({head:hough_voting})
                    self.__setattr__('voting_' + head, hough_voting)
                    voting = False
            else:
                fc = nn.Conv2d(64, classes,
                               kernel_size=1, stride=1,
                               padding=0, bias=True)
                if 'hm' in head:
                    fc.bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            self.__setattr__(head, fc)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one ResNet stage of `blocks` residual blocks."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _get_deconv_cfg(self, deconv_kernel, index):
        """Return (kernel, padding, output_padding) for a deconv kernel size.

        Raises ValueError for unsupported sizes; previously an unsupported
        size fell through and crashed with UnboundLocalError.
        """
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            raise ValueError(
                'unsupported deconv kernel size: {}'.format(deconv_kernel))
        return deconv_kernel, padding, output_padding

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build `num_layers` stages of DCN + BN + ReLU + ConvTranspose2d + BN + ReLU."""
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)
            planes = num_filters[i]
            fc = DCN(self.inplanes, planes,
                     kernel_size=(3, 3), stride=1,
                     padding=1, dilation=1, deformable_groups=1)
            up = nn.ConvTranspose2d(
                in_channels=planes,
                out_channels=planes,
                kernel_size=kernel,
                stride=2,
                padding=padding,
                output_padding=output_padding,
                bias=self.deconv_with_bias)
            # Bilinear init for the upsampling conv.
            fill_up_weights(up)
            layers.append(fc)
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            layers.append(up)
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return a one-element list holding the dict of head outputs."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.deconv_layers(x)
        ret = {}
        for head in self.heads:
            if head in self.voting_heads:
                voting_map_hm = self.__getattr__(head)(x)
                ret[head] = self.__getattr__('voting_' + head)(voting_map_hm)
            else:
                ret[head] = self.__getattr__(head)(x)
        return [ret]

    def init_weights(self, num_layers):
        """Load ImageNet-pretrained ResNet weights; reset deconv BatchNorm params."""
        # (dead `if 1:` guard removed)
        url = model_urls['resnet{}'.format(num_layers)]
        pretrained_state_dict = model_zoo.load_url(url)
        print('=> loading pretrained model {}'.format(url))
        self.load_state_dict(pretrained_state_dict, strict=False)
        print('=> init deconv weights from normal distribution')
        for name, m in self.deconv_layers.named_modules():
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
# ResNet depth -> (residual block class, number of blocks per stage).
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
               34: (BasicBlock, [3, 4, 6, 3]),
               50: (Bottleneck, [3, 4, 6, 3]),
               101: (Bottleneck, [3, 4, 23, 3]),
               152: (Bottleneck, [3, 8, 36, 3])}
def get_houghnet_dcn_net(num_layers, heads, region_num, vote_field_size, model_v1, head_conv=256):
    """Build a HoughNet with a ResNet-DCN backbone and load ImageNet weights."""
    block_class, layers = resnet_spec[num_layers]
    net = HoughNetDcnNet(block_class, layers, heads, region_num,
                         vote_field_size, model_v1, head_conv=head_conv)
    net.init_weights(num_layers)
    return net
| 11,486 | 34.673913 | 112 | py |
houghnet | houghnet-master/src/lib/models/networks/pose_dla_dcn.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import numpy as np
from os.path import join
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from .DCNv2.dcn_v2 import DCN
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
    """Return the download URL for a pretrained DLA checkpoint."""
    filename = '{}-{}.pth'.format(name, hash)
    return join('http://dl.yf.io/dla/models', data, filename)
def conv3x3(in_planes, out_planes, stride=1):
    """Build a padded, bias-free 3x3 convolution layer."""
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return layer
class BasicBlock(nn.Module):
    """DLA basic block: two 3x3 convs; an explicit residual may be passed to forward."""

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.stride = stride

    def forward(self, x, residual=None):
        # Fall back to the input itself when no residual is supplied.
        skip = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + skip)
class Bottleneck(nn.Module):
    """DLA bottleneck: 1x1 reduce -> 3x3 -> 1x1 restore (expansion 2)."""
    expansion = 2

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(Bottleneck, self).__init__()
        expansion = Bottleneck.expansion
        bottle_planes = planes // expansion
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        skip = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + skip)
class BottleneckX(nn.Module):
    """ResNeXt-style grouped bottleneck (cardinality 32) for DLA."""
    expansion = 2
    cardinality = 32

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BottleneckX, self).__init__()
        cardinality = BottleneckX.cardinality
        bottle_planes = planes * cardinality // 32
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation, bias=False,
                               dilation=dilation, groups=cardinality)
        self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        skip = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + skip)
class Root(nn.Module):
    """DLA aggregation node: concat children, 1x1 conv + BN, optional residual, ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(Root, self).__init__()
        # NOTE: the conv is always 1x1; kernel_size only determines padding
        # (preserved from the original implementation).
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1,
            stride=1, bias=False, padding=(kernel_size - 1) // 2)
        self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        children = x
        out = self.bn(self.conv(torch.cat(x, 1)))
        if self.residual:
            # Residual connection to the first child.
            out = out + children[0]
        return self.relu(out)
class Tree(nn.Module):
    """Recursive DLA aggregation tree.

    A tree of `levels` depth built from two sub-trees (or two leaf blocks
    when levels == 1) whose outputs are fused by a Root node at the leaf
    level.  `level_root` feeds the (possibly downsampled) input into the
    root as an extra child.
    """

    def __init__(self, levels, block, in_channels, out_channels, stride=1,
                 level_root=False, root_dim=0, root_kernel_size=1,
                 dilation=1, root_residual=False):
        super(Tree, self).__init__()
        if root_dim == 0:
            root_dim = 2 * out_channels
        if level_root:
            root_dim += in_channels
        if levels == 1:
            # Leaf: two residual blocks in sequence.
            self.tree1 = block(in_channels, out_channels, stride,
                               dilation=dilation)
            self.tree2 = block(out_channels, out_channels, 1,
                               dilation=dilation)
        else:
            # Recurse: tree2's root also aggregates tree1's output.
            self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
                              stride, root_dim=0,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
            self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
                              root_dim=root_dim + out_channels,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
        if levels == 1:
            self.root = Root(root_dim, out_channels, root_kernel_size,
                             root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        if stride > 1:
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if in_channels != out_channels:
            # 1x1 projection so the residual matches out_channels.
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
            )

    def forward(self, x, residual=None, children=None):
        # NOTE(review): the incoming `residual` argument is overwritten
        # below and therefore never used — preserved from upstream DLA.
        children = [] if children is None else children
        bottom = self.downsample(x) if self.downsample else x
        residual = self.project(bottom) if self.project else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class DLA(nn.Module):
    """Deep Layer Aggregation backbone.

    forward() returns the list of all six level outputs (one per
    resolution), not a single tensor.
    """

    def __init__(self, levels, channels, num_classes=1000,
                 block=BasicBlock, residual_root=False, linear_root=False):
        super(DLA, self).__init__()
        self.channels = channels
        self.num_classes = num_classes
        self.base_layer = nn.Sequential(
            nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                      padding=3, bias=False),
            nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True))
        # Levels 0-1 are plain conv stacks; levels 2-5 are aggregation trees.
        self.level0 = self._make_conv_level(
            channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(
            channels[0], channels[1], levels[1], stride=2)
        self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
                           level_root=False,
                           root_residual=residual_root)
        self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
                           level_root=True, root_residual=residual_root)
        self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
                           level_root=True, root_residual=residual_root)
        self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
                           level_root=True, root_residual=residual_root)
        # for m in self.modules():
        #     if isinstance(m, nn.Conv2d):
        #         n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        #         m.weight.data.normal_(0, math.sqrt(2. / n))
        #     elif isinstance(m, nn.BatchNorm2d):
        #         m.weight.data.fill_(1)
        #         m.bias.data.zero_()

    def _make_level(self, block, inplanes, planes, blocks, stride=1):
        # NOTE(review): appears unused in this file; kept for upstream parity.
        downsample = None
        if stride != 1 or inplanes != planes:
            downsample = nn.Sequential(
                nn.MaxPool2d(stride, stride=stride),
                nn.Conv2d(inplanes, planes,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample=downsample))
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)

    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        """Stack `convs` Conv/BN/ReLU triples; only the first conv strides."""
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(inplanes, planes, kernel_size=3,
                          stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True)])
            inplanes = planes
        return nn.Sequential(*modules)

    def forward(self, x):
        """Return the outputs of level0..level5 as a list."""
        y = []
        x = self.base_layer(x)
        for i in range(6):
            x = getattr(self, 'level{}'.format(i))(x)
            y.append(x)
        return y

    def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
        """Load pretrained weights from a local .pth file or the model zoo.

        Also (re)creates self.fc to match the checkpoint's classifier size
        so load_state_dict succeeds.
        """
        # fc = self.fc
        if name.endswith('.pth'):
            model_weights = torch.load(data + name)
        else:
            model_url = get_model_url(data, name, hash)
            model_weights = model_zoo.load_url(model_url)
        num_classes = len(model_weights[list(model_weights.keys())[-1]])
        self.fc = nn.Conv2d(
            self.channels[-1], num_classes,
            kernel_size=1, stride=1, padding=0, bias=True)
        self.load_state_dict(model_weights)
        # self.fc = fc
def dla34(pretrained=True, **kwargs):
    """Construct a DLA-34 backbone, optionally with ImageNet-pretrained weights."""
    net = DLA([1, 1, 1, 2, 2, 1],
              [16, 32, 64, 128, 256, 512],
              block=BasicBlock, **kwargs)
    if pretrained:
        net.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
    return net
class Identity(nn.Module):
    """No-op module: forward returns its input unchanged."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
def fill_fc_weights(layers):
    """Zero the bias of every Conv2d in `layers` (weights are left untouched)."""
    for module in layers.modules():
        if isinstance(module, nn.Conv2d) and module.bias is not None:
            nn.init.constant_(module.bias, 0)
def fill_up_weights(up):
    """Initialise a ConvTranspose2d in place with bilinear-upsampling weights."""
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    c = (2 * f - 1 - f % 2) / (2. * f)
    for i in range(w.size(2)):
        row_weight = 1 - math.fabs(i / f - c)
        for j in range(w.size(3)):
            w[0, 0, i, j] = row_weight * (1 - math.fabs(j / f - c))
    # Every filter shares the same bilinear kernel as filter 0.
    for ch in range(1, w.size(0)):
        w[ch, 0, :, :] = w[0, 0, :, :]
class DeformConv(nn.Module):
    """Deformable convolution (DCNv2) followed by BatchNorm and ReLU."""

    def __init__(self, chi, cho):
        super(DeformConv, self).__init__()
        # Registration order (actf before conv) preserved for state_dict parity.
        self.actf = nn.Sequential(
            nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True)
        )
        self.conv = DCN(chi, cho, kernel_size=(3, 3), stride=1, padding=1,
                        dilation=1, deformable_groups=1)

    def forward(self, x):
        return self.actf(self.conv(x))
class IDAUp(nn.Module):
    """Iterative Deep Aggregation: upsample and fuse a range of feature maps.

    forward() mutates `layers` IN PLACE: every layers[i] for
    i in (startp, endp) is projected to `o` channels, upsampled, and fused
    with layers[i - 1].  Callers rely on this in-place behaviour.
    """

    def __init__(self, o, channels, up_f):
        super(IDAUp, self).__init__()
        for i in range(1, len(channels)):
            c = channels[i]
            f = int(up_f[i])
            proj = DeformConv(c, o)
            node = DeformConv(o, o)
            # Grouped (depthwise) transposed conv with bilinear init.
            up = nn.ConvTranspose2d(o, o, f * 2, stride=f,
                                    padding=f // 2, output_padding=0,
                                    groups=o, bias=False)
            fill_up_weights(up)
            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
            setattr(self, 'node_' + str(i), node)

    def forward(self, layers, startp, endp):
        for i in range(startp + 1, endp):
            upsample = getattr(self, 'up_' + str(i - startp))
            project = getattr(self, 'proj_' + str(i - startp))
            layers[i] = upsample(project(layers[i]))
            node = getattr(self, 'node_' + str(i - startp))
            layers[i] = node(layers[i] + layers[i - 1])
class DLAUp(nn.Module):
    """Stack of IDAUp stages that progressively merges DLA levels.

    forward() returns the merged outputs ordered fine -> coarse; it also
    mutates the `layers` list via the in-place IDAUp stages.
    """

    def __init__(self, startp, channels, scales, in_channels=None):
        super(DLAUp, self).__init__()
        self.startp = startp
        if in_channels is None:
            in_channels = channels
        self.channels = channels
        channels = list(channels)
        scales = np.array(scales, dtype=int)
        for i in range(len(channels) - 1):
            # j walks from the second-coarsest level towards the finest.
            j = -i - 2
            setattr(self, 'ida_{}'.format(i),
                    IDAUp(channels[j], in_channels[j:],
                          scales[j:] // scales[j]))
            # After merging, everything from j+1 on lives at scale/width j.
            scales[j + 1:] = scales[j]
            in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]

    def forward(self, layers):
        out = [layers[-1]]  # start with 32
        for i in range(len(layers) - self.startp - 1):
            ida = getattr(self, 'ida_{}'.format(i))
            ida(layers, len(layers) -i - 2, len(layers))
            out.insert(0, layers[-1])
        return out
class Interpolate(nn.Module):
    """Module wrapper around F.interpolate with a fixed scale and mode."""

    def __init__(self, scale, mode):
        super(Interpolate, self).__init__()
        self.scale = scale
        self.mode = mode

    def forward(self, x):
        return F.interpolate(
            x, scale_factor=self.scale, mode=self.mode, align_corners=False)
class DLASeg(nn.Module):
    """DLA backbone + DLAUp/IDAUp decoder with per-head output convolutions.

    `heads` maps head name -> output channel count; each head becomes an
    attribute applied to the finest decoded feature map.
    """

    def __init__(self, base_name, heads, pretrained, down_ratio, final_kernel,
                 last_level, head_conv, out_channel=0):
        super(DLASeg, self).__init__()
        assert down_ratio in [2, 4, 8, 16]
        self.first_level = int(np.log2(down_ratio))
        self.last_level = last_level
        # Look up the backbone factory (e.g. dla34) by name.
        self.base = globals()[base_name](pretrained=pretrained)
        channels = self.base.channels
        scales = [2 ** i for i in range(len(channels[self.first_level:]))]
        self.dla_up = DLAUp(self.first_level, channels[self.first_level:], scales)
        if out_channel == 0:
            out_channel = channels[self.first_level]
        self.ida_up = IDAUp(out_channel, channels[self.first_level:self.last_level],
                            [2 ** i for i in range(self.last_level - self.first_level)])
        self.heads = heads
        for head in self.heads:
            classes = self.heads[head]
            if head_conv > 0:
                fc = nn.Sequential(
                  nn.Conv2d(channels[self.first_level], head_conv,
                    kernel_size=3, padding=1, bias=True),
                  nn.ReLU(inplace=True),
                  nn.Conv2d(head_conv, classes,
                    kernel_size=final_kernel, stride=1,
                    padding=final_kernel // 2, bias=True))
                if 'hm' in head:
                    # Heatmap bias prior (focal-loss initialisation).
                    fc[-1].bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            else:
                fc = nn.Conv2d(channels[self.first_level], classes,
                  kernel_size=final_kernel, stride=1,
                  padding=final_kernel // 2, bias=True)
                if 'hm' in head:
                    fc.bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            self.__setattr__(head, fc)

    def forward(self, x):
        """Return a one-element list with the dict of head outputs."""
        x = self.base(x)
        x = self.dla_up(x)
        y = []
        for i in range(self.last_level - self.first_level):
            y.append(x[i].clone())
        self.ida_up(y, 0, len(y))
        z = {}
        for head in self.heads:
            z[head] = self.__getattr__(head)(y[-1])
        return [z]
def get_pose_net(num_layers, heads, head_conv=256, down_ratio=4):
    """Build a DLA-based detection network with the given output heads."""
    base_name = 'dla{}'.format(num_layers)
    return DLASeg(base_name, heads,
                  pretrained=True,
                  down_ratio=down_ratio,
                  final_kernel=1,
                  last_level=5,
                  head_conv=head_conv)
| 17,594 | 34.617409 | 106 | py |
houghnet | houghnet-master/src/lib/models/networks/msra_resnet.py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
BN_MOMENTUM = 0.1
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a padded, bias-free 3x3 convolution layer."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 conv/BN stages plus an additive skip path."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4), with skip path."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + identity)
class PoseResNet(nn.Module):
    """ResNet backbone + three deconv upsampling stages + per-head output convs.

    ``heads`` maps head name -> output channel count; each head becomes an
    attribute of the module applied to the upsampled feature map.
    """

    def __init__(self, block, layers, heads, head_conv, **kwargs):
        self.inplanes = 64
        self.deconv_with_bias = False
        self.heads = heads
        super(PoseResNet, self).__init__()
        # ResNet stem + 4 stages.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            3,
            [256, 256, 256],
            [4, 4, 4],
        )
        # self.final_layer = []
        for head in sorted(self.heads):
            num_output = self.heads[head]
            if head_conv > 0:
                fc = nn.Sequential(
                    nn.Conv2d(256, head_conv,
                              kernel_size=3, padding=1, bias=True),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(head_conv, num_output,
                              kernel_size=1, stride=1, padding=0))
            else:
                fc = nn.Conv2d(
                    in_channels=256,
                    out_channels=num_output,
                    kernel_size=1,
                    stride=1,
                    padding=0
                )
            self.__setattr__(head, fc)
        # self.final_layer = nn.ModuleList(self.final_layer)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one ResNet stage of `blocks` residual blocks."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _get_deconv_cfg(self, deconv_kernel, index):
        """Return (kernel, padding, output_padding) for a deconv kernel size.

        Raises ValueError for unsupported sizes; previously an unsupported
        size fell through and crashed with UnboundLocalError.
        """
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            raise ValueError(
                'unsupported deconv kernel size: {}'.format(deconv_kernel))
        return deconv_kernel, padding, output_padding

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build `num_layers` stages of ConvTranspose2d + BN + ReLU."""
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)
            planes = num_filters[i]
            layers.append(
                nn.ConvTranspose2d(
                    in_channels=self.inplanes,
                    out_channels=planes,
                    kernel_size=kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=self.deconv_with_bias))
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return a one-element list holding the dict of head outputs."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.deconv_layers(x)
        ret = {}
        for head in self.heads:
            ret[head] = self.__getattr__(head)(x)
        return [ret]

    def init_weights(self, num_layers, pretrained=True):
        """Init deconv/head layers, then load ImageNet ResNet weights.

        Raises ValueError when pretrained=False (no local init path exists).
        """
        if pretrained:
            # print('=> init resnet deconv weights from normal distribution')
            for _, m in self.deconv_layers.named_modules():
                if isinstance(m, nn.ConvTranspose2d):
                    nn.init.normal_(m.weight, std=0.001)
                    if self.deconv_with_bias:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
            # print('=> init final conv weights from normal distribution')
            for head in self.heads:
                final_layer = self.__getattr__(head)
                for i, m in enumerate(final_layer.modules()):
                    if isinstance(m, nn.Conv2d):
                        # Only touch the final conv of each head (output
                        # channels equal to the head's class count).
                        if m.weight.shape[0] == self.heads[head]:
                            if 'hm' in head:
                                # Heatmap bias prior (focal-loss init).
                                nn.init.constant_(m.bias, -2.19)
                            else:
                                nn.init.normal_(m.weight, std=0.001)
                                nn.init.constant_(m.bias, 0)
            # pretrained_state_dict = torch.load(pretrained)
            url = model_urls['resnet{}'.format(num_layers)]
            pretrained_state_dict = model_zoo.load_url(url)
            print('=> loading pretrained model {}'.format(url))
            self.load_state_dict(pretrained_state_dict, strict=False)
        else:
            # Typo fixed: 'dose' -> 'does'.
            print('=> imagenet pretrained model does not exist')
            print('=> please download it first')
            raise ValueError('imagenet pretrained model does not exist')
# ResNet depth -> (residual block class, number of blocks per stage).
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
               34: (BasicBlock, [3, 4, 6, 3]),
               50: (Bottleneck, [3, 4, 6, 3]),
               101: (Bottleneck, [3, 4, 23, 3]),
               152: (Bottleneck, [3, 8, 36, 3])}
def get_pose_net(num_layers, heads, head_conv):
    """Build a ResNet-backbone pose network and load ImageNet weights."""
    block_class, layers = resnet_spec[num_layers]
    net = PoseResNet(block_class, layers, heads, head_conv=head_conv)
    net.init_weights(num_layers, pretrained=True)
    return net
| 10,167 | 35.185053 | 94 | py |
houghnet | houghnet-master/src/lib/models/networks/large_hourglass.py | # ------------------------------------------------------------------------------
# This code is base on
# CornerNet (https://github.com/princeton-vl/CornerNet)
# Copyright (c) 2018, University of Michigan
# Licensed under the BSD 3-Clause License
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
class convolution(nn.Module):
    """Conv -> (optional) BatchNorm -> ReLU block used throughout the hourglass."""

    def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
        super(convolution, self).__init__()
        pad = (k - 1) // 2
        # Bias is only needed when BatchNorm is disabled.
        self.conv = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(pad, pad),
                              stride=(stride, stride), bias=not with_bn)
        self.bn = nn.BatchNorm2d(out_dim) if with_bn else nn.Sequential()
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class fully_connected(nn.Module):
    """Linear -> (optional) BatchNorm1d -> ReLU block."""

    def __init__(self, inp_dim, out_dim, with_bn=True):
        super(fully_connected, self).__init__()
        self.with_bn = with_bn
        self.linear = nn.Linear(inp_dim, out_dim)
        if self.with_bn:
            self.bn = nn.BatchNorm1d(out_dim)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.linear(x)
        if self.with_bn:
            out = self.bn(out)
        return self.relu(out)
class residual(nn.Module):
    """Two-conv residual block; the skip path is projected by a 1x1 conv
    whenever the stride or the channel count changes.

    NOTE: `k` is accepted for factory-API parity but the kernels are fixed
    at 3x3 (as in the original implementation).
    """

    def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
        super(residual, self).__init__()
        self.conv1 = nn.Conv2d(inp_dim, out_dim, (3, 3), padding=(1, 1),
                               stride=(stride, stride), bias=False)
        self.bn1 = nn.BatchNorm2d(out_dim)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_dim, out_dim, (3, 3), padding=(1, 1),
                               bias=False)
        self.bn2 = nn.BatchNorm2d(out_dim)
        needs_projection = stride != 1 or inp_dim != out_dim
        self.skip = nn.Sequential(
            nn.Conv2d(inp_dim, out_dim, (1, 1), stride=(stride, stride),
                      bias=False),
            nn.BatchNorm2d(out_dim)
        ) if needs_projection else nn.Sequential()
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        main = self.bn2(self.conv2(self.relu1(self.bn1(self.conv1(x)))))
        return self.relu(main + self.skip(x))
def make_layer(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
layers = [layer(k, inp_dim, out_dim, **kwargs)]
for _ in range(1, modules):
layers.append(layer(k, out_dim, out_dim, **kwargs))
return nn.Sequential(*layers)
def make_layer_revr(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
layers = []
for _ in range(modules - 1):
layers.append(layer(k, inp_dim, inp_dim, **kwargs))
layers.append(layer(k, inp_dim, out_dim, **kwargs))
return nn.Sequential(*layers)
class MergeUp(nn.Module):
    """Merge two feature maps by element-wise addition."""

    def forward(self, up1, up2):
        merged = up1 + up2
        return merged
def make_merge_layer(dim):
    """Return an additive merge module; `dim` is unused but kept for the
    common factory signature."""
    return MergeUp()
def make_pool_layer(dim):
    """No-op pooling stage (an earlier variant used MaxPool2d(2, 2) here);
    downsampling is done by strided convs instead."""
    return nn.Sequential()
def make_unpool_layer(dim):
    """2x upsampling stage; `dim` is unused but kept for the factory API."""
    return nn.Upsample(scale_factor=2)
def make_kp_layer(cnv_dim, curr_dim, out_dim):
    """Prediction head: 3x3 conv block (no BN) followed by a 1x1 conv."""
    head = nn.Sequential(
        convolution(3, cnv_dim, curr_dim, with_bn=False),
        nn.Conv2d(curr_dim, out_dim, (1, 1)),
    )
    return head
def make_inter_layer(dim):
    """Residual block applied between consecutive hourglass stacks."""
    return residual(3, dim, dim)
def make_cnv_layer(inp_dim, out_dim):
    """3x3 conv-BN-ReLU applied to each hourglass output."""
    return convolution(3, inp_dim, out_dim)
class kp_module(nn.Module):
    """One recursive hourglass stage.

    Recurses `n` levels: each level has a full-resolution branch (`up1`)
    and a downsampled branch (`max1` -> `low1` -> `low2` -> `low3` ->
    `up2`), merged element-wise at the end.  `dims[i]`/`modules[i]` give
    the channel count and block count at recursion level i.
    """
    def __init__(
        self, n, dims, modules, layer=residual,
        make_up_layer=make_layer, make_low_layer=make_layer,
        make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
        make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
        make_merge_layer=make_merge_layer, **kwargs
    ):
        super(kp_module, self).__init__()

        self.n = n

        # Current level uses dims[0]/modules[0]; the recursion consumes
        # dims[1:]/modules[1:].
        curr_mod = modules[0]
        next_mod = modules[1]

        curr_dim = dims[0]
        next_dim = dims[1]

        # Full-resolution (skip) branch at this level.
        self.up1 = make_up_layer(
            3, curr_dim, curr_dim, curr_mod,
            layer=layer, **kwargs
        )
        # Pooling before the downsampled branch (this file's default
        # make_pool_layer is a no-op; downsampling happens in low1).
        self.max1 = make_pool_layer(curr_dim)
        self.low1 = make_hg_layer(
            3, curr_dim, next_dim, curr_mod,
            layer=layer, **kwargs
        )
        # Recurse while n > 1; at the innermost level use a plain block stack.
        self.low2 = kp_module(
            n - 1, dims[1:], modules[1:], layer=layer,
            make_up_layer=make_up_layer,
            make_low_layer=make_low_layer,
            make_hg_layer=make_hg_layer,
            make_hg_layer_revr=make_hg_layer_revr,
            make_pool_layer=make_pool_layer,
            make_unpool_layer=make_unpool_layer,
            make_merge_layer=make_merge_layer,
            **kwargs
        ) if self.n > 1 else \
        make_low_layer(
            3, next_dim, next_dim, next_mod,
            layer=layer, **kwargs
        )
        # Decoder side: bring channels back to curr_dim, then upsample 2x.
        self.low3 = make_hg_layer_revr(
            3, next_dim, curr_dim, curr_mod,
            layer=layer, **kwargs
        )
        self.up2 = make_unpool_layer(curr_dim)

        self.merge = make_merge_layer(curr_dim)

    def forward(self, x):
        """Run both branches and merge them (additive by default)."""
        up1 = self.up1(x)
        max1 = self.max1(x)
        low1 = self.low1(max1)
        low2 = self.low2(low1)
        low3 = self.low3(low2)
        up2 = self.up2(low3)
        return self.merge(up1, up2)
class exkp(nn.Module):
    """Stacked hourglass network (CornerNet's 'exkp').

    Builds `nstack` hourglass modules with intermediate supervision: each
    stack produces its own dict of head outputs, and (except for the last
    stack) its features are fused back into the trunk via 1x1 conv + BN
    projections (`inters_`, `cnvs_`) and an inter residual block.
    """
    def __init__(
        self, n, nstack, dims, modules, heads, pre=None, cnv_dim=256,
        make_tl_layer=None, make_br_layer=None,
        make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
        make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
        make_up_layer=make_layer, make_low_layer=make_layer,
        make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
        make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
        make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
        kp_layer=residual
    ):
        super(exkp, self).__init__()

        self.nstack = nstack
        self.heads = heads

        curr_dim = dims[0]

        # Stem: 4x total downsampling before the first hourglass.
        self.pre = nn.Sequential(
            convolution(7, 3, 128, stride=2),
            residual(3, 128, 256, stride=2)
        ) if pre is None else pre

        self.kps = nn.ModuleList([
            kp_module(
                n, dims, modules, layer=kp_layer,
                make_up_layer=make_up_layer,
                make_low_layer=make_low_layer,
                make_hg_layer=make_hg_layer,
                make_hg_layer_revr=make_hg_layer_revr,
                make_pool_layer=make_pool_layer,
                make_unpool_layer=make_unpool_layer,
                make_merge_layer=make_merge_layer
            ) for _ in range(nstack)
        ])
        self.cnvs = nn.ModuleList([
            make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
        ])

        # Inter-stack fusion modules (one fewer than the number of stacks).
        self.inters = nn.ModuleList([
            make_inter_layer(curr_dim) for _ in range(nstack - 1)
        ])

        self.inters_ = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
                nn.BatchNorm2d(curr_dim)
            ) for _ in range(nstack - 1)
        ])
        self.cnvs_ = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
                nn.BatchNorm2d(curr_dim)
            ) for _ in range(nstack - 1)
        ])

        ## keypoint heatmaps
        # One head module per stack; heads containing 'hm' get their final
        # bias filled with -2.19.  NOTE(review): this matches the focal-loss
        # prior initialisation used by CornerNet/CenterNet — confirm.
        for head in heads.keys():
            if 'hm' in head:
                module = nn.ModuleList([
                    make_heat_layer(
                        cnv_dim, curr_dim, heads[head]) for _ in range(nstack)
                ])
                self.__setattr__(head, module)
                for heat in self.__getattr__(head):
                    heat[-1].bias.data.fill_(-2.19)
            else:
                module = nn.ModuleList([
                    make_regr_layer(
                        cnv_dim, curr_dim, heads[head]) for _ in range(nstack)
                ])
                self.__setattr__(head, module)

        self.relu = nn.ReLU(inplace=True)

    def forward(self, image):
        """Return a list of per-stack dicts mapping head name -> tensor."""
        # print('image shape', image.shape)
        inter = self.pre(image)
        outs = []

        for ind in range(self.nstack):
            kp_, cnv_ = self.kps[ind], self.cnvs[ind]
            kp = kp_(inter)
            cnv = cnv_(kp)

            out = {}
            for head in self.heads:
                layer = self.__getattr__(head)[ind]
                y = layer(cnv)
                out[head] = y

            outs.append(out)
            # Fuse this stack's output back into the trunk for the next stack.
            if ind < self.nstack - 1:
                inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
                inter = self.relu(inter)
                inter = self.inters[ind](inter)
        return outs
def make_hg_layer(kernel, dim0, dim1, mod, layer=convolution, **kwargs):
layers = [layer(kernel, dim0, dim1, stride=2)]
layers += [layer(kernel, dim1, dim1) for _ in range(mod - 1)]
return nn.Sequential(*layers)
class HourglassNet(exkp):
    """Hourglass-104 configuration of `exkp` as used by CenterNet."""

    def __init__(self, heads, num_stacks=2):
        depth = 5
        stage_dims = [256, 256, 384, 384, 384, 512]
        stage_modules = [2, 2, 2, 2, 2, 4]

        super(HourglassNet, self).__init__(
            depth, num_stacks, stage_dims, stage_modules, heads,
            make_tl_layer=None,
            make_br_layer=None,
            make_pool_layer=make_pool_layer,
            make_hg_layer=make_hg_layer,
            kp_layer=residual, cnv_dim=256
        )
def get_large_hourglass_net(num_layers, heads, head_conv):
    """Factory with the repo-wide (num_layers, heads, head_conv) signature.

    `num_layers` and `head_conv` are ignored here: the large hourglass
    always uses two stacks and its own head construction.
    """
    return HourglassNet(heads, 2)
| 9,942 | 32.033223 | 118 | py |
houghnet | houghnet-master/src/lib/models/networks/houghnet_resnet.py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from src.lib.models.networks.hough_module import Hough
BN_MOMENTUM = 0.1
# torchvision-hosted ImageNet-pretrained ResNet checkpoints, keyed by depth.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution that preserves spatial size at stride 1."""
    conv = nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=1, bias=False,
    )
    return conv
class BasicBlock(nn.Module):
    """ResNet-18/34 basic residual block (two 3x3 convs)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # The identity path is projected only when a downsample module exists.
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet-50/101/152 bottleneck residual block (1x1 -> 3x3 -> 1x1)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out += shortcut
        return self.relu(out)
def fill_fc_weights(layers):
    """Initialise every Conv2d in `layers`: weight ~ N(0, 0.001), bias = 0."""
    for module in layers.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        nn.init.normal_(module.weight, std=0.001)
        # torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
        # torch.nn.init.xavier_normal_(m.weight.data)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
class HoughNetResNet(nn.Module):
    """ResNet backbone + deconv decoder + per-task heads, with optional
    Hough-voting modules attached to the heads listed in
    heads['voting_heads'].

    `heads` maps head name -> number of output channels; the special key
    'voting_heads' (a list of head names / patterns) is consumed here and
    removed from the dict.
    """

    def __init__(self, block, layers, heads, region_num, vote_field_size, model_v1, head_conv, **kwargs):
        self.inplanes = 64
        self.deconv_with_bias = False
        self.heads = heads
        self.region_num = region_num
        self.vote_field_size = vote_field_size

        super(HoughNetResNet, self).__init__()
        # Standard ResNet stem: 7x7 stride-2 conv + max-pool (4x downsample).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer2(
            3,
            [256, 128, 64],
            [4, 4, 4],
        )
        # self.final_layer = []

        # Consume the special 'voting_heads' entry; the remaining entries
        # are the real prediction heads.
        self.voting_heads = list(heads['voting_heads'])
        del heads['voting_heads']
        voting = False
        self.heads = heads
        for head in sorted(self.heads):
            num_output = self.heads[head]
            if head_conv > 0:
                fc = nn.Sequential(
                    nn.Conv2d(64, head_conv,
                              kernel_size=3, padding=1, bias=True),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(head_conv, num_output,
                              kernel_size=1, stride=1, padding=0))
                # NOTE(review): `head` is used as the regex PATTERN and the
                # configured voting head as the string; forward() instead
                # uses plain `in` membership — confirm both are intended.
                for voting_head in self.voting_heads:
                    if re.fullmatch(head, voting_head):
                        voting = True
                if voting:
                    # Each voting head predicts region_num vote maps per
                    # class, aggregated by the Hough module.
                    out_classes = int(num_output / self.region_num)
                    hough_voting = Hough(region_num=self.region_num,
                                         vote_field_size=self.vote_field_size,
                                         num_classes=out_classes,
                                         model_v1=model_v1)
                    self.__setattr__('voting_' + head, hough_voting)
                    voting = False
            else:
                # NOTE(review): in this branch voting heads get no Hough
                # module, yet forward() still looks one up for heads in
                # self.voting_heads — would raise AttributeError. Confirm
                # head_conv > 0 is always used with voting heads.
                fc = nn.Conv2d(
                    in_channels=64,
                    out_channels=num_output,
                    kernel_size=1,
                    stride=1,
                    padding=0
                )
            self.__setattr__(head, fc)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one ResNet stage; projects the shortcut when shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _get_deconv_cfg(self, deconv_kernel, index):
        """Map a deconv kernel size to (kernel, padding, output_padding).

        NOTE(review): an unsupported kernel size (not 4/3/2) falls through
        and raises UnboundLocalError on return.
        """
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0

        return deconv_kernel, padding, output_padding

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Plain deconv decoder (ConvTranspose2d-BN-ReLU per layer).

        NOTE(review): unused by __init__, which calls _make_deconv_layer2.
        """
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'

        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)

            planes = num_filters[i]
            layers.append(
                nn.ConvTranspose2d(
                    in_channels=self.inplanes,
                    out_channels=planes,
                    kernel_size=kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=self.deconv_with_bias))
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes

        return nn.Sequential(*layers)

    def _make_deconv_layer2(self, num_layers, num_filters, num_kernels):
        """Decoder used in practice: each step is a 3x3 conv (BN+ReLU)
        followed by a ConvTranspose2d upsample (BN+ReLU)."""
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'

        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)

            planes = num_filters[i]
            fc = nn.Conv2d(self.inplanes, planes,
                           kernel_size=3, stride=1,
                           padding=1, dilation=1, bias=False)
            fill_fc_weights(fc)
            up = nn.ConvTranspose2d(
                in_channels=planes,
                out_channels=planes,
                kernel_size=kernel,
                stride=2,
                padding=padding,
                output_padding=output_padding,
                bias=self.deconv_with_bias)

            layers.append(fc)
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            layers.append(up)
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes

        return nn.Sequential(*layers)

    def forward(self, x):
        """Return a one-element list holding the head-name -> tensor dict;
        voting heads are passed through their Hough module first."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.deconv_layers(x)
        ret = {}
        for head in self.heads:
            if head in self.voting_heads:
                voting_map_hm = self.__getattr__(head)(x)
                ret[head] = self.__getattr__('voting_' + head)(voting_map_hm)
            else:
                ret[head] = self.__getattr__(head)(x)
        return [ret]

    def init_weights(self, num_layers, pretrained=True):
        """Initialise decoder/head weights, then overlay ImageNet weights
        (strict=False, so decoder/head params keep their init)."""
        if pretrained:
            # print('=> init resnet deconv weights from normal distribution')
            for _, m in self.deconv_layers.named_modules():
                if isinstance(m, nn.ConvTranspose2d):
                    # print('=> init {}.weight as normal(0, 0.001)'.format(name))
                    # print('=> init {}.bias as 0'.format(name))
                    nn.init.normal_(m.weight, std=0.001)
                    if self.deconv_with_bias:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.BatchNorm2d):
                    # print('=> init {}.weight as 1'.format(name))
                    # print('=> init {}.bias as 0'.format(name))
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
            # print('=> init final conv weights from normal distribution')
            for head in self.heads:
              final_layer = self.__getattr__(head)
              for i, m in enumerate(final_layer.modules()):
                  if isinstance(m, nn.Conv2d):
                      # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                      # print('=> init {}.weight as normal(0, 0.001)'.format(name))
                      # print('=> init {}.bias as 0'.format(name))
                      # Only the final conv of each head (matching channel
                      # count) is re-initialised here.
                      if m.weight.shape[0] == self.heads[head]:
                          if 'hm' in head:
                              nn.init.constant_(m.weight, 0)
                              nn.init.constant_(m.bias, 0)
                          else:
                              nn.init.normal_(m.weight, std=0.001)
                              nn.init.constant_(m.bias, 0)
            #pretrained_state_dict = torch.load(pretrained)
            url = model_urls['resnet{}'.format(num_layers)]
            pretrained_state_dict = model_zoo.load_url(url)
            print('=> loading pretrained model {}'.format(url))
            self.load_state_dict(pretrained_state_dict, strict=False)
        else:
            # NOTE(review): "dose" is a typo ("does") in the log message;
            # left unchanged because it is runtime output.
            print('=> imagenet pretrained model dose not exist')
            print('=> please download it first')
            raise ValueError('imagenet pretrained model does not exist')
# Canonical ResNet depth -> (residual block class, per-stage block counts)
# lookup (He et al., "Deep Residual Learning for Image Recognition").
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
               34: (BasicBlock, [3, 4, 6, 3]),
               50: (Bottleneck, [3, 4, 6, 3]),
               101: (Bottleneck, [3, 4, 23, 3]),
               152: (Bottleneck, [3, 8, 36, 3])}
def get_houghnet_net(num_layers, heads, head_conv, region_num, model_v1, vote_field_size):
    """Build a ResNet-backbone HoughNet of depth `num_layers` and load
    ImageNet-pretrained backbone weights."""
    block, stage_blocks = resnet_spec[num_layers]
    net = HoughNetResNet(block, stage_blocks, heads, region_num,
                         vote_field_size, model_v1, head_conv=head_conv)
    net.init_weights(num_layers, pretrained=True)
    return net
| 13,125 | 36.289773 | 113 | py |
houghnet | houghnet-master/src/lib/models/networks/pose_dla_dcn_hough.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import logging
import numpy as np
from os.path import join
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from src.lib.models.networks.hough_module import Hough
import re
from .DCNv2.dcn_v2 import DCN
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
    """Build the dl.yf.io download URL for a pretrained DLA checkpoint."""
    filename = '{}-{}.pth'.format(name, hash)
    return join('http://dl.yf.io/dla/models', data, filename)
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free, padded 3x3 convolution."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=1, bias=False,
    )
class BasicBlock(nn.Module):
    """DLA basic block: two 3x3 (optionally dilated) convs with an
    externally injectable residual (see forward's `residual` argument)."""

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.stride = stride

    def forward(self, x, residual=None):
        # When no residual is supplied, the input itself is the shortcut.
        shortcut = x if residual is None else residual

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """DLA bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand, with an
    externally injectable residual."""

    expansion = 2

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(Bottleneck, self).__init__()
        bottle_planes = planes // Bottleneck.expansion
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        shortcut = x if residual is None else residual

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out += shortcut
        return self.relu(out)
class BottleneckX(nn.Module):
    """ResNeXt-style grouped bottleneck used by DLA-X variants."""

    expansion = 2
    cardinality = 32

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BottleneckX, self).__init__()
        groups = BottleneckX.cardinality
        # dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))
        # bottle_planes = dim * cardinality
        bottle_planes = planes * groups // 32
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation, bias=False,
                               dilation=dilation, groups=groups)
        self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        shortcut = x if residual is None else residual

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out += shortcut
        return self.relu(out)
class Root(nn.Module):
    """DLA aggregation node: concat children -> 1x1 conv -> BN ->
    (optional skip from the first child) -> ReLU.

    NOTE(review): the conv kernel is hard-coded to 1x1 while the padding is
    derived from `kernel_size`; with the default kernel_size=1 the padding
    is 0, so spatial size is preserved.
    """

    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(Root, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1,
            stride=1, bias=False, padding=(kernel_size - 1) // 2)
        self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        children = x
        out = self.conv(torch.cat(children, 1))
        out = self.bn(out)
        if self.residual:
            out += children[0]
        return self.relu(out)
class Tree(nn.Module):
    """Recursive DLA aggregation tree.

    At levels == 1 the tree is two blocks joined by a Root node; deeper
    trees nest two sub-trees, with the second tree receiving the first
    tree's output plus accumulated `children` for its root.
    """
    def __init__(self, levels, block, in_channels, out_channels, stride=1,
                 level_root=False, root_dim=0, root_kernel_size=1,
                 dilation=1, root_residual=False):
        super(Tree, self).__init__()
        # Root input width: both sub-branches, plus the level input when
        # this tree is a level root.
        if root_dim == 0:
            root_dim = 2 * out_channels
        if level_root:
            root_dim += in_channels
        if levels == 1:
            self.tree1 = block(in_channels, out_channels, stride,
                               dilation=dilation)
            self.tree2 = block(out_channels, out_channels, 1,
                               dilation=dilation)
        else:
            self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
                              stride, root_dim=0,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
            self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
                              root_dim=root_dim + out_channels,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
        if levels == 1:
            self.root = Root(root_dim, out_channels, root_kernel_size,
                             root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        if stride > 1:
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if in_channels != out_channels:
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
            )

    def forward(self, x, residual=None, children=None):
        children = [] if children is None else children
        bottom = self.downsample(x) if self.downsample else x
        # NOTE(review): the incoming `residual` argument is overwritten
        # here — the shortcut is always derived from `bottom`.
        residual = self.project(bottom) if self.project else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            x2 = self.tree2(x1)
            # The Root concatenates both branch outputs and any children.
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class DLA(nn.Module):
    """Deep Layer Aggregation backbone (Yu et al.).

    `levels`/`channels` configure the six stages; forward returns the list
    of all six stage outputs (strides 1, 2, 4, 8, 16, 32 relative to the
    input, given each stage past level1 halves resolution).
    """
    def __init__(self, levels, channels, num_classes=1000,
                 block=BasicBlock, residual_root=False, linear_root=False):
        super(DLA, self).__init__()
        self.channels = channels
        self.num_classes = num_classes
        # Stride-1 stem: 7x7 conv keeps full resolution.
        self.base_layer = nn.Sequential(
            nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                      padding=3, bias=False),
            nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True))
        self.level0 = self._make_conv_level(
            channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(
            channels[0], channels[1], levels[1], stride=2)
        self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
                           level_root=False,
                           root_residual=residual_root)
        self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
                           level_root=True, root_residual=residual_root)
        self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
                           level_root=True, root_residual=residual_root)
        self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
                           level_root=True, root_residual=residual_root)

        # for m in self.modules():
        #     if isinstance(m, nn.Conv2d):
        #         n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        #         m.weight.data.normal_(0, math.sqrt(2. / n))
        #     elif isinstance(m, nn.BatchNorm2d):
        #         m.weight.data.fill_(1)
        #         m.bias.data.zero_()

    def _make_level(self, block, inplanes, planes, blocks, stride=1):
        """Build a stage of residual blocks with a pooled/projected shortcut.

        NOTE(review): appears unused in this class, and it passes a
        `downsample=` kwarg that this file's BasicBlock/Bottleneck do not
        accept — calling it would raise TypeError.
        """
        downsample = None
        if stride != 1 or inplanes != planes:
            downsample = nn.Sequential(
                nn.MaxPool2d(stride, stride=stride),
                nn.Conv2d(inplanes, planes,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(inplanes, planes, stride, downsample=downsample))
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))

        return nn.Sequential(*layers)

    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        """Stack `convs` conv-BN-ReLU units; only the first may stride."""
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(inplanes, planes, kernel_size=3,
                          stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True)])
            inplanes = planes
        return nn.Sequential(*modules)

    def forward(self, x):
        """Return the list of all six per-stage feature maps."""
        y = []
        x = self.base_layer(x)
        for i in range(6):
            x = getattr(self, 'level{}'.format(i))(x)
            y.append(x)
        return y

    def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
        """Load pretrained weights from a local .pth (when `name` ends in
        '.pth') or from the dl.yf.io model zoo; recreates `self.fc` to
        match the checkpoint's classifier width."""
        # fc = self.fc
        if name.endswith('.pth'):
            model_weights = torch.load(data + name)
        else:
            model_url = get_model_url(data, name, hash)
            model_weights = model_zoo.load_url(model_url)
        # Infer the classifier size from the last tensor in the state dict.
        num_classes = len(model_weights[list(model_weights.keys())[-1]])
        self.fc = nn.Conv2d(
            self.channels[-1], num_classes,
            kernel_size=1, stride=1, padding=0, bias=True)
        self.load_state_dict(model_weights)
        # self.fc = fc
def dla34(pretrained=True, **kwargs):  # DLA-34
    """Construct DLA-34 and (by default) load ImageNet-pretrained weights."""
    net = DLA([1, 1, 1, 2, 2, 1],
              [16, 32, 64, 128, 256, 512],
              block=BasicBlock, **kwargs)
    if pretrained:
        net.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
    return net
class Identity(nn.Module):
    """Pass-through module (functionally equivalent to nn.Identity)."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
def fill_fc_weights(layers):
    """Zero the biases of every Conv2d in `layers`; weights are untouched."""
    for module in layers.modules():
        if isinstance(module, nn.Conv2d) and module.bias is not None:
            nn.init.constant_(module.bias, 0)
def fill_up_weights(up):
    """Initialise a (grouped) ConvTranspose2d with a bilinear-upsampling
    kernel: the kernel is computed once for weight[0, 0] and copied into
    every other output channel's first input slice."""
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    c = (2 * f - 1 - f % 2) / (2. * f)
    for i in range(w.size(2)):
        for j in range(w.size(3)):
            w[0, 0, i, j] = \
                (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
    for ch in range(1, w.size(0)):
        w[ch, 0, :, :] = w[0, 0, :, :]
class DeformConv(nn.Module):
    """DCNv2 deformable 3x3 convolution followed by BN + ReLU."""

    def __init__(self, chi, cho):
        super(DeformConv, self).__init__()
        self.actf = nn.Sequential(
            nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True)
        )
        self.conv = DCN(chi, cho, kernel_size=(3, 3), stride=1,
                        padding=1, dilation=1, deformable_groups=1)

    def forward(self, x):
        return self.actf(self.conv(x))
class IDAUp(nn.Module):
    """Iterative Deep Aggregation upsampling.

    For each input scale i >= 1: project to `o` channels (DeformConv),
    upsample by up_f[i] with a bilinear-initialised grouped deconv, then
    fuse with the previous scale through a DeformConv node.
    """
    def __init__(self, o, channels, up_f):
        super(IDAUp, self).__init__()
        for i in range(1, len(channels)):
            c = channels[i]
            f = int(up_f[i])
            proj = DeformConv(c, o)
            node = DeformConv(o, o)

            # groups=o makes the deconv depthwise; fill_up_weights seeds it
            # with a fixed bilinear kernel.
            up = nn.ConvTranspose2d(o, o, f * 2, stride=f,
                                    padding=f // 2, output_padding=0,
                                    groups=o, bias=False)
            fill_up_weights(up)

            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
            setattr(self, 'node_' + str(i), node)

    def forward(self, layers, startp, endp):
        """Fuse layers[startp:endp] IN PLACE (mutates `layers`, returns None)."""
        for i in range(startp + 1, endp):
            upsample = getattr(self, 'up_' + str(i - startp))
            project = getattr(self, 'proj_' + str(i - startp))
            layers[i] = upsample(project(layers[i]))
            node = getattr(self, 'node_' + str(i - startp))
            layers[i] = node(layers[i] + layers[i - 1])
class DLAUp(nn.Module):
    """DLA decoder: a stack of IDAUp modules that progressively merge the
    backbone's multi-scale features from deep to shallow.

    NOTE(review): when `in_channels` is None it aliases the `channels`
    argument, and the slice assignment below then mutates the caller's
    list in place — callers should pass a fresh list/slice (DLASegHough
    passes a slice, which is safe).
    """
    def __init__(self, startp, channels, scales, in_channels=None):
        super(DLAUp, self).__init__()
        self.startp = startp
        if in_channels is None:
            in_channels = channels
        self.channels = channels
        channels = list(channels)
        scales = np.array(scales, dtype=int)
        # Build one IDAUp per merge step, working from the deepest pair of
        # levels (j = -2) outwards, and rewrite scales/in_channels so the
        # next step sees the already-merged resolution/width.
        for i in range(len(channels) - 1):
            j = -i - 2
            setattr(self, 'ida_{}'.format(i),
                    IDAUp(channels[j], in_channels[j:],
                          scales[j:] // scales[j]))
            scales[j + 1:] = scales[j]
            in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]

    def forward(self, layers):
        """Return per-step aggregated outputs, deepest merge last.

        Mutates `layers` in place via the IDAUp calls.
        """
        out = [layers[-1]] # start with 32
        for i in range(len(layers) - self.startp - 1):
            ida = getattr(self, 'ida_{}'.format(i))
            ida(layers, len(layers) -i - 2, len(layers))
            out.insert(0, layers[-1])
        return out
class Interpolate(nn.Module):
    """Module wrapper around F.interpolate with a fixed scale and mode."""

    def __init__(self, scale, mode):
        super(Interpolate, self).__init__()
        self.scale = scale
        self.mode = mode

    def forward(self, x):
        return F.interpolate(x, scale_factor=self.scale, mode=self.mode,
                             align_corners=False)
class DLASegHough(nn.Module):
    """DLA backbone + DLAUp/IDAUp decoder + per-task heads, with Hough
    voting modules attached to the heads listed in heads['voting_heads']
    (that key is consumed and removed here).
    """
    def __init__(self, base_name, heads, pretrained, down_ratio, final_kernel,
                 last_level, region_num, vote_field_size, model_v1, head_conv, out_channel=0):
        super(DLASegHough, self).__init__()
        assert down_ratio in [2, 4, 8, 16]
        self.region_num = region_num
        self.vote_field_size = vote_field_size
        # self.num_classes = int(heads['hm_hp'] / region_num)
        # first_level: index of the shallowest backbone level the decoder uses.
        self.first_level = int(np.log2(down_ratio))
        self.last_level = last_level
        # Look up the backbone factory (e.g. dla34) by name in this module.
        self.base = globals()[base_name](pretrained=pretrained)
        channels = self.base.channels
        scales = [2 ** i for i in range(len(channels[self.first_level:]))]
        self.dla_up = DLAUp(self.first_level, channels[self.first_level:], scales)

        if out_channel == 0:
            out_channel = channels[self.first_level]

        self.ida_up = IDAUp(out_channel, channels[self.first_level:self.last_level],
                            [2 ** i for i in range(self.last_level - self.first_level)])

        self.voting_heads = list(heads['voting_heads'])
        del heads['voting_heads']
        voting = False
        self.heads = heads
        for head in self.heads:
            classes = self.heads[head]
            if head_conv > 0:
                fc = nn.Sequential(
                  nn.Conv2d(channels[self.first_level], head_conv,
                    kernel_size=3, padding=1, bias=True),
                  nn.ReLU(inplace=True),
                  nn.Conv2d(head_conv, classes,
                    kernel_size=final_kernel, stride=1,
                    padding=final_kernel // 2, bias=True))
                if 'hm' in head:
                    # NOTE(review): -2.19 matches the focal-loss prior bias
                    # init used by CenterNet — confirm.
                    fc[-1].bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
                # NOTE(review): `head` is used as the regex PATTERN here,
                # while forward() uses plain `in` membership — confirm.
                for voting_head in self.voting_heads:
                    if re.fullmatch(head, voting_head):
                        voting = True
                if voting:
                    # Voting heads start from an all-zero final layer; the
                    # Hough module aggregates region_num vote maps per class.
                    fc[-1].bias.data.fill_(0)
                    fc[-1].weight.data.fill_(0)
                    out_classes = int(classes / self.region_num)
                    hough_voting = Hough(region_num=self.region_num,
                                         vote_field_size=self.vote_field_size,
                                         num_classes=out_classes,
                                         model_v1=model_v1)
                    self.__setattr__('voting_' + head, hough_voting)
                    voting = False
            else:
                # NOTE(review): in this branch voting heads get no Hough
                # module, yet forward() would still look one up — would
                # raise AttributeError for a voting head. Confirm head_conv
                # is always > 0 when voting heads are configured.
                fc = nn.Conv2d(channels[self.first_level], classes,
                    kernel_size=final_kernel, stride=1,
                    padding=final_kernel // 2, bias=True)
                if 'hm' in head:
                    fc.bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            self.__setattr__(head, fc)

    def forward(self, x):
        """Return a one-element list holding the head-name -> tensor dict;
        voting heads are passed through their Hough module first."""
        x = self.base(x)
        x = self.dla_up(x)

        y = []
        # Clone because ida_up fuses the list entries in place.
        for i in range(self.last_level - self.first_level):
            y.append(x[i].clone())
        self.ida_up(y, 0, len(y))

        z = {}
        for head in self.heads:
            if head in self.voting_heads:
                voting_map_hm = self.__getattr__(head)(y[-1])
                z[head] = self.__getattr__('voting_' + head)(voting_map_hm)
            else:
                z[head] = self.__getattr__(head)(y[-1])
        return [z]
def get_pose_net(num_layers, heads, region_num, vote_field_size, head_conv=256, down_ratio=4, model_v1=False):
    """Build a DLA-based HoughNet detector (pretrained backbone, 2 conv heads)."""
    net = DLASegHough(
        'dla{}'.format(num_layers), heads,
        pretrained=True,
        down_ratio=down_ratio,
        final_kernel=1,
        last_level=5,
        region_num=region_num,
        vote_field_size=vote_field_size,
        model_v1=model_v1,
        head_conv=head_conv,
    )
    return net
| 19,013 | 35.28626 | 111 | py |
houghnet | houghnet-master/src/lib/models/networks/dlav0.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from os.path import join
import torch
from torch import nn
import torch.utils.model_zoo as model_zoo
import numpy as np
BatchNorm = nn.BatchNorm2d
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
    """Compose the dl.yf.io download URL for a pretrained DLA checkpoint."""
    checkpoint = '-'.join([name, hash]) + '.pth'
    return join('http://dl.yf.io/dla/models', data, checkpoint)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 padded convolution without bias."""
    conv_kwargs = dict(kernel_size=3, stride=stride, padding=1, bias=False)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
class BasicBlock(nn.Module):
    """DLA basic block: conv-BN-ReLU-conv-BN plus an injectable residual."""

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn1 = BatchNorm(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = BatchNorm(planes)
        self.stride = stride

    def forward(self, x, residual=None):
        # When no residual is supplied, the input itself is the shortcut.
        shortcut = x if residual is None else residual

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (DLA variant)."""

    expansion = 2

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(Bottleneck, self).__init__()
        # Inner width is the output width divided by the class-level expansion.
        expansion = Bottleneck.expansion
        bottle_planes = planes // expansion
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = BatchNorm(bottle_planes)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = BatchNorm(bottle_planes)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = BatchNorm(planes)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        shortcut = x if residual is None else residual
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class BottleneckX(nn.Module):
    """Grouped (ResNeXt-style) bottleneck residual block."""

    expansion = 2
    cardinality = 32

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BottleneckX, self).__init__()
        cardinality = BottleneckX.cardinality
        # Inner width scales with cardinality (identical to planes at the
        # default cardinality of 32).
        bottle_planes = planes * cardinality // 32
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = BatchNorm(bottle_planes)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation, bias=False,
                               dilation=dilation, groups=cardinality)
        self.bn2 = BatchNorm(bottle_planes)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = BatchNorm(planes)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        shortcut = x if residual is None else residual
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class Root(nn.Module):
    """Aggregation node: concatenate children, then 1x1 conv + BN + ReLU.

    NOTE(review): the conv kernel size is hard-coded to 1 while padding is
    derived from the ``kernel_size`` argument; all call sites here pass 1,
    so this stays consistent.
    """

    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(Root, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1,
            stride=1, bias=False, padding=(kernel_size - 1) // 2)
        self.bn = BatchNorm(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        children = x
        merged = self.bn(self.conv(torch.cat(children, 1)))
        if self.residual:
            # Skip connection from the first child.
            merged += children[0]
        return self.relu(merged)
class Tree(nn.Module):
    """Recursive DLA aggregation tree of depth ``levels``.

    A depth-1 tree is two blocks joined by a Root node; deeper trees nest
    two sub-trees, with ``root_dim`` accumulating the channels that feed the
    final Root.
    """
    def __init__(self, levels, block, in_channels, out_channels, stride=1,
                 level_root=False, root_dim=0, root_kernel_size=1,
                 dilation=1, root_residual=False):
        super(Tree, self).__init__()
        # root_dim == 0 means "use the default of both children's channels".
        if root_dim == 0:
            root_dim = 2 * out_channels
        if level_root:
            root_dim += in_channels
        if levels == 1:
            self.tree1 = block(in_channels, out_channels, stride,
                               dilation=dilation)
            self.tree2 = block(out_channels, out_channels, 1,
                               dilation=dilation)
        else:
            self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
                              stride, root_dim=0,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
            self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
                              root_dim=root_dim + out_channels,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
        # Only leaf trees own a Root; deeper trees delegate to tree2's Root.
        if levels == 1:
            self.root = Root(root_dim, out_channels, root_kernel_size,
                             root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        if stride > 1:
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if in_channels != out_channels:
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=1, bias=False),
                BatchNorm(out_channels)
            )
    def forward(self, x, residual=None, children=None):
        # NOTE(review): the ``residual`` argument is overwritten below, so a
        # caller-supplied residual is ignored.
        children = [] if children is None else children
        bottom = self.downsample(x) if self.downsample else x
        residual = self.project(bottom) if self.project else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class DLA(nn.Module):
    """Deep Layer Aggregation backbone.

    With ``return_levels=True`` the forward pass returns the list of the six
    per-level feature maps instead of classification logits.
    """
    def __init__(self, levels, channels, num_classes=1000,
                 block=BasicBlock, residual_root=False, return_levels=False,
                 pool_size=7, linear_root=False):
        super(DLA, self).__init__()
        self.channels = channels
        self.return_levels = return_levels
        self.num_classes = num_classes
        self.base_layer = nn.Sequential(
            nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                      padding=3, bias=False),
            BatchNorm(channels[0]),
            nn.ReLU(inplace=True))
        self.level0 = self._make_conv_level(
            channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(
            channels[0], channels[1], levels[1], stride=2)
        self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
                           level_root=False,
                           root_residual=residual_root)
        self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
                           level_root=True, root_residual=residual_root)
        self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
                           level_root=True, root_residual=residual_root)
        self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
                           level_root=True, root_residual=residual_root)
        self.avgpool = nn.AvgPool2d(pool_size)
        self.fc = nn.Conv2d(channels[-1], num_classes, kernel_size=1,
                            stride=1, padding=0, bias=True)
        # He-style init for convs; BN initialized to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, BatchNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    # NOTE(review): _make_level appears unused in this file; it also passes
    # ``downsample=`` to ``block``, which BasicBlock/Bottleneck do not accept,
    # so it would raise TypeError if called with those blocks — confirm before use.
    def _make_level(self, block, inplanes, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or inplanes != planes:
            downsample = nn.Sequential(
                nn.MaxPool2d(stride, stride=stride),
                nn.Conv2d(inplanes, planes,
                          kernel_size=1, stride=1, bias=False),
                BatchNorm(planes),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample=downsample))
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)
    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        # Stack of ``convs`` conv-BN-ReLU triples; only the first may stride.
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(inplanes, planes, kernel_size=3,
                          stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                BatchNorm(planes),
                nn.ReLU(inplace=True)])
            inplanes = planes
        return nn.Sequential(*modules)
    def forward(self, x):
        y = []
        x = self.base_layer(x)
        for i in range(6):
            x = getattr(self, 'level{}'.format(i))(x)
            y.append(x)
        if self.return_levels:
            return y
        else:
            x = self.avgpool(x)
            x = self.fc(x)
            x = x.view(x.size(0), -1)
            return x
    def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
        """Load pretrained weights from a local .pth or the DLA model zoo.

        Temporarily swaps ``self.fc`` for one matching the checkpoint's class
        count so load_state_dict succeeds, then restores the original head.
        """
        fc = self.fc
        if name.endswith('.pth'):
            model_weights = torch.load(data + name)
        else:
            model_url = get_model_url(data, name, hash)
            model_weights = model_zoo.load_url(model_url)
        # Infer the checkpoint's class count from its last parameter tensor.
        num_classes = len(model_weights[list(model_weights.keys())[-1]])
        self.fc = nn.Conv2d(
            self.channels[-1], num_classes,
            kernel_size=1, stride=1, padding=0, bias=True)
        self.load_state_dict(model_weights)
        self.fc = fc
def dla34(pretrained, **kwargs):  # DLA-34
    """Build DLA-34; optionally load ImageNet-pretrained weights."""
    net = DLA([1, 1, 1, 2, 2, 1],
              [16, 32, 64, 128, 256, 512],
              block=BasicBlock, **kwargs)
    if pretrained:
        net.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
    return net
def dla46_c(pretrained=None, **kwargs):  # DLA-46-C
    """Build DLA-46-C (bottleneck blocks); optionally load pretrained weights."""
    Bottleneck.expansion = 2
    net = DLA([1, 1, 1, 2, 2, 1],
              [16, 32, 64, 64, 128, 256],
              block=Bottleneck, **kwargs)
    if pretrained is not None:
        net.load_pretrained_model(pretrained, 'dla46_c')
    return net
def dla46x_c(pretrained=None, **kwargs):  # DLA-X-46-C
    """Build DLA-X-46-C (grouped bottlenecks); optionally load pretrained weights."""
    BottleneckX.expansion = 2
    net = DLA([1, 1, 1, 2, 2, 1],
              [16, 32, 64, 64, 128, 256],
              block=BottleneckX, **kwargs)
    if pretrained is not None:
        net.load_pretrained_model(pretrained, 'dla46x_c')
    return net
def dla60x_c(pretrained, **kwargs):  # DLA-X-60-C
    """Build DLA-X-60-C; optionally load ImageNet-pretrained weights."""
    BottleneckX.expansion = 2
    net = DLA([1, 1, 1, 2, 3, 1],
              [16, 32, 64, 64, 128, 256],
              block=BottleneckX, **kwargs)
    if pretrained:
        net.load_pretrained_model(data='imagenet', name='dla60x_c', hash='b870c45c')
    return net
def dla60(pretrained=None, **kwargs):  # DLA-60
    """Build DLA-60; optionally load pretrained weights."""
    Bottleneck.expansion = 2
    net = DLA([1, 1, 1, 2, 3, 1],
              [16, 32, 128, 256, 512, 1024],
              block=Bottleneck, **kwargs)
    if pretrained is not None:
        net.load_pretrained_model(pretrained, 'dla60')
    return net
def dla60x(pretrained=None, **kwargs):  # DLA-X-60
    """Build DLA-X-60; optionally load pretrained weights."""
    BottleneckX.expansion = 2
    net = DLA([1, 1, 1, 2, 3, 1],
              [16, 32, 128, 256, 512, 1024],
              block=BottleneckX, **kwargs)
    if pretrained is not None:
        net.load_pretrained_model(pretrained, 'dla60x')
    return net
def dla102(pretrained=None, **kwargs):  # DLA-102
    """Build DLA-102 (residual roots); optionally load pretrained weights."""
    Bottleneck.expansion = 2
    net = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
              block=Bottleneck, residual_root=True, **kwargs)
    if pretrained is not None:
        net.load_pretrained_model(pretrained, 'dla102')
    return net
def dla102x(pretrained=None, **kwargs):  # DLA-X-102
    """Build DLA-X-102 (residual roots); optionally load pretrained weights."""
    BottleneckX.expansion = 2
    net = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
              block=BottleneckX, residual_root=True, **kwargs)
    if pretrained is not None:
        net.load_pretrained_model(pretrained, 'dla102x')
    return net
def dla102x2(pretrained=None, **kwargs):  # DLA-X-102 64
    """Build DLA-X-102 with cardinality 64; optionally load pretrained weights."""
    BottleneckX.cardinality = 64
    net = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
              block=BottleneckX, residual_root=True, **kwargs)
    if pretrained is not None:
        net.load_pretrained_model(pretrained, 'dla102x2')
    return net
def dla169(pretrained=None, **kwargs):  # DLA-169
    """Build DLA-169 (residual roots); optionally load pretrained weights."""
    Bottleneck.expansion = 2
    net = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],
              block=Bottleneck, residual_root=True, **kwargs)
    if pretrained is not None:
        net.load_pretrained_model(pretrained, 'dla169')
    return net
def set_bn(bn):
    """Globally replace the BatchNorm class used by modules built afterwards.

    Only affects modules constructed after this call; already-built modules
    keep their existing normalization layers.
    """
    global BatchNorm
    BatchNorm = bn
    # Bug fix: the original also executed ``dla.BatchNorm = bn``, but no
    # ``dla`` module is imported in this file, so every call raised NameError.
    # Rebinding the module-global above is sufficient here.
class Identity(nn.Module):
    """No-op module: returns its input unchanged."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
def fill_up_weights(up):
    """Initialize a ConvTranspose2d weight as a bilinear upsampling kernel.

    The 2-D bilinear kernel is written into channel 0 and then copied to every
    other output channel (the per-group input-channel dimension is 1).
    """
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    c = (2 * f - 1 - f % 2) / (2. * f)
    for i in range(w.size(2)):
        for j in range(w.size(3)):
            w[0, 0, i, j] = (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
    for ch in range(1, w.size(0)):
        w[ch, 0, :, :] = w[0, 0, :, :]
class IDAUp(nn.Module):
    """Iterative Deep Aggregation upsampling.

    Each input is projected to ``out_dim`` channels and upsampled by its
    factor; adjacent maps are then fused pairwise by 'node' convs.
    """
    def __init__(self, node_kernel, out_dim, channels, up_factors):
        super(IDAUp, self).__init__()
        self.channels = channels
        self.out_dim = out_dim
        for i, c in enumerate(channels):
            if c == out_dim:
                proj = Identity()
            else:
                proj = nn.Sequential(
                    nn.Conv2d(c, out_dim,
                              kernel_size=1, stride=1, bias=False),
                    BatchNorm(out_dim),
                    nn.ReLU(inplace=True))
            f = int(up_factors[i])
            if f == 1:
                up = Identity()
            else:
                # Depthwise transposed conv initialized as fixed bilinear upsampling.
                up = nn.ConvTranspose2d(
                    out_dim, out_dim, f * 2, stride=f, padding=f // 2,
                    output_padding=0, groups=out_dim, bias=False)
                fill_up_weights(up)
            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
        for i in range(1, len(channels)):
            node = nn.Sequential(
                nn.Conv2d(out_dim * 2, out_dim,
                          kernel_size=node_kernel, stride=1,
                          padding=node_kernel // 2, bias=False),
                BatchNorm(out_dim),
                nn.ReLU(inplace=True))
            setattr(self, 'node_' + str(i), node)
        # He init for Conv2d only — ConvTranspose2d is not matched here, so
        # the bilinear init above is preserved.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, BatchNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def forward(self, layers):
        """Return (final fused map, list of intermediate fused maps)."""
        assert len(self.channels) == len(layers), \
            '{} vs {} layers'.format(len(self.channels), len(layers))
        layers = list(layers)
        for i, l in enumerate(layers):
            upsample = getattr(self, 'up_' + str(i))
            project = getattr(self, 'proj_' + str(i))
            layers[i] = upsample(project(l))
        x = layers[0]
        y = []
        for i in range(1, len(layers)):
            node = getattr(self, 'node_' + str(i))
            x = node(torch.cat([x, layers[i]], 1))
            y.append(x)
        return x, y
class DLAUp(nn.Module):
    """Stack of IDAUp stages fusing backbone levels from coarse to fine."""
    def __init__(self, channels, scales=(1, 2, 4, 8, 16), in_channels=None):
        super(DLAUp, self).__init__()
        if in_channels is None:
            in_channels = channels
        self.channels = channels
        channels = list(channels)
        scales = np.array(scales, dtype=int)
        for i in range(len(channels) - 1):
            j = -i - 2
            setattr(self, 'ida_{}'.format(i),
                    IDAUp(3, channels[j], in_channels[j:],
                          scales[j:] // scales[j]))
            scales[j + 1:] = scales[j]
            # NOTE(review): this mutates ``in_channels`` in place; when a list
            # is passed by the caller, the caller's list is modified too.
            in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
    def forward(self, layers):
        layers = list(layers)
        assert len(layers) > 1
        # Each ida stage refines the tail of ``layers``; the last stage's
        # output is the final high-resolution map.
        for i in range(len(layers) - 1):
            ida = getattr(self, 'ida_{}'.format(i))
            x, y = ida(layers[-i - 2:])
            layers[-i - 1:] = y
        return x
def fill_fc_weights(layers):
    """Init every Conv2d in ``layers``: weight ~ N(0, 0.001), bias zeroed."""
    for mod in layers.modules():
        if not isinstance(mod, nn.Conv2d):
            continue
        nn.init.normal_(mod.weight, std=0.001)
        if mod.bias is not None:
            nn.init.constant_(mod.bias, 0)
class DLASeg(nn.Module):
    """DLA backbone + DLAUp decoder with per-task output heads (CenterNet-style).

    ``heads`` maps head name -> number of output channels. Heads containing
    'hm' get their final bias set to -2.19 (focal-loss prior init); others
    get small-normal weights via fill_fc_weights.
    """
    def __init__(self, base_name, heads,
                 pretrained=True, down_ratio=4, head_conv=256):
        super(DLASeg, self).__init__()
        assert down_ratio in [2, 4, 8, 16]
        self.heads = heads
        self.first_level = int(np.log2(down_ratio))
        # Backbone factory (e.g. dla34) is looked up by name in this module.
        self.base = globals()[base_name](
            pretrained=pretrained, return_levels=True)
        channels = self.base.channels
        scales = [2 ** i for i in range(len(channels[self.first_level:]))]
        self.dla_up = DLAUp(channels[self.first_level:], scales=scales)
        '''
        self.fc = nn.Sequential(
            nn.Conv2d(channels[self.first_level], classes, kernel_size=1,
                      stride=1, padding=0, bias=True)
        )
        '''
        for head in self.heads:
            classes = self.heads[head]
            if head_conv > 0:
                # Two-layer head: 3x3 conv + ReLU + 1x1 output conv.
                fc = nn.Sequential(
                    nn.Conv2d(channels[self.first_level], head_conv,
                              kernel_size=3, padding=1, bias=True),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(head_conv, classes,
                              kernel_size=1, stride=1,
                              padding=0, bias=True))
                if 'hm' in head:
                    fc[-1].bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            else:
                # Single 1x1 conv head.
                fc = nn.Conv2d(channels[self.first_level], classes,
                               kernel_size=1, stride=1,
                               padding=0, bias=True)
                if 'hm' in head:
                    fc.bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            self.__setattr__(head, fc)
        '''
        up_factor = 2 ** self.first_level
        if up_factor > 1:
            up = nn.ConvTranspose2d(classes, classes, up_factor * 2,
                                    stride=up_factor, padding=up_factor // 2,
                                    output_padding=0, groups=classes,
                                    bias=False)
            fill_up_weights(up)
            up.weight.requires_grad = False
        else:
            up = Identity()
        self.up = up
        self.softmax = nn.LogSoftmax(dim=1)
        for m in self.fc.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, BatchNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        '''
    def forward(self, x):
        """Return a one-element list with the dict of raw head outputs."""
        x = self.base(x)
        x = self.dla_up(x[self.first_level:])
        # x = self.fc(x)
        # y = self.softmax(self.up(x))
        ret = {}
        for head in self.heads:
            ret[head] = self.__getattr__(head)(x)
        return [ret]
    '''
    def optim_parameters(self, memo=None):
        for param in self.base.parameters():
            yield param
        for param in self.dla_up.parameters():
            yield param
        for param in self.fc.parameters():
            yield param
    '''
'''
def dla34up(classes, pretrained_base=None, **kwargs):
    model = DLASeg('dla34', classes, pretrained_base=pretrained_base, **kwargs)
    return model
def dla60up(classes, pretrained_base=None, **kwargs):
    model = DLASeg('dla60', classes, pretrained_base=pretrained_base, **kwargs)
    return model
def dla102up(classes, pretrained_base=None, **kwargs):
    model = DLASeg('dla102', classes,
                   pretrained_base=pretrained_base, **kwargs)
    return model
def dla169up(classes, pretrained_base=None, **kwargs):
    model = DLASeg('dla169', classes,
                   pretrained_base=pretrained_base, **kwargs)
    return model
'''
def get_pose_net(num_layers, heads, head_conv=256, down_ratio=4):
    """Build a DLA-``num_layers`` DLASeg model with the given output heads.

    Bug fix: the parameter was originally named ``add_conv`` while the body
    referenced ``head_conv``, so every call raised NameError. The parameter
    is renamed to the identifier the body (and callers) actually use.
    """
    model = DLASeg('dla{}'.format(num_layers), heads,
                   pretrained=True,
                   down_ratio=down_ratio,
                   head_conv=head_conv)
    return model
| 22,681 | 34.003086 | 86 | py |
houghnet | houghnet-master/src/lib/models/networks/hough_module.py |
import torch
import torch.nn as nn
import numpy as np
PI = np.pi
class Hough(nn.Module):
def __init__(self, angle=90, R2_list=[4, 64, 256, 1024],
num_classes=80, region_num=9, vote_field_size=17,
voting_map_size_w=128, voting_map_size_h=128, model_v1=False):
super(Hough, self).__init__()
self.angle = angle
self.R2_list = R2_list
self.region_num = region_num
self.num_classes = num_classes
self.vote_field_size = vote_field_size
self.deconv_filter_padding = int(self.vote_field_size / 2)
self.voting_map_size_w = voting_map_size_w
self.voting_map_size_h = voting_map_size_h
self.model_v1 = model_v1
self.deconv_filters = self._prepare_deconv_filters()
def _prepare_deconv_filters(self):
half_w = int(self.voting_map_size_w / 2)
half_h = int(self.voting_map_size_h / 2)
vote_center = torch.tensor([half_h, half_w]).cuda()
logmap_onehot = self.calculate_logmap((self.voting_map_size_h, self.voting_map_size_w), vote_center)
weights = logmap_onehot / \
torch.clamp(torch.sum(torch.sum(logmap_onehot, dim=0), dim=0).float(), min=1.0)
start_x = half_h - int(self.vote_field_size/2)
stop_x = half_h + int(self.vote_field_size/2) + 1
start_y = half_w - int(self.vote_field_size/2)
stop_y = half_w + int(self.vote_field_size/2) + 1
'''This if-block only applies for my two pretrained models. Please ignore this for your own trainings.'''
if self.model_v1 and self.region_num==17 and self.vote_field_size==65:
start_x -=1
stop_x -=1
start_y -=1
stop_y -=1
deconv_filters = weights[start_x:stop_x, start_y:stop_y,:].permute(2,0,1).view(self.region_num, 1,
self.vote_field_size, self.vote_field_size)
W = nn.Parameter(deconv_filters.repeat(self.num_classes, 1, 1, 1))
W.requires_grad = False
layers = []
deconv_kernel = nn.ConvTranspose2d(
in_channels=self.region_num*self.num_classes,
out_channels=1*self.num_classes,
kernel_size=self.vote_field_size,
padding=self.deconv_filter_padding,
groups=self.num_classes,
bias=False)
with torch.no_grad():
deconv_kernel.weight = W
layers.append(deconv_kernel)
return nn.Sequential(*layers)
def generate_grid(self, h, w):
x = torch.arange(0, w).float().cuda()
y = torch.arange(0, h).float().cuda()
grid = torch.stack([x.repeat(h), y.repeat(w, 1).t().contiguous().view(-1)], 1)
return grid.repeat(1, 1).view(-1, 2)
def calculate_logmap(self, im_size, center, angle=90, R2_list=[4, 64, 256, 1024]):
points = self.generate_grid(im_size[0], im_size[1]) # [x,y]
total_angles = 360 / angle
# check inside which circle
y_dif = points[:, 1].cuda() - center[0].float()
x_dif = points[:, 0].cuda() - center[1].float()
xdif_2 = x_dif * x_dif
ydif_2 = y_dif * y_dif
sum_of_squares = xdif_2 + ydif_2
# find angle
arc_angle = (torch.atan2(y_dif, x_dif) * 180 / PI).long()
arc_angle[arc_angle < 0] += 360
angle_id = (arc_angle / angle).long() + 1
c_region = torch.ones(xdif_2.shape, dtype=torch.long).cuda() * len(R2_list)
for i in range(len(R2_list) - 1, -1, -1):
region = R2_list[i]
c_region[(sum_of_squares) <= region] = i
results = angle_id + (c_region - 1) * total_angles
results[results < 0] = 0
results.view(im_size[0], im_size[1])
logmap = results.view(im_size[0], im_size[1])
logmap_onehot = torch.nn.functional.one_hot(logmap.long(), num_classes=17).float()
logmap_onehot = logmap_onehot[:, :, :self.region_num]
return logmap_onehot
def forward(self, voting_map, targets=None):
if self.model_v1:
batch_size, channels, width, height = voting_map.shape
voting_map = voting_map.view(batch_size, self.region_num, self.num_classes, width, height)
voting_map = voting_map.permute(0, 2, 1, 3, 4)
voting_map = voting_map.reshape(batch_size, -1, width, height)
heatmap = self.deconv_filters(voting_map)
return heatmap
| 4,491 | 34.370079 | 113 | py |
houghnet | houghnet-master/src/lib/models/networks/DCNv2/test.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import gradcheck
from dcn_v2 import dcn_v2_conv, DCNv2, DCN
from dcn_v2 import dcn_v2_pooling, DCNv2Pooling, DCNPooling
deformable_groups = 1
N, inC, inH, inW = 2, 2, 4, 4
outC = 2
kH, kW = 3, 3
def conv_identify(weight, bias):
weight.data.zero_()
bias.data.zero_()
o, i, h, w = weight.shape
y = h//2
x = w//2
for p in range(i):
for q in range(o):
if p == q:
weight.data[q, p, y, x] = 1.0
def check_zero_offset():
conv_offset = nn.Conv2d(inC, deformable_groups * 2 * kH * kW,
kernel_size=(kH, kW),
stride=(1, 1),
padding=(1, 1),
bias=True).cuda()
conv_mask = nn.Conv2d(inC, deformable_groups * 1 * kH * kW,
kernel_size=(kH, kW),
stride=(1, 1),
padding=(1, 1),
bias=True).cuda()
dcn_v2 = DCNv2(inC, outC, (kH, kW),
stride=1, padding=1, dilation=1,
deformable_groups=deformable_groups).cuda()
conv_offset.weight.data.zero_()
conv_offset.bias.data.zero_()
conv_mask.weight.data.zero_()
conv_mask.bias.data.zero_()
conv_identify(dcn_v2.weight, dcn_v2.bias)
input = torch.randn(N, inC, inH, inW).cuda()
offset = conv_offset(input)
mask = conv_mask(input)
mask = torch.sigmoid(mask)
output = dcn_v2(input, offset, mask)
output *= 2
d = (input - output).abs().max()
if d < 1e-10:
print('Zero offset passed')
else:
print('Zero offset failed')
print(input)
print(output)
def check_gradient_dconv():
input = torch.rand(N, inC, inH, inW).cuda() * 0.01
input.requires_grad = True
offset = torch.randn(N, deformable_groups * 2 * kW * kH, inH, inW).cuda() * 2
# offset.data.zero_()
# offset.data -= 0.5
offset.requires_grad = True
mask = torch.rand(N, deformable_groups * 1 * kW * kH, inH, inW).cuda()
# mask.data.zero_()
mask.requires_grad = True
mask = torch.sigmoid(mask)
weight = torch.randn(outC, inC, kH, kW).cuda()
weight.requires_grad = True
bias = torch.rand(outC).cuda()
bias.requires_grad = True
stride = 1
padding = 1
dilation = 1
print('check_gradient_dconv: ',
gradcheck(dcn_v2_conv, (input, offset, mask, weight, bias,
stride, padding, dilation, deformable_groups),
eps=1e-3, atol=1e-4, rtol=1e-2))
def check_pooling_zero_offset():
input = torch.randn(2, 16, 64, 64).cuda().zero_()
input[0, :, 16:26, 16:26] = 1.
input[1, :, 10:20, 20:30] = 2.
rois = torch.tensor([
[0, 65, 65, 103, 103],
[1, 81, 41, 119, 79],
]).cuda().float()
pooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=16,
no_trans=True,
group_size=1,
trans_std=0.0).cuda()
out = pooling(input, rois, input.new())
s = ', '.join(['%f' % out[i, :, :, :].mean().item()
for i in range(rois.shape[0])])
print(s)
dpooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=16,
no_trans=False,
group_size=1,
trans_std=0.0).cuda()
offset = torch.randn(20, 2, 7, 7).cuda().zero_()
dout = dpooling(input, rois, offset)
s = ', '.join(['%f' % dout[i, :, :, :].mean().item()
for i in range(rois.shape[0])])
print(s)
def check_gradient_dpooling():
input = torch.randn(2, 3, 5, 5).cuda() * 0.01
N = 4
batch_inds = torch.randint(2, (N, 1)).cuda().float()
x = torch.rand((N, 1)).cuda().float() * 15
y = torch.rand((N, 1)).cuda().float() * 15
w = torch.rand((N, 1)).cuda().float() * 10
h = torch.rand((N, 1)).cuda().float() * 10
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
offset = torch.randn(N, 2, 3, 3).cuda()
input.requires_grad = True
offset.requires_grad = True
spatial_scale = 1.0 / 4
pooled_size = 3
output_dim = 3
no_trans = 0
group_size = 1
trans_std = 0.0
sample_per_part = 4
part_size = pooled_size
print('check_gradient_dpooling:',
gradcheck(dcn_v2_pooling, (input, rois, offset,
spatial_scale,
pooled_size,
output_dim,
no_trans,
group_size,
part_size,
sample_per_part,
trans_std),
eps=1e-4))
def example_dconv():
input = torch.randn(2, 64, 128, 128).cuda()
# wrap all things (offset and mask) in DCN
dcn = DCN(64, 64, kernel_size=(3, 3), stride=1,
padding=1, deformable_groups=2).cuda()
# print(dcn.weight.shape, input.shape)
output = dcn(input)
targert = output.new(*output.size())
targert.data.uniform_(-0.01, 0.01)
error = (targert - output).mean()
error.backward()
print(output.shape)
def example_dpooling():
input = torch.randn(2, 32, 64, 64).cuda()
batch_inds = torch.randint(2, (20, 1)).cuda().float()
x = torch.randint(256, (20, 1)).cuda().float()
y = torch.randint(256, (20, 1)).cuda().float()
w = torch.randint(64, (20, 1)).cuda().float()
h = torch.randint(64, (20, 1)).cuda().float()
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
offset = torch.randn(20, 2, 7, 7).cuda()
input.requires_grad = True
offset.requires_grad = True
# normal roi_align
pooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=True,
group_size=1,
trans_std=0.1).cuda()
# deformable pooling
dpooling = DCNv2Pooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=False,
group_size=1,
trans_std=0.1).cuda()
out = pooling(input, rois, offset)
dout = dpooling(input, rois, offset)
print(out.shape)
print(dout.shape)
target_out = out.new(*out.size())
target_out.data.uniform_(-0.01, 0.01)
target_dout = dout.new(*dout.size())
target_dout.data.uniform_(-0.01, 0.01)
e = (target_out - out).mean()
e.backward()
e = (target_dout - dout).mean()
e.backward()
def example_mdpooling():
input = torch.randn(2, 32, 64, 64).cuda()
input.requires_grad = True
batch_inds = torch.randint(2, (20, 1)).cuda().float()
x = torch.randint(256, (20, 1)).cuda().float()
y = torch.randint(256, (20, 1)).cuda().float()
w = torch.randint(64, (20, 1)).cuda().float()
h = torch.randint(64, (20, 1)).cuda().float()
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
# mdformable pooling (V2)
dpooling = DCNPooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=False,
group_size=1,
trans_std=0.1,
deform_fc_dim=1024).cuda()
dout = dpooling(input, rois)
target = dout.new(*dout.size())
target.data.uniform_(-0.1, 0.1)
error = (target - dout).mean()
error.backward()
print(dout.shape)
if __name__ == '__main__':
example_dconv()
example_dpooling()
example_mdpooling()
check_pooling_zero_offset()
# zero offset check
if inC == outC:
check_zero_offset()
check_gradient_dpooling()
check_gradient_dconv()
# """
# ****** Note: backward is not reentrant error may not be a serious problem,
# ****** since the max error is less than 1e-7,
# ****** Still looking for what trigger this problem
# """
| 8,506 | 30.391144 | 81 | py |
houghnet | houghnet-master/src/lib/models/networks/DCNv2/setup.py | #!/usr/bin/env python
import os
import glob
import torch
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
from setuptools import find_packages
from setuptools import setup
requirements = ["torch", "torchvision"]
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "src")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
else:
raise NotImplementedError('Cuda is not availabel')
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"_ext",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
setup(
name="DCNv2",
version="0.1",
author="charlesshang",
url="https://github.com/charlesshang/DCNv2",
description="deformable convolutional networks",
packages=find_packages(exclude=("configs", "tests",)),
# install_requires=requirements,
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
) | 1,977 | 28.969697 | 73 | py |
houghnet | houghnet-master/src/lib/models/networks/DCNv2/dcn_v2.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import math
import torch
from torch import nn
from torch.autograd import Function
from torch.nn.modules.utils import _pair
from torch.autograd.function import once_differentiable
import _ext as _backend
class _DCNv2(Function):
@staticmethod
def forward(ctx, input, offset, mask, weight, bias,
stride, padding, dilation, deformable_groups):
ctx.stride = _pair(stride)
ctx.padding = _pair(padding)
ctx.dilation = _pair(dilation)
ctx.kernel_size = _pair(weight.shape[2:4])
ctx.deformable_groups = deformable_groups
output = _backend.dcn_v2_forward(input, weight, bias,
offset, mask,
ctx.kernel_size[0], ctx.kernel_size[1],
ctx.stride[0], ctx.stride[1],
ctx.padding[0], ctx.padding[1],
ctx.dilation[0], ctx.dilation[1],
ctx.deformable_groups)
ctx.save_for_backward(input, offset, mask, weight, bias)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, offset, mask, weight, bias = ctx.saved_tensors
grad_input, grad_offset, grad_mask, grad_weight, grad_bias = \
_backend.dcn_v2_backward(input, weight,
bias,
offset, mask,
grad_output,
ctx.kernel_size[0], ctx.kernel_size[1],
ctx.stride[0], ctx.stride[1],
ctx.padding[0], ctx.padding[1],
ctx.dilation[0], ctx.dilation[1],
ctx.deformable_groups)
return grad_input, grad_offset, grad_mask, grad_weight, grad_bias,\
None, None, None, None,
dcn_v2_conv = _DCNv2.apply
class DCNv2(nn.Module):
def __init__(self, in_channels, out_channels,
kernel_size, stride, padding, dilation=1, deformable_groups=1):
super(DCNv2, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.deformable_groups = deformable_groups
self.weight = nn.Parameter(torch.Tensor(
out_channels, in_channels, *self.kernel_size))
self.bias = nn.Parameter(torch.Tensor(out_channels))
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.zero_()
def forward(self, input, offset, mask):
assert 2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \
offset.shape[1]
assert self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \
mask.shape[1]
return dcn_v2_conv(input, offset, mask,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.deformable_groups)
class DCN(DCNv2):
def __init__(self, in_channels, out_channels,
kernel_size, stride, padding,
dilation=1, deformable_groups=1):
super(DCN, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, deformable_groups)
channels_ = self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1]
self.conv_offset_mask = nn.Conv2d(self.in_channels,
channels_,
kernel_size=self.kernel_size,
stride=self.stride,
padding=self.padding,
bias=True)
self.init_offset()
def init_offset(self):
self.conv_offset_mask.weight.data.zero_()
self.conv_offset_mask.bias.data.zero_()
def forward(self, input):
out = self.conv_offset_mask(input)
o1, o2, mask = torch.chunk(out, 3, dim=1)
offset = torch.cat((o1, o2), dim=1)
mask = torch.sigmoid(mask)
return dcn_v2_conv(input, offset, mask,
self.weight, self.bias,
self.stride,
self.padding,
self.dilation,
self.deformable_groups)
class _DCNv2Pooling(Function):
    """Autograd bridge to the deformable PS-RoI pooling CUDA kernels.

    Pooling parameters are stashed on ``ctx`` in ``forward`` so ``backward``
    can replay them into the backward kernel.
    """
    @staticmethod
    def forward(ctx, input, rois, offset,
                spatial_scale,
                pooled_size,
                output_dim,
                no_trans,
                group_size=1,
                part_size=None,
                sample_per_part=4,
                trans_std=.0):
        ctx.spatial_scale = spatial_scale
        # Kernel expects an int flag, not a Python bool.
        ctx.no_trans = int(no_trans)
        ctx.output_dim = output_dim
        ctx.group_size = group_size
        ctx.pooled_size = pooled_size
        # Default the transform part size to the pooled size.
        ctx.part_size = pooled_size if part_size is None else part_size
        ctx.sample_per_part = sample_per_part
        ctx.trans_std = trans_std
        output, output_count = \
            _backend.dcn_v2_psroi_pooling_forward(input, rois, offset,
                                                  ctx.no_trans, ctx.spatial_scale,
                                                  ctx.output_dim, ctx.group_size,
                                                  ctx.pooled_size, ctx.part_size,
                                                  ctx.sample_per_part, ctx.trans_std)
        # output_count is needed by the backward kernel for normalization.
        ctx.save_for_backward(input, rois, offset, output_count)
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, rois, offset, output_count = ctx.saved_tensors
        grad_input, grad_offset = \
            _backend.dcn_v2_psroi_pooling_backward(grad_output,
                                                   input,
                                                   rois,
                                                   offset,
                                                   output_count,
                                                   ctx.no_trans,
                                                   ctx.spatial_scale,
                                                   ctx.output_dim,
                                                   ctx.group_size,
                                                   ctx.pooled_size,
                                                   ctx.part_size,
                                                   ctx.sample_per_part,
                                                   ctx.trans_std)
        # One gradient slot per forward argument; only input and offset are
        # differentiable, the rest (rois + 8 config scalars) get None.
        return grad_input, None, grad_offset, \
            None, None, None, None, None, None, None, None
# Functional entry point used by the nn.Module wrappers below.
dcn_v2_pooling = _DCNv2Pooling.apply
class DCNv2Pooling(nn.Module):
    """Module wrapper around deformable PS-RoI pooling.

    Stores the pooling configuration at construction time and forwards it,
    together with the inputs, to ``dcn_v2_pooling``.
    """

    def __init__(self,
                 spatial_scale,
                 pooled_size,
                 output_dim,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0):
        super(DCNv2Pooling, self).__init__()
        self.spatial_scale = spatial_scale
        self.pooled_size = pooled_size
        self.output_dim = output_dim
        self.no_trans = no_trans
        self.group_size = group_size
        # Default the transform part size to the pooled size.
        self.part_size = part_size if part_size is not None else pooled_size
        self.sample_per_part = sample_per_part
        self.trans_std = trans_std

    def forward(self, input, rois, offset):
        assert input.shape[1] == self.output_dim
        if self.no_trans:
            # No learned transform: hand the kernel an empty placeholder.
            offset = input.new()
        return dcn_v2_pooling(input, rois, offset,
                              self.spatial_scale,
                              self.pooled_size,
                              self.output_dim,
                              self.no_trans,
                              self.group_size,
                              self.part_size,
                              self.sample_per_part,
                              self.trans_std)
class DCNPooling(DCNv2Pooling):
    """Deformable RoI pooling that learns its own offsets and masks.

    When ``no_trans`` is False, a small fully-connected head predicts a
    (dy, dx, mask) triple per pooled cell from an initial undeformed pooling
    pass; its final layer is zero-initialized so pooling starts undeformed.
    """

    def __init__(self,
                 spatial_scale,
                 pooled_size,
                 output_dim,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0,
                 deform_fc_dim=1024):
        super(DCNPooling, self).__init__(spatial_scale,
                                         pooled_size,
                                         output_dim,
                                         no_trans,
                                         group_size,
                                         part_size,
                                         sample_per_part,
                                         trans_std)
        self.deform_fc_dim = deform_fc_dim
        if not no_trans:
            in_features = self.pooled_size * self.pooled_size * self.output_dim
            # 3 outputs per pooled cell: y-offset, x-offset, modulation mask.
            out_features = self.pooled_size * self.pooled_size * 3
            self.offset_mask_fc = nn.Sequential(
                nn.Linear(in_features, self.deform_fc_dim),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_dim, self.deform_fc_dim),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_dim, out_features)
            )
            # Zero-init the last layer so training starts deformation-free.
            self.offset_mask_fc[4].weight.data.zero_()
            self.offset_mask_fc[4].bias.data.zero_()

    def forward(self, input, rois):
        offset = input.new()
        if self.no_trans:
            # Plain (roi_align-style) pooling, no learned transform.
            return dcn_v2_pooling(input, rois, offset,
                                  self.spatial_scale,
                                  self.pooled_size,
                                  self.output_dim,
                                  self.no_trans,
                                  self.group_size,
                                  self.part_size,
                                  self.sample_per_part,
                                  self.trans_std)
        # First pass: undeformed pooling to get per-RoI features.
        n = rois.shape[0]
        roi = dcn_v2_pooling(input, rois, offset,
                             self.spatial_scale,
                             self.pooled_size,
                             self.output_dim,
                             True,  # no trans
                             self.group_size,
                             self.part_size,
                             self.sample_per_part,
                             self.trans_std)
        # Predict offsets and modulation mask from the pooled features.
        offset_mask = self.offset_mask_fc(roi.view(n, -1))
        offset_mask = offset_mask.view(
            n, 3, self.pooled_size, self.pooled_size)
        o1, o2, mask = torch.chunk(offset_mask, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        mask = torch.sigmoid(mask)
        # Second pass: deformable pooling, modulated by the predicted mask.
        return dcn_v2_pooling(input, rois, offset,
                              self.spatial_scale,
                              self.pooled_size,
                              self.output_dim,
                              self.no_trans,
                              self.group_size,
                              self.part_size,
                              self.sample_per_part,
                              self.trans_std) * mask
| 12,081 | 38.743421 | 92 | py |
houghnet | houghnet-master/src/lib/trains/exdet.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
import cv2
import sys
import time
from src.lib.utils.debugger import Debugger
from src.lib.models.data_parallel import DataParallel
from src.lib.models.losses import FocalLoss, RegL1Loss
from src.lib.models.decode import agnex_ct_decode, exct_decode
from src.lib.models.utils import _sigmoid
from .base_trainer import BaseTrainer
class ExdetLoss(torch.nn.Module):
  """Training loss for extreme-point detection.

  Combines a heatmap loss over the five extreme-point/center maps with an
  L1 regression loss on the sub-pixel offsets of the four extreme points.
  """

  def __init__(self, opt):
    super(ExdetLoss, self).__init__()
    # Focal loss is the default; MSE is an optional ablation.
    self.crit = FocalLoss() if not opt.mse_loss else torch.nn.MSELoss()
    self.crit_reg = RegL1Loss()
    self.opt = opt
    # top, left, bottom, right extreme points plus the center map.
    self.parts = ['t', 'l', 'b', 'r', 'c']

  def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, reg_loss = 0, 0
    # Average the loss over all hourglass stacks.
    for output in outputs[:opt.num_stacks]:
      for p in self.parts:
        tag = 'hm_{}'.format(p)
        # Normalize logits in place before the heatmap loss.
        output[tag] = _sigmoid(output[tag])
        hm_loss += self.crit(output[tag], batch[tag]) / opt.num_stacks
        # The center map has no offset head.
        if p != 'c' and opt.reg_offset and opt.off_weight > 0:
          reg_loss += self.crit_reg(output['reg_{}'.format(p)],
                                    batch['reg_mask'],
                                    batch['ind_{}'.format(p)],
                                    batch['reg_{}'.format(p)]) / opt.num_stacks
    total = opt.hm_weight * hm_loss + opt.off_weight * reg_loss
    return total, {'loss': total, 'off_loss': reg_loss, 'hm_loss': hm_loss}
class ExdetTrainer(BaseTrainer):
  """Trainer for the extreme-point detection (ExDet) task.

  Wires ExdetLoss into the generic BaseTrainer loop and renders the five
  extreme-point heatmaps plus decoded detections for debugging.
  """

  def __init__(self, opt, model, optimizer=None):
    super(ExdetTrainer, self).__init__(opt, model, optimizer=optimizer)
    self.decode = agnex_ct_decode if opt.agnostic_ex else exct_decode
    # BUGFIX: debug() iterates self.parts, which was only defined on
    # ExdetLoss; it must exist on the trainer too or debug() raises
    # AttributeError. Kept identical to ExdetLoss.parts.
    self.parts = ['t', 'l', 'b', 'r', 'c']

  def _get_losses(self, opt):
    """Return the loss-stat names and the loss module for this task."""
    loss_states = ['loss', 'hm_loss', 'off_loss']
    loss = ExdetLoss(opt)
    return loss_states, loss

  def debug(self, batch, output, iter_id):
    """Visualise predicted vs. ground-truth heatmaps and decoded boxes."""
    opt = self.opt
    detections = self.decode(output['hm_t'], output['hm_l'],
                             output['hm_b'], output['hm_r'],
                             output['hm_c']).detach().cpu().numpy()
    # Boxes come out at output-map resolution; rescale to input pixels.
    detections[:, :, :4] *= opt.input_res / opt.output_res
    for i in range(1):  # only visualise the first sample of the batch
      debugger = Debugger(
        dataset=opt.dataset, ipynb=(opt.debug==3), theme=opt.debugger_theme)
      pred_hm = np.zeros((opt.input_res, opt.input_res, 3), dtype=np.uint8)
      gt_hm = np.zeros((opt.input_res, opt.input_res, 3), dtype=np.uint8)
      img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
      # Undo the dataloader's normalisation for display.
      img = ((img * self.opt.std + self.opt.mean) * 255.).astype(np.uint8)
      for p in self.parts:
        tag = 'hm_{}'.format(p)
        pred = debugger.gen_colormap(output[tag][i].detach().cpu().numpy())
        gt = debugger.gen_colormap(batch[tag][i].detach().cpu().numpy())
        if p != 'c':
          # Merge the four extreme-point maps into one overview image.
          pred_hm = np.maximum(pred_hm, pred)
          gt_hm = np.maximum(gt_hm, gt)
        if p == 'c' or opt.debug > 2:
          debugger.add_blend_img(img, pred, 'pred_{}'.format(p))
          debugger.add_blend_img(img, gt, 'gt_{}'.format(p))
      debugger.add_blend_img(img, pred_hm, 'pred')
      debugger.add_blend_img(img, gt_hm, 'gt')
      debugger.add_img(img, img_id='out')
      for k in range(len(detections[i])):
        if detections[i, k, 4] > 0.1:
          debugger.add_coco_bbox(detections[i, k, :4], detections[i, k, -1],
                                 detections[i, k, 4], img_id='out')
      if opt.debug == 4:
        debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
      else:
        debugger.show_all_imgs(pause=True)
houghnet | houghnet-master/src/lib/trains/ctdet.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
from src.lib.models.losses import FocalLoss
from src.lib.models.losses import RegL1Loss, RegLoss, NormRegL1Loss, RegWeightedL1Loss
from src.lib.models.decode import ctdet_decode
from src.lib.models.utils import _sigmoid
from src.lib.utils.debugger import Debugger
from src.lib.utils.post_process import ctdet_post_process
from src.lib.utils.oracle_utils import gen_oracle_map
from .base_trainer import BaseTrainer
class CtdetLoss(torch.nn.Module):
  """Training loss for center-point detection.

  Combines a heatmap loss, a width/height regression loss and a sub-pixel
  offset loss, each selectable/weightable via ``opt``.
  """
  def __init__(self, opt):
    super(CtdetLoss, self).__init__()
    # Heatmap criterion: focal loss by default, MSE as an ablation.
    self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
    # Offset criterion selected by opt.reg_loss ('l1' or smooth 'sl1').
    self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
              RegLoss() if opt.reg_loss == 'sl1' else None
    # Width/height criterion depends on the wh encoding in use.
    self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
              NormRegL1Loss() if opt.norm_wh else \
              RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
    self.opt = opt
  def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    # Average the loss contributions over all stacks.
    for s in range(opt.num_stacks):
      output = outputs[s]
      if not opt.mse_loss:
        # Focal loss expects probabilities; normalize logits in place.
        output['hm'] = _sigmoid(output['hm'])

      # Oracle options replace predictions with ground truth for debugging.
      if opt.eval_oracle_hm:
        output['hm'] = batch['hm']
      if opt.eval_oracle_wh:
        output['wh'] = torch.from_numpy(gen_oracle_map(
          batch['wh'].detach().cpu().numpy(),
          batch['ind'].detach().cpu().numpy(),
          output['wh'].shape[3], output['wh'].shape[2])).to(opt.device)
      if opt.eval_oracle_offset:
        output['reg'] = torch.from_numpy(gen_oracle_map(
          batch['reg'].detach().cpu().numpy(),
          batch['ind'].detach().cpu().numpy(),
          output['reg'].shape[3], output['reg'].shape[2])).to(opt.device)

      hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
      if opt.wh_weight > 0:
        if opt.dense_wh:
          # Dense wh: per-pixel L1, normalized by the number of valid pixels.
          mask_weight = batch['dense_wh_mask'].sum() + 1e-4
          wh_loss += (
            self.crit_wh(output['wh'] * batch['dense_wh_mask'],
            batch['dense_wh'] * batch['dense_wh_mask']) /
            mask_weight) / opt.num_stacks
        elif opt.cat_spec_wh:
          # Category-specific wh channels.
          wh_loss += self.crit_wh(
            output['wh'], batch['cat_spec_mask'],
            batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
        else:
          wh_loss += self.crit_reg(
            output['wh'], batch['reg_mask'],
            batch['ind'], batch['wh']) / opt.num_stacks

      if opt.reg_offset and opt.off_weight > 0:
        off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                             batch['ind'], batch['reg']) / opt.num_stacks

    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
class CtdetTrainer(BaseTrainer):
  """Trainer for center-point object detection (CtDet task).

  Fix: the final line of this class was fused with dataset-metadata residue
  ("| 5,574 | ..."), which broke the syntax; the residue is removed and the
  statement restored.
  """

  def __init__(self, opt, model, optimizer=None):
    super(CtdetTrainer, self).__init__(opt, model, optimizer=optimizer)

  def _get_losses(self, opt):
    """Return the loss-stat names and the CtdetLoss module."""
    loss_states = ['loss', 'hm_loss', 'wh_loss', 'off_loss']
    loss = CtdetLoss(opt)
    return loss_states, loss

  def debug(self, batch, output, iter_id):
    """Render predicted vs. ground-truth heatmaps and boxes."""
    opt = self.opt
    reg = output['reg'] if opt.reg_offset else None
    dets = ctdet_decode(
      output['hm'], output['wh'], reg=reg,
      cat_spec_wh=opt.cat_spec_wh, K=opt.K)
    dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
    # Decoded boxes are in output-map coordinates; scale to input pixels.
    dets[:, :, :4] *= opt.down_ratio
    dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2])
    dets_gt[:, :, :4] *= opt.down_ratio
    for i in range(1):  # only the first sample of the batch
      debugger = Debugger(
        dataset=opt.dataset, ipynb=(opt.debug==3), theme=opt.debugger_theme)
      img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
      # Undo the dataloader's normalisation for display.
      img = np.clip(((
        img * opt.std + opt.mean) * 255.), 0, 255).astype(np.uint8)
      pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
      gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
      debugger.add_blend_img(img, pred, 'pred_hm')
      debugger.add_blend_img(img, gt, 'gt_hm')
      debugger.add_img(img, img_id='out_pred')
      for k in range(len(dets[i])):
        if dets[i, k, 4] > opt.center_thresh:
          debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1],
                                 dets[i, k, 4], img_id='out_pred')
      debugger.add_img(img, img_id='out_gt')
      for k in range(len(dets_gt[i])):
        if dets_gt[i, k, 4] > opt.center_thresh:
          debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1],
                                 dets_gt[i, k, 4], img_id='out_gt')
      if opt.debug == 4:
        debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
      else:
        debugger.show_all_imgs(pause=True)

  def save_result(self, output, batch, results):
    """Decode one batch and store post-processed detections by image id."""
    reg = output['reg'] if self.opt.reg_offset else None
    dets = ctdet_decode(
      output['hm'], output['wh'], reg=reg,
      cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
    dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
    dets_out = ctdet_post_process(
      dets.copy(), batch['meta']['c'].cpu().numpy(),
      batch['meta']['s'].cpu().numpy(),
      output['hm'].shape[2], output['hm'].shape[3], output['hm'].shape[1])
    results[batch['meta']['img_id'].cpu().numpy()[0]] = dets_out[0]
houghnet | houghnet-master/src/lib/trains/ddd.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
from src.lib.models.losses import FocalLoss, L1Loss, BinRotLoss
from src.lib.models.decode import ddd_decode
from src.lib.models.utils import _sigmoid
from src.lib.utils.debugger import Debugger
from src.lib.utils.post_process import ddd_post_process
from src.lib.utils.oracle_utils import gen_oracle_map
from .base_trainer import BaseTrainer
class DddLoss(torch.nn.Module):
  """Training loss for monocular 3D detection.

  Combines heatmap, depth, 3D-dimension, rotation-bin, 2D wh and offset
  losses, each gated/weighted by the corresponding ``opt`` attribute.
  """
  def __init__(self, opt):
    super(DddLoss, self).__init__()
    # Heatmap criterion: focal loss by default, MSE as an ablation.
    self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
    self.crit_reg = L1Loss()
    # Rotation is predicted as bin classification + in-bin residual.
    self.crit_rot = BinRotLoss()
    self.opt = opt
  def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, dep_loss, rot_loss, dim_loss = 0, 0, 0, 0
    wh_loss, off_loss = 0, 0
    # Average contributions over all stacks.
    for s in range(opt.num_stacks):
      output = outputs[s]
      output['hm'] = _sigmoid(output['hm'])
      # Inverse-sigmoid depth transform: maps logits to (0, inf) depth.
      output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
      if opt.eval_oracle_dep:
        # Debug option: substitute ground-truth depth at annotated centers.
        output['dep'] = torch.from_numpy(gen_oracle_map(
          batch['dep'].detach().cpu().numpy(),
          batch['ind'].detach().cpu().numpy(),
          opt.output_w, opt.output_h)).to(opt.device)
      hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
      if opt.dep_weight > 0:
        dep_loss += self.crit_reg(output['dep'], batch['reg_mask'],
                                  batch['ind'], batch['dep']) / opt.num_stacks
      if opt.dim_weight > 0:
        dim_loss += self.crit_reg(output['dim'], batch['reg_mask'],
                                  batch['ind'], batch['dim']) / opt.num_stacks
      if opt.rot_weight > 0:
        rot_loss += self.crit_rot(output['rot'], batch['rot_mask'],
                                  batch['ind'], batch['rotbin'],
                                  batch['rotres']) / opt.num_stacks
      if opt.reg_bbox and opt.wh_weight > 0:
        wh_loss += self.crit_reg(output['wh'], batch['rot_mask'],
                                 batch['ind'], batch['wh']) / opt.num_stacks
      if opt.reg_offset and opt.off_weight > 0:
        off_loss += self.crit_reg(output['reg'], batch['rot_mask'],
                                  batch['ind'], batch['reg']) / opt.num_stacks
    loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \
           opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \
           opt.wh_weight * wh_loss + opt.off_weight * off_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss,
                  'dim_loss': dim_loss, 'rot_loss': rot_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
class DddTrainer(BaseTrainer):
  """Trainer for monocular 3D detection (DDD task).

  Fix: the final line of this class was fused with dataset-metadata residue
  ("| 6,967 | ..."), breaking the syntax; the residue is removed and dead
  commented-out debug code is dropped.
  """

  def __init__(self, opt, model, optimizer=None):
    super(DddTrainer, self).__init__(opt, model, optimizer=optimizer)

  def _get_losses(self, opt):
    """Return the loss-stat names and the DddLoss module."""
    loss_states = ['loss', 'hm_loss', 'dep_loss', 'dim_loss', 'rot_loss',
                   'wh_loss', 'off_loss']
    loss = DddLoss(opt)
    return loss_states, loss

  def debug(self, batch, output, iter_id):
    """Visualise 2D/3D predictions against ground truth."""
    opt = self.opt
    wh = output['wh'] if opt.reg_bbox else None
    reg = output['reg'] if opt.reg_offset else None
    dets = ddd_decode(output['hm'], output['rot'], output['dep'],
                      output['dim'], wh=wh, reg=reg, K=opt.K)
    # Each decoded row: x, y, score, r1-r8, depth, dim1-dim3, cls
    dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
    calib = batch['meta']['calib'].detach().numpy()
    # Post-process to: x, y, score, rot, depth, dim1, dim2, dim3
    dets_pred = ddd_post_process(
      dets.copy(), batch['meta']['c'].detach().numpy(),
      batch['meta']['s'].detach().numpy(), calib, opt)
    dets_gt = ddd_post_process(
      batch['meta']['gt_det'].detach().numpy().copy(),
      batch['meta']['c'].detach().numpy(),
      batch['meta']['s'].detach().numpy(), calib, opt)
    for i in range(1):  # only the first sample of the batch
      debugger = Debugger(dataset=opt.dataset, ipynb=(opt.debug==3),
                          theme=opt.debugger_theme)
      img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
      # Undo the dataloader's normalisation for display.
      img = ((img * self.opt.std + self.opt.mean) * 255.).astype(np.uint8)
      pred = debugger.gen_colormap(
        output['hm'][i].detach().cpu().numpy())
      gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
      debugger.add_blend_img(img, pred, 'hm_pred')
      debugger.add_blend_img(img, gt, 'hm_gt')
      # Decoded 2D detections.
      debugger.add_ct_detection(
        img, dets[i], show_box=opt.reg_bbox, center_thresh=opt.center_thresh,
        img_id='det_pred')
      debugger.add_ct_detection(
        img, batch['meta']['gt_det'][i].cpu().numpy().copy(),
        show_box=opt.reg_bbox, img_id='det_gt')
      # 3D boxes projected onto the original image.
      debugger.add_3d_detection(
        batch['meta']['image_path'][i], dets_pred[i], calib[i],
        center_thresh=opt.center_thresh, img_id='add_pred')
      debugger.add_3d_detection(
        batch['meta']['image_path'][i], dets_gt[i], calib[i],
        center_thresh=opt.center_thresh, img_id='add_gt')
      # Bird's-eye view of predicted vs. ground-truth boxes.
      debugger.add_bird_views(
        dets_pred[i], dets_gt[i],
        center_thresh=opt.center_thresh, img_id='bird_pred_gt')
      debugger.compose_vis_add(
        batch['meta']['image_path'][i], dets_pred[i], calib[i],
        opt.center_thresh, pred, 'bird_pred_gt', img_id='out')
      if opt.debug == 4:
        debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
      else:
        debugger.show_all_imgs(pause=True)

  def save_result(self, output, batch, results):
    """Decode, post-process and score-threshold detections into *results*."""
    opt = self.opt
    wh = output['wh'] if opt.reg_bbox else None
    reg = output['reg'] if opt.reg_offset else None
    dets = ddd_decode(output['hm'], output['rot'], output['dep'],
                      output['dim'], wh=wh, reg=reg, K=opt.K)
    dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
    calib = batch['meta']['calib'].detach().numpy()
    dets_pred = ddd_post_process(
      dets.copy(), batch['meta']['c'].detach().numpy(),
      batch['meta']['s'].detach().numpy(), calib, opt)
    img_id = batch['meta']['img_id'].detach().numpy()[0]
    results[img_id] = dets_pred[0]
    for j in range(1, opt.num_classes + 1):
      # Keep only detections above the center-score threshold.
      keep_inds = (results[img_id][j][:, -1] > opt.center_thresh)
      results[img_id][j] = results[img_id][j][keep_inds]
houghnet | houghnet-master/src/lib/trains/multi_pose.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
from src.lib.models.losses import FocalLoss, RegL1Loss, RegLoss, RegWeightedL1Loss
from src.lib.models.decode import multi_pose_decode
from src.lib.models.utils import _sigmoid, flip_tensor, flip_lr_off, flip_lr
from src.lib.utils.debugger import Debugger
from src.lib.utils.post_process import multi_pose_post_process
from src.lib.utils.oracle_utils import gen_oracle_map
from .base_trainer import BaseTrainer
class MultiPoseLoss(torch.nn.Module):
  """Training loss for multi-person pose estimation.

  Combines person-center heatmap, keypoint-offset (hps), keypoint heatmap,
  keypoint sub-pixel offset, box wh and center-offset losses.
  """
  def __init__(self, opt):
    super(MultiPoseLoss, self).__init__()
    self.crit = FocalLoss()
    # Keypoint-heatmap criterion: focal loss by default, MSE as ablation.
    self.crit_hm_hp = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
    # Dense keypoint supervision uses per-pixel L1; sparse uses masked L1.
    self.crit_kp = RegWeightedL1Loss() if not opt.dense_hp else \
                   torch.nn.L1Loss(reduction='sum')
    self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
                    RegLoss() if opt.reg_loss == 'sl1' else None
    self.opt = opt
  def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, wh_loss, off_loss = 0, 0, 0
    hp_loss, off_loss, hm_hp_loss, hp_offset_loss = 0, 0, 0, 0
    # Average contributions over all stacks.
    for s in range(opt.num_stacks):
      output = outputs[s]
      output['hm'] = _sigmoid(output['hm'])
      if opt.hm_hp and not opt.mse_loss:
        output['hm_hp'] = _sigmoid(output['hm_hp'])

      # Oracle options replace predictions with ground truth for debugging.
      if opt.eval_oracle_hmhp:
        output['hm_hp'] = batch['hm_hp']
      if opt.eval_oracle_hm:
        output['hm'] = batch['hm']
      if opt.eval_oracle_kps:
        if opt.dense_hp:
          output['hps'] = batch['dense_hps']
        else:
          output['hps'] = torch.from_numpy(gen_oracle_map(
            batch['hps'].detach().cpu().numpy(),
            batch['ind'].detach().cpu().numpy(),
            opt.output_res, opt.output_res)).to(opt.device)
      if opt.eval_oracle_hp_offset:
        output['hp_offset'] = torch.from_numpy(gen_oracle_map(
          batch['hp_offset'].detach().cpu().numpy(),
          batch['hp_ind'].detach().cpu().numpy(),
          opt.output_res, opt.output_res)).to(opt.device)

      hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
      if opt.dense_hp:
        # Dense keypoint offsets: normalize by number of supervised pixels.
        mask_weight = batch['dense_hps_mask'].sum() + 1e-4
        hp_loss += (self.crit_kp(output['hps'] * batch['dense_hps_mask'],
                                 batch['dense_hps'] * batch['dense_hps_mask']) /
                    mask_weight) / opt.num_stacks
      else:
        hp_loss += self.crit_kp(output['hps'], batch['hps_mask'],
                                batch['ind'], batch['hps']) / opt.num_stacks
      if opt.wh_weight > 0:
        wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
                                 batch['ind'], batch['wh']) / opt.num_stacks
      if opt.reg_offset and opt.off_weight > 0:
        off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                                  batch['ind'], batch['reg']) / opt.num_stacks
      if opt.reg_hp_offset and opt.off_weight > 0:
        hp_offset_loss += self.crit_reg(
          output['hp_offset'], batch['hp_mask'],
          batch['hp_ind'], batch['hp_offset']) / opt.num_stacks
      if opt.hm_hp and opt.hm_hp_weight > 0:
        hm_hp_loss += self.crit_hm_hp(
          output['hm_hp'], batch['hm_hp']) / opt.num_stacks
    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss + opt.hp_weight * hp_loss + \
           opt.hm_hp_weight * hm_hp_loss + opt.off_weight * hp_offset_loss

    loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'hp_loss': hp_loss,
                  'hm_hp_loss': hm_hp_loss, 'hp_offset_loss': hp_offset_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss}
    return loss, loss_stats
class MultiPoseTrainer(BaseTrainer):
  """Trainer for multi-person pose estimation.

  Fix: the final line of this class was fused with dataset-metadata residue
  ("| 7,300 | ..."), breaking the syntax; the residue is removed and the
  statement restored.
  """

  def __init__(self, opt, model, optimizer=None):
    super(MultiPoseTrainer, self).__init__(opt, model, optimizer=optimizer)

  def _get_losses(self, opt):
    """Return the loss-stat names and the MultiPoseLoss module."""
    loss_states = ['loss', 'hm_loss', 'hp_loss', 'hm_hp_loss',
                   'hp_offset_loss', 'wh_loss', 'off_loss']
    loss = MultiPoseLoss(opt)
    return loss_states, loss

  def debug(self, batch, output, iter_id):
    """Visualise predicted vs. ground-truth heatmaps, boxes and keypoints."""
    opt = self.opt
    reg = output['reg'] if opt.reg_offset else None
    hm_hp = output['hm_hp'] if opt.hm_hp else None
    hp_offset = output['hp_offset'] if opt.reg_hp_offset else None
    dets = multi_pose_decode(
      output['hm'], output['wh'], output['hps'],
      reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=opt.K)
    dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
    # Columns 0-3 are the box, 5-38 the 17 (x, y) keypoints; both are in
    # output-map coordinates and must be rescaled to input pixels.
    dets[:, :, :4] *= opt.input_res / opt.output_res
    dets[:, :, 5:39] *= opt.input_res / opt.output_res
    dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2])
    dets_gt[:, :, :4] *= opt.input_res / opt.output_res
    dets_gt[:, :, 5:39] *= opt.input_res / opt.output_res
    for i in range(1):  # only the first sample of the batch
      debugger = Debugger(
        dataset=opt.dataset, ipynb=(opt.debug==3), theme=opt.debugger_theme)
      img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
      # Undo the dataloader's normalisation for display.
      img = np.clip(((
        img * opt.std + opt.mean) * 255.), 0, 255).astype(np.uint8)
      pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
      gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
      debugger.add_blend_img(img, pred, 'pred_hm')
      debugger.add_blend_img(img, gt, 'gt_hm')

      debugger.add_img(img, img_id='out_pred')
      for k in range(len(dets[i])):
        if dets[i, k, 4] > opt.center_thresh:
          debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1],
                                 dets[i, k, 4], img_id='out_pred')
          debugger.add_coco_hp(dets[i, k, 5:39], img_id='out_pred')

      debugger.add_img(img, img_id='out_gt')
      for k in range(len(dets_gt[i])):
        if dets_gt[i, k, 4] > opt.center_thresh:
          debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1],
                                 dets_gt[i, k, 4], img_id='out_gt')
          debugger.add_coco_hp(dets_gt[i, k, 5:39], img_id='out_gt')

      if opt.hm_hp:
        pred = debugger.gen_colormap_hp(output['hm_hp'][i].detach().cpu().numpy())
        gt = debugger.gen_colormap_hp(batch['hm_hp'][i].detach().cpu().numpy())
        debugger.add_blend_img(img, pred, 'pred_hmhp')
        debugger.add_blend_img(img, gt, 'gt_hmhp')

      if opt.debug == 4:
        debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
      else:
        debugger.show_all_imgs(pause=True)

  def save_result(self, output, batch, results):
    """Decode one batch and store post-processed poses by image id."""
    reg = output['reg'] if self.opt.reg_offset else None
    hm_hp = output['hm_hp'] if self.opt.hm_hp else None
    hp_offset = output['hp_offset'] if self.opt.reg_hp_offset else None
    dets = multi_pose_decode(
      output['hm'], output['wh'], output['hps'],
      reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=self.opt.K)
    dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
    dets_out = multi_pose_post_process(
      dets.copy(), batch['meta']['c'].cpu().numpy(),
      batch['meta']['s'].cpu().numpy(),
      output['hm'].shape[2], output['hm'].shape[3])
    results[batch['meta']['img_id'].cpu().numpy()[0]] = dets_out[0]
houghnet | houghnet-master/src/lib/trains/base_trainer.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import torch
from progress.bar import Bar
from src.lib.models.data_parallel import DataParallel
from src.lib.utils.utils import AverageMeter
class ModleWithLoss(torch.nn.Module):
  """Bundle a model with its loss so one forward pass yields both.

  Computing the loss inside the module lets DataParallel evaluate it on
  each replica instead of gathering outputs first. (Name kept as-is for
  backward compatibility, typo included.)
  """

  def __init__(self, model, loss):
    super(ModleWithLoss, self).__init__()
    self.model = model
    self.loss = loss

  def forward(self, batch):
    predictions = self.model(batch['input'])
    loss, loss_stats = self.loss(predictions, batch)
    # Expose only the final stack's output to the caller.
    return predictions[-1], loss, loss_stats
class BaseTrainer(object):
  """Generic train/val loop shared by all task trainers.

  Subclasses implement ``_get_losses`` (loss names + module), ``debug``
  (visualisation) and ``save_result`` (test-time output collection).

  Fix: the final line of this class was fused with dataset-metadata residue
  ("| 3,929 | ..."), breaking the syntax; the residue is removed and the
  statement restored.
  """

  def __init__(
    self, opt, model, optimizer=None):
    self.opt = opt
    self.optimizer = optimizer
    self.loss_stats, self.loss = self._get_losses(opt)
    # Wrap model + loss so DataParallel computes the loss per replica.
    self.model_with_loss = ModleWithLoss(model, self.loss)

  def set_device(self, gpus, chunk_sizes, device):
    """Move the wrapped model (and optimizer state) to *device*/*gpus*."""
    if len(gpus) > 1:
      self.model_with_loss = DataParallel(
        self.model_with_loss, device_ids=gpus,
        chunk_sizes=chunk_sizes).to(device)
    else:
      self.model_with_loss = self.model_with_loss.to(device)
    # Optimizer state tensors (e.g. Adam moments) must follow the model.
    for state in self.optimizer.state.values():
      for k, v in state.items():
        if isinstance(v, torch.Tensor):
          state[k] = v.to(device=device, non_blocking=True)

  def run_epoch(self, phase, epoch, data_loader):
    """Run one epoch in 'train' or 'val' mode; return (avg stats, results)."""
    model_with_loss = self.model_with_loss
    if phase == 'train':
      model_with_loss.train()
    else:
      if len(self.opt.gpus) > 1:
        # Evaluate on a single device: unwrap the DataParallel module.
        model_with_loss = self.model_with_loss.module
      model_with_loss.eval()
      torch.cuda.empty_cache()

    opt = self.opt
    results = {}
    data_time, batch_time = AverageMeter(), AverageMeter()
    avg_loss_stats = {l: AverageMeter() for l in self.loss_stats}
    num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
    bar = Bar('{}/{}'.format(opt.task, opt.exp_id), max=num_iters)
    end = time.time()
    for iter_id, batch in enumerate(data_loader):
      if iter_id >= num_iters:
        break
      data_time.update(time.time() - end)

      # 'meta' holds CPU-side bookkeeping and stays off the device.
      for k in batch:
        if k != 'meta':
          batch[k] = batch[k].to(device=opt.device, non_blocking=True)
      output, loss, loss_stats = model_with_loss(batch)
      # .mean() reduces per-replica losses when running DataParallel.
      loss = loss.mean()
      if phase == 'train':
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
      batch_time.update(time.time() - end)
      end = time.time()

      Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format(
        epoch, iter_id, num_iters, phase=phase,
        total=bar.elapsed_td, eta=bar.eta_td)
      for l in avg_loss_stats:
        avg_loss_stats[l].update(
          loss_stats[l].mean().item(), batch['input'].size(0))
        Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(l, avg_loss_stats[l].avg)
      if not opt.hide_data_time:
        Bar.suffix = Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) ' \
          '|Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time)
      if opt.print_iter > 0:
        if iter_id % opt.print_iter == 0:
          print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
      else:
        bar.next()

      if opt.debug > 0:
        self.debug(batch, output, iter_id)
      if opt.test:
        self.save_result(output, batch, results)
      # Drop references early to free GPU memory between iterations.
      del output, loss, loss_stats

    bar.finish()
    ret = {k: v.avg for k, v in avg_loss_stats.items()}
    ret['time'] = bar.elapsed_td.total_seconds() / 60.
    return ret, results

  def debug(self, batch, output, iter_id):
    raise NotImplementedError

  def save_result(self, output, batch, results):
    raise NotImplementedError

  def _get_losses(self, opt):
    raise NotImplementedError

  def val(self, epoch, data_loader):
    """Run a validation epoch."""
    return self.run_epoch('val', epoch, data_loader)

  def train(self, epoch, data_loader):
    """Run a training epoch."""
    return self.run_epoch('train', epoch, data_loader)
houghnet | houghnet-master/src/lib/trains/ctseg.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
from src.lib.models.losses import FocalLoss,SegLoss
from src.lib.models.losses import RegL1Loss, RegLoss, NormRegL1Loss, RegWeightedL1Loss
from src.lib.models.decode import ctdet_decode
from src.lib.models.utils import _sigmoid
from src.lib.utils.debugger import Debugger
from src.lib.utils.post_process import ctdet_post_process
from src.lib.utils.oracle_utils import gen_oracle_map
from .base_trainer import BaseTrainer
class CtsegLoss(torch.nn.Module):
  """Training loss for center-based instance segmentation.

  Extends the CtDet losses (heatmap, wh, offset) with a per-instance mask
  loss computed from saliency and shape heads.
  """
  def __init__(self, opt):
    super(CtsegLoss, self).__init__()
    # Heatmap criterion: focal loss by default, MSE as an ablation.
    self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
    # Offset criterion selected by opt.reg_loss ('l1' or smooth 'sl1').
    self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
              RegLoss() if opt.reg_loss == 'sl1' else None
    # Width/height criterion depends on the wh encoding in use.
    self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
              NormRegL1Loss() if opt.norm_wh else \
              RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
    self.crit_mask = SegLoss(opt.seg_feat_channel)
    self.opt = opt

  def forward(self, outputs, batch):
    opt = self.opt
    hm_loss, wh_loss, off_loss, mask_loss = 0, 0, 0,0
    # Average contributions over all stacks.
    for s in range(opt.num_stacks):
      output = outputs[s]
      if not opt.mse_loss:
        # Focal loss expects probabilities; normalize logits in place.
        output['hm'] = _sigmoid(output['hm'])

      # Oracle options replace predictions with ground truth for debugging.
      if opt.eval_oracle_hm:
        output['hm'] = batch['hm']
      if opt.eval_oracle_wh:
        output['wh'] = torch.from_numpy(gen_oracle_map(
          batch['wh'].detach().cpu().numpy(),
          batch['ind'].detach().cpu().numpy(),
          output['wh'].shape[3], output['wh'].shape[2])).to(opt.device)
      if opt.eval_oracle_offset:
        output['reg'] = torch.from_numpy(gen_oracle_map(
          batch['reg'].detach().cpu().numpy(),
          batch['ind'].detach().cpu().numpy(),
          output['reg'].shape[3], output['reg'].shape[2])).to(opt.device)

      hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
      if opt.wh_weight > 0:
        if opt.dense_wh:
          # Dense wh: per-pixel L1, normalized by the number of valid pixels.
          mask_weight = batch['dense_wh_mask'].sum() + 1e-4
          wh_loss += (
            self.crit_wh(output['wh'] * batch['dense_wh_mask'],
            batch['dense_wh'] * batch['dense_wh_mask']) /
            mask_weight) / opt.num_stacks
        elif opt.cat_spec_wh:
          # Category-specific wh channels.
          wh_loss += self.crit_wh(
            output['wh'], batch['cat_spec_mask'],
            batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
        else:
          wh_loss += self.crit_reg(
            output['wh'], batch['reg_mask'],
            batch['ind'], batch['wh']) / opt.num_stacks

      if opt.reg_offset and opt.off_weight > 0:
        off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
                             batch['ind'], batch['reg']) / opt.num_stacks
      # Per-instance segmentation loss from saliency + shape heads.
      mask_loss+=self.crit_mask(output['saliency'], output['shape'], batch['gtboxes'],
                                batch['reg_mask'], batch['ind'], batch['instance_mask'],
                                output['hm'], batch['cat_spec_mask'])

    loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
           opt.off_weight * off_loss + opt.seg_weight * mask_loss
    loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                  'wh_loss': wh_loss, 'off_loss': off_loss,"mask_loss":mask_loss}
    return loss, loss_stats
class CtsegTrainer(BaseTrainer):
  """Trainer for center-based instance segmentation (CtSeg task).

  Fix: the final line of this class was fused with dataset-metadata residue
  ("| 6,685 | ..."), breaking the syntax; the residue is removed and the
  statement restored.
  """

  def __init__(self, opt, model, optimizer=None):
    super(CtsegTrainer, self).__init__(opt, model, optimizer=optimizer)

  def _get_losses(self, opt):
    """Return the loss-stat names and the CtsegLoss module."""
    loss_states = ['loss', 'hm_loss', 'wh_loss', 'off_loss', 'mask_loss']
    loss = CtsegLoss(opt)
    return loss_states, loss

  def debug(self, batch, output, iter_id):
    """Render predicted vs. ground-truth heatmaps and boxes."""
    opt = self.opt
    reg = output['reg'] if opt.reg_offset else None
    dets = ctdet_decode(
      output['hm'], output['wh'], reg=reg,
      cat_spec_wh=opt.cat_spec_wh, K=opt.K)
    dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
    # Decoded boxes are in output-map coordinates; scale to input pixels.
    dets[:, :, :4] *= opt.down_ratio
    dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2])
    dets_gt[:, :, :4] *= opt.down_ratio
    for i in range(1):  # only the first sample of the batch
      debugger = Debugger(
        dataset=opt.dataset, ipynb=(opt.debug == 3), theme=opt.debugger_theme)
      img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
      # Undo the dataloader's normalisation for display.
      img = np.clip(((
        img * opt.std + opt.mean) * 255.), 0, 255).astype(np.uint8)
      pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
      gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
      debugger.add_blend_img(img, pred, 'pred_hm')
      debugger.add_blend_img(img, gt, 'gt_hm')
      debugger.add_img(img, img_id='out_pred')
      for k in range(len(dets[i])):
        if dets[i, k, 4] > opt.center_thresh:
          debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1],
                                 dets[i, k, 4], img_id='out_pred')
      debugger.add_img(img, img_id='out_gt')
      for k in range(len(dets_gt[i])):
        if dets_gt[i, k, 4] > opt.center_thresh:
          debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1],
                                 dets_gt[i, k, 4], img_id='out_gt')
      if opt.debug == 4:
        debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
      else:
        debugger.show_all_imgs(pause=True)

  def save_result(self, output, batch, results):
    """Decode one batch and store post-processed detections by image id."""
    reg = output['reg'] if self.opt.reg_offset else None
    dets = ctdet_decode(
      output['hm'], output['wh'], reg=reg,
      cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
    dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
    dets_out = ctdet_post_process(
      dets.copy(), batch['meta']['c'].cpu().numpy(),
      batch['meta']['s'].cpu().numpy(),
      output['hm'].shape[2], output['hm'].shape[3], output['hm'].shape[1])
    results[batch['meta']['img_id'].cpu().numpy()[0]] = dets_out[0]
houghnet | houghnet-master/src/lib/datasets/sample/exdet.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
import pycocotools.coco as coco
import math
class EXDetDataset(data.Dataset):
  """Sample generator for extreme-point (ExtremeNet-style) detection.

  Each item is a dict holding the normalized input image and four extreme-point
  heatmaps (top / left / bottom / right) plus a center heatmap; with
  ``opt.reg_offset`` it also carries sub-pixel offset targets and flattened
  heatmap indices per point. Expects the concrete subclass to provide
  ``self.coco``, ``self.images``, ``self.img_dir``, ``self.opt``, ``self.mean``,
  ``self.std``, ``self.max_objs`` and ``self.cat_ids``.
  """
  def _coco_box_to_bbox(self, box):
    """Convert a COCO [x, y, w, h] box to [x1, y1, x2, y2] float32."""
    bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
                    dtype=np.float32)
    return bbox
  def _get_border(self, border, size):
    """Halve `border` (by powers of two) until a crop center can fit inside `size`."""
    i = 1
    while size - border // i <= border // i:
      i *= 2
    return border // i
  def __getitem__(self, index):
    """Load one image, augment it, and build extreme-point training targets."""
    img_id = self.images[index]
    img_info = self.coco.loadImgs(ids=[img_id])[0]
    img_path = os.path.join(self.img_dir, img_info['file_name'])
    img = cv2.imread(img_path)
    height, width = img.shape[0], img.shape[1]
    # c: crop center, s: crop scale (side length of the square crop).
    c = np.array([img.shape[1] / 2., img.shape[0] / 2.])
    s = max(img.shape[0], img.shape[1]) * 1.0
    flipped = False
    if self.split == 'train':
      # Random crop (default) or random scale+shift augmentation.
      if not self.opt.not_rand_crop:
        s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
        w_border = self._get_border(128, img.shape[1])
        h_border = self._get_border(128, img.shape[0])
        c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
        c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
      else:
        sf = self.opt.scale
        cf = self.opt.shift
        s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
        c[0] += img.shape[1] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
        c[1] += img.shape[0] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
      if np.random.random() < self.opt.flip:
        flipped = True
        img = img[:, ::-1, :]
    trans_input = get_affine_transform(
      c, s, 0, [self.opt.input_res, self.opt.input_res])
    inp = cv2.warpAffine(img, trans_input,
                         (self.opt.input_res, self.opt.input_res),
                         flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 255.)
    if self.split == 'train' and not self.opt.no_color_aug:
      color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
    inp = (inp - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)
    output_res = self.opt.output_res
    num_classes = self.opt.num_classes
    trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
    # Agnostic mode shares one heatmap channel across all classes.
    num_hm = 1 if self.opt.agnostic_ex else num_classes
    hm_t = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
    hm_l = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
    hm_b = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
    hm_r = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
    hm_c = np.zeros((num_classes, output_res, output_res), dtype=np.float32)
    reg_t = np.zeros((self.max_objs, 2), dtype=np.float32)
    reg_l = np.zeros((self.max_objs, 2), dtype=np.float32)
    reg_b = np.zeros((self.max_objs, 2), dtype=np.float32)
    reg_r = np.zeros((self.max_objs, 2), dtype=np.float32)
    ind_t = np.zeros((self.max_objs), dtype=np.int64)
    ind_l = np.zeros((self.max_objs), dtype=np.int64)
    ind_b = np.zeros((self.max_objs), dtype=np.int64)
    ind_r = np.zeros((self.max_objs), dtype=np.int64)
    reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
    ann_ids = self.coco.getAnnIds(imgIds=[img_id])
    anns = self.coco.loadAnns(ids=ann_ids)
    num_objs = min(len(anns), self.max_objs)
    draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
                    draw_umich_gaussian
    for k in range(num_objs):
      ann = anns[k]
      # bbox = self._coco_box_to_bbox(ann['bbox'])
      # Extreme points are stored in top/left/bottom/right (tlbr) order.
      pts = np.array(ann['extreme_points'], dtype=np.float32).reshape(4, 2)
      # cls_id = int(self.cat_ids[ann['category_id']] - 1) # bug (kept for history)
      cls_id = int(self.cat_ids[ann['category_id']])
      hm_id = 0 if self.opt.agnostic_ex else cls_id
      if flipped:
        pts[:, 0] = width - pts[:, 0] - 1
        # Horizontal flip swaps the left (1) and right (3) extreme points.
        pts[1], pts[3] = pts[3].copy(), pts[1].copy()
      for j in range(4):
        pts[j] = affine_transform(pts[j], trans_output)
      pts = np.clip(pts, 0, self.opt.output_res - 1)
      h, w = pts[2, 1] - pts[0, 1], pts[3, 0] - pts[1, 0]
      if h > 0 and w > 0:
        radius = gaussian_radius((math.ceil(h), math.ceil(w)))
        radius = max(0, int(radius))
        pt_int = pts.astype(np.int32)
        draw_gaussian(hm_t[hm_id], pt_int[0], radius)
        draw_gaussian(hm_l[hm_id], pt_int[1], radius)
        draw_gaussian(hm_b[hm_id], pt_int[2], radius)
        draw_gaussian(hm_r[hm_id], pt_int[3], radius)
        # Sub-pixel offsets between the float point and its integer cell.
        reg_t[k] = pts[0] - pt_int[0]
        reg_l[k] = pts[1] - pt_int[1]
        reg_b[k] = pts[2] - pt_int[2]
        reg_r[k] = pts[3] - pt_int[3]
        # Flattened (row-major) heatmap indices for gathering predictions.
        ind_t[k] = pt_int[0, 1] * output_res + pt_int[0, 0]
        ind_l[k] = pt_int[1, 1] * output_res + pt_int[1, 0]
        ind_b[k] = pt_int[2, 1] * output_res + pt_int[2, 0]
        ind_r[k] = pt_int[3, 1] * output_res + pt_int[3, 0]
        ct = [int((pts[3, 0] + pts[1, 0]) / 2), int((pts[0, 1] + pts[2, 1]) / 2)]
        draw_gaussian(hm_c[cls_id], ct, radius)
        reg_mask[k] = 1
    ret = {'input': inp, 'hm_t': hm_t, 'hm_l': hm_l, 'hm_b': hm_b,
           'hm_r': hm_r, 'hm_c': hm_c}
    if self.opt.reg_offset:
      ret.update({'reg_mask': reg_mask,
                  'reg_t': reg_t, 'reg_l': reg_l, 'reg_b': reg_b, 'reg_r': reg_r,
                  'ind_t': ind_t, 'ind_l': ind_l, 'ind_b': ind_b, 'ind_r': ind_r})
    return ret
houghnet | houghnet-master/src/lib/datasets/sample/ctdet.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import torch
import json
import cv2
import os
from src.lib.utils.image import flip, color_aug
from src.lib.utils.image import get_affine_transform, affine_transform
from src.lib.utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from src.lib.utils.image import draw_dense_reg
import math
class CTDetDataset(data.Dataset):
  """Sample generator for center-point object detection (CenterNet-style).

  Each item is a dict with the normalized input image, the class-wise center
  heatmap ``hm``, width/height targets, flattened center indices and the
  regression mask; optional keys (``dense_wh``, ``cat_spec_wh``, ``reg``,
  ``meta``) depend on ``self.opt``. Expects the concrete subclass to provide
  ``self.coco``, ``self.images``, ``self.img_dir``, ``self.opt``,
  ``self.mean``, ``self.std``, ``self.max_objs``, ``self.num_classes`` and
  ``self.cat_ids``.
  """
  def _coco_box_to_bbox(self, box):
    """Convert a COCO [x, y, w, h] box to [x1, y1, x2, y2] float32."""
    bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
                    dtype=np.float32)
    return bbox
  def _get_border(self, border, size):
    """Halve `border` (by powers of two) until a crop center can fit inside `size`."""
    i = 1
    while size - border // i <= border // i:
      i *= 2
    return border // i
  def __getitem__(self, index):
    """Load one image, augment it, and build center-heatmap training targets."""
    img_id = self.images[index]
    file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
    img_path = os.path.join(self.img_dir, file_name)
    ann_ids = self.coco.getAnnIds(imgIds=[img_id])
    anns = self.coco.loadAnns(ids=ann_ids)
    num_objs = min(len(anns), self.max_objs)
    img = cv2.imread(img_path)
    height, width = img.shape[0], img.shape[1]
    c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
    if self.opt.keep_res:
      # Keep the original resolution, padded up to a multiple of (pad + 1).
      input_h = (height | self.opt.pad) + 1
      input_w = (width | self.opt.pad) + 1
      s = np.array([input_w, input_h], dtype=np.float32)
    else:
      s = max(img.shape[0], img.shape[1]) * 1.0
      input_h, input_w = self.opt.input_h, self.opt.input_w
    flipped = False
    if self.split == 'train':
      # Random crop (default) or random scale+shift augmentation.
      if not self.opt.not_rand_crop:
        s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
        w_border = self._get_border(128, img.shape[1])
        h_border = self._get_border(128, img.shape[0])
        c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
        c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
      else:
        sf = self.opt.scale
        cf = self.opt.shift
        c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
        c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
        s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
      if np.random.random() < self.opt.flip:
        flipped = True
        img = img[:, ::-1, :]
        c[0] = width - c[0] - 1
    trans_input = get_affine_transform(
      c, s, 0, [input_w, input_h])
    inp = cv2.warpAffine(img, trans_input,
                         (input_w, input_h),
                         flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 255.)
    if self.split == 'train' and not self.opt.no_color_aug:
      color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
    inp = (inp - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)
    output_h = input_h // self.opt.down_ratio
    output_w = input_w // self.opt.down_ratio
    num_classes = self.num_classes
    trans_output = get_affine_transform(c, s, 0, [output_w, output_h])
    hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
    wh = np.zeros((self.max_objs, 2), dtype=np.float32)
    dense_wh = np.zeros((2, output_h, output_w), dtype=np.float32)
    reg = np.zeros((self.max_objs, 2), dtype=np.float32)
    ind = np.zeros((self.max_objs), dtype=np.int64)
    reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
    cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32)
    cat_spec_mask = np.zeros((self.max_objs, num_classes * 2), dtype=np.uint8)
    draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
                    draw_umich_gaussian
    gt_det = []
    for k in range(num_objs):
      ann = anns[k]
      bbox = self._coco_box_to_bbox(ann['bbox'])
      cls_id = int(self.cat_ids[ann['category_id']])
      if flipped:
        bbox[[0, 2]] = width - bbox[[2, 0]] - 1
      # Map the box into output (down-sampled) coordinates and clip.
      bbox[:2] = affine_transform(bbox[:2], trans_output)
      bbox[2:] = affine_transform(bbox[2:], trans_output)
      bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
      bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
      h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
      if h > 0 and w > 0:
        radius = gaussian_radius((math.ceil(h), math.ceil(w)))
        radius = max(0, int(radius))
        radius = self.opt.hm_gauss if self.opt.mse_loss else radius
        ct = np.array(
          [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
        ct_int = ct.astype(np.int32)
        draw_gaussian(hm[cls_id], ct_int, radius)
        wh[k] = 1. * w, 1. * h
        # Flattened (row-major) index of the center cell.
        ind[k] = ct_int[1] * output_w + ct_int[0]
        reg[k] = ct - ct_int
        reg_mask[k] = 1
        cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k]
        cat_spec_mask[k, cls_id * 2: cls_id * 2 + 2] = 1
        if self.opt.dense_wh:
          draw_dense_reg(dense_wh, hm.max(axis=0), ct_int, wh[k], radius)
        gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
                       ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])
    ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh}
    if self.opt.dense_wh:
      hm_a = hm.max(axis=0, keepdims=True)
      dense_wh_mask = np.concatenate([hm_a, hm_a], axis=0)
      ret.update({'dense_wh': dense_wh, 'dense_wh_mask': dense_wh_mask})
      del ret['wh']
    elif self.opt.cat_spec_wh:
      ret.update({'cat_spec_wh': cat_spec_wh, 'cat_spec_mask': cat_spec_mask})
      del ret['wh']
    if self.opt.reg_offset:
      ret.update({'reg': reg})
    if self.opt.debug > 0 or not self.split == 'train':
      gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
               np.zeros((1, 6), dtype=np.float32)
      meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id}
      ret['meta'] = meta
    return ret
houghnet | houghnet-master/src/lib/datasets/sample/ddd.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
import math
from src.lib.utils.image import flip, color_aug
from src.lib.utils.image import get_affine_transform, affine_transform
from src.lib.utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
import pycocotools.coco as coco
class DddDataset(data.Dataset):
  """Sample generator for monocular 3D detection (KITTI-style annotations).

  Produces the center heatmap plus 3D regression targets: depth ``dep``,
  dimensions ``dim``, rotation encoded as two bins (``rotbin``/``rotres``),
  and optional 2D box size / sub-pixel offset. Expects the concrete subclass
  to provide ``self.coco``, ``self.images``, ``self.img_dir``, ``self.opt``,
  ``self.mean``, ``self.std``, ``self.max_objs``, ``self.cat_ids``,
  ``self.calib`` and ``self.alpha_in_degree``.
  """
  def _coco_box_to_bbox(self, box):
    """Convert a COCO [x, y, w, h] box to [x1, y1, x2, y2] float32."""
    bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
                    dtype=np.float32)
    return bbox
  def _convert_alpha(self, alpha):
    """Return alpha in radians; the +45 offset is applied only in degree mode.

    NOTE(review): the degree branch adds 45 before converting — presumably a
    dataset-specific convention; confirm against the annotation format.
    """
    return math.radians(alpha + 45) if self.alpha_in_degree else alpha
  def __getitem__(self, index):
    """Load one image, (optionally) augment it, and build 3D training targets."""
    img_id = self.images[index]
    img_info = self.coco.loadImgs(ids=[img_id])[0]
    img_path = os.path.join(self.img_dir, img_info['file_name'])
    img = cv2.imread(img_path)
    # Per-image calibration overrides the dataset-level default when present.
    if 'calib' in img_info:
      calib = np.array(img_info['calib'], dtype=np.float32)
    else:
      calib = self.calib
    height, width = img.shape[0], img.shape[1]
    c = np.array([img.shape[1] / 2., img.shape[0] / 2.])
    if self.opt.keep_res:
      s = np.array([self.opt.input_w, self.opt.input_h], dtype=np.int32)
    else:
      s = np.array([width, height], dtype=np.int32)
    aug = False
    if self.split == 'train' and np.random.random() < self.opt.aug_ddd:
      aug = True
      sf = self.opt.scale
      cf = self.opt.shift
      s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
      c[0] += img.shape[1] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
      c[1] += img.shape[0] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
    trans_input = get_affine_transform(
      c, s, 0, [self.opt.input_w, self.opt.input_h])
    inp = cv2.warpAffine(img, trans_input,
                         (self.opt.input_w, self.opt.input_h),
                         flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 255.)
    # Color augmentation intentionally disabled for this task.
    # if self.split == 'train' and not self.opt.no_color_aug:
    #   color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
    inp = (inp - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)
    num_classes = self.opt.num_classes
    trans_output = get_affine_transform(
      c, s, 0, [self.opt.output_w, self.opt.output_h])
    hm = np.zeros(
      (num_classes, self.opt.output_h, self.opt.output_w), dtype=np.float32)
    wh = np.zeros((self.max_objs, 2), dtype=np.float32)
    reg = np.zeros((self.max_objs, 2), dtype=np.float32)
    dep = np.zeros((self.max_objs, 1), dtype=np.float32)
    rotbin = np.zeros((self.max_objs, 2), dtype=np.int64)
    rotres = np.zeros((self.max_objs, 2), dtype=np.float32)
    dim = np.zeros((self.max_objs, 3), dtype=np.float32)
    ind = np.zeros((self.max_objs), dtype=np.int64)
    reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
    rot_mask = np.zeros((self.max_objs), dtype=np.uint8)
    ann_ids = self.coco.getAnnIds(imgIds=[img_id])
    anns = self.coco.loadAnns(ids=ann_ids)
    num_objs = min(len(anns), self.max_objs)
    draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
                    draw_umich_gaussian
    gt_det = []
    for k in range(num_objs):
      ann = anns[k]
      bbox = self._coco_box_to_bbox(ann['bbox'])
      cls_id = int(self.cat_ids[ann['category_id']])
      # cat_ids maps "don't care" categories to negative ids; <= -99 is dropped.
      if cls_id <= -99:
        continue
      # if flipped:
      #   bbox[[0, 2]] = width - bbox[[2, 0]] - 1
      bbox[:2] = affine_transform(bbox[:2], trans_output)
      bbox[2:] = affine_transform(bbox[2:], trans_output)
      bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.opt.output_w - 1)
      bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.opt.output_h - 1)
      h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
      if h > 0 and w > 0:
        radius = gaussian_radius((h, w))
        radius = max(0, int(radius))
        ct = np.array(
          [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
        ct_int = ct.astype(np.int32)
        if cls_id < 0:
          # Negative ids mark ignore regions: stamp ~1.0 so the focal loss
          # treats these cells as (almost) positive and skips them.
          ignore_id = [_ for _ in range(num_classes)] \
                      if cls_id == - 1 else [- cls_id - 2]
          if self.opt.rect_mask:
            hm[ignore_id, int(bbox[1]): int(bbox[3]) + 1,
               int(bbox[0]): int(bbox[2]) + 1] = 0.9999
          else:
            for cc in ignore_id:
              draw_gaussian(hm[cc], ct, radius)
            hm[ignore_id, ct_int[1], ct_int[0]] = 0.9999
          continue
        draw_gaussian(hm[cls_id], ct, radius)
        wh[k] = 1. * w, 1. * h
        gt_det.append([ct[0], ct[1], 1] + \
                      self._alpha_to_8(self._convert_alpha(ann['alpha'])) + \
                      [ann['depth']] + (np.array(ann['dim']) / 1).tolist() + [cls_id])
        if self.opt.reg_bbox:
          gt_det[-1] = gt_det[-1][:-1] + [w, h] + [gt_det[-1][-1]]
        # if (not self.opt.car_only) or cls_id == 1: # Only estimate ADD for cars !!!
        if 1:
          alpha = self._convert_alpha(ann['alpha'])
          # print('img_id cls_id alpha rot_y', img_path, cls_id, alpha, ann['rotation_y'])
          # Two overlapping rotation bins (MultiBin-style): bin 0 is centered
          # at -pi/2, bin 1 at +pi/2; the residual is regressed per bin.
          if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
            rotbin[k, 0] = 1
            rotres[k, 0] = alpha - (-0.5 * np.pi)
          if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
            rotbin[k, 1] = 1
            rotres[k, 1] = alpha - (0.5 * np.pi)
          dep[k] = ann['depth']
          dim[k] = ann['dim']
          # print(' cat dim', cls_id, dim[k])
          ind[k] = ct_int[1] * self.opt.output_w + ct_int[0]
          reg[k] = ct - ct_int
          # 2D regression is disabled for geometrically augmented samples.
          reg_mask[k] = 1 if not aug else 0
          rot_mask[k] = 1
    # print('gt_det', gt_det)
    # print('')
    ret = {'input': inp, 'hm': hm, 'dep': dep, 'dim': dim, 'ind': ind,
           'rotbin': rotbin, 'rotres': rotres, 'reg_mask': reg_mask,
           'rot_mask': rot_mask}
    if self.opt.reg_bbox:
      ret.update({'wh': wh})
    if self.opt.reg_offset:
      ret.update({'reg': reg})
    if self.opt.debug > 0 or not ('train' in self.split):
      gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
               np.zeros((1, 18), dtype=np.float32)
      meta = {'c': c, 's': s, 'gt_det': gt_det, 'calib': calib,
              'image_path': img_path, 'img_id': img_id}
      ret['meta'] = meta
    return ret
  def _alpha_to_8(self, alpha):
    """Encode alpha into the 8-value two-bin representation
    [bin0_in, bin0_active, sin0, cos0, bin1_in, bin1_active, sin1, cos1]."""
    # return [alpha, 0, 0, 0, 0, 0, 0, 0]
    ret = [0, 0, 0, 1, 0, 0, 0, 1]
    if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
      r = alpha - (-0.5 * np.pi)
      ret[1] = 1
      ret[2], ret[3] = np.sin(r), np.cos(r)
    if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
      r = alpha - (0.5 * np.pi)
      ret[5] = 1
      ret[6], ret[7] = np.sin(r), np.cos(r)
    return ret
| 6,825 | 38.918129 | 90 | py |
houghnet | houghnet-master/src/lib/datasets/sample/multi_pose.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import torch
import json
import cv2
import os
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.image import draw_dense_reg
import math
class MultiPoseDataset(data.Dataset):
  """Sample generator for multi-person pose estimation (center + keypoints).

  Produces the person center heatmap, per-joint heatmaps, keypoint offsets
  relative to the center (``hps``), and optional dense keypoint maps and
  sub-pixel offsets. Expects the concrete subclass to provide ``self.coco``,
  ``self.images``, ``self.img_dir``, ``self.opt``, ``self.mean``,
  ``self.std``, ``self.max_objs``, ``self.num_classes``, ``self.num_joints``
  and ``self.flip_idx``.
  """
  def _coco_box_to_bbox(self, box):
    """Convert a COCO [x, y, w, h] box to [x1, y1, x2, y2] float32."""
    bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
                    dtype=np.float32)
    return bbox
  def _get_border(self, border, size):
    """Halve `border` (by powers of two) until a crop center can fit inside `size`."""
    i = 1
    while size - border // i <= border // i:
      i *= 2
    return border // i
  def __getitem__(self, index):
    """Load one image, augment it, and build center+keypoint training targets."""
    img_id = self.images[index]
    file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
    img_path = os.path.join(self.img_dir, file_name)
    ann_ids = self.coco.getAnnIds(imgIds=[img_id])
    anns = self.coco.loadAnns(ids=ann_ids)
    num_objs = min(len(anns), self.max_objs)
    img = cv2.imread(img_path)
    height, width = img.shape[0], img.shape[1]
    c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
    s = max(img.shape[0], img.shape[1]) * 1.0
    rot = 0
    flipped = False
    if self.split == 'train':
      # Random crop (default) or random scale+shift augmentation.
      if not self.opt.not_rand_crop:
        s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
        w_border = self._get_border(128, img.shape[1])
        h_border = self._get_border(128, img.shape[0])
        c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
        c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
      else:
        sf = self.opt.scale
        cf = self.opt.shift
        c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
        c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
        s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
      if np.random.random() < self.opt.aug_rot:
        rf = self.opt.rotate
        rot = np.clip(np.random.randn()*rf, -rf*2, rf*2)
      if np.random.random() < self.opt.flip:
        flipped = True
        img = img[:, ::-1, :]
        c[0] = width - c[0] - 1
    trans_input = get_affine_transform(
      c, s, rot, [self.opt.input_res, self.opt.input_res])
    inp = cv2.warpAffine(img, trans_input,
                         (self.opt.input_res, self.opt.input_res),
                         flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 255.)
    if self.split == 'train' and not self.opt.no_color_aug:
      color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
    inp = (inp - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)
    output_res = self.opt.output_res
    num_joints = self.num_joints
    # Keypoints use the rotated transform; boxes use the unrotated one.
    trans_output_rot = get_affine_transform(c, s, rot, [output_res, output_res])
    trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
    hm = np.zeros((self.num_classes, output_res, output_res), dtype=np.float32)
    hm_hp = np.zeros((num_joints, output_res, output_res), dtype=np.float32)
    dense_kps = np.zeros((num_joints, 2, output_res, output_res),
                          dtype=np.float32)
    dense_kps_mask = np.zeros((num_joints, output_res, output_res),
                               dtype=np.float32)
    wh = np.zeros((self.max_objs, 2), dtype=np.float32)
    kps = np.zeros((self.max_objs, num_joints * 2), dtype=np.float32)
    reg = np.zeros((self.max_objs, 2), dtype=np.float32)
    ind = np.zeros((self.max_objs), dtype=np.int64)
    reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
    kps_mask = np.zeros((self.max_objs, self.num_joints * 2), dtype=np.uint8)
    hp_offset = np.zeros((self.max_objs * num_joints, 2), dtype=np.float32)
    hp_ind = np.zeros((self.max_objs * num_joints), dtype=np.int64)
    hp_mask = np.zeros((self.max_objs * num_joints), dtype=np.int64)
    draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
                    draw_umich_gaussian
    gt_det = []
    for k in range(num_objs):
      ann = anns[k]
      bbox = self._coco_box_to_bbox(ann['bbox'])
      cls_id = int(ann['category_id']) - 1
      pts = np.array(ann['keypoints'], np.float32).reshape(num_joints, 3)
      if flipped:
        bbox[[0, 2]] = width - bbox[[2, 0]] - 1
        pts[:, 0] = width - pts[:, 0] - 1
        # Swap left/right joint pairs after a horizontal flip.
        for e in self.flip_idx:
          pts[e[0]], pts[e[1]] = pts[e[1]].copy(), pts[e[0]].copy()
      bbox[:2] = affine_transform(bbox[:2], trans_output)
      bbox[2:] = affine_transform(bbox[2:], trans_output)
      bbox = np.clip(bbox, 0, output_res - 1)
      h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
      if (h > 0 and w > 0) or (rot != 0):
        radius = gaussian_radius((math.ceil(h), math.ceil(w)))
        radius = self.opt.hm_gauss if self.opt.mse_loss else max(0, int(radius))
        ct = np.array(
          [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
        ct_int = ct.astype(np.int32)
        wh[k] = 1. * w, 1. * h
        ind[k] = ct_int[1] * output_res + ct_int[0]
        reg[k] = ct - ct_int
        reg_mask[k] = 1
        num_kpts = pts[:, 2].sum()
        if num_kpts == 0:
          # No visible keypoints: mark the center cell as an ignore region.
          hm[cls_id, ct_int[1], ct_int[0]] = 0.9999
          reg_mask[k] = 0
        hp_radius = gaussian_radius((math.ceil(h), math.ceil(w)))
        hp_radius = self.opt.hm_gauss \
                    if self.opt.mse_loss else max(0, int(hp_radius))
        for j in range(num_joints):
          if pts[j, 2] > 0:
            pts[j, :2] = affine_transform(pts[j, :2], trans_output_rot)
            if pts[j, 0] >= 0 and pts[j, 0] < output_res and \
               pts[j, 1] >= 0 and pts[j, 1] < output_res:
              kps[k, j * 2: j * 2 + 2] = pts[j, :2] - ct_int
              kps_mask[k, j * 2: j * 2 + 2] = 1
              pt_int = pts[j, :2].astype(np.int32)
              hp_offset[k * num_joints + j] = pts[j, :2] - pt_int
              hp_ind[k * num_joints + j] = pt_int[1] * output_res + pt_int[0]
              hp_mask[k * num_joints + j] = 1
              if self.opt.dense_hp:
                # must be before draw center hm gaussian
                draw_dense_reg(dense_kps[j], hm[cls_id], ct_int,
                               pts[j, :2] - ct_int, radius, is_offset=True)
                draw_gaussian(dense_kps_mask[j], ct_int, radius)
              draw_gaussian(hm_hp[j], pt_int, hp_radius)
        draw_gaussian(hm[cls_id], ct_int, radius)
        gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
                       ct[0] + w / 2, ct[1] + h / 2, 1] +
                       pts[:, :2].reshape(num_joints * 2).tolist() + [cls_id])
    if rot != 0:
      # Rotated samples train only the keypoint heatmaps; everything else
      # is masked out (and the center heatmap is turned into ignore cells).
      hm = hm * 0 + 0.9999
      reg_mask *= 0
      kps_mask *= 0
    ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh,
           'hps': kps, 'hps_mask': kps_mask}
    if self.opt.dense_hp:
      dense_kps = dense_kps.reshape(num_joints * 2, output_res, output_res)
      dense_kps_mask = dense_kps_mask.reshape(
        num_joints, 1, output_res, output_res)
      dense_kps_mask = np.concatenate([dense_kps_mask, dense_kps_mask], axis=1)
      dense_kps_mask = dense_kps_mask.reshape(
        num_joints * 2, output_res, output_res)
      ret.update({'dense_hps': dense_kps, 'dense_hps_mask': dense_kps_mask})
      del ret['hps'], ret['hps_mask']
    if self.opt.reg_offset:
      ret.update({'reg': reg})
    if self.opt.hm_hp:
      ret.update({'hm_hp': hm_hp})
    if self.opt.reg_hp_offset:
      ret.update({'hp_offset': hp_offset, 'hp_ind': hp_ind, 'hp_mask': hp_mask})
    if self.opt.debug > 0 or not self.split == 'train':
      gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
               np.zeros((1, 40), dtype=np.float32)
      meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id}
      ret['meta'] = meta
    return ret
| 7,913 | 42.01087 | 81 | py |
houghnet | houghnet-master/src/lib/datasets/sample/ctseg.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import torch
import json
import cv2
import os
from src.lib.utils.image import flip, color_aug
from src.lib.utils.image import get_affine_transform, affine_transform
from src.lib.utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from src.lib.utils.image import draw_dense_reg
import math
class CTSegDataset(data.Dataset):
  """Sample generator for center-point instance segmentation.

  Extends the center-detection targets with a per-object binary instance mask
  (warped into output resolution), ground-truth boxes, and a per-class mask.
  Expects the concrete subclass to provide ``self.coco``, ``self.images``,
  ``self.img_dir``, ``self.opt``, ``self.mean``, ``self.std``,
  ``self.max_objs``, ``self.num_classes`` and ``self.cat_ids``.
  """
  def _coco_box_to_bbox(self, box):
    """Convert a COCO [x, y, w, h] box to [x1, y1, x2, y2] float32."""
    bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
                    dtype=np.float32)
    return bbox
  def _get_border(self, border, size):
    """Halve `border` (by powers of two) until a crop center can fit inside `size`."""
    i = 1
    while size - border // i <= border // i:
      i *= 2
    return border // i
  def __getitem__(self, index):
    """Load one image, augment it, and build detection + mask training targets."""
    img_id = self.images[index]
    file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
    img_path = os.path.join(self.img_dir, file_name)
    ann_ids = self.coco.getAnnIds(imgIds=[img_id])
    anns = self.coco.loadAnns(ids=ann_ids)
    num_objs = min(len(anns), self.max_objs)
    img = cv2.imread(img_path)
    height, width = img.shape[0], img.shape[1]
    c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
    if self.opt.keep_res:
      # Keep the original resolution, padded up to a multiple of (pad + 1).
      input_h = (height | self.opt.pad) + 1
      input_w = (width | self.opt.pad) + 1
      s = np.array([input_w, input_h], dtype=np.float32)
    else:
      s = max(img.shape[0], img.shape[1]) * 1.0
      input_h, input_w = self.opt.input_h, self.opt.input_w
    flipped = False
    if self.split == 'train':
      if not self.opt.not_rand_crop:
        s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
        w_border = self._get_border(128, img.shape[1])
        h_border = self._get_border(128, img.shape[0])
        c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
        c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
      else:
        sf = self.opt.scale
        cf = self.opt.shift
        c[0] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
        c[1] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
        s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
      if np.random.random() < self.opt.flip:
        flipped = True
        img = img[:, ::-1, :]
        c[0] = width - c[0] - 1
    trans_input = get_affine_transform(
      c, s, 0, [input_w, input_h])
    inp = cv2.warpAffine(img, trans_input,
                         (input_w, input_h),
                         flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 255.)
    if self.split == 'train' and not self.opt.no_color_aug:
      color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
    inp = (inp - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)
    output_h = input_h // self.opt.down_ratio
    output_w = input_w // self.opt.down_ratio
    num_classes = self.num_classes
    trans_output = get_affine_transform(c, s, 0, [output_w, output_h])
    hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
    wh = np.zeros((self.max_objs, 2), dtype=np.float32)
    gtboxes = np.zeros((self.max_objs, 4), dtype=np.float32)
    dense_wh = np.zeros((2, output_h, output_w), dtype=np.float32)
    reg = np.zeros((self.max_objs, 2), dtype=np.float32)
    ind = np.zeros((self.max_objs), dtype=np.int64)
    reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
    cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32)
    # NOTE: unlike CTDetDataset, cat_spec_mask here is one flag per class.
    cat_spec_mask = np.zeros((self.max_objs, num_classes), dtype=np.uint8)
    instance_masks = np.zeros((self.max_objs, output_h,output_w),dtype=np.float32)
    draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
                    draw_umich_gaussian
    gt_det = []
    for k in range(num_objs):
      ann = anns[k]
      # Rasterize the COCO polygon/RLE segmentation into a binary mask.
      instance_mask = self.coco.annToMask(ann)
      bbox = self._coco_box_to_bbox(ann['bbox'])
      cls_id = int(self.cat_ids[ann['category_id']])
      if flipped:
        bbox[[0, 2]] = width - bbox[[2, 0]] - 1
        instance_mask = instance_mask[:, ::-1]
      bbox[:2] = affine_transform(bbox[:2], trans_output)
      bbox[2:] = affine_transform(bbox[2:], trans_output)
      bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
      bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
      # Warp the mask with the same output transform as the boxes.
      instance_mask= cv2.warpAffine(instance_mask, trans_output,
                                    (output_w, output_h),
                                    flags=cv2.INTER_LINEAR)
      instance_mask = instance_mask.astype(np.float32)
      h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
      if h > 0 and w > 0:
        radius = gaussian_radius((math.ceil(h), math.ceil(w)))
        radius = max(0, int(radius))
        radius = self.opt.hm_gauss if self.opt.mse_loss else radius
        ct = np.array(
          [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
        ct_int = ct.astype(np.int32)
        draw_gaussian(hm[cls_id], ct_int, radius)
        gtboxes[k] = bbox
        wh[k] = 1. * w, 1. * h
        ind[k] = ct_int[1] * output_w + ct_int[0]
        reg[k] = ct - ct_int
        reg_mask[k] = 1
        cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k]
        cat_spec_mask[k, cls_id] = 1
        instance_masks[k] = instance_mask
        if self.opt.dense_wh:
          draw_dense_reg(dense_wh, hm.max(axis=0), ct_int, wh[k], radius)
        gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
                       ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])
    ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh,
           "instance_mask":instance_masks, 'gtboxes':gtboxes, 'cat_spec_mask': cat_spec_mask}
    if self.opt.dense_wh:
      hm_a = hm.max(axis=0, keepdims=True)
      dense_wh_mask = np.concatenate([hm_a, hm_a], axis=0)
      ret.update({'dense_wh': dense_wh, 'dense_wh_mask': dense_wh_mask})
      del ret['wh']
    elif self.opt.cat_spec_wh:
      ret.update({'cat_spec_wh': cat_spec_wh, 'cat_spec_mask': cat_spec_mask})
      del ret['wh']
    if self.opt.reg_offset:
      ret.update({'reg': reg})
    if self.opt.debug > 0 or not self.split == 'train':
      gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
               np.zeros((1, 6), dtype=np.float32)
      meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id}
      ret['meta'] = meta
    return ret
houghnet | houghnet-master/src/lib/datasets/dataset/kitti.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
import math
import torch.utils.data as data
class KITTI(data.Dataset):
  """KITTI 3D-detection dataset wrapper over COCO-format annotation files.

  Loads image ids from a converted-annotations JSON, exposes them through
  the torch ``Dataset`` protocol, and writes/evaluates detections in the
  official KITTI text format.
  """
  num_classes = 3
  default_resolution = [384, 1280]
  # ImageNet channel statistics used for input normalization.
  mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
  std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
  def __init__(self, opt, split):
    """Resolve image/annotation paths for `split` and load the COCO index."""
    super(KITTI, self).__init__()
    self.data_dir = os.path.join(opt.data_dir, 'kitti')
    self.img_dir = os.path.join(self.data_dir, 'images', 'trainval')
    if opt.trainval:
      split = 'trainval' if split == 'train' else 'test'
      self.img_dir = os.path.join(self.data_dir, 'images', split)
      self.annot_path = os.path.join(
        self.data_dir, 'annotations', 'kitti_{}.json').format(split)
    else:
      self.annot_path = os.path.join(self.data_dir,
        'annotations', 'kitti_{}_{}.json').format(opt.kitti_split, split)
    self.max_objs = 50
    self.class_name = [
      '__background__', 'Pedestrian', 'Car', 'Cyclist']
    # KITTI category id -> training id; negative ids mark ignore/don't-care
    # categories that the sampler treats specially.
    self.cat_ids = {1:0, 2:1, 3:2, 4:-3, 5:-3, 6:-2, 7:-99, 8:-99, 9:-1}
    # Fixed seed keeps color augmentation reproducible across runs.
    self._data_rng = np.random.RandomState(123)
    self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                             dtype=np.float32)
    self._eig_vec = np.array([
        [-0.58752847, -0.69563484, 0.41340352],
        [-0.5832747, 0.00994535, -0.81221408],
        [-0.56089297, 0.71832671, 0.41158938]
    ], dtype=np.float32)
    self.split = split
    self.opt = opt
    self.alpha_in_degree = False
    print('==> initializing kitti {}, {} data.'.format(opt.kitti_split, split))
    self.coco = coco.COCO(self.annot_path)
    self.images = self.coco.getImgIds()
    self.num_samples = len(self.images)
    print('Loaded {} {} samples'.format(split, self.num_samples))
  def __len__(self):
    """Number of images in this split."""
    return self.num_samples
  def _to_float(self, x):
    """Round `x` to two decimals (via string formatting) and return a float."""
    return float("{:.2f}".format(x))
  def convert_eval_format(self, all_bboxes):
    """Not needed for KITTI: results are written as per-image text files."""
    pass
  def save_results(self, results, save_dir):
    """Write `results` ({img_id: {class: [det, ...]}}) as KITTI-format
    text files under `save_dir`/results, one '{img_id:06d}.txt' per image.

    Each line is '<class> 0.0 0' followed by the detection values with two
    decimals (the official KITTI label layout).
    """
    results_dir = os.path.join(save_dir, 'results')
    if not os.path.exists(results_dir):
      os.mkdir(results_dir)
    for img_id in results.keys():
      out_path = os.path.join(results_dir, '{:06d}.txt'.format(img_id))
      # `with` guarantees the file handle is closed even if a write raises
      # (the original closed it manually and could leak on error).
      with open(out_path, 'w') as f:
        for cls_ind in results[img_id]:
          for j in range(len(results[img_id][cls_ind])):
            class_name = self.class_name[cls_ind]
            f.write('{} 0.0 0'.format(class_name))
            for i in range(len(results[img_id][cls_ind][j])):
              f.write(' {:.2f}'.format(results[img_id][cls_ind][j][i]))
            f.write('\n')
  def run_eval(self, results, save_dir):
    """Dump results to disk and invoke the offline KITTI evaluation binary.

    NOTE: paths to the evaluator and labels are hard-coded relative paths,
    so this must be run from the repository root.
    """
    self.save_results(results, save_dir)
    os.system('./src//tools/kitti_eval/evaluate_object_3d_offline ' + \
              './data/kitti/training/label_2 ' + \
              '{}/results/'.format(save_dir))
| 3,060 | 33.011111 | 79 | py |
houghnet | houghnet-master/src/lib/datasets/dataset/coco_hp.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
# COCO 2017 human-pose (keypoint) dataset wrapper: a single 'person' class
# with 17 joints per instance. Exposes image ids for a split and converts
# network outputs to the COCO keypoint-results format for COCOeval.
class COCOHP(data.Dataset):
num_classes = 1
num_joints = 17
default_resolution = [512, 512]
# Per-channel input normalization statistics.
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
# Joint-index pairs exchanged under horizontal flip (presumably
# left/right counterparts — verify against the joint ordering).
flip_idx = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16]]
def __init__(self, opt, split):
# split: 'train', 'val' or 'test'; 'test' uses the image-info-only
# annotation file.
super(COCOHP, self).__init__()
# Skeleton edges (joint-index pairs), e.g. for visualization.
self.edges = [[0, 1], [0, 2], [1, 3], [2, 4],
[4, 6], [3, 5], [5, 6],
[5, 7], [7, 9], [6, 8], [8, 10],
[6, 12], [5, 11], [11, 12],
[12, 14], [14, 16], [11, 13], [13, 15]]
self.acc_idxs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
self.data_dir = os.path.join(opt.data_dir)
self.img_dir = os.path.join(self.data_dir, 'images', '{}2017'.format(split))
if split == 'test':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'image_info_test-dev2017.json').format(split)
else:
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'person_keypoints_{}2017.json').format(split)
self.max_objs = 32
# Fixed RNG plus PCA eigen pairs driving color-jitter augmentation.
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
self.split = split
self.opt = opt
print('==> initializing coco 2017 {} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
image_ids = self.coco.getImgIds()
if split == 'train':
# Keep only training images that contain at least one annotation.
self.images = []
for img_id in image_ids:
idxs = self.coco.getAnnIds(imgIds=[img_id])
if len(idxs) > 0:
self.images.append(img_id)
else:
self.images = image_ids
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
# Round to two decimals to keep the results JSON compact.
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
# Flatten {image_id: {class: [dets]}} into the COCO keypoint-results
# list. Each det carries [x1, y1, x2, y2, score, 34 keypoint coords].
# import pdb; pdb.set_trace()
detections = []
for image_id in all_bboxes:
for cls_ind in all_bboxes[image_id]:
category_id = 1
for dets in all_bboxes[image_id][cls_ind]:
bbox = dets[:4]
# Convert [x1, y1, x2, y2] into COCO's [x, y, w, h].
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = dets[4]
bbox_out = list(map(self._to_float, bbox))
# Append visibility flag 1 to every (x, y) pair -> 51 values.
keypoints = np.concatenate([
np.array(dets[5:39], dtype=np.float32).reshape(-1, 2),
np.ones((17, 1), dtype=np.float32)], axis=1).reshape(51).tolist()
keypoints = list(map(self._to_float, keypoints))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(score)),
"keypoints": keypoints
}
detections.append(detection)
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
# NOTE(review): the handle returned by open() is never closed here;
# a `with` block would be safer.
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# Runs the official COCOeval twice: keypoints then bounding boxes.
# result_json = os.path.join(opt.save_dir, "results.json")
# detections = convert_eval_format(all_boxes)
# json.dump(detections, open(result_json, "w"))
self.save_results(results, save_dir)
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, "keypoints")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_dets, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
# NOTE(review): the trailing '| ... |' tokens on the next line are
# corpus-dump artifacts fused onto the source, not real code.
coco_eval.summarize() | 4,244 | 34.375 | 80 | py |
houghnet | houghnet-master/src/lib/datasets/dataset/pascal.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
import numpy as np
import torch
import json
import os
import torch.utils.data as data
class PascalVOC(data.Dataset):
    """Pascal VOC detection dataset (trainval0712 for 'train', test2007 for 'val').

    Loads COCO-format annotations, exposes sorted image ids for the chosen
    split, and converts network detections into the per-class / per-image
    layout expected by the reval evaluation script.
    """

    num_classes = 20                  # foreground classes; background excluded
    default_resolution = [384, 384]   # network input resolution
    # Per-channel input normalization statistics.
    mean = np.array([0.485, 0.456, 0.406],
                    dtype=np.float32).reshape(1, 1, 3)
    std = np.array([0.229, 0.224, 0.225],
                   dtype=np.float32).reshape(1, 1, 3)

    def __init__(self, opt, split):
        super(PascalVOC, self).__init__()
        self.data_dir = os.path.join(opt.data_dir, 'voc')
        self.img_dir = os.path.join(self.data_dir, 'images')
        _ann_name = {'train': 'trainval0712', 'val': 'test2007'}
        self.annot_path = os.path.join(
            self.data_dir, 'annotations',
            'pascal_{}.json').format(_ann_name[split])
        self.max_objs = 50
        self.class_name = ['__background__', "aeroplane", "bicycle", "bird", "boat",
                           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
                           "horse", "motorbike", "person", "pottedplant", "sheep", "sofa",
                           "train", "tvmonitor"]
        self._valid_ids = np.arange(1, 21, dtype=np.int32)
        self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
        # Fixed RNG plus PCA eigen pairs driving color-jitter augmentation.
        self._data_rng = np.random.RandomState(123)
        self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                                 dtype=np.float32)
        self._eig_vec = np.array([
            [-0.58752847, -0.69563484, 0.41340352],
            [-0.5832747, 0.00994535, -0.81221408],
            [-0.56089297, 0.71832671, 0.41158938]
        ], dtype=np.float32)
        self.split = split
        self.opt = opt
        print('==> initializing pascal {} data.'.format(_ann_name[split]))
        self.coco = coco.COCO(self.annot_path)
        self.images = sorted(self.coco.getImgIds())
        self.num_samples = len(self.images)
        print('Loaded {} {} samples'.format(split, self.num_samples))

    def _to_float(self, x):
        """Round x to two decimals and return it as a float."""
        return float("{:.2f}".format(x))

    def convert_eval_format(self, all_bboxes):
        """Re-index {img_id: {class: dets}} as detections[class][image] lists."""
        detections = [[[] for __ in range(self.num_samples)]
                      for _ in range(self.num_classes + 1)]
        for i in range(self.num_samples):
            img_id = self.images[i]
            for j in range(1, self.num_classes + 1):
                if isinstance(all_bboxes[img_id][j], np.ndarray):
                    detections[j][i] = all_bboxes[img_id][j].tolist()
                else:
                    detections[j][i] = all_bboxes[img_id][j]
        return detections

    def __len__(self):
        return self.num_samples

    def save_results(self, results, save_dir):
        """Write converted detections to <save_dir>/results.json."""
        # BUG FIX: previously json.dump(..., open(...)) leaked the file
        # handle; the context manager guarantees flush + close.
        with open('{}/results.json'.format(save_dir), 'w') as f:
            json.dump(self.convert_eval_format(results), f)

    def run_eval(self, results, save_dir):
        """Dump results to disk and launch the external VOC evaluation script."""
        # result_json = os.path.join(save_dir, "results.json")
        # detections = self.convert_eval_format(results)
        # json.dump(detections, open(result_json, "w"))
        self.save_results(results, save_dir)
        os.system('python tools/reval.py ' +
                  '{}/results.json'.format(save_dir))
| 3,032 | 35.542169 | 80 | py |
houghnet | houghnet-master/src/lib/datasets/dataset/coco_seg.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
# COCO 2017 instance-segmentation dataset wrapper (80 classes). Like the
# detection variant, but convert_eval_format() also emits per-instance
# 'segmentation' masks, and run_eval() scores both bbox and segm metrics.
class COCOSEG(data.Dataset):
num_classes = 80
default_resolution = [512, 512]
# Per-channel input normalization statistics.
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(COCOSEG, self).__init__()
self.data_dir = os.path.join(opt.data_dir, opt.coco_dir)
# self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
self.img_dir = os.path.join(self.data_dir + '/images', '{}2017'.format(split))
# Annotation file depends on split and task ('exdet' uses the
# extreme-point variant of the instance annotations).
if split == 'test':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'image_info_test-dev2017.json').format(split)
else:
if opt.task == 'exdet':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_extreme_{}2017.json').format(split)
else:
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_{}2017.json').format(split)
self.max_objs = 70
self.class_name = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush']
# COCO's official (non-contiguous) category ids, mapped to contiguous
# 0-based training ids in cat_ids below.
self._valid_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 84, 85, 86, 87, 88, 89, 90]
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
# Deterministic per-class colors (presumably for visualization).
self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \
for v in range(1, self.num_classes + 1)]
# Fixed RNG plus PCA eigen pairs driving color-jitter augmentation.
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
# self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
# self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
self.split = split
self.opt = opt
print('==> initializing coco 2017 {} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
# Round to two decimals to keep the results JSON compact.
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
# Flatten {image_id: {class: dets}} into the COCO results list.
# Two per-class layouts are supported: a dict {'boxs', 'pred_mask'}
# (segmentation results) or a plain list of box arrays (detection).
# import pdb; pdb.set_trace()
detections = []
for image_id in all_bboxes:
for cls_ind in all_bboxes[image_id]:
category_id = self._valid_ids[cls_ind - 1]
if type(all_bboxes[image_id][cls_ind]) == dict:
for id in range(len(all_bboxes[image_id][cls_ind]['boxs'])):
bbox = all_bboxes[image_id][cls_ind]['boxs'][id]
mask = all_bboxes[image_id][cls_ind]['pred_mask'][id]
# Convert [x1, y1, x2, y2] into COCO's [x, y, w, h].
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox_out = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(score)),
"segmentation": mask
}
detections.append(detection)
else:
for bbox in all_bboxes[image_id][cls_ind]:
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox_out = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(score))
}
# Extra fields beyond the score are extreme-point coords.
if len(bbox) > 5:
extreme_points = list(map(self._to_float, bbox[5:13]))
detection["extreme_points"] = extreme_points
detections.append(detection)
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
# NOTE(review): the handle returned by open() is never closed here;
# a `with` block would be safer.
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# Unlike the other dataset classes, evaluation here feeds the
# converted detections to loadRes() directly (no JSON round trip),
# then scores both 'bbox' and 'segm' metrics.
detections = self.convert_eval_format(results)
coco_dets = self.coco.loadRes(detections)
coco_eval = COCOeval(self.coco, coco_dets, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_dets, "segm")
coco_eval.evaluate()
coco_eval.accumulate()
# NOTE(review): the trailing '| ... |' tokens on the next line are
# corpus-dump artifacts fused onto the source, not real code.
coco_eval.summarize() | 6,004 | 38.768212 | 82 | py |
houghnet | houghnet-master/src/lib/datasets/dataset/coco.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
# COCO 2017 object-detection dataset wrapper (80 classes). Selects the
# annotation file by split/task (with an optional mini-train subset),
# converts detections to the COCO results format, and runs COCOeval.
class COCO(data.Dataset):
num_classes = 80
default_resolution = [512, 512]
# Per-channel input normalization statistics.
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(COCO, self).__init__()
self.data_dir = os.path.join(opt.data_dir, opt.coco_dir)
self.img_dir = os.path.join(self.data_dir + '/images', '{}2017'.format(split))
# Annotation selection: test-dev info file for 'test'; extreme-point
# annotations for the 'exdet' task; optional mini-train subset when
# opt.minicoco is set; plain instances file otherwise.
if split == 'test':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'image_info_test-dev2017.json').format(split)
else:
if opt.task == 'exdet':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_extreme_{}2017.json').format(split)
else:
if opt.minicoco and split=="train":
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_mini{}2017.json').format(split)
else:
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_{}2017.json').format(split)
self.max_objs = 128
self.class_name = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush']
# COCO's official (non-contiguous) category ids, mapped to contiguous
# 0-based training ids in cat_ids below.
self._valid_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 84, 85, 86, 87, 88, 89, 90]
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
# Deterministic per-class colors (presumably for visualization).
self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \
for v in range(1, self.num_classes + 1)]
# Fixed RNG plus PCA eigen pairs driving color-jitter augmentation.
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
# self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
# self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
self.split = split
self.opt = opt
print('==> initializing coco 2017 {} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
# Round to two decimals to keep the results JSON compact.
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
# Flatten {image_id: {class: [box, ...]}} into the COCO results list.
# import pdb; pdb.set_trace()
detections = []
for image_id in all_bboxes:
for cls_ind in all_bboxes[image_id]:
category_id = self._valid_ids[cls_ind - 1]
for bbox in all_bboxes[image_id][cls_ind]:
# Convert [x1, y1, x2, y2] into COCO's [x, y, w, h].
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox_out = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(score))
}
# Extra fields beyond the score are extreme-point coords.
if len(bbox) > 5:
extreme_points = list(map(self._to_float, bbox[5:13]))
detection["extreme_points"] = extreme_points
detections.append(detection)
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
# NOTE(review): the handle returned by open() is never closed here;
# a `with` block would be safer.
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# Dump results to disk, reload them through loadRes, and run the
# official bbox COCOeval.
# result_json = os.path.join(save_dir, "results.json")
# detections = self.convert_eval_format(results)
# json.dump(detections, open(result_json, "w"))
self.save_results(results, save_dir)
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
| 5,426 | 39.2 | 82 | py |
houghnet | houghnet-master/src/lib/utils/image.py | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
import random
import torch
PI = np.pi
def flip(img):
    """Return a copy of img with its last axis reversed."""
    mirrored = img[:, :, ::-1]
    return mirrored.copy()
def transform_preds(coords, center, scale, output_size):
    """Map predicted (x, y) rows from output space back to the source image."""
    inverse = get_affine_transform(center, scale, 0, output_size, inv=1)
    mapped = np.zeros(coords.shape)
    for row in range(coords.shape[0]):
        mapped[row, 0:2] = affine_transform(coords[row, 0:2], inverse)
    return mapped
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
# Build a 2x3 affine matrix that maps a crop of side `scale` around
# `center`, rotated by `rot` degrees, onto an output_size canvas.
# With inv=1 the inverse mapping (output -> source) is returned.
# A scalar scale is treated as a square (s, s) crop.
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
# Direction vectors of the rotated source frame and the upright
# destination frame; together with the centers they fix two point
# correspondences, and get_3rd_point supplies the third.
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
# cv2 solves the affine transform from the three point pairs.
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def affine_transform(pt, t):
    """Apply the 2x3 affine matrix `t` to the 2D point `pt`."""
    homogeneous = np.array([pt[0], pt[1], 1.], dtype=np.float32)
    mapped = np.dot(t, homogeneous)
    return mapped[:2]
def get_3rd_point(a, b):
    """Third point completing the triangle: rotate the b->a vector 90 deg about b."""
    diff = a - b
    return b + np.array([-diff[1], diff[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
    """Rotate the 2D vector src_point by rot_rad radians (counter-clockwise)."""
    sin_r, cos_r = np.sin(rot_rad), np.cos(rot_rad)
    return [src_point[0] * cos_r - src_point[1] * sin_r,
            src_point[0] * sin_r + src_point[1] * cos_r]
def crop(img, center, scale, output_size, rot=0):
# Warp the patch described by (center, scale, rot) onto an
# output_size canvas using bilinear interpolation.
trans = get_affine_transform(center, scale, rot, output_size)
dst_img = cv2.warpAffine(img,
trans,
(int(output_size[0]), int(output_size[1])),
flags=cv2.INTER_LINEAR)
return dst_img
def gaussian_radius(det_size, min_overlap=0.7):
    """Smallest corner-shift radius that keeps IoU >= min_overlap.

    Solves the three quadratic cases of the CornerNet radius derivation for a
    (height, width) box and returns the tightest of the three roots.
    """
    height, width = det_size

    # Case 1 of the CornerNet derivation.
    a1 = 1
    b1 = height + width
    c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
    r1 = (b1 + np.sqrt(b1 ** 2 - 4 * a1 * c1)) / 2

    # Case 2.
    a2 = 4
    b2 = 2 * (height + width)
    c2 = (1 - min_overlap) * width * height
    r2 = (b2 + np.sqrt(b2 ** 2 - 4 * a2 * c2)) / 2

    # Case 3.
    a3 = 4 * min_overlap
    b3 = -2 * min_overlap * (height + width)
    c3 = (min_overlap - 1) * width * height
    r3 = (b3 + np.sqrt(b3 ** 2 - 4 * a3 * c3)) / 2

    return min(r1, r2, r3)
def gaussian2D(shape, sigma=1):
    """Dense (h, w) gaussian with peak value 1 at the center cell."""
    m, n = [(edge - 1.) / 2. for edge in shape]
    y, x = np.ogrid[-m:m + 1, -n:n + 1]

    h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
    # Flush vanishingly small tail values to exactly zero.
    h[h < np.finfo(h.dtype).eps * h.max()] = 0
    return h


def draw_umich_gaussian(heatmap, center, radius, k=1):
    """Splat a gaussian of the given radius onto heatmap at center, in place.

    Overlapping values merge via element-wise max so existing peaks are never
    reduced. Returns the (mutated) heatmap.
    """
    diameter = 2 * radius + 1
    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)

    cx, cy = int(center[0]), int(center[1])
    height, width = heatmap.shape[0:2]

    # Clip the stamp so it stays inside the heatmap bounds.
    left, right = min(cx, radius), min(width - cx, radius + 1)
    top, bottom = min(cy, radius), min(height - cy, radius + 1)

    masked_heatmap = heatmap[cy - top:cy + bottom, cx - left:cx + right]
    masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:  # TODO debug
        np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
    return heatmap
def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False):
# Write the regression target `value` (dim channels) into regmap around
# `center`, but only where the new gaussian dominates the existing
# heatmap — ties between overlapping objects go to the closer center.
# With is_offset=True (dim must be 2), the per-pixel offset toward the
# center is written instead of a constant value. Returns regmap.
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
value = np.array(value, dtype=np.float32).reshape(-1, 1, 1)
dim = value.shape[0]
reg = np.ones((dim, diameter*2+1, diameter*2+1), dtype=np.float32) * value
if is_offset and dim == 2:
delta = np.arange(diameter*2+1) - radius
reg[0] = reg[0] - delta.reshape(1, -1)
reg[1] = reg[1] - delta.reshape(-1, 1)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
# Clip the stamp so it stays inside the map bounds.
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom,
radius - left:radius + right]
masked_reg = reg[:, radius - top:radius + bottom,
radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
# Overwrite only pixels where this gaussian >= the current heatmap.
idx = (masked_gaussian >= masked_heatmap).reshape(
1, masked_gaussian.shape[0], masked_gaussian.shape[1])
masked_regmap = (1-idx) * masked_regmap + idx * masked_reg
regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap
return regmap
def draw_msra_gaussian(heatmap, center, sigma):
# Splat a gaussian of std `sigma` (support 6*sigma+1) onto heatmap at
# `center`, merging with element-wise max; returns the mutated heatmap.
tmp_size = sigma * 3
mu_x = int(center[0] + 0.5)
mu_y = int(center[1] + 0.5)
# NOTE(review): `w` is bound to shape[0] (rows) and `h` to shape[1]
# (cols) — the names look swapped but the subsequent x/h and y/w
# pairings are consistent for square maps; confirm for non-square use.
w, h = heatmap.shape[0], heatmap.shape[1]
# Upper-left / bottom-right corners of the stamp; bail out if the
# stamp lies entirely outside the map.
ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0:
return heatmap
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
# Intersection of the stamp with the map, in stamp (g_*) and map
# (img_*) coordinates.
g_x = max(0, -ul[0]), min(br[0], h) - ul[0]
g_y = max(0, -ul[1]), min(br[1], w) - ul[1]
img_x = max(0, ul[0]), min(br[0], h)
img_y = max(0, ul[1]), min(br[1], w)
heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum(
heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]],
g[g_y[0]:g_y[1], g_x[0]:g_x[1]])
return heatmap
def grayscale(image):
# Single-channel conversion; expects a BGR image (cv2 convention).
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def lighting_(data_rng, image, alphastd, eigval, eigvec):
    """Add PCA-based lighting noise to the image, in place."""
    noise = data_rng.normal(scale=alphastd, size=(3, ))
    offset = np.dot(eigvec, eigval * noise)
    image += offset
def blend_(alpha, image1, image2):
# In-place convex blend: image1 <- alpha*image1 + (1-alpha)*image2.
# NOTE: image2 is also scaled in place (clobbered) as a side effect.
image1 *= alpha
image2 *= (1 - alpha)
image1 += image2
def saturation_(data_rng, image, gs, gs_mean, var):
    """Blend the image toward its grayscale version by a random factor, in place."""
    factor = 1. + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs[:, :, None])
def brightness_(data_rng, image, gs, gs_mean, var):
    """Scale the image in place by a factor from U[1-var, 1+var].

    gs and gs_mean are unused; they are kept for the uniform augmentation
    signature shared with saturation_/contrast_.
    """
    factor = 1. + data_rng.uniform(low=-var, high=var)
    image *= factor
def contrast_(data_rng, image, gs, gs_mean, var):
    """Blend the image toward its mean gray level by a random factor, in place."""
    factor = 1. + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs_mean)
def color_aug(data_rng, image, eig_val, eig_vec):
    """Apply brightness/contrast/saturation jitter in random order, then
    PCA lighting noise — all in place on `image`."""
    ops = [brightness_, contrast_, saturation_]
    random.shuffle(ops)

    gs = grayscale(image)
    gs_mean = gs.mean()
    for op in ops:
        op(data_rng, image, gs, gs_mean, 0.4)
    lighting_(data_rng, image, 0.1, eig_val, eig_vec)
| 7,720 | 31.305439 | 88 | py |
houghnet | houghnet-master/src/lib/utils/utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
class AverageMeter(object):
    """Tracks the latest value, running sum, count, and mean of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        # Guard against division by zero when n == 0 on the first update.
        if self.count > 0:
            self.avg = self.sum / self.count
GaitForeMer | GaitForeMer-main/training/transformer_model_fn.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implments the model function for the POTR model."""
import numpy as np
import os
import sys
import argparse
import json
import time
# from potr.data.Gait17JointsDataset import Gait17JointsDataset
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from sklearn.metrics import classification_report
from numpyencoder import NumpyEncoder
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import training.seq2seq_model_fn as seq2seq_model_fn
import models.PoseTransformer as PoseTransformer
import models.PoseEncoderDecoder as PoseEncoderDecoder
import data.NTURGDDataset as NTURGDDataset
import data.GaitJointsDataset as GaitJointsDataset
import utils.utils as utils
_DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
_WEIGHT_DECAY = 0.00001
_NSEEDS = 8
# Model function wiring the POTR pose transformer into the generic
# seq2seq training loop: builds the model and optimizer and defines the
# layer-wise pose loss plus the activity/score classification loss.
class POTRModelFn(seq2seq_model_fn.ModelFn):
def __init__(self,
params,
train_dataset_fn,
eval_dataset_fn,
pose_encoder_fn=None,
pose_decoder_fn=None):
super(POTRModelFn, self).__init__(
params, train_dataset_fn, eval_dataset_fn, pose_encoder_fn, pose_decoder_fn)
self._loss_fn = self.layerwise_loss_fn
self.task = params['task']
if self.task == 'downstream':
# Inverse-frequency class weights for the 4 gait impairment
# scores (9/28/13/4 are presumably per-class sample counts —
# verify against the dataset), normalized to sum to 1.
weights = torch.tensor([9., 28., 13., 4.])
weights = weights / weights.sum() # turn into percentage
weights = 1.0 / weights # inverse
weights = weights / weights.sum()
self._loss_weights = weights.to(_DEVICE)
self._weighted_ce_loss = nn.CrossEntropyLoss(weight=self._loss_weights)
print('Using a weighted CE loss for gait impairment score prediction.')
else:
print('Using a standard CE loss for activity prediction.')
def smooth_l1(self, decoder_pred, decoder_gt):
# Mean-reduced Huber (smooth L1) pose loss.
l1loss = nn.SmoothL1Loss(reduction='mean')
return l1loss(decoder_pred, decoder_gt)
def loss_l1(self, decoder_pred, decoder_gt):
# Mean-reduced plain L1 pose loss.
return nn.L1Loss(reduction='mean')(decoder_pred, decoder_gt)
def loss_activity(self, logits, class_gt):
"""Computes entropy loss from logits between predictions and class."""
# Downstream task uses the class-weighted CE built in __init__.
if self.task == 'downstream':
return self._weighted_ce_loss(logits, class_gt)
else:
return nn.functional.cross_entropy(logits, class_gt, reduction='mean')
def compute_class_loss(self, class_logits, class_gt):
"""Computes the class loss for each of the decoder layers predictions or memory."""
# Averages the per-layer classification losses.
class_loss = 0.0
for l in range(len(class_logits)):
class_loss += self.loss_activity(class_logits[l], class_gt)
return class_loss/len(class_logits)
def select_loss_fn(self):
# Map the configured loss name to the corresponding pose-loss method.
# NOTE(review): self.loss_mse is expected to come from the ModelFn
# base class — confirm it exists there.
if self._params['loss_fn'] == 'mse':
return self.loss_mse
elif self._params['loss_fn'] == 'smoothl1':
return self.smooth_l1
elif self._params['loss_fn'] == 'l1':
return self.loss_l1
else:
raise ValueError('Unknown loss name {}.'.format(self._params['loss_fn']))
def layerwise_loss_fn(self, decoder_pred, decoder_gt, class_logits=None, class_gt=None):
"""Computes layerwise loss between predictions and ground truth."""
# Pose loss averaged over every decoder layer's prediction; the
# classification loss (if logits are given) is averaged the same way.
pose_loss = 0.0
loss_fn = self.select_loss_fn()
for l in range(len(decoder_pred)):
pose_loss += loss_fn(decoder_pred[l], decoder_gt)
pose_loss = pose_loss/len(decoder_pred)
if class_logits is not None:
return pose_loss, self.compute_class_loss(class_logits, class_gt)
return pose_loss, None
def init_model(self, pose_encoder_fn=None, pose_decoder_fn=None):
# Builds the POTR network from the hyper-parameter dict.
self._model = PoseTransformer.model_factory(
self._params,
pose_encoder_fn,
pose_decoder_fn
)
def select_optimizer(self):
# AdamW with fixed betas and the module-level weight-decay constant.
optimizer = optim.AdamW(
self._model.parameters(), lr=self._params['learning_rate'],
betas=(0.9, 0.999),
weight_decay=_WEIGHT_DECAY
)
return optimizer
def dataset_factory(params, fold, model_prefix):
    """Instantiate the dataset pipeline named by params['dataset'].

    `fold` is forwarded to the pd_gait factory; `model_prefix` is currently
    unused but kept for interface compatibility with callers.
    Raises ValueError for an unrecognized dataset name.
    """
    name = params['dataset']
    if name == 'ntu_rgbd':
        return NTURGDDataset.dataset_factory(params)
    if name == 'pd_gait':
        return GaitJointsDataset.dataset_factory(params, fold)
    raise ValueError('Unknown dataset {}'.format(name))
def single_vote(pred):
    """
    Get majority vote of predicted classes for the clips in one video.

    Ties are broken toward the smallest class index (first maximum), which
    matches the original strict-inequality scan.

    :param pred: list of predicted class for each clip of one video
    :return: majority vote of predicted class for one video (int)
    """
    # np.bincount + argmax replaces the hand-rolled counting/argmax loop;
    # argmax returns the first index attaining the maximum count.
    counts = np.bincount(np.asarray(pred))
    return int(np.argmax(counts))
def save_json(filename, attributes, names):
    """
    Save training parameters and evaluation results to json file.

    :param filename: save filename
    :param attributes: attribute values to save, parallel to ``names``
    :param names: json key to use for each attribute
    """
    # dict(zip(...)) replaces the manual index loop; pairing is positional.
    payload = dict(zip(names, attributes))
    with open(filename, "w", encoding="utf8") as outfile:
        # NumpyEncoder (project-defined) serializes numpy scalars/arrays.
        json.dump(payload, outfile, indent=4, cls=NumpyEncoder)
if __name__ == '__main__':
    # ---- command-line interface ----
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_prefix', type=str, default='')
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--data_path', type=str)
    parser.add_argument('--learning_rate', type=float, default=1e-5)
    parser.add_argument('--max_epochs', type=int, default=500)
    parser.add_argument('--steps_per_epoch', type=int, default=200)
    parser.add_argument('--action', nargs='*', type=str, default=None)
    parser.add_argument('--use_one_hot', action='store_true')
    parser.add_argument('--init_fn', type=str, default='xavier_init')
    parser.add_argument('--include_last_obs', action='store_true')
    parser.add_argument('--task', type=str, default='downstream', choices=['pretext', 'downstream'])
    parser.add_argument('--downstream_strategy', default='both_then_class', choices=['both', 'class', 'both_then_class'])
    # pose transformers related parameters
    parser.add_argument('--model_dim', type=int, default=256)
    parser.add_argument('--num_encoder_layers', type=int, default=4)
    parser.add_argument('--num_decoder_layers', type=int, default=4)
    parser.add_argument('--num_heads', type=int, default=4)
    parser.add_argument('--dim_ffn', type=int, default=2048)
    parser.add_argument('--dropout', type=float, default=0.3)
    parser.add_argument('--source_seq_len', type=int, default=50)
    parser.add_argument('--target_seq_len', type=int, default=25)
    parser.add_argument('--max_gradient_norm', type=float, default=0.1)
    parser.add_argument('--lr_step_size', type=int, default=400)
    parser.add_argument('--learning_rate_fn', type=str, default='step')
    parser.add_argument('--warmup_epochs', type=int, default=100)
    parser.add_argument('--pose_format', type=str, default='rotmat')
    parser.add_argument('--remove_low_std', action='store_true')
    parser.add_argument('--remove_global_trans', action='store_true')
    parser.add_argument('--loss_fn', type=str, default='l1')
    parser.add_argument('--pad_decoder_inputs', action='store_true')
    parser.add_argument('--pad_decoder_inputs_mean', action='store_true')
    parser.add_argument('--use_wao_amass_joints', action='store_true')
    parser.add_argument('--non_autoregressive', action='store_true')
    parser.add_argument('--pre_normalization', action='store_true')
    parser.add_argument('--use_query_embedding', action='store_true')
    parser.add_argument('--predict_activity', action='store_true')
    parser.add_argument('--use_memory', action='store_true')
    parser.add_argument('--query_selection', action='store_true')
    parser.add_argument('--activity_weight', type=float, default=1.0)
    parser.add_argument('--pose_embedding_type', type=str, default='gcn_enc')
    parser.add_argument('--encoder_ckpt', type=str, default=None)
    parser.add_argument('--dataset', type=str, default='h36m_v2')
    parser.add_argument('--skip_rate', type=int, default=5)
    parser.add_argument('--eval_num_seeds', type=int, default=_NSEEDS)
    parser.add_argument('--copy_method', type=str, default=None)
    parser.add_argument('--finetuning_ckpt', type=str, default=None)
    parser.add_argument('--pos_enc_alpha', type=float, default=10)
    parser.add_argument('--pos_enc_beta', type=float, default=500)
    args = parser.parse_args()
    params = vars(args)

    # Downstream (PD gait) evaluation is leave-one-out over 54 subjects;
    # pretext training nominally uses a single fold.
    if params['task'] == 'downstream':
        num_folds = 54
    else:
        num_folds = 1

    # Accumulators over folds.
    total_preds = []  # one majority-vote class per fold/video
    total_gts = []    # one ground-truth label per fold
    preds_votes = []  # per-clip predicted classes, per fold
    preds_probs = []  # per-clip class probabilities, per fold

    # NOTE(review): all 54 folds are iterated regardless of num_folds above,
    # so the pretext branch also loops 54 times -- confirm this is intended.
    all_folds = range(1, 55)
    for fold in all_folds:
        print(f'Fold {fold} out of {num_folds}')
        utils.create_dir_tree(params['model_prefix'])  # moving this up because dataset mean and std stored under it
        train_dataset_fn, eval_dataset_fn = dataset_factory(params, fold, params['model_prefix'])
        # Input/pose dimensions come from the dataset, not the CLI.
        params['input_dim'] = train_dataset_fn.dataset._data_dim
        params['pose_dim'] = train_dataset_fn.dataset._pose_dim
        pose_encoder_fn, pose_decoder_fn = \
            PoseEncoderDecoder.select_pose_encoder_decoder_fn(params)
        # Persist the fully-resolved configuration next to the checkpoints.
        config_path = os.path.join(params['model_prefix'], 'config', 'config.json')
        with open(config_path, 'w') as file_:
            json.dump(params, file_, indent=4)
        model_fn = POTRModelFn(
            params, train_dataset_fn,
            eval_dataset_fn,
            pose_encoder_fn, pose_decoder_fn
        )
        if params['task'] == 'downstream':
            predictions, gts, pred_probs = model_fn.train()
            print('predicitons:', predictions)
            # save predicted classes
            preds_votes.append(predictions.tolist())
            # save predicted probabilities
            preds_probs.append(pred_probs.tolist())
            # save final predictions and true labels
            if np.shape(gts)[0] == 1:  # only 1 clip
                pred = int(predictions)
            else:
                pred = single_vote(predictions)
            gt = gts[0]
            total_preds.append(pred)
            total_gts.append(int(gt))
            # Free GPU memory before the next fold's model is built.
            del model_fn, pose_encoder_fn, pose_decoder_fn
            # results.json is rewritten every fold so partial runs are kept.
            attributes = [preds_votes, total_preds, preds_probs, total_gts]
            names = ['predicted_classes', 'predicted_final_classes', 'prediction_list', 'true_labels']
            jsonfilename = os.path.join(params['model_prefix'], 'results.json')
            save_json(jsonfilename, attributes, names)
        else:
            model_fn.train()
    if params['task'] == 'downstream':
        # Leave-one-out summary across all folds.
        print(classification_report(total_gts, total_preds))
| 11,607 | 37.059016 | 119 | py |
GaitForeMer | GaitForeMer-main/training/seq2seq_model_fn.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implements a model function estimator for training, evaluation and predict.
Take and adapted from the code presented in [4]
[1] https://github.com/asheshjain399/RNNexp/issues/6#issuecomment-249404882
[2] https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/dataParser/Utils/motionGenerationError.m#L40-L54
[3] https://github.com/asheshjain399/RNNexp/issues/6#issuecomment-247769197
[4] https://arxiv.org/pdf/1705.02445.pdf
"""
import sys
import numpy as np
import json
import sys
import os
import argparse
import time
from abc import abstractmethod
import tqdm
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
# import wandb
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
import utils.WarmUpScheduler as warm_up_scheduler
import visualize.viz as viz
import models.seq2seq_model as seq2seq_model
_DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# min threshold for mean average precision in metters
# Set to 10 cm
_MAP_TRESH = 0.10
class ModelFn(object):
    """Implements the model functionalities: training, evaluation and prediction."""

    def __init__(
            self,
            params,
            train_dataset_fn=None,
            eval_dataset_fn=None,
            pose_encoder_fn=None,
            pose_decoder_fn=None):
        """Initialization of model function.

        Args:
            params: dict of hyper-parameters and runtime options.
            train_dataset_fn: training DataLoader (or equivalent iterable).
            eval_dataset_fn: evaluation DataLoader.
            pose_encoder_fn: factory for the pose-embedding network.
            pose_decoder_fn: factory for the pose-decoding network.
        """
        self._params = params
        self._train_dataset_fn = train_dataset_fn
        self._eval_dataset_fn = eval_dataset_fn
        self._visualize = False
        thisname = self.__class__.__name__
        # self._norm_stats = train_dataset_fn.dataset._norm_stats
        self._norm_stats = None
        # Milliseconds at which H3.6M-style errors are reported.
        self._ms_range = [80, 160, 320, 400, 560, 1000]
        self.init_model(pose_encoder_fn, pose_decoder_fn)
        # Default reconstruction loss; subclasses may rebind.
        self._loss_fn = self.loss_mse
        self._model.to(_DEVICE)
        self._optimizer_fn = self.select_optimizer()
        self.select_lr_fn()
        self.finetune_init()
        self._lr_db_curve = []
        # The 'beatles' schedule steps per iteration; all others per epoch.
        lr_type = 'stepwise' if self._params['learning_rate_fn'] == 'beatles' \
            else 'epochwise'
        self._params['lr_schedule_type'] = lr_type
        self.evaluate_fn = self.evaluate_nturgbd
        self._writer = SummaryWriter(
            os.path.join(self._params['model_prefix'], 'tf_logs'))
        self._time_range_eval = []
        # Count trainable parameters (reporting only; prints are disabled).
        m_params = filter(lambda p: p.requires_grad, self._model.parameters())
        nparams = sum([np.prod(p.size()) for p in m_params])
        # print arguments
        # print('[INFO] ({}) This module has {} parameters!'.format(thisname, nparams))
        # print('[INFO] ({}) Intializing ModelFn with params'.format(thisname))
        # for k,v in self._params.items():
        #   print('[INFO] ({}) {}: {}'.format(thisname, k, v))
def finetune_init(self):
    """Optionally warm-start the model from ``finetuning_ckpt``."""
    if self._params['finetuning_ckpt'] is not None:
        print('[INFO] (finetune_model) Finetuning from:',
              self._params['finetuning_ckpt'])
        # edits made here to exclude activity prediction head
        model_state_dict = torch.load(self._params['finetuning_ckpt'], map_location=_DEVICE)
        if 'gait' in self._params['dataset']:  # exclude prediction head
            # The gait task has a different class count, so the pretext
            # activity head cannot be reused; drop it and load non-strictly.
            del model_state_dict['_action_head.0.weight']
            del model_state_dict['_action_head.0.bias']
            self._model.load_state_dict(model_state_dict, strict=False)
        else:
            self._model.load_state_dict(model_state_dict)
def select_lr_fn(self):
    """Build the LR scheduler, optionally wrapped with gradual warm-up."""
    base_scheduler = self.get_lr_fn()
    schedule_name = self._params['learning_rate_fn']
    needs_warmup = self._params['warmup_epochs'] > 0 and schedule_name != 'beatles'
    if needs_warmup:
        base_scheduler = warm_up_scheduler.GradualWarmupScheduler(
            self._optimizer_fn, multiplier=1,
            total_epoch=self._params['warmup_epochs'],
            after_scheduler=base_scheduler
        )
    self._lr_scheduler = base_scheduler
def get_lr_fn(self):
    """Creates the function to be used to generate the learning rate.

    Returns:
        A ``torch.optim.lr_scheduler`` instance selected by
        ``self._params['learning_rate_fn']``.

    Raises:
        ValueError: for an unknown schedule name.
    """
    if self._params['learning_rate_fn'] == 'step':
        # Decay by 10x every lr_step_size epochs.
        return torch.optim.lr_scheduler.StepLR(
            self._optimizer_fn, step_size=self._params['lr_step_size'], gamma=0.1
        )
    elif self._params['learning_rate_fn'] == 'exponential':
        return torch.optim.lr_scheduler.ExponentialLR(
            self._optimizer_fn, gamma=0.95
        )
    elif self._params['learning_rate_fn'] == 'linear':
        # sets learning rate by multipliying initial learning rate times a function
        lr0, T = self._params['learning_rate'], self._params['max_epochs']
        lrT = lr0 * 0.5
        m = (lrT - 1) / T
        # NOTE(review): slope uses (lrT - 1) rather than a ratio of lr0; for
        # small lr0 the multiplier hits 0 near epoch T -- confirm intended.
        lambda_fn = lambda epoch: m * epoch + 1.0
        return torch.optim.lr_scheduler.LambdaLR(
            self._optimizer_fn, lr_lambda=lambda_fn
        )
    elif self._params['learning_rate_fn'] == 'beatles':
        # D^(-0.5)*min(i^(-0.5), i*warmup_steps^(-1.5))
        # Transformer-style (Noam) schedule.
        D = float(self._params['model_dim'])
        warmup = self._params['warmup_epochs']
        lambda_fn = lambda e: (D ** (-0.5)) * min((e + 1.0) ** (-0.5), (e + 1.0) * warmup ** (-1.5))
        return torch.optim.lr_scheduler.LambdaLR(
            self._optimizer_fn, lr_lambda=lambda_fn
        )
    else:
        raise ValueError('Unknown learning rate function: {}'.format(
            self._params['learning_rate_fn']))
@abstractmethod
def init_model(self, pose_encoder_fn, pose_decoder_fn):
    """Build ``self._model``; implemented by task-specific subclasses."""
    pass

@abstractmethod
def select_optimizer(self):
    """Return the optimizer for ``self._model``; implemented by subclasses."""
    pass
def loss_mse(self, decoder_pred, decoder_gt):
    """Computes the L2 loss between predictions and ground truth."""
    squared_error = (decoder_pred - decoder_gt) ** 2
    return squared_error.mean()
@abstractmethod
def compute_loss(self, inputs=None, target=None, preds=None, class_logits=None, class_gt=None):
    # NOTE(review): despite @abstractmethod this carries a default body that
    # forwards FOUR args to self._loss_fn, while the default _loss_fn
    # (loss_mse) takes two -- subclasses apparently override this method or
    # rebind _loss_fn. TODO confirm against the concrete POTRModelFn.
    return self._loss_fn(preds, target, class_logits, class_gt)
def print_logs(self, step_loss, current_step, pose_loss, activity_loss, selection_loss):
    """Print a one-line training log for the current step."""
    selection_logs = ''
    if self._params['query_selection']:
        selection_logs = 'selection loss {:.4f}'.format(selection_loss)
    if self._params['predict_activity']:
        # Includes both pose and activity-classification terms.
        print("[INFO] global {:06d}; step {:04d}; pose_loss {:4f} - class_loss {:4f}; step_loss: {:.4f}; lr: {:.2e} {:s}".\
            format(self._global_step, current_step, pose_loss, activity_loss,
                   step_loss, self._params['learning_rate'], selection_logs)
        )
    else:
        print("[INFO] global {3:06d}; step {0:04d}; step_loss: {1:.4f}; lr: {2:.2e} {4:s}".\
            format(current_step, step_loss, self._params['learning_rate'],
                   self._global_step, selection_logs)
        )
def compute_selection_loss(self, inputs, target, cols_softmax=False):
    """Compute the query entry selection loss.

    Args:
        inputs: [batch_size, src_len, tgt_len] predicted selection matrix.
        target: [batch_size, src_len, tgt_len] src/tgt distance matrix.
        cols_softmax: softmax the (negated) target over dim 2 if True, else dim 1.
    """
    softmax_dim = 2 if cols_softmax else 1
    soft_target = F.softmax(-target, dim=softmax_dim)
    mse = torch.nn.MSELoss(reduction='mean')
    return mse(inputs, soft_target)
def train_one_epoch(self, epoch):
    """Trains for a number of steps before evaluation.

    Args:
        epoch: current epoch index (used by the downstream strategy switch).

    Returns:
        Mean epoch loss, plus mean activity loss when ``predict_activity``.
    """
    epoch_loss = 0
    act_loss = 0
    sel_loss = 0
    N = len(self._train_dataset_fn)
    for current_step, sample in enumerate(self._train_dataset_fn):
        self._optimizer_fn.zero_grad()
        # Move all tensor entries to the device; string/euler entries stay.
        for k in sample.keys():
            if k == 'actions' or k == 'decoder_outputs_euler' or k == 'action_str':
                continue
            sample[k] = sample[k].to(_DEVICE)
        decoder_pred = self._model(
            sample['encoder_inputs'], sample['decoder_inputs'])
        selection_loss = 0
        if self._params['query_selection']:
            # Last element of the prediction tuple carries the selection matrix.
            prob_mat = decoder_pred[-1][-1]
            selection_loss = self.compute_selection_loss(
                inputs=prob_mat,
                target=sample['src_tgt_distance']
            )
            sel_loss += selection_loss
        pred_class, gt_class = None, None
        if self._params['predict_activity']:
            gt_class = sample['action_ids']  # one label for the sequence
            pred_class = decoder_pred[1]
        pose_loss, activity_loss = self.compute_loss(
            inputs=sample['encoder_inputs'],
            target=sample['decoder_outputs'],
            preds=decoder_pred[0],
            class_logits=pred_class,
            class_gt=gt_class
        )
        step_loss = pose_loss + selection_loss
        if self._params['predict_activity']:
            if self._params['task'] == 'pretext':
                step_loss += self._params['activity_weight'] * activity_loss
            else:
                # Downstream fine-tuning strategies:
                #   both            -> joint pose + class loss throughout
                #   class           -> classification loss only
                #   both_then_class -> joint until epoch 50, then class only
                if self._params['downstream_strategy'] == 'both':
                    step_loss += self._params['activity_weight'] * activity_loss
                elif self._params['downstream_strategy'] == 'class':
                    step_loss = activity_loss
                elif self._params['downstream_strategy'] == 'both_then_class':
                    if epoch >= 50:
                        step_loss = activity_loss
                    else:
                        step_loss += self._params['activity_weight'] * activity_loss
            act_loss += activity_loss
        epoch_loss += step_loss.item()
        step_loss.backward()
        if self._params['max_gradient_norm'] is not None:
            torch.nn.utils.clip_grad_norm_(
                self._model.parameters(), self._params['max_gradient_norm'])
        self._optimizer_fn.step()
        if current_step % 10 == 0:
            step_loss = step_loss.cpu().data.numpy()
            # self.print_logs(step_loss, current_step, pose_loss, activity_loss,
            #     selection_loss)
        # Stepwise schedules (beatles) advance every iteration.
        self.update_learning_rate(self._global_step, mode='stepwise')
        self._global_step += 1
    if self._params['query_selection']:
        self._scalars['train_selectioin_loss'] = sel_loss / N
    if self._params['predict_activity']:
        return epoch_loss / N, act_loss / N
    return epoch_loss / N
def train(self):
    """Main training loop.

    Returns:
        (predictions, ground-truth labels, prediction probabilities) from the
        last evaluation -- only meaningful when ``predict_activity`` is set,
        since those come from positions 3-5 of the evaluation tuple.
    """
    self._params['learning_rate'] = self._lr_scheduler.get_lr()[0]
    self._global_step = 1
    thisname = self.__class__.__name__
    # wandb.init(name='training', project='GaitForeMer')
    for e in range(self._params['max_epochs']):
        self._scalars = {}
        self._model.train()
        start_time = time.time()
        epoch_loss = self.train_one_epoch(e)
        act_log = ''
        if self._params['predict_activity']:
            # train_one_epoch returned a (pose, activity) pair here.
            act_loss = epoch_loss[1]
            epoch_loss = epoch_loss[0]
            act_log = '; activity_loss: {}'.format(act_loss)
            self._scalars['act_loss_train'] = act_loss
        self._scalars['epoch_loss'] = epoch_loss
        print("epoch {0:04d}; epoch_loss: {1:.4f}".format(e, epoch_loss) + act_log)
        self.flush_extras(e, 'train')
        _time = time.time() - start_time
        self._model.eval()
        eval_loss = self.evaluate_fn(e, _time)
        act_log = ''
        if self._params['predict_activity']:
            self._scalars['act_loss_eval'] = eval_loss[1]
            self._scalars['accuracy'] = eval_loss[2]
            act_log = '; act_eval_loss {}; accuracy {}'.format(eval_loss[1], eval_loss[2])
            eval_activity_loss = eval_loss[1]
            eval_accuracy = eval_loss[2]
            # eval_loss = eval_loss[0]
        # NOTE(review): eval_loss[0] assumes the tuple form returned when
        # predict_activity is on; with it off evaluate_nturgbd returns a
        # scalar tensor -- confirm this path is only used with activity on.
        self._scalars['eval_loss'] = eval_loss[0]
        print("[INFO] ({}) Epoch {:04d}; eval_loss: {:.4f}; lr: {:.2e}".format(
            thisname, e, eval_loss[0], self._params['learning_rate']) + act_log)
        self.write_summary(e)
        # wandb_logs = {"train loss": epoch_loss, "train activity loss": act_loss, "eval loss": eval_loss, "eval activity loss": eval_activity_loss, "eval accuracy": eval_accuracy}
        # wandb.log(wandb_logs)
        model_path = os.path.join(
            self._params['model_prefix'], 'models', 'ckpt_epoch_%04d.pt' % e)
        # Checkpoint every 100 epochs only.
        if (e + 1) % 100 == 0:
            torch.save(self._model.state_dict(), model_path)
        self.update_learning_rate(e, mode='epochwise')
        self.flush_extras(e, 'eval')
    # return predictions and real ones
    predictions = eval_loss[3]
    gt = eval_loss[4]
    pred_probs = eval_loss[5]
    return predictions, gt, pred_probs
    # save the last one
    # model_path = os.path.join(
    #     self._params['model_prefix'], 'models', 'ckpt_epoch_%04d.pt'%e)
    # torch.save(self._model.state_dict(). model_path)
    # self.flush_curves()
def write_summary(self, epoch):
    """Write the epoch's scalars gathered in ``self._scalars`` to TensorBoard."""
    # for action_, ms_errors_ in ms_eval_loss.items():
    self._writer.add_scalars(
        'loss/recon_loss',
        {'train': self._scalars['epoch_loss'], 'eval': self._scalars['eval_loss']},
        epoch
    )
    # write scalars for H36M dataset prediction style
    action_ = self._train_dataset_fn.dataset._monitor_action
    if 'ms_eval_loss' in self._scalars.keys():
        range_len = len(self._scalars['ms_eval_loss'][action_])
        # range_len = len(self._ms_range)
        # Per-millisecond errors for the monitored action.
        ms_dict = {str(self._ms_range[i]): self._scalars['ms_eval_loss'][action_][i]
                   for i in range(range_len)}
        ms_e = np.concatenate([np.array(v).reshape(1, range_len)
                               for k, v in self._scalars['ms_eval_loss'].items()], axis=0)
        self._writer.add_scalars('ms_loss/eval-' + action_, ms_dict, epoch)
        ms_e = np.mean(ms_e, axis=0)  # (n_actions)
        self._time_range_eval.append(np.expand_dims(ms_e, axis=0))  # (1, n_actions)
        # Average across actions at each reporting horizon.
        all_ms = {str(self._ms_range[i]): ms_e[i] for i in range(len(ms_e))}
        self._writer.add_scalars('ms_loss/eval-all', all_ms, epoch)
        self._writer.add_scalar('MSRE/msre_eval', self._scalars['msre'], epoch)
        self._writer.add_scalars('time_range/eval',
                                 {'short-term': np.mean(ms_e[:4]), 'long-term': np.mean(ms_e)}, epoch)
    if self._params['predict_activity']:
        self._writer.add_scalars(
            'loss/class_loss',
            {'train': self._scalars['act_loss_train'], 'eval': self._scalars['act_loss_eval']},
            epoch
        )
        self._writer.add_scalar('class/accuracy', self._scalars['accuracy'], epoch)
    if self._params['query_selection']:
        # NOTE(review): reads 'eval_selection_loss' but training stores
        # 'train_selectioin_loss' (sic); the eval key must be set elsewhere.
        self._writer.add_scalars(
            'selection/query_selection',
            {'eval': self._scalars['eval_selection_loss'],
             'train': self._scalars['train_selectioin_loss']},
            epoch
        )
    if 'mAP' in self._scalars.keys():
        self._writer.add_scalar('mAP/mAP', self._scalars['mAP'], epoch)
    if 'MPJPE' in self._scalars.keys():
        self._writer.add_scalar('MPJPE/MPJPE', self._scalars['MPJPE'], epoch)
def print_range_summary(self, action, mean_mean_errors):
mean_eval_error = []
# Pretty print of the results for 80, 160, 320, 400, 560 and 1000 ms
print("{0: <16} |".format(action), end="")
for ms in [1,3,7,9,13,24]:
if self._params['target_seq_len'] >= ms + 1:
print(" {0:.3f} |".format(mean_mean_errors[ms]), end="")
mean_eval_error.append(mean_mean_errors[ms])
else:
print(" n/a |", end="")
print()
return mean_eval_error
def print_table_header(self):
    """Print the milliseconds header row of the evaluation table."""
    print()
    cells = ["{0: <16} |".format("milliseconds")]
    cells += [" {0:5d} |".format(ms) for ms in (80, 160, 320, 400, 560, 1000)]
    print("".join(cells))
def flush_curves(self):
path_ = os.path.join(self._params['model_prefix'], 'loss_info')
os.makedirs(path_, exist_ok=True)
path_ = os.path.join(path_, 'eval_time_range.npy')
np.save(path_, np.concatenate(self._time_range_eval, axis=0))
path_ = os.path.join(path_, 'lr_schedule.npy')
np.save(path_, np.array(self._lr_db_curve))
def update_learning_rate(self, epoch_step, mode='stepwise'):
    """Update learning rate handler updating only when the mode matches.

    Args:
        epoch_step: epoch index (epochwise mode) or global step (stepwise).
        mode: which schedule family this call belongs to; no-op otherwise.
    """
    if self._params['lr_schedule_type'] == mode:
        # NOTE(review): passing the epoch to step() is deprecated in newer
        # torch versions -- confirm against the pinned torch release.
        self._lr_scheduler.step(epoch_step)
        self._writer.add_scalar(
            'learning_rate/lr', self._params['learning_rate'], epoch_step)
        self._lr_db_curve.append([self._params['learning_rate'], epoch_step])
        # self._params['learning_rate'] = self._lr_scheduler.get_last_lr()[0]
        self._params['learning_rate'] = self._lr_scheduler.get_lr()[0]
@abstractmethod
def flush_extras(self, epoch, phase):
    """Hook for subclasses to log extra artifacts per epoch/phase; no-op here."""
    pass
def compute_class_accurracy_sequence(self, class_logits, class_gt):
    """Fraction of sequences whose argmax class matches the ground truth."""
    # softmax on last dimension and get max on last dimension
    predicted = class_logits.softmax(-1).argmax(-1)
    hits = (predicted == class_gt).float().sum()
    return (hits / class_logits.size()[0]).item()
def compute_class_accurracy_instance(self, class_logits, class_gt):
# softmax on last dimension and get max on last dimension
tar_seq_len = self._params['target_seq_len']
class_pred = torch.argmax(class_logits.softmax(-1), -1)
accuracy = (class_pred == class_gt).float().sum()
accuracy = accuracy / (class_logits.size()[0]*tar_seq_len)
return accuracy.item()
def validation_srnn_ms(self, sample, decoder_pred):
    """H3.6M-style evaluation: Euler-angle error per action at fixed horizons.

    Args:
        sample: batch dict; must include per-action 'decoder_outputs_euler'
            and the 'actions' list.
        decoder_pred: flattened predictions covering all actions and seeds.

    Returns:
        dict mapping action name -> list of errors at the reporting horizons.
    """
    # the data was flatened from a sequence of size
    # [n_actions, n_seeds, target_length, pose_size]
    n_actions = len(self._params['action_subset'])
    seq_shape = (n_actions, self._params['eval_num_seeds'],
                 self._params['target_seq_len'], self._params['pose_dim'])
    srnn_gts_euler = sample['decoder_outputs_euler']
    decoder_pred_ = decoder_pred.cpu().numpy()
    decoder_pred_ = decoder_pred_.reshape(seq_shape)
    do_remove = self._params['remove_low_std']
    mean_eval_error_dict = {}
    self.print_table_header()
    eval_ms_mean = []
    for ai, action in enumerate(sample['actions']):
        action = action[0]
        decoder_pred = decoder_pred_[ai, :, :, :]
        if self._params['dataset'] == 'h36m':
            # seq_len x n_seeds x pose_dim
            decoder_pred = decoder_pred.transpose([1, 0, 2])
        # a list or a vector of length n_seeds
        # each entry of: shape seq_len x complete_pose_dim (H36M == 99)
        srnn_pred_euler = self._eval_dataset_fn.dataset.post_process_to_euler(decoder_pred)
        # n_seeds x seq_len
        mean_errors = np.zeros((self._params['eval_num_seeds'],
                                self._params['target_seq_len']))
        # Training is done in exponential map or rotation matrix or quaternion
        # but the error is reported in Euler angles, as in previous work [3,4,5]
        for i in np.arange(self._params['eval_num_seeds']):
            # seq_len x complete_pose_dim (H36M==99)
            eulerchannels_pred = srnn_pred_euler[i]
            # n_seeds x seq_len x complete_pose_dim (H36M==96)
            action_gt = srnn_gts_euler[action]
            # seq_len x complete_pose_dim (H36M==96)
            gt_i = np.copy(action_gt.squeeze()[i].numpy())
            # Only remove global rotation. Global translation was removed before
            gt_i[:, 0:3] = 0
            # here [2,4,5] remove data based on the std of the batch THIS IS WEIRD!
            # (seq_len, 96) - (seq_len, 96)
            idx_to_use = np.where(np.std(gt_i, 0) > 1e-4)[0]
            euc_error = np.power(gt_i[:, idx_to_use] - eulerchannels_pred[:, idx_to_use], 2)
            # shape: seq_len
            euc_error = np.sum(euc_error, 1)
            # shape: seq_len
            euc_error = np.sqrt(euc_error)
            mean_errors[i, :] = euc_error
        # This is simply the mean error over the eval_num_seeds examples
        # with shape [eval_num_seeds]
        mean_mean_errors = np.mean(mean_errors, 0)
        mean_eval_error_dict[action] = self.print_range_summary(action, mean_mean_errors)
    return mean_eval_error_dict
@torch.no_grad()
def evaluate_nturgbd(self, current_step, dummy_entry=None):
    """Evaluate pose loss (and optionally activity accuracy) on the eval set.

    Returns:
        With ``predict_activity``: a 6-tuple (eval_loss, class_loss,
        accuracy, predicted class ids, ground-truth ids, class
        probabilities); otherwise just the mean pose loss.
    """
    eval_loss = 0.0
    # NOTE(review): mAP_all / mean_accuracy / TP / FN / MPJPE are
    # initialized but never updated in this method -- dead accumulators.
    mAP_all = 0.0
    class_loss = 0.0
    mean_accuracy = 0.0
    N = len(self._eval_dataset_fn)
    gt_class_ = []
    pred_class_ = []
    num_joints = self._params['pose_dim'] // 3
    TP = np.zeros((num_joints,))
    FN = np.zeros((num_joints,))
    MPJPE = np.zeros((num_joints,))
    for (i, sample) in tqdm.tqdm(enumerate(self._eval_dataset_fn)):
        for k in sample.keys():
            if k == 'action_str':
                continue
            sample[k] = sample[k].to(_DEVICE)
        decoder_pred = self._model(
            sample['encoder_inputs'], sample['decoder_inputs'])
        pred_class, gt_class = None, None
        if self._params['predict_activity']:
            gt_class = sample['action_ids']  # one label for the sequence
            pred_class = decoder_pred[1]
            # Model returns (pose_preds, class_logits) in this mode; keep the
            # last decoder layer's logits for scoring.
            decoder_pred = decoder_pred[0]
            gt_class_.append(gt_class.item())
            pred_class_.append(pred_class[-1].cpu().numpy())
        pose_loss, activity_loss = self.compute_loss(
            inputs=sample['encoder_inputs'],
            target=sample['decoder_outputs'],
            preds=decoder_pred,
            class_logits=pred_class,
            class_gt=gt_class
        )
        # Can save predicted outputs for visualization here
        # if i == 2:
        #     predicted_pose = decoder_pred[-1].squeeze().reshape(20, 17, 3).cpu().numpy()
        #     input_pose = sample['encoder_inputs'].squeeze().reshape(39, 17, 3).cpu().numpy()
        #     gt_pose = sample['decoder_outputs'].squeeze().reshape(20, 17, 3).cpu().numpy()
        #     np.save('output_poses/v37_pred.npy', predicted_pose)
        #     np.save('output_poses/v37_gt.npy', gt_pose)
        #     np.save('output_poses/v37_input.npy', input_pose)
        #     # break
        eval_loss += pose_loss
    eval_loss /= N
    if self._params['predict_activity']:
        # NOTE(review): class_loss is never accumulated above, so this stays
        # 0.0; accuracy is computed from the stacked per-sample logits.
        class_loss /= N
        pred_class_ = torch.squeeze(torch.from_numpy(np.stack(pred_class_)))
        gt_class_ = torch.from_numpy(np.array(gt_class_))
        # print(pred_class_.size(), gt_class_.size())
        accuracy = self.compute_class_accurracy_sequence(pred_class_, gt_class_)
        return (eval_loss, class_loss, accuracy, torch.argmax(pred_class_.softmax(-1), -1), gt_class_, pred_class_.softmax(-1))
        # return (eval_loss, class_loss, accuracy, torch.argmax(pred_class_.softmax(-1), -1), gt_class_)
        # return (eval_loss, class_loss, accuracy)
    return eval_loss
def compute_mean_average_precision(self, prediction, target):
    """Joint-wise recall at the _MAP_TRESH distance, averaged into a mAP."""
    pred_seq = np.squeeze(prediction)
    tgt_seq = np.squeeze(target)
    n_frames, _ = pred_seq.shape
    pred_seq = self._eval_dataset_fn.dataset.unormalize_sequence(pred_seq)
    tgt_seq = self._eval_dataset_fn.dataset.unormalize_sequence(tgt_seq)
    # num_frames x num_joints x 3
    pred_seq = pred_seq.reshape((n_frames, -1, 3))
    tgt_seq = tgt_seq.reshape((n_frames, -1, 3))
    # A joint counts as a true positive in a frame when its (x, y, z) error
    # norm lies within the threshold.
    hit = (np.linalg.norm(pred_seq - tgt_seq, axis=-1) <= _MAP_TRESH).astype(int)
    miss = np.logical_not(hit).astype(int)
    # num_joints
    TP = np.sum(hit, axis=0)
    FN = np.sum(miss, axis=0)
    # compute recall for each joint, then average over joints
    recall = TP / (TP + FN)
    mAP = np.mean(recall)
    return mAP, TP, FN
def compute_MPJPE(self, prediction, target):
pred = np.squeeze(prediction)
tgt = np.squeeze(target)
T, D = pred.shape
pred = self._eval_dataset_fn.dataset.unormalize_sequence(pred)
tgt = self._eval_dataset_fn.dataset.unormalize_sequence(tgt)
# num_frames x num_joints x 3
pred = pred.reshape((T, -1, 3))
tgt = tgt.reshape((T, -1, 3))
# compute the norm for the last axis: (x,y,z) coordinates
# num_frames x num_joints
norm = np.linalg.norm(pred-tgt, axis=-1)
# num_joints
MPJPE = np.mean(norm, axis=0)
return MPJPE
| 24,065 | 36.024615 | 179 | py |
GaitForeMer | GaitForeMer-main/training/pose_classifier_fn.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""
[1] https://arxiv.org/abs/1312.6114
"""
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
import numpy as np
import os
import sys
import argparse
import tqdm
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import models.PoseActionClassifier as ActionClass
import data.H36MDatasetPose as H36MDataset
import utils.utils as utils
_DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class PoseActionFn(object):
    """Training wrapper for the single-pose action classifier."""

    def __init__(self, params, train_dataset, val_dataset=None):
        """Store datasets and build model, optimizer and LR schedule.

        Args:
            params: dict of hyper-parameters.
            train_dataset: training DataLoader.
            val_dataset: validation DataLoader (optional).
        """
        self._params = params
        self._train_dataset = train_dataset
        self._val_dataset = val_dataset
        self._writer = SummaryWriter(
            os.path.join(self._params['model_prefix'], 'tf_logs'))
        self.load_constants()
        self.init_model()
        self.select_optimizer()
        thisname = self.__class__.__name__
        self._lr_scheduler = utils.get_lr_fn(self._params, self._optimizer_fn)
        # Echo the full configuration for reproducibility.
        for k, v in self._params.items():
            print('[INFO] ({}) {}: {}'.format(thisname, k, v))
def load_constants(self):
    """Load skeleton constants from the data path; force one-hot off."""
    self._params['use_one_hot'] = False
    # utils.load_constants returns (parent, offset, rot_ind, exp_map_ind);
    # they are stored directly in the params dict.
    self._params['parent'], self._params['offset'], \
        self._params['rot_ind'], self._params['exp_map_ind'] = \
        utils.load_constants(self._params['data_path'])
def init_model(self):
    """Instantiate the action classifier, move it to device, report size."""
    n_classes = len(self._params['action_subset'])
    self._model = ActionClass.ActionClassifier(
        dim=self._params['model_dim'],
        n_classes=n_classes
    )
    self._model.to(_DEVICE)
    trainable = filter(lambda p: p.requires_grad, self._model.parameters())
    n_params = sum([np.prod(p.size()) for p in trainable])
    print('++++++++ Total Parameters:', n_params)
def select_optimizer(self):
self._optimizer_fn = optim.Adam(
self._model.parameters(),
lr=self._params['learning_rate']
)
def compute_accuracy(self, class_logits, class_gt):
    """Fraction of samples whose predicted class equals the label (tensor)."""
    predicted = class_logits.softmax(-1).argmax(-1)
    n_correct = (predicted == class_gt).float().sum()
    return n_correct / class_logits.size()[0]
def forward_model(self, sample):
    """Run one classification forward pass; return (CE loss, accuracy)."""
    poses = sample['pose'].to(_DEVICE)
    labels = sample['action'].to(_DEVICE)
    logits = self._model(poses)
    ce_loss = nn.functional.cross_entropy(logits, labels, reduction='mean')
    batch_accuracy = self.compute_accuracy(logits, labels)
    return ce_loss, batch_accuracy
def train_one_epoch(self, epoch):
    """Run one optimization pass over the training set.

    Returns:
        (mean loss, mean accuracy) over the number of batches.
    """
    epoch_loss, epoch_accuracy = 0, 0
    N = len(self._train_dataset)
    self._model.train()
    for i, sample in enumerate(self._train_dataset):
        self._optimizer_fn.zero_grad()
        loss, accuracy = self.forward_model(sample)
        # Periodic progress log only; values are tensors formatted directly.
        if i % 1000 == 0:
            print('[INFO] epoch: {:04d}; it: {:04d} loss: {:.4f}; acc: {:.4f}'.format(
                epoch, i, loss, accuracy))
        loss.backward()
        self._optimizer_fn.step()
        epoch_loss += loss
        epoch_accuracy += accuracy
    return epoch_loss / N, epoch_accuracy / N
@torch.no_grad()
def validation(self, epoch):
    """Compute mean loss and accuracy over the validation set (no grads).

    Returns:
        (mean loss, mean accuracy) over the number of validation batches.
    """
    epoch_loss, epoch_accuracy = 0, 0
    N = len(self._val_dataset)
    self._model.eval()
    for i, sample in tqdm.tqdm(enumerate(self._val_dataset)):
        loss, accuracy = self.forward_model(sample)
        epoch_loss += loss
        epoch_accuracy += accuracy
    return epoch_loss / N, epoch_accuracy / N
def train(self):
  """Main training loop.

  Per epoch: trains, validates, saves a checkpoint under
  `<model_prefix>/models/`, prints a summary, and logs loss/accuracy/lr
  scalars to the summary writer.
  """
  thisname = self.__class__.__name__  # currently unused in this method
  self._params['learning_rate'] = self._lr_scheduler.get_last_lr()[0]
  for e in range(self._params['max_epochs']):
    self._model.train()
    epoch_loss, epoch_accuracy = self.train_one_epoch(e)
    val_loss, val_accuracy = self.validation(e)
    # save models
    model_path = os.path.join(
        self._params['model_prefix'], 'models', 'ckpt_epoch_%04d.pt'%e)
    torch.save(self._model.state_dict(), model_path)
    # verbose and write the scalars
    print('[INFO] Epoch: {:04d}; epoch_loss: {:.4f}; epoch_accuracy: {:.4f}; val_loss: {:.4f}; val_accuracy: {:.4f}; lr: {:2.2e}'.format(
        e, epoch_loss, epoch_accuracy, val_loss, val_accuracy, self._params['learning_rate']))
    self._writer.add_scalars(
        'loss/loss', {'train': epoch_loss, 'val': val_loss}, e)
    self._writer.add_scalars(
        'accurracy/accurracy', {'train': epoch_accuracy, 'val': val_accuracy}, e)
    self._writer.add_scalar(
        'learning_rate/lr', self._params['learning_rate'], e)
    # NOTE(review): passing the epoch to step() is deprecated in recent
    # PyTorch LR schedulers — confirm against the pinned torch version.
    self._lr_scheduler.step(e)
    # refresh the stored LR so the next epoch's log reflects the schedule
    self._params['learning_rate'] = self._lr_scheduler.get_last_lr()[0]
if __name__ == '__main__':
  # Command-line entry point: parse options, build the H3.6M train/eval
  # dataloaders and run the action-classification trainer.
  parser = argparse.ArgumentParser()
  parser.add_argument('--data_path', type=str, default=None)
  parser.add_argument('--action', type=str, nargs='*', default=None)
  parser.add_argument('--batch_size', type=int, default=128)
  parser.add_argument('--pose_format', type=str, default='expmap')
  parser.add_argument('--remove_low_std', action='store_true')
  parser.add_argument('--model_dim', type=int, default=128)
  parser.add_argument('--max_epochs', type=int, default=500)
  parser.add_argument('--model_prefix', type=str, default=None)
  parser.add_argument('--learning_rate', type=float, default=1e-3)
  parser.add_argument('--learning_rate_fn', type=str, default='linear')
  args = parser.parse_args()

  params = vars(args)

  # NOTE(review): `--action` defaults to None, so `'all' in args.action`
  # raises TypeError when the flag is omitted — confirm callers always
  # pass --action.
  if 'all' in args.action:
    args.action = H36MDataset._ALL_ACTIONS
  params['action_subset'] = args.action

  # the eval split reuses the train split's normalization statistics
  dataset_t = H36MDataset.H36MDataset(params, mode='train')
  dataset_v = H36MDataset.H36MDataset(
      params, mode='eval', norm_stats=dataset_t._norm_stats)
  train_dataset_fn = torch.utils.data.DataLoader(
      dataset_t,
      batch_size=params['batch_size'],
      shuffle=True,
      num_workers=4,
      collate_fn=H36MDataset.collate_fn,
      drop_last=True
  )
  val_dataset_fn = torch.utils.data.DataLoader(
      dataset_v,
      batch_size=1,
      shuffle=True,
      num_workers=1,
      collate_fn=H36MDataset.collate_fn,
      drop_last=True
  )

  # input/pose dimensionality is taken from the instantiated dataset
  params['input_dim'] = train_dataset_fn.dataset._data_dim
  params['pose_dim'] = train_dataset_fn.dataset._pose_dim

  vae_trainer = PoseActionFn(params, train_dataset_fn, val_dataset_fn)
  vae_trainer.train()
| 7,534 | 34.21028 | 139 | py |
GaitForeMer | GaitForeMer-main/models/Conv1DEncoder.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Model of 1D convolutions for encoding pose sequences."""
import numpy as np
import os
import sys
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import torch
import torch.nn as nn
class Pose1DEncoder(nn.Module):
  """Encodes per-frame skeletons with a stack of 1D convolutions over joints.

  Each frame is treated as a 1D signal over `n_joints` positions with
  `input_channels` values (degrees of freedom) per joint.
  """

  def __init__(self, input_channels=3, output_channels=128, n_joints=21):
    super(Pose1DEncoder, self).__init__()
    self._input_channels = input_channels
    self._output_channels = output_channels
    self._n_joints = n_joints
    self.init_model()

  def init_model(self):
    """Builds the Conv1d -> BatchNorm1d -> ReLU stack plus a final projection."""
    conv_plan = [
        (self._input_channels, 32, 7),
        (32, 32, 3),
        (32, 64, 3),
        (64, 64, 3),
        (64, 128, 3),
        (128, 128, 3),
        (128, self._output_channels, 3),
    ]
    layers = []
    for n_in, n_out, kernel in conv_plan:
      layers.append(
          nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=kernel))
      layers.append(nn.BatchNorm1d(n_out))
      layers.append(nn.ReLU(True))
    # final projection carries no normalization or activation
    layers.append(nn.Conv1d(
        in_channels=self._output_channels,
        out_channels=self._output_channels,
        kernel_size=3))
    self._model = nn.Sequential(*layers)

  def forward(self, x):
    """
    Args:
      x: [batch_size, seq_len, skeleton_dim].

    Returns:
      Tensor of shape [batch_size, seq_len, output_channels].
    """
    batch_size, seq_len, skeleton_dim = x.size()
    dof = skeleton_dim // self._n_joints
    # fold time into the batch: [batch_size*seq_len, dof, n_joints]
    frames = x.view(batch_size * seq_len, dof, self._n_joints)
    encoded = self._model(frames)
    # unfold back to [batch_size, seq_len, output_channels]
    return encoded.view(batch_size, seq_len, self._output_channels)
class Pose1DTemporalEncoder(nn.Module):
  """Temporal 1D-convolutional encoder over pose sequences.

  All convolutions use kernel_size=3 with padding=1 along the time axis,
  so the sequence length is preserved.
  """

  def __init__(self, input_channels, output_channels):
    super(Pose1DTemporalEncoder, self).__init__()
    self._input_channels = input_channels
    self._output_channels = output_channels
    self.init_model()

  def init_model(self):
    """Builds the padded Conv1d -> BatchNorm1d -> ReLU stack plus a final projection."""
    conv_plan = [
        (self._input_channels, 32),
        (32, 32),
        (32, 64),
        (64, 64),
        (64, 128),
        (128, 128),
        (128, self._output_channels),
    ]
    layers = []
    for n_in, n_out in conv_plan:
      layers.append(nn.Conv1d(
          in_channels=n_in, out_channels=n_out, kernel_size=3, padding=1))
      layers.append(nn.BatchNorm1d(n_out))
      layers.append(nn.ReLU(True))
    # final projection carries no normalization or activation
    layers.append(nn.Conv1d(
        in_channels=self._output_channels,
        out_channels=self._output_channels,
        kernel_size=3,
        padding=1))
    self._model = nn.Sequential(*layers)

  def forward(self, x):
    """
    Args:
      x: [batch_size, seq_len, skeleton_dim].

    Returns:
      Tensor of shape [batch_size, seq_len, output_channels].
    """
    # convolve over time: [batch_size, skeleton_dim, seq_len]
    features = torch.transpose(x, 1, 2)
    features = self._model(features)
    # back to [batch_size, seq_len, output_channels]
    return torch.transpose(features, 1, 2)
if __name__ == '__main__':
  # Smoke test: push a dummy batch through the temporal encoder and print
  # the output shape.
  dof = 9
  output_channels = 128
  n_joints = 21
  seq_len = 49

  model = Pose1DTemporalEncoder(input_channels=dof*n_joints, output_channels=output_channels)
  # NOTE(review): torch.FloatTensor(...) allocates uninitialized memory;
  # torch.randn would give reproducible, finite inputs — confirm intent.
  inputs = torch.FloatTensor(10, seq_len, dof*n_joints)
  X = model(inputs)
  print(X.size())

  # model = Pose1DEncoder(input_channels=dof, output_channels=output_channels)
  # inputs = torch.FloatTensor(10, seq_len, dof*n_joints)
  # X = model(inputs)
  # print(X.size())
| 5,262 | 32.954839 | 114 | py |
GaitForeMer | GaitForeMer-main/models/Transformer.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implementation of the Transformer for sequence-to-sequence decoding.
Implementation of the transformer for sequence to sequence prediction as in
[1] and [2].
[1] https://arxiv.org/pdf/1706.03762.pdf
[2] https://arxiv.org/pdf/2005.12872.pdf
"""
import numpy as np
import os
import sys
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
import utils.PositionEncodings as PositionEncodings
import models.TransformerEncoder as Encoder
import models.TransformerDecoder as Decoder
class Transformer(nn.Module):
  """Sequence-to-sequence Transformer with optional decoder-query selection.

  Wraps a TransformerEncoder and TransformerDecoder. When `query_selection`
  is enabled, a linear head predicts, from the encoder memory, which input
  elements should be copied as decoder queries.
  """

  def __init__(self,
               num_encoder_layers=6,
               num_decoder_layers=6,
               model_dim=256,
               num_heads=8,
               dim_ffn=2048,
               dropout=0.1,
               init_fn=utils.normal_init_,
               use_query_embedding=False,
               pre_normalization=False,
               query_selection=False,
               target_seq_len=25):
    """Implements the Transformer model for sequence-to-sequence modeling."""
    super(Transformer, self).__init__()
    self._model_dim = model_dim
    self._num_heads = num_heads
    self._dim_ffn = dim_ffn
    self._dropout = dropout
    self._use_query_embedding = use_query_embedding
    self._query_selection = query_selection
    self._tgt_seq_len = target_seq_len

    self._encoder = Encoder.TransformerEncoder(
        num_layers=num_encoder_layers,
        model_dim=model_dim,
        num_heads=num_heads,
        dim_ffn=dim_ffn,
        dropout=dropout,
        init_fn=init_fn,
        pre_normalization=pre_normalization
    )

    self._decoder = Decoder.TransformerDecoder(
        num_layers=num_decoder_layers,
        model_dim=model_dim,
        num_heads=num_heads,
        dim_ffn=dim_ffn,
        dropout=dropout,
        init_fn=init_fn,
        use_query_embedding=use_query_embedding,
        pre_normalization=pre_normalization
    )

    if self._query_selection:
      # scores each encoder output against every target position
      self._position_predictor = nn.Linear(self._model_dim, self._tgt_seq_len)

  def process_index_selection(self, self_attn, one_to_one_selection=False):
    """Selection of query elements using position predictor from encoder memory.

    After prediction a maximum assignment problem is solved to get indices for
    each element in the query sequence.

    Args:
      self_attn: Encoder memory with shape [src_len, batch_size, model_dim]
      one_to_one_selection: If True, solve a one-to-one assignment with the
        Hungarian algorithm (scipy.optimize.linear_sum_assignment); otherwise
        allow many target positions to map to one input element.

    Returns:
      A tuple (indices, soft_matrix): per-batch (row, column) index pairs and
      the softmaxed score matrix.
    """
    batch_size = self_attn.size()[1]
    # batch_size x src_seq_len x model_dim
    in_pos = torch.transpose(self_attn, 0, 1)
    # predict the matrix of similitudes
    # batch_size x src_seq_len x tgt_seq_len
    prob_matrix = self._position_predictor(in_pos)

    # apply softmax on the similitudes to get probabilities on positions
    # batch_size x src_seq_len x tgt_seq_len
    if one_to_one_selection:
      soft_matrix = F.softmax(prob_matrix, dim=2)
      # predict assignments in a one to one fashion maximizing the sum of probs
      indices = [linear_sum_assignment(soft_matrix[i].cpu().detach(), maximize=True)
                 for i in range(batch_size)
                ]
    else:
      # perform softmax by rows to have many targets to one input assignments
      soft_matrix = F.softmax(prob_matrix, dim=1)
      indices_rows = torch.argmax(soft_matrix, 1)
      indices = [(indices_rows[i], list(range(prob_matrix.size()[2])))
                 for i in range(batch_size)
                ]

    return indices, soft_matrix

  def forward(self,
              source_seq,
              target_seq,
              encoder_position_encodings=None,
              decoder_position_encodings=None,
              query_embedding=None,
              mask_target_padding=None,
              mask_look_ahead=None,
              get_attn_weights=False,
              query_selection_fn=None,
              fold=None,
              eval_step=None):
    """Encodes `source_seq`, (optionally) selects queries, then decodes.

    Returns:
      Tuple (decoder outputs, encoder memory, decoder attention weights or
      None, encoder attention weights or None, (selected plain targets or
      None, selection probability matrix or None)).
    """
    if self._use_query_embedding:
      bs = source_seq.size()[1]
      # broadcast the learned query embedding over the batch
      query_embedding = query_embedding.unsqueeze(1).repeat(1, bs, 1)
      decoder_position_encodings = encoder_position_encodings

    memory, enc_weights = self._encoder(source_seq, encoder_position_encodings)

    # Save encoder outputs
    # if fold is not None:
    #   encoder_output_dir = 'encoder_outputs'
    #   if not os.path.exists(f'{encoder_output_dir}f{fold}/'):
    #     os.makedirs(f'{encoder_output_dir}f{fold}/')
    #   outpath = f'{encoder_output_dir}f{fold}/{eval_step}.npy'
    #   encoder_output = memory.detach().cpu().numpy()
    #   np.save(outpath, encoder_output)

    tgt_plain = None
    # perform selection from input sequence
    if self._query_selection:
      indices, prob_matrix = self.process_index_selection(memory)
      tgt_plain, target_seq = query_selection_fn(indices)

    out_attn, out_weights = self._decoder(
        target_seq,
        memory,
        decoder_position_encodings,
        query_embedding=query_embedding,
        mask_target_padding=mask_target_padding,
        mask_look_ahead=mask_look_ahead,
        get_attn_weights=get_attn_weights
    )

    out_weights_ = None
    enc_weights_ = None
    prob_matrix_ = None
    if get_attn_weights:
      out_weights_, enc_weights_ = out_weights, enc_weights

    if self._query_selection:
      prob_matrix_ = prob_matrix

    return out_attn, memory, out_weights_, enc_weights_, (tgt_plain, prob_matrix_)
| 6,581 | 33.103627 | 85 | py |
GaitForeMer | GaitForeMer-main/models/PoseTransformer.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implementation of the Transformer for sequence-to-sequence decoding.
Implementation of the transformer for sequence to sequence prediction as in
[1] and [2].
[1] https://arxiv.org/pdf/1706.03762.pdf
[2] https://arxiv.org/pdf/2005.12872.pdf
"""
import numpy as np
import os
import sys
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
import utils.PositionEncodings as PositionEncodings
import models.TransformerEncoder as Encoder
import models.TransformerDecoder as Decoder
from models.Transformer import Transformer
# Default sequence/pose sizes used by PoseTransformer and the smoke test below.
_SOURCE_LENGTH = 110  # default input (source) sequence length in frames
_TARGET_LENGTH = 55   # default prediction (target) sequence length in frames
_POSE_DIM = 54        # default flattened skeleton dimension per frame
_PAD_LENGTH = _SOURCE_LENGTH  # padding length, kept equal to the source length
class PoseTransformer(nn.Module):
  """Implements the sequence-to-sequence Transformer model for pose prediction."""

  def __init__(self,
               pose_dim=_POSE_DIM,
               model_dim=256,
               num_encoder_layers=6,
               num_decoder_layers=6,
               num_heads=8,
               dim_ffn=2048,
               dropout=0.1,
               target_seq_length=_TARGET_LENGTH,
               source_seq_length=_SOURCE_LENGTH,
               input_dim=None,
               init_fn=utils.xavier_init_,
               non_autoregressive=False,
               use_query_embedding=False,
               pre_normalization=False,
               predict_activity=False,
               use_memory=False,
               num_activities=None,
               pose_embedding=None,
               pose_decoder=None,
               copy_method='uniform_scan',
               query_selection=False,
               pos_encoding_params=(10000, 1)):
    """Initialization of pose transformers.

    NOTE(review): `use_memory` is accepted but never referenced inside this
    class — confirm whether it is still needed.
    """
    super(PoseTransformer, self).__init__()
    self._target_seq_length = target_seq_length
    self._source_seq_length = source_seq_length
    self._pose_dim = pose_dim
    # input may carry extra features beyond the raw pose (e.g. one-hot class)
    self._input_dim = pose_dim if input_dim is None else input_dim
    self._model_dim = model_dim
    self._use_query_embedding = use_query_embedding
    self._predict_activity = predict_activity
    self._num_activities = num_activities
    self._num_decoder_layers = num_decoder_layers
    self._mlp_dim = model_dim
    self._non_autoregressive = non_autoregressive
    self._pose_embedding = pose_embedding
    self._pose_decoder = pose_decoder
    self._query_selection = query_selection
    thisname = self.__class__.__name__
    self._copy_method = copy_method
    self._pos_encoding_params = pos_encoding_params

    self._transformer = Transformer(
        num_encoder_layers=num_encoder_layers,
        num_decoder_layers=num_decoder_layers,
        model_dim=model_dim,
        num_heads=num_heads,
        dim_ffn=dim_ffn,
        dropout=dropout,
        init_fn=init_fn,
        use_query_embedding=use_query_embedding,
        pre_normalization=pre_normalization,
        query_selection=query_selection,
        target_seq_len=target_seq_length
    )
    t_params = filter(lambda p: p.requires_grad, self._transformer.parameters())
    nparams = sum([np.prod(p.size()) for p in t_params])
    print('[INFO] ({}) Transformer has {} parameters!'.format(thisname, nparams))

    # sinusoidal position encodings for encoder and decoder sequences
    self._pos_encoder = PositionEncodings.PositionEncodings1D(
        num_pos_feats=self._model_dim,
        temperature=self._pos_encoding_params[0],
        alpha=self._pos_encoding_params[1]
    )
    self._pos_decoder = PositionEncodings.PositionEncodings1D(
        num_pos_feats=self._model_dim,
        temperature=self._pos_encoding_params[0],
        alpha=self._pos_encoding_params[1]
    )
    # self.init_pose_encoder_decoders(init_fn)
    self._use_class_token = False
    self.init_position_encodings()
    self.init_query_embedding()

    if self._use_class_token:
      self.init_class_token()

    if self._predict_activity:
      # with a class token only that token feeds the head; otherwise the
      # whole flattened encoder memory does
      self._action_head_size = self._model_dim if self._use_class_token \
          else self._model_dim*(self._source_seq_length-1)
      self._action_head = nn.Sequential(
          nn.Linear(self._action_head_size, self._num_activities),
      )

  def init_query_embedding(self):
    """Initialization of query sequence embedding."""
    self._query_embed = nn.Embedding(self._target_seq_length, self._model_dim)
    print('[INFO] ({}) Init query embedding!'.format(self.__class__.__name__))
    nn.init.xavier_uniform_(self._query_embed.weight.data)
    # self._query_embed.weight.data.normal_(0.0, 0.004)

  def init_class_token(self):
    """Creates the learnable class token used for activity prediction."""
    token = torch.FloatTensor(1, self._model_dim)
    print('[INFO] ({}) Init class token!'.format(self.__class__.__name__))
    self._class_token = nn.Parameter(token, requires_grad=True)
    nn.init.xavier_uniform_(self._class_token.data)

  def init_position_encodings(self):
    """Precomputes position encodings and the look-ahead mask as buffers."""
    src_len = self._source_seq_length-1
    # when using a token we need an extra element in the sequence
    if self._use_class_token:
      src_len = src_len + 1

    encoder_pos_encodings = self._pos_encoder(src_len).view(
        src_len, 1, self._model_dim)
    decoder_pos_encodings = self._pos_decoder(self._target_seq_length).view(
        self._target_seq_length, 1, self._model_dim)
    mask_look_ahead = torch.from_numpy(
        utils.create_look_ahead_mask(
            self._target_seq_length, self._non_autoregressive))
    # stored as non-trainable parameters so they move with the module's device
    self._encoder_pos_encodings = nn.Parameter(
        encoder_pos_encodings, requires_grad=False)
    self._decoder_pos_encodings = nn.Parameter(
        decoder_pos_encodings, requires_grad=False)
    self._mask_look_ahead = nn.Parameter(
        mask_look_ahead, requires_grad=False)

  def forward(self,
              input_pose_seq,
              target_pose_seq=None,
              mask_target_padding=None,
              get_attn_weights=False,
              fold=None,
              eval_step=None):
    """Performs the forward pass of the pose transformers.

    Dispatches to parallel (non-autoregressive) decoding during training or
    when `non_autoregressive` is set, and to step-by-step autoregressive
    decoding otherwise.

    Args:
      input_pose_seq: Shape [batch_size, src_sequence_length, dim_pose].
      target_pose_seq: Shape [batch_size, tgt_sequence_length, dim_pose].

    Returns:
      A tensor of the predicted sequence with shape [batch_size,
      tgt_sequence_length, dim_pose].
    """
    if self.training:
      return self.forward_training(
          input_pose_seq, target_pose_seq, mask_target_padding, get_attn_weights) # no fold here, only want to save eval

    # eval forward for non auto regressive type of model
    if self._non_autoregressive:
      return self.forward_training(
          input_pose_seq, target_pose_seq, mask_target_padding, get_attn_weights, fold=fold, eval_step=eval_step)

    return self.forward_autoregressive(
        input_pose_seq, target_pose_seq, mask_target_padding, get_attn_weights)

  def handle_class_token(self, input_pose_seq):
    """
    Args:
      input_pose_seq: [src_len, batch_size, model_dim]
    """
    # concatenate extra token for activity prediction as an extra
    # element of the input sequence
    # specialized token is not a skeleton
    _, B, _ = input_pose_seq.size()
    token = self._class_token.squeeze().repeat(1, B, 1)
    input_pose_seq = torch.cat([token, input_pose_seq], axis=0)

    return input_pose_seq

  def handle_copy_query(self, indices, input_pose_seq_):
    """Handles the way queries are generated copying items from the inputs.

    Args:
      indices: A list of tuples len `batch_size`. Each tuple contains has the
        form (input_list, target_list) where input_list contains indices of
        elements in the input to be copy to elements in the target specified by
        target_list.
      input_pose_seq_: Source skeleton sequence [batch_size, src_len, pose_dim].

    Returns:
      A tuple with first elements the decoder input skeletons with shape
      [tgt_len, batch_size, skeleton_dim], and the skeleton embeddings of the
      input sequence with shape [tgt_len, batch_size, pose_dim].
    """
    batch_size = input_pose_seq_.size()[0]
    decoder_inputs = torch.FloatTensor(
        batch_size,
        self._target_seq_length,
        self._pose_dim
    ).to(self._decoder_pos_encodings.device)
    for i in range(batch_size):
      for j in range(self._target_seq_length):
        src_idx, tgt_idx = indices[i][0][j], indices[i][1][j]
        decoder_inputs[i, tgt_idx] = input_pose_seq_[i, src_idx]
    dec_inputs_encode = self._pose_embedding(decoder_inputs)

    return torch.transpose(decoder_inputs, 0, 1), \
        torch.transpose(dec_inputs_encode, 0, 1)

  def forward_training(self,
                       input_pose_seq_,
                       target_pose_seq_,
                       mask_target_padding,
                       get_attn_weights=False,
                       fold=None,
                       eval_step=None):
    """Compute forward pass for training and non recursive inference.

    Args:
      input_pose_seq_: Source sequence [batch_size, src_len, skeleton_dim].
      target_pose_seq_: Query target sequence [batch_size, tgt_len, skeleton_dim].
      mask_target_padding: Mask for target masking with ones where elements
        belong to the padding elements of shape [batch_size, tgt_len, skeleton_dim].
      get_attn_weights: Boolean to indicate if attention weights should be returned.

    Returns:
      Per-decoder-layer predicted sequences (plus activity logits when
      `predict_activity` is set), attention weights and the query-selection
      tuple.
    """
    # 1) Encode the sequence with given pose encoder
    # [batch_size, sequence_length, model_dim]
    input_pose_seq = input_pose_seq_
    target_pose_seq = target_pose_seq_
    if self._pose_embedding is not None:
      input_pose_seq = self._pose_embedding(input_pose_seq)
      target_pose_seq = self._pose_embedding(target_pose_seq)

    # 2) compute the look-ahead mask and the positional encodings
    # [sequence_length, batch_size, model_dim]
    input_pose_seq = torch.transpose(input_pose_seq, 0, 1)
    target_pose_seq = torch.transpose(target_pose_seq, 0, 1)

    def query_copy_fn(indices):
      return self.handle_copy_query(indices, input_pose_seq_)

    # concatenate extra token for activity prediction as an extr element of the
    # input sequence, i.e. specialized token is not a skeleton
    if self._use_class_token:
      input_pose_seq = self.handle_class_token(input_pose_seq)

    # 3) compute the attention weights using the transformer
    # [target_sequence_length, batch_size, model_dim]
    attn_output, memory, attn_weights, enc_weights, mat = self._transformer(
        input_pose_seq,
        target_pose_seq,
        query_embedding=self._query_embed.weight,
        encoder_position_encodings=self._encoder_pos_encodings,
        decoder_position_encodings=self._decoder_pos_encodings,
        mask_look_ahead=self._mask_look_ahead,
        mask_target_padding=mask_target_padding,
        get_attn_weights=get_attn_weights,
        query_selection_fn=query_copy_fn,
        fold=fold,
        eval_step=eval_step
    )

    end = self._input_dim if self._input_dim == self._pose_dim else self._pose_dim
    out_sequence = []
    target_pose_seq_ = mat[0] if self._query_selection else \
        torch.transpose(target_pose_seq_, 0, 1)

    # 4) decode sequence with pose decoder. The decoding process is time
    # independent. It means non-autoregressive or parallel decoding.
    # [batch_size, target_sequence_length, pose_dim]
    for l in range(self._num_decoder_layers):
      # [target_seq_length*batch_size, pose_dim]
      out_sequence_ = self._pose_decoder(
          attn_output[l].view(-1, self._model_dim))
      # [target_seq_length, batch_size, pose_dim]
      out_sequence_ = out_sequence_.view(
          self._target_seq_length, -1, self._pose_dim)
      # apply residual connection between target query and predicted pose
      # [tgt_seq_len, batch_size, pose_dim]
      out_sequence_ = out_sequence_ + target_pose_seq_[:, :, 0:end]
      # [batch_size, tgt_seq_len, pose_dim]
      out_sequence_ = torch.transpose(out_sequence_, 0, 1)
      out_sequence.append(out_sequence_)

    if self._predict_activity:
      out_class = self.predict_activity(attn_output, memory)
      return out_sequence, out_class, attn_weights, enc_weights, mat

    return out_sequence, attn_weights, enc_weights, mat

  def predict_activity(self, attn_output, memory):
    """Performs activity prediction either from memory or class token.

    attn_output: Encoder memory. Shape [src_seq_len, batch_size, model_dim].
    """
    # [batch_size, src_len, model_dim]
    in_act = torch.transpose(memory, 0, 1)

    # use a single specialized token for predicting activity
    # the specialized token is in the first element of the sequence
    if self._use_class_token:
      # [batch_size, model_dim]
      token = in_act[:, 0]
      actions = self._action_head(token)
      return [actions]

    # use all the input sequence attention to predict activity
    # [batch_size, src_len*model_dim]
    in_act = torch.reshape(in_act, (-1, self._action_head_size))
    actions = self._action_head(in_act)
    return [actions]

    #out_class = []
    #for l in range(self._num_decoder_layers):
    #  in_act = torch.transpose(attn_output[l], 0, 1)
    #  in_act = torch.reshape(in_act, (-1, self._action_head_size))
    #  actions = self._action_head(in_act)
    #  out_class.append(actions)
    #return out_class

  def forward_autoregressive(self,
                             input_pose_seq,
                             target_pose_seq=None,
                             mask_target_padding=None,
                             get_attn_weights=False):
    """Compute forward pass for auto-regressive inference in test time."""
    thisdevice = self._encoder_pos_encodings.device
    # the first query pose is the first in the target
    prev_target = target_pose_seq[:, 0, :]
    # 1) Encode using the pose embedding
    if self._pose_embedding is not None:
      input_pose_seq = self._pose_embedding(input_pose_seq)
      target_pose_seq = self._pose_embedding(target_pose_seq)
    # [batch_size, 1, model_dim]
    target_seq = target_pose_seq[:, 0:1, :]

    # 2) compute the look-ahead mask and the positional encodings
    # [sequence_length, batch_size, model_dim]
    input_pose_seq = torch.transpose(input_pose_seq, 0, 1)
    target_seq = torch.transpose(target_seq, 0, 1)

    # concatenate extra token for activity prediction as an extra
    if self._use_class_token:
      input_pose_seq = self.handle_class_token(input_pose_seq)

    # 3) use auto recursion to compute the predicted set of tokens
    memory, enc_attn_weights = self._transformer._encoder(
        input_pose_seq, self._encoder_pos_encodings)

    # get only the first In teory it should only be one target pose at testing
    batch_size = memory.size()[1]
    out_pred_seq = torch.FloatTensor(
        batch_size, self._target_seq_length, self._pose_dim).to(thisdevice)

    for t in range(self._target_seq_length):
      position_encodings = self._pos_decoder(t+1).view(
          t+1, 1, self._model_dim).to(thisdevice)
      mask_look_ahead = torch.from_numpy(
          utils.create_look_ahead_mask(t+1)).to(thisdevice)
      # a list of length n_decoder_layers with elements of
      # shape [t, batch_size, model_dim]
      out_attn, out_weights = self._transformer._decoder(
          target_seq,
          memory,
          position_encodings,
          mask_look_ahead=mask_look_ahead
      )
      # get only the last predicted token decode it to get the pose and
      # then encode the pose. shape [1*batch_size, pose_dim]
      # for 8 seeds of evaluation (batch_size)
      pred_pose = self._pose_decoder(
          out_attn[-1][t:(t+1), :, :].view(-1, self._model_dim))
      # apply residual between last target pose and recently generated pose
      if self._pose_dim == self._input_dim:
        pred_pose = pred_pose + prev_target
      else:
        prev_target[:, 0:self._pose_dim] = pred_pose + prev_target[:,0:self._pose_dim]
        pred_pose = prev_target
      prev_target = pred_pose
      out_pred_seq[:, t, :] = pred_pose.view(-1, self._input_dim)[:, 0:self._pose_dim]
      if self._pose_embedding is not None:
        pose_code = self._pose_embedding(pred_pose)
      # [1, batch_size, model_dim]
      pose_code = pose_code.view(-1, batch_size, self._model_dim)
      # [t+1, batch_size, model_dim]
      target_seq = torch.cat([target_seq, pose_code], axis=0)

    # 1) the last attention output contains all the necessary sequence; or
    # 2) Use all the memory to predict
    if self._predict_activity:
      actions = self.predict_activity(out_attn, memory)

    if self._predict_activity:
      return [out_pred_seq], [actions[-1]], None, None

    return [out_pred_seq]
def model_factory(params, pose_embedding_fn, pose_decoder_fn):
  """Builds a PoseTransformer from a hyper-parameter dictionary.

  Args:
    params: Dict of hyper-parameters (pose/model dims, layer counts, etc.).
    pose_embedding_fn: Callable mapping params to a pose embedding module.
    pose_decoder_fn: Callable mapping params to a pose decoder module.

  Returns:
    A configured PoseTransformer instance.
  """
  if params['init_fn'] == 'normal_init':
    init_fn = utils.normal_init_
  else:
    init_fn = utils.xavier_init_

  return PoseTransformer(
      pose_dim=params['pose_dim'],
      input_dim=params['input_dim'],
      model_dim=params['model_dim'],
      num_encoder_layers=params['num_encoder_layers'],
      num_decoder_layers=params['num_decoder_layers'],
      num_heads=params['num_heads'],
      dim_ffn=params['dim_ffn'],
      dropout=params['dropout'],
      target_seq_length=params['target_seq_len'],
      source_seq_length=params['source_seq_len'],
      init_fn=init_fn,
      non_autoregressive=params['non_autoregressive'],
      use_query_embedding=params['use_query_embedding'],
      pre_normalization=params['pre_normalization'],
      predict_activity=params['predict_activity'],
      num_activities=params['num_activities'],
      use_memory=params['use_memory'],
      pose_embedding=pose_embedding_fn(params),
      pose_decoder=pose_decoder_fn(params),
      query_selection=params['query_selection'],
      pos_encoding_params=(params['pos_enc_beta'], params['pos_enc_alpha'])
  )
if __name__ == '__main__':
  # Smoke test: run dummy source/target sequences through the model in
  # eval mode (autoregressive path, since non_autoregressive is False)
  # and print the shape of the last output.
  transformer = PoseTransformer(model_dim=_POSE_DIM, num_heads=6)
  transformer.eval()

  batch_size = 8
  model_dim = 256  # unused below; the model was built with model_dim=_POSE_DIM
  tgt_seq = torch.FloatTensor(batch_size, _TARGET_LENGTH, _POSE_DIM).fill_(1)
  src_seq = torch.FloatTensor(batch_size, _SOURCE_LENGTH-1, _POSE_DIM).fill_(1)

  outputs = transformer(src_seq, tgt_seq)
  print(outputs[-1].size())
| 19,154 | 38.658385 | 120 | py |
GaitForeMer | GaitForeMer-main/models/seq2seq_model.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Sequence to sequence model for human motion prediction.
The model has been implemented according to [1] and adapted from its pytorch
version [2]. The reimplementation has the purpose of reducing clutter in
code and for learing purposes.
[1]
[2]
"""
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class Seq2SeqModel(nn.Module):
  """GRU-based sequence to sequence model for human motion prediction.

  A single GRU cell consumes the source sequence to build a hidden state;
  the target sequence is then decoded autoregressively. A residual
  connection (`output = inp + fc1(state)`) makes the network model
  velocities (pose deltas) rather than absolute poses.
  """

  def __init__(
      self,
      architecture='tied',
      source_seq_len=50,
      target_seq_len=25,
      rnn_size=1024,  # hidden recurrent layer size
      num_layers=1,
      max_gradient_norm=5,
      batch_size=16,
      learning_rate=0.005,
      learning_rate_decay_factor=0.95,
      loss_to_use='sampling_based',
      number_of_actions=1,
      one_hot=True,
      residual_velocities=False,
      dropout=0.0,
      dtype=torch.float32,
      device=None):
    """
    Args:
      architecture: [basic, tied] whether to tie the decoder and decoder.
      source_seq_len: lenght of the input sequence.
      target_seq_len: lenght of the target sequence.
      rnn_size: number of units in the rnn.
      num_layers: number of rnns to stack.
      max_gradient_norm: gradients will be clipped to maximally this norm.
      batch_size: the size of the batches used during training;
        the model construction is independent of batch_size, so it can be
        changed after initialization if this is convenient, e.g., for decoding.
      learning_rate: learning rate to start with.
      learning_rate_decay_factor: decay learning rate by this much when needed.
      loss_to_use: [supervised, sampling_based]. Whether to use ground truth in
        each timestep to compute the loss after decoding, or to feed back the
        prediction from the previous time-step.
      number_of_actions: number of classes we have.
      one_hot: whether to use one_hot encoding during train/test (sup models).
      residual_velocities: whether to use a residual connection that models velocities.
      dtype: the data type to use to store internal variables.

    NOTE(review): several constructor arguments (architecture, num_layers,
    learning rates, etc.) are accepted for interface compatibility but are
    not used by this implementation.
    """
    super(Seq2SeqModel, self).__init__()

    self.HUMAN_SIZE = 54
    # One extra input slot per action class when one-hot labels are appended.
    self.input_size = self.HUMAN_SIZE + number_of_actions if one_hot else self.HUMAN_SIZE

    print( "One hot is ", one_hot )
    print( "Input size is %d" % self.input_size )

    self.source_seq_len = source_seq_len
    self.target_seq_len = target_seq_len
    self.rnn_size = rnn_size
    self.batch_size = batch_size
    self.dropout = dropout

    # === Create the RNN that will keep the state ===
    print('rnn_size = {0}'.format( rnn_size ))
    self.cell = torch.nn.GRUCell(self.input_size, self.rnn_size)
    # Projects the hidden state back to pose space for the residual update.
    self.fc1 = nn.Linear(self.rnn_size, self.input_size)

  def forward(self, encoder_inputs, decoder_inputs):
    """Runs the seq2seq forward pass.

    Args:
      encoder_inputs: [batch_size, source_seq_len - 1, input_size].
      decoder_inputs: [batch_size, target_seq_len, input_size].

    Returns:
      Tensor of shape [batch_size, target_seq_len, input_size] with the
      predicted target sequence.
    """
    def loop_function(prev, i):
      # Sampling-based decoding: feed back the previous prediction.
      return prev

    batchsize = encoder_inputs.size()[0]
    # Work in time-major order: [seq_len, batch_size, input_size].
    encoder_inputs = torch.transpose(encoder_inputs, 0, 1)
    decoder_inputs = torch.transpose(decoder_inputs, 0, 1)

    # BUGFIX: allocate the initial state directly on the input's device.
    # The previous `.to(encoder_inputs.get_device())` raised an error for
    # CPU tensors because Tensor.get_device() returns -1 for them.
    state = torch.zeros(
        batchsize, self.rnn_size, device=encoder_inputs.device)

    # Encode the source sequence into the hidden state.
    for i in range(self.source_seq_len-1):
      state = self.cell(encoder_inputs[i], state)
      state = F.dropout(state, self.dropout, training=self.training)

    outputs = []
    prev = None
    for i, inp in enumerate(decoder_inputs):
      # After the first step, replace the ground-truth input with the
      # model's own previous prediction (autoregressive decoding).
      if loop_function is not None and prev is not None:
        inp = loop_function(prev, i)
      inp = inp.detach()

      state = self.cell(inp, state)
      # Residual connection: predict a delta with respect to the input pose.
      output = inp + self.fc1(F.dropout(state, self.dropout, training=self.training))
      outputs.append(output.view([1, batchsize, self.input_size]))
      if loop_function is not None:
        prev = output

    outputs = torch.cat(outputs, 0)
    # Back to batch-major: [batch_size, target_seq_len, input_size].
    return torch.transpose(outputs, 0, 1)
| 5,425 | 34.697368 | 89 | py |
GaitForeMer | GaitForeMer-main/models/PoseEncoderDecoder.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Definition of pose encoder and encoder embeddings and model factory."""
import numpy as np
import os
import sys
import torch
import torch.nn as nn
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
import models.PoseGCN as GCN
import models.Conv1DEncoder as Conv1DEncoder
def pose_encoder_mlp(params):
  """Build a simple MLP pose embedding: linear projection plus dropout."""
  if params['init_fn'] == 'normal_init':
    init_fn = utils.normal_init_
  else:
    init_fn = utils.xavier_init_
  layers = [
      nn.Linear(params['input_dim'], params['model_dim']),
      nn.Dropout(0.1),
  ]
  pose_embedding = nn.Sequential(*layers)
  utils.weight_init(pose_embedding, init_fn_=init_fn)
  return pose_embedding
def pose_decoder_mlp(params):
  """Build a linear decoder mapping model features back to pose space."""
  if params['init_fn'] == 'normal_init':
    init_fn = utils.normal_init_
  else:
    init_fn = utils.xavier_init_
  pose_decoder = nn.Linear(params['model_dim'], params['pose_dim'])
  utils.weight_init(pose_decoder, init_fn_=init_fn)
  return pose_decoder
def pose_decoder_gcn(params):
  """Build a GCN pose decoder; 9 DoF per joint for rotmat poses, else 3."""
  dof = 9 if params['pose_format'] == 'rotmat' else 3
  return GCN.PoseGCN(
      input_features=params['model_dim'],
      output_features=dof,
      model_dim=params['model_dim'],
      output_nodes=params['n_joints'],
      p_dropout=params['dropout'],
      num_stage=1
  )
def pose_encoder_gcn(params):
  """Build a GCN pose encoder over the joint graph."""
  dof = 9 if params['pose_format'] == 'rotmat' else 3
  return GCN.SimpleEncoder(
      n_nodes=params['n_joints'],
      input_features=dof,
      model_dim=params['model_dim'],
      p_dropout=params['dropout']
  )
def pose_encoder_conv1d(params):
  """Build a per-joint 1D convolutional pose encoder."""
  dof = 9 if params['pose_format'] == 'rotmat' else 3
  return Conv1DEncoder.Pose1DEncoder(
      input_channels=dof,
      output_channels=params['model_dim'],
      n_joints=params['n_joints']
  )
def pose_encoder_conv1dtemporal(params):
  """Build a temporal 1D convolutional encoder over flattened poses."""
  dof = 9 if params['pose_format'] == 'rotmat' else 3
  channels_in = dof * params['n_joints']
  return Conv1DEncoder.Pose1DTemporalEncoder(
      input_channels=channels_in,
      output_channels=params['model_dim']
  )
def select_pose_encoder_decoder_fn(params):
  """Select the pose encoder/decoder factory pair from configuration.

  Args:
    params: Dict with key `pose_embedding_type` naming the embedding
      architecture (case-insensitive).

  Returns:
    Tuple (encoder_fn, decoder_fn) of factory functions, each taking
    `params` and returning an nn.Module.

  Raises:
    ValueError: If the embedding type is unknown or not implemented.
  """
  embedding_type = params['pose_embedding_type'].lower()
  if embedding_type == 'simple':
    return pose_encoder_mlp, pose_decoder_mlp
  if embedding_type == 'conv1d_enc':
    return pose_encoder_conv1d, pose_decoder_mlp
  if embedding_type == 'convtemp_enc':
    return pose_encoder_conv1dtemporal, pose_decoder_mlp
  if embedding_type == 'gcn_dec':
    return pose_encoder_mlp, pose_decoder_gcn
  if embedding_type == 'gcn_enc':
    return pose_encoder_gcn, pose_decoder_mlp
  if embedding_type == 'gcn_full':
    return pose_encoder_gcn, pose_decoder_gcn
  if embedding_type == 'vae':
    # BUGFIX: the original returned the undefined name `pose_encoder_vae`,
    # which raised a NameError at call time; fail explicitly instead.
    raise ValueError('pose_embedding_type vae is not implemented')
  raise ValueError('Unknown pose embedding {}'.format(params['pose_embedding_type']))
| 4,120 | 32.504065 | 87 | py |
GaitForeMer | GaitForeMer-main/models/TransformerEncoder.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implementation of Transformer encoder and encoder layer with self attention.
Implementation of the encoder layer as in [1] and [2] for sequence to
sequence modeling.
[1] https://arxiv.org/pdf/1706.03762.pdf
[2] https://arxiv.org/pdf/2005.12872.pdf
"""
import numpy as np
import sys
import os
import torch
import torch.nn as nn
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
class EncoderLayer(nn.Module):
  """Implements the transformer encoder layer.

  Self-attention followed by a point-wise feed-forward network, with
  residual connections; supports post- or pre-layer-normalization.
  """

  def __init__(self,
               model_dim=256,
               num_heads=8,
               dim_ffn=2048,
               dropout=0.1,
               init_fn=utils.normal_init_,
               pre_normalization=False):
    """Encoder layer initialization.

    Args:
      model_dim: dimension of the token embeddings.
      num_heads: number of attention heads.
      dim_ffn: hidden size of the point-wise feed-forward network.
      dropout: dropout probability applied after each sub-layer.
      init_fn: weight initialization function for the linear layers.
      pre_normalization: if True, apply layer norm before each sub-layer
        instead of after it.
    """
    super(EncoderLayer, self).__init__()
    self._model_dim = model_dim
    self._num_heads = num_heads
    self._dim_ffn = dim_ffn
    self._dropout = dropout
    self._pre_normalization = pre_normalization

    self._self_attn = nn.MultiheadAttention(model_dim, num_heads, dropout)
    self._relu = nn.ReLU()
    # Single dropout module reused by every dropout site in this layer.
    self._dropout_layer = nn.Dropout(self._dropout)
    # The point-wise feed-forward network: expand then project back.
    self._linear1 = nn.Linear(model_dim, self._dim_ffn)
    self._linear2 = nn.Linear(self._dim_ffn, self._model_dim)

    self._norm1 = nn.LayerNorm(model_dim, eps=1e-5)
    self._norm2 = nn.LayerNorm(model_dim, eps=1e-5)

    utils.weight_init(self._linear1, init_fn_=init_fn)
    utils.weight_init(self._linear2, init_fn_=init_fn)

  def forward(self, source_seq, pos_encodings):
    """Computes forward pass, dispatching on the normalization scheme.

    Args:
      source_seq: [sequence_length, batch_size, model_dim].
      pos_encodings: [sequence_length, model_dim].

    Returns:
      Tuple of (output tensor [sequence_length, batch_size, model_dim],
      attention weights).
    """
    if self._pre_normalization:
      return self.forward_pre(source_seq, pos_encodings)

    return self.forward_post(source_seq, pos_encodings)

  def forward_post(self, source_seq, pos_encodings):
    """Computes encoder layer forward pass with post normalization.

    Args:
      source_seq: [sequence_length, batch_size, model_dim].
      pos_encodings: [sequence_length, model_dim].

    Returns:
      Tuple of (output tensor [sequence_length, batch_size, model_dim],
      attention weights).
    """
    # add positional encodings to the input sequence
    # for self attention query is the same as key
    query = source_seq + pos_encodings
    key = query
    value = source_seq

    attn_output, attn_weights = self._self_attn(
        query,
        key,
        value,
        need_weights=True
    )

    # Residual connection + layer norm after attention.
    norm_attn = self._dropout_layer(attn_output) + source_seq
    norm_attn = self._norm1(norm_attn)

    # Point-wise feed-forward with a second residual + norm.
    output = self._linear1(norm_attn)
    output = self._relu(output)
    output = self._dropout_layer(output)
    output = self._linear2(output)
    output = self._dropout_layer(output) + norm_attn
    output = self._norm2(output)

    return output, attn_weights

  def forward_pre(self, source_seq_, pos_encodings):
    """Computes encoder layer forward pass with pre normalization.

    Args:
      source_seq_: [sequence_length, batch_size, model_dim].
      pos_encodings: [sequence_length, model_dim].

    Returns:
      Tuple of (output tensor [sequence_length, batch_size, model_dim],
      attention weights).
    """
    # Normalize first, then add positional encodings; for self attention
    # query is the same as key.
    source_seq = self._norm1(source_seq_)
    query = source_seq + pos_encodings
    key = query
    value = source_seq

    attn_output, attn_weights = self._self_attn(
        query,
        key,
        value,
        need_weights=True
    )

    # Residual uses the *unnormalized* input, as is standard for pre-norm.
    norm_attn_ = self._dropout_layer(attn_output) + source_seq_
    norm_attn = self._norm2(norm_attn_)

    output = self._linear1(norm_attn)
    output = self._relu(output)
    output = self._dropout_layer(output)
    output = self._linear2(output)
    output = self._dropout_layer(output) + norm_attn_

    return output, attn_weights
class TransformerEncoder(nn.Module):
  """Stack of transformer encoder layers."""

  def __init__(self,
               num_layers=6,
               model_dim=256,
               num_heads=8,
               dim_ffn=2048,
               dropout=0.1,
               init_fn=utils.normal_init_,
               pre_normalization=False):
    """Transformer encoder initialization.

    Args:
      num_layers: number of stacked encoder layers.
      model_dim: dimension of the token embeddings.
      num_heads: number of attention heads per layer.
      dim_ffn: hidden size of each layer's feed-forward network.
      dropout: dropout probability.
      init_fn: weight initialization function.
      pre_normalization: if True, layers use pre-layer-normalization.
    """
    super(TransformerEncoder, self).__init__()
    self._num_layers = num_layers
    self._model_dim = model_dim
    self._num_heads = num_heads
    self._dim_ffn = dim_ffn
    self._dropout = dropout
    # self._norm = norm
    self._pre_normalization = pre_normalization

    self._encoder_stack = self.init_encoder_stack(init_fn)

  def init_encoder_stack(self, init_fn):
    """Create the stack of encoder layers."""
    stack = nn.ModuleList()
    for s in range(self._num_layers):
      layer = EncoderLayer(
          model_dim=self._model_dim,
          num_heads=self._num_heads,
          dim_ffn=self._dim_ffn,
          dropout=self._dropout,
          init_fn=init_fn,
          pre_normalization=self._pre_normalization
      )
      stack.append(layer)
    return stack

  def forward(self, input_sequence, pos_encodings):
    """Computes encoder forward pass.

    Args:
      input_sequence: [sequence_length, batch_size, model_dim].
      pos_encodings: [sequence_length, model_dim].

    Returns:
      Tuple of (tensor of shape [sequence_length, batch_size, model_dim],
      attention weights of the *last* layer).
    """
    outputs = input_sequence
    for l in range(self._num_layers):
      outputs, attn_weights = self._encoder_stack[l](outputs, pos_encodings)

    # if self._norm:
    #   outputs = self._norm(outputs)

    return outputs, attn_weights
if __name__ == '__main__':
  # Smoke test: run a constant sequence through a freshly built encoder.
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
  seq_length = 50

  pos_encodings = torch.FloatTensor(seq_length, 1, 256).uniform_(0, 1)
  seq = torch.FloatTensor(seq_length, 8, 256).fill_(1.0)

  pos_encodings = pos_encodings.to(device)
  seq = seq.to(device)

  encoder = TransformerEncoder(num_layers=6)
  encoder.to(device)
  encoder.eval()

  # BUGFIX: forward returns (outputs, attention_weights); calling `.size()`
  # on the tuple itself raised an AttributeError.
  outputs, attn_weights = encoder(seq, pos_encodings)
  print(outputs.size())
| 7,169 | 28.628099 | 79 | py |
GaitForeMer | GaitForeMer-main/models/TransformerDecoder.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implementation of Transformer decoder and decoder layer with self attention.
Implementation of the decoder layer as in [1] and [2] for sequence to
sequence modeling.
[1] https://arxiv.org/pdf/1706.03762.pdf
[2] https://arxiv.org/pdf/2005.12872.pdf
"""
import numpy as np
import sys
import os
import torch
import torch.nn as nn
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
class DecoderLayer(nn.Module):
  """Implements the transformer decoder layer.

  Masked self-attention over the target sequence, encoder-decoder
  attention over the memory, then a point-wise feed-forward network.
  Supports post- or pre-layer-normalization and DETR-style learned
  query embeddings.
  """

  def __init__(self,
               model_dim=256,
               num_heads=8,
               dim_ffn=2048,
               dropout=0.1,
               init_fn=utils.normal_init_,
               pre_normalization=False,
               use_query_embedding=False):
    """Decoder layer initialization.

    Args:
      model_dim: dimension of the token embeddings.
      num_heads: number of attention heads.
      dim_ffn: hidden size of the point-wise feed-forward network.
      dropout: dropout probability applied after each sub-layer.
      init_fn: weight initialization function for the linear layers.
      pre_normalization: if True, apply layer norm before each sub-layer.
      use_query_embedding: if True, add learned query embeddings to the
        attention inputs (DETR-style, see [2] in module docstring).
    """
    super(DecoderLayer, self).__init__()
    self._model_dim = model_dim
    self._num_heads = num_heads
    self._dim_ffn = dim_ffn
    self._dropout = dropout
    self._pre_normalization = pre_normalization
    self._use_query_embedding = use_query_embedding

    self._self_attn = nn.MultiheadAttention(
        model_dim, num_heads, dropout=dropout
    )
    self._multihead_attn = nn.MultiheadAttention(
        model_dim, num_heads, dropout=dropout
    )

    # the so-called point-wise network
    self._linear1 = nn.Linear(model_dim, dim_ffn)
    self._linear2 = nn.Linear(dim_ffn, model_dim)
    self._relu = nn.ReLU()

    self._norm1 = nn.LayerNorm(model_dim)
    self._norm2 = nn.LayerNorm(model_dim)
    self._norm3 = nn.LayerNorm(model_dim)
    self._dropout1 = nn.Dropout(dropout)
    self._dropout2 = nn.Dropout(dropout)
    self._dropout3 = nn.Dropout(dropout)
    self._dropout4 = nn.Dropout(dropout)

    utils.weight_init(self._linear1, init_fn_=init_fn)
    utils.weight_init(self._linear2, init_fn_=init_fn)

    # Bind the normalization scheme once at construction time.
    self._forward_fn = self.forward_pre if pre_normalization else self.forward_post

  def forward(self,
              target_seq,
              memory,
              pos_encodings,
              query_embedding=None,
              mask_look_ahead=None,
              mask_target_padding=None):
    """Forward pass of the layer; dispatches on the normalization scheme.

    Args:
      target_seq: [target_seq_length, batch_size, model_dim]
      memory: [source_seq_length, batch_size, model_dim]
      pos_encodings: positional encodings added to queries/keys.
      query_embedding: optional learned query embeddings.
      mask_look_ahead: causal mask for self-attention.
      mask_target_padding: padding mask for the target sequence.

    Returns:
      Tuple of (outputs, self-attention weights, encoder-decoder
      attention weights).
    """
    return self._forward_fn(
        target_seq,
        memory,
        pos_encodings,
        query_embedding=query_embedding,
        mask_look_ahead=mask_look_ahead,
        mask_target_padding=mask_target_padding
    )

  def handle_query_embedding(self, sequence, embedding):
    """Adds the query embedding to the sequence when enabled."""
    if self._use_query_embedding:
      return sequence + embedding
    return sequence

  def forward_post(self,
                   target_seq,
                   memory,
                   pos_encodings,
                   query_embedding=None,
                   mask_look_ahead=None,
                   mask_target_padding=None):
    """Forward pass of the layer with post normalization.

    Args:
      target_seq: [target_seq_length, batch_size, model_dim]
      memory: [source_seq_length, batch_size, model_dim]
      mask_look_ahead: causal self-attention mask.
      mask_target_padding: padding mask for the target sequence.

    Returns:
      Tuple of (outputs, self-attention weights, attention weights).
    """
    # 1) Compute self attention with current sequence of inferred tokens
    # query is the same as key for self attention
    # [batch_size, seq_length, model_dim]
    if self._use_query_embedding:
      q = k = v = target_seq + query_embedding
    else:
      q = k = v = target_seq + pos_encodings

    self_attn, self_attn_weights = self._self_attn(
        query=q, key=k, value=v, #target_seq,
        attn_mask=mask_look_ahead,
        key_padding_mask=mask_target_padding
    )
    self_attn = self._dropout1(self_attn)
    out_self_attn = self._norm1(self_attn + target_seq)

    # 2) Attend the encoder's memory given the comptued self attention
    # [batch_size, seq_length, model_dim]
    attn, attn_weights = self._multihead_attn(
        query=self.handle_query_embedding(out_self_attn, query_embedding),
        key=self.handle_query_embedding(memory, pos_encodings),
        value=memory)
    attn = self._dropout2(attn)
    out_attn = self._norm2(attn + out_self_attn)

    # 3) Compute pointwise embeding by expanding and projecting + dropout
    ffn_output = self._linear1(out_attn)
    ffn_output = self._relu(ffn_output)
    ffn_output = self._dropout4(ffn_output)
    ffn_output = self._linear2(ffn_output)

    # 4) Compute residual connection as final output
    ffn_output = self._dropout3(ffn_output)
    outputs = self._norm3(ffn_output + out_attn)

    return outputs, self_attn_weights, attn_weights

  def forward_pre(self,
                  target_seq_,
                  memory,
                  pos_encodings,
                  query_embedding=None,
                  mask_look_ahead=None,
                  mask_target_padding=None):
    """Forward pass of the layer with pre normalization.

    Args:
      target_seq_: [target_seq_length, batch_size, model_dim]
      memory: [source_seq_length, batch_size, model_dim]
      mask_look_ahead: causal self-attention mask.
      mask_target_padding: padding mask for the target sequence.

    Returns:
      Tuple of (outputs, self-attention weights, attention weights).
    """
    # Normalize before the sub-layer; residuals use the unnormalized input.
    target_seq = self._norm1(target_seq_)
    # 1) Compute self attention with current sequence of inferred tokens
    # query is the same as key for self attention
    # [batch_size, seq_length, model_dim]
    if self._use_query_embedding:
      # in case of using only the query embedding follow DETR [2] which drops
      # values to zero and uses only the query embeddings
      q = k = target_seq + query_embedding
      v = target_seq
    else:
      q = k = v = target_seq + pos_encodings

    self_attn, self_attn_weights = self._self_attn(
        query=q, key=k, value=v,
        attn_mask=mask_look_ahead,
        key_padding_mask=mask_target_padding
    )
    self_attn = self._dropout1(self_attn)
    out_self_attn = self._norm2(self_attn + target_seq_)

    # 2) Attend the encoder's memory given the comptued self attention
    # [batch_size, seq_length, model_dim]
    attn, attn_weights = self._multihead_attn(
        query=self.handle_query_embedding(out_self_attn, query_embedding),
        key=self.handle_query_embedding(memory, pos_encodings),
        value=memory)
    attn = self._dropout2(attn)
    out_attn = self._norm3(attn + out_self_attn)

    # 3) Compute pointwise embeding by expanding and projecting + dropout
    ffn_output = self._linear1(out_attn)
    ffn_output = self._relu(ffn_output)
    ffn_output = self._dropout4(ffn_output)
    ffn_output = self._linear2(ffn_output)

    # 4) Compute residual connection as final output
    # (no final norm here: pre-norm layers leave that to the caller).
    ffn_output = self._dropout3(ffn_output)

    return ffn_output, self_attn_weights, attn_weights
class TransformerDecoder(nn.Module):
  """Transformer decoder module: a stack of DecoderLayer instances."""

  def __init__(self,
               num_layers=6,
               model_dim=256,
               num_heads=8,
               dim_ffn=2048,
               dropout=0.1,
               init_fn=utils.normal_init_,
               pre_normalization=False,
               use_query_embedding=False):
    """Decoder initialization.

    Args:
      num_layers: number of stacked decoder layers.
      model_dim: dimension of the token embeddings.
      num_heads: number of attention heads per layer.
      dim_ffn: hidden size of each layer's feed-forward network.
      dropout: dropout probability.
      init_fn: weight initialization function.
      pre_normalization: if True, layers use pre-layer-normalization.
      use_query_embedding: if True, decode from learned query embeddings
        (DETR-style) instead of the target sequence.
    """
    super(TransformerDecoder, self).__init__()
    self._model_dim = model_dim
    self._num_heads = num_heads
    self._dim_ffn = dim_ffn
    self._dropout = dropout
    self._num_layers = num_layers
    self._use_query_embedding = use_query_embedding
    self._pre_normalization = pre_normalization

    self._decoder_stack = self.init_decoder_stack(init_fn)

  def init_decoder_stack(self, init_fn):
    """Create the stack of decoder layers."""
    stack = nn.ModuleList()
    for s in range(self._num_layers):
      layer = DecoderLayer(
          model_dim=self._model_dim,
          num_heads=self._num_heads,
          dim_ffn=self._dim_ffn,
          dropout=self._dropout,
          init_fn=init_fn,
          pre_normalization=self._pre_normalization,
          use_query_embedding=self._use_query_embedding
      )
      stack.append(layer)
    return stack

  def forward(self,
              target_seq,
              memory,
              pos_encodings,
              query_embedding=None,
              mask_target_padding=None,
              mask_look_ahead=None,
              get_attn_weights=False):
    """Computes forward pass of decoder.

    Args:
      target_seq: [target_sequence_length, batch_size, model_dim].
      memory: [source_sequence_length, batch_size, model_dim].
      pos_encodings: [target_seq_length, model_dim].
      query_embedding: optional learned query embeddings.
      mask_target_padding: padding mask for the target sequence.
      mask_look_ahead: [target_seq_length, model_dim] causal mask.
      get_attn_weights: if True, collect per-layer attention weights.

    Returns:
      Tuple of:
        - list of per-layer output tensors, each of shape
          [target_sequence_length, batch_size, model_dim];
        - list of encoder-decoder attention weights per layer when
          `get_attn_weights` is True, otherwise None.
    """
    # NOTE: removed an unused local (`seq_length = target_seq.size()[0]`).
    output_list = []
    attn_weights_list = [] if get_attn_weights else None
    # With query embeddings the decoder starts from zeros (DETR-style);
    # otherwise it starts from the target sequence itself.
    outputs = torch.zeros_like(target_seq) if self._use_query_embedding else target_seq

    for l in range(self._num_layers):
      outputs, self_attn_weights, attn_weights = self._decoder_stack[l](
          outputs, memory,
          pos_encodings=pos_encodings,
          query_embedding=query_embedding,
          mask_target_padding=mask_target_padding,
          mask_look_ahead=mask_look_ahead
      )
      if get_attn_weights:
        attn_weights_list.append(attn_weights)
      output_list.append(outputs)

    return output_list, attn_weights_list
if __name__ == '__main__':
  # Smoke test: decode a constant target sequence against random memory.
  thispath = os.path.dirname(os.path.abspath(__file__))
  sys.path.insert(0, thispath+"/../")
  import utils.utils as utils

  seq_length = 55
  batch_size = 8
  model_dim = 256
  tgt_seq = torch.FloatTensor(seq_length, batch_size, model_dim).fill_(1)
  memory = torch.FloatTensor(seq_length, batch_size, model_dim).uniform_(0, 1)

  mask_look_ahead = utils.create_look_ahead_mask(seq_length)
  mask_look_ahead = torch.from_numpy(mask_look_ahead)

  encodings = torch.FloatTensor(seq_length, 1, model_dim).uniform_(0,1)

  decoder = TransformerDecoder()
  # BUGFIX: forward returns (output_list, attn_weights_list); calling
  # `.size()` on that tuple raised an AttributeError. Print the shape of
  # the last layer's output instead.
  output_list, attn_weights = decoder(
      tgt_seq, memory, encodings, mask_look_ahead=mask_look_ahead)
  print(output_list[-1].size())
| 11,059 | 31.818991 | 87 | py |
GaitForeMer | GaitForeMer-main/models/PoseGCN.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Graph Convolutional Neural Network implementation.
Code adapted from [1].
[1] https://github.com/wei-mao-2019/HisRepItself
[2] https://github.com/tkipf/gcn/blob/92600c39797c2bfb61a508e52b88fb554df30177/gcn/layers.py#L132
"""
import os
import sys
import torch.nn as nn
import torch
from torch.nn.parameter import Parameter
import math
import numpy as np
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import utils.utils as utils
class GraphConvolution(nn.Module):
  r"""Graph convolution layer computing \sigma(AxHxW) with learnable A, W."""

  def __init__(self, in_features, out_features, output_nodes=48, bias=False):
    r"""Constructor.

    The layer computes AxHxW where A is a learnable adjacency matrix,
    H the node features from the previous layer and W this layer's weight:
    A\in R^{NxN}, H\in R^{NxM}, W\in R^{MxO} with
      - N: number of nodes (`output_nodes`)
      - M: input features per node (`in_features`)
      - O: output features per node (`out_features`)

    Args:
      in_features: Number of input features per node.
      out_features: Number of output features per node.
      output_nodes: Number of nodes in the graph.
      bias: Whether to add a learnable per-feature bias.
    """
    super(GraphConvolution, self).__init__()
    self.in_features = in_features
    self.out_features = out_features
    self._output_nodes = output_nodes
    # W \in R^{MxO}: per-node feature projection.
    self.weight = Parameter(torch.FloatTensor(in_features, out_features))
    # A \in R^{NxN}: learnable adjacency over graph nodes.
    self.att = Parameter(torch.FloatTensor(output_nodes, output_nodes))
    if not bias:
      self.register_parameter('bias', None)
    else:
      self.bias = Parameter(torch.FloatTensor(out_features))
    self.reset_parameters()

  def reset_parameters(self):
    """Uniform initialization scaled by the weight's fan-out."""
    bound = 1. / math.sqrt(self.weight.size(1))
    for param in (self.weight, self.att):
      param.data.uniform_(-bound, bound)
    if self.bias is not None:
      self.bias.data.uniform_(-bound, bound)

  def forward(self, x):
    """Forward pass.

    Args:
      x: [batch_size, n_nodes, input_features]

    Returns:
      Tensor of shape [batch_size, n_nodes, output_features].
    """
    # HxW = {NxM}x{MxO} = {NxO}, then A x (HW) = {NxN}x{NxO} = {NxO}.
    projected = torch.matmul(x, self.weight)
    output = torch.matmul(self.att, projected)
    if self.bias is None:
      return output
    return output + self.bias

  def __repr__(self):
    return '{} ({} -> {})'.format(
        self.__class__.__name__, self.in_features, self.out_features)
class GC_Block(nn.Module):
  """Residual block built from two graph convolutions.

  Input and output feature widths are identical so the skip connection
  can be a plain addition.
  """

  def __init__(self, in_features, p_dropout, output_nodes=48, bias=False):
    """Constructor.

    Args:
      in_features: Number of input and output features per node.
      p_dropout: Dropout probability used after each activation.
      output_nodes: Number of nodes in the graph.
      bias: Whether the graph convolutions use a bias term.
    """
    super(GC_Block, self).__init__()
    self.in_features = in_features
    self.out_features = in_features

    self.gc1 = GraphConvolution(
        in_features, in_features,
        output_nodes=output_nodes,
        bias=bias
    )
    self.bn1 = nn.BatchNorm1d(output_nodes * in_features)

    self.gc2 = GraphConvolution(
        in_features, in_features,
        output_nodes=output_nodes,
        bias=bias
    )
    self.bn2 = nn.BatchNorm1d(output_nodes * in_features)

    self.do = nn.Dropout(p_dropout)
    self.act_f = nn.Tanh()

  def _half_block(self, x, gc, bn):
    # conv -> batchnorm over flattened (node, feature) -> tanh -> dropout
    y = gc(x)
    b, n, f = y.shape
    y = bn(y.view(b, -1)).view(b, n, f)
    return self.do(self.act_f(y))

  def forward(self, x):
    """Forward pass of the residual module"""
    y = self._half_block(x, self.gc1, self.bn1)
    y = self._half_block(y, self.gc2, self.bn2)
    return y + x

  def __repr__(self):
    return '{} ({} -> {})'.format(
        self.__class__.__name__, self.in_features, self.out_features)
class PoseGCN(nn.Module):
  """GCN pose decoder: expands a feature vector into per-joint outputs."""

  def __init__(self,
               input_features=128,
               output_features=3,
               model_dim=128,
               output_nodes=21,
               p_dropout=0.1,
               num_stage=1):
    """Constructor.

    Args:
      input_features: num of input feature of the graph nodes.
      output_features: num of output features per joint (e.g. 3 for 3D).
      model_dim: num of hidden features of the generated embeddings.
      output_nodes: number of nodes in graph.
      p_dropout: dropout probability
      num_stage: number of residual blocks in the network.
    """
    super(PoseGCN, self).__init__()
    self.num_stage = num_stage
    self._n_nodes = output_nodes
    self._model_dim = model_dim
    self._output_features = output_features
    self._hidden_dim = 512

    # Expands the single model_dim vector to one hidden vector per node.
    self._front = nn.Sequential(
        nn.Linear(model_dim, output_nodes*self._hidden_dim),
        nn.Dropout(p_dropout)
    )
    utils.weight_init(self._front, init_fn_=utils.xavier_init_)

    self.gc1 = GraphConvolution(
        self._hidden_dim,
        self._hidden_dim,
        output_nodes=output_nodes
    )
    self.bn1 = nn.BatchNorm1d(output_nodes * self._hidden_dim)

    # Stack of residual graph-convolution blocks.
    self.gcbs = []
    for i in range(num_stage):
      self.gcbs.append(GC_Block(
          self._hidden_dim,
          p_dropout=p_dropout,
          output_nodes=output_nodes)
      )
    self.gcbs = nn.ModuleList(self.gcbs)

    # Final projection to the per-joint output features.
    self.gc7 = GraphConvolution(
        self._hidden_dim,
        output_features,
        output_nodes=output_nodes
    )

    self.do = nn.Dropout(p_dropout)
    self.act_f = nn.Tanh()

    gcn_params = filter(lambda p: p.requires_grad, self.parameters())
    nparams = sum([np.prod(p.size()) for p in gcn_params])
    print('[INFO] ({}) GCN has {} params!'.format(self.__class__.__name__, nparams))

  def preprocess(self, x):
    # NOTE(review): this 2-D branch reads `self._seq_len`, which is never
    # assigned anywhere in this class, so it would raise AttributeError if
    # reached. forward() never calls preprocess — confirm before using it.
    if len(x.size()) < 3:
      _, D = x.size()
      # seq_len, batch_size, input_dim
      x = x.view(self._seq_len, -1, D)
      # [batch_size, seq_len, input_dim]
      x = torch.transpose(x, 0, 1)
      # [batch_size, input_dim, seq_len]
      x = torch.transpose(x, 1, 2)
      return x
    return x

  def postprocess(self, y):
    """Flattents the input tensor.

    Args:
      y: Input tensor of shape [batch_size, n_nodes, output_features].
    """
    y = y.view(-1, self._n_nodes*self._output_features)
    return y

  def forward(self, x):
    """Forward pass of network.

    Args:
      x: [batch_size, model_dim].

    Returns:
      Tensor of shape [batch_size, n_nodes*output_features].
    """
    # [batch_size, model_dim*n_nodes]
    x = self._front(x)
    x = x.view(-1, self._n_nodes, self._hidden_dim)

    # [batch_size, n_joints, model_dim]
    y = self.gc1(x)
    b, n, f = y.shape
    y = self.bn1(y.view(b, -1)).view(b, n, f)
    y = self.act_f(y)
    y = self.do(y)

    for i in range(self.num_stage):
      y = self.gcbs[i](y)

    # [batch_size, n_joints, output_features]
    y = self.gc7(y)
    # y = y + x

    # [seq_len*batch_size, input_dim]
    y = self.postprocess(y)

    return y
class SimpleEncoder(nn.Module):
  """GCN pose encoder: maps per-joint features to a single embedding."""

  def __init__(self,
               n_nodes=63,
               input_features=1,
               model_dim=128,
               p_dropout=0.1):
    """Constructor.

    Args:
      n_nodes: Number of nodes in the graph (each node is one joint).
      input_features: Number of features per node.
      model_dim: Dimension of the output vector to produce.
      p_dropout: Dropout to be applied for regularization.
    """
    super(SimpleEncoder, self).__init__()
    # The graph convolutions can be defined as \sigma(AxHxW), where
    # A\in R^{NxN} x H\in R^{NxM} x W\in R^{MxO}
    self._input_features = input_features
    self._output_nodes = n_nodes
    self._hidden_dim = 512
    self._model_dim = model_dim
    self._num_stage = 1
    print('[INFO] ({}) Hidden dimension: {}!'.format(
        self.__class__.__name__, self._hidden_dim))

    self.gc1 = GraphConvolution(
        in_features=self._input_features,
        out_features=self._hidden_dim,
        output_nodes=self._output_nodes
    )
    self.bn1 = nn.BatchNorm1d(self._output_nodes*self._hidden_dim)

    self.gc2 = GraphConvolution(
        in_features=self._hidden_dim,
        out_features=model_dim,
        output_nodes=self._output_nodes
    )

    # Stack of residual graph-convolution blocks.
    self.gcbs = []
    for i in range(self._num_stage):
      self.gcbs.append(GC_Block(
          self._hidden_dim,
          p_dropout=p_dropout,
          output_nodes=self._output_nodes)
      )
    self.gcbs = nn.ModuleList(self.gcbs)

    self.do = nn.Dropout(p_dropout)
    self.act_f = nn.Tanh()

    # Collapses the per-node features into a single model_dim vector.
    self._back = nn.Sequential(
        nn.Linear(model_dim*self._output_nodes, model_dim),
        nn.Dropout(p_dropout)
    )
    utils.weight_init(self._back, init_fn_=utils.xavier_init_)

    gcn_params = filter(lambda p: p.requires_grad, self.parameters())
    nparams = sum([np.prod(p.size()) for p in gcn_params])
    print('[INFO] ({}) GCN has {} params!'.format(self.__class__.__name__, nparams))

  def forward(self, x):
    """Forward pass of network.

    Args:
      x: [batch_size, n_poses, pose_dim/input_dim].

    Returns:
      Tensor of shape [batch_size, n_poses, model_dim].
    """
    B, S, D = x.size()
    # Fold (batch, pose) together: [B*S, n_joints, input_features].
    # [batch_size, n_joints, model_dim]
    y = self.gc1(x.view(-1, self._output_nodes, self._input_features))
    b, n, f = y.shape
    y = self.bn1(y.view(b, -1)).view(b, n, f)
    y = self.act_f(y)
    y = self.do(y)

    for i in range(self._num_stage):
      y = self.gcbs[i](y)

    # [batch_size, n_joints, model_dim]
    y = self.gc2(y)
    # [batch_size, model_dim]
    y = self._back(y.view(-1, self._model_dim*self._output_nodes))
    # [batch_size, n_poses, model_dim]
    y = y.view(B, S, self._model_dim)
    return y
def test_decoder():
  """Smoke test: run GraphConvolution and PoseGCN on dummy tensors."""
  seq_len, input_size = 25, 63
  model_dim, dropout = 128, 0.3
  n_stages, output_nodes = 2, 21
  joint_dof = 1
  n_joints = model_dim
  layer = GraphConvolution(
      in_features=joint_dof,
      out_features=model_dim,
      output_nodes=n_joints)
  dummy = torch.FloatTensor(10, n_joints, joint_dof)
  print(layer(dummy).size())
  gcn = PoseGCN(
      input_features=model_dim,
      output_features=3,
      model_dim=model_dim,
      output_nodes=output_nodes,
      p_dropout=0.1,
      num_stage=2)
  dummy = torch.FloatTensor(10*seq_len, model_dim)
  print(gcn(dummy).size())
def test_encoder():
  """Smoke test: run SimpleEncoder on a dummy pose batch."""
  input_size, model_dim = 63, 128
  dropout, n_stages = 0.3, 2
  output_nodes, dof = 21, 9
  encoder = SimpleEncoder(
      n_nodes=output_nodes,
      model_dim=model_dim,
      input_features=dof,
      p_dropout=0.1)
  dummy = torch.FloatTensor(10, 25, output_nodes*dof)
  print(encoder(dummy).size())
if __name__ == '__main__':
  # Manual smoke tests; enable test_decoder() instead as needed.
  #test_decoder()
  test_encoder()
| 11,945 | 26.976581 | 97 | py |
GaitForeMer | GaitForeMer-main/models/potr_fn.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Model function to deploy POTR models for visualization and generation."""
import numpy as np
import os
import sys
import argparse
import json
import time
import cv2
from matplotlib import image
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import training.seq2seq_model_fn as seq2seq_model_fn
import models.PoseTransformer as PoseTransformer
import models.PoseEncoderDecoder as PoseEncoderDecoder
import data.H36MDataset_v2 as H36MDataset_v2
import data.AMASSDataset as AMASSDataset
import utils.utils as utils
import radam.radam as radam
import training.transformer_model_fn as tr_fn
import tqdm
# _DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
_DEVICE = torch.device('cpu')
def plot_conf_mat(matrix):
  """Save a confusion matrix as an annotated heat-map image.

  Writes the figure to 'confusion_matrix.png' in the working directory.
  Uses the module-level `plt` import; the redundant function-local
  matplotlib imports were removed.

  Args:
    matrix: [n_classes, n_classes] array of (normalized) confusion values,
      one row/column per action class.
  """
  fig, ax = plt.subplots(figsize=(30, 30))
  im = ax.imshow(matrix, cmap='Blues')
  # Size the label set from the matrix instead of hard-coding 60 classes so
  # matrices built from fewer observed classes do not index out of range.
  n_classes = matrix.shape[0]
  action_labels = ['A%02d'%i for i in range(1, n_classes + 1)]
  ax.set_xticks(np.arange(len(action_labels)))
  ax.set_yticks(np.arange(len(action_labels)))
  ax.set_xticklabels(action_labels, fontdict={'fontsize':15})
  ax.set_yticklabels(action_labels, fontdict={'fontsize':15})
  ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
  # Annotate every cell with its rounded value.
  for i in range(n_classes):
    for j in range(n_classes):
      ax.text(j, i, round(matrix[i, j], 2),
              ha="center", va="center", color="black", fontsize=10)
  plt.ylabel("")
  plt.xlabel("")
  fig.tight_layout()
  plt.savefig('confusion_matrix.png')
  plt.close()
def crop_image(img):
  """Return the central crop of an image.

  The crop side is 30% of the image's longest side.

  Args:
    img: array of shape [H, W, ...].
  Returns:
    The centered crop view of `img`.
  """
  longest_side = max(img.shape[0], img.shape[1])
  half_h = int(longest_side*0.30) // 2
  half_w = int(longest_side*0.30) // 2
  center_y = img.shape[0] // 2
  center_x = img.shape[1] // 2
  return img[center_y - half_h:center_y + half_h,
             center_x - half_w:center_x + half_w]
def visualize_h36mdataset():
  """CLI driver: render GT inputs/targets and predictions for H3.6M actions.

  Reads --config_file (JSON hyper-parameters) and --model_file (checkpoint),
  builds the eval dataset and a POTR model, and writes skeleton images under
  skeletons/<action>/ via H36MDataset_v2.visualize_sequence.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--config_file', type=str)
  parser.add_argument('--model_file', type=str)
  parser.add_argument('--data_path', type=str, default=None)
  args = parser.parse_args()
  params = json.load(open(args.config_file))
  # CLI --data_path overrides the config; args is then synced back from params.
  if args.data_path is not None:
    params['data_path'] = args.data_path
  args.data_path = params['data_path']
  train_dataset_fn, eval_dataset_fn = tr_fn.dataset_factory(params)
  pose_encoder_fn, pose_decoder_fn = \
      PoseEncoderDecoder.select_pose_encoder_decoder_fn(params)
  for k,v in params.items():
    print('[INFO] (POTRFn@main) {}: {}'.format(k, v))
  # ids of most common actions in H36M
  actions = [('walking', 12), ('eating', 2), ('smoking', 9),
             ('discussion', 1), ('directions', 0)]
  with torch.no_grad():
    for i in range(len(actions)):
      action, acidx = actions[i]
      sample = next(iter(eval_dataset_fn))
      enc_inputs = sample['encoder_inputs'].to(_DEVICE)
      dec_inputs = sample['decoder_inputs'].to(_DEVICE)
      # The batch is sliced in blocks of 8 sequences per action id
      # (presumably 8 samples per action in the eval batch — confirm with
      # the dataset implementation).
      gts = np.squeeze(sample['decoder_outputs'].cpu().numpy())[8*acidx:8*acidx+8]
      ins = np.squeeze(sample['encoder_inputs'].cpu().numpy())[8*acidx:8*acidx+8]
      ins = eval_dataset_fn.dataset.unnormalize_pad_data_to_expmap(ins)
      H36MDataset_v2.visualize_sequence(
          ins[0:1], args.data_path, prefix='skeletons/%s/gt_in'%action, colors=['gray', 'gray'])
      #print(gts.shape)
      gts = eval_dataset_fn.dataset.unnormalize_pad_data_to_expmap(gts)
      H36MDataset_v2.visualize_sequence(
          gts[0:1], args.data_path, prefix='skeletons/%s/gt'%action, colors=['gray', 'gray'])
      enc_inputs = torch.squeeze(enc_inputs)
      dec_inputs = torch.squeeze(dec_inputs)
      # NOTE(review): the model is constructed and the checkpoint re-loaded on
      # every loop iteration; this looks hoistable out of the loop — confirm.
      model = PoseTransformer.model_factory(
          params,
          pose_encoder_fn,
          pose_decoder_fn
      )
      model.load_state_dict(torch.load(args.model_file, map_location=_DEVICE))
      model.to(_DEVICE)
      model.eval()
      prediction, attn_weights, memory = model(
          enc_inputs,
          dec_inputs,
          get_attn_weights=True
      )
      # Last decoder layer output; keep only the first sequence of the block.
      prediction = prediction[-1][8*acidx:8*acidx+8].cpu().numpy()[0:1]
      preds = eval_dataset_fn.dataset.unnormalize_pad_data_to_expmap(prediction)
      H36MDataset_v2.visualize_sequence(preds, args.data_path,
          prefix='skeletons/%s/pred'%action, colors=['red', 'red'])
def compute_mean_average_precision(prediction, target, dataset_fn):
  """Compute per-frame mAP between a predicted and a target sequence.

  Args:
    prediction: tensor of shape [..., T, D] (squeezed to [T, D]).
    target: tensor with the same shape as `prediction`.
    dataset_fn: loader whose `.dataset` provides `unormalize_sequence`.
  Returns:
    (mAP, TP, FN) as produced by utils.compute_mean_average_precision.
  """
  pred_seq = prediction.cpu().numpy().squeeze()
  tgt_seq = target.cpu().numpy().squeeze()
  n_frames = pred_seq.shape[0]
  pred_seq = dataset_fn.dataset.unormalize_sequence(pred_seq)
  tgt_seq = dataset_fn.dataset.unormalize_sequence(tgt_seq)
  # Group flat pose vectors into (frame, joint, xyz).
  pred_seq = pred_seq.reshape((n_frames, -1, 3))
  tgt_seq = tgt_seq.reshape((n_frames, -1, 3))
  mAP, _, _, (TP, FN) = utils.compute_mean_average_precision(
      pred_seq, tgt_seq, seq2seq_model_fn._MAP_TRESH, per_frame=True
  )
  return mAP, TP, FN
def compute_mpjpe(prediction, target, dataset_fn):
  """Per-frame, per-joint Euclidean error between prediction and target.

  Args:
    prediction: tensor of shape [..., T, D] (squeezed to [T, D]).
    target: tensor with the same shape as `prediction`.
    dataset_fn: loader whose `.dataset` provides `unormalize_sequence`.
  Returns:
    [seq_len, n_joints] array of joint position errors.
  """
  pred_seq = prediction.cpu().numpy().squeeze()
  tgt_seq = target.cpu().numpy().squeeze()
  n_frames = pred_seq.shape[0]
  pred_joints = dataset_fn.dataset.unormalize_sequence(
      pred_seq).reshape((n_frames, -1, 3))
  tgt_joints = dataset_fn.dataset.unormalize_sequence(
      tgt_seq).reshape((n_frames, -1, 3))
  # L2 norm over the xyz axis yields one error per (frame, joint).
  return np.squeeze(np.linalg.norm(pred_joints - tgt_joints, axis=-1))
def compute_test_mAP_nturgbd():
  """CLI driver: evaluate a POTR checkpoint on the NTU-RGBD test split.

  Accumulates per-frame and per-joint true/false positives to report mAP,
  per-joint MPJPE, and (optionally) activity-classification accuracy with a
  confusion-matrix plot.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--config_file', type=str)
  parser.add_argument('--model_file', type=str)
  parser.add_argument('--data_path', type=str, default=None)
  args = parser.parse_args()
  params = json.load(open(args.config_file))
  # CLI --data_path overrides the config; args is then synced back.
  if args.data_path is not None:
    params['data_path'] = args.data_path
  args.data_path = params['data_path']
  params['test_phase'] = True
  _, test_dataset_fn = tr_fn.dataset_factory(params)
  pose_encoder_fn, pose_decoder_fn = \
      PoseEncoderDecoder.select_pose_encoder_decoder_fn(params)
  for k,v in params.items():
    print('[INFO] (POTRFn@main) {}: {}'.format(k, v))
  model = PoseTransformer.model_factory(
      params,
      pose_encoder_fn,
      pose_decoder_fn
  )
  model.load_state_dict(torch.load(args.model_file, map_location=_DEVICE))
  model.to(_DEVICE)
  model.eval()
  # Per-frame and per-joint accumulators for TP/FN counts and joint error.
  FN = np.zeros((params['target_seq_len'],), dtype=np.float32)
  TP = np.zeros((params['target_seq_len'],), dtype=np.float32)
  FN_joint = np.zeros((params['n_joints'],), dtype=np.float32)
  TP_joint = np.zeros((params['n_joints'],), dtype=np.float32)
  MPJPE = np.zeros((params['n_joints'],), dtype=np.float32)
  pred_activity = []
  gt_activity = []
  with torch.no_grad():
    print('Running testing...')
    for n, sample in tqdm.tqdm(enumerate(test_dataset_fn)):
      enc_inputs = sample['encoder_inputs'].to(_DEVICE)
      dec_inputs = sample['decoder_inputs'].to(_DEVICE)
      gts = sample['decoder_outputs'].to(_DEVICE)
      # ins = np.squeeze(sample['encoder_inputs'].cpu().numpy())
      outputs = model(
          enc_inputs,
          dec_inputs,
          get_attn_weights=True
      )
      # Model output tuple has an extra logits entry when activity
      # prediction is enabled.
      if params['predict_activity']:
        a_ids = sample['action_ids']
        prediction, out_logits, attn_weights, memory = outputs
        out_class = torch.argmax(out_logits[-1].softmax(-1), -1)
      else:
        prediction, attn_weights, memory = outputs
      mAP, TP_, FN_ = compute_mean_average_precision(prediction[-1], gts, test_dataset_fn)
      MPJPE_ = compute_mpjpe(prediction[-1], gts, test_dataset_fn)
      # reduce by frame to get per joint MPJPE
      MPJPE = MPJPE + np.sum(MPJPE_, axis=0)
      # reduce by frame to get per joint AP
      TP_joint = TP_joint + np.sum(TP_, axis=0)
      FN_joint = FN_joint + np.sum(FN_, axis=0)
      # reduce by joint to get per frame AP
      TP_ = np.sum(TP_, axis=-1)
      FN_ = np.sum(FN_, axis=-1)
      TP = TP + TP_
      FN = FN + FN_
      # print(n, ':', prediction[-1].size(), out_class.item(), a_ids.item(), mAP, TP.shape, FN.shape)
      if params['predict_activity']:
        pred_activity.append(out_class.item())
        gt_activity.append(a_ids.item())
  #accurracy = (np.array(gt_activity) == np.array(pred_activity)).astype(np.float32).sum()
  #accurracy = accurracy / len(gt_activity)
  accurracy = -1
  if params['predict_activity']:
    # NOTE(review): sklearn's accuracy_score expects a boolean `normalize`;
    # the string 'true' is merely truthy here — confirm intent.
    accurracy = accuracy_score(gt_activity, pred_activity, normalize='true')
    conf_matrix = confusion_matrix(gt_activity, pred_activity, normalize='true')
    plot_conf_mat(conf_matrix)
  AP = TP / (TP+FN)
  AP_joints = TP_joint / (TP_joint + FN_joint)
  # NOTE(review): `n` is the LAST enumerate index, i.e. sample count - 1;
  # this looks like an off-by-one in the MPJPE normalization — confirm.
  MPJPE = MPJPE / (n*params['target_seq_len'])
  print('[INFO] The mAP per joint\n', np.around(AP_joints, 2))
  print('[INFO] The MPJPE\n', np.around(MPJPE,4)*100.0)
  print('[INFO] The accuracy: {} mAP: {}'.format(
      round(accurracy, 2), round(np.mean(AP), 2)))
  # Report mAP averaged over increasing time horizons (in milliseconds).
  ms_range = [0.08, 0.160, 0.320, 0.400, 0.5, 0.66]
  FPS = 30.0
  ms_map = []
  for ms in ms_range:
    nf = int(round(ms*FPS))
    ms_map.append(np.mean(AP[0:nf]))
  print()
  print("{0: <16} |".format("milliseconds"), end="")
  for ms in ms_range:
    print(" {0:5d} |".format(int(ms*1000)), end="")
  print()
  print("{0: <16} |".format("global mAP"), end="")
  for mAP in ms_map:
    print(" {0:.3f} |".format(mAP), end="")
  print()
def visualize_attn_weights():
  """CLI driver: visualize decoder attention over input skeleton frames.

  Loads a POTR checkpoint, runs one eval batch, and renders (a) a composite
  canvas connecting input frames to predicted frames with lines weighted by
  attention, saved as 'the_canvas.png', and (b) the raw attention matrix,
  saved as 'attn_map.png'. Expects skeleton images previously produced by
  visualize_h36mdataset() under skeletons/<action>/.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--config_file', type=str)
  parser.add_argument('--model_file', type=str)
  parser.add_argument('--data_path', type=str, default=None)
  args = parser.parse_args()
  params = json.load(open(args.config_file))
  if args.data_path is not None:
    params['data_path'] = args.data_path
  args.data_path = params['data_path']
  train_dataset_fn, eval_dataset_fn = tr_fn.dataset_factory(params)
  pose_encoder_fn, pose_decoder_fn = \
      PoseEncoderDecoder.select_pose_encoder_decoder_fn(params)
  model = PoseTransformer.model_factory(
      params,
      pose_encoder_fn,
      pose_decoder_fn
  )
  model.load_state_dict(torch.load(args.model_file, map_location=_DEVICE))
  model.to(_DEVICE)
  model.eval()
  for k,v in params.items():
    print('[INFO] (POTRFn@main) {}: {}'.format(k, v))
  # ids of most common actions in H36M
  actions = [('walking', 12)]
  #[('walking', 12), ('eating', 2), ('smoking', 9),
  # ('discussion', 1), ('directions', 0)]
  with torch.no_grad():
    for i in range(len(actions)):
      action, acidx = actions[i]
      sample = next(iter(eval_dataset_fn))
      enc_inputs = sample['encoder_inputs'].to(_DEVICE)
      dec_inputs = sample['decoder_inputs'].to(_DEVICE)
      enc_inputs = torch.squeeze(enc_inputs)
      dec_inputs = torch.squeeze(dec_inputs)
      prediction, attn_weights, enc_weights = model(
          enc_inputs,
          dec_inputs,
          get_attn_weights=True
      )
      # Last decoder layer; first sequence of this action's block of 8.
      attn_weights= attn_weights[-1][8*acidx:8*acidx+8].cpu().numpy()[0:1]
      attn_weights = np.squeeze(attn_weights)
      print(attn_weights.shape)
      # Load previously rendered skeleton frames; inputs are subsampled 2:1.
      path = 'skeletons/%s'%action
      in_imgs_ = [crop_image(cv2.imread(os.path.join(path, x)) )
          for x in os.listdir(path) if 'gt_in' in x]
      in_imgs = [in_imgs_[i] for i in range(0, len(in_imgs_), 2)]
      pred_imgs = [crop_image(cv2.imread(os.path.join(path, x)))
          for x in os.listdir(path) if 'pred_0' in x]
      the_shape = in_imgs[0].shape
      cx = the_shape[1]//2
      cy = the_shape[0]//2
      in_imgs = np.concatenate(in_imgs, axis=1)
      pred_imgs = np.concatenate(pred_imgs, axis=1)
      #cv2.imshow('In IMG', in_imgs)
      #cv2.imshow('pred IMG', pred_imgs)
      #cv2.waitKey()
      # White canvas: inputs on the top row, predictions on the bottom row,
      # with `spaces_between`-1 empty rows in between for the lines.
      spaces_between = 5
      print(in_imgs.shape, pred_imgs.shape, the_shape)
      canvas = np.ones(
          (in_imgs.shape[0]*spaces_between, in_imgs.shape[1], 3),
          dtype=in_imgs.dtype)*255
      canvas[0:the_shape[0], :] = in_imgs
      canvas[the_shape[0]*(spaces_between-1):, 0:pred_imgs.shape[1]] = pred_imgs
      #cx_pred = cx + the_shape[1]*(spaces_between-1) - cx//2
      cy_pred = cy + the_shape[0]*(spaces_between-1) - cy//3*2
      print(attn_weights.min(), attn_weights.max())
      mean = attn_weights.mean()
      #plt.imshow(canvas, origin='lower')
      pil_canvas = Image.fromarray(canvas)
      d_canvas = ImageDraw.Draw(pil_canvas)
      # Draw a line from each input frame to each predicted frame whose
      # attention weight is above the mean.
      for pred_idx in range(attn_weights.shape[0]):
        # cy_pred = cy + pred_idx*the_shape[0]
        cx_pred = cx + pred_idx*the_shape[1]
        #cv2.circle(canvas, (cx_pred, cy_pred), 5, [0,255,0], -1)
        for ii, in_idx in enumerate(range(0, attn_weights.shape[1], 2)):
          # cy_in = cy + ii*the_shape[0]
          cx_in = cx + ii*the_shape[1]
          this_weight = attn_weights[pred_idx, in_idx]
          if this_weight > mean:
            # NOTE(review): `width=this_weight/mean` passes a float, and the
            # RGBA fill's alpha is ignored on an RGB canvas; Pillow's
            # ImageDraw.line expects an int width — confirm version tolerance.
            #d_canvas.line([(cx+cx//2, cy_in), (cx_pred, cy_pred)], fill=(255,0,0, 25), width=this_weight/mean)
            d_canvas.line([(cx_in, cy+cy//3*2), (cx_pred, cy_pred)], fill=(255,0,0, 25), width=this_weight/mean)
      name = 'the_canvas.png'
      #cv2.imwrite('the_canvas.jpg', canvas)
      # plt.show()
      #plt.imsave(name, canvas)
      pil_canvas.save(name)
      print(pil_canvas.info)
      fig, ax = plt.subplots(figsize=(20,10))
      ax.matshow(attn_weights)
      plt.ylabel("")
      plt.xlabel("")
      fig.tight_layout()
      #plt.show()
      name = 'attn_map.png'
      plt.savefig(name)
      plt.close()
if __name__ == '__main__':
  # Entry point: enable exactly one of the drivers below.
  # visualize_h36mdataset()
  visualize_attn_weights()
  #compute_test_mAP_nturgbd()
| 14,622 | 30.245726 | 110 | py |
GaitForeMer | GaitForeMer-main/utils/WarmUpScheduler.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Warm up scheduler implementation.
Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
Adapted from https://github.com/ildoonet/pytorch-gradual-warmup-lr/blob/master/warmup_scheduler/scheduler.py
"""
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
class GradualWarmupScheduler(_LRScheduler):
  """ Gradually warm-up(increasing) learning rate in optimizer.

  Linearly increases the learning rate over `total_epoch` steps, then
  optionally hands control to `after_scheduler`.
  """
  def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
    """Constructor.
    Args:
      optimizer (Optimizer): Wrapped optimizer.
      multiplier: target learning rate = base lr * multiplier if
        multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with
        the base_lr.
      total_epoch: target learning rate is reached at total_epoch, gradually
      after_scheduler: after target_epoch, use this scheduler
        (eg. ReduceLROnPlateau)
    """
    self.multiplier = multiplier
    if self.multiplier < 1.:
      raise ValueError('multiplier should be greater than or equal to 1.')
    self.total_epoch = total_epoch
    self.after_scheduler = after_scheduler
    self.finished = False
    # NOTE(review): _LRScheduler.__init__ typically performs an initial
    # step()/get_lr() call — confirm against the installed torch version.
    super(GradualWarmupScheduler, self).__init__(optimizer)
  def get_lr(self):
    # Called by the base class on every step() to produce the new lr list.
    if self.last_epoch > self.total_epoch:
      # Warm-up finished: scale the after-scheduler's base lrs exactly once,
      # then delegate lr computation to it.
      if self.after_scheduler:
        if not self.finished:
          self.after_scheduler.base_lrs = [
              base_lr * self.multiplier for base_lr in self.base_lrs]
          self.finished = True
        # return self.after_scheduler.get_last_lr()
        return self.after_scheduler.get_lr()
      return [base_lr * self.multiplier for base_lr in self.base_lrs]
    # During warm-up: ramp from 0 (multiplier == 1.0) or from base_lr
    # (multiplier > 1.0) towards base_lr * multiplier.
    if self.multiplier == 1.0:
      return [base_lr * (float(self.last_epoch) / self.total_epoch)
              for base_lr in self.base_lrs]
    else:
      return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.)
              for base_lr in self.base_lrs]
  def step_ReduceLROnPlateau(self, metrics, epoch=None):
    # ReduceLROnPlateau has a different step signature (needs a metric), so
    # the warm-up ramp is applied manually here.
    if epoch is None:
      epoch = self.last_epoch + 1
    # ReduceLROnPlateau is called at the end of epoch, whereas others
    # are called at beginning
    self.last_epoch = epoch if epoch != 0 else 1
    if self.last_epoch <= self.total_epoch:
      warmup_lr = [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.)
                   for base_lr in self.base_lrs]
      for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
        param_group['lr'] = lr
    else:
      # Past warm-up: forward the metric to the plateau scheduler with a
      # shifted epoch index.
      if epoch is None:
        self.after_scheduler.step(metrics, None)
      else:
        self.after_scheduler.step(metrics, epoch - self.total_epoch)
  def step(self, epoch=None, metrics=None):
    # Dispatch on the after-scheduler type: plateau schedulers need metrics.
    if type(self.after_scheduler) != ReduceLROnPlateau:
      if self.finished and self.after_scheduler:
        if epoch is None:
          self.after_scheduler.step(None)
        else:
          self.after_scheduler.step(epoch - self.total_epoch)
        # self._last_lr = self.after_scheduler.get_last_lr()
        self._last_lr = self.after_scheduler.get_lr()
      else:
        return super(GradualWarmupScheduler, self).step(epoch)
    else:
      self.step_ReduceLROnPlateau(metrics, epoch)
| 4,280 | 39.771429 | 108 | py |
GaitForeMer | GaitForeMer-main/utils/utils.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Set of utility functions."""
import torch
import numpy as np
import copy
import json
import os
import cv2
import torch.nn as nn
def expmap_to_euler(action_sequence):
  """Convert a batch of exponential-map rotations to Euler angles.

  Args:
    action_sequence: [n_samples, n_joints, 3] exponential maps.
  Returns:
    [n_samples, n_joints, 3] Euler angles.
  """
  return rotmat_to_euler(expmap_to_rotmat(action_sequence))
def expmap_to_rotmat(action_sequence):
  """Convert exponential maps to rotation matrices.

  Args:
    action_sequence: [n_samples, n_joints, 3]
  Returns:
    Rotation matrices for exponential maps [n_samples, n_joints, 9].
  """
  n_samples, n_joints, _ = action_sequence.shape
  flat_expmaps = np.reshape(action_sequence, [n_samples*n_joints, 1, 3])
  # cv2.Rodrigues converts one axis-angle vector at a time. The first joint
  # slot may hold a root position rather than a rotation; it is converted
  # anyway and discarded downstream.
  flat_rotmats = np.zeros([n_samples*n_joints, 3, 3])
  for idx in range(flat_rotmats.shape[0]):
    flat_rotmats[idx] = cv2.Rodrigues(flat_expmaps[idx])[0]
  return np.reshape(flat_rotmats, [n_samples, n_joints, 3*3])
def rotmat_to_expmap(action_sequence):
  """Convert rotation matrices to exponential maps.

  Args:
    action_sequence: [n_samples, n_joints, 9]
  Returns:
    Rotation exponential maps [n_samples, n_joints, 3].
  """
  n_samples, n_joints, _ = action_sequence.shape
  flat_rotmats = np.reshape(action_sequence, [n_samples*n_joints, 3, 3])
  # cv2.Rodrigues maps each 3x3 rotation matrix to a 3x1 axis-angle vector.
  flat_expmaps = np.zeros([n_samples*n_joints, 3, 1])
  for idx in range(flat_rotmats.shape[0]):
    flat_expmaps[idx] = cv2.Rodrigues(flat_rotmats[idx])[0]
  return np.reshape(flat_expmaps, [n_samples, n_joints, 3])
def rotmat_to_euler(action_sequence):
  """Convert rotation matrices to Euler angles.

  Args:
    action_sequence: [n_samples, n_joints, 9]
  Returns:
    Euler angles for the given rotation matrices [n_samples, n_joints, 3].
  """
  n_samples, n_joints, _ = action_sequence.shape
  flat_rotmats = np.reshape(action_sequence, [n_samples*n_joints, 3, 3])
  flat_eulers = np.zeros([n_samples*n_joints, 3])
  for idx, rotmat in enumerate(flat_rotmats):
    flat_eulers[idx] = rotmat2euler(rotmat)
  return np.reshape(flat_eulers, [n_samples, n_joints, 3])
def rotmat2euler(R):
  """Convert a rotation matrix to Euler angles.

  Matlab port to python for evaluation purposes
  https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/RotMat2Euler.m#L1

  Args:
    R: a 3x3 rotation matrix
  Returns:
    eul: a 3x1 Euler angle representation of R
  """
  if -1 < R[0, 2] < 1:
    # Regular case: the three angles are recoverable from matrix entries.
    e2 = -np.arcsin(R[0, 2])
    cos_e2 = np.cos(e2)
    e1 = np.arctan2(R[1, 2] / cos_e2, R[2, 2] / cos_e2)
    e3 = np.arctan2(R[0, 1] / cos_e2, R[0, 0] / cos_e2)
  else:
    # Gimbal lock: R[0, 2] is at +/-1, so only a combination of the first
    # and third angles is observable; fix e3 = 0 arbitrarily.
    e3 = 0
    delta = np.arctan2(R[0, 1], R[0, 2])
    if R[0, 2] == -1:
      e2 = np.pi / 2
      e1 = e3 + delta
    else:
      e2 = -np.pi / 2
      e1 = -e3 + delta
  return np.array([e1, e2, e3])
def load_constants(data_path):
  """Load H3.6M kinematic-tree constants from JSON files under data_path.

  Files are opened in context managers so the handles are closed promptly
  (the previous `json.load(open(...))` pattern leaked them).

  Args:
    data_path: directory containing offset.json, parent.json, rot_ind.json.
  Returns:
    parent: zero-based parent index per joint (stored 1-based on disk).
    offset: [n_joints, 3] bone offsets.
    rot_ind: per-joint indices into the angle vector.
    exp_map_ind: 32 index groups of 3 into the exponential-map angles.
  """
  with open(os.path.join(data_path, 'offset.json')) as f:
    offset = np.array(json.load(f)).reshape(-1, 3)
  with open(os.path.join(data_path, 'parent.json')) as f:
    parent = np.array(json.load(f)) - 1
  with open(os.path.join(data_path, 'rot_ind.json')) as f:
    rot_ind = json.load(f)
  exp_map_ind = np.split(np.arange(4, 100) - 1, 32)
  return parent, offset, rot_ind, exp_map_ind
def compute_forward_kinematics(angles, parent, offset, rotInd, expmapInd):
  """Computes forward kinematics from angles to 3d points.

  Convert joint angles and bone lengths into the 3d points of a person.
  Based on expmap2xyz.m, available at
  https://github.com/asheshjain399/RNNexp/blob/7fc5a53292dc0f232867beb66c3a9ef845d705cb/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/exp2xyz.m
  Args
    angles: 99-long vector with 3d position and 3d joint angles in expmap format
    parent: 32-long vector with parent-child relationships in the kinematic tree
    offset: 96-long vector with bone lengths
    rotInd: 32-long list with indices into angles
    expmapInd: 32-long list with indices into expmap angles
  Returns
    xyz: 32x3 3d points that represent a person in 3d space
  """
  assert len(angles) == 99, 'Incorrect number of angles.'
  # Structure that indicates parents for each joint
  njoints = 32
  xyzStruct = [dict() for x in range(njoints)]
  for i in np.arange(njoints):
    # Per-joint translation components, taken from `angles` when indexed.
    if not rotInd[i] : # If the list is empty
      xangle, yangle, zangle = 0, 0, 0
    else:
      xangle = angles[rotInd[i][0]-1]
      yangle = angles[rotInd[i][1]-1]
      zangle = angles[rotInd[i][2]-1]
    r = angles[expmapInd[i]]
    thisRotation = expmap2rotmat(r)
    thisPosition = np.array([xangle, yangle, zangle])
    if parent[i] == -1: # Root node
      xyzStruct[i]['rotation'] = thisRotation
      xyzStruct[i]['xyz'] = np.reshape(offset[i,:], (1,3)) + thisPosition
    else:
      # Chain this joint's offset through the parent's accumulated rotation
      # and position (joints must therefore be ordered parent-before-child).
      xyzStruct[i]['xyz'] = (offset[i,:] + thisPosition).dot(
          xyzStruct[parent[i]]['rotation']) + xyzStruct[parent[i]]['xyz']
      xyzStruct[i]['rotation'] = thisRotation.dot(
          xyzStruct[parent[i]]['rotation'])
  xyz = [xyzStruct[i]['xyz'] for i in range(njoints)]
  xyz = np.array(xyz).squeeze()
  # Swap the y and z axes for visualization conventions.
  xyz = xyz[:,[0,2,1]]
  # xyz = xyz[:,[2,0,1]]
  return np.reshape( xyz, [-1] )
def revert_coordinate_space(channels, R0, T0):
  """Arrange poses to a canonical form to face the camera.

  Bring a series of poses to a canonical form so they are facing the camera
  when they start. Adapted from
  https://github.com/asheshjain399/RNNexp/blob/7fc5a53292dc0f232867beb66c3a9ef845d705cb/structural_rnn/CRFProblems/H3.6m/dataParser/Utils/revertCoordinateSpace.m

  Args:
    channels: n-by-99 matrix of poses
    R0: 3x3 rotation for the first frame
    T0: 1x3 position for the first frame
  Returns:
    channels_rec: The passed poses, but the first has T0 and R0, and the
      rest of the sequence is modified accordingly.
  """
  n_frames, _ = channels.shape
  channels_rec = copy.copy(channels)
  root_rot_idx = np.arange(3, 6)
  R_prev, T_prev = R0, T0
  for frame in range(n_frames):
    # Compose this frame's root rotation with the running rotation.
    R_diff = expmap2rotmat(channels[frame, root_rot_idx])
    R = R_diff.dot(R_prev)
    channels_rec[frame, root_rot_idx] = rotmat2expmap(R)
    # Accumulate the translation in the rotated frame.
    T = T_prev + (R_prev.T).dot(
        np.reshape(channels[frame, :3], [3, 1])).reshape(-1)
    channels_rec[frame, :3] = T
    T_prev, R_prev = T, R
  return channels_rec
def rotmat2quat(R):
  """Convert a rotation matrix to a quaternion.

  Matlab port to python for evaluation purposes
  https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/rotmat2quat.m#L4

  Args:
    R: 3x3 rotation matrix
  Returns:
    q: 1x4 quaternion ([w, x, y, z] order)
  """
  # The rotation axis is encoded in the skew-symmetric part of R.
  skew = R - R.T
  axis = np.array([-skew[1, 2], skew[0, 2], -skew[0, 1]])
  sintheta = np.linalg.norm(axis) / 2
  unit_axis = axis / (np.linalg.norm(axis) + np.finfo(np.float32).eps)
  costheta = (np.trace(R) - 1) / 2
  theta = np.arctan2(sintheta, costheta)
  q = np.zeros(4)
  q[0] = np.cos(theta / 2)
  q[1:] = unit_axis * np.sin(theta / 2)
  return q
def quat2expmap(q):
  """Convert quaternions to an exponential map.

  Matlab port to python for evaluation purposes
  https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/quat2expmap.m#L1

  Args:
    q: 1x4 quaternion ([w, x, y, z] order)
  Returns:
    r: 1x3 exponential map
  Raises:
    ValueError if the l2 norm of the quaternion is not close to 1
  """
  if np.abs(np.linalg.norm(q) - 1) > 1e-3:
    # Fixed: the Python-2 style `raise(ValueError, "...")` raised a tuple,
    # which is a TypeError under Python 3 instead of the documented ValueError.
    raise ValueError("quat2expmap: input quaternion is not norm 1")

  sinhalftheta = np.linalg.norm(q[1:])
  coshalftheta = q[0]

  r0 = np.divide(q[1:], (np.linalg.norm(q[1:]) + np.finfo(np.float32).eps))
  theta = 2 * np.arctan2(sinhalftheta, coshalftheta)
  # Wrap the angle into [0, 2*pi), then fold into [0, pi] by flipping the axis.
  theta = np.mod(theta + 2 * np.pi, 2 * np.pi)

  if theta > np.pi:
    theta = 2 * np.pi - theta
    r0 = -r0

  return r0 * theta
def rotmat2expmap(R):
  """Convert a 3x3 rotation matrix to a 1x3 exponential map."""
  quaternion = rotmat2quat(R)
  return quat2expmap(quaternion)
def expmap2rotmat(r):
  """Convert an exponential map (axis-angle vector) to a rotation matrix.

  This is Rodrigues' formula; the same conversion is implemented in opencv
  as cv2.Rodrigues. Matlab port to python for evaluation purposes:
  https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/expmap2rotmat.m

  Args:
    r: 1x3 exponential map
  Returns:
    R: 3x3 rotation matrix
  """
  theta = np.linalg.norm(r)
  axis = np.divide(r, theta + np.finfo(np.float32).eps)
  # Skew-symmetric cross-product matrix of the (unit) rotation axis.
  cross = np.array([
      [0., -axis[2], axis[1]],
      [axis[2], 0., -axis[0]],
      [-axis[1], axis[0], 0.]])
  # Rodrigues' rotation formula.
  return np.eye(3, 3) + np.sin(theta)*cross + (1 - np.cos(theta))*cross.dot(cross)
def revert_output_format(
    poses,
    data_mean,
    data_std,
    dim_to_ignore,
    actions,
    use_one_hot):
  """Transforms pose predictions to a more interpretable format.

  Converts the output of the neural network to a format that is more easy
  to manipulate for, e.g. conversion to other formats or visualization.

  Args:
    poses: Sequence of pose predictions. A list with (seq_length) entries,
      each with a (batch_size, dim) output.
  Returns:
    List of (seq_length, dim) unnormalized arrays, one per batch element.
  """
  if not poses:
    return []
  seq_len = len(poses)
  batch_size, dim = poses[0].shape
  # (seq, batch, dim) -> (batch, seq, dim)
  stacked = np.concatenate(poses).reshape((seq_len, batch_size, dim))
  stacked = np.transpose(stacked, [1, 0, 2])
  return [
      unnormalize_data(stacked[i, :, :], data_mean, data_std,
                       dim_to_ignore, actions, use_one_hot)
      for i in range(stacked.shape[0])
  ]
def unnormalize_data(
    normalizedData,
    data_mean,
    data_std,
    dimensions_to_ignore=None,
    actions=(),
    use_one_hot=False):
  """Recover the original scale of normalized data.

  https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/generateMotionData.py#L12

  Args:
    normalizedData: nxd matrix with normalized data
    data_mean: vector of mean used to normalize the data
    data_std: vector of standard deviation used to normalize the data
    dimensions_to_ignore: vector with dimensions not used by the model
    actions: list of strings with the encoded actions
    use_one_hot: whether the data comes with one-hot encoding
  Returns:
    origData: data at the original (unnormalized) scale
  """
  T = normalizedData.shape[0]
  D = data_mean.shape[0]
  origData = np.zeros((T, D), dtype=np.float32)
  # `is None` instead of `== None`; the default for `actions` is an immutable
  # tuple to avoid the mutable-default pitfall (only its length is used).
  ignore = [] if dimensions_to_ignore is None else dimensions_to_ignore
  dimensions_to_use = np.array([i for i in range(D) if i not in ignore])

  if use_one_hot:
    # Strip the trailing one-hot action encoding before writing back.
    origData[:, dimensions_to_use] = normalizedData[:, :-len(actions)]
  else:
    origData[:, dimensions_to_use] = normalizedData

  # Broadcasting applies the per-dimension std and mean to every row,
  # replacing the T-fold np.repeat of the previous implementation.
  return origData * data_std.reshape((1, D)) + data_mean.reshape((1, D))
def get_srnn_gts(
    actions,
    model,
    test_set,
    data_mean,
    data_std,
    dim_to_ignore,
    one_hot,
    to_euler=True):
  """
  Get the ground truths for srnn's sequences, and convert to Euler angles.
  (the error is always computed in Euler angles).
  Args
    actions: a list of actions to get ground truths for.
    model: training model we are using (we only use the "get_batch" method).
    test_set: dictionary with normalized training data.
    data_mean: d-long vector with the mean of the training data.
    data_std: d-long vector with the standard deviation of the training data.
    dim_to_ignore: dimensions that we are not using to train/predict.
    one_hot: whether the data comes with one-hot encoding indicating action.
    to_euler: whether to convert the angles to Euler format or keep thm in exponential map
  Returns
    srnn_gts_euler: a dictionary where the keys are actions, and the values
    are the ground_truth, denormalized expected outputs of srnns's seeds.
  """
  # NOTE(review): `data_utils` is referenced below but is not imported in
  # this module's visible imports — calling this function likely raises
  # NameError; confirm the intended module (possibly this file's own
  # unnormalize_data / rotmat2euler / expmap2rotmat).
  srnn_gts_euler = {}
  for action in actions:
    srnn_gt_euler = []
    _, _, srnn_expmap = model.get_batch_srnn( test_set, action )
    # expmap -> rotmat -> euler
    for i in np.arange( srnn_expmap.shape[0] ):
      denormed = data_utils.unNormalizeData(srnn_expmap[i,:,:], data_mean, data_std, dim_to_ignore, actions, one_hot )
      if to_euler:
        # Convert each joint triplet (dims 3..96) from expmap to Euler.
        for j in np.arange( denormed.shape[0] ):
          for k in np.arange(3,97,3):
            denormed[j,k:k+3] = data_utils.rotmat2euler( data_utils.expmap2rotmat( denormed[j,k:k+3] ))
      srnn_gt_euler.append( denormed );
    # Put back in the dictionary
    srnn_gts_euler[action] = srnn_gt_euler
  return srnn_gts_euler
def normal_init_(layer, mean_, sd_, bias, norm_bias=True):
  """Initialize a Linear layer's weights from N(mean_, sd_).

  The bias is drawn from N(bias, 0.05) when `norm_bias` is set, otherwise
  filled with the constant `bias`. Layers whose class name does not
  contain 'Linear' are left untouched (conv support was dropped).
  """
  layer_type = layer.__class__.__name__
  if 'Linear' not in layer_type:
    return
  print('[INFO] (normal_init) Initializing layer {}'.format(layer_type))
  layer.weight.data.normal_(mean_, sd_)
  if norm_bias:
    layer.bias.data.normal_(bias, 0.05)
  else:
    layer.bias.data.fill_(bias)
def weight_init(
    module,
    mean_=0,
    sd_=0.004,
    bias=0.0,
    norm_bias=False,
    init_fn_=normal_init_):
  """Initialization of layers with normal distribution.

  Walks `module` (a single layer, or an iterable possibly containing
  `Sequential` blocks) and applies `init_fn_` to every leaf layer.
  A non-iterable `module` is initialized directly.
  """
  try:
    for child in module:
      if child.__class__.__name__ == 'Sequential':
        # expand nested sequential containers one level deep
        for leaf in child:
          init_fn_(leaf, mean_, sd_, bias, norm_bias)
      else:
        init_fn_(child, mean_, sd_, bias, norm_bias)
  except TypeError:
    # `module` is a plain layer rather than a container
    init_fn_(module, mean_, sd_, bias, norm_bias)
def xavier_init_(layer, mean_, sd_, bias, norm_bias=True):
  """Xavier-uniform initialization for Linear layers.

  `mean_`, `sd_` and `bias` are accepted only for signature compatibility
  with `normal_init_`; they are unused. The bias is drawn from N(0, 0.05)
  when `norm_bias` is set, otherwise zeroed.
  """
  layer_type = layer.__class__.__name__
  if 'Linear' not in layer_type:
    return
  print('[INFO] (xavier_init) Initializing layer {}'.format(layer_type))
  nn.init.xavier_uniform_(layer.weight.data)
  if norm_bias:
    layer.bias.data.normal_(0, 0.05)
  else:
    layer.bias.data.zero_()
def create_dir_tree(base_dir):
  """Create the standard experiment sub-directories under `base_dir`.

  Existing directories are kept as-is (`exist_ok=True`).
  """
  for subdir in ('models', 'tf_logs', 'config', 'std_log'):
    os.makedirs(os.path.join(base_dir, subdir), exist_ok=True)
def create_look_ahead_mask(seq_length, is_nonautoregressive=False):
  """Generates a binary mask to prevent using future context in a sequence.

  Entries equal to 1 mark (query, key) pairs that must be blocked.
  Non-autoregressive decoding may attend everywhere, so the mask is all
  zeros in that case.

  Returns:
    float32 array of shape (seq_length, seq_length).
  """
  if is_nonautoregressive:
    return np.zeros((seq_length, seq_length), dtype=np.float32)
  full = np.ones((seq_length, seq_length), dtype=np.float32)
  # strictly-upper triangle == positions in the future
  return np.triu(full, 1).astype(np.float32)
def pose_expmap2rotmat(input_pose):
  """Convert exponential map pose format to rotation matrix pose format."""
  converted = []
  for frame in range(input_pose.shape[0]):
    # one rotation matrix per 3-value exponential-map joint triplet
    frame_mats = [
        expmap2rotmat(input_pose[frame, col:col + 3])
        for col in range(3, 97, 3)
    ]
    converted.append(np.stack(frame_mats).flatten())
  return np.stack(converted)
def expmap23d_sequence(sequence, norm_stats, params):
  """Denormalize an exp-map sequence and run forward kinematics to 3D.

  Returns an array of shape (num_frames, 96) with xyz joint positions.
  """
  denormed = revert_output_format(
      [sequence], norm_stats['mean'], norm_stats['std'],
      norm_stats['dim_to_ignore'], params['action_subset'],
      params['use_one_hot'])
  # express all frames in a single global coordinate frame
  expmap = revert_coordinate_space(denormed[0], np.eye(3), np.zeros(3))
  n_frames = sequence.shape[0]
  xyz = np.zeros((n_frames, 96))
  for frame in range(n_frames):
    xyz[frame, :] = compute_forward_kinematics(
        expmap[frame, :],
        params['parent'],
        params['offset'],
        params['rot_ind'],
        params['exp_map_ind'])
  return xyz
def get_lr_fn(params, optimizer_fn):
  """Creates the learning-rate scheduler described by `params`.

  Args:
    params: dict with 'learning_rate_fn' in
      {'step', 'exponential', 'linear', 'beatles'} plus the
      hyper-parameters each schedule requires.
    optimizer_fn: the optimizer whose learning rate will be scheduled.

  Returns:
    A `torch.optim.lr_scheduler` instance.

  Raises:
    ValueError: for an unknown 'learning_rate_fn'.
  """
  schedule = params['learning_rate_fn']
  if schedule == 'step':
    return torch.optim.lr_scheduler.StepLR(
        optimizer_fn, step_size=params['lr_step_size'], gamma=0.1
    )
  elif schedule == 'exponential':
    return torch.optim.lr_scheduler.ExponentialLR(
        optimizer_fn, gamma=0.95
    )
  elif schedule == 'linear':
    # LambdaLR multiplies the *initial* lr by the lambda's value, so the
    # lambda must decay from 1.0 to the multiplier ratio 0.1 over
    # max_epochs. The previous slope, (lr0*0.1 - 1)/T, mixed the absolute
    # learning rate with the multiplier and drove the lr towards ~0.
    T = params['max_epochs']
    m = (0.1 - 1.0) / T
    lambda_fn = lambda epoch: m * epoch + 1.0
    return torch.optim.lr_scheduler.LambdaLR(
        optimizer_fn, lr_lambda=lambda_fn
    )
  elif schedule == 'beatles':
    # Transformer warmup: D^(-0.5)*min(i^(-0.5), i*warmup_steps^(-1.5))
    D = float(params['model_dim'])
    warmup = params['warmup_epochs']
    lambda_fn = lambda e: (D**(-0.5))*min((e+1.0)**(-0.5), (e+1.0)*warmup**(-1.5))
    return torch.optim.lr_scheduler.LambdaLR(
        optimizer_fn, lr_lambda=lambda_fn
    )
  else:
    raise ValueError('Unknown learning rate function: {}'.format(schedule))
def compute_mean_average_precision(prediction, target, threshold, per_frame=False):
  """Mean average precision of joint positions under a distance threshold.

  Args:
    prediction: unnormalized sequence of shape [seq_len, num_joints, 3].
    target: unnormalized sequence of shape [seq_len, num_joints, 3].
    threshold: float; a joint counts as a true positive when its
      Euclidean error is <= threshold.
    per_frame: also return the per-frame 0/1 hit/miss matrices.

  Returns:
    (mAP, TP, FN) with per-joint counts summed over frames; with
    `per_frame`, additionally the [seq_len, num_joints] matrices.
  """
  # Euclidean error over the last axis (x, y, z): [num_frames x num_joints]
  hits = np.linalg.norm(prediction - target, axis=-1) <= threshold
  hit_matrix = hits.astype(int)
  miss_matrix = np.logical_not(hits).astype(int)
  # per-joint counts over frames: [num_joints]
  tp_counts = np.sum(hit_matrix, axis=0)
  fn_counts = np.sum(miss_matrix, axis=0)
  # recall per joint, then averaged over joints
  recall = tp_counts / (tp_counts + fn_counts)
  mAP = np.mean(recall)
  if per_frame:
    return mAP, tp_counts, fn_counts, (hit_matrix, miss_matrix)
  return mAP, tp_counts, fn_counts
| 19,278 | 30.708882 | 161 | py |
GaitForeMer | GaitForeMer-main/utils/visualize_attention_weights.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Visualization of the attention weights."""
import torch
import torch.nn as nn
import numpy as np
import argparse
import sys
import os
import tqdm
import json
from sklearn.metrics import confusion_matrix
thispath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, thispath+"/../")
import data.H36MDataset_v2 as H36M_v2
import models.PoseTransformer as PoseTransformer
import models.PoseEncoderDecoder as PoseEncoderDecoder
import matplotlib.pyplot as plt
import matplotlib
def plot_conf_mat(matrix, action_labels):
  """Render a confusion matrix with per-cell values and show it."""
  fig, ax = plt.subplots()
  ax.imshow(matrix, cmap='Wistia')
  n_labels = len(action_labels)
  ax.set_xticks(np.arange(n_labels))
  ax.set_yticks(np.arange(n_labels))
  ax.set_xticklabels(action_labels, fontdict={'fontsize': 10}, rotation=90)
  ax.set_yticklabels(action_labels, fontdict={'fontsize': 10})
  # keep the x labels on top; hide the bottom ticks
  ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
  for row in range(n_labels):
    for col in range(n_labels):
      ax.text(col, row, round(matrix[row, col], 2),
              ha="center", va="center", color="black", fontsize=5)
  plt.ylabel("")
  plt.xlabel("")
  fig.tight_layout()
  plt.show()
  plt.close()
# SRNN evaluation keys for H3.6M test subject 5: tuples of
# (subject_id, action_name, sub-sequence_id), two sub-sequences per action.
the_keys = [(5, 'directions', 1), (5, 'directions', 2), (5, 'discussion', 1), (5, 'discussion', 2), (5, 'eating', 1), (5, 'eating', 2), (5, 'greeting', 1), (5, 'greeting', 2), (5, 'phoning', 1), (5, 'phoning', 2), (5, 'posing', 1), (5, 'posing', 2), (5, 'purchases', 1), (5, 'purchases', 2), (5, 'sitting', 1), (5, 'sitting', 2), (5, 'sittingdown', 1), (5, 'sittingdown', 2), (5, 'smoking', 1), (5, 'smoking', 2), (5, 'takingphoto', 1), (5, 'takingphoto', 2), (5, 'waiting', 1), (5, 'waiting', 2), (5, 'walking', 1), (5, 'walking', 2), (5, 'walkingdog', 1), (5, 'walkingdog', 2), (5, 'walkingtogether', 1), (5, 'walkingtogether', 2)
]
def get_windows(
    data,
    source_seq_len,
    target_seq_len,
    pad_decoder_inputs,
    input_size, n_windows):
  """Slice `data` into `n_windows` consecutive encoder/decoder windows.

  Windows start every `source_seq_len - 1` frames. The decoder input
  window starts one frame before the decoder target window. When
  `pad_decoder_inputs` is set, all decoder inputs are copies of the
  first query frame.

  Returns:
    A tuple of float32 tensors with shapes
    [n_windows, source_seq_len-1, input_size],
    [n_windows, target_seq_len, input_size] and
    [n_windows, target_seq_len, input_size].
  """
  src_len = source_seq_len - 1
  window_len = source_seq_len + target_seq_len
  enc_list, dec_in_list, dec_out_list = [], [], []
  start = 0
  for _ in range(n_windows):
    window = data[start:start + window_len, :]
    enc = np.array(window[0:src_len, :], dtype=np.float32)
    dec_in = np.array(window[src_len:src_len + target_seq_len, :],
                      dtype=np.float32)
    dec_out = np.array(window[source_seq_len:, 0:input_size],
                       dtype=np.float32)
    if pad_decoder_inputs:
      # non-autoregressive query: repeat the first decoder frame
      dec_in = np.repeat(dec_in[0:1, :], target_seq_len, axis=0)
    enc_list.append(enc)
    dec_in_list.append(dec_in)
    dec_out_list.append(dec_out)
    start += src_len
  return (
      torch.from_numpy(np.stack(enc_list)),
      torch.from_numpy(np.stack(dec_in_list)),
      torch.from_numpy(np.stack(dec_out_list))
  )
if __name__ == '__main__':
  # Load hyper-parameters and a trained POTR checkpoint, then save the
  # model's attention matrices for each evaluation sequence as a PNG.
  parser = argparse.ArgumentParser()
  parser.add_argument('--params_json', type=str, default=None)
  parser.add_argument('--model', type=str, default= None)
  args = parser.parse_args()
  _DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
  params = json.load(open(args.params_json))
  train_dataset_fn, eval_dataset_fn = H36M_v2.dataset_factory(params)
  pose_encoder_fn, pose_decoder_fn = \
      PoseEncoderDecoder.select_pose_encoder_decoder_fn(params)
  potr = PoseTransformer.model_factory(
      params, pose_encoder_fn, pose_decoder_fn)
  potr.load_state_dict(torch.load(args.model, map_location=_DEVICE))
  potr.to(_DEVICE)
  potr.eval()
  all_pred, all_gt = [], []
  n_windows = 8
  # keep only the second sub-sequence of each action pair in the_keys
  the_keys_ = [the_keys[i] for i in range(1, len(the_keys), 2)]
  with torch.no_grad():
    for i in range(len(the_keys_)):
      entry_key = the_keys_[i] # (5, 'walking', 1)
      data = eval_dataset_fn.dataset._data[entry_key]
      # n_windows consecutive (encoder, decoder) windows from the sequence
      encoder_inputs, decoder_inputs, decoder_outputs = get_windows(
          data,
          params['source_seq_len'],
          params['target_seq_len'],
          params['pad_decoder_inputs'],
          params['input_dim'],
          n_windows
      )
      pred_sequence, attn_weights, enc_weights= potr(
          encoder_inputs.to(_DEVICE),
          decoder_inputs.to(_DEVICE),
          get_attn_weights=True
      )
      enc_weights = enc_weights.cpu().numpy()
      # attention of the last decoder layer, one matrix per window
      attn_weights = attn_weights[-1].cpu().numpy()
      attn_weights = [attn_weights[j] for j in range(n_windows)]
      mat = np.concatenate(attn_weights, axis=-1)
      # NOTE(review): `mat` is immediately overwritten below, so only the
      # encoder self-attention ends up being plotted — confirm intended.
      mat = np.concatenate([enc_weights[j] for j in range(n_windows)], axis=-1)
      print(enc_weights.shape)
      fig, ax = plt.subplots(figsize=(20,10))
      ax.matshow(mat)
      plt.ylabel("")
      plt.xlabel("")
      fig.tight_layout()
      #plt.show()
      # one figure per action, e.g. vis_attn/walking_.png
      name = 'vis_attn/%s_.png'%(entry_key[1])
      plt.savefig(name)
      plt.close()
| 6,648 | 33.630208 | 632 | py |
GaitForeMer | GaitForeMer-main/utils/PositionEncodings.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Implementation of the 2D positional encodings used in [1].
Position encodings gives a signature to each pixel in the image by a set
of sine frequecies computed with a 2D sine function.
[1] https://arxiv.org/abs/2005.12872
[2] https://arxiv.org/pdf/1706.03762.pdf
"""
import numpy as np
import math
import torch
from torch import nn
class PositionEncodings2D(object):
  """Masked 2D sine position encodings as an object, following DETR [1].

  Generalizes the 1D encodings of "Attention is All You Need" to images:
  every valid pixel receives a signature of `num_pos_feats` sine/cosine
  frequencies per axis.
  """

  def __init__(
      self,
      num_pos_feats=64,
      temperature=10000,
      normalize=False,
      scale=None):
    """Constructs the position embedding generator.

    Args:
      num_pos_feats: depth of the per-pixel signature for each of the
        `x` and `y` axes.
      temperature: base of the exponential frequency spacing.
      normalize: normalize coordinates by the number of valid pixels in
        each row/column before scaling.
      scale: scaling factor; None defaults to 2*pi (requires normalize).
    """
    super().__init__()
    self._num_pos_feats = num_pos_feats
    self._temperature = temperature
    self._normalize = normalize
    if scale is not None and normalize is False:
      raise ValueError("normalize should be True if scale is passed")
    if scale is None:
      scale = 2 * math.pi
    self._scale = scale

  def __call__(self, mask):
    """Generates the positional encodings for an image boolean mask.

    Args:
      mask: Bool tensor [batch_size, width, height]; True marks padding
        pixels, False marks valid pixels.

    Returns:
      Sine encodings of shape [batch_size, num_pos_feats*2, width, height].
    """
    # encodings are generated for valid pixels, so negate the padding mask
    valid = ~mask
    y_embed = valid.cumsum(1, dtype=torch.float32)
    x_embed = valid.cumsum(2, dtype=torch.float32)
    if self._normalize:
      eps = 1e-6
      y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self._scale
      x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self._scale
    # one frequency per channel pair
    dim_t = torch.arange(self._num_pos_feats, dtype=torch.float32)
    dim_t = self._temperature ** (2 * (dim_t // 2) / self._num_pos_feats)
    pos_x = x_embed[:, :, :, None] / dim_t
    pos_y = y_embed[:, :, :, None] / dim_t
    # interleave sin on even channels, cos on odd channels
    pos_x = torch.stack(
        (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()),
        dim=4).flatten(3)
    pos_y = torch.stack(
        (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()),
        dim=4).flatten(3)
    return torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
class PositionEncodings1D(object):
  """Sinusoidal position encodings for 1D sequences, as in [2].

  PE(pos, 2i)   = sin(alpha*pos / temperature^(2i/d))
  PE(pos, 2i+1) = cos(alpha*pos / temperature^(2i/d))

  where d = num_pos_feats is the depth of the encodings.
  """

  def __init__(self, num_pos_feats=512, temperature=10000, alpha=1):
    self._num_pos_feats = num_pos_feats
    self._temperature = temperature
    self._alpha = alpha

  def __call__(self, seq_length):
    """Returns a [1, seq_length, num_pos_feats] float32 tensor."""
    positions = np.arange(seq_length)[:, np.newaxis]
    depths = np.arange(self._num_pos_feats)[np.newaxis, :]
    angle_rads = self.get_angles(positions, depths)
    # sin on even depth indices (2i), cos on odd (2i+1)
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
    encodings = angle_rads[np.newaxis, ...].astype(np.float32)
    return torch.from_numpy(encodings)

  def get_angles(self, pos, i):
    """Angle arguments alpha*pos / temperature^(2*(i//2)/d)."""
    angle_rates = 1 / np.power(
        self._temperature, (2 * (i//2)) / np.float32(self._num_pos_feats))
    return self._alpha*pos * angle_rates
def visualize_2d_encodings():
  """Display the first x/y frequency channels of the 2D encodings.

  The previous version imported cv2 and numpy locally without using
  them, forcing an unnecessary OpenCV dependency for a pure-matplotlib
  visualization; both dead imports are removed.
  """
  import matplotlib.pyplot as pplt
  # mask where all pixels are valid (False = not padding)
  mask = torch.BoolTensor(1, 32, 32).fill_(False)
  # position encodings with a per-pixel signature of depth 128 per axis,
  # so the effective pixel signature is num_pos_feats*2
  pos_encodings_gen = PositionEncodings2D(num_pos_feats=128, normalize=True)
  encodings = pos_encodings_gen(mask).numpy()
  print('Shape of encodings', encodings.shape)
  # first frequency channel for y (channel 0) and x (channel 128)
  y_encodings = encodings[0, 0, :, :]
  x_encodings = encodings[0, 128, :, :]
  pplt.matshow(x_encodings, cmap=pplt.get_cmap('jet'))
  pplt.matshow(y_encodings, cmap=pplt.get_cmap('jet'))
  pplt.show()
def visualize_1d_encodings():
  """Display a heat map of the 1D positional encodings."""
  import matplotlib.pyplot as plt
  encoder = PositionEncodings1D()
  encoding = encoder(50).numpy()
  print(encoding.shape)
  plt.pcolormesh(encoding[0], cmap='RdBu')
  plt.xlabel('Depth')
  plt.xlim((0, 512))
  plt.ylabel('position in sequence')
  plt.colorbar()
  plt.show()
if __name__ == "__main__":
  # demo entry point: render the 2D encodings (1D variant kept for reference)
  visualize_2d_encodings()
  # visualize_1d_encodings()
| 6,337 | 32.712766 | 81 | py |
GaitForeMer | GaitForeMer-main/data/GaitJointsDataset.py | import os
import sys
import numpy as np
import torch
import argparse
import tqdm
import pickle
import random
# Number of severity classes (normal/slight/moderate/severe — see
# GaitJointsDataset._action_str).
_TOTAL_ACTIONS = 4
# Mapping from 1-base of NTU to vibe 49 joints
# hip, thorax,
_MAJOR_JOINTS = [39, 41, 37, 43, 34, 35, 36, 33, 32, 31, 28, 29, 30, 27, 26, 25, 40]
# Corresponding 1-based NTU joint ids:
# 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 13, 14, 15, 17, 18, 19, 21
_NMAJOR_JOINTS = len(_MAJOR_JOINTS)
# Floor for per-dimension standard deviations when normalizing.
_MIN_STD = 1e-4
_SPINE_ROOT = 0 # after only taking major joints (ie index in _MAJOR_JOINTS)
def collate_fn(batch):
  """Stacks a list of sample dicts into batched tensors for data loaders."""
  as_tensor = lambda key: torch.from_numpy(
      np.stack([sample[key] for sample in batch]))
  return {
      'encoder_inputs': as_tensor('encoder_inputs'),
      'decoder_inputs': as_tensor('decoder_inputs'),
      'decoder_outputs': as_tensor('decoder_outputs'),
      'action_str': [sample['action_str'] for sample in batch],
      'action_ids': as_tensor('action_id'),
  }
class GaitJointsDataset(torch.utils.data.Dataset):
  """Skeleton dataset of gait sequences with impairment-severity labels.

  Sequences are loaded from per-fold pickle files
  (EPG_train_<fold>.pkl / EPG_test_<fold>.pkl) and sliced into random
  encoder/decoder windows on access.
  """

  def __init__(self, params=None, mode='train', fold=1):
    # params: dict of hyper-parameters; must include 'data_path',
    #   'source_seq_len', 'target_seq_len' and 'pad_decoder_inputs'.
    # mode: 'train' loads the training pickle, anything else the test one.
    # fold: cross-validation fold number used in the pickle file names.
    super(GaitJointsDataset, self).__init__()
    self._params = params
    self._mode = mode
    thisname = self.__class__.__name__
    self._monitor_action = 'normal'
    self._action_str = ['normal', 'slight', 'moderate', 'severe']
    self.data_dir = self._params['data_path']
    self.fold = fold
    self.load_data()

  def load_data(self):
    """Loads this fold's pickles and keeps the split matching `self._mode`."""
    train_data = pickle.load(open(self.data_dir+"EPG_train_" + str(self.fold) + ".pkl", "rb"))
    test_data = pickle.load(open(self.data_dir+"EPG_test_" + str(self.fold) + ".pkl", "rb"))
    if self._mode == 'train':
      X_1, Y = self.data_generator(train_data, mode='train', fold_number=self.fold)
    else:
      X_1, Y = self.data_generator(test_data)
    self.X_1 = X_1
    self.Y = Y
    # NOTE(review): overwrites the label names assigned in __init__
    self._action_str = ['none', 'mild', 'moderate', 'severe']
    self._pose_dim = 3 * _NMAJOR_JOINTS
    self._data_dim = self._pose_dim

  def data_generator(self, T, mode='test', fold_number=1):
    """Unpacks a loaded pickle dict {'pose': [...], 'label': [...]}.

    Returns:
      (X_1, Y): a list of per-clip pose arrays (clips have varying frame
      counts, so they cannot be stacked) and the stacked label array.
    """
    X_1 = []
    Y = []
    # bootstrap_number = 3
    # num_samples = 39
    total_num_clips = 0
    for i in range(len(T['pose'])):
      total_num_clips += 1
      p = np.copy(T['pose'][i])
      # print(np.shape(p))
      y_label_index = T['label'][i]
      label = y_label_index
      X_1.append(p)
      Y.append(label)
    # can't stack X_1 because not all have equal frames
    Y = np.stack(Y)
    # For using a subset of the dataset (few-shot)
    # if mode == 'train':
    #   sampling_dir = 'PATH/TO/BOOTSTRAP_SAMPLING_DIR'
    #   all_clip_video_names = pickle.load(open(sampling_dir + "all_clip_video_names.pkl", "rb"))
    #   clip_video_names = all_clip_video_names[fold_number - 1]
    #   all_bootstrap_samples = pickle.load(open(sampling_dir + f'{num_samples}_samples/bootstrap_{bootstrap_number}_samples.pkl', 'rb'))
    #   bootstrap_samples = all_bootstrap_samples[fold_number - 1]
    #   mask_list = [1 if video_name in bootstrap_samples else 0 for video_name in clip_video_names]
    #   train_indices = [train_idx for train_idx, mask_value in enumerate(mask_list) if mask_value == 1]
    #   X_1 = [X_1[train_idx] for train_idx in train_indices]
    #   Y = Y[train_indices]
    return X_1, Y

  def __len__(self):
    return len(self.Y)

  def __getitem__(self, idx):
    return self._get_item_train(idx)

  def _get_item_train(self, idx):
    """Builds one encoder/decoder window from clip `idx`.

    A random start frame is drawn per access, so each epoch sees a
    different window of every clip.
    """
    x = self.X_1[idx]
    y = self.Y[idx]
    # keep only the major joints (vibe -> NTU subset mapping)
    x = x[:,_MAJOR_JOINTS,:]
    action_id = y
    source_seq_len = self._params['source_seq_len']
    target_seq_len = self._params['target_seq_len']
    input_size = 3 * _NMAJOR_JOINTS  # flattened (x, y, z) per major joint
    pose_size = 3 * _NMAJOR_JOINTS   # same layout for the targets
    total_frames = source_seq_len + target_seq_len
    src_seq_len = source_seq_len - 1
    encoder_inputs = np.zeros((src_seq_len, input_size), dtype=np.float32)
    decoder_inputs = np.zeros((target_seq_len, input_size), dtype=np.float32)
    decoder_outputs = np.zeros((target_seq_len, pose_size), dtype=np.float32)
    # total_framesxn_joints*joint_dim
    N = np.shape(x)[0]
    # NOTE(review): this assignment is dead — it is immediately
    # overwritten below; it only advances numpy's RNG state.
    start_frame = np.random.randint(0, N - total_frames + 1)
    # original code did not change start frame between epochs
    start_frame = random.randint(0, N - total_frames) # high inclusive
    data_sel = x[start_frame:(start_frame + total_frames), :]
    encoder_inputs[:, 0:input_size] = data_sel[0:src_seq_len,:]
    # the decoder input window starts one frame before the target window
    decoder_inputs[:, 0:input_size] = \
        data_sel[src_seq_len:src_seq_len+target_seq_len, :]
    decoder_outputs[:, 0:pose_size] = data_sel[source_seq_len:, 0:pose_size]
    if self._params['pad_decoder_inputs']:
      # non-autoregressive decoding: repeat the first query frame
      query = decoder_inputs[0:1, :]
      decoder_inputs = np.repeat(query, target_seq_len, axis=0)
    return {
        'encoder_inputs': encoder_inputs,
        'decoder_inputs': decoder_inputs,
        'decoder_outputs': decoder_outputs,
        'action_id': action_id,
        'action_str': self._action_str[action_id],
    }
def dataset_factory(params, fold):
  """Defines the datasets that will be used for training and validation."""
  params['num_activities'] = _TOTAL_ACTIONS
  params['virtual_dataset_size'] = params['steps_per_epoch']*params['batch_size']
  params['n_joints'] = _NMAJOR_JOINTS
  is_test_phase = 'test_phase' in params.keys()
  eval_mode = 'test' if is_test_phase else 'eval'
  # no training loader is built when running the test phase
  if is_test_phase:
    train_dataset_fn = None
  else:
    train_dataset_fn = torch.utils.data.DataLoader(
        GaitJointsDataset(params, mode='train', fold=fold),
        batch_size=params['batch_size'],
        shuffle=True,
        num_workers=4,
        collate_fn=collate_fn,
    )
  eval_dataset_fn = torch.utils.data.DataLoader(
      GaitJointsDataset(params, mode=eval_mode, fold=fold),
      batch_size=1,
      shuffle=False,
      num_workers=1,
      collate_fn=collate_fn,
  )
  return train_dataset_fn, eval_dataset_fn
GaitForeMer | GaitForeMer-main/data/NTURGDDataset.py | ###############################################################################
# Pose Transformers (POTR): Human Motion Prediction with Non-Autoregressive
# Transformers
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by
# Angel Martinez <angel.martinez@idiap.ch>,
#
# This file is part of
# POTR: Human Motion Prediction with Non-Autoregressive Transformers
#
# POTR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# POTR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POTR. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""Pytorch dataset of skeletons for the NTU-RGB+D [1] dataset.
[1] http://rose1.ntu.edu.sg/Datasets/actionRecognition.asp
[2] https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shahroudy_NTU_RGBD_A_CVPR_2016_paper.pdf
"""
import os
import sys
import numpy as np
import torch
import argparse
import tqdm
# Train subject ids (cross-subject split); see [2].
_TRAIN_SUBJECTS = [
    1, 2, 4, 5, 8, 9, 13, 14, 15,16, 17, 18, 19, 25, 27, 28, 31, 34, 35, 38
]
_TEST_SUBJECTS = [x for x in range(1, 40) if x not in _TRAIN_SUBJECTS]
# the joints according to [2] in 1-base
# 1-base of the spine 2-middle of the spine 3-neck 4-head 5-left shoulder
# 6-left elbow 7-left wrist 8-left hand 9-right shoulder 10-right elbow
# 11-right wrist 12-right hand 13-left hip 14-left knee 15-left ankle
# 16-left foot 17-right hip 18-right knee 19-right ankle 20-right foot
# 21-spine 22-tip of the left hand 23-left thumb 24-tip of the right
# hand 25-right thumb
# here set the joint indices in base 0
_MAJOR_JOINTS = [x-1 for x in
    [1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 13, 14, 15, 17, 18, 19, 21]
]
_NMAJOR_JOINTS = len(_MAJOR_JOINTS)
# Index of the spine-base joint after the _MAJOR_JOINTS selection.
_SPINE_ROOT = 0
# Floor for per-dimension standard deviations when normalizing.
_MIN_STD = 1e-4
# NTURGB+D contains 60 actions
_TOTAL_ACTIONS = 60
# Sequences shorter than this many frames are discarded.
_MIN_REQ_FRAMES = 65
def collate_fn(batch):
  """Collate function for data loaders: stacks sample dicts into tensors."""
  stacked = {
      key: torch.from_numpy(np.stack([entry[key] for entry in batch]))
      for key in ('encoder_inputs', 'decoder_inputs', 'decoder_outputs')
  }
  return {
      'encoder_inputs': stacked['encoder_inputs'],
      'decoder_inputs': stacked['decoder_inputs'],
      'decoder_outputs': stacked['decoder_outputs'],
      'action_str': [entry['action_str'] for entry in batch],
      'action_ids': torch.from_numpy(
          np.stack([entry['action_id'] for entry in batch])),
  }
def load_action_labels(data_path):
  """Reads the action names, one per line, from `action_labels.txt`."""
  labels_path = os.path.join(data_path, 'action_labels.txt')
  with open(labels_path) as file_:
    return [line.strip() for line in file_]
def get_activity_from_file(path_file):
  """Parses the sequence ids encoded in an NTU skeleton file name.

  File names follow the pattern SsssCcccPpppRrrrAaaa.skeleton.

  Returns:
    (setup_id, camera_id, subject_id, replication_id, activity_id).
  """
  stem = path_file.split('/')[-1].split('.')[0]
  return (
      int(stem[1:4]),    # setup id
      int(stem[5:8]),    # camera id
      int(stem[9:12]),   # subject id
      int(stem[13:16]),  # replication id
      int(stem[17:]),    # activity id
  )
def select_fold_files(path_to_data, skip_files_path):
  """Splits skeleton files into train/test by subject id, honoring a skip list.

  Returns:
    (training_files, test_files): file names whose subject id belongs
    (respectively does not belong) to _TRAIN_SUBJECTS.
  """
  skeleton_files = [f for f in os.listdir(path_to_data)
                    if f.endswith('skeleton')]
  with open(skip_files_path) as file_:
    skip_set = set(line.strip() for line in file_)
  training_files = []
  test_files = []
  for file_name in skeleton_files:
    if file_name.split('.')[0] in skip_set:
      print('Skiping file:', file_name)
      continue
    seq_info = get_activity_from_file(file_name)
    # seq_info[2] is the subject id
    bucket = training_files if seq_info[2] in _TRAIN_SUBJECTS else test_files
    bucket.append(file_name)
  return training_files, test_files
def save_fold_files(path_to_data, output_path, skip_files_path):
  """Writes training/testing/validation file lists for the dataset folds.

  5% of the training files are held out for validation. The split is
  computed on the ORIGINAL training list before filtering; the previous
  implementation re-indexed the already-filtered list, so validation
  files were drawn from — and left inside — the training split.
  """
  training_files, test_files = select_fold_files(path_to_data, skip_files_path)
  val_idx = set(np.random.choice(
      len(training_files), int(len(training_files)*0.05), replace=False))
  # partition with indices of the original list so the splits are disjoint
  val_files = [f for i, f in enumerate(training_files) if i in val_idx]
  training_files = [f for i, f in enumerate(training_files)
                    if i not in val_idx]
  with open(os.path.join(output_path, 'training_files.txt'), 'w') as file_:
    for f in training_files:
      print(f, file=file_)
  with open(os.path.join(output_path, 'testing_files.txt'), 'w') as file_:
    for f in test_files:
      print(f, file=file_)
  with open(os.path.join(output_path, 'validation_files.txt'), 'w') as file_:
    for f in val_files:
      print(f, file=file_)
def read_sequence_kinect_skeletons(path_file):
  """Parses an NTU-RGB+D Kinect skeleton text file.

  The file starts with the frame count; each frame lists the number of
  tracked bodies, and each body carries a tracking-info line, a joint
  count, and one line per joint.

  Args:
    path_file: path to a SsssCcccPpppRrrrAaaa.skeleton file.

  Returns:
    (bodies, seq_info) where `bodies` maps each tracking id to a
    [num_frames, num_joints, 3] float32 array of 3D joint positions and
    `seq_info` is (setup_id, camera_id, subject_id, replication_id,
    activity_id).
  """
  # NOTE(review): the file handle is never closed explicitly.
  fid = open(path_file, 'r')
  seq_info = get_activity_from_file(path_file)
  # first line is the number of frames
  framecount = int(fid.readline().strip())
  bodies = {}
  for i in range(framecount):
    # each frame starts with the number of tracked bodies
    bodycount = int(fid.readline().strip())
    for b in range(bodycount):
      # tracking ID of the skeleton, followed by per-body flags
      line = fid.readline().strip().split(' ')
      body_id = int(line[0])
      arrayint = [int(x) for x in line[1:7]]  # per-body flags (unused)
      lean = [float(x) for x in line[7:9]]  # body lean (unused)
      tracking_state = int(line[-1])  # unused
      #number of joints
      joint_count = int(fid.readline().strip())
      joints = []
      for j in range(joint_count):
        line = fid.readline().strip().split(' ')
        # 3D location of the joint
        joint_3d = [float(x) for x in line[0:3]]
        # 2D depth location of joints
        joint_2d_depth = [float(x) for x in line[3:5]]
        # 2D color location of joints
        joint_2d_color = [float(x) for x in line[5:7]]
        # orientation of joints (?)
        joint_orientation = [float(x) for x in line[7:11]]
        # tracking state
        joint_track_state = int(line[-1])
        # only the 3D joint positions are kept
        joints.append(joint_3d)
      if body_id in list(bodies.keys()):
        bodies[body_id].append(np.array(joints, dtype=np.float32))
      else:
        bodies[body_id] = [np.array(joints, dtype=np.float32)]
  # stack per-frame joint arrays into [num_frames, num_joints, 3]
  for k, v in bodies.items():
    bodies[k] = np.stack(v)
  return bodies, seq_info
def select_sequence_based_var(action_sequence_dict):
  """Selects the actor in sequence based on the sum of variance of X, Y, Z."""
  best_key = None
  best_score = -1
  for body_id, sequence in action_sequence_dict.items():
    # total variance across the last axis, summed over frames and joints
    score = np.sum(np.var(sequence, axis=-1))
    if score > best_score:
      best_score = score
      best_key = body_id
  return action_sequence_dict[best_key]
class NTURGDDatasetSkeleton(torch.utils.data.Dataset):
def __init__(self, params=None, mode='train'):
super(NTURGDDatasetSkeleton, self).__init__()
self._params = params
self._mode = mode
thisname = self.__class__.__name__
self._monitor_action = 'walking'
for k, v in params.items():
print('[INFO] ({}) {}: {}'.format(thisname, k, v))
data_path = self._params['data_path']
self._action_str = load_action_labels(data_path)
self._fold_file = ''
if self._mode.lower() == 'train':
self._fold_file = os.path.join(data_path, 'training_files.txt')
elif self._mode.lower() == 'eval':
self._fold_file = os.path.join(data_path, 'validation_files.txt')
elif self._mode.lower() == 'test':
self._fold_file = os.path.join(data_path, 'testing_files.txt')
else:
raise ValueError('Unknown launching mode: {}'.format(self._mode))
self.load_data()
def read_fold_file(self, fold_file):
files = []
with open(fold_file) as file_:
for line in file_:
files.append(line.strip())
return files
def compute_norm_stats(self, data):
self._norm_stats = {}
mean = np.mean(data, axis=0)
std = np.mean(data, axis=0)
std[np.where(std<_MIN_STD)] = 1
self._norm_stats['mean'] = mean.ravel()
self._norm_stats['std'] = std.ravel()
def load_compute_norm_stats(self, data):
mean_path = os.path.join(self._params['data_path'], 'mean.npy')
std_path = os.path.join(self._params['data_path'], 'std.npy')
thisname = self.__class__.__name__
self._norm_stats = {}
if os.path.exists(mean_path):
print('[INFO] ({}) Loading normalization stats!'.format(thisname))
self._norm_stats['mean'] = np.load(mean_path)
self._norm_stats['std'] = np.load(std_path)
elif self._mode == 'train':
print('[INFO] ({}) Computing normalization stats!'.format(thisname))
self.compute_norm_stats(data)
np.save(mean_path, self._norm_stats['mean'])
np.save(std_path, self._norm_stats['std'])
else:
raise ValueError('Cant compute statistics in not training mode!')
def normalize_data(self):
for k in self._data.keys():
tmp_data = self._data[k]
tmp_data = tmp_data - self._norm_stats['mean']
tmp_data = np.divide(tmp_data, self._norm_stats['std'])
self._data[k] = tmp_data
def load_data(self):
seq_files = self.read_fold_file(self._fold_file)
self._data = {}
all_dataset = []
seq_lens = []
for sequence_file in tqdm.tqdm(seq_files):
sequence_file = os.path.join(self._params['data_path'],
'nturgb+d_skeletons', sequence_file)
# the sequence key contains
# (setup_id, camera_id, subject_id, replication_id, activity_id)
# sequence shape [num_frames, 25, 3]
action_sequence, seq_key = read_sequence_kinect_skeletons(sequence_file)
# added code, there are no actors in sequence
if len(action_sequence) == 0:
continue
# FOR TESTING PURPOSES, EXIT LOADING CODE EARLY
# if len(all_dataset) > 100:
# break
action_sequence = select_sequence_based_var(action_sequence)
# sequence shape [num_frames, 16, 3]
action_sequence = action_sequence[:, _MAJOR_JOINTS, :]
# Only consider sequences with more than _MIN_REQ_FRAMES frames
if action_sequence.shape[0]<_MIN_REQ_FRAMES:
continue
# center joints in the spine of the skeleton
root_sequence = np.expand_dims(action_sequence[:, _SPINE_ROOT, :], axis=1)
action_sequence = action_sequence - root_sequence
T, N, D = action_sequence.shape
seq_lens.append(T)
# total_frames x n_joints*3
self._data[seq_key] = action_sequence.reshape((T, -1))
all_dataset.append(action_sequence)
all_dataset = np.concatenate(all_dataset, axis=0)
self.load_compute_norm_stats(all_dataset)
self.normalize_data()
self._pose_dim = self._norm_stats['std'].shape[-1]
self._data_dim = self._pose_dim
self._data_keys = list(self._data.keys())
thisname = self.__class__.__name__
print('[INFO] ({}) The min seq len for mode: {} is: {}'.format(
thisname, self._mode, min(seq_lens)))
print('[INFO] ({}) Pose dim: {} Data dim: {}'.format(
thisname, self._pose_dim, self._data_dim))
def __len__(self):
if self._mode == 'train':
return max(len(self._data_keys), self._params['virtual_dataset_size'])
return len(self._data_keys)
def __getitem__(self, idx):
return self._get_item_train(idx)
  def _get_item_train(self, idx):
    """Build one (encoder, decoder) training pair from a cached sequence.

    In 'train' mode the given idx is ignored and a sequence key is drawn
    uniformly at random; a random window of source_seq_len + target_seq_len
    frames is then cut from that sequence.
    """
    if self._mode == 'train':
      # idx = np.random.choice(len(self._data_keys), 1)[0]
      idx = np.random.choice(len(self._data_keys))
    the_key = self._data_keys[idx]
    # the action id in the files come in 1 based index
    action_id = the_key[-1] - 1
    source_seq_len = self._params['source_seq_len']
    target_seq_len = self._params['target_seq_len']
    input_size = self._pose_dim
    pose_size = self._pose_dim
    total_frames = source_seq_len + target_seq_len
    # Encoder gets source_seq_len-1 frames; the last source frame seeds the decoder.
    src_seq_len = source_seq_len - 1
    encoder_inputs = np.zeros((src_seq_len, input_size), dtype=np.float32)
    decoder_inputs = np.zeros((target_seq_len, input_size), dtype=np.float32)
    decoder_outputs = np.zeros((target_seq_len, pose_size), dtype=np.float32)
    N, _ = self._data[the_key].shape
    # Random window start; load_data guarantees N >= _MIN_REQ_FRAMES.
    # NOTE(review): assumes N > total_frames, otherwise randint(0, <=0) raises.
    start_frame = np.random.randint(0, N-total_frames)
    # total_framesxn_joints*joint_dim
    data_sel = self._data[the_key][start_frame:(start_frame+total_frames), :]
    encoder_inputs[:, 0:input_size] = data_sel[0:src_seq_len,:]
    # Decoder inputs start one frame before the target window (teacher forcing).
    decoder_inputs[:, 0:input_size] = \
        data_sel[src_seq_len:src_seq_len+target_seq_len, :]
    decoder_outputs[:, 0:pose_size] = data_sel[source_seq_len:, 0:pose_size]
    if self._params['pad_decoder_inputs']:
      # Replace the decoder inputs by target_seq_len copies of the first query frame.
      query = decoder_inputs[0:1, :]
      decoder_inputs = np.repeat(query, target_seq_len, axis=0)
    return {
        'encoder_inputs': encoder_inputs,
        'decoder_inputs': decoder_inputs,
        'decoder_outputs': decoder_outputs,
        'action_id': action_id,
        'action_str': self._action_str[action_id],
    }
def unormalize_sequence(self, action_sequence):
sequence = action_sequence*self._norm_stats['std']
sequence = sequence + self._norm_stats['mean']
return sequence
def dataset_factory(params):
  """Defines the datasets that will be used for training and validation.

  Mutates `params` in place (num_activities, virtual_dataset_size, n_joints)
  and returns a (train_loader, eval_loader) pair; the train loader is None
  when 'test_phase' is present in params.
  """
  params['num_activities'] = _TOTAL_ACTIONS
  # One virtual epoch = steps_per_epoch batches (see __len__ in the dataset).
  params['virtual_dataset_size'] = params['steps_per_epoch']*params['batch_size']
  params['n_joints'] = _NMAJOR_JOINTS
  eval_mode = 'test' if 'test_phase' in params.keys() else 'eval'
  if eval_mode == 'test':
    train_dataset_fn = None
  else:
    train_dataset = NTURGDDatasetSkeleton(params, mode='train')
    train_dataset_fn = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=params['batch_size'],
        shuffle=True,
        num_workers=4,
        collate_fn=collate_fn,
        drop_last=True
    )
  eval_dataset = NTURGDDatasetSkeleton(
      params,
      mode=eval_mode
  )
  # Evaluation runs one sequence at a time.
  eval_dataset_fn = torch.utils.data.DataLoader(
      eval_dataset,
      batch_size=1,
      shuffle=True,
      num_workers=1,
      drop_last=True,
      collate_fn=collate_fn,
  )
  return train_dataset_fn, eval_dataset_fn
# Smoke test: build the fold files, then iterate the validation loader once
# and print the batch tensor shapes.
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('--data_path', type=str, default=None)
  parser.add_argument('--pad_decoder_inputs', action='store_true')
  parser.add_argument('--source_seq_len', type=int, default=40)
  parser.add_argument('--target_seq_len', type=int, default=20)
  parser.add_argument('--batch_size', type=int, default=128)
  parser.add_argument('--steps_per_epoch', type=int, default=200)
  args = parser.parse_args()
  params = vars(args)
  print('Creating fold files')
  save_fold_files(
      'data/nturgb+d_data/nturgb+d_skeletons', # path_to_data
      'data/nturgb+d_data', # output_path
      'data/nturgb+d_data/missing_skeletons.txt' # skip_files_path
  )
  train_dataset_load, val_dataset_load = dataset_factory(params)
  for n, sample in enumerate(val_dataset_load):
    # NOTE(review): _get_item_train returns the key 'action_id' (singular);
    # 'action_ids' presumably comes from collate_fn renaming during batching —
    # verify against collate_fn.
    print(n,
          sample['encoder_inputs'].size(),
          sample['decoder_inputs'].size(),
          sample['decoder_outputs'].size(),
          sample['action_ids'].size())
| 15,284 | 32.084416 | 110 | py |
CPFN | CPFN-master/training_PatchSelection.py | # Importation of packages
import os
import sys
import torch
import argparse
import numpy as np
# Importing the Dataset file
from Dataset import dataloaders
# Importing the Network file
from PointNet2 import pn2_network
# Importing the Utils files
from Utils import config_loader, training_utils, training_visualisation
if __name__ == '__main__':
    # Entry point of the patch-selection training script: parse CLI arguments,
    # load the YAML config and read the training/validation split settings.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', help='YAML configuration file', default='Configs/config_patchSelec.yml')
    parser.add_argument('--lowres_dataset', help='Directory of the Lowres Dataset', default=os.path.expanduser('data/TraceParts_v2_LowRes/'))
    parser.add_argument('--highres_dataset', help='Directory of the Highres Dataset', default=os.path.expanduser('data/TraceParts_v2/'))
    parser.add_argument('--scale', help='Scale of the Primitives', type=float, default=0.05)
    parser.add_argument('--patchselec_weigths', help='Filename of the model weights to load', default='')
    args = parser.parse_args()
    # Loading the config file
    conf = config_loader.Patch_SelecConfig(args.config_file)
    # Selecting the visible GPUs
    visible_GPUs = conf.get_CUDA_visible_GPUs()
    device = torch.device('cuda')
    if visible_GPUs is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(visible_GPUs)
    # Training Parameters
    nb_epochs = conf.get_n_epochs()
    init_learning_rate = conf.get_init_learning_rate()
    val_interval = conf.get_val_interval()
    snapshot_interval = conf.get_snapshot_interval()
    # Training Dataset
    csv_path_train = os.path.join('Dataset', conf.get_train_data_file())
    # BUG FIX: the two getters were swapped (noisy_train held the first-n count
    # and first_n_train the noisy flag); matches the correct usage in the
    # evaluation scripts (noisy <- is_*_noisy(), first_n <- get_*_first_n()).
    noisy_train = conf.is_train_data_noisy()
    first_n_train = conf.get_train_data_first_n()
    num_workers_train = conf.get_nb_train_workers()
    if not os.path.isdir(conf.get_weights_folder()):
        os.mkdir(conf.get_weights_folder())
    # Validation Dataset
    csv_path_val = os.path.join('Dataset', conf.get_val_data_file())
    # BUG FIX: same noisy/first_n swap as the training split above.
    noisy_val = conf.is_val_data_noisy()
    first_n_val = conf.get_val_data_first_n()
    num_workers_val = conf.get_nb_val_workers()
    # Launching the Network: a PointNet++ binary classifier (keep/discard patch
    # center) whose weights are optionally warm-started from --patchselec_weigths.
    patchselec_weights_filename = 'patchselec_%s_module'%str(round(args.scale, 2))
    patchselec_module = pn2_network.PointNet2(dim_input=3, dim_pos=3, output_sizes=[2]).to(device)
    if os.path.isfile(os.path.join(conf.get_weights_folder(), args.patchselec_weigths)):
        # NOTE(review): 'dict' shadows the builtin; kept unchanged here.
        dict = torch.load(os.path.join(conf.get_weights_folder(), args.patchselec_weigths))
        patchselec_module.load_state_dict(dict, strict=True)
    train_dataset = dataloaders.Dataset_PatchSelection(csv_path_train, args.lowres_dataset, args.highres_dataset, args.scale, n_points=8192, normalisation=True)
    train_datasampler = dataloaders.RandomSampler(data_source=train_dataset, seed=12345, identical_epochs=False)
    train_dataloader = torch.utils.data.DataLoader(train_dataset, sampler=train_datasampler, batch_size=conf.get_batch_size(), num_workers=conf.get_nb_train_workers(), pin_memory=True)
    val_dataset = dataloaders.Dataset_PatchSelection(csv_path_val, args.lowres_dataset, args.highres_dataset, args.scale, n_points=8192, normalisation=True)
    val_datasampler = dataloaders.RandomSampler(data_source=val_dataset, seed=12345, identical_epochs=False)
    val_dataloader = torch.utils.data.DataLoader(val_dataset, sampler=val_datasampler, batch_size=conf.get_batch_size(), num_workers=conf.get_nb_val_workers(), pin_memory=True)
    # Optimizer
    optimizer = torch.optim.Adam(patchselec_module.parameters(), lr=init_learning_rate)
    # Visualisation
    visualiser = training_visualisation.Visualiser(conf.get_visualisation_interval())
    # Initialisation
    global_step = 0
    best_loss = np.inf
    # Train loop: validate every val_interval epochs and keep the best-loss
    # checkpoint; additionally save numbered snapshots every snapshot_interval.
    for epoch in range(nb_epochs):
        global_step, _ = training_utils.patch_selection_train_val_epoch(train_dataloader, patchselec_module, epoch, optimizer, global_step, visualiser, args, conf, device, network_mode='train')
        if (epoch % conf.get_val_interval() == 0) and (epoch > 0):
            with torch.no_grad():
                _, loss = training_utils.patch_selection_train_val_epoch(val_dataloader, patchselec_module, epoch, optimizer, global_step, visualiser, args, conf, device, network_mode='val')
            if loss < best_loss:
                torch.save(patchselec_module.state_dict(), os.path.join(conf.get_weights_folder(), patchselec_weights_filename + '.pth'))
                best_loss = loss
        if (epoch % conf.get_snapshot_interval() == 0) and (epoch > 0):
            torch.save(patchselec_module.state_dict(), os.path.join(conf.get_weights_folder(), patchselec_weights_filename + '%d.pth'%epoch))
    # Final save after training; reuses the last value of the loop variable
    # 'epoch', so it may overwrite the last periodic snapshot's filename.
    torch.save(patchselec_module.state_dict(), os.path.join(conf.get_weights_folder(), patchselec_weights_filename + '%d.pth' % epoch))
CPFN | CPFN-master/evaluation_globalSPFN.py | # Importation of packages
import os
import sys
import torch
import argparse
import numpy as np
import pandas as pd
# Importing the Dataset files
from Dataset import dataloaders
# Importing the Network files
from SPFN import fitter_factory, metric_implementation, losses_implementation
from PointNet2 import pn2_network
# Importing Utils files
from Utils import config_loader
# Evaluate the global SPFN network on the full (low-res) point clouds and
# export per-object segmentations, normals, types and features.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', help='YAML configuration file', default='Configs/config_globalSPFN.yml')
    parser.add_argument('--lowres_dataset', help='Directory of the Lowres Input Dataset', default=os.path.expanduser('data/TraceParts_v2_lowres/'))
    parser.add_argument('--highres_dataset', help='Directory of the Highres Input Dataset', default=os.path.expanduser('data/TraceParts_v2/'))
    parser.add_argument('--path_patches', help='Path to Sampled Patches h5 files', default=os.path.expanduser('data/TraceParts_v2_patches/'))
    parser.add_argument('--scale', help='Scale to select the smallest primitive', default=0.05, type=float)
    parser.add_argument('--output_folder', help='Directory of the output folder', default=os.path.expanduser('data/TraceParts_v2_globalspfn/'))
    parser.add_argument('--evaluation_set', help='Whether to evaluate on the train or test set', default='test')
    args = parser.parse_args()
    # Patches are stored per primitive scale, e.g. <path_patches>/0.05/.
    path_patches = os.path.join(args.path_patches, str(round(args.scale,2)))
    if not os.path.isdir(args.output_folder):
        os.mkdir(args.output_folder)
    # Loading the config file
    conf = config_loader.SPFNConfig(args.config_file)
    # Selecting the visible GPUs
    visible_GPUs = conf.get_CUDA_visible_GPUs()
    device = torch.device('cuda')
    if visible_GPUs is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(visible_GPUs)
    # Primtive Types and Numbers
    fitter_factory.register_primitives(conf.get_list_of_primitives())
    n_registered_primitives = fitter_factory.get_n_registered_primitives()
    n_max_global_instances = conf.get_n_max_global_instances()
    # Test Dataset (the train split can also be evaluated to produce features
    # consumed by the local-SPFN training stage).
    if args.evaluation_set == 'train':
        csv_path_test = os.path.join('Dataset', conf.get_train_data_file())
    else:
        csv_path_test = os.path.join('Dataset', conf.get_test_data_file())
    noisy_test = conf.is_test_data_noisy()
    first_n_test = conf.get_test_data_first_n()
    # Launching the Network: heads predict per-point normals (3), primitive
    # type logits and per-instance membership logits.
    spfn_module_filename = 'globalspfn_module.pth'
    spfn_module = pn2_network.PointNet2(dim_input=3, dim_pos=3, output_sizes=[3, n_registered_primitives, n_max_global_instances]).to(device)
    dict = torch.load(os.path.join(conf.get_weights_folder(), spfn_module_filename))
    spfn_module.load_state_dict(dict, strict=True)
    spfn_module.eval()
    test_dataset = dataloaders.Dataset_GlobalSPFN(n_max_global_instances, csv_path_test, args.lowres_dataset, args.highres_dataset, path_patches, noisy_test, test=True, n_points=None, first_n=first_n_test, fixed_order=True)
    test_datasampler = dataloaders.Sampler(data_source=test_dataset)
    test_dataloader = torch.utils.data.DataLoader(test_dataset, sampler=test_datasampler, batch_size=1, num_workers=0, pin_memory=True)
    # One row of metrics per evaluated object.
    dataframe_results = pd.DataFrame(columns=['Filename', 'mIoU', 'Type', 'Normal', 'Axis', 'MeanRes', 'StdRes', 'SkCoverage0.01', 'SkCoverage0.02', 'PCoverage0.01', 'PCoverage0.02'])
    list_mIoU = []
    for batch_id, data in enumerate(test_dataloader, 0):
        if batch_id%100==0: print('Iteration %d / %d' % (batch_id, len(test_dataloader)))
        # Unpack the batch: points, GT normals, per-instance points, instance
        # and type labels, GT primitive parameters and sampled patch centers.
        P = data[0].type(torch.FloatTensor).to(device)
        X_gt = data[1].type(torch.FloatTensor).to(device)
        points_per_instance = data[2].type(torch.FloatTensor).to(device)
        I_gt = data[3].type(torch.LongTensor).to(device)
        T_gt = data[4].type(torch.LongTensor).to(device)
        plane_n_gt = data[5].type(torch.FloatTensor).to(device)
        cylinder_axis_gt = data[6].type(torch.FloatTensor).to(device)
        cone_axis_gt = data[7].type(torch.FloatTensor).to(device)
        patch_centers = data[8].type(torch.LongTensor).to(device)
        gt_parameters = {'plane_normal': plane_n_gt, 'cylinder_axis': cylinder_axis_gt, 'cone_axis': cone_axis_gt}
        # The global network runs without conditioning features.
        glob_features = None
        loc_features = None
        if not os.path.isdir(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5',''))):
            os.mkdir(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5','')))
        with torch.no_grad():
            X, T, W, global_feat, local_feat = spfn_module(P, glob_features=glob_features, loc_features=loc_features)
            if args.evaluation_set == 'test':
                np.save(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5',''), 'local_feat_full.npy'), local_feat[0].cpu().numpy())
            # Keep only the local features at the sampled patch centers.
            local_feat = local_feat[:,:,patch_centers[0]]
            X = X / torch.norm(X, dim=2, keepdim=True)
            W = torch.softmax(W, dim=2)
        with torch.no_grad():
            # Discretize memberships and match predicted to GT instances.
            W = metric_implementation.hard_W_encoding(W)
            matching_indices, mask = metric_implementation.hungarian_matching(W, I_gt)
            mask = mask.float()
            mIoU = metric_implementation.compute_segmentation_iou(W, I_gt, matching_indices, mask)
            if not os.path.isdir(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5', ''))):
                os.mkdir(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5', '')))
            if args.evaluation_set == 'test':
                np.save(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5', ''), 'object_seg.npy'), W[0].cpu().numpy())
                np.save(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5', ''), 'object_normals.npy'), X[0].cpu().numpy())
                np.save(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5', ''), 'object_type.npy'), T[0].cpu().numpy())
            mIoU, type_accuracy, normal_difference, axis_difference, mean_residual, std_residual, Sk_coverage, P_coverage, W, predicted_parameters, T = metric_implementation.compute_all_metrics(P, X, X_gt, W, I_gt, T, T_gt, points_per_instance, gt_parameters, list_epsilon=[0.01, 0.02], classes=conf.get_list_of_primitives())
            list_mIoU.append(mIoU.item())
            if batch_id%100==0: print('mIoU: ', np.mean(list_mIoU))
            dataframe_results.loc[batch_id] = [test_dataset.hdf5_file_list[batch_id].replace('.h5',''), mIoU.item(), type_accuracy.item(), normal_difference.item(), axis_difference.item(), mean_residual.item(), std_residual.item(), Sk_coverage[0].item(), Sk_coverage[1].item(), P_coverage[0].item(), P_coverage[1].item()]
            # Features are exported for the local SPFN stage.
            np.save(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5',''), 'global_feat.npy'), global_feat[0,:,0].cpu().numpy())
            np.save(os.path.join(args.output_folder, test_dataset.hdf5_file_list[batch_id].replace('.h5',''), 'local_feat.npy'), local_feat[0].cpu().numpy())
    dataframe_results.to_csv(os.path.join(args.output_folder, 'Results.csv'))
CPFN | CPFN-master/evaluation_baselineSPFN.py | # Importation of packages
import os
import sys
import torch
import argparse
import numpy as np
import pandas as pd
# Importing the Dataset files
from Dataset import dataloaders
# Importing the Network files
from SPFN import fitter_factory, metric_implementation, losses_implementation
from PointNet2 import pn2_network
# Importing Utils files
from Utils import config_loader, merging_utils
if __name__ == '__main__':
    # Baseline evaluation: score the global SPFN segmentation alone (no local
    # patch refinement) on the test split.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', help='YAML configuration file', default='Configs/config_localSPFN.yml')
    parser.add_argument('--lowres_dataset', help='Directory of the Lowres Input Dataset', default=os.path.expanduser('data/TraceParts_v2_LowRes/'))
    parser.add_argument('--highres_dataset', help='Directory of the Highres Input Dataset', default=os.path.expanduser('data/TraceParts_v2/'))
    parser.add_argument('--dir_spfn', help='Directory of the global SPFN output', default=os.path.expanduser('data/GlobalSPFN_Results/'))
    parser.add_argument('--dir_indices', help='Directory of the indices', default=os.path.expanduser('data/Heatmap/'))
    parser.add_argument('--output_folder', help='Directory of the output folder', default=os.path.expanduser('data/LocalSPFN_Results/'))
    parser.add_argument('--scale', help='Scale of the primitives', default=0.05)
    args = parser.parse_args()
    dir_indices = os.path.join(args.dir_indices, str(round(args.scale,2)))
    if not os.path.isdir(args.output_folder):
        os.mkdir(args.output_folder)
    # Loading the config file
    conf = config_loader.Local_SPFNConfig(args.config_file)
    # Selecting the visible GPUs
    visible_GPUs = conf.get_CUDA_visible_GPUs()
    device = torch.device('cuda')
    if visible_GPUs is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(visible_GPUs)
    # Primtive Types and Numbers
    fitter_factory.register_primitives(conf.get_list_of_primitives())
    n_registered_primitives = fitter_factory.get_n_registered_primitives()
    n_max_global_instances = conf.get_n_max_global_instances()
    n_max_local_instances = conf.get_n_max_local_instances()
    # Test Dataset
    csv_path_test = os.path.join(args.lowres_dataset, conf.get_test_data_file())
    # BUG FIX: the two getters were swapped (noisy_test held the first-n count
    # and first_n_test the noisy flag); both values are passed to the dataset
    # below. Matches the correct usage in evaluation_globalSPFN.py.
    noisy_test = conf.is_test_data_noisy()
    first_n_test = conf.get_test_data_first_n()
    test_dataset = dataloaders.Dataset_TestLocalSPFN(n_max_global_instances, n_max_local_instances, csv_path_test, args.dir_spfn, args.lowres_dataset, args.highres_dataset,
                                                     dir_indices, noisy_test, first_n=first_n_test, fixed_order=True)
    test_datasampler = dataloaders.Sampler(data_source=test_dataset)
    test_dataloader = torch.utils.data.DataLoader(test_dataset, sampler=test_datasampler, batch_size=1, num_workers=0, pin_memory=True)
    # Per-object metrics and per-primitive statistics accumulators.
    dataframe_results = pd.DataFrame(columns=['Filename', 'mIoU', 'Type', 'Normal', 'Axis', 'MeanRes', 'StdRes', 'SkCoverage0.01', 'SkCoverage0.02', 'PCoverage0.01', 'PCoverage0.02'])
    cpt_df_stats = 0
    dataframe_results_stats = pd.DataFrame(columns=['Filename', 'Primtive Id', 'Mask', 'Nb Points', 'mIoU'])
    list_mIoU = []
    # Baseline loop: use the precomputed global-SPFN labels directly as the
    # segmentation (one-hot over instances) and compute all metrics against GT.
    for batch_id, data in enumerate(test_dataloader, 0):
        if batch_id%100==0: print('Iteration %d / %d' % (batch_id, len(test_dataloader)))
        # Unpack per-patch inputs and the global object-level tensors.
        P = data[0].type(torch.FloatTensor).squeeze(0).to(device)
        nb_patches, num_points, _ = P.size()
        P_gt = data[2].type(torch.FloatTensor).squeeze(0).to(device)
        I_gt = data[3].type(torch.LongTensor).squeeze(0).to(device)
        T_gt = data[4].type(torch.LongTensor).squeeze(0).to(device)
        patch_indices = data[5].type(torch.LongTensor).squeeze(0).to(device)
        spfn_labels = data[6].type(torch.LongTensor).squeeze(0).to(device)
        num_global_points = spfn_labels.size(0)
        spfn_normals = data[7].type(torch.FloatTensor).squeeze(0).to(device)
        spfn_type = data[8].type(torch.FloatTensor).squeeze(0).to(device)
        glob_features = data[9].type(torch.FloatTensor).squeeze(0).to(device)
        loc_features = data[10].type(torch.FloatTensor).squeeze(0).to(device)
        P_global = data[11].type(torch.FloatTensor).squeeze(0).to(device)
        X_gt_global = data[12].type(torch.FloatTensor).squeeze(0).to(device)
        I_gt_global = data[13].type(torch.LongTensor).squeeze(0).to(device)
        plane_n_gt = data[14].type(torch.FloatTensor).to(device)
        cylinder_axis_gt = data[15].type(torch.FloatTensor).to(device)
        cone_axis_gt = data[16].type(torch.FloatTensor).to(device)
        gt_parameters = {'plane_normal': plane_n_gt,
                         'cylinder_axis': cylinder_axis_gt,
                         'cone_axis': cone_axis_gt}
        # One-hot encode the argmax label; the +1 / [:, 1:] shift maps any
        # "no instance" slot out of the membership matrix.
        W_fusion = torch.eye(n_max_global_instances + 1).to(spfn_labels.device)[torch.argmax(spfn_labels, dim=1) + 1]
        W_fusion = W_fusion[:, 1:]
        X_global = spfn_normals
        T_global = spfn_type
        with torch.no_grad():
            W_fusion = metric_implementation.hard_W_encoding(W_fusion.unsqueeze(0))
            matching_indices_fusion, mask_fusion = metric_implementation.hungarian_matching(W_fusion, I_gt_global.unsqueeze(0))
            mask_fusion = mask_fusion.float()
            mIoU_fusion = metric_implementation.compute_segmentation_iou(W_fusion, I_gt_global.unsqueeze(0), matching_indices_fusion, mask_fusion)
            mIoU_fusion_per_primitive = 1 - losses_implementation.compute_miou_loss(W_fusion, I_gt_global.unsqueeze(0), matching_indices_fusion)[0]
            # Record one stats row per GT primitive of this object.
            _, unique_counts_primitives_fusion = np.unique(I_gt_global.cpu().numpy(), return_counts=True)
            for j in range(len(unique_counts_primitives_fusion)):
                dataframe_results_stats.loc[cpt_df_stats] = [test_dataset.hdf5_file_list[batch_id], j, mask_fusion[0, j].item(), unique_counts_primitives_fusion[j], mIoU_fusion_per_primitive[0, j].item()]
                cpt_df_stats += 1
            # ADDED
            mIoU, type_accuracy, normal_difference, axis_difference, mean_residual, std_residual, Sk_coverage, P_coverage, W, predicted_parameters, T = metric_implementation.compute_all_metrics(
                P_global.unsqueeze(0), X_global.unsqueeze(0), X_gt_global.unsqueeze(0), W_fusion, I_gt_global.unsqueeze(0),
                T_global.unsqueeze(0), T_gt.unsqueeze(0), P_gt.unsqueeze(0), gt_parameters,
                list_epsilon=[0.01, 0.02], classes=['plane', 'sphere', 'cylinder', 'cone'])
            list_mIoU.append(mIoU.item())
            if batch_id%100==0: print('mIoU: ', np.mean(list_mIoU))
            dataframe_results.loc[batch_id] = [test_dataset.hdf5_file_list[batch_id], mIoU.item(), type_accuracy.item(),
                                               normal_difference.item(), axis_difference.item(), mean_residual.item(),
                                               std_residual.item(), Sk_coverage[0].item(), Sk_coverage[1].item(), P_coverage[0].item(), P_coverage[1].item()]
    dataframe_results.to_csv(os.path.join(args.output_folder, 'Results_baseline.csv'), index=False)
    dataframe_results_stats.to_csv(os.path.join(args.output_folder, 'Results_Stats_baseline.csv'), index=False)
CPFN | CPFN-master/evaluation_PatchSelection.py | # Importation of packages
import os
import sys
import h5py
import torch
import argparse
import numpy as np
# Importing the Dataset file
from Dataset import dataloaders
# Importing the Network file
from PointNet2 import pn2_network
# Importing the Utils files
from Utils import config_loader, sampling_utils
if __name__ == '__main__':
    # Evaluate the patch-selection classifier and export, per object, the
    # indices of the sampled patch centers (heatmap stage of CPFN).
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', help='YAML configuration file', default='Configs/config_patchSelec.yml')
    parser.add_argument('--lowres_dataset', help='Directory of the Lowres Dataset', default=os.path.expanduser('data/TraceParts_v2_lowres/'))
    parser.add_argument('--highres_dataset', help='Directory of the Highres Dataset', default=os.path.expanduser('data/TraceParts_v2/'))
    parser.add_argument('--heatmap_folder', help='Directory to save the heatmaps in', default=os.path.expanduser('data/TraceParts_v2_heatmaps/'))
    parser.add_argument('--scale', help='Scale of the Primitives', type=float, default=0.05)
    args = parser.parse_args()
    heatmap_folder = os.path.join(args.heatmap_folder, str(args.scale))
    os.makedirs(heatmap_folder, exist_ok=True)
    # Loading the config file
    conf = config_loader.Patch_SelecConfig(args.config_file)
    # Selecting the visible GPUs
    visible_GPUs = conf.get_CUDA_visible_GPUs()
    device = torch.device('cuda')
    if visible_GPUs is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(visible_GPUs)
    # Test Dataset
    csv_path_test = os.path.join('Dataset', conf.get_test_data_file())
    # BUG FIX: the two getters were swapped (noisy_test held the first-n count
    # and first_n_test the noisy flag); matches the correct usage in
    # evaluation_globalSPFN.py.
    noisy_test = conf.is_test_data_noisy()
    first_n_test = conf.get_test_data_first_n()
    # Launching the Network; scale >= 1 bypasses the classifier and uses the
    # ground-truth labels instead (see the loop below).
    if args.scale<1:
        patchselec_module_filename = 'patchselec_%s_module'%str(round(args.scale, 2)) + '.pth'
        patchselec_module = pn2_network.PointNet2(dim_input=3, dim_pos=3, output_sizes=[2]).to(device)
        dict = torch.load(os.path.join(conf.get_weights_folder(), patchselec_module_filename))
        patchselec_module.load_state_dict(dict, strict=True)
        patchselec_module.eval()
    test_dataset = dataloaders.Dataset_PatchSelection(csv_path_test, args.lowres_dataset, args.highres_dataset, args.scale, n_points=8192, normalisation=True)
    test_datasampler = dataloaders.Sampler(data_source=test_dataset)
    test_dataloader = torch.utils.data.DataLoader(test_dataset, sampler=test_datasampler, batch_size=1, num_workers=0, pin_memory=True)
    # Initialisation
    if args.scale<1:
        # 2x2 confusion matrix over the binary keep/discard labels.
        confusion_matrix = np.zeros([2, 2])
    for batch_id, data in enumerate(test_dataloader, 0):
        if batch_id%100==0: print('Iteration %d / %d' % (batch_id, len(test_dataloader)))
        # Computing the prediction
        points = data[0].type(torch.FloatTensor).to(device)
        batch_size_current, num_points, _ = points.size()
        output_labels = data[1].type(torch.LongTensor).to(device)
        shuffled_indices = data[2].type(torch.LongTensor).to(device)
        if args.scale<1:
            predicted_labels = patchselec_module(points)[0]
            predicted_labels = torch.argmax(predicted_labels, dim=2)
        else:
            # scale >= 1: no trained classifier, fall back to the GT labels.
            predicted_labels = output_labels[0]
        if not os.path.isdir(os.path.join(heatmap_folder, test_dataset.hdf5_file_list_lowres[batch_id].split('/')[-1].replace('.h5',''))):
            os.mkdir(os.path.join(heatmap_folder, test_dataset.hdf5_file_list_lowres[batch_id].split('/')[-1].replace('.h5', '')))
        # Computing the confusion matrix
        if args.scale<1:
            confusion_matrix[0, 0] += torch.sum((predicted_labels == 0) * (output_labels == 0)).item()
            confusion_matrix[0, 1] += torch.sum((predicted_labels == 0) * (output_labels == 1)).item()
            confusion_matrix[1, 0] += torch.sum((predicted_labels == 1) * (output_labels == 0)).item()
            confusion_matrix[1, 1] += torch.sum((predicted_labels == 1) * (output_labels == 1)).item()
            # Undo the dataset's point shuffling so labels align with file order.
            predicted_labels = torch.gather(predicted_labels[0], 0, shuffled_indices[0])
        # Selecting the indices
        with h5py.File(os.path.join(args.highres_dataset, test_dataset.hdf5_file_list_lowres[batch_id].split('/')[-1]), 'r') as f:
            gt_points_hr = f['gt_points'][()]
            gt_labels_hr = f['gt_labels'][()]
        with h5py.File(os.path.join(os.path.join(args.lowres_dataset, test_dataset.hdf5_file_list_lowres[batch_id].split('/')[-1])), 'r') as f:
            gt_points_lr = f['gt_points'][()]
            gt_labels_lr = f['gt_labels'][()]
        # Candidate pool = low-res points classified as positive; sample the
        # final patch centers from it and save them for the local-SPFN stage.
        pool_indices = np.where(predicted_labels.detach().cpu().numpy())[0]
        if len(pool_indices) > 0:
            patch_indices = sampling_utils.sample(gt_points_lr, gt_points_hr, pool_indices, max_number_patches=len(pool_indices))
            np.save(os.path.join(heatmap_folder, test_dataset.hdf5_file_list_lowres[batch_id].split('/')[-1].replace('.h5', '_indices.npy')), patch_indices)
    if args.scale<1:
        # Normalize counts to frequencies before reporting.
        confusion_matrix = confusion_matrix / np.sum(confusion_matrix)
        print('Confusion Matrix', confusion_matrix)
        np.save(os.path.join(heatmap_folder, 'confusion_matrix.npy'), confusion_matrix)
CPFN | CPFN-master/evaluation_localSPFN.py | # Importation of packages
import os
import sys
import torch
import argparse
import numpy as np
import pandas as pd
# Importing the Dataset files
from Dataset import dataloaders
# Importing the Network files
from SPFN import fitter_factory, metric_implementation, losses_implementation
from PointNet2 import pn2_network
# Importing utils files
from Utils import config_loader, merging_utils
if __name__ == '__main__':
    # Evaluate the full CPFN pipeline: run the local SPFN on each sampled
    # patch, merge patch predictions with the global SPFN output and score
    # the fused segmentation.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', help='YAML configuration file', default='Configs/config_localSPFN.yml')
    parser.add_argument('--lowres_dataset', help='Directory of the Lowres Input Dataset', default=os.path.expanduser('data/TraceParts_v2_lowres/'))
    parser.add_argument('--highres_dataset', help='Directory of the Highres Input Dataset', default=os.path.expanduser('data/TraceParts_v2/'))
    parser.add_argument('--dir_spfn', help='Directory of the global SPFN output', default=os.path.expanduser('data/TraceParts_v2_globalspfn/'))
    parser.add_argument('--dir_indices', help='Directory of the indices', default=os.path.expanduser('data/TraceParts_v2_heatmaps/'))
    parser.add_argument('--output_folder', help='Directory of the output folder', default=os.path.expanduser('data/TraceParts_v2_localspfn/'))
    parser.add_argument('--scale', help='Scale of the primitives', default=0.05, type=float)
    args = parser.parse_args()
    dir_indices = os.path.join(args.dir_indices, str(round(args.scale,2)))
    if not os.path.isdir(args.output_folder):
        os.mkdir(args.output_folder)
    # Loading the config file
    conf = config_loader.Local_SPFNConfig(args.config_file)
    # Selecting the visible GPUs
    visible_GPUs = conf.get_CUDA_visible_GPUs()
    device = torch.device('cuda')
    if visible_GPUs is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(visible_GPUs)
    # Primtive Types and Numbers
    fitter_factory.register_primitives(conf.get_list_of_primitives())
    n_registered_primitives = fitter_factory.get_n_registered_primitives()
    n_max_global_instances = conf.get_n_max_global_instances()
    n_max_local_instances = conf.get_n_max_local_instances()
    # Test Dataset
    csv_path_test = os.path.join('Dataset', conf.get_test_data_file())
    # BUG FIX: the two getters were swapped (noisy_test held the first-n count
    # and first_n_test the noisy flag); both values are passed to the dataset
    # below. Matches the correct usage in evaluation_globalSPFN.py.
    noisy_test = conf.is_test_data_noisy()
    first_n_test = conf.get_test_data_first_n()
    # Launching the Network: per-patch normals, type logits and local
    # instance memberships.
    spfn_module_filename = 'localspfn_%s_module.pth'%str(round(args.scale, 2))
    spfn_module = pn2_network.PointNet2(dim_input=3, dim_pos=3, output_sizes=[3, n_registered_primitives, n_max_local_instances], use_glob_features=False, use_loc_features=False).to(device)
    dict = torch.load(os.path.join(conf.get_weights_folder(), spfn_module_filename))
    spfn_module.load_state_dict(dict, strict=True)
    spfn_module.eval()
    test_dataset = dataloaders.Dataset_TestLocalSPFN(n_max_global_instances, n_max_local_instances, csv_path_test, args.dir_spfn, args.lowres_dataset, args.highres_dataset,
                                                     dir_indices, noisy_test, first_n=first_n_test, fixed_order=True)
    test_datasampler = dataloaders.Sampler(data_source=test_dataset)
    test_dataloader = torch.utils.data.DataLoader(test_dataset, sampler=test_datasampler, batch_size=1, num_workers=0, pin_memory=True)
    # Per-object metrics and per-primitive statistics accumulators.
    dataframe_results = pd.DataFrame(columns=['Filename', 'mIoU', 'Type', 'Normal', 'Axis', 'MeanRes', 'StdRes', 'SkCoverage0.01', 'SkCoverage0.02', 'PCoverage0.01', 'PCoverage0.02'])
    cpt_df_stats = 0
    dataframe_results_stats = pd.DataFrame(columns=['Filename', 'Primitive Id', 'Mask', 'Nb Points', 'mIoU'])
    list_mIoU = []
    # Fusion loop: run the local network on all patches of an object, merge
    # patch memberships with the global SPFN labels, then compute metrics.
    for batch_id, data in enumerate(test_dataloader, 0):
        if batch_id%100==0: print('Iteration %d / %d' % (batch_id, len(test_dataloader)))
        # Unpack per-patch inputs and the global object-level tensors.
        P = data[0].type(torch.FloatTensor).squeeze(0).to(device)
        nb_patches, num_points, _ = P.size()
        P_gt = data[2].type(torch.FloatTensor).squeeze(0).to(device)
        I_gt = data[3].type(torch.LongTensor).squeeze(0).to(device)
        T_gt = data[4].type(torch.LongTensor).squeeze(0).to(device)
        patch_indices = data[5].type(torch.LongTensor).squeeze(0).to(device)
        spfn_labels = data[6].type(torch.LongTensor).squeeze(0).to(device)
        num_global_points = spfn_labels.size(0)
        spfn_normals = data[7].type(torch.FloatTensor).squeeze(0).to(device)
        spfn_type = data[8].type(torch.FloatTensor).squeeze(0).to(device)
        glob_features = data[9].type(torch.FloatTensor).squeeze(0).to(device)
        loc_features = data[10].type(torch.FloatTensor).squeeze(0).to(device)
        P_global = data[11].type(torch.FloatTensor).squeeze(0).to(device)
        X_gt_global = data[12].type(torch.FloatTensor).squeeze(0).to(device)
        I_gt_global = data[13].type(torch.LongTensor).squeeze(0).to(device)
        plane_n_gt = data[14].type(torch.FloatTensor).to(device)
        cylinder_axis_gt = data[15].type(torch.FloatTensor).to(device)
        cone_axis_gt = data[16].type(torch.FloatTensor).to(device)
        gt_parameters = {'plane_normal': plane_n_gt,
                         'cylinder_axis': cylinder_axis_gt,
                         'cone_axis': cone_axis_gt}
        if nb_patches > 0:
            X, T, W, _, _ = spfn_module(P, glob_features=glob_features, loc_features=loc_features)
            X = X / torch.norm(X, dim=2, keepdim=True)
            W = torch.softmax(W, dim=2)
            with torch.no_grad():
                # Merge the per-patch soft memberships and the global labels:
                # the heuristic solver clusters local/global instance slots
                # into final object-level primitives.
                W_fusion = W
                similarity_fusion = merging_utils.similarity_soft(spfn_labels, W_fusion, patch_indices)
                labels_fusion = merging_utils.run_heuristic_solver(similarity_fusion.cpu().numpy(), nb_patches, n_max_global_instances, n_max_local_instances)
                # Columns: nb_patches blocks of local slots, then global slots.
                point2primitive_fusion = torch.zeros([num_global_points, nb_patches * n_max_local_instances + n_max_global_instances]).float().to(device)
                for b in range(nb_patches):
                    point2primitive_fusion[patch_indices[b], b * n_max_local_instances:(b + 1) * n_max_local_instances] = W_fusion[b]
                # Relies on the leaked loop variable: (b+1)*n_max_local_instances
                # is the start of the global-label columns.
                point2primitive_fusion[:, (b+1)*n_max_local_instances:] = spfn_labels
                # Deleting the patch prediction for points within any patches
                flag = torch.sum(point2primitive_fusion[:,:(b+1)*n_max_local_instances], dim=1)>0
                point2primitive_fusion[flag,(b+1)*n_max_local_instances:] = 0
                W_fusion = merging_utils.get_point_final(point2primitive_fusion, torch.from_numpy(labels_fusion).to(device))
            with torch.no_grad():
                patch_indices = patch_indices.contiguous()
                X = X.contiguous()
                T = T.contiguous()
                # Normal estimation
                X_global = torch.zeros_like(X_gt_global)
                X_global = X_global.scatter_add_(0, patch_indices.view(-1).unsqueeze(1).expand(-1, 3), X.view(-1, 3))
                # Points covered by no patch keep the global SPFN prediction.
                empty_indices = torch.all(X_global==0, axis=1)
                X_global[empty_indices] = spfn_normals[empty_indices]
                X_global = torch.nn.functional.normalize(X_global, p=2, dim=1, eps=1e-12)
                # Type estimation
                T_gt_perpoint = torch.gather(T_gt, 0, I_gt_global)
                patch_indices = patch_indices.view(-1).unsqueeze(1).expand(-1, len(conf.get_list_of_primitives()))
                # Average the type logits of all patches covering each point.
                num = torch.zeros_like(T_gt_perpoint).float().unsqueeze(1).expand(-1, len(conf.get_list_of_primitives()))
                num = num.scatter_add(0, patch_indices, T.view(-1, len(conf.get_list_of_primitives())))
                den = torch.zeros_like(T_gt_perpoint).float().unsqueeze(1).expand(-1, len(conf.get_list_of_primitives()))
                den = den.scatter_add(0, patch_indices, torch.ones_like(patch_indices).float())
                T_global = num / den.clamp(min=1)
                T_global[empty_indices] = spfn_type[empty_indices]
        else:
            # No patches sampled for this object: fall back to the global SPFN.
            W_fusion = torch.eye(n_max_global_instances + 1).to(spfn_labels.device)[torch.argmax(spfn_labels, dim=1) + 1]
            W_fusion = W_fusion[:, 1:]
            X_global = spfn_normals
            T_global = spfn_type
        # Drop instances supported by at most one point, pad back to the
        # fixed number of instance columns.
        W_fusion = W_fusion[:,torch.sum(W_fusion, dim=0)>1]
        if W_fusion.shape[1] < n_max_global_instances:
            W_fusion = torch.cat((W_fusion, torch.zeros([W_fusion.shape[0], n_max_global_instances-W_fusion.shape[1]]).to(device)), dim=1)
        with torch.no_grad():
            W_fusion = metric_implementation.hard_W_encoding(W_fusion.unsqueeze(0))
            matching_indices_fusion, mask_fusion = metric_implementation.hungarian_matching(W_fusion, I_gt_global.unsqueeze(0))
            mask_fusion = mask_fusion.float()
            mIoU_fusion = metric_implementation.compute_segmentation_iou(W_fusion, I_gt_global.unsqueeze(0), matching_indices_fusion, mask_fusion)
            mIoU_fusion_per_primitive = 1 - losses_implementation.compute_miou_loss(W_fusion, I_gt_global.unsqueeze(0), matching_indices_fusion)[0]
            # Record one stats row per GT primitive of this object.
            _, unique_counts_primitives_fusion = np.unique(I_gt_global.cpu().numpy(), return_counts=True)
            for j in range(len(unique_counts_primitives_fusion)):
                dataframe_results_stats.loc[cpt_df_stats] = [test_dataset.hdf5_file_list[batch_id], j, mask_fusion[0, j].item(), unique_counts_primitives_fusion[j], mIoU_fusion_per_primitive[0, j].item()]
                cpt_df_stats += 1
        with torch.no_grad():
            mIoU, type_accuracy, normal_difference, axis_difference, mean_residual, std_residual, Sk_coverage, P_coverage, W, predicted_parameters, T = metric_implementation.compute_all_metrics(
                P_global.unsqueeze(0), X_global.unsqueeze(0), X_gt_global.unsqueeze(0), W_fusion, I_gt_global.unsqueeze(0),
                T_global.unsqueeze(0), T_gt.unsqueeze(0), P_gt.unsqueeze(0), gt_parameters,
                list_epsilon=[0.01, 0.02], classes=['plane', 'sphere', 'cylinder', 'cone'])
            list_mIoU.append(mIoU.item())
            if batch_id%100==0: print('mIoU: ', np.mean(list_mIoU))
            dataframe_results.loc[batch_id] = [test_dataset.hdf5_file_list[batch_id], mIoU.item(), type_accuracy.item(),
                                               normal_difference.item(), axis_difference.item(), mean_residual.item(),
                                               std_residual.item(), Sk_coverage[0].item(), Sk_coverage[1].item(), P_coverage[0].item(), P_coverage[1].item()]
    dataframe_results.to_csv(os.path.join(args.output_folder, 'Results.csv'), index=False)
    dataframe_results_stats.to_csv(os.path.join(args.output_folder, 'Results_Stats.csv'), index=False)
CPFN | CPFN-master/training_SPFN.py | # Importation of packages
import os
import sys
import torch
import argparse
import numpy as np
# Importing the Dataset files
from Dataset import dataloaders
# Importing the Network files
from SPFN import fitter_factory, losses_implementation
from PointNet2 import pn2_network
# Importing Utils files
from Utils import config_loader, training_utils, training_visualisation
if __name__ == '__main__':
    # Trains either the global SPFN (whole shape, --network GlobalSPFN) or the
    # patch-based local SPFN (--network LocalSPFN) and checkpoints the weights.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_file', help='YAML configuration file', type=str, default='Configs/config_globalSPFN.yml')
    parser.add_argument('--lowres_dataset', help='Directory of the Input Dataset', type=str, default=os.path.expanduser('data/TraceParts_v2_lowres/'))
    parser.add_argument('--network', help='Network to train: GlobalSPFN, LocalSPFN', type=str, default='GlobalSPFN')
    parser.add_argument('--path_patches', help='Path to Sampled Patches h5 files', type=str, default=os.path.expanduser('data/TraceParts_v2_patches'))
    parser.add_argument('--scale', help='Scale to select the smallest primitive', type=float, default=0.05)
    # NOTE: the flag keeps its historical 'weigths' spelling for CLI backward compatibility.
    parser.add_argument('--spfn_weigths', help='Filename of the model weights to load', type=str, default='')
    args = parser.parse_args()
    # Loading the config file
    assert (args.network in ['GlobalSPFN', 'LocalSPFN'])
    if args.network == 'GlobalSPFN':
        conf = config_loader.Global_SPFNConfig(args.config_file)
    elif args.network == 'LocalSPFN':
        conf = config_loader.Local_SPFNConfig(args.config_file)
    # Restrict CUDA to the GPUs listed in the config (set before any CUDA work).
    visible_GPUs = conf.get_CUDA_visible_GPUs()
    device = torch.device('cuda')
    if visible_GPUs is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(visible_GPUs)
    # Primitive types and maximum instance counts
    fitter_factory.register_primitives(conf.get_list_of_primitives())
    n_registered_primitives = fitter_factory.get_n_registered_primitives()
    n_max_global_instances = conf.get_n_max_global_instances()
    if args.network == 'LocalSPFN':
        n_max_local_instances = conf.get_n_max_local_instances()
    # Training parameters
    nb_epochs = conf.get_n_epochs()
    init_learning_rate = conf.get_init_learning_rate()
    val_interval = conf.get_val_interval()
    snapshot_interval = conf.get_snapshot_interval()
    # Training dataset options
    csv_path_train = os.path.join('Dataset', conf.get_train_data_file())
    noisy_train = conf.is_train_data_noisy()
    first_n_train = conf.get_train_data_first_n()
    num_workers_train = conf.get_nb_train_workers()
    path_patches = os.path.join(args.path_patches, str(round(args.scale, 2)))
    # Validation dataset options
    csv_path_val = os.path.join('Dataset', conf.get_val_data_file())
    noisy_val = conf.is_val_data_noisy()
    first_n_val = conf.get_val_data_first_n()
    num_workers_val = conf.get_nb_val_workers()
    # Building the network; output heads: point normals (3), per-point type
    # logits (n_registered_primitives) and instance memberships.
    if args.network == 'GlobalSPFN':
        spfn_weights_filename = 'globalspfn_module'
        spfn_module = pn2_network.PointNet2(dim_input=3, dim_pos=3, output_sizes=[3, n_registered_primitives, n_max_global_instances]).to(device)
    elif args.network == 'LocalSPFN':
        spfn_weights_filename = 'localspfn_%s_module'%str(round(args.scale, 2))
        spfn_module = pn2_network.PointNet2(dim_input=3, dim_pos=3, output_sizes=[3, n_registered_primitives, n_max_local_instances]).to(device)
    if os.path.isfile(os.path.join(conf.get_weights_folder(), args.spfn_weigths)):
        # Warm start from an existing checkpoint when one is supplied.
        state_dict = torch.load(os.path.join(conf.get_weights_folder(), args.spfn_weigths))
        spfn_module.load_state_dict(state_dict, strict=True)
    # Loading the dataset
    if args.network == 'GlobalSPFN':
        train_dataset = dataloaders.Dataset_GlobalSPFN(n_max_global_instances, csv_path_train, args.lowres_dataset, None, None, noisy_train, n_points=8192, first_n=first_n_train, fixed_order=False)
        train_datasampler = dataloaders.RandomSampler(data_source=train_dataset, seed=12345, identical_epochs=False)
        train_dataloader = torch.utils.data.DataLoader(train_dataset, sampler=train_datasampler, batch_size=conf.get_batch_size(), num_workers=num_workers_train, pin_memory=True)
        val_dataset = dataloaders.Dataset_GlobalSPFN(n_max_global_instances, csv_path_val, args.lowres_dataset, None, None, noisy_val, n_points=8192, first_n=first_n_val, fixed_order=False)
        val_datasampler = dataloaders.RandomSampler(data_source=val_dataset, seed=12345, identical_epochs=False)
        val_dataloader = torch.utils.data.DataLoader(val_dataset, sampler=val_datasampler, batch_size=conf.get_batch_size(), num_workers=num_workers_val, pin_memory=True)
    elif args.network == 'LocalSPFN':
        train_dataset = dataloaders.Dataset_TrainLocalSPFN(n_max_local_instances, csv_path_train, path_patches, noisy_train, first_n=first_n_train, fixed_order=False, lean=True)
        train_datasampler = dataloaders.RandomSampler(data_source=train_dataset, seed=12345, identical_epochs=False)
        train_dataloader = torch.utils.data.DataLoader(train_dataset, sampler=train_datasampler, batch_size=conf.get_batch_size(), num_workers=num_workers_train, pin_memory=True)
        val_dataset = dataloaders.Dataset_TrainLocalSPFN(n_max_local_instances, csv_path_val, path_patches, noisy_val, first_n=first_n_val, fixed_order=False, lean=True)
        val_datasampler = dataloaders.RandomSampler(data_source=val_dataset, seed=12345, identical_epochs=False)
        val_dataloader = torch.utils.data.DataLoader(val_dataset, sampler=val_datasampler, batch_size=conf.get_batch_size(), num_workers=num_workers_val, pin_memory=True)
    # Optimizer
    optimizer = torch.optim.Adam(spfn_module.parameters(), lr=init_learning_rate)
    # Visualisation
    visualiser = training_visualisation.Visualiser(conf.get_visualisation_interval())
    # Training loop: keep the best-validation checkpoint plus periodic snapshots.
    global_step = 0
    best_loss = np.inf
    for epoch in range(nb_epochs):
        global_step, _ = training_utils.spfn_train_val_epoch(train_dataloader, spfn_module, epoch, optimizer, global_step, visualiser, args, conf, device, network_mode='train')
        if (epoch % val_interval == 0) and (epoch > 0):
            with torch.no_grad():
                _, loss = training_utils.spfn_train_val_epoch(val_dataloader, spfn_module, epoch, optimizer, global_step, visualiser, args, conf, device, network_mode='val')
            if loss < best_loss:
                torch.save(spfn_module.state_dict(), os.path.join(conf.get_weights_folder(), spfn_weights_filename + '.pth'))
                best_loss = loss
        if (epoch % snapshot_interval == 0) and (epoch > 0):
            # Fix: the epoch snapshot used to be written twice back-to-back (duplicated line).
            torch.save(spfn_module.state_dict(), os.path.join(conf.get_weights_folder(), spfn_weights_filename + '%d.pth' % epoch))
CPFN | CPFN-master/SPFN/sphere_fitter.py | # Importatiomn of packages
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from SPFN.primitives import Sphere
from SPFN.geometry_utils import weighted_sphere_fitting, weighted_sphere_fitting_tensorflow
def compute_parameters(P, W):
    """Fit one sphere per candidate segment.

    P: BxNx3 point positions; W: BxNxK soft membership weights.
    Returns (center BxKx3, radius_squared BxK).
    """
    B, N, _ = P.size()
    K = W.size(2)
    # Collapse batch and primitive axes so each of the B*K weighted point sets
    # is fitted independently by the shared helper.
    P_flat = P.unsqueeze(1).expand(B, K, N, 3).contiguous().view(B * K, N, 3)
    W_flat = W.transpose(1, 2).contiguous().view(B * K, N)
    center, radius_squared = weighted_sphere_fitting(P_flat, W_flat)
    return center.view(B, K, 3), radius_squared.view(B, K)
def compute_parameters_tensorflow(P, W):
    """TF1 reference implementation of compute_parameters, kept only for the
    parity check in the __main__ block of this file. P: BxNx3, W: BxNxK;
    returns (center BxKx3, radius_squared BxK)."""
    batch_size = tf.shape(P)[0]
    n_points = tf.shape(P)[1]
    n_max_primitives = tf.shape(W)[2]
    P = tf.tile(tf.expand_dims(P, axis=1), [1, n_max_primitives, 1, 1]) # BxKxNx3
    W = tf.transpose(W, perm=[0, 2, 1]) # BxKxN
    P = tf.reshape(P, [batch_size * n_max_primitives, n_points, 3]) # BKxNx3
    W = tf.reshape(W, [batch_size * n_max_primitives, n_points]) # BKxN
    center, radius_squared = weighted_sphere_fitting_tensorflow(P, W)
    center = tf.reshape(center, [batch_size, n_max_primitives, 3])
    radius_squared = tf.reshape(radius_squared, [batch_size, n_max_primitives])
    return center, radius_squared
if __name__ == '__main__':
    # Parity check: run the PyTorch sphere fitting and the TF1 reference on the
    # same random data and print the maximum elementwise deviation.
    # Requires CUDA and TF1; RNG draws are sequential, so order matters.
    batch_size = 100
    num_points = 1024
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    center_torch, radius_squared_torch = compute_parameters(P_torch, W_torch)
    center_torch = center_torch.detach().cpu().numpy()
    radius_squared_torch = radius_squared_torch.detach().cpu().numpy()
    print('center_torch', center_torch)
    print('radius_squared_torch', radius_squared_torch)
    # Debugging with Tensorflow
    P_tensorflow = tf.constant(P, dtype=tf.float32)
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    center_tensorflow, radius_squared_tensorflow = compute_parameters_tensorflow(P_tensorflow, W_tensorflow)
    sess = tf.Session()
    center_tensorflow, radius_squared_tensorflow = sess.run([center_tensorflow, radius_squared_tensorflow])
    print(np.abs(center_tensorflow-center_torch).max())
    print(np.abs(radius_squared_tensorflow-radius_squared_torch).max())
def sqrt_safe(x):
    # Numerically safe sqrt: abs + epsilon keeps the value (and gradient) finite at 0.
    return (x.abs() + 1e-10).sqrt()
def compute_residue_single(center, radius_squared, p):
    """Squared distance of point(s) p to the sphere surface (center, sqrt(radius_squared))."""
    # Safe sqrt (abs + eps) inlined from sqrt_safe, identical numerics.
    dist_to_center = torch.sqrt(torch.abs(torch.sum((p - center) ** 2, dim=-1)) + 1e-10)
    radius = torch.sqrt(torch.abs(radius_squared) + 1e-10)
    return (dist_to_center - radius) ** 2
def sqrt_safe_tensorflow(x):
    # TF1 counterpart of sqrt_safe: abs + epsilon keeps sqrt (and gradient) finite at 0.
    return tf.sqrt(tf.abs(x) + 1e-10)
def compute_residue_single_tensorflow(center, radius_squared, p):
    # TF1 reference for compute_residue_single: squared distance of p to the sphere surface.
    return tf.square(sqrt_safe_tensorflow(tf.reduce_sum(tf.square(p - center), axis=-1)) - sqrt_safe_tensorflow(radius_squared))
if __name__ == '__main__':
    # Parity check: PyTorch vs TF1 sphere residue on identical random inputs;
    # prints the maximum absolute deviation. Requires CUDA and TF1.
    batch_size = 100
    num_points = 1024
    device = torch.device('cuda:0')
    np.random.seed(0)
    center = np.random.randn(batch_size, num_points, 3)
    radius_squared = np.random.rand(batch_size, num_points)
    p = np.random.rand(batch_size, num_points, 3)
    center_torch = torch.from_numpy(center).float().to(device)
    radius_squared_torch = torch.from_numpy(radius_squared).float().to(device)
    p_torch = torch.from_numpy(p).float().to(device)
    residue_loss_torch = compute_residue_single(center_torch, radius_squared_torch, p_torch)
    residue_loss_torch = residue_loss_torch.detach().cpu().numpy()
    print('residue_loss_torch', residue_loss_torch)
    # Debugging with Tensorflow
    center_tensorflow = tf.constant(center, dtype=tf.float32)
    radius_squared_tensorflow = tf.constant(radius_squared, dtype=tf.float32)
    p_tensorflow = tf.constant(p, dtype=tf.float32)
    residue_loss_torch_tensorflow = compute_residue_single_tensorflow(center_tensorflow, radius_squared_tensorflow, p_tensorflow)
    sess = tf.Session()
    residue_loss_torch_tensorflow = sess.run(residue_loss_torch_tensorflow)
    print(np.abs(residue_loss_torch_tensorflow - residue_loss_torch).max())
def create_primitive_from_dict(d):
    """Build a Sphere primitive from a parsed annotation dict (keys: type,
    location_{x,y,z}, radius)."""
    assert d['type'] == 'sphere'
    center = np.array([d['location_x'], d['location_y'], d['location_z']], dtype=float)
    return Sphere(center=center, radius=float(d['radius']))
def extract_parameter_data_as_dict(primitives, n_max_primitives):
    """Spheres contribute no supervised parameter tensors; return an empty dict
    to keep the per-fitter API uniform."""
    return {}
def extract_predicted_parameters_as_json(sphere_center, sphere_radius_squared, k):
    """Serialise one predicted sphere (instance label k) into a JSON-ready dict."""
    sphere = Sphere(sphere_center, np.sqrt(sphere_radius_squared))
    center = sphere.center
    json_info = {'type': 'sphere'}
    json_info['center_x'] = float(center[0])
    json_info['center_y'] = float(center[1])
    json_info['center_z'] = float(center[2])
    json_info['radius'] = float(sphere.radius)
    json_info['label'] = k
    return json_info
CPFN | CPFN-master/SPFN/plane_fitter.py | # Importatiomn of packages
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from SPFN.primitives import Plane
from SPFN.geometry_utils import weighted_plane_fitting, weighted_plane_fitting_tensorflow
def compute_parameters(P, W):
    """Fit one plane per candidate segment.

    P: BxNx3 points; W: BxNxK soft memberships.
    Returns (n BxKx3 plane normals, c BxK offsets) for planes {x : <x, n> = c}.
    """
    B, N, _ = P.size()
    K = W.size(2)
    # Flatten batch and primitive axes identically for P and W so row i of each
    # refers to the same (shape, primitive) pair.
    W_flat = W.transpose(1, 2).contiguous().view(B * K, N)
    P_flat = P.unsqueeze(1).expand(B, K, N, 3).contiguous().view(B * K, N, 3)
    n, c = weighted_plane_fitting(P_flat, W_flat)  # (B*K)x3, (B*K)
    return n.view(B, K, 3), c.view(B, K)
def compute_parameters_tensorflow(P, W):
    """TF1 reference implementation of compute_parameters, kept only for the
    parity check in the __main__ block of this file."""
    batch_size = tf.shape(P)[0]
    n_points = tf.shape(P)[1]
    n_max_instances = tf.shape(W)[2]
    W_reshaped = tf.reshape(tf.transpose(W, [0, 2, 1]), [batch_size * n_max_instances, n_points]) # BKxN
    P_tiled = tf.reshape(tf.tile(tf.expand_dims(P, axis=1), [1, n_max_instances, 1, 1]), [batch_size * n_max_instances, n_points, 3]) # BKxNx3, important there to match indices in W_reshaped!!!
    n, c = weighted_plane_fitting_tensorflow(P_tiled, W_reshaped) # BKx3
    n = tf.reshape(n, [batch_size, n_max_instances, 3]) # BxKx3
    c = tf.reshape(c, [batch_size, n_max_instances]) # BxK
    return n, c
if __name__ == '__main__':
    # Parity check: PyTorch vs TF1 plane fitting on identical random inputs.
    # Fitted normals are sign-ambiguous, hence the min(|a-b|, |a+b|) comparison.
    # Requires CUDA and TF1.
    batch_size = 100
    num_points = 1024
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    n_torch, c_torch = compute_parameters(P_torch, W_torch)
    n_torch = n_torch.detach().cpu().numpy()
    c_torch = c_torch.detach().cpu().numpy()
    print('n_torch', n_torch)
    print('c_torch', c_torch)
    # Debugging with Tensorflow
    P_tensorflow = tf.constant(P, dtype=tf.float32)
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    n_tensorflow, c_tensorflow = compute_parameters_tensorflow(P_tensorflow, W_tensorflow)
    sess = tf.Session()
    n_tensorflow, c_tensorflow = sess.run([n_tensorflow, c_tensorflow])
    print(np.minimum(np.abs(n_tensorflow - n_torch), np.abs(n_tensorflow + n_torch)).max())
    print(np.minimum(np.abs(c_tensorflow - c_torch), np.abs(c_tensorflow + c_torch)).max())
def compute_residue_single(n, c, p):
    # Squared point-to-plane distance for plane {x : <x, n> = c}; n: ...x3, c: ..., p: ...x3.
    signed_dist = (p * n).sum(dim=-1) - c
    return signed_dist ** 2
def compute_residue_single_tensorflow(n, c, p):
    # TF1 reference for compute_residue_single: squared point-to-plane distance.
    # n: ...x3, c: ..., p: ...x3
    return tf.square(tf.reduce_sum(p * n, axis=-1) - c)
if __name__ == '__main__':
    # Parity check: PyTorch vs TF1 plane residue on identical random inputs;
    # prints the maximum absolute deviation. Requires CUDA and TF1.
    batch_size = 100
    num_points = 1024
    device = torch.device('cuda:0')
    np.random.seed(0)
    n = np.random.randn(batch_size, num_points, 3)
    c = np.random.rand(batch_size, num_points)
    p = np.random.rand(batch_size, num_points, 3)
    n_torch = torch.from_numpy(n).float().to(device)
    c_torch = torch.from_numpy(c).float().to(device)
    p_torch = torch.from_numpy(p).float().to(device)
    residue_loss_torch = compute_residue_single(n_torch, c_torch, p_torch)
    residue_loss_torch = residue_loss_torch.detach().cpu().numpy()
    print('residue_loss_torch', residue_loss_torch)
    # Debugging with Tensorflow
    n_tensorflow = tf.constant(n, dtype=tf.float32)
    c_tensorflow = tf.constant(c, dtype=tf.float32)
    p_tensorflow = tf.constant(p, dtype=tf.float32)
    residue_loss_torch_tensorflow = compute_residue_single_tensorflow(n_tensorflow, c_tensorflow, p_tensorflow)
    sess = tf.Session()
    residue_loss_torch_tensorflow = sess.run(residue_loss_torch_tensorflow)
    print(np.abs(residue_loss_torch_tensorflow-residue_loss_torch).max())
def acos_safe(x):
    # Clamp just inside [-1, 1] so acos (and its gradient) stays finite.
    return x.clamp(min=-1.0 + 1e-6, max=1.0 - 1e-6).acos()
def compute_parameter_loss(predicted_n, gt_n, matching_indices, angle_diff):
    """Normal deviation between matched predicted and GT planes.

    predicted_n: BxK1x3 predicted normals; gt_n: BxK2x3 GT normals;
    matching_indices: BxK2 map from GT slot to predicted slot.
    Returns BxK2: the angle in radians if angle_diff, else 1 - |cos|.
    Orientation is ignored via the absolute dot product.
    """
    B, K, _ = gt_n.size()
    gather_idx = matching_indices.unsqueeze(2).expand(B, K, 3)
    matched_n = torch.gather(predicted_n, 1, gather_idx)
    dot_abs = (matched_n * gt_n).sum(dim=2).abs()
    if angle_diff:
        # acos_safe inlined: clamp keeps acos and its gradient finite at |cos| == 1.
        return torch.acos(torch.clamp(dot_abs, min=-1.0 + 1e-6, max=1.0 - 1e-6))
    return 1.0 - dot_abs
def batched_gather(data, indices, axis):
    """TF1 helper: batched index-select along `axis`, used by the parity checks."""
    # data - Bx...xKx..., axis is where dimension K is
    # indices - BxK
    # output[b, ..., k, ...] = in[b, ..., indices[b, k], ...]
    assert axis >= 1
    ndims = data.get_shape().ndims # allow dynamic rank
    if axis > 1:
        # tranpose data to BxKx...
        perm = np.arange(ndims)
        perm[axis] = 1
        perm[1] = axis
        data = tf.transpose(data, perm=perm)
    batch_size = tf.shape(data)[0]
    # Pair every (b, k) index with its batch number so gather_nd can address rows.
    batch_nums = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(batch_size), axis=1), axis=2), multiples=[1, tf.shape(indices)[1], 1]) # BxKx1
    indices = tf.concat([batch_nums, tf.expand_dims(indices, axis=2)], axis=2) # BxKx2
    gathered_data = tf.gather_nd(data, indices=indices)
    if axis > 1:
        # Undo the transpose (perm is its own inverse: it only swaps 1 and axis).
        gathered_data = tf.transpose(gathered_data, perm=perm)
    return gathered_data
def acos_safe_tensorflow(x):
    # TF1 counterpart of acos_safe: clamp away from ±1 so acos stays finite.
    return tf.math.acos(tf.clip_by_value(x, -1.0+1e-6, 1.0-1e-6))
def compute_parameter_loss_tensorflow(predicted_n, gt_n, matching_indices, angle_diff):
    # TF1 reference for compute_parameter_loss, kept for the parity check below.
    n = batched_gather(predicted_n, matching_indices, axis=1)
    dot_abs = tf.abs(tf.reduce_sum(n * gt_n, axis=2))
    if angle_diff:
        return acos_safe_tensorflow(dot_abs) # BxK
    else:
        return 1.0 - dot_abs # BxK
if __name__ == '__main__':
    # Parity check: PyTorch vs TF1 parameter (normal-angle) loss on identical
    # random inputs; prints the maximum absolute deviation. Requires CUDA and TF1.
    batch_size = 100
    num_primitives1 = 15
    num_primitives2 = 5
    device = torch.device('cuda:0')
    np.random.seed(0)
    predicted_axis = np.random.randn(batch_size, num_primitives1, 3)
    gt_axis = np.random.randn(batch_size, num_primitives2, 3)
    matching_indices = np.random.randint(0, 15, (batch_size, num_primitives2))
    angle_diff = True
    predicted_axis_torch = torch.from_numpy(predicted_axis).float().to(device)
    gt_axis_torch = torch.from_numpy(gt_axis).float().to(device)
    matching_indices_torch = torch.from_numpy(matching_indices).long().to(device)
    loss_torch = compute_parameter_loss(predicted_axis_torch, gt_axis_torch, matching_indices_torch, angle_diff)
    loss_torch = loss_torch.detach().cpu().numpy()
    print('loss_torch', loss_torch)
    # Debugging with Tensorflow
    predicted_axis_tensorflow = tf.constant(predicted_axis, dtype=tf.float32)
    gt_axis_tensorflow = tf.constant(gt_axis, dtype=tf.float32)
    matching_indices_tensorflow = tf.constant(matching_indices, dtype=tf.int32)
    loss_tensorflow = compute_parameter_loss_tensorflow(predicted_axis_tensorflow, gt_axis_tensorflow,
                                                        matching_indices_tensorflow, angle_diff)
    sess = tf.Session()
    loss_tensorflow = sess.run(loss_tensorflow)
    print(np.abs(loss_torch - loss_tensorflow).max())
def create_primitive_from_dict(d):
    """Build a Plane primitive (n·x = c form) from a parsed annotation dict
    (keys: type, location_{x,y,z}, axis_{x,y,z})."""
    assert d['type'] == 'plane'
    location = np.array([d['location_x'], d['location_y'], d['location_z']], dtype=float)
    normal = np.array([d['axis_x'], d['axis_y'], d['axis_z']], dtype=float)
    # c is the offset of the plane through `location` with normal `normal`.
    return Plane(n=normal, c=np.dot(location, normal))
def extract_parameter_data_as_dict(primitives, n_max_instances):
    """Pack GT plane normals into a fixed-size (n_max_instances, 3) array;
    slots holding non-plane primitives stay zero."""
    normals = np.zeros((n_max_instances, 3), dtype=float)
    for slot, primitive in enumerate(primitives):
        if isinstance(primitive, Plane):
            normals[slot] = primitive.n
    return {'plane_n_gt': normals}
def extract_predicted_parameters_as_json(plane_normal, plane_center, k):
    """Serialise one predicted plane (instance label k) into a JSON-ready dict."""
    plane = Plane(plane_normal, plane_center)
    center = [float(v) for v in plane.center[:3]]
    normal = [float(v) for v in plane.n[:3]]
    x_axis = [float(v) for v in plane.x_axis[:3]]
    y_axis = [float(v) for v in plane.y_axis[:3]]
    # Key order kept identical to the historical output format.
    return {
        'type': 'plane',
        'center_x': center[0],
        'center_y': center[1],
        'center_z': center[2],
        'normal_x': normal[0],
        'normal_y': normal[1],
        'normal_z': normal[2],
        'x_size': float(plane.x_range[1] - plane.x_range[0]),
        'y_size': float(plane.y_range[1] - plane.y_range[0]),
        'x_axis_x': x_axis[0],
        'x_axis_y': x_axis[1],
        'x_axis_z': x_axis[2],
        'y_axis_x': y_axis[0],
        'y_axis_y': y_axis[1],
        'y_axis_z': y_axis[2],
        'label': k,
    }
CPFN | CPFN-master/SPFN/metric_implementation.py | # Importation of packages
import torch
import numpy as np
from scipy.optimize import linear_sum_assignment
from SPFN import plane_fitter, sphere_fitter, cylinder_fitter, cone_fitter
from SPFN import losses_implementation
def hungarian_matching(W_pred, I_gt):
    """Match GT instances to predicted segments via the Hungarian algorithm.

    W_pred: BxNxK predicted memberships; I_gt: BxN GT instance labels,
    assumed gap-free per shape and possibly containing -1 (background).
    No gradient flows through this op.

    Returns (matching_indices BxK, mask BxK) where row b is meaningful only
    for the first K'_b = max(I_gt[b]) + 1 entries; matching_indices[b, k] is
    the predicted segment assigned to GT instance k. The GT background
    instance is excluded from the matching.
    """
    batch_size, _, n_max_labels = W_pred.size()
    device = W_pred.device
    matching_indices = torch.zeros([batch_size, n_max_labels], dtype=torch.long, device=device)
    mask = torch.zeros([batch_size, n_max_labels], dtype=torch.bool, device=device)
    for b in range(batch_size):
        n_gt_labels = torch.max(I_gt[b]).item() + 1  # K' for this shape
        # One-hot GT memberships; the extra (last) column catches label -1.
        W_gt = torch.eye(n_gt_labels + 1, device=I_gt.device)[I_gt[b]]
        intersection = W_gt.transpose(0, 1).mm(W_pred[b])  # (K'+1)xK
        union = W_gt.sum(dim=0).unsqueeze(1) + W_pred[b].sum(dim=0).unsqueeze(0) - intersection
        iou = intersection / torch.clamp(union, min=1e-10)
        iou = iou[:n_gt_labels, :]  # drop the background row before matching
        # linear_sum_assignment minimises cost, so negate to maximise IoU.
        _, assignment = linear_sum_assignment(-iou.detach().cpu().numpy())
        matching_indices[b, :n_gt_labels] = torch.from_numpy(assignment).long().to(device)
        mask[b, :n_gt_labels] = True
    return matching_indices, mask
# Converting W to hard encoding
def hard_W_encoding(W):
    """One-hot encode soft memberships W (BxNxK) via per-point argmax."""
    num_labels = W.size(2)
    winners = W.argmax(dim=2)  # BxN
    identity = torch.eye(num_labels).to(W.device)
    return identity[winners]
if __name__ == '__main__' and 1:
    # Smoke test for hard_W_encoding on random memberships; prints the output
    # shape only. Requires CUDA.
    batch_size = 8
    num_points = 1024
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    W = W / np.linalg.norm(W, axis=2, keepdims=True)
    W_torch = torch.from_numpy(W).float().to(device)
    hardW = hard_W_encoding(W_torch)
    print('hardW', hardW.size())
# Getting the per instance type
def get_instance_type(T, W):
    """Vote a type id per instance: aggregate per-point type scores T (BxNxC)
    with memberships W (BxNxK) and take the argmax. Returns BxK long tensor."""
    votes = W.transpose(1, 2).bmm(T)  # BxKxC membership-weighted score sums
    return votes.argmax(dim=2)
if __name__ == '__main__' and 1:
    # Smoke test for get_instance_type on random scores; prints the output
    # shape only. Requires CUDA.
    batch_size = 8
    num_points = 1024
    n_max_instances = 12
    n_type = 4
    device = torch.device('cuda:0')
    np.random.seed(0)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    W = W / np.linalg.norm(W, axis=2, keepdims=True)
    T = np.random.rand(batch_size, num_points, n_type)
    T = T / np.linalg.norm(T, axis=2, keepdims=True)
    W_torch = torch.from_numpy(W).float().to(device)
    T_torch = torch.from_numpy(T).float().to(device)
    instance_type = get_instance_type(T_torch, W_torch)
    print('instance_type', instance_type.size())
def sqrt_safe(x):
    # Numerically safe sqrt: abs + epsilon keeps the value (and gradient) finite at 0.
    return (x.abs() + 1e-10).sqrt()
# Getting the residual loss
def get_residual_loss(parameters, matching_indices, points_per_instance, T, classes=['plane','sphere','cylinder','cone']):
    """Per-point residual distance of GT surface samples to their matched fitted primitive.

    parameters: predicted per-class parameter dict; matching_indices: BxK GT-to-prediction
    assignment; points_per_instance: BxKxSx3 GT samples per instance; T: BxK predicted
    per-instance type ids. Returns BxKxS residuals (sqrt of the squared residues).
    """
    batch_size, num_primitives, num_primitive_points, _ = points_per_instance.shape
    _, residue_per_point_array = losses_implementation.compute_residue_loss(parameters, matching_indices, points_per_instance, torch.gather(T, 1, matching_indices), classes=classes)
    # Pick, for every primitive, the residue channel of its predicted type.
    residue_per_point_array = torch.gather(residue_per_point_array, 3, T.view(batch_size, num_primitives, 1, 1).expand(batch_size, num_primitives, num_primitive_points, 1)).squeeze(3)
    residual_loss = sqrt_safe(residue_per_point_array)
    return residual_loss
if __name__ == '__main__' and 1:
    # Smoke test for get_residual_loss: build random predictions and GT samples,
    # fit parameters, match instances, then print the residual tensor shape.
    # Requires CUDA and the project losses_implementation module.
    batch_size = 8
    num_points = 1024
    num_points_instance = 512
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    W = W / np.linalg.norm(W, axis=2, keepdims=True)
    T = np.random.rand(batch_size, num_points, 4)
    T = T / np.linalg.norm(T, axis=2, keepdims=True)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)
    points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
    T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
    I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    X_torch = torch.from_numpy(X).float().to(device)
    points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
    T_torch = torch.from_numpy(T).float().to(device)
    T_gt_torch = torch.from_numpy(T_gt).long().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    W_torch = hard_W_encoding(W_torch)
    parameters_torch = losses_implementation.compute_parameters(P_torch, W_torch, X_torch)
    matching_indices_torch, _ = hungarian_matching(W_torch, I_gt_torch)
    T_torch = get_instance_type(T_torch, W_torch)
    residual_loss = get_residual_loss(parameters_torch, matching_indices_torch, points_per_instance_torch, T_torch, classes=['plane','sphere','cylinder','cone'])
    print('residual_loss', residual_loss.size())
# Arccos safe
def acos_safe(x):
    # Clamp just inside [-1, 1] so acos (and its gradient) stays finite.
    return x.clamp(min=-1.0 + 1e-6, max=1.0 - 1e-6).acos()
# Segmentation mIoU
def compute_segmentation_iou(W, I_gt, matching_indices, mask):# W - BxNxK
    """Mean IoU between matched predicted segments (W, BxNxK) and GT labels
    (I_gt, BxN), averaged over the valid primitives flagged in mask (BxK).
    Returns a length-B tensor."""
    mIoU = 1 - losses_implementation.compute_miou_loss(W, I_gt, matching_indices)[0]
    mIoU = torch.sum(mask * mIoU, dim=1) / torch.sum(mask, dim=1)
    return mIoU
if __name__ == '__main__' and 1:
    # Smoke test for compute_segmentation_iou on random memberships; prints the
    # output shape only. Requires CUDA and losses_implementation.
    batch_size = 8
    num_points = 1024
    num_points_instance = 512
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    W = W / np.linalg.norm(W, axis=2, keepdims=True)
    I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
    W_torch = torch.from_numpy(W).float().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    W_torch = hard_W_encoding(W_torch)
    matching_indices_torch, mask_torch = hungarian_matching(W_torch, I_gt_torch)
    mIou = compute_segmentation_iou(W_torch, I_gt_torch, matching_indices_torch, mask_torch)
    print('mIou', mIou.size())
# Mean primitive type accuracy
def compute_type_accuracy(T, T_gt, matching_indices, mask):
    """Fraction of valid (masked) GT primitives whose voted type matches T_gt.

    T: BxK predicted per-instance type ids; T_gt: BxK GT types;
    matching_indices: BxK GT-to-prediction map; mask: BxK validity weights.
    Returns a length-B tensor.
    """
    T_matched = torch.gather(T, 1, matching_indices)
    correct = (T_matched == T_gt)
    return (mask * correct).sum(dim=1) / mask.sum(dim=1)
if __name__ == '__main__' and 1:
    # Smoke test for compute_type_accuracy. Requires CUDA.
    # Fix: the call previously omitted the required matching_indices argument
    # (3 args passed to a 4-argument function), raising a TypeError when run;
    # a matching is now derived from random GT labels as in the other tests.
    batch_size = 8
    num_points = 1024
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    W = W / np.linalg.norm(W, axis=2, keepdims=True)
    T = np.random.rand(batch_size, num_points, 4)
    T = T / np.linalg.norm(T, axis=2, keepdims=True)
    T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
    mask = np.random.randint(0, 2, (batch_size, n_max_instances))
    I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
    W_torch = torch.from_numpy(W).float().to(device)
    T_torch = torch.from_numpy(T).float().to(device)
    T_gt_torch = torch.from_numpy(T_gt).long().to(device)
    mask_torch = torch.from_numpy(mask).float().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    W_torch = hard_W_encoding(W_torch)
    T_torch = get_instance_type(T_torch, W_torch)
    matching_indices_torch, _ = hungarian_matching(W_torch, I_gt_torch)
    type_accuracy = compute_type_accuracy(T_torch, T_gt_torch, matching_indices_torch, mask_torch)
    print('type_accuracy', type_accuracy.size())
# Mean point normal difference
def compute_normal_difference(X, X_gt):
    """Mean per-point angular error (radians) between predicted normals X and
    GT normals X_gt (both BxNx3 unit vectors), ignoring orientation."""
    cos_abs = (X * X_gt).sum(dim=2).abs()
    # acos_safe inlined: clamp keeps acos finite at |cos| == 1.
    angles = torch.acos(torch.clamp(cos_abs, min=-1.0 + 1e-6, max=1.0 - 1e-6))
    return angles.mean(dim=1)
if __name__ == '__main__' and 1:
    # Smoke test for compute_normal_difference. Requires CUDA.
    # Fix: the call previously passed X_gt_torch twice (comparing GT against
    # itself, leaving X_torch unused and the result trivially ~0); it now
    # compares the predicted normals against the GT normals.
    batch_size = 8
    num_points = 1024
    device = torch.device('cuda:0')
    np.random.seed(0)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)
    X_gt = np.random.randn(batch_size, num_points, 3)
    X_gt = X_gt / np.linalg.norm(X_gt, axis=2, keepdims=True)
    X_torch = torch.from_numpy(X).float().to(device)
    X_gt_torch = torch.from_numpy(X_gt).float().to(device)
    normal_difference = compute_normal_difference(X_torch, X_gt_torch)
    print('normal_difference', normal_difference.size())
# Mean primitive axis difference
def compute_axis_difference(predicted_parameters, gt_parameters, matching_indices, T, T_gt, mask, classes=['plane','sphere','cylinder','cone'], div_eps=1e-10):
    """Axis/normal difference between matched predicted and GT primitives,
    counting only primitives that are valid (mask) AND correctly typed.

    predicted_parameters/gt_parameters: per-class parameter dicts;
    matching_indices: BxK GT-to-prediction map; T/T_gt: BxK predicted/GT type
    ids; mask: BxK validity. Returns a length-B tensor.
    """
    mask = mask * (T == T_gt).float()
    parameter_loss = losses_implementation.compute_parameter_loss(predicted_parameters, gt_parameters, matching_indices, T_gt, is_eval=True, classes=classes)
    # NOTE(review): the denominator sums the *unmasked* losses rather than the
    # mask, so this is a masked-to-total loss ratio, not a per-primitive mean —
    # confirm this is intended (a mean would divide by torch.sum(mask, dim=1)).
    axis_difference = torch.sum(mask * parameter_loss, dim=1) / torch.clamp(torch.sum(parameter_loss, dim=1), min=div_eps, max=None)
    return axis_difference
if __name__ == '__main__' and 1:
    # Smoke test for compute_axis_difference: random predictions plus a full set
    # of random GT parameters for all four primitive classes; prints the output
    # shape only. Requires CUDA and losses_implementation.
    batch_size = 8
    num_points = 1024
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    W = W / np.linalg.norm(W, axis=2, keepdims=True)
    T = np.random.rand(batch_size, num_points, 4)
    T = T / np.linalg.norm(T, axis=2, keepdims=True)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)
    T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
    I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
    plane_normal = np.random.randn(batch_size, n_max_instances, 3)
    plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
    plane_center = np.random.randn(batch_size, n_max_instances)
    sphere_center = np.random.randn(batch_size, n_max_instances, 3)
    sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
    cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
    cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
    cone_apex = np.random.randn(batch_size, n_max_instances, 3)
    cone_axis = np.random.randn(batch_size, n_max_instances, 3)
    cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
    gt_parameters = {'plane_normal': plane_normal,
                     'plane_center': plane_center,
                     'sphere_center': sphere_center,
                     'sphere_radius_squared': sphere_radius_squared,
                     'cylinder_axis': cylinder_axis,
                     'cylinder_center': cylinder_center,
                     'cylinder_radius_square': cylinder_radius_square,
                     'cone_apex': cone_apex,
                     'cone_axis': cone_axis,
                     'cone_half_angle': cone_half_angle}
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    T_torch = torch.from_numpy(T).float().to(device)
    X_torch = torch.from_numpy(X).float().to(device)
    T_gt_torch = torch.from_numpy(T_gt).long().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    gt_parameters_torch = {'plane_normal': torch.from_numpy(gt_parameters['plane_normal']).float().to(device),
                           'plane_center': torch.from_numpy(gt_parameters['plane_center']).float().to(device),
                           'sphere_center': torch.from_numpy(gt_parameters['sphere_center']).float().to(device),
                           'sphere_radius_squared': torch.from_numpy(gt_parameters['sphere_radius_squared']).float().to(device),
                           'cylinder_axis': torch.from_numpy(gt_parameters['cylinder_axis']).float().to(device),
                           'cylinder_center': torch.from_numpy(gt_parameters['cylinder_center']).float().to(device),
                           'cylinder_radius_square': torch.from_numpy(gt_parameters['cylinder_radius_square']).float().to(device),
                           'cone_apex': torch.from_numpy(gt_parameters['cone_apex']).float().to(device),
                           'cone_axis': torch.from_numpy(gt_parameters['cone_axis']).float().to(device),
                           'cone_half_angle': torch.from_numpy(gt_parameters['cone_half_angle']).float().to(device)}
    W_torch = hard_W_encoding(W_torch)
    predicted_parameters_torch = losses_implementation.compute_parameters(P_torch, W_torch, X_torch)
    matching_indices_torch, mask_torch = hungarian_matching(W_torch, I_gt_torch)
    T_torch = get_instance_type(T_torch, W_torch)
    axis_difference = compute_axis_difference(predicted_parameters_torch, gt_parameters_torch, matching_indices_torch, T_torch, T_gt_torch, mask_torch, classes=['plane', 'sphere', 'cylinder', 'cone'])
    print('axis_difference', axis_difference.size())
# Mean/Std Sk residual
def compute_meanstd_Sk_residual(residue_loss, mask):
    """Masked per-batch average of per-instance residual mean and std.

    residue_loss: BxKxM tensor of residuals per instance sample point.
    mask: BxK validity mask (1 for matched instances, 0 otherwise).
    Returns (mean_residual, std_residual), each of shape B.
    """
    valid_count = torch.sum(mask, dim=1)  # B
    per_instance_mean = torch.mean(residue_loss, dim=2)  # BxK
    per_instance_std = torch.std(residue_loss, dim=2)  # BxK (unbiased, as torch.std defaults)
    avg_mean = torch.sum(mask * per_instance_mean, dim=1) / valid_count
    avg_std = torch.sum(mask * per_instance_std, dim=1) / valid_count
    return avg_mean, avg_std
if __name__ == '__main__' and 1:
    # Debug harness for compute_meanstd_Sk_residual on random inputs.
    # Requires a CUDA device and the SPFN helpers (hard_W_encoding,
    # hungarian_matching, get_instance_type, get_residual_loss) defined elsewhere
    # in this module. RNG call order is fixed by the seed below.
    batch_size = 8
    num_points = 1024
    num_points_instance = 512
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    W = W / np.linalg.norm(W, axis=2, keepdims=True)
    T = np.random.rand(batch_size, num_points, 4)
    T = T / np.linalg.norm(T, axis=2, keepdims=True)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)
    T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
    I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
    plane_normal = np.random.randn(batch_size, n_max_instances, 3)
    plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
    plane_center = np.random.randn(batch_size, n_max_instances)
    sphere_center = np.random.randn(batch_size, n_max_instances, 3)
    sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
    cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
    cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
    cone_apex = np.random.randn(batch_size, n_max_instances, 3)
    cone_axis = np.random.randn(batch_size, n_max_instances, 3)
    cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
    gt_parameters = {'plane_normal': plane_normal,
                     'plane_center': plane_center,
                     'sphere_center': sphere_center,
                     'sphere_radius_squared': sphere_radius_squared,
                     'cylinder_axis': cylinder_axis,
                     'cylinder_center': cylinder_center,
                     'cylinder_radius_square': cylinder_radius_square,
                     'cone_apex': cone_apex,
                     'cone_axis': cone_axis,
                     'cone_half_angle': cone_half_angle}
    points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    T_torch = torch.from_numpy(T).float().to(device)
    X_torch = torch.from_numpy(X).float().to(device)
    T_gt_torch = torch.from_numpy(T_gt).long().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    # Idiom: one dict comprehension replaces ten hand-unrolled conversions.
    gt_parameters_torch = {key: torch.from_numpy(val).float().to(device) for key, val in gt_parameters.items()}
    points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
    W_torch = hard_W_encoding(W_torch)
    predicted_parameters_torch = losses_implementation.compute_parameters(P_torch, W_torch, X_torch)
    matching_indices_torch, mask_torch = hungarian_matching(W_torch, I_gt_torch)
    T_torch = get_instance_type(T_torch, W_torch)
    residue_loss_torch = get_residual_loss(predicted_parameters_torch, matching_indices_torch, points_per_instance_torch, T_torch, classes=['plane', 'sphere', 'cylinder', 'cone'])
    mean_residual, std_residual = compute_meanstd_Sk_residual(residue_loss_torch, mask_torch)
    print('Mean Sk Residual Loss: ', mean_residual)
    print('Std Sk Residual Loss: ', std_residual)
# Sk coverage
def compute_Sk_coverage(residue_loss, epsilon, mask):
    """Fraction of per-instance sample points with residual below epsilon,
    averaged over valid (masked-in) instances for each batch element.

    residue_loss: BxKxM residuals; mask: BxK validity mask. Returns B.
    """
    below_threshold = (residue_loss < epsilon).float()
    per_instance_coverage = below_threshold.mean(dim=2)  # BxK
    return (mask * per_instance_coverage).sum(dim=1) / mask.sum(dim=1)
if __name__ == '__main__' and 1:
    # Debug harness for compute_Sk_coverage on random inputs.
    # Requires a CUDA device and the SPFN helpers defined elsewhere in this module.
    batch_size = 8
    num_points = 1024
    num_points_instance = 512
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    W = W / np.linalg.norm(W, axis=2, keepdims=True)
    T = np.random.rand(batch_size, num_points, 4)
    T = T / np.linalg.norm(T, axis=2, keepdims=True)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)
    T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
    I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
    plane_normal = np.random.randn(batch_size, n_max_instances, 3)
    plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
    plane_center = np.random.randn(batch_size, n_max_instances)
    sphere_center = np.random.randn(batch_size, n_max_instances, 3)
    sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
    cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
    cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
    cone_apex = np.random.randn(batch_size, n_max_instances, 3)
    cone_axis = np.random.randn(batch_size, n_max_instances, 3)
    cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
    gt_parameters = {'plane_normal': plane_normal,
                     'plane_center': plane_center,
                     'sphere_center': sphere_center,
                     'sphere_radius_squared': sphere_radius_squared,
                     'cylinder_axis': cylinder_axis,
                     'cylinder_center': cylinder_center,
                     'cylinder_radius_square': cylinder_radius_square,
                     'cone_apex': cone_apex,
                     'cone_axis': cone_axis,
                     'cone_half_angle': cone_half_angle}
    points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
    epsilon = 0.01
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    T_torch = torch.from_numpy(T).float().to(device)
    X_torch = torch.from_numpy(X).float().to(device)
    T_gt_torch = torch.from_numpy(T_gt).long().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    # Idiom: one dict comprehension replaces ten hand-unrolled conversions.
    gt_parameters_torch = {key: torch.from_numpy(val).float().to(device) for key, val in gt_parameters.items()}
    points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
    W_torch = hard_W_encoding(W_torch)
    predicted_parameters_torch = losses_implementation.compute_parameters(P_torch, W_torch, X_torch)
    matching_indices_torch, mask_torch = hungarian_matching(W_torch, I_gt_torch)
    T_torch = get_instance_type(T_torch, W_torch)
    residue_loss_torch = get_residual_loss(predicted_parameters_torch, matching_indices_torch,
                                           points_per_instance_torch, T_torch,
                                           classes=['plane', 'sphere', 'cylinder', 'cone'])
    Sk_coverage = compute_Sk_coverage(residue_loss_torch, epsilon, mask_torch)
    print('Sk Coverage : ', Sk_coverage)
# P coverage
def compute_P_coverage(P, T, matching_indices, predicted_parameters, epsilon, classes=['plane', 'sphere', 'cylinder', 'cone']):
    """Fraction of input points lying within epsilon of their best-fitting predicted primitive.

    P: BxNx3 points; T: BxK instance types; matching_indices: BxK prediction-to-GT matching.
    Returns a tensor of shape B.
    """
    batch_size, num_points, _ = P.size()
    num_primitives = T.size(1)
    # Evaluate every point against every primitive, then keep the best primitive per point.
    tiled_points = P.unsqueeze(1).expand(batch_size, num_primitives, num_points, 3)
    matched_types = torch.gather(T, 1, matching_indices)
    residuals = get_residual_loss(predicted_parameters, matching_indices, tiled_points, matched_types, classes=classes)
    best_residual, _ = torch.min(residuals, dim=1)  # BxN
    return torch.mean((best_residual < epsilon).float(), dim=1)
if __name__ == '__main__' and 1:
    # Debug harness for compute_P_coverage on random inputs.
    # Requires a CUDA device and the SPFN helpers defined elsewhere in this module.
    batch_size = 8
    num_points = 1024
    num_points_instance = 512
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    W = W / np.linalg.norm(W, axis=2, keepdims=True)
    T = np.random.rand(batch_size, num_points, 4)
    T = T / np.linalg.norm(T, axis=2, keepdims=True)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)
    T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
    I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
    plane_normal = np.random.randn(batch_size, n_max_instances, 3)
    plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
    plane_center = np.random.randn(batch_size, n_max_instances)
    sphere_center = np.random.randn(batch_size, n_max_instances, 3)
    sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
    cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
    cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
    cone_apex = np.random.randn(batch_size, n_max_instances, 3)
    cone_axis = np.random.randn(batch_size, n_max_instances, 3)
    cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
    gt_parameters = {'plane_normal': plane_normal,
                     'plane_center': plane_center,
                     'sphere_center': sphere_center,
                     'sphere_radius_squared': sphere_radius_squared,
                     'cylinder_axis': cylinder_axis,
                     'cylinder_center': cylinder_center,
                     'cylinder_radius_square': cylinder_radius_square,
                     'cone_apex': cone_apex,
                     'cone_axis': cone_axis,
                     'cone_half_angle': cone_half_angle}
    points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
    epsilon = 0.01
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    T_torch = torch.from_numpy(T).float().to(device)
    X_torch = torch.from_numpy(X).float().to(device)
    T_gt_torch = torch.from_numpy(T_gt).long().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    # Idiom: one dict comprehension replaces ten hand-unrolled conversions.
    gt_parameters_torch = {key: torch.from_numpy(val).float().to(device) for key, val in gt_parameters.items()}
    points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
    W_torch = hard_W_encoding(W_torch)
    predicted_parameters_torch = losses_implementation.compute_parameters(P_torch, W_torch, X_torch)
    matching_indices_torch, mask_torch = hungarian_matching(W_torch, I_gt_torch)
    T_torch = get_instance_type(T_torch, W_torch)
    # Bug fix: the mandatory `epsilon` argument was missing here, which raised a TypeError.
    P_coverage = compute_P_coverage(P_torch, T_torch, matching_indices_torch, predicted_parameters_torch, epsilon, classes=['plane', 'sphere', 'cylinder', 'cone'])
    print('P Coverage : ', P_coverage)
def compute_all_metrics(P, X, X_gt, W, I_gt, T, T_gt, points_per_instance, gt_parameters, list_epsilon=[0.01, 0.02], classes=['plane', 'sphere', 'cylinder', 'cone']):
    # Computes every evaluation metric in one pass and returns an 11-tuple:
    # (mIoU, type_accuracy, normal_difference, axis_difference, mean_residual,
    #  std_residual, Sk_coverage list, P_coverage list, hard W, predicted_parameters, instance T).
    # NOTE(review): list_epsilon/classes are mutable default arguments (not mutated here);
    # when diff > 0 the caller's gt_parameters dict IS modified in place (entries reassigned below).
    W = hard_W_encoding(W)
    T = get_instance_type(T, W)
    # Pad whichever side (prediction or ground truth) has fewer instance slots so both agree.
    diff = T.size(1) - T_gt.size(1)
    if diff>0:
        T_gt = torch.cat((T_gt, torch.zeros_like(T_gt[:, 0:1]).expand(-1, diff)), dim=1)
    elif diff < 0:
        W = torch.cat((W, torch.zeros_like(W[:,:,0:1]).expand(-1, -1, -diff)), dim=2)
        T = torch.cat((T, torch.zeros_like(T[:, 0:1]).expand(-1, -diff)), dim=1)
    matching_indices, mask = hungarian_matching(W, I_gt)
    mask = mask.float()
    mIoU = compute_segmentation_iou(W, I_gt, matching_indices, mask)
    type_accuracy = compute_type_accuracy(T, T_gt, matching_indices, mask)
    normal_difference = compute_normal_difference(X, X_gt)
    predicted_parameters = losses_implementation.compute_parameters(P, W, X)
    if diff > 0:
        # Pad the axis-bearing ground-truth parameters and sampled points to the new slot count.
        gt_parameters['plane_normal'] = torch.cat((gt_parameters['plane_normal'], torch.zeros_like(gt_parameters['plane_normal'][:, 0:1]).expand(-1, diff, 3)), dim=1)
        gt_parameters['cylinder_axis'] = torch.cat((gt_parameters['cylinder_axis'], torch.zeros_like(gt_parameters['cylinder_axis'][:, 0:1]).expand(-1, diff, 3)), dim=1)
        gt_parameters['cone_axis'] = torch.cat((gt_parameters['cone_axis'], torch.zeros_like(gt_parameters['cone_axis'][:, 0:1]).expand(-1, diff, 3)), dim=1)
        points_per_instance = torch.cat((points_per_instance, torch.zeros_like(points_per_instance[:,0:1]).expand(-1, diff, 512, 3)), dim=1)  # NOTE(review): hard-codes 512 samples per instance — confirm against dataset
    axis_difference = compute_axis_difference(predicted_parameters, gt_parameters, matching_indices, T, T_gt, mask, classes=classes)
    residue_loss = get_residual_loss(predicted_parameters, matching_indices, points_per_instance, T_gt, classes=classes)
    mean_residual, std_residual = compute_meanstd_Sk_residual(residue_loss, mask)
    Sk_coverage = []
    for epsilon in list_epsilon:
        Sk_coverage.append(compute_Sk_coverage(residue_loss, epsilon, mask))
    P_coverage = []
    for epsilon in list_epsilon:
        P_coverage.append(compute_P_coverage(P, T, matching_indices, predicted_parameters, epsilon, classes=classes))
    return mIoU, type_accuracy, normal_difference, axis_difference, mean_residual, std_residual, Sk_coverage, P_coverage, W, predicted_parameters, T
if __name__ == '__main__' and 1:
    # Debug harness for compute_all_metrics on random inputs.
    # Requires a CUDA device and the SPFN helpers defined elsewhere in this module.
    batch_size = 8
    num_points = 1024
    num_points_instance = 512
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    W = W / np.linalg.norm(W, axis=2, keepdims=True)
    T = np.random.rand(batch_size, num_points, 4)
    T = T / np.linalg.norm(T, axis=2, keepdims=True)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)
    X_gt = np.random.randn(batch_size, num_points, 3)
    X_gt = X_gt / np.linalg.norm(X_gt, axis=2, keepdims=True)
    T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
    I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
    plane_normal = np.random.randn(batch_size, n_max_instances, 3)
    plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
    plane_center = np.random.randn(batch_size, n_max_instances)
    sphere_center = np.random.randn(batch_size, n_max_instances, 3)
    sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
    cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
    cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
    cone_apex = np.random.randn(batch_size, n_max_instances, 3)
    cone_axis = np.random.randn(batch_size, n_max_instances, 3)
    cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
    gt_parameters = {'plane_normal': plane_normal,
                     'plane_center': plane_center,
                     'sphere_center': sphere_center,
                     'sphere_radius_squared': sphere_radius_squared,
                     'cylinder_axis': cylinder_axis,
                     'cylinder_center': cylinder_center,
                     'cylinder_radius_square': cylinder_radius_square,
                     'cone_apex': cone_apex,
                     'cone_axis': cone_axis,
                     'cone_half_angle': cone_half_angle}
    points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    T_torch = torch.from_numpy(T).float().to(device)
    X_torch = torch.from_numpy(X).float().to(device)
    X_gt_torch = torch.from_numpy(X_gt).float().to(device)
    T_gt_torch = torch.from_numpy(T_gt).long().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    # Idiom: one dict comprehension replaces ten hand-unrolled conversions.
    gt_parameters_torch = {key: torch.from_numpy(val).float().to(device) for key, val in gt_parameters.items()}
    points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
    # Bug fix: compute_all_metrics returns 11 values; unpacking only 8 raised a ValueError.
    (mIoU, type_accuracy, normal_difference, axis_difference, mean_residual, std_residual,
     Sk_coverage, P_coverage, W_hard, predicted_parameters, T_instance) = compute_all_metrics(
        P_torch, X_torch, X_gt_torch, W_torch, I_gt_torch, T_torch, T_gt_torch,
        points_per_instance_torch, gt_parameters_torch, classes=['plane', 'sphere', 'cylinder', 'cone'])
    print('mIoU', mIoU.size())
    print('type_accuracy', type_accuracy.size())
    print('normal_difference', normal_difference.size())
    print('axis_difference', axis_difference.size())
    print('mean_residual', mean_residual.size())
    print('std_residual', std_residual.size())
    for i in range(len(Sk_coverage)):
        print('Sk_coverage_%d'%i, Sk_coverage[i].size())
    for i in range(len(P_coverage)):
        print('P_coverage_%d'%i, P_coverage[i].size())
def creates_json(T, predicted_parameters):
    """Build one JSON description per primitive instance, dispatching on its type id.

    T: iterable of per-instance type ids (0 plane, 1 sphere, 2 cylinder, 3 cone).
    predicted_parameters: dict of batched tensors; batch element 0 is exported.
    Returns a list of JSON-serializable dicts, one per instance.
    Fix: trailing dataset-export residue corrupting the return line was removed.
    """
    def _np(key, idx):
        # Pull parameter `key` for instance `idx` of batch element 0 as a numpy array.
        return predicted_parameters[key][0, idx].cpu().numpy()
    list_json = []
    for i, type_id in enumerate(T):
        if type_id == 0:
            json = plane_fitter.extract_predicted_parameters_as_json(_np('plane_normal', i), _np('plane_center', i), i)
        elif type_id == 1:
            json = sphere_fitter.extract_predicted_parameters_as_json(_np('sphere_center', i), _np('sphere_radius_squared', i), i)
        elif type_id == 2:
            # NOTE(review): key 'cylinder_radius_squared' differs from the
            # 'cylinder_radius_square' spelling used for gt_parameters elsewhere in
            # this file — confirm against losses_implementation.compute_parameters.
            json = cylinder_fitter.extract_predicted_parameters_as_json(_np('cylinder_center', i), _np('cylinder_radius_squared', i), _np('cylinder_axis', i), i)
        elif type_id == 3:
            json = cone_fitter.extract_predicted_parameters_as_json(_np('cone_apex', i), _np('cone_axis', i), _np('cone_half_angle', i), i)
        list_json.append(json)
    return list_json
# Importing packages
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from SPFN.differentiable_tls import solve_weighted_tls, solve_weighted_tls_tensorflow
def compute_consistent_plane_frame(normal):
    """Build a tangent frame (x_axis, y_axis) for each plane normal.

    Input: normal - Bx3 tensor of (unit) plane normals.
    Returns: (x_axes, y_axes), both Bx3, orthogonal to the corresponding normal.

    Fixes: `normal.get_device()` fails (or returns -1) for CPU tensors — use
    `normal.device` instead; `torch.cross` is called with an explicit dim=1,
    since the legacy dim inference is deprecated and would pick dim 0 whenever
    batch_size happens to be 3.
    """
    device = normal.device
    batch_size, _ = normal.size()
    # Cross the normal with each candidate axis and keep the least-degenerate
    # result (largest norm), so a normal parallel to one candidate is harmless.
    candidate_axes = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]  # Actually, 2 should be enough. This may still cause singularity TODO!!!
    y_axes = []
    for tmp_axis in candidate_axes:
        torch_axis = torch.tensor(tmp_axis, dtype=normal.dtype, device=device).unsqueeze(0)
        y_axes.append(torch.cross(normal, torch_axis.expand(batch_size, 3), dim=1))
    y_axes = torch.stack(y_axes, dim=0)  # QxBx3
    y_axes_norm = torch.norm(y_axes, dim=2)  # QxB
    # choose the axis with largest norm
    y_axes_chosen_idx = torch.argmax(y_axes_norm, dim=0)  # B
    y_axes_chosen_idx = y_axes_chosen_idx.view(1, batch_size, 1).expand(1, batch_size, 3)
    # y_axes_chosen[b, :] = y_axes[y_axes_chosen_idx[b], b, :]
    y_axes = torch.gather(y_axes, 0, y_axes_chosen_idx).squeeze(0)
    y_axes = torch.nn.functional.normalize(y_axes, p=2, dim=1, eps=1e-12)
    x_axes = torch.cross(y_axes, normal, dim=1)  # Bx3
    return x_axes, y_axes
def compute_consistent_plane_frame_tensorflow(normal):
    # TF1 graph-mode twin of compute_consistent_plane_frame, kept for cross-checking.
    # Input: normal is Bx3
    # Returns: x_axis, y_axis, both of dimension Bx3
    batch_size = tf.shape(normal)[0]
    candidate_axes = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]  # Actually, 2 should be enough. This may still cause singularity TODO!!!
    y_axes = []
    for tmp_axis in candidate_axes:
        tf_axis = tf.tile(tf.expand_dims(tf.constant(dtype=tf.float32, value=tmp_axis), axis=0), [batch_size, 1])  # Bx3
        y_axes.append(tf.cross(normal, tf_axis))
    y_axes = tf.stack(y_axes, axis=0)  # QxBx3
    y_axes_norm = tf.norm(y_axes, axis=2)  # QxB
    # choose the axis with largest norm
    y_axes_chosen_idx = tf.argmax(y_axes_norm, axis=0)  # B
    # y_axes_chosen[b, :] = y_axes[y_axes_chosen_idx[b], b, :]
    # Build explicit (q, b, c) indices for gather_nd, since TF1 has no batched
    # gather along axis 0 for this layout.
    indices_0 = tf.tile(tf.expand_dims(y_axes_chosen_idx, axis=1), [1, 3])  # Bx3
    indices_1 = tf.tile(tf.expand_dims(tf.range(batch_size), axis=1), [1, 3])  # Bx3
    indices_2 = tf.tile(tf.expand_dims(tf.range(3), axis=0), [batch_size, 1])  # Bx3
    indices = tf.stack([tf.cast(indices_0, tf.int32), indices_1, indices_2], axis=2)  # Bx3x3
    y_axes = tf.gather_nd(y_axes, indices=indices)  # Bx3
    # l2_normalize's axis kwarg was still named `dim` in old TF releases.
    if tf.VERSION == '1.4.1':
        y_axes = tf.nn.l2_normalize(y_axes, dim=1)
    else:
        y_axes = tf.nn.l2_normalize(y_axes, axis=1)
    x_axes = tf.cross(y_axes, normal)  # Bx3
    return x_axes, y_axes
if __name__ == '__main__':
    # Debug harness: compare the torch frame computation against the TF1
    # reference on random unit normals. Requires a CUDA device and TF1.
    batch_size = 100
    device = torch.device('cuda:0')
    np.random.seed(0)
    normal = np.random.randn(batch_size, 3)
    normal = normal / np.linalg.norm(normal, axis=1, keepdims=True)
    normal_torch = torch.from_numpy(normal).float().to(device)
    x_axes_torch, y_axes_torch = compute_consistent_plane_frame(normal_torch)
    x_axes_torch = x_axes_torch.detach().cpu().numpy()
    y_axes_torch = y_axes_torch.detach().cpu().numpy()
    print('x_axes_torch', x_axes_torch)
    print('y_axes_torch', y_axes_torch)
    # Debugging with Tensorflow
    normal_tensorflow = tf.constant(normal, dtype=tf.float32)
    x_axes_tensorflow, y_axes_tensorflow = compute_consistent_plane_frame_tensorflow(normal_tensorflow)
    sess = tf.Session()
    x_axes_tensorflow, y_axes_tensorflow = sess.run([x_axes_tensorflow, y_axes_tensorflow])
    # Max elementwise discrepancy between the two implementations.
    print(np.max(np.abs(x_axes_tensorflow-x_axes_torch)))
def weighted_plane_fitting(P, W, division_eps=1e-10):
    """Fit a plane n.x = c to points P (BxNx3) with per-point weights W (BxN).

    The normal n (Bx3) comes from weighted total least squares on the centered
    points; c (B) is the offset of the weighted centroid along n.
    """
    weights = W.unsqueeze(2)  # BxNx1
    weight_sum = W.sum(dim=1, keepdim=True)  # Bx1
    # Weighted centroid; division guarded against an all-zero weight row.
    centroid = (P * weights).sum(dim=1) / torch.clamp(weight_sum, min=division_eps)  # Bx3
    centered = P - centroid.unsqueeze(1)  # BxNx3
    n = solve_weighted_tls(centered, W)  # Bx3
    c = (n * centroid).sum(dim=1)  # B
    return n, c
def weighted_plane_fitting_tensorflow(P, W, division_eps=1e-10):
    # TF1 graph-mode twin of weighted_plane_fitting, kept for cross-checking.
    # P - BxNx3
    # W - BxN
    # Returns n, c, with n - Bx3, c - B
    WP = P * tf.expand_dims(W, axis=2)  # BxNx3
    W_sum = tf.reduce_sum(W, axis=1)  # B
    # Weighted centroid; division guarded against an all-zero weight row.
    P_weighted_mean = tf.reduce_sum(WP, axis=1) / tf.maximum(tf.expand_dims(W_sum, 1), division_eps)  # Bx3
    A = P - tf.expand_dims(P_weighted_mean, axis=1)  # BxNx3
    n = solve_weighted_tls_tensorflow(A, W)  # Bx3
    c = tf.reduce_sum(n * P_weighted_mean, axis=1)
    return n, c
if __name__ == '__main__':
    # Debug harness: compare torch weighted_plane_fitting against the TF1
    # reference on random data. Requires a CUDA device and TF1.
    batch_size = 100
    num_points = 1024
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points)
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    n_torch, c_torch = weighted_plane_fitting(P_torch, W_torch)
    n_torch = n_torch.detach().cpu().numpy()
    c_torch = c_torch.detach().cpu().numpy()
    print('n_torch', n_torch)
    print('c_torch', c_torch)
    # Debugging with Tensorflow
    P_tensorflow = tf.constant(P, dtype=tf.float32)
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    n_tensorflow, c_tensorflow = weighted_plane_fitting_tensorflow(P_tensorflow, W_tensorflow)
    sess = tf.Session()
    n_tensorflow, c_tensorflow = sess.run([n_tensorflow, c_tensorflow])
    # TLS normals are sign-ambiguous, so compare up to a global sign flip.
    print(np.minimum(np.abs(n_tensorflow - n_torch), np.abs(n_tensorflow + n_torch)).max())
    print(np.minimum(np.abs(c_tensorflow - c_torch), np.abs(c_tensorflow + c_torch)).max())
def guarded_matrix_solve_ls(A, b, W, condition_number_cap=1e5, sqrt_eps=1e-10, ls_l2_regularizer=1e-8):
    """Solve the weighted least-squares problem min_x ||sqrt(W)(Ax - b)||^2 per batch.

    A - BxNxD design matrices, b - BxNx1 targets, W - BxN non-negative weights.
    Batches whose normal matrix is too ill-conditioned (condition number above
    condition_number_cap) are zeroed out so their solution collapses towards 0
    instead of blowing up; the l2 regularizer keeps the system invertible.
    Returns x of shape BxD.

    Fixes: torch.solve was removed in PyTorch 1.13 and torch.svd is deprecated;
    replaced with torch.linalg.solve / torch.linalg.svdvals (same semantics:
    svdvals returns singular values in descending order, as torch.svd did).
    """
    batch_size, _, dim = A.size()
    sqrt_W = torch.sqrt(torch.clamp(W, min=sqrt_eps)).unsqueeze(2)  # BxNx1
    A = A * sqrt_W  # BxNxD
    b = b * sqrt_W  # BxNx1
    AtA = torch.bmm(A.transpose(1, 2), A)  # BxDxD normal matrix
    # Gradients must not flow through the conditioning test.
    s = torch.linalg.svdvals(AtA).detach()  # BxD, descending
    mask = (s[:, 0] / s[:, -1] < condition_number_cap).float().view(batch_size, 1, 1)
    AtA = AtA * mask + ls_l2_regularizer * torch.eye(dim, device=A.device).unsqueeze(0)  # zero out badly conditioned data
    Atb = torch.bmm(A.transpose(1, 2) * mask, b)
    x = torch.linalg.solve(AtA, Atb)  # BxDx1
    return x.squeeze(2)  # BxD
if __name__ == '__main__':
    # Smoke test of the masked normal-equation solve used inside
    # guarded_matrix_solve_ls, with the conditioning mask forced to all zeros
    # (worst case: only the l2 regularizer keeps AtA invertible). Requires CUDA.
    sqrt_eps = 1e-10
    ls_l2_regularizer = 1e-8
    batch_size = 100
    num_points = 1024
    dimension = 3
    device = torch.device('cuda:0')
    np.random.seed(0)
    A = np.random.randn(batch_size, num_points, dimension)
    b = np.random.randn(batch_size, num_points, 1)
    W = np.random.rand(batch_size, num_points)
    A = torch.from_numpy(A).float().to(device)
    b = torch.from_numpy(b).float().to(device)
    W = torch.from_numpy(W).float().to(device)
    sqrt_W = torch.sqrt(torch.clamp(W, sqrt_eps)).unsqueeze(2)  # BxNx1
    A = A * sqrt_W  # BxNxD
    b = b * sqrt_W  # BxNx1
    AtA = torch.bmm(A.transpose(1, 2), A)
    mask = torch.zeros([batch_size]).float().to(A.device)  # B
    AtA = AtA * mask.view(batch_size, 1, 1) + ls_l2_regularizer * torch.eye(dimension).unsqueeze(0).to(device)  # zero out badly conditioned data
    Atb = torch.bmm(A.transpose(1, 2) * mask.view(batch_size, 1, 1), b)
    # Bug fix: torch.solve was removed in PyTorch 1.13; use torch.linalg.solve.
    x = torch.linalg.solve(AtA, Atb)
def guarded_matrix_solve_ls_tensorflow(A, b, W, condition_number_cap=1e5, sqrt_eps=1e-10, ls_l2_regularizer=1e-8):
    # TF1 graph-mode twin of guarded_matrix_solve_ls, kept for cross-checking.
    # Solve weighted least square ||\sqrt(W)(Ax-b)||^2
    # A - BxNxD
    # b - BxNx1
    # W - BxN
    sqrt_W = tf.sqrt(tf.maximum(W, sqrt_eps))  # BxN
    A *= tf.expand_dims(sqrt_W, axis=2)  # BxNxD
    b *= tf.expand_dims(sqrt_W, axis=2)  # BxNx1
    # Compute singular value, trivializing the problem when condition number is too large
    AtA = tf.matmul(a=A, b=A, transpose_a=True)
    # stop_gradient: the conditioning test must not contribute gradients.
    s, _, _ = [tf.stop_gradient(u) for u in tf.svd(AtA)]  # s will be BxD
    mask = tf.less(s[:, 0] / s[:, -1], condition_number_cap)  # B
    A *= tf.to_float(tf.expand_dims(tf.expand_dims(mask, axis=1), axis=2))  # zero out badly conditioned data
    x = tf.matrix_solve_ls(A, b, l2_regularizer=ls_l2_regularizer, fast=True)  # BxDx1
    return tf.squeeze(x, axis=2)  # BxD
if __name__ == '__main__':
    # Debug harness: compare torch guarded_matrix_solve_ls against the TF1
    # reference on random data. Requires a CUDA device and TF1.
    batch_size = 100
    num_points = 1024
    dimension = 3
    device = torch.device('cuda:0')
    np.random.seed(0)
    A = np.random.randn(batch_size, num_points, dimension)
    b = np.random.randn(batch_size, num_points, 1)
    W = np.random.rand(batch_size, num_points)
    A_torch = torch.from_numpy(A).float().to(device)
    b_torch = torch.from_numpy(b).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    x_torch = guarded_matrix_solve_ls(A_torch, b_torch, W_torch)
    x_torch = x_torch.detach().cpu().numpy()
    print('x_torch', x_torch)
    # Debugging with Tensorflow
    A_tensorflow = tf.constant(A, dtype=tf.float32)
    b_tensorflow = tf.constant(b, dtype=tf.float32)
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    x_tensorflow = guarded_matrix_solve_ls_tensorflow(A_tensorflow, b_tensorflow, W_tensorflow)
    sess = tf.Session()
    x_tensorflow = sess.run(x_tensorflow)
    print(np.max(np.abs(x_tensorflow - x_torch)))
def weighted_sphere_fitting(P, W, division_eps=1e-10):
    """Weighted least-squares sphere fit.

    P - BxNxD points, W - BxN weights. Linearizes |p - c|^2 = r^2 into a
    least-squares system for the center c, then recovers r^2 as the weighted
    mean squared distance to c. Returns (center BxD, r_sqr B).
    """
    safe_w_sum = torch.clamp(torch.sum(W, dim=1), min=division_eps)  # B
    sq_norms = torch.sum(P * P, dim=2)  # BxN
    weighted_mean_sq = torch.sum(W * sq_norms, dim=1) / safe_w_sum  # B
    b = (weighted_mean_sq.unsqueeze(1) - sq_norms).unsqueeze(2)  # BxNx1
    weighted_mean_p = torch.sum(W.unsqueeze(2) * P, dim=1) / safe_w_sum.unsqueeze(1)  # BxD
    A = 2 * (weighted_mean_p.unsqueeze(1) - P)  # BxNxD
    # Least-norm solution of the guarded weighted least squares.
    center = guarded_matrix_solve_ls(A, b, W)  # BxD
    offsets = P - center.unsqueeze(1)  # BxNxD
    r_sqr = torch.sum(W * torch.sum(offsets * offsets, dim=2), dim=1) / safe_w_sum  # B
    return center, r_sqr
def weighted_sphere_fitting_tensorflow(P, W, division_eps=1e-10):
    # TF1 graph-mode twin of weighted_sphere_fitting, kept for cross-checking.
    # P - BxNxD
    # W - BxN
    W_sum = tf.reduce_sum(W, axis=1)  # B
    WP_sqr_sum = tf.reduce_sum(W * tf.reduce_sum(tf.square(P), axis=2), axis=1)  # B
    P_sqr = tf.reduce_sum(tf.square(P), axis=2)  # BxN
    b = tf.expand_dims(tf.expand_dims(WP_sqr_sum / tf.maximum(W_sum, division_eps), axis=1) - P_sqr, axis=2)  # BxNx1
    WP_sum = tf.reduce_sum(tf.expand_dims(W, axis=2) * P, axis=1)  # BxD
    A = 2 * (tf.expand_dims(WP_sum / tf.expand_dims(tf.maximum(W_sum, division_eps), axis=1), axis=1) - P)  # BxNxD
    # Seek least norm solution to the least square
    center = guarded_matrix_solve_ls_tensorflow(A, b, W)  # BxD
    W_P_minus_C_sqr_sum = P - tf.expand_dims(center, axis=1)  # BxNxD
    W_P_minus_C_sqr_sum = W * tf.reduce_sum(tf.square(W_P_minus_C_sqr_sum), axis=2)  # BxN
    r_sqr = tf.reduce_sum(W_P_minus_C_sqr_sum, axis=1) / tf.maximum(W_sum, division_eps)  # B
    return center, r_sqr
if __name__ == '__main__':
    # Debug harness: compare torch weighted_sphere_fitting against the TF1
    # reference on random data. Requires a CUDA device and TF1.
    # Fix: dataset-export residue ("| 12,490 | ...") stripped from the last line.
    batch_size = 100
    num_points = 1024
    dimension = 3
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, dimension)
    W = np.random.rand(batch_size, num_points)
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    center_torch, r_sqr_torch = weighted_sphere_fitting(P_torch, W_torch)
    center_torch = center_torch.detach().cpu().numpy()
    r_sqr_torch = r_sqr_torch.detach().cpu().numpy()
    print('center_torch', center_torch)
    print('r_sqr_torch', r_sqr_torch)
    # Debugging with Tensorflow
    P_tensorflow = tf.constant(P, dtype=tf.float32)
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    center_tensorflow, r_sqr_tensorflow = weighted_sphere_fitting_tensorflow(P_tensorflow, W_tensorflow)
    sess = tf.Session()
    center_tensorflow, r_sqr_tensorflow = sess.run([center_tensorflow, r_sqr_tensorflow])
    print(np.max(np.abs(center_tensorflow - center_torch)))
    print(np.max(np.abs(r_sqr_tensorflow - r_sqr_torch)))
# Importation of packages
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from torch.autograd import gradcheck
def guard_one_over_matrix(M, min_abs_value=1e-10):
    """Elementwise 1/M for a batch of square matrices, guarded against ~0 entries.

    Upper-triangle entries (incl. diagonal) are clamped to >= min_abs_value and
    lower-triangle entries (incl. diagonal) to <= -min_abs_value before summing;
    adding/subtracting the identity around the reciprocal keeps an exactly-zero
    diagonal (as produced by compute_svd_K) at exactly 0 in the output.
    M - BxPxP. Returns BxPxP.

    Fix: `M.get_device()` fails (or returns -1) for CPU tensors; `M.device`
    works for CPU and CUDA alike.
    """
    _, row, _ = M.size()
    eye = torch.eye(row, device=M.device)
    up = torch.triu(torch.clamp(M, min=min_abs_value), diagonal=0)
    low = torch.tril(torch.clamp(M, max=-min_abs_value), diagonal=0)
    M = up + low
    M = M + eye
    M = 1 / M
    M = M - eye
    return M
def guard_one_over_matrix_tensorflow(M, min_abs_value=1e-10):
    """TF1 twin of guard_one_over_matrix: guarded element-wise 1/M.

    Upper triangle (incl. diagonal) is clamped to >= min_abs_value, lower
    triangle to <= -min_abs_value; an identity is added before and removed
    after inversion so a zero input diagonal maps back to zero.
    """
    up = tf.matrix_band_part(tf.maximum(min_abs_value, M), 0, -1)
    low = tf.matrix_band_part(tf.minimum(-min_abs_value, M), -1, 0)
    M = up + low
    M += tf.eye(tf.shape(M)[1])
    M = 1 / M
    M -= tf.eye(tf.shape(M)[1])
    return M
if __name__ == '__main__':
    # Parity smoke test: guard_one_over_matrix (torch, GPU) vs its TF1 twin
    # on the same random matrices; prints the max absolute difference.
    batch_size = 100
    P = 5
    device = torch.device('cuda:0')
    np.random.seed(0)
    M = np.random.randn(batch_size, P, P)
    M_torch = torch.from_numpy(M).float().to(device)
    M_torch = guard_one_over_matrix(M_torch)
    M_torch = M_torch.detach().cpu().numpy()
    print('M_torch', M_torch)
    # Debugging with Tensorflow
    M_tensorflow = tf.constant(M, dtype=tf.float32)
    M_tensorflow_ = guard_one_over_matrix_tensorflow(M_tensorflow)
    sess = tf.Session()
    M_tensorflow = sess.run(M_tensorflow_)
    print(np.max(np.abs(M_tensorflow - M_torch)))
def compute_svd_K(s):
    """Denominator matrix used by the custom SVD backward pass.

    Given singular values s of shape BxP, returns a BxPxP tensor whose
    (b, i, j) entry is 1 / (s[b, i]^2 - s[b, j]^2) for i != j and 0 on the
    diagonal, with denominators guarded away from zero.
    """
    sq = s ** 2  # BxP
    pairwise_diff = sq.unsqueeze(2) - sq.unsqueeze(1)  # BxPxP
    # Keep |denominator| >= 1e-10 before taking reciprocals.
    return guard_one_over_matrix(pairwise_diff)
def compute_svd_K_tensorflow(s):
    """TF1 twin of compute_svd_K (denominator matrix for the SVD backward)."""
    # s should be BxP
    # res[b,i,j] = 1/(s[b,i]^2 - s[b,j]^2) if i != j, 0 otherwise
    # res will be BxPxP
    s = tf.square(s)
    res = tf.expand_dims(s, 2) - tf.expand_dims(s, 1)
    # making absolute value in res is at least 1e-10
    res = guard_one_over_matrix_tensorflow(res)
    return res
if __name__ == '__main__':
    # Parity smoke test: compute_svd_K (torch, GPU) vs its TF1 twin.
    batch_size = 100
    P = 5
    device = torch.device('cuda:0')
    np.random.seed(0)
    s = np.random.randn(batch_size, P)
    s_torch = torch.from_numpy(s).float().to(device)
    res_torch = compute_svd_K(s_torch)
    res_torch = res_torch.detach().cpu().numpy()
    print('res_torch', res_torch)
    # Debugging with Tensorflow
    s_tensorflow = tf.constant(s, dtype=tf.float32)
    res_tensorflow = compute_svd_K_tensorflow(s_tensorflow)
    sess = tf.Session()
    res_tensorflow = sess.run(res_tensorflow)
    print(np.max(np.abs(res_tensorflow - res_torch)))
def custom_svd_v_column_tensorflow(M, col_index=-1):
    """Return column `col_index` of V from M = U S V^T with a custom gradient.

    Requires register_custom_svd_gradient_tensorflow() to have been called so
    the 'CustomSvd' gradient override is available in the graph.
    """
    # Must make sure M is finite. Otherwise cudaSolver might fail.
    assert_op = tf.Assert(tf.logical_not(tf.reduce_any(tf.logical_not(tf.is_finite(M)))), [M], summarize=10)
    with tf.control_dependencies([assert_op]):
        with tf.get_default_graph().gradient_override_map({'Svd': 'CustomSvd'}):
            s, u, v = tf.svd(M, name='Svd') # M = usv^T
            return v[:, :, col_index]
def register_custom_svd_gradient_tensorflow():
    """Register custom_gradient_svd_tensorflow under the op name 'CustomSvd'.

    Call once before building graphs that use custom_svd_v_column_tensorflow.
    """
    tf.RegisterGradient('CustomSvd')(custom_gradient_svd_tensorflow)
def custom_gradient_svd_tensorflow(op, grad_s, grad_u, grad_v):
    """Gradient for the 'CustomSvd' op, backpropagating only through V.

    Computes dM = U (2 diag(s) sym(K^T * (V^T dV))) V^T, where
    K[i,j] = 1/(s_i^2 - s_j^2) (guarded by compute_svd_K_tensorflow).
    grad_s and grad_u are deliberately ignored since only a column of V is
    consumed downstream.
    """
    s, u, v = op.outputs
    # s - BxP
    # u - BxNxP, N >= P
    # v - BxPxP
    v_t = tf.transpose(v, [0, 2, 1])
    K = compute_svd_K_tensorflow(s)
    inner = tf.transpose(K, [0, 2, 1]) * tf.matmul(v_t, grad_v)
    inner = (inner + tf.transpose(inner, [0, 2, 1])) / 2
    # ignoring gradient coming from grad_s and grad_u for our purpose
    res = tf.matmul(u, tf.matmul(2 * tf.matmul(tf.matrix_diag(s), inner), v_t))
    return res
if __name__ == '__main__' and 1:
    # Numerical gradient check of the TF custom SVD gradient.
    register_custom_svd_gradient_tensorflow()
    batch_size = 100
    P = 5
    device = torch.device('cuda:0')
    np.random.seed(0)
    M = np.random.randn(batch_size, P, P)
    M_tensorflow = tf.constant(M, dtype=tf.float32)
    M_input = tf.placeholder(dtype=tf.float32, shape=[None, P, P])
    with tf.get_default_graph().gradient_override_map({'Svd': 'CustomSvd'}):
        s, u, v = tf.svd(M_input, name='Svd') # M = usv^T
    with tf.Session() as sess:
        error = tf.test.compute_gradient_error(M_input, [batch_size, P, P], v, [batch_size, P, P])
        print('Error: ', error)
class Custom_svd_v_colum(torch.autograd.Function):
    """Differentiable extraction of one column of V from M = U S V^T.

    Only the gradient flowing through the selected column of V is propagated
    back to M; the contributions through U and S are deliberately dropped.
    """

    @staticmethod
    def forward(ctx, M, col_index=-1):
        # Thin (reduced) SVD; only the requested column of V is returned.
        U, S, V = torch.svd(M, some=True)
        ctx.save_for_backward(U, S, V)
        ctx.col_index = col_index
        return V[:, :, col_index]

    @staticmethod
    def backward(ctx, grad_out):
        U, S, V = ctx.saved_tensors
        # Scatter the incoming gradient into a full dL/dV tensor.
        dV = torch.zeros_like(V)
        dV[:, :, ctx.col_index] = grad_out
        Vt = V.transpose(1, 2)
        K = compute_svd_K(S)  # K[b,i,j] = 1/(s_i^2 - s_j^2), guarded
        sym = K.transpose(1, 2) * torch.bmm(Vt, dV)
        sym = (sym + sym.transpose(1, 2)) / 2
        # ignoring gradient coming from grad_s and grad_u for our purpose
        S_diag = torch.diag_embed(S, offset=0, dim1=-2, dim2=-1)
        dM = torch.bmm(U, torch.bmm(2 * torch.bmm(S_diag, sym), Vt))
        return dM, None
if __name__ == '__main__':
    # Parity check: Custom_svd_v_colum forward vs the TF implementation,
    # compared up to the global sign ambiguity of singular vectors.
    batch_size = 100
    P = 5
    device = torch.device('cuda:0')
    np.random.seed(0)
    M = np.random.randn(batch_size, P, P)
    M_torch = torch.from_numpy(M).float().to(device)
    out_torch = Custom_svd_v_colum().apply(M_torch)
    out_torch = out_torch.detach().cpu().numpy()
    print('out_torch', out_torch)
    # Debugging with Tensorflow
    M_tensorflow = tf.constant(M, dtype=tf.float32)
    out_tensorflow = custom_svd_v_column_tensorflow(M_tensorflow)
    sess = tf.Session()
    out_tensorflow = sess.run(out_tensorflow)
    print(np.minimum(np.abs(out_tensorflow-out_torch), np.abs(out_tensorflow+out_torch)).max())
if __name__ == '__main__' and 1:
    # Numerical gradient check of the torch custom autograd Function.
    batch_size = 4
    P = 5
    device = torch.device('cuda:0')
    np.random.seed(0)
    M = np.random.randn(batch_size, P, P)
    M_torch = torch.from_numpy(M).float().to(device)
    M_torch = torch.nn.Parameter(M_torch, requires_grad=True)
    try:
        custom_svd_v_colum = Custom_svd_v_colum.apply
        torch.autograd.gradcheck(custom_svd_v_colum, (M_torch, -1), raise_exception=True)
        print('Test on Custom_svd_v_colum: Success')
    except:
        print('Test on Custom_svd_v_colum: Failure')
        raise
if __name__ == '__main__' and 1:
    # Parity check of the torch vs TF backward pass (gradients wrt M),
    # again compared up to a global sign.
    register_custom_svd_gradient_tensorflow()
    batch_size = 100
    P = 5
    device = torch.device('cuda:0')
    np.random.seed(0)
    M = np.random.randn(batch_size, P, P)
    M_torch = torch.from_numpy(M).float().to(device)
    M_torch = torch.nn.Parameter(M_torch, requires_grad=True)
    out = Custom_svd_v_colum().apply(M_torch)
    out.backward(torch.ones_like(out))
    M_grad_torch = M_torch.grad.detach().cpu().numpy()
    M_tensorflow = tf.constant(M, dtype=tf.float32)
    out = custom_svd_v_column_tensorflow(M_tensorflow)
    M_grad_tensorflow = tf.gradients(out, [M_tensorflow])[0]
    sess = tf.Session()
    M_grad_tensorflow = sess.run(M_grad_tensorflow)
    print(np.minimum(np.abs(M_grad_tensorflow - M_grad_torch), np.abs(M_grad_tensorflow + M_grad_torch)).max())
def solve_weighted_tls(A, W):
    """Weighted total-least-squares direction estimate.

    Finds the unit vector x minimizing x^T A^T diag(W) A x, i.e.
    ||sqrt(diag(W)) A x||^2, via the last right-singular vector of the
    weighted scatter matrix.

    Args:
        A: BxNx3 tensor of per-point rows.
        W: BxN tensor of positive weights.
    Returns:
        Bx3 tensor of unit directions.
    """
    batch_size, n_points, _ = A.size()
    outer = A.unsqueeze(2) * A.unsqueeze(3)  # BxNx3x3 outer products A_i A_i^T
    weights = W.view(batch_size, n_points, 1, 1)
    M = (weights * outer).sum(dim=1)  # Bx3x3 weighted scatter matrix
    # Last column of V == eigenvector with the smallest eigenvalue.
    return Custom_svd_v_colum.apply(M)
def solve_weighted_tls_tensorflow(A, W):
    """TF1 twin of solve_weighted_tls: unit x minimizing ||sqrt(diag(W)) A x||^2."""
    # A - BxNx3
    # W - BxN, positive weights
    # Find solution to min x^T A^T diag(W) A x = min ||\sqrt{diag(W)} A x||^2, subject to ||x|| = 1
    A_p = tf.expand_dims(A, axis=2) * tf.expand_dims(A, axis=3) # BxNx3x3
    W_p = tf.expand_dims(tf.expand_dims(W, axis=2), axis=3) # BxNx1x1
    M = tf.reduce_sum(W_p * A_p, axis=1) # Bx3x3
    x = custom_svd_v_column_tensorflow(M) # Bx3
    return x
if __name__ == '__main__':
    # Parity smoke test: solve_weighted_tls (torch, GPU) vs the TF1 twin,
    # compared up to the sign ambiguity of the solution direction.
    batch_size = 100
    num_points = 1024
    device = torch.device('cuda:0')
    np.random.seed(0)
    A = np.random.randn(batch_size, num_points, 3)
    W = np.random.randn(batch_size, num_points)
    A_torch = torch.from_numpy(A).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    x_torch = solve_weighted_tls(A_torch, W_torch)
    x_torch = x_torch.detach().cpu().numpy()
    print('x_torch', x_torch)
    # Debugging with Tensorflow
    A_tensorflow = tf.constant(A, dtype=tf.float32)
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    x_tensorflow = solve_weighted_tls_tensorflow(A_tensorflow, W_tensorflow)
    sess = tf.Session()
    x_tensorflow = sess.run(x_tensorflow)
    print(np.minimum(np.abs(x_tensorflow-x_torch), np.abs(x_tensorflow+x_torch)).max()) | 8,960 | 36.493724 | 113 | py |
CPFN | CPFN-master/SPFN/cylinder_fitter.py | # Importation of pqckqges
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from SPFN.primitives import Cylinder
from SPFN.differentiable_tls import solve_weighted_tls, solve_weighted_tls_tensorflow
from SPFN.geometry_utils import compute_consistent_plane_frame, compute_consistent_plane_frame_tensorflow, weighted_sphere_fitting, weighted_sphere_fitting_tensorflow
def compute_parameters(P, W, X):
    """Fit K cylinders per batch from points, soft memberships and normals.

    Args:
        P: BxNx3 points.
        W: BxNxK per-primitive soft membership weights.
        X: BxNx3 per-point normals.
    Returns:
        (axis BxKx3, center BxKx3, radius_square BxK): the cylinder axis is
        the weighted-TLS null direction of the normals; points are then
        projected into the plane frame orthogonal to it and a weighted 2D
        circle fit yields center and squared radius.
    """
    # First determine n as the solution to \min \sum W_i (X_i \cdot n)^2
    batch_size, n_points, _ = P.size()
    _, _, n_max_primitives = W.size()
    # Flatten the primitive dimension into the batch so each (batch, primitive)
    # pair is fitted independently.
    W_reshaped = W.transpose(1,2).contiguous().view(batch_size * n_max_primitives, n_points) # BKxN
    X_reshaped = X.unsqueeze(1).expand(batch_size, n_max_primitives, n_points, 3).contiguous().view(batch_size * n_max_primitives, n_points, 3)
    n = solve_weighted_tls(X_reshaped, W_reshaped).view(batch_size, n_max_primitives, 3) # BxKx3
    x_axes, y_axes = compute_consistent_plane_frame(n.view(batch_size * n_max_primitives, 3))
    x_axes = x_axes.view(batch_size, n_max_primitives, 3) # BxKx3
    y_axes = y_axes.view(batch_size, n_max_primitives, 3) # BxKx3
    # 2D coordinates of every point in each primitive's plane frame.
    x_coord = torch.sum(P.unsqueeze(1) * x_axes.unsqueeze(2), dim=3) # BxKxN
    y_coord = torch.sum(P.unsqueeze(1) * y_axes.unsqueeze(2), dim=3) # BxKxN
    P_proj = torch.stack([x_coord, y_coord], dim=3) # BxKxNx2, 2D projection point
    P_proj_reshaped = P_proj.view(batch_size * n_max_primitives, n_points, 2) # BKxNx2
    circle_center, circle_radius_squared = weighted_sphere_fitting(P_proj_reshaped, W_reshaped)
    circle_center = circle_center.view(batch_size, n_max_primitives, 2) # BxKx2
    # Lift the 2D circle center back into 3D via the plane frame axes.
    center = circle_center[:,:,0].unsqueeze(2) * x_axes + circle_center[:,:,1].unsqueeze(2) * y_axes # BxKx3
    radius_square = circle_radius_squared.view(batch_size, n_max_primitives) # BxK
    return n, center, radius_square
def compute_parameters_tensorflow(P, W, X):
    """TF1 twin of compute_parameters: per-primitive cylinder fitting.

    Returns (axis BxKx3, center BxKx3, radius_square BxK).
    """
    # First determine n as the solution to \min \sum W_i (X_i \cdot n)^2
    batch_size = tf.shape(P)[0]
    n_points = tf.shape(P)[1]
    n_max_primitives = tf.shape(W)[2]
    W_reshaped = tf.reshape(tf.transpose(W, [0, 2, 1]), [batch_size * n_max_primitives, n_points]) # BKxN
    X_reshaped = tf.reshape(tf.tile(tf.expand_dims(X, axis=1), [1, n_max_primitives, 1, 1]), [batch_size * n_max_primitives, n_points, 3]) # BKxNx3
    n = tf.reshape(solve_weighted_tls_tensorflow(X_reshaped, W_reshaped), [batch_size, n_max_primitives, 3]) # BxKx3
    x_axes, y_axes = compute_consistent_plane_frame_tensorflow(tf.reshape(n, [batch_size * n_max_primitives, 3]))
    x_axes = tf.reshape(x_axes, [batch_size, n_max_primitives, 3]) # BxKx3
    y_axes = tf.reshape(y_axes, [batch_size, n_max_primitives, 3]) # BxKx3
    x_coord = tf.reduce_sum(tf.expand_dims(P, axis=1) * tf.expand_dims(x_axes, axis=2), axis=3) # BxKxN
    y_coord = tf.reduce_sum(tf.expand_dims(P, axis=1) * tf.expand_dims(y_axes, axis=2), axis=3) # BxKxN
    P_proj = tf.stack([x_coord, y_coord], axis=3) # BxKxNx2, 2D projection point
    P_proj_reshaped = tf.reshape(P_proj, [batch_size * n_max_primitives, n_points, 2]) # BKxNx2
    circle_center, circle_radius_squared = weighted_sphere_fitting_tensorflow(P_proj_reshaped, W_reshaped)
    circle_center = tf.reshape(circle_center, [batch_size, n_max_primitives, 2]) # BxKx2
    center = tf.expand_dims(circle_center[:, :, 0], axis=2) * x_axes + tf.expand_dims(circle_center[:, :, 1], axis=2) * y_axes # BxKx3
    radius_square = tf.reshape(circle_radius_squared, [batch_size, n_max_primitives]) # BxK
    return n, center, radius_square
if __name__ == '__main__':
    # Parity smoke test: cylinder compute_parameters (torch, GPU) vs TF1 twin.
    batch_size = 100
    num_points = 1024
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    X_torch = torch.from_numpy(X).float().to(device)
    n_torch, center_torch, radius_square_torch = compute_parameters(P_torch, W_torch, X_torch)
    n_torch = n_torch.detach().cpu().numpy()
    center_torch = center_torch.detach().cpu().numpy()
    radius_square_torch = radius_square_torch.detach().cpu().numpy()
    print('n_torch', n_torch)
    print('center_torch', center_torch)
    print('radius_square_torch', radius_square_torch)
    # Debugging with Tensorflow
    P_tensorflow = tf.constant(P, dtype=tf.float32)
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    X_tensorflow = tf.constant(X, dtype=tf.float32)
    n_tensorflow, center_tensorflow, radius_square_tensorflow = compute_parameters_tensorflow(P_tensorflow, W_tensorflow, X_tensorflow)
    sess = tf.Session()
    n_tensorflow, center_tensorflow, radius_square_tensorflow = sess.run([n_tensorflow, center_tensorflow, radius_square_tensorflow])
    print(np.minimum(np.abs(n_tensorflow - n_torch), np.abs(n_tensorflow + n_torch)).max())
    print(np.abs(center_tensorflow - center_torch).max())
    print(np.abs(radius_square_tensorflow - radius_square_torch).max())
def sqrt_safe(x):
    """Numerically safe square root: sqrt(|x| + 1e-10), finite gradient at 0."""
    return (x.abs() + 1e-10).sqrt()
def compute_residue_single(axis, center, radius_squared, p):
    """Squared residual of point p against a cylinder surface.

    The distance of p to the axis line through `center` is
    sqrt(||p-c||^2 - ((p-c).n)^2); the residual is the squared difference
    between that distance and the radius, both computed through sqrt_safe to
    keep gradients finite.
    """
    delta = p - center
    dist_sq = (delta ** 2).sum(dim=-1)
    along_axis = (delta * axis).sum(dim=-1)
    radial = sqrt_safe(dist_sq - along_axis ** 2)
    return (radial - sqrt_safe(radius_squared)) ** 2
def sqrt_safe_tensorflow(x):
    """TF twin of sqrt_safe: sqrt(|x| + 1e-10)."""
    shifted = tf.abs(x) + 1e-10
    return tf.sqrt(shifted)
def compute_residue_single_tensorflow(axis, center, radius_squared, p):
    """TF1 twin of compute_residue_single: squared cylinder-surface residual."""
    p_minus_c = p - center
    p_minus_c_sqr = tf.reduce_sum(tf.square(p_minus_c), axis=-1)
    p_minus_c_dot_n = tf.reduce_sum(p_minus_c * axis, axis=-1)
    return tf.square(sqrt_safe_tensorflow(p_minus_c_sqr - tf.square(p_minus_c_dot_n)) - sqrt_safe_tensorflow(radius_squared))
if __name__ == '__main__':
    # Parity smoke test: compute_residue_single (torch, GPU) vs TF1 twin.
    batch_size = 100
    num_points = 1024
    device = torch.device('cuda:0')
    np.random.seed(0)
    axis = np.random.randn(batch_size, num_points, 3)
    center = np.random.randn(batch_size, num_points, 3)
    radius_squared = np.random.randn(batch_size, num_points)
    p = np.random.randn(batch_size, num_points, 3)
    axis_torch = torch.from_numpy(axis).float().to(device)
    center_torch = torch.from_numpy(center).float().to(device)
    radius_squared_torch = torch.from_numpy(radius_squared).float().to(device)
    p_torch = torch.from_numpy(p).float().to(device)
    loss_torch = compute_residue_single(axis_torch, center_torch, radius_squared_torch, p_torch)
    loss_torch = loss_torch.detach().cpu().numpy()
    print('loss_torch', loss_torch)
    # Debugging with Tensorflow
    axis_tensorflow = tf.constant(axis, dtype=tf.float32)
    center_tensorflow = tf.constant(center, dtype=tf.float32)
    radius_squared_tensorflow = tf.constant(radius_squared, dtype=tf.float32)
    p_tensorflow = tf.constant(p, dtype=tf.float32)
    loss_tensorflow = compute_residue_single_tensorflow(axis_tensorflow, center_tensorflow, radius_squared_tensorflow, p_tensorflow)
    sess = tf.Session()
    loss_tensorflow = sess.run(loss_tensorflow)
    print(np.abs(loss_torch - loss_tensorflow).max())
def acos_safe(x):
    """arccos with input clamped into (-1+1e-6, 1-1e-6) to avoid NaN/inf gradients."""
    eps = 1e-6
    return torch.clamp(x, min=-1.0 + eps, max=1.0 - eps).acos()
def compute_parameter_loss(predicted_axis, gt_axis, matching_indices, angle_diff):
    """Axis-direction loss between matched predicted and ground-truth primitives.

    Args:
        predicted_axis: BxK1x3 predicted axes.
        gt_axis: BxK2x3 ground-truth axes.
        matching_indices: BxK2 index of the prediction matched to each GT.
        angle_diff: if True return the angle, else 1 - |cos|.
    Returns:
        BxK2 per-primitive loss; axes are treated as unoriented (absolute dot).
    """
    batch_size, n_gt, _ = gt_axis.size()
    index = matching_indices.unsqueeze(2).expand(batch_size, n_gt, 3)
    matched = torch.gather(predicted_axis, 1, index)  # BxK2x3
    cos_abs = (matched * gt_axis).sum(dim=2).abs()
    return acos_safe(cos_abs) if angle_diff else 1.0 - cos_abs
def batched_gather(data, indices, axis):
    """Batched index-select along `axis` (TF1).

    data: Bx...xKx... with K at position `axis`; indices: BxK' of per-batch
    positions. output[b, ..., k, ...] = data[b, ..., indices[b, k], ...].
    """
    # data - Bx...xKx..., axis is where dimension K is
    # indices - BxK
    # output[b, ..., k, ...] = in[b, ..., indices[b, k], ...]
    assert axis >= 1
    ndims = data.get_shape().ndims # allow dynamic rank
    if axis > 1:
        # tranpose data to BxKx...
        perm = np.arange(ndims)
        perm[axis] = 1
        perm[1] = axis
        data = tf.transpose(data, perm=perm)
    batch_size = tf.shape(data)[0]
    batch_nums = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(batch_size), axis=1), axis=2), multiples=[1, tf.shape(indices)[1], 1]) # BxKx1
    indices = tf.concat([batch_nums, tf.expand_dims(indices, axis=2)], axis=2) # BxKx2
    gathered_data = tf.gather_nd(data, indices=indices)
    if axis > 1:
        # perm swaps axes 1 and `axis`, which is its own inverse, so applying
        # it again restores the original layout.
        gathered_data = tf.transpose(gathered_data, perm=perm)
    return gathered_data
def acos_safe_tensorflow(x):
    """arccos with inputs clamped into (-1+1e-6, 1-1e-6) to keep gradients finite."""
    eps = 1e-6
    return tf.math.acos(tf.clip_by_value(x, -1.0 + eps, 1.0 - eps))
def compute_parameter_loss_tensorflow(predicted_axis, gt_axis, matching_indices, angle_diff):
    """TF1 twin of compute_parameter_loss (axes treated as unoriented)."""
    n = batched_gather(predicted_axis, matching_indices, axis=1)
    dot_abs = tf.abs(tf.reduce_sum(n * gt_axis, axis=2))
    if angle_diff:
        return acos_safe_tensorflow(dot_abs) # BxK
    else:
        return 1.0 - dot_abs # BxK
if __name__ == '__main__':
    # Parity smoke test: compute_parameter_loss (torch, GPU) vs TF1 twin.
    batch_size = 100
    num_primitives1 = 15
    num_primitives2 = 5
    device = torch.device('cuda:0')
    np.random.seed(0)
    predicted_axis = np.random.randn(batch_size, num_primitives1, 3)
    gt_axis = np.random.randn(batch_size, num_primitives2, 3)
    matching_indices = np.random.randint(0, 15, (batch_size, num_primitives2))
    angle_diff = True
    predicted_axis_torch = torch.from_numpy(predicted_axis).float().to(device)
    gt_axis_torch = torch.from_numpy(gt_axis).float().to(device)
    matching_indices_torch = torch.from_numpy(matching_indices).long().to(device)
    loss_torch = compute_parameter_loss(predicted_axis_torch, gt_axis_torch, matching_indices_torch, angle_diff)
    loss_torch = loss_torch.detach().cpu().numpy()
    print('loss_torch', loss_torch)
    # Debugging with Tensorflow
    predicted_axis_tensorflow = tf.constant(predicted_axis, dtype=tf.float32)
    gt_axis_tensorflow = tf.constant(gt_axis, dtype=tf.float32)
    matching_indices_tensorflow = tf.constant(matching_indices, dtype=tf.int32)
    loss_tensorflow = compute_parameter_loss_tensorflow(predicted_axis_tensorflow, gt_axis_tensorflow, matching_indices_tensorflow, angle_diff)
    sess = tf.Session()
    loss_tensorflow = sess.run(loss_tensorflow)
    print(np.abs(loss_torch - loss_tensorflow).max())
def create_primitive_from_dict(d):
    """Build a Cylinder primitive from a parameter dictionary (type must be 'cylinder')."""
    assert d['type'] == 'cylinder'
    center = np.array([d['location_x'], d['location_y'], d['location_z']], dtype=float)
    direction = np.array([d['axis_x'], d['axis_y'], d['axis_z']], dtype=float)
    return Cylinder(center=center, radius=float(d['radius']), axis=direction)
def extract_parameter_data_as_dict(primitives, n_max_primitives):
    """Pack ground-truth cylinder axes into a fixed-size (n_max_primitives, 3) array.

    Rows corresponding to non-cylinder primitives stay zero.
    """
    axes = np.zeros(dtype=float, shape=[n_max_primitives, 3])
    for idx, prim in enumerate(primitives):
        if isinstance(prim, Cylinder):
            axes[idx] = prim.axis
    return {'cylinder_axis_gt': axes}
def extract_predicted_parameters_as_json(cylinder_center, cylinder_radius_squared, cylinder_axis, k):
    """Serialize one predicted cylinder into a JSON-ready dict.

    The Cylinder is built with a fixed height of 5 (visualization default —
    TODO confirm); k is the primitive's label index in the prediction.
    """
    cylinder = Cylinder(cylinder_center, np.sqrt(cylinder_radius_squared), cylinder_axis, height=5)
    return {
        'type': 'cylinder',
        'center_x': float(cylinder.center[0]),
        'center_y': float(cylinder.center[1]),
        'center_z': float(cylinder.center[2]),
        'radius': float(cylinder.radius),
        'axis_x': float(cylinder.axis[0]),
        'axis_y': float(cylinder.axis[1]),
        'axis_z': float(cylinder.axis[2]),
        'height': float(cylinder.height),
        'label': k,
    } | 11,804 | 51.234513 | 166 | py |
CPFN | CPFN-master/SPFN/losses_implementation.py | # Importation of packages
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from scipy.optimize import linear_sum_assignment
from SPFN import plane_fitter, sphere_fitter, cylinder_fitter, cone_fitter
# Segmentation Loss
def hungarian_matching(W_pred, I_gt):
    """Match predicted segments to ground-truth instances (no gradient flows).

    Args:
        W_pred: BxNxK soft membership matrix.
        I_gt: BxN ground-truth labels; may contain -1 for background.
    Returns:
        BxK long tensor matching_indices where GT primitive k of batch b is
        matched with prediction matching_indices[b, k]. Only the first
        n_gt_labels entries per row are meaningful; GT background is excluded
        from the matching.
    """
    batch_size, n_points, n_max_labels = W_pred.size()
    matching_indices = torch.zeros([batch_size, n_max_labels], dtype=torch.long).to(W_pred.device)
    for b in range(batch_size):
        # Assumes labels in I_gt[b] are contiguous (no gaps); K' = max + 1.
        n_gt_labels = torch.max(I_gt[b]).item() + 1
        # The extra one-hot class absorbs -1 background entries.
        W_gt = torch.eye(n_gt_labels + 1).to(I_gt.device)[I_gt[b]]
        dot = W_gt.t().mm(W_pred[b])  # K'xK soft intersection
        union = W_gt.sum(dim=0).unsqueeze(1) + W_pred[b].sum(dim=0).unsqueeze(0) - dot
        iou = dot / torch.clamp(union, min=1e-10, max=None)  # K'xK
        iou = iou[:n_gt_labels, :]  # drop the background row
        # Maximize total IoU (linear_sum_assignment minimizes, hence the minus).
        _, cols = linear_sum_assignment(-iou.detach().cpu().numpy())
        matching_indices[b, :n_gt_labels] = torch.from_numpy(cols).long().to(matching_indices.device)
    return matching_indices
def hungarian_matching_tensorflow(W_pred, I_gt):
    """Numpy implementation of hungarian_matching, wrapped via tf.py_func.

    W_pred: BxNxK soft memberships; I_gt: BxN labels (may contain -1).
    Returns an int32 BxK array; only the first n_gt_labels entries per row
    are meaningful and GT background is excluded from the matching.
    """
    batch_size = I_gt.shape[0]
    n_points = I_gt.shape[1]
    n_max_labels = W_pred.shape[2]
    matching_indices = np.zeros([batch_size, n_max_labels], dtype=np.int32)
    for b in range(batch_size):
        # Assumes labels in I_gt[b] are contiguous (no gaps); K' = max + 1.
        n_gt_labels = np.max(I_gt[b]) + 1
        # One extra column absorbs the -1 background labels.
        W_gt = np.zeros([n_points, n_gt_labels + 1])
        W_gt[np.arange(n_points), I_gt[b]] = 1.0  # NxK'
        dot = W_gt.T.dot(W_pred[b])  # K'xK soft intersection
        union = W_gt.sum(axis=0)[:, None] + W_pred[b].sum(axis=0)[None, :] - dot
        iou = (dot / np.maximum(union, 1e-10))[:n_gt_labels, :]  # K'xK, minus bg row
        # Maximize total IoU (linear_sum_assignment minimizes, hence the minus).
        _, cols = linear_sum_assignment(-iou)
        matching_indices[b, :n_gt_labels] = cols
    return matching_indices
if __name__ == '__main__':
    # Parity smoke test: hungarian_matching (torch, GPU) vs the numpy/TF path.
    batch_size = 100
    num_points = 1024
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    W_pred = np.random.rand(batch_size, num_points, n_max_instances)
    I_gt = np.random.randint(-1, n_max_instances, (batch_size, num_points))
    W_pred = W_pred / np.linalg.norm(W_pred, axis=2, keepdims=True)
    W_pred_torch = torch.from_numpy(W_pred).float().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    matching_indices_torch = hungarian_matching(W_pred_torch, I_gt_torch)
    matching_indices_torch = matching_indices_torch.detach().cpu().numpy()
    print('matching_indices_torch', matching_indices_torch)
    # Debugging with Tensorflow
    W_pred_tensorflow = tf.constant(W_pred, dtype=tf.float32)
    I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
    matching_indices_tensorflow = tf.py_func(hungarian_matching_tensorflow, [W_pred_tensorflow, I_gt_tensorflow], Tout=tf.int32)
    sess = tf.Session()
    matching_indices_tensorflow = sess.run(matching_indices_tensorflow)
    print(np.abs(matching_indices_torch - matching_indices_tensorflow).max())
def compute_miou_loss(W, I_gt, matching_indices, div_eps=1e-10):
    """Per-primitive (1 - IoU) between matched predicted and GT memberships.

    Args:
        W: BxNxK predicted soft memberships.
        I_gt: BxN ground-truth labels; -1 (background) yields an all-zero GT row.
        matching_indices: BxK matching from hungarian_matching.
        div_eps: denominator guard.
    Returns:
        (1 - mIoU, 1 - intersection / n_points), both BxK.
    """
    batch_size, n_points, n_max_labels = W.size()
    _, n_labels = matching_indices.size()
    gather_idx = matching_indices.unsqueeze(1).expand(batch_size, n_points, n_labels)
    W_matched = torch.gather(W, 2, gather_idx)  # BxNxK, reordered to GT order
    # eye(n+2) indexed at -1 gives a one-hot in the extra columns, which the
    # truncation below zeroes out — i.e. background rows become all-zero.
    W_gt = torch.eye(n_labels + 2).to(I_gt.device)[I_gt]
    W_gt = W_gt[:, :, :n_labels]
    intersection = torch.sum(W_gt * W_matched, axis=1)  # BxK
    union = W_gt.sum(dim=1) + W_matched.sum(dim=1) - intersection
    mIoU = intersection / (union + div_eps)  # BxK
    return 1.0 - mIoU, 1 - intersection / n_points
def batched_gather_tensorflow(data, indices, axis):
    """Batched index-select along `axis` (TF1; duplicate of cylinder_fitter's batched_gather).

    output[b, ..., k, ...] = data[b, ..., indices[b, k], ...].
    """
    # data - Bx...xKx..., axis is where dimension K is
    # indices - BxK
    # output[b, ..., k, ...] = in[b, ..., indices[b, k], ...]
    assert axis >= 1
    ndims = data.get_shape().ndims # allow dynamic rank
    if axis > 1:
        # tranpose data to BxKx...
        perm = np.arange(ndims)
        perm[axis] = 1
        perm[1] = axis
        data = tf.transpose(data, perm=perm)
    batch_size = tf.shape(data)[0]
    batch_nums = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(batch_size), axis=1), axis=2), multiples=[1, tf.shape(indices)[1], 1]) # BxKx1
    indices = tf.concat([batch_nums, tf.expand_dims(indices, axis=2)], axis=2) # BxKx2
    gathered_data = tf.gather_nd(data, indices=indices)
    if axis > 1:
        # The axis-1<->axis swap is its own inverse; re-applying it restores layout.
        gathered_data = tf.transpose(gathered_data, perm=perm)
    return gathered_data
def compute_miou_loss_tensorflow(W, I_gt, matching_indices):
    """TF1 twin of compute_miou_loss; returns only the (1 - mIoU) term, BxK."""
    # W - BxNxK
    # I_gt - BxN
    W_reordered = batched_gather_tensorflow(W, indices=matching_indices, axis=2) # BxNxK
    depth = tf.shape(W)[2]
    # notice in tf.one_hot, -1 will result in a zero row, which is what we want
    W_gt = tf.one_hot(I_gt, depth=depth, dtype=tf.float32) # BxNxK
    dot = tf.reduce_sum(W_gt * W_reordered, axis=1) # BxK
    denominator = tf.reduce_sum(W_gt, axis=1) + tf.reduce_sum(W_reordered, axis=1) - dot
    mIoU = dot / (denominator + 1e-10) # BxK
    return 1.0 - mIoU
if __name__ == '__main__':
    # Parity smoke test: compute_miou_loss (torch, GPU) vs TF1 twin.
    batch_size = 100
    num_points = 1024
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    I_gt = np.random.randint(-1, n_max_instances, (batch_size, num_points))
    W = W / np.linalg.norm(W, axis=2, keepdims=True)
    W_torch = torch.from_numpy(W).float().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    with torch.no_grad():
        matching_indices_torch = hungarian_matching(W_torch, I_gt_torch)
    loss_torch, _ = compute_miou_loss(W_torch, I_gt_torch, matching_indices_torch)
    loss_torch = loss_torch.detach().cpu().numpy()
    print('loss_torch', loss_torch)
    # Debugging with Tensorflow
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
    matching_indices_tensorflow = tf.stop_gradient(tf.py_func(hungarian_matching_tensorflow, [W_tensorflow, I_gt_tensorflow], Tout=tf.int32))
    loss_tensorflow = compute_miou_loss_tensorflow(W_tensorflow, I_gt_tensorflow, matching_indices_tensorflow)
    sess = tf.Session()
    loss_tensorflow = sess.run(loss_tensorflow)
    print(np.abs(loss_torch - loss_tensorflow).max())
# Normal Loss
def acos_safe(x):
    """arccos with input clamped into (-1+1e-6, 1-1e-6) to avoid NaN/inf gradients."""
    bounded = torch.clamp(x, min=-1.0 + 1e-6, max=1.0 - 1e-6)
    return torch.acos(bounded)
def compute_normal_loss(normal, normal_gt, angle_diff):
    """Mean per-point normal deviation; normals are treated as unoriented.

    Args:
        normal, normal_gt: BxNx3 normals.
        angle_diff: if True average the angle, else average (1 - |cos|).
    Returns:
        B tensor of per-batch mean losses.
    """
    cos_abs = (normal * normal_gt).sum(dim=2).abs()  # BxN
    per_point = acos_safe(cos_abs) if angle_diff else 1.0 - cos_abs
    return per_point.mean(dim=1)
def acos_safe_tensorflow(x):
    """arccos with input clamped into (-1+1e-6, 1-1e-6) to avoid infinite gradients."""
    return tf.math.acos(tf.clip_by_value(x, -1.0+1e-6, 1.0-1e-6))
def compute_normal_loss_tensorflow(normal, normal_gt, angle_diff):
    """TF1 twin of compute_normal_loss (mean per-point unoriented deviation)."""
    # normal, normal_gt: BxNx3
    # Assume normals are unoriented
    dot_abs = tf.abs(tf.reduce_sum(normal * normal_gt, axis=2)) # BxN
    if angle_diff:
        return tf.reduce_mean(acos_safe_tensorflow(dot_abs), axis=1)
    else:
        return tf.reduce_mean(1.0 - dot_abs, axis=1)
if __name__ == '__main__':
    # Parity smoke test: compute_normal_loss (torch, GPU) vs TF1 twin.
    batch_size = 100
    num_points = 1024
    device = torch.device('cuda:0')
    np.random.seed(0)
    normal = np.random.randn(batch_size, num_points, 3)
    normal_gt = np.random.randn(batch_size, num_points, 3)
    angle_diff = True
    normal_torch = torch.from_numpy(normal).float().to(device)
    normal_gt_torch = torch.from_numpy(normal_gt).float().to(device)
    loss_torch = compute_normal_loss(normal_torch, normal_gt_torch, angle_diff)
    loss_torch = loss_torch.detach().cpu().numpy()
    print('loss_torch', loss_torch)
    # Debugging with Tensorflow
    normal_tensorflow = tf.constant(normal, dtype=tf.float32)
    normal_gt_tensorflow = tf.constant(normal_gt, dtype=tf.float32)
    loss_tensorflow = compute_normal_loss_tensorflow(normal_tensorflow, normal_gt_tensorflow, angle_diff)
    sess = tf.Session()
    loss_tensorflow = sess.run(loss_tensorflow)
    print(np.abs(loss_torch - loss_tensorflow).max())
# Type Loss
def compute_per_point_type_loss(per_point_type, I_gt, T_gt, is_eval):
    """Average per-point primitive-type loss, ignoring GT background points.

    Args:
        per_point_type: BxNxQ logits when training; BxN hard type labels when is_eval.
        I_gt: BxN instance labels; -1 marks background.
        T_gt: BxK type label for each GT instance.
        is_eval: switch between 0/1 error (True) and cross-entropy (False).
    Returns:
        B tensor: summed loss over non-background points / their count.
    """
    batch_size, n_points = I_gt.size()
    # Clamp -1 to 0 so the gather is valid; those points are masked out below.
    gt_type = torch.gather(T_gt, 1, torch.clamp(I_gt, min=0, max=None))
    if is_eval:
        loss = 1.0 - (per_point_type == gt_type).float()
    else:
        flat_logits = per_point_type.contiguous().view(batch_size * n_points, -1)
        loss = torch.nn.functional.cross_entropy(flat_logits, gt_type.view(batch_size * n_points), reduction='none')
        loss = loss.view(batch_size, n_points)
    # do not add loss to background points in gt
    loss = torch.where(I_gt == -1, torch.zeros_like(loss), loss)
    return torch.sum(loss, dim=1) / (torch.sum((I_gt != -1).float(), dim=1).float())
def compute_per_point_type_loss_tensorflow(per_point_type, I_gt, T_gt, is_eval):
    """TF1 twin of compute_per_point_type_loss (background points excluded)."""
    # For training, per_point_type is BxNxQ, where Q = n_registered_primitives
    # For test, per_point_type is BxN
    # I_gt - BxN, allow -1
    # T_gt - BxK
    batch_size = tf.shape(I_gt)[0]
    n_points = tf.shape(I_gt)[1]
    indices_0 = tf.tile(tf.expand_dims(tf.range(batch_size), axis=1), [1, n_points]) # BxN
    # Clamp -1 to 0 so gather_nd stays in range; masked out below.
    indices = tf.stack([indices_0, tf.maximum(0, I_gt)], axis=2)
    per_point_type_gt = tf.gather_nd(T_gt, indices=indices) # BxN
    if is_eval:
        type_loss = 1.0 - tf.to_float(tf.equal(per_point_type, per_point_type_gt))
    else:
        type_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=per_point_type, labels=per_point_type_gt) # BxN
    # do not add loss to background points in gt
    type_loss = tf.where(tf.equal(I_gt, -1), tf.zeros_like(type_loss), type_loss)
    return tf.reduce_sum(type_loss, axis=1) / tf.to_float(tf.count_nonzero(tf.not_equal(I_gt, -1), axis=1)) # B
if __name__ == '__main__':
    # Parity smoke test: compute_per_point_type_loss (torch, GPU) vs TF1 twin.
    batch_size = 100
    num_points = 1024
    Q = 4
    K = 10
    device = torch.device('cuda:0')
    np.random.seed(0)
    per_point_type = np.random.randn(batch_size, num_points, Q)
    I_gt = np.random.randint(-1, K, (batch_size, num_points))
    T_gt = np.random.randint(0, Q, (batch_size, K))
    is_eval = False
    per_point_type_torch = torch.from_numpy(per_point_type).float().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    T_gt_torch = torch.from_numpy(T_gt).long().to(device)
    loss_torch = compute_per_point_type_loss(per_point_type_torch, I_gt_torch, T_gt_torch, is_eval)
    loss_torch = loss_torch.detach().cpu().numpy()
    print('loss_torch', loss_torch)
    # Debugging with Tensorflow
    per_point_type_tensorflow = tf.constant(per_point_type, dtype=tf.float32)
    I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
    T_gt_tensorflow = tf.constant(T_gt, dtype=tf.int32)
    loss_tensorflow = compute_per_point_type_loss_tensorflow(per_point_type_tensorflow, I_gt_tensorflow, T_gt_tensorflow, is_eval)
    sess = tf.Session()
    loss_tensorflow = sess.run(loss_tensorflow)
    print(np.abs(loss_torch - loss_tensorflow).max())
def compute_parameters(P, W, X, classes=('plane', 'sphere', 'cylinder', 'cone')):
    """Estimate primitive parameters for every requested primitive class.

    Args:
        P: BxNx3 points.
        W: BxNxK per-primitive soft membership weights.
        X: BxNx3 per-point normals (used by the cylinder and cone fitters).
        classes: iterable of class names to fit; defaults to all four.
            Changed from a mutable list default to a tuple (the shared list
            default was an anti-pattern; iteration behavior is unchanged).
    Returns:
        dict mapping parameter names (e.g. 'plane_normal') to tensors.
    Raises:
        NotImplementedError: if an unrecognized class name is given.
    """
    parameters = {}
    for class_ in classes:
        if class_ == 'plane':
            plane_normal, plane_center = plane_fitter.compute_parameters(P, W)
            parameters['plane_normal'] = plane_normal
            parameters['plane_center'] = plane_center
        elif class_ == 'sphere':
            sphere_center, sphere_radius_squared = sphere_fitter.compute_parameters(P, W)
            parameters['sphere_center'] = sphere_center
            parameters['sphere_radius_squared'] = sphere_radius_squared
        elif class_ == 'cylinder':
            cylinder_axis, cylinder_center, cylinder_radius_squared = cylinder_fitter.compute_parameters(P, W, X)
            parameters['cylinder_axis'] = cylinder_axis
            parameters['cylinder_center'] = cylinder_center
            parameters['cylinder_radius_squared'] = cylinder_radius_squared
        elif class_ == 'cone':
            cone_apex, cone_axis, cone_half_angle = cone_fitter.compute_parameters(P, W, X)
            parameters['cone_apex'] = cone_apex
            parameters['cone_axis'] = cone_axis
            parameters['cone_half_angle'] = cone_half_angle
        else:
            # Fail loudly and say which class name was not recognized.
            raise NotImplementedError('unknown primitive class: %s' % class_)
    return parameters
def compute_parameters_tensorflow(P, W, X, classes=['plane','sphere','cylinder','cone']):
    """Fit primitive parameters for each requested class (TensorFlow path).

    Mirrors `compute_parameters`: P are BxNx3 points, W are BxNxK soft
    membership weights, X are BxNx3 normals; returns a dict of fitted
    parameter tensors keyed by name. Unknown class names raise.
    """
    fitted = {}
    for name in classes:
        if name == 'plane':
            normal, center = plane_fitter.compute_parameters_tensorflow(P, W)
            fitted['plane_normal'] = normal
            fitted['plane_center'] = center
        elif name == 'sphere':
            center, radius_sq = sphere_fitter.compute_parameters_tensorflow(P, W)
            fitted['sphere_center'] = center
            fitted['sphere_radius_squared'] = radius_sq
        elif name == 'cylinder':
            axis, center, radius_sq = cylinder_fitter.compute_parameters_tensorflow(P, W, X)
            fitted['cylinder_axis'] = axis
            fitted['cylinder_center'] = center
            fitted['cylinder_radius_squared'] = radius_sq
        elif name == 'cone':
            apex, axis, half_angle = cone_fitter.compute_parameters_tensorflow(P, W, X)
            fitted['cone_apex'] = apex
            fitted['cone_axis'] = axis
            fitted['cone_half_angle'] = half_angle
        else:
            raise NotImplementedError
    return fitted
if __name__ == '__main__':
    # Sanity check: the PyTorch and TensorFlow fitters should agree on
    # random inputs.
    batch_size = 100
    num_points = 1024
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)  # unit normals
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    X_torch = torch.from_numpy(X).float().to(device)
    parameters = compute_parameters(P_torch, W_torch, X_torch)
    # BUG FIX: the radius is stored under 'cylinder_radius_squared';
    # the previous lookup of 'cylinder_radius_square' raised a KeyError.
    plane_normal_torch = parameters['plane_normal'].detach().cpu().numpy()
    plane_center_torch = parameters['plane_center'].detach().cpu().numpy()
    sphere_center_torch = parameters['sphere_center'].detach().cpu().numpy()
    sphere_radius_squared_torch = parameters['sphere_radius_squared'].detach().cpu().numpy()
    cylinder_axis_torch = parameters['cylinder_axis'].detach().cpu().numpy()
    cylinder_center_torch = parameters['cylinder_center'].detach().cpu().numpy()
    cylinder_radius_square_torch = parameters['cylinder_radius_squared'].detach().cpu().numpy()
    cone_apex_torch = parameters['cone_apex'].detach().cpu().numpy()
    cone_axis_torch = parameters['cone_axis'].detach().cpu().numpy()
    cone_half_angle_torch = parameters['cone_half_angle'].detach().cpu().numpy()
    # Debugging with Tensorflow
    P_tensorflow = tf.constant(P, dtype=tf.float32)
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    X_tensorflow = tf.constant(X, dtype=tf.float32)
    parameters = compute_parameters_tensorflow(P_tensorflow, W_tensorflow, X_tensorflow)
    sess = tf.Session()
    (plane_normal_tensorflow, plane_center_tensorflow, sphere_center_tensorflow,
     sphere_radius_squared_tensorflow, cylinder_axis_tensorflow, cylinder_center_tensorflow,
     cylinder_radius_square_tensorflow, cone_apex_tensorflow, cone_axis_tensorflow,
     cone_half_angle_tensorflow) = sess.run(
        [parameters['plane_normal'], parameters['plane_center'], parameters['sphere_center'],
         parameters['sphere_radius_squared'], parameters['cylinder_axis'], parameters['cylinder_center'],
         parameters['cylinder_radius_squared'], parameters['cone_apex'], parameters['cone_axis'],
         parameters['cone_half_angle']])
    # Normals/axes are only defined up to sign, so compare both orientations
    # and report the smaller error.
    print(np.minimum(np.abs(plane_normal_tensorflow - plane_normal_torch), np.abs(plane_normal_tensorflow + plane_normal_torch)).max())
    print(np.minimum(np.abs(plane_center_tensorflow - plane_center_torch), np.abs(plane_center_tensorflow + plane_center_torch)).max())
    print(np.abs(sphere_center_tensorflow - sphere_center_torch).max())
    print(np.abs(sphere_radius_squared_tensorflow - sphere_radius_squared_torch).max())
    print(np.minimum(np.abs(cylinder_axis_tensorflow - cylinder_axis_torch), np.abs(cylinder_axis_tensorflow + cylinder_axis_torch)).max())
    print(np.abs(cylinder_center_tensorflow - cylinder_center_torch).max())
    print(np.abs(cylinder_radius_square_tensorflow - cylinder_radius_square_torch).max())
    print(np.abs(cone_apex_tensorflow - cone_apex_torch).max())
    print(np.minimum(np.abs(cone_axis_tensorflow - cone_axis_torch), np.abs(cone_axis_tensorflow + cone_axis_torch)).max())
    print(np.abs(cone_half_angle_tensorflow - cone_half_angle_torch).max())
# Residue Loss
def compute_residue_loss(parameters, matching_indices, points_per_instance, T_gt, classes=['plane','sphere','cylinder','cone']):
    """Per-instance residue loss for each fitted primitive (PyTorch path).

    Args:
        parameters: dict of fitted parameter tensors, keyed as produced by
            `compute_parameters` (e.g. 'plane_normal' is BxKx3).
        matching_indices: BxK indices mapping gt instance slots to predicted
            instance slots (from Hungarian matching).
        points_per_instance: BxKxN'x3 gt points sampled per instance.
        T_gt: BxK ground-truth primitive type index per instance.
        classes: primitive names to evaluate, in type-index order;
            unknown names raise NotImplementedError.

    Returns:
        (residue_loss, residue_per_point_array):
        residue_loss is BxK — each instance's mean residue under its gt type;
        residue_per_point_array is BxKxN'xT with the per-point residues for
        every candidate type.
    """
    # parameters is a dictionary where each key represents a different parameter
    # points_per_instance of size BxKxN'x3
    residue_losses = [] # a length T array of BxK tensors
    residue_per_point_array = [] # a length T array of BxKxN' tensors
    #residue_per_class = []
    batch_size, n_labels = matching_indices.size()
    for class_ in classes:
        if class_ == 'plane':
            # gather(..., matching_indices) reorders predicted parameters to
            # align with gt instances; unsqueeze(2) broadcasts over the N'
            # sampled points. Vector parameters expand the index to (...,3);
            # scalar parameters (e.g. plane_center) gather directly.
            residue_per_point = plane_fitter.compute_residue_single(torch.gather(parameters['plane_normal'], 1, matching_indices.unsqueeze(2).expand(batch_size, n_labels, 3)).unsqueeze(2),
                torch.gather(parameters['plane_center'], 1, matching_indices).unsqueeze(2),
                points_per_instance)
        elif class_ == 'sphere':
            residue_per_point = sphere_fitter.compute_residue_single(torch.gather(parameters['sphere_center'], 1, matching_indices.unsqueeze(2).expand(batch_size, n_labels, 3)).unsqueeze(2),
                torch.gather(parameters['sphere_radius_squared'], 1, matching_indices.expand(batch_size, n_labels)).unsqueeze(2),
                points_per_instance)
        elif class_ == 'cylinder':
            residue_per_point = cylinder_fitter.compute_residue_single(torch.gather(parameters['cylinder_axis'], 1, matching_indices.unsqueeze(2).expand(batch_size, n_labels, 3)).unsqueeze(2),
                torch.gather(parameters['cylinder_center'], 1, matching_indices.unsqueeze(2).expand(batch_size, n_labels, 3)).unsqueeze(2),
                torch.gather(parameters['cylinder_radius_squared'], 1, matching_indices.expand(batch_size, n_labels)).unsqueeze(2),
                points_per_instance)
        elif class_ == 'cone':
            residue_per_point = cone_fitter.compute_residue_single(torch.gather(parameters['cone_apex'], 1, matching_indices.unsqueeze(2).expand(batch_size, n_labels, 3)).unsqueeze(2),
                torch.gather(parameters['cone_axis'], 1, matching_indices.unsqueeze(2).expand(batch_size, n_labels, 3)).unsqueeze(2),
                torch.gather(parameters['cone_half_angle'], 1, matching_indices.expand(batch_size, n_labels)).unsqueeze(2),
                points_per_instance)
        else:
            raise NotImplementedError
        #residue_per_class.append(residue_per_point)
        residue_per_point_array.append(residue_per_point)
        # Mean over the N' sampled points -> BxK per candidate type.
        residue_losses.append(torch.mean(residue_per_point, dim=2))
    residue_losses = torch.stack(residue_losses, dim=2)
    # Keep only the loss of each instance's ground-truth type.
    residue_loss = torch.gather(residue_losses, 2, T_gt.unsqueeze(2)).squeeze(2)
    residue_per_point_array = torch.stack(residue_per_point_array, dim=3) # BxKxN'xT
    return residue_loss, residue_per_point_array#, residue_per_class
def aggregate_loss_from_stacked_tensorflow(loss_stacked, T_gt):
    """Pick each instance's loss for its ground-truth type.

    loss_stacked: BxKxT per-type losses; T_gt: BxK integer type labels.
    Returns a BxK tensor with out[b, k] = loss_stacked[b, k, T_gt[b, k]].
    """
    dims = tf.shape(loss_stacked)
    n_batch, n_instances = dims[0], dims[1]
    batch_idx = tf.tile(tf.expand_dims(tf.range(n_batch), axis=1), multiples=[1, n_instances])  # BxK
    inst_idx = tf.tile(tf.expand_dims(tf.range(n_instances), axis=0), multiples=[n_batch, 1])  # BxK
    gather_idx = tf.stack([batch_idx, inst_idx, T_gt], axis=2)  # BxKx3
    return tf.gather_nd(loss_stacked, indices=gather_idx)
def compute_residue_loss_tensorflow(parameters, matching_indices, points_per_instance, T_gt, classes=['plane','sphere','cylinder','cone']):
    """Per-instance residue loss for each fitted primitive (TensorFlow path).

    TensorFlow twin of `compute_residue_loss`: parameters is the dict from
    `compute_parameters_tensorflow`, matching_indices is BxK,
    points_per_instance is BxKxN'x3 and T_gt is BxK. Returns
    (residue_loss BxK, residue_per_point_array BxKxN'xT).
    """
    residue_losses = [] # a length T array of BxK tensors
    residue_per_point_array = [] # a length T array of BxKxN' tensors
    #residue_per_class = []
    for class_ in classes:
        if class_ == 'plane':
            # batched_gather reorders predicted parameters to align with gt
            # instances; expand_dims(..., axis=2) broadcasts over N' points.
            residue_per_point = plane_fitter.compute_residue_single_tensorflow(tf.expand_dims(batched_gather_tensorflow(parameters['plane_normal'], matching_indices, axis=1), axis=2),
                tf.expand_dims(batched_gather_tensorflow(parameters['plane_center'], matching_indices, axis=1), axis=2),
                points_per_instance)
        elif class_ == 'sphere':
            residue_per_point = sphere_fitter.compute_residue_single_tensorflow(tf.expand_dims(batched_gather_tensorflow(parameters['sphere_center'], matching_indices, axis=1), axis=2),
                tf.expand_dims(batched_gather_tensorflow(parameters['sphere_radius_squared'], matching_indices, axis=1), axis=2),
                points_per_instance)
        elif class_ == 'cylinder':
            residue_per_point = cylinder_fitter.compute_residue_single_tensorflow(tf.expand_dims(batched_gather_tensorflow(parameters['cylinder_axis'], matching_indices, axis=1), axis=2),
                tf.expand_dims(batched_gather_tensorflow(parameters['cylinder_center'], matching_indices, axis=1), axis=2),
                tf.expand_dims(batched_gather_tensorflow(parameters['cylinder_radius_squared'], matching_indices, axis=1), axis=2),
                points_per_instance)
        elif class_ == 'cone':
            residue_per_point = cone_fitter.compute_residue_single_tensorflow(tf.expand_dims(batched_gather_tensorflow(parameters['cone_apex'], matching_indices, axis=1), axis=2),
                tf.expand_dims(batched_gather_tensorflow(parameters['cone_axis'], matching_indices, axis=1), axis=2),
                tf.expand_dims(batched_gather_tensorflow(parameters['cone_half_angle'], matching_indices, axis=1), axis=2),
                points_per_instance)
        else:
            raise NotImplementedError
        #residue_per_class.append(residue_per_point)
        residue_per_point_array.append(residue_per_point)
        # Mean over the N' sampled points -> BxK per candidate type.
        residue_losses.append(tf.reduce_mean(residue_per_point, axis=2))
    residue_losses = tf.stack(residue_losses, axis=2)
    residue_per_point_array = tf.stack(residue_per_point_array, axis=3) # BxKxN'xT
    # Aggregate losses across fitters
    residue_loss = aggregate_loss_from_stacked_tensorflow(residue_losses, T_gt) # BxK
    return residue_loss, residue_per_point_array#, residue_per_class
if __name__ == '__main__':
    # Compare the PyTorch and TensorFlow residue-loss implementations on
    # random data.
    batch_size = 100
    num_points = 1024
    num_points_instance = 512
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)
    points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
    T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
    I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    X_torch = torch.from_numpy(X).float().to(device)
    points_per_instance_torch = torch.from_numpy(points_per_instance).float().to(device)
    T_gt_torch = torch.from_numpy(T_gt).long().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    parameters_torch = compute_parameters(P_torch, W_torch, X_torch)
    matching_indices_torch = hungarian_matching(W_torch, I_gt_torch)
    # BUG FIX: compute_residue_loss returns two values (the per-class list is
    # commented out in its body); the previous three-way unpack raised a
    # ValueError at runtime. Same applies to the TensorFlow call below.
    residue_loss_torch, residue_per_point_array_torch = compute_residue_loss(parameters_torch, matching_indices_torch, points_per_instance_torch, T_gt_torch, classes=['plane', 'sphere', 'cylinder', 'cone'])
    residue_loss_torch = residue_loss_torch.detach().cpu().numpy()
    residue_per_point_array_torch = residue_per_point_array_torch.detach().cpu().numpy()
    print('residue_loss_torch', residue_loss_torch)
    print('residue_per_point_array_torch', residue_per_point_array_torch)
    # Debugging with Tensorflow
    P_tensorflow = tf.constant(P, dtype=tf.float32)
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    X_tensorflow = tf.constant(X, dtype=tf.float32)
    points_per_instance_tensorflow = tf.constant(points_per_instance, dtype=tf.float32)
    T_gt_tensorflow = tf.constant(T_gt, dtype=tf.int32)
    I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
    parameters_tensorflow = compute_parameters_tensorflow(P_tensorflow, W_tensorflow, X_tensorflow)
    matching_indices_tensorflow = tf.stop_gradient(tf.py_func(hungarian_matching_tensorflow, [W_tensorflow, I_gt_tensorflow], Tout=tf.int32))
    residue_loss_tensorflow, residue_per_point_array_tensorflow = compute_residue_loss_tensorflow(parameters_tensorflow, matching_indices_tensorflow, points_per_instance_tensorflow, T_gt_tensorflow, classes=['plane', 'sphere', 'cylinder', 'cone'])
    sess = tf.Session()
    residue_loss_tensorflow, residue_per_point_array_tensorflow = sess.run([residue_loss_tensorflow, residue_per_point_array_tensorflow])
    print(np.abs(residue_loss_tensorflow - residue_loss_torch).max())
    print(np.abs(residue_per_point_array_tensorflow - residue_per_point_array_torch).max())
def compute_parameter_loss(predicted_parameters, gt_parameters, matching_indices, T_gt, is_eval=False, classes=['plane','sphere','cylinder','cone']):
    """Per-instance parameter (axis/normal) loss, selected by gt type (PyTorch).

    Only direction parameters are compared: planes use their normal,
    cylinders and cones use their axis; spheres contribute zero. Returns a
    BxK tensor where each entry is the loss of that instance's gt type.
    """
    first_param = predicted_parameters[list(predicted_parameters.keys())[0]]
    batch_size, n_max_instances = first_param.size()[0:2]
    per_class_losses = []  # length-T list of BxK tensors
    for class_ in classes:
        if class_ == 'plane':
            class_loss = plane_fitter.compute_parameter_loss(predicted_parameters['plane_normal'], gt_parameters['plane_normal'], matching_indices, angle_diff=is_eval)
        elif class_ == 'sphere':
            # Spheres carry no direction parameter, so their loss is zero.
            class_loss = torch.zeros([batch_size, n_max_instances], dtype=torch.float).to(T_gt.device)
        elif class_ == 'cylinder':
            class_loss = cylinder_fitter.compute_parameter_loss(predicted_parameters['cylinder_axis'], gt_parameters['cylinder_axis'], matching_indices, angle_diff=is_eval)
        elif class_ == 'cone':
            class_loss = cone_fitter.compute_parameter_loss(predicted_parameters['cone_axis'], gt_parameters['cone_axis'], matching_indices, angle_diff=is_eval)
        else:
            raise NotImplementedError
        per_class_losses.append(class_loss)
    stacked = torch.stack(per_class_losses, dim=2)  # BxKxT
    return torch.gather(stacked, 2, T_gt.unsqueeze(2)).squeeze(2)  # BxK
def compute_parameter_loss_tensorflow(predicted_parameters, gt_parameters, matching_indices, T_gt, is_eval=False, classes=['plane','sphere','cylinder','cone']):
    """Per-instance parameter (axis/normal) loss, selected by gt type (TensorFlow).

    TensorFlow twin of `compute_parameter_loss`: planes compare normals,
    cylinders/cones compare axes, spheres contribute zero. Returns a BxK
    tensor with each instance's loss under its ground-truth type.
    """
    parameter_losses = [] # a length T array of BxK tensors
    for class_ in classes:
        if class_ == 'plane':
            parameter_loss = plane_fitter.compute_parameter_loss_tensorflow(predicted_parameters['plane_normal'], gt_parameters['plane_normal'], matching_indices, angle_diff=is_eval)
        elif class_ == 'sphere':
            # BUG FIX: previously built tf.zeros from the undefined names
            # `batch_size`/`n_max_instances` (they only happened to exist as
            # globals when run as a script). Derive the BxK shape from T_gt.
            parameter_loss = tf.zeros_like(T_gt, dtype=tf.float32)
        elif class_ == 'cylinder':
            parameter_loss = cylinder_fitter.compute_parameter_loss_tensorflow(predicted_parameters['cylinder_axis'], gt_parameters['cylinder_axis'], matching_indices, angle_diff=is_eval)
        elif class_ == 'cone':
            parameter_loss = cone_fitter.compute_parameter_loss_tensorflow(predicted_parameters['cone_axis'], gt_parameters['cone_axis'], matching_indices, angle_diff=is_eval)
        else:
            raise NotImplementedError
        parameter_losses.append(parameter_loss)
    parameter_losses = tf.stack(parameter_losses, axis=2)
    parameter_loss = aggregate_loss_from_stacked_tensorflow(parameter_losses, T_gt) # BxK
    return parameter_loss
if __name__ == '__main__':
    # Compare the PyTorch and TensorFlow parameter-loss implementations on
    # random data with randomly generated "ground truth" parameters.
    batch_size = 100
    num_points = 1024
    num_points_instance = 512
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)  # unit normals
    T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
    I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
    # Random gt parameters; directional quantities are normalized to unit
    # length, radii/angles made positive via abs.
    plane_normal = np.random.randn(batch_size, n_max_instances, 3)
    plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
    plane_center = np.random.randn(batch_size, n_max_instances)
    sphere_center = np.random.randn(batch_size, n_max_instances, 3)
    sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
    cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
    cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
    cone_apex = np.random.randn(batch_size, n_max_instances, 3)
    cone_axis = np.random.randn(batch_size, n_max_instances, 3)
    cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
    gt_parameters = {'plane_normal': plane_normal,
        'plane_center': plane_center,
        'sphere_center': sphere_center,
        'sphere_radius_squared': sphere_radius_squared,
        'cylinder_axis': cylinder_axis,
        'cylinder_center': cylinder_center,
        'cylinder_radius_square': cylinder_radius_square,
        'cone_apex': cone_apex,
        'cone_axis': cone_axis,
        'cone_half_angle': cone_half_angle}
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    X_torch = torch.from_numpy(X).float().to(device)
    T_gt_torch = torch.from_numpy(T_gt).long().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    gt_parameters_torch = {'plane_normal': torch.from_numpy(gt_parameters['plane_normal']).float().to(device),
        'plane_center': torch.from_numpy(gt_parameters['plane_center']).float().to(device),
        'sphere_center': torch.from_numpy(gt_parameters['sphere_center']).float().to(device),
        'sphere_radius_squared': torch.from_numpy(gt_parameters['sphere_radius_squared']).float().to(device),
        'cylinder_axis': torch.from_numpy(gt_parameters['cylinder_axis']).float().to(device),
        'cylinder_center': torch.from_numpy(gt_parameters['cylinder_center']).float().to(device),
        'cylinder_radius_square': torch.from_numpy(gt_parameters['cylinder_radius_square']).float().to(device),
        'cone_apex': torch.from_numpy(gt_parameters['cone_apex']).float().to(device),
        'cone_axis': torch.from_numpy(gt_parameters['cone_axis']).float().to(device),
        'cone_half_angle': torch.from_numpy(gt_parameters['cone_half_angle']).float().to(device)}
    predicted_parameters_torch = compute_parameters(P_torch, W_torch, X_torch)
    matching_indices_torch = hungarian_matching(W_torch, I_gt_torch)
    parameter_loss_torch = compute_parameter_loss(predicted_parameters_torch, gt_parameters_torch, matching_indices_torch, T_gt_torch, is_eval=False, classes=['plane','sphere','cylinder','cone'])
    parameter_loss_torch = parameter_loss_torch.detach().cpu().numpy()
    print('parameter_loss_torch', parameter_loss_torch)
    # Debugging with Tensorflow
    P_tensorflow = tf.constant(P, dtype=tf.float32)
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    X_tensorflow = tf.constant(X, dtype=tf.float32)
    T_gt_tensorflow = tf.constant(T_gt, dtype=tf.int32)
    I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
    gt_parameters_tensorflow = {'plane_normal': tf.constant(gt_parameters['plane_normal'], dtype=tf.float32),
        'plane_center': tf.constant(gt_parameters['plane_center'], dtype=tf.float32),
        'sphere_center': tf.constant(gt_parameters['sphere_center'], dtype=tf.float32),
        'sphere_radius_squared': tf.constant(gt_parameters['sphere_radius_squared'], dtype=tf.float32),
        'cylinder_axis': tf.constant(gt_parameters['cylinder_axis'], dtype=tf.float32),
        'cylinder_center': tf.constant(gt_parameters['cylinder_center'], dtype=tf.float32),
        'cylinder_radius_square': tf.constant(gt_parameters['cylinder_radius_square'], dtype=tf.float32),
        'cone_apex': tf.constant(gt_parameters['cone_apex'], dtype=tf.float32),
        'cone_axis': tf.constant(gt_parameters['cone_axis'], dtype=tf.float32),
        'cone_half_angle': tf.constant(gt_parameters['cone_half_angle'], dtype=tf.float32)}
    predicted_parameters_tensorflow = compute_parameters_tensorflow(P_tensorflow, W_tensorflow, X_tensorflow)
    matching_indices_tensorflow = tf.stop_gradient(tf.py_func(hungarian_matching_tensorflow, [W_tensorflow, I_gt_tensorflow], Tout=tf.int32))
    parameter_loss_tensorflow = compute_parameter_loss_tensorflow(predicted_parameters_tensorflow, gt_parameters_tensorflow, matching_indices_tensorflow, T_gt_tensorflow, is_eval=False, classes=['plane', 'sphere', 'cylinder', 'cone'])
    sess = tf.Session()
    parameter_loss_tensorflow = sess.run(parameter_loss_tensorflow)
    # NOTE(review): the gt dicts use key 'cylinder_radius_square' (no "d");
    # that key is never read by compute_parameter_loss*, so it is harmless
    # here, but it differs from the 'cylinder_radius_squared' key used by
    # compute_parameters — confirm which spelling is canonical.
    print(np.abs(parameter_loss_tensorflow - parameter_loss_torch).max())
def sequence_mask(lengths, maxlen=None):
    """PyTorch analogue of tf.sequence_mask.

    Given a 1-D (or batched) tensor of lengths, returns a boolean tensor
    with one extra trailing dimension of size `maxlen` where
    mask[..., j] is True iff j < lengths[...]. `maxlen` defaults to the
    largest length present.
    """
    if maxlen is None:
        maxlen = lengths.max()
    positions = torch.arange(0, maxlen, 1).to(lengths.device)
    return positions < lengths.unsqueeze(dim=-1)
def get_mask_gt(I_gt, n_max_instances):
    """Boolean BxK mask marking which instance slots are present in I_gt."""
    # Labels run 0..n-1 (with -1 for "unknown"), so max+1 counts the known
    # instances per batch item; -1 labels never extend the count.
    n_instances_gt = torch.max(I_gt, dim=1)[0] + 1
    return sequence_mask(n_instances_gt, maxlen=n_max_instances)
def get_mask_gt_tensorflow(I_gt, n_max_instances):
    """Boolean BxK mask marking which instance slots are present in I_gt (TensorFlow)."""
    # max label + 1 counts the known instances; -1 ("unknown") never extends it.
    n_instances_gt = tf.reduce_max(I_gt, axis=1) + 1
    return tf.sequence_mask(n_instances_gt, maxlen=n_max_instances)
if __name__ == '__main__':
    # Compare get_mask_gt against its TensorFlow twin. Half of the batch uses
    # the full label range, the other half only the lower half, so the two
    # halves produce different mask widths.
    batch_size = 100
    num_points = 1024
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    I_gt1 = np.random.randint(0, n_max_instances, (batch_size-batch_size//2, num_points))
    I_gt2 = np.random.randint(0, n_max_instances//2, (batch_size//2, num_points))
    I_gt = np.concatenate((I_gt1, I_gt2), axis=0)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    mask_gt_torch = get_mask_gt(I_gt_torch, n_max_instances)
    mask_gt_torch = mask_gt_torch.detach().cpu().numpy()
    print('mask_gt_torch', mask_gt_torch)
    # Debugging with Tensorflow
    I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
    mask_gt_tensorflow = get_mask_gt_tensorflow(I_gt_tensorflow, n_max_instances)
    sess = tf.Session()
    mask_gt_tensorflow = sess.run(mask_gt_tensorflow)
    # Masks are boolean, so exact equality is expected.
    print(np.all(mask_gt_torch == mask_gt_tensorflow))
def reduce_mean_masked_instance(loss, mask_gt):
    """Mean of `loss` over the valid (masked-in) instances of each batch item.

    Args:
        loss: BxK per-instance losses.
        mask_gt: BxK boolean validity mask.

    Returns:
        Length-B tensor; batch items with no valid instance yield 0 instead
        of dividing by zero.
    """
    loss = torch.where(mask_gt, loss, torch.zeros_like(loss))
    # Use torch's canonical `dim=` kwarg (the original mixed `axis=`/`dim=`).
    reduced_loss = torch.sum(loss, dim=1)  # B
    denom = torch.sum(mask_gt.float(), dim=1)  # B
    # Guard the all-masked-out case: select 0 rather than 0/0.
    return torch.where(denom > 0, reduced_loss / denom, torch.zeros_like(reduced_loss))  # B
def collect_losses(normal_loss, normal_loss_multiplier, type_loss, type_loss_multiplier, avg_miou_loss, miou_loss, miou_loss_multiplier,
                   avg_residue_loss, residue_loss, residue_loss_multiplier, avg_parameter_loss, parameter_loss, parameter_loss_multiplier,
                   total_loss_multiplier):
    """Combine the individual losses into one weighted total (PyTorch).

    Each `*_multiplier <= 0` disables that term in the total; the term's
    mean is still computed and returned for logging. The per-instance
    tensors `miou_loss`, `residue_loss` and `parameter_loss` are kept in
    the signature for interface compatibility, but only the per-data
    averages (`avg_*`) enter the total (the original bound them to unused
    locals, which are removed here).

    Returns:
        (total_loss, total_normal_loss, total_type_loss, total_miou_loss,
         total_residue_loss, total_parameter_loss)
    """
    total_loss = 0
    # Normal Loss
    total_normal_loss = torch.mean(normal_loss)
    if normal_loss_multiplier > 0:
        total_loss = total_loss + normal_loss_multiplier * total_normal_loss
    # Type loss
    total_type_loss = torch.mean(type_loss)
    if type_loss_multiplier > 0:
        total_loss = total_loss + type_loss_multiplier * total_type_loss
    # mIoU Loss
    total_miou_loss = torch.mean(avg_miou_loss)
    if miou_loss_multiplier > 0:
        total_loss = total_loss + miou_loss_multiplier * total_miou_loss
    # Residue Loss
    total_residue_loss = torch.mean(avg_residue_loss)
    if residue_loss_multiplier > 0:
        total_loss = total_loss + residue_loss_multiplier * total_residue_loss
    # Parameter Loss
    total_parameter_loss = torch.mean(avg_parameter_loss)
    if parameter_loss_multiplier > 0:
        total_loss = total_loss + parameter_loss_multiplier * total_parameter_loss
    total_loss = total_loss * total_loss_multiplier
    return total_loss, total_normal_loss, total_type_loss, total_miou_loss, total_residue_loss, total_parameter_loss
def compute_all_losses(P, W, I_gt, X, X_gt, T, T_gt, gt_parameters, points_per_instance,
        normal_loss_multiplier, type_loss_multiplier, miou_loss_multiplier, residue_loss_multiplier, parameter_loss_multiplier, total_loss_multiplier, is_eval,
        mode_seg='mIoU', classes=['plane','sphere','cylinder','cone']):
    """Compute every loss term and their weighted total (PyTorch path).

    Each `*_multiplier <= 0` skips that term's computation and substitutes
    zeros of the expected shape. `mode_seg` selects which of the two values
    returned by compute_miou_loss drives the segmentation loss.

    Returns a 9-tuple: the six scalars from collect_losses followed by the
    fitted plane normals, cylinder axes and cone axes (or three Nones when
    neither the residue nor the parameter loss was enabled, since the
    parameters are only fitted in that case).
    """
    assert(mode_seg in ['mIoU', 'intersection'])
    batch_size, _, n_max_instances = W.size()
    matching_indices = hungarian_matching(W, I_gt)
    # Parameters are only needed (and only fitted) for residue/parameter losses.
    if (residue_loss_multiplier>0) or (parameter_loss_multiplier>0):
        predicted_parameters = compute_parameters(P, W, X)
    mask_gt = get_mask_gt(I_gt, n_max_instances)
    if normal_loss_multiplier>0:
        normal_loss = compute_normal_loss(X, X_gt, angle_diff=is_eval)
    else:
        # NOTE(review): placeholder is BxK, while the computed normal loss is
        # presumably per-point — harmless since only its mean is used, but
        # confirm the intended shape.
        normal_loss = torch.zeros([batch_size, n_max_instances]).to(P.device)
    if type_loss_multiplier>0:
        type_loss = compute_per_point_type_loss(T, I_gt, T_gt, is_eval)
    else:
        type_loss = torch.zeros([batch_size, n_max_instances]).to(P.device)
    # compute_miou_loss returns (miou, intersection); mode_seg picks which one.
    if (mode_seg == 'mIoU') and (miou_loss_multiplier>0):
        miou_loss, _ = compute_miou_loss(W, I_gt, matching_indices)
        avg_miou_loss = reduce_mean_masked_instance(miou_loss, mask_gt)
    elif (mode_seg == 'intersection') and (miou_loss_multiplier>0):
        _, miou_loss = compute_miou_loss(W, I_gt, matching_indices)
        avg_miou_loss = reduce_mean_masked_instance(miou_loss, mask_gt)
    else:
        miou_loss = torch.zeros([batch_size, n_max_instances]).to(P.device)
        avg_miou_loss = torch.zeros([batch_size]).to(P.device)
    if residue_loss_multiplier>0:
        residue_loss, residue_per_point_array = compute_residue_loss(predicted_parameters, matching_indices, points_per_instance, T_gt, classes=classes)
        avg_residue_loss = reduce_mean_masked_instance(residue_loss, mask_gt)
    else:
        residue_loss = torch.zeros([batch_size, n_max_instances]).to(P.device)
        avg_residue_loss = torch.zeros([batch_size]).to(P.device)
    if parameter_loss_multiplier>0:
        parameter_loss = compute_parameter_loss(predicted_parameters, gt_parameters, matching_indices, T_gt, is_eval, classes=classes)
        avg_parameter_loss = reduce_mean_masked_instance(parameter_loss, mask_gt)
    else:
        parameter_loss = torch.zeros([batch_size, n_max_instances]).to(P.device)
        avg_parameter_loss = torch.zeros([batch_size]).to(P.device)
    total_loss, total_normal_loss, total_type_loss, total_miou_loss, total_residue_loss, total_parameter_loss = \
        collect_losses(normal_loss, normal_loss_multiplier, type_loss, type_loss_multiplier, avg_miou_loss, miou_loss, miou_loss_multiplier,
            avg_residue_loss, residue_loss, residue_loss_multiplier, avg_parameter_loss, parameter_loss, parameter_loss_multiplier,
            total_loss_multiplier)
    if (residue_loss_multiplier > 0) or (parameter_loss_multiplier > 0):
        return total_loss, total_normal_loss, total_type_loss, total_miou_loss, total_residue_loss, total_parameter_loss, predicted_parameters['plane_normal'], predicted_parameters['cylinder_axis'], predicted_parameters['cone_axis']
    else:
        return total_loss, total_normal_loss, total_type_loss, total_miou_loss, total_residue_loss, total_parameter_loss, None, None, None
def reduce_mean_masked_instance_tensorflow(loss, mask_gt):
    """Mean of `loss` over valid instances per batch item (TensorFlow twin of reduce_mean_masked_instance)."""
    masked = tf.where(mask_gt, loss, tf.zeros_like(loss))
    per_data_sum = tf.reduce_sum(masked, axis=1)  # B
    valid_count = tf.reduce_sum(tf.cast(mask_gt, tf.float32), axis=1)  # B
    # Select 0 instead of dividing by zero when no instance is valid.
    return tf.where(valid_count > 0, per_data_sum / valid_count, tf.zeros_like(per_data_sum))  # B
def collect_losses_tensorflow(normal_loss, normal_loss_multiplier, type_loss, type_loss_multiplier, avg_miou_loss, miou_loss, miou_loss_multiplier,
                              avg_residue_loss, residue_loss, residue_loss_multiplier, avg_parameter_loss, parameter_loss, parameter_loss_multiplier,
                              total_loss_multiplier):
    """Combine the individual losses into one weighted scalar (TensorFlow).

    TensorFlow twin of `collect_losses`, but returns only the total. Each
    `*_multiplier <= 0` disables its term. The per-instance tensors
    (`miou_loss`, `residue_loss`, `parameter_loss`) are kept in the
    signature for interface compatibility but are unused (the original
    bound them to dead locals, removed here).
    """
    total_loss = tf.zeros(shape=[], dtype=tf.float32)
    total_normal_loss = tf.reduce_mean(normal_loss)
    if normal_loss_multiplier > 0:
        total_loss += normal_loss_multiplier * total_normal_loss
    total_type_loss = tf.reduce_mean(type_loss)
    if type_loss_multiplier > 0:
        total_loss += type_loss_multiplier * total_type_loss
    total_miou_loss = tf.reduce_mean(avg_miou_loss)
    if miou_loss_multiplier > 0:
        total_loss += miou_loss_multiplier * total_miou_loss
    total_residue_loss = tf.reduce_mean(avg_residue_loss)
    if residue_loss_multiplier > 0:
        total_loss += residue_loss_multiplier * total_residue_loss
    total_parameter_loss = tf.reduce_mean(avg_parameter_loss)
    if parameter_loss_multiplier > 0:
        total_loss += parameter_loss_multiplier * total_parameter_loss
    total_loss *= total_loss_multiplier
    return total_loss
def compute_all_losses_tensorflow(P, W, I_gt, X, X_gt, T, T_gt, gt_parameters, points_per_instance,
                                  normal_loss_multiplier, type_loss_multiplier, miou_loss_multiplier, residue_loss_multiplier, parameter_loss_multiplier, total_loss_multiplier, is_eval,
                                  classes=['plane','sphere','cylinder','cone']):
    """Assemble the full SPFN loss (tensorflow reference path).

    P: BxNx3 points, W: BxNxK soft memberships, I_gt: BxN instance ids,
    X/X_gt: BxNx3 predicted/GT normals, T/T_gt: type predictions/labels.
    Returns the weighted scalar total loss.
    """
    # Bug fix: the original read `n_max_instances` as an (undefined) global
    # while assigning W.shape[2] to an unused `b_max_instances`.
    n_max_instances = W.shape[2]
    # Hungarian matching runs in a py_func; gradients must not flow through it.
    matching_indices = tf.stop_gradient(tf.py_func(hungarian_matching_tensorflow, [W, I_gt], Tout=tf.int32))
    predicted_parameters = compute_parameters_tensorflow(P, W, X)
    mask_gt = get_mask_gt_tensorflow(I_gt, n_max_instances)
    normal_loss = compute_normal_loss_tensorflow(X, X_gt, angle_diff=is_eval)
    type_loss = compute_per_point_type_loss_tensorflow(T, I_gt, T_gt, is_eval)
    miou_loss = compute_miou_loss_tensorflow(W, I_gt, matching_indices)
    avg_miou_loss = reduce_mean_masked_instance_tensorflow(miou_loss, mask_gt)
    residue_loss, residue_per_point_array = compute_residue_loss_tensorflow(predicted_parameters, matching_indices, points_per_instance, T_gt, classes=classes)
    avg_residue_loss = reduce_mean_masked_instance_tensorflow(residue_loss, mask_gt)
    parameter_loss = compute_parameter_loss_tensorflow(predicted_parameters, gt_parameters, matching_indices, T_gt, is_eval, classes=classes)
    avg_parameter_loss = reduce_mean_masked_instance_tensorflow(parameter_loss, mask_gt)
    total_loss = collect_losses_tensorflow(normal_loss, normal_loss_multiplier, type_loss, type_loss_multiplier, avg_miou_loss, miou_loss, miou_loss_multiplier,
                                           avg_residue_loss, residue_loss, residue_loss_multiplier, avg_parameter_loss, parameter_loss, parameter_loss_multiplier,
                                           total_loss_multiplier)
    return total_loss
if __name__ == '__main__':
    # Smoke test: evaluate the torch and tensorflow loss implementations on
    # identical random inputs and print the largest absolute deviation.
    batch_size = 100
    num_points = 1024
    num_points_instance = 512
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    # Random points, soft memberships and unit normals (predicted and GT).
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)
    X_gt = np.random.randn(batch_size, num_points, 3)
    X_gt = X_gt / np.linalg.norm(X_gt, axis=2, keepdims=True)
    T = np.random.rand(batch_size, num_points, 4)
    T_gt = np.random.randint(0, 4, (batch_size, n_max_instances))
    I_gt = np.random.randint(0, n_max_instances, (batch_size, num_points))
    # Random ground-truth primitive parameters; direction vectors are unit length.
    plane_normal = np.random.randn(batch_size, n_max_instances, 3)
    plane_normal = plane_normal / np.linalg.norm(plane_normal, axis=2, keepdims=True)
    plane_center = np.random.randn(batch_size, n_max_instances)
    sphere_center = np.random.randn(batch_size, n_max_instances, 3)
    sphere_radius_squared = np.abs(np.random.randn(batch_size, n_max_instances))
    cylinder_axis = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_axis = cylinder_axis / np.linalg.norm(cylinder_axis, axis=2, keepdims=True)
    cylinder_center = np.random.randn(batch_size, n_max_instances, 3)
    cylinder_radius_square = np.abs(np.random.randn(batch_size, n_max_instances))
    cone_apex = np.random.randn(batch_size, n_max_instances, 3)
    cone_axis = np.random.randn(batch_size, n_max_instances, 3)
    cone_half_angle = np.abs(np.random.randn(batch_size, n_max_instances))
    points_per_instance = np.random.randn(batch_size, n_max_instances, num_points_instance, 3)
    # All loss terms enabled with unit weight.
    normal_loss_multiplier = 1.0
    type_loss_multiplier = 1.0
    miou_loss_multiplier = 1.0
    residue_loss_multiplier = 1.0
    parameter_loss_multiplier = 1.0
    total_loss_multiplier = 1.0
    is_eval = False
    gt_parameters = {'plane_normal': plane_normal,
                     'plane_center': plane_center,
                     'sphere_center': sphere_center,
                     'sphere_radius_squared': sphere_radius_squared,
                     'cylinder_axis': cylinder_axis,
                     'cylinder_center': cylinder_center,
                     'cylinder_radius_square': cylinder_radius_square,
                     'cone_apex': cone_apex,
                     'cone_axis': cone_axis,
                     'cone_half_angle': cone_half_angle}
    # Torch path.
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    X_torch = torch.from_numpy(X).float().to(device)
    X_gt_torch = torch.from_numpy(X_gt).float().to(device)
    T_torch = torch.from_numpy(T).float().to(device)
    T_gt_torch = torch.from_numpy(T_gt).long().to(device)
    I_gt_torch = torch.from_numpy(I_gt).long().to(device)
    gt_parameters_torch = {'plane_normal': torch.from_numpy(gt_parameters['plane_normal']).float().to(device),
                           'plane_center': torch.from_numpy(gt_parameters['plane_center']).float().to(device),
                           'sphere_center': torch.from_numpy(gt_parameters['sphere_center']).float().to(device),
                           'sphere_radius_squared': torch.from_numpy(gt_parameters['sphere_radius_squared']).float().to(device),
                           'cylinder_axis': torch.from_numpy(gt_parameters['cylinder_axis']).float().to(device),
                           'cylinder_center': torch.from_numpy(gt_parameters['cylinder_center']).float().to(device),
                           'cylinder_radius_square': torch.from_numpy(gt_parameters['cylinder_radius_square']).float().to(device),
                           'cone_apex': torch.from_numpy(gt_parameters['cone_apex']).float().to(device),
                           'cone_axis': torch.from_numpy(gt_parameters['cone_axis']).float().to(device),
                           'cone_half_angle': torch.from_numpy(gt_parameters['cone_half_angle']).float().to(device)}
    # NOTE(review): .long() truncates the float sample coordinates while the
    # tensorflow path below keeps them float32 — likely should be .float(); confirm.
    points_per_instance_torch = torch.from_numpy(points_per_instance).long().to(device)
    total_loss_torch = compute_all_losses(P_torch, W_torch, I_gt_torch, X_torch, X_gt_torch, T_torch, T_gt_torch, gt_parameters_torch, points_per_instance_torch,
                                          normal_loss_multiplier, type_loss_multiplier, miou_loss_multiplier, residue_loss_multiplier, parameter_loss_multiplier,
                                          total_loss_multiplier, is_eval)[0]
    total_loss_torch = total_loss_torch.detach().cpu().numpy()
    print('total_loss_torch', total_loss_torch)
    # Debugging with Tensorflow
    P_tensorflow = tf.constant(P, dtype=tf.float32)
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    X_tensorflow = tf.constant(X, dtype=tf.float32)
    X_gt_tensorflow = tf.constant(X_gt, dtype=tf.float32)
    T_tensorflow = tf.constant(T, dtype=tf.float32)
    T_gt_tensorflow = tf.constant(T_gt, dtype=tf.int32)
    I_gt_tensorflow = tf.constant(I_gt, dtype=tf.int32)
    gt_parameters_tensorflow = {'plane_normal': tf.constant(gt_parameters['plane_normal'], dtype=tf.float32),
                                'plane_center': tf.constant(gt_parameters['plane_center'], dtype=tf.float32),
                                'sphere_center': tf.constant(gt_parameters['sphere_center'], dtype=tf.float32),
                                'sphere_radius_squared': tf.constant(gt_parameters['sphere_radius_squared'], dtype=tf.float32),
                                'cylinder_axis': tf.constant(gt_parameters['cylinder_axis'], dtype=tf.float32),
                                'cylinder_center': tf.constant(gt_parameters['cylinder_center'], dtype=tf.float32),
                                'cylinder_radius_square': tf.constant(gt_parameters['cylinder_radius_square'], dtype=tf.float32),
                                'cone_apex': tf.constant(gt_parameters['cone_apex'], dtype=tf.float32),
                                'cone_axis': tf.constant(gt_parameters['cone_axis'], dtype=tf.float32),
                                'cone_half_angle': tf.constant(gt_parameters['cone_half_angle'], dtype=tf.float32)}
    points_per_instance_tensorflow = tf.constant(points_per_instance, dtype=tf.float32)
    total_loss_tensorflow = compute_all_losses_tensorflow(P_tensorflow, W_tensorflow, I_gt_tensorflow, X_tensorflow, X_gt_tensorflow, T_tensorflow, T_gt_tensorflow, gt_parameters_tensorflow, points_per_instance_tensorflow,
                                                          normal_loss_multiplier, type_loss_multiplier, miou_loss_multiplier, residue_loss_multiplier, parameter_loss_multiplier,
                                                          total_loss_multiplier, is_eval)
    sess = tf.Session()
    total_loss_tensorflow = sess.run(total_loss_tensorflow)
    print(np.abs(total_loss_tensorflow - total_loss_torch).max()) | 55,650 | 62.819954 | 319 | py
CPFN | CPFN-master/SPFN/cone_fitter.py | # Importation of packages
import torch
import numpy as np
if __name__ == '__main__':
import tensorflow as tf
from SPFN.primitives import Cone
from SPFN.geometry_utils import guarded_matrix_solve_ls, guarded_matrix_solve_ls_tensorflow, weighted_plane_fitting, weighted_plane_fitting_tensorflow
def acos_safe(x):
return torch.acos(torch.clamp(x, min=-1.0+1e-6, max=1.0-1e-6))
def compute_parameters(P, W, X, div_eps=1e-10):
    """Estimate cone parameters (apex, axis, half-angle) per instance.

    P: BxNx3 points, W: BxNxK soft per-instance membership weights,
    X: BxNx3 point normals.  Returns (apex BxKx3, axis BxKx3, half_angle BxK).
    Relies on the project helpers guarded_matrix_solve_ls / weighted_plane_fitting.
    """
    batch_size, n_points, _ = P.size()
    _, _, n_max_instances = W.size()
    W_reshaped = W.transpose(1,2).contiguous().view(batch_size * n_max_instances, n_points) # BKxN
    # A - BKxNx3 (point normals, tiled once per instance)
    A = X.unsqueeze(1).expand(batch_size, n_max_instances, n_points, 3).contiguous().view(batch_size * n_max_instances, n_points, 3)
    # b - BKxNx1 (right-hand side n_i . p_i)
    b = torch.sum(P * X, dim=2).unsqueeze(1).expand(batch_size, n_max_instances, n_points).contiguous().view(batch_size * n_max_instances, n_points, 1)
    # Weighted least-squares solve of n_i . apex = n_i . p_i.
    apex = guarded_matrix_solve_ls(A, b, W_reshaped).view(batch_size, n_max_instances, 3) # BxKx3
    X_tiled = A
    # TODO: use P-apex instead of X for plane fitting
    plane_n, plane_c = weighted_plane_fitting(X_tiled, W_reshaped)
    axis = plane_n.view(batch_size, n_max_instances, 3) # BxKx3
    P_minus_apex = P.unsqueeze(2) - apex.unsqueeze(1) # BxNxKx3
    P_minus_apex_normalized = torch.nn.functional.normalize(P_minus_apex, p=2, dim=3, eps=1e-12)
    P_minus_apex_normalized_dot_axis = torch.sum(axis.unsqueeze(1) * P_minus_apex_normalized, dim=3) # BxNxK
    # flip direction of axis if wrong (weighted majority of apex-to-point directions)
    sgn_axis = torch.sign(torch.sum(W * P_minus_apex_normalized_dot_axis, dim=1)) # BxK
    sgn_axis = sgn_axis + (sgn_axis==0.0).float() # prevent sgn == 0
    axis = axis * sgn_axis.unsqueeze(2) # BxKx3
    # Half-angle = weighted mean angle between the axis and apex-to-point directions.
    tmp = W * acos_safe(torch.abs(P_minus_apex_normalized_dot_axis)) # BxNxK
    W_sum = torch.sum(W, dim=1) # BxK
    half_angle = torch.sum(tmp, dim=1) / (W_sum + div_eps) # BxK
    half_angle = torch.clamp(half_angle, min=1e-3, max=np.pi/2-1e-3) # angle cannot be too weird
    return apex, axis, half_angle
def acos_safe_tensorflow(x):
    # Keep inputs strictly inside (-1, 1) so acos and its gradient stay finite.
    clipped = tf.clip_by_value(x, -1.0+1e-6, 1.0-1e-6)
    return tf.math.acos(clipped)
def compute_parameters_tensorflow(P, W, X):
    """Estimate cone parameters (apex, axis, half-angle) per instance (TF path).

    P: BxNx3 points, W: BxNxK soft memberships, X: BxNx3 point normals.
    Returns (apex BxKx3, axis BxKx3, half_angle BxK).
    """
    batch_size = tf.shape(P)[0]
    n_points = tf.shape(P)[1]
    n_max_instances = W.get_shape()[2]
    W_reshaped = tf.reshape(tf.transpose(W, [0, 2, 1]), [batch_size * n_max_instances, n_points]) # BKxN
    # A - BKxNx3 (point normals, tiled once per instance)
    A = tf.reshape(tf.tile(tf.expand_dims(X, axis=1), [1, n_max_instances, 1, 1]), [batch_size * n_max_instances, n_points, 3]) # BKxNx3
    # b - BKxNx1 (right-hand side n_i . p_i)
    b = tf.expand_dims(tf.reshape(tf.tile(tf.expand_dims(tf.reduce_sum(P * X, axis=2), axis=1), [1, n_max_instances, 1]), [batch_size * n_max_instances, n_points]), axis=2)
    apex = tf.reshape(guarded_matrix_solve_ls_tensorflow(A, b, W_reshaped), [batch_size, n_max_instances, 3]) # BxKx3
    X_tiled = A
    # TODO: use P-apex instead of X for plane fitting
    plane_n, plane_c = weighted_plane_fitting_tensorflow(X_tiled, W_reshaped)
    axis = tf.reshape(plane_n, [batch_size, n_max_instances, 3]) # BxKx3
    P_minus_apex_normalized = tf.nn.l2_normalize(tf.expand_dims(P, axis=2) - tf.expand_dims(apex, 1), axis=3) # BxNxKx3
    P_minus_apex_normalized_dot_axis = tf.reduce_sum(tf.expand_dims(axis, axis=1) * P_minus_apex_normalized, axis=3) # BxNxK
    # flip direction of axis if wrong
    sgn_axis = tf.sign(tf.reduce_sum(W * P_minus_apex_normalized_dot_axis, axis=1)) # BxK
    sgn_axis += tf.to_float(tf.equal(sgn_axis, 0.0)) # prevent sgn == 0
    axis *= tf.expand_dims(sgn_axis, axis=2) # BxKx3
    tmp = W * acos_safe_tensorflow(tf.abs(P_minus_apex_normalized_dot_axis)) # BxNxK
    W_sum = tf.reduce_sum(W, axis=1) # BxK
    # NOTE(review): the torch version guards this division with +div_eps; here
    # W_sum can be exactly zero for an empty instance — confirm upstream masks this.
    half_angle = tf.reduce_sum(tmp, axis=1) / W_sum # BxK
    # Bug fix: tf.clip_by_value returns a new tensor; the original discarded
    # the result, so half_angle was never actually clamped (torch version clamps).
    half_angle = tf.clip_by_value(half_angle, 1e-3, np.pi / 2 - 1e-3) # angle cannot be too weird
    return apex, axis, half_angle
if __name__ == '__main__':
    # Smoke test: compare the torch and tensorflow cone-parameter estimators
    # on identical random inputs.
    batch_size = 100
    num_points = 1024
    n_max_instances = 12
    device = torch.device('cuda:0')
    np.random.seed(0)
    P = np.random.randn(batch_size, num_points, 3)
    W = np.random.rand(batch_size, num_points, n_max_instances)
    X = np.random.randn(batch_size, num_points, 3)
    X = X / np.linalg.norm(X, axis=2, keepdims=True)  # unit normals
    P_torch = torch.from_numpy(P).float().to(device)
    W_torch = torch.from_numpy(W).float().to(device)
    X_torch = torch.from_numpy(X).float().to(device)
    apex_torch, axis_torch, half_angle_torch = compute_parameters(P_torch, W_torch, X_torch)
    apex_torch = apex_torch.detach().cpu().numpy()
    axis_torch = axis_torch.detach().cpu().numpy()
    half_angle_torch = half_angle_torch.detach().cpu().numpy()
    print('apex_torch', apex_torch)
    print('axis_torch', axis_torch)
    print('half_angle_torch', half_angle_torch)
    # Debugging with Tensorflow
    P_tensorflow = tf.constant(P, dtype=tf.float32)
    W_tensorflow = tf.constant(W, dtype=tf.float32)
    X_tensorflow = tf.constant(X, dtype=tf.float32)
    apex_tensorflow, axis_tensorflow, half_angle_tensorflow = compute_parameters_tensorflow(P_tensorflow, W_tensorflow, X_tensorflow)
    sess = tf.Session()
    apex_tensorflow, axis_tensorflow, half_angle_tensorflow = sess.run([apex_tensorflow, axis_tensorflow, half_angle_tensorflow])
    print(np.abs(apex_tensorflow - apex_torch).max())
    # An axis is only defined up to sign, so compare against both orientations.
    print(np.minimum(np.abs(axis_tensorflow - axis_torch), np.abs(axis_tensorflow + axis_torch)).max())
    print(np.abs(half_angle_tensorflow - half_angle_torch).max())
def compute_residue_single(apex, axis, half_angle, p):
    """Squared cone-residue of points p w.r.t. cones (apex, axis, half_angle).

    apex: ...x3, axis: ...x3, half_angle: ..., p: ...x3; returns ... tensor of
    sin^2(angular gap to the cone surface) * |p - apex|^2.
    """
    diff = p - apex
    unit_diff = torch.nn.functional.normalize(diff, p=2, dim=-1, eps=1e-12)
    # Safe acos (module-level acos_safe, inlined): clamp into the open (-1, 1).
    cos_to_axis = torch.clamp(torch.sum(unit_diff * axis, dim=-1), min=-1.0+1e-6, max=1.0-1e-6)
    angle_to_axis = torch.acos(cos_to_axis)
    angular_gap = torch.clamp(torch.abs(angle_to_axis - half_angle), max=np.pi / 2)
    return torch.sin(angular_gap) ** 2 * torch.sum(diff * diff, dim=-1)
def compute_residue_single_tensorflow(apex, axis, half_angle, p):
    """Squared cone-residue of points p (TF path).

    apex: ...x3, axis: ...x3, half_angle: ..., p: ...x3.
    """
    diff = p - apex
    unit_diff = tf.nn.l2_normalize(diff, axis=-1)
    angle_to_axis = acos_safe_tensorflow(tf.reduce_sum(unit_diff * axis, axis=-1))
    angular_gap = tf.minimum(tf.abs(angle_to_axis - half_angle), np.pi / 2)
    return tf.square(tf.sin(angular_gap)) * tf.reduce_sum(diff * diff, axis=-1)
if __name__ == '__main__':
    # Smoke test: compare the torch and tensorflow cone-residue computations
    # on identical random inputs.
    batch_size = 100
    num_points = 1024
    device = torch.device('cuda:0')
    np.random.seed(0)
    apex = np.random.randn(batch_size, num_points, 3)
    axis = np.random.randn(batch_size, num_points, 3)
    half_angle = np.random.randn(batch_size, num_points)
    p = np.random.randn(batch_size, num_points, 3)
    apex_torch = torch.from_numpy(apex).float().to(device)
    axis_torch = torch.from_numpy(axis).float().to(device)
    half_angle_torch = torch.from_numpy(half_angle).float().to(device)
    p_torch = torch.from_numpy(p).float().to(device)
    loss_torch = compute_residue_single(apex_torch, axis_torch, half_angle_torch, p_torch)
    loss_torch = loss_torch.detach().cpu().numpy()
    print('loss_torch', loss_torch)
    # Debugging with Tensorflow
    apex_tensorflow = tf.constant(apex, dtype=tf.float32)
    axis_tensorflow = tf.constant(axis, dtype=tf.float32)
    half_angle_tensorflow = tf.constant(half_angle, dtype=tf.float32)
    p_tensorflow = tf.constant(p, dtype=tf.float32)
    loss_tensorflow = compute_residue_single_tensorflow(apex_tensorflow, axis_tensorflow, half_angle_tensorflow, p_tensorflow)
    sess = tf.Session()
    loss_tensorflow = sess.run(loss_tensorflow)
    print(np.abs(loss_torch - loss_tensorflow).max())
def compute_parameter_loss(predicted_axis, gt_axis, matching_indices, angle_diff):
    """Axis loss between matched predicted axes and ground-truth axes.

    predicted_axis: BxK1x3, gt_axis: BxK2x3, matching_indices: BxK2 (index
    into K1).  Returns BxK2: angle when angle_diff is truthy, else 1 - |dot|.
    Both axes are treated as sign-invariant via the absolute dot product.
    """
    bsize, n_prim, _ = gt_axis.size()
    gather_idx = matching_indices.unsqueeze(2).expand(bsize, n_prim, 3)
    matched_axis = torch.gather(predicted_axis, 1, gather_idx)
    dot_abs = (matched_axis * gt_axis).sum(dim=2).abs()  # BxK
    return acos_safe(dot_abs) if angle_diff else 1.0 - dot_abs
def batched_gather(data, indices, axis):
    """Per-batch gather along dimension `axis` (tf.gather_nd helper)."""
    # data - Bx...xKx..., axis is where dimension K is
    # indices - BxK
    # output[b, ..., k, ...] = in[b, ..., indices[b, k], ...]
    assert axis >= 1
    ndims = data.get_shape().ndims # allow dynamic rank
    if axis > 1:
        # transpose data to BxKx... so gather_nd can index (batch, k) pairs
        perm = np.arange(ndims)
        perm[axis] = 1
        perm[1] = axis
        data = tf.transpose(data, perm=perm)
    batch_size = tf.shape(data)[0]
    # Pair every index with its batch number: (b, indices[b, k]).
    batch_nums = tf.tile(tf.expand_dims(tf.expand_dims(tf.range(batch_size), axis=1), axis=2), multiples=[1, tf.shape(indices)[1], 1]) # BxKx1
    indices = tf.concat([batch_nums, tf.expand_dims(indices, axis=2)], axis=2) # BxKx2
    gathered_data = tf.gather_nd(data, indices=indices)
    if axis > 1:
        # perm only swaps dims 1 and `axis`, so it is its own inverse.
        gathered_data = tf.transpose(gathered_data, perm=perm)
    return gathered_data
def compute_parameter_loss_tensorflow(predicted_axis, gt_axis, matching_indices, angle_diff):
    """Axis loss between matched predicted axes and GT axes (TF path).

    Returns BxK: angle when angle_diff is truthy, else 1 - |dot|.
    """
    matched_axis = batched_gather(predicted_axis, matching_indices, axis=1)
    dot_abs = tf.abs(tf.reduce_sum(matched_axis * gt_axis, axis=2))  # BxK
    if angle_diff:
        return acos_safe_tensorflow(dot_abs)  # BxK
    return 1.0 - dot_abs  # BxK
if __name__ == '__main__':
    # Smoke test: compare the torch and tensorflow parameter-loss functions
    # on identical random inputs.
    batch_size = 100
    num_primitives1 = 15
    num_primitives2 = 5
    device = torch.device('cuda:0')
    np.random.seed(0)
    predicted_axis = np.random.randn(batch_size, num_primitives1, 3)
    gt_axis = np.random.randn(batch_size, num_primitives2, 3)
    matching_indices = np.random.randint(0, 15, (batch_size, num_primitives2))
    angle_diff = True
    predicted_axis_torch = torch.from_numpy(predicted_axis).float().to(device)
    gt_axis_torch = torch.from_numpy(gt_axis).float().to(device)
    matching_indices_torch = torch.from_numpy(matching_indices).long().to(device)
    loss_torch = compute_parameter_loss(predicted_axis_torch, gt_axis_torch, matching_indices_torch, angle_diff)
    loss_torch = loss_torch.detach().cpu().numpy()
    print('loss_torch', loss_torch)
    # Debugging with Tensorflow
    predicted_axis_tensorflow = tf.constant(predicted_axis, dtype=tf.float32)
    gt_axis_tensorflow = tf.constant(gt_axis, dtype=tf.float32)
    matching_indices_tensorflow = tf.constant(matching_indices, dtype=tf.int32)
    loss_tensorflow = compute_parameter_loss_tensorflow(predicted_axis_tensorflow, gt_axis_tensorflow, matching_indices_tensorflow, angle_diff)
    sess = tf.Session()
    loss_tensorflow = sess.run(loss_tensorflow)
    print(np.abs(loss_torch - loss_tensorflow).max())
def create_primitive_from_dict(d):
    """Build a Cone primitive from its serialized dict form (d['type'] must be 'cone')."""
    assert d['type'] == 'cone'
    apex = np.array([d[key] for key in ('apex_x', 'apex_y', 'apex_z')], dtype=float)
    axis = np.array([d[key] for key in ('axis_x', 'axis_y', 'axis_z')], dtype=float)
    return Cone(apex=apex, axis=axis, half_angle=float(d['semi_angle']))
def extract_parameter_data_as_dict(primitives, n_max_instances):
    """Pack ground-truth cone axes into a fixed-size array.

    primitives: list of primitive objects; only Cone entries contribute, at
    their list position.  Returns {'cone_axis_gt': n_max_instances x 3}.

    Cleanup: the original also built apex and half-angle arrays that were
    filled but never returned — that dead work is removed (only the axis is
    emitted, matching the returned dict).
    """
    axis_gt = np.zeros(dtype=float, shape=[n_max_instances, 3])
    for i, primitive in enumerate(primitives):
        if isinstance(primitive, Cone):
            axis_gt[i] = primitive.axis
    return {
        'cone_axis_gt': axis_gt,
    }
def extract_predicted_parameters_as_json(cone_apex, cone_axis, cone_half_angle, k):
    """Serialize one predicted cone (with a fixed z extent) into the JSON dict format."""
    cone = Cone(cone_apex, cone_axis, cone_half_angle, z_min=0.0, z_max=5.0)
    apex_x, apex_y, apex_z = cone.apex
    axis_x, axis_y, axis_z = cone.axis
    return {
        'type': 'cone',
        'apex_x': float(apex_x),
        'apex_y': float(apex_y),
        'apex_z': float(apex_z),
        'axis_x': float(axis_x),
        'axis_y': float(axis_y),
        'axis_z': float(axis_z),
        'angle': float(cone.half_angle * 2),
        'z_min': float(cone.z_min),
        'z_max': float(cone.z_max),
        'label': k,
    }
CPFN | CPFN-master/Utils/training_utils.py | # Importation of packages
import sys
import torch
import numpy as np
from SPFN import losses_implementation
# BN Decay
def get_batch_norm_decay(global_step, batch_size, bn_decay_step, staircase=True):
    """Exponentially decaying batch-norm momentum, floored at 1 - BN_DECAY_CLIP."""
    BN_INIT_DECAY = 0.5
    BN_DECAY_RATE = 0.5
    BN_DECAY_CLIP = 0.99
    progress = global_step * batch_size / bn_decay_step
    if staircase:
        # Decay in discrete steps rather than continuously.
        progress = int(np.floor(progress))
    decayed = BN_INIT_DECAY * BN_DECAY_RATE ** progress
    return max(decayed, 1 - BN_DECAY_CLIP)
def update_momentum(module, bn_momentum):
    """Set `momentum` on every submodule whose registered name contains 'bn'."""
    for submodule_name, submodule in module.named_modules():
        if 'bn' not in submodule_name:
            continue
        submodule.momentum = bn_momentum
# LR Decay
def get_learning_rate(init_learning_rate, global_step, batch_size, decay_step, decay_rate, staircase=True):
    """Step-decayed learning rate: init * decay_rate ** (seen_samples / decay_step)."""
    progress = global_step * batch_size / decay_step
    if staircase:
        # Decay in discrete steps rather than continuously.
        progress = int(np.floor(progress))
    return init_learning_rate * decay_rate ** progress
# Train For One Epoch
def patch_selection_train_val_epoch(dataloader, patchselec_module, epoch, optimizer, global_step, visualiser, args, conf, device, network_mode='train'):
    """Run one training or validation epoch of the patch-selection network.

    Returns (updated global_step, summed per-sample cross-entropy over the
    epoch).  Backpropagation and the global-step counter advance only when
    network_mode == 'train'.
    """
    assert(network_mode in ['train', 'val'])
    # Loading conf information related to current file
    batch_size = conf.get_batch_size()
    bn_decay_step = conf.get_bn_decay_step()
    decay_step = conf.get_decay_step()
    decay_rate = conf.get_decay_rate()
    init_learning_rate = conf.get_init_learning_rate()
    # Iteration over the dataset
    old_bn_momentum = get_batch_norm_decay(global_step, batch_size, bn_decay_step, staircase=True)
    old_learning_rate = get_learning_rate(init_learning_rate, global_step, batch_size, decay_step, decay_rate, staircase=True)
    total_loss = 0
    if network_mode == 'train':
        patchselec_module.train()
    elif network_mode == 'val':
        # Bug fix: a stray .train() call here used to flip the module straight
        # back to training mode, so validation ran with BN/dropout in training
        # behaviour (compare spfn_train_val_epoch, which has no such call).
        patchselec_module.eval()
    for batch_id, data in enumerate(dataloader, 0):
        optimizer.zero_grad()
        # Updating the BN decay
        bn_momentum = get_batch_norm_decay(global_step, batch_size, bn_decay_step, staircase=True)
        if old_bn_momentum != bn_momentum:
            update_momentum(patchselec_module, bn_momentum)
            old_bn_momentum = bn_momentum
        # Updating the LR decay
        learning_rate = get_learning_rate(init_learning_rate, global_step, batch_size, decay_step, decay_rate, staircase=True)
        if old_learning_rate != learning_rate:
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
            old_learning_rate = learning_rate
        # Forward pass: per-point binary classification (2 logits per point).
        points = data[0].type(torch.FloatTensor).to(device)
        batch_size_current, num_points, _ = points.size()
        output_labels = data[1].type(torch.LongTensor).to(device)
        predicted_labels = patchselec_module(points)[0]
        predicted_labels = predicted_labels.contiguous().view(batch_size_current * num_points, 2)
        output_labels = output_labels.view(batch_size_current * num_points)
        loss = torch.nn.functional.cross_entropy(predicted_labels, output_labels)
        total_loss += batch_size_current * loss.item()
        # Printing Values
        if batch_id%100==0: print('[%s][Epoch %d - Iteration %d] Loss: %f' % (network_mode, epoch, batch_id, loss.item()))
        if network_mode == 'train':
            # Backward Pass
            loss.backward()
            optimizer.step()
            global_step += 1
        # Updating the visualiser
        visualiser.log_loss(loss.item(), '%s_loss' % network_mode)
        visualiser.update()
    return global_step, total_loss
def spfn_train_val_epoch(dataloader, spfn_module, epoch, optimizer, global_step, visualiser, args, conf, device, network_mode='train'):
    """Run one SPFN training or validation epoch.

    Returns (updated global_step, summed per-sample total loss).  Gradients
    flow and global_step advances only when network_mode == 'train'.
    """
    assert(network_mode in ['train', 'val'])
    # Loading conf information related to current file
    batch_size = conf.get_batch_size()
    bn_decay_step = conf.get_bn_decay_step()
    decay_step = conf.get_decay_step()
    decay_rate = conf.get_decay_rate()
    init_learning_rate = conf.get_init_learning_rate()
    # Losses
    miou_loss_multiplier = conf.get_miou_loss_multiplier()
    normal_loss_multiplier = conf.get_normal_loss_multiplier()
    type_loss_multiplier = conf.get_type_loss_multiplier()
    parameter_loss_multiplier = conf.get_parameter_loss_multiplier()
    residue_loss_multiplier = conf.get_residue_loss_multiplier()
    total_loss_multiplier = conf.get_total_loss_multiplier()
    # Iteration over the dataset
    old_bn_momentum = get_batch_norm_decay(global_step, batch_size, bn_decay_step, staircase=True)
    old_learning_rate = get_learning_rate(init_learning_rate, global_step, batch_size, decay_step, decay_rate, staircase=True)
    total_loss_ = 0
    if network_mode == 'train':
        spfn_module.train()
    elif network_mode == 'val':
        spfn_module.eval()
    for batch_id, data in enumerate(dataloader, 0):
        if batch_id%100==0: print('[%s][Epoch %d - Iteration %d]' % (network_mode, epoch, batch_id))
        optimizer.zero_grad()
        # Updating the BN decay
        bn_momentum = get_batch_norm_decay(global_step, batch_size, bn_decay_step, staircase=True)
        if old_bn_momentum != bn_momentum:
            update_momentum(spfn_module, bn_momentum)
            old_bn_momentum = bn_momentum
        # Updating the LR decay
        learning_rate = get_learning_rate(init_learning_rate, global_step, batch_size, decay_step, decay_rate, staircase=True)
        if old_learning_rate != learning_rate:
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
            old_learning_rate = learning_rate
        # Loading the inputs
        P = data[0].type(torch.FloatTensor).to(device)
        batch_size_current, num_points, _ = P.size()
        X_gt = data[1].type(torch.FloatTensor).to(device)
        points_per_instance = data[2].type(torch.FloatTensor).to(device)
        _, nb_primitives, nb_points_primitives, _ = points_per_instance.size()
        I_gt = data[3].type(torch.LongTensor).to(device)
        T_gt = data[4].type(torch.LongTensor).to(device)
        plane_n_gt = data[5].type(torch.FloatTensor).to(device)
        cylinder_axis_gt = data[6].type(torch.FloatTensor).to(device)
        cone_axis_gt = data[7].type(torch.FloatTensor).to(device)
        gt_parameters = {'plane_normal': plane_n_gt, 'cylinder_axis': cylinder_axis_gt, 'cone_axis': cone_axis_gt}
        # NOTE(review): glob/loc features stay undefined when args.network is
        # neither 'GlobalSPFN' nor 'LocalSPFN' — presumably those are the only
        # two values; confirm against the caller.
        if args.network == 'GlobalSPFN':
            glob_features = None
            loc_features = None
        elif args.network == 'LocalSPFN':
            glob_features = data[8].type(torch.FloatTensor).to(device)
            loc_features = data[9].type(torch.FloatTensor).to(device)
        # Forward Pass
        X, T, W, _, _ = spfn_module(P, glob_features=glob_features, loc_features=loc_features)
        X = torch.nn.functional.normalize(X, p=2, dim=2, eps=1e-12)  # unit normals
        W = torch.softmax(W, dim=2)  # soft per-point instance memberships
        total_loss, total_normal_loss, total_type_loss, total_miou_loss, total_residue_loss, total_parameter_loss, _, _, _ = losses_implementation.compute_all_losses(
            P, W, I_gt, X, X_gt, T, T_gt, gt_parameters, points_per_instance, normal_loss_multiplier,
            type_loss_multiplier, miou_loss_multiplier, residue_loss_multiplier, parameter_loss_multiplier,
            total_loss_multiplier, False, mode_seg='mIoU', classes=conf.get_list_of_primitives())
        total_loss_ += batch_size_current * total_loss.item()
        if network_mode == 'train':
            # Backward Pass
            total_loss.backward()
            # Skip the optimizer step when any gradient is NaN/Inf.
            # NOTE(review): assumes every trainable parameter received a grad
            # (param.grad not None) — confirm.
            flag = False
            for param in spfn_module.parameters():
                if param.requires_grad and ((torch.any(torch.isinf(param.grad))) or torch.any(torch.isnan(param.grad))):
                    flag = True
                    break
            if not flag:
                optimizer.step()
            global_step += 1
        # Printing Values
        if batch_id%100==0:
            print('Loss Value: ', total_loss.item())
            print('Normal Loss', total_normal_loss.item())
            print('Type Loss', total_type_loss.item())
            print('mIoU Loss', total_miou_loss.item())
            print('Residue Loss', total_residue_loss.item())
            print('Parameter Loss', total_parameter_loss.item())
        # Updating the visualiser
        visualiser.log_loss(total_loss.item(), '%s_loss'%network_mode)
        visualiser.log_loss(total_normal_loss.item(), '%s_normal_loss'%network_mode)
        visualiser.log_loss(total_type_loss.item(), '%s_type_loss'%network_mode)
        visualiser.log_loss(total_miou_loss.item(), '%s_miou_loss'%network_mode)
        visualiser.log_loss(total_residue_loss.item(), '%s_residue_loss'%network_mode)
        visualiser.log_loss(total_parameter_loss.item(), '%s_parameter_loss'%network_mode)
        visualiser.update()
    return global_step, total_loss_ | 8,974 | 49.994318 | 166 | py
CPFN | CPFN-master/Utils/training_visualisation.py | import torch
import numpy as np
from torch import nn
from visdom import Visdom
ORANGE = np.array([[255, 105, 0]])
BLUE = np.array([[40, 40, 255]])
RED = np.array([[255, 40, 40]])
class Visualiser(object):
    """Buffers losses/images and periodically pushes them to a Visdom server."""
    def __init__(self, plotting_interval, port=8097):
        self.vis = Visdom(port=port)
        self.line_plotter = VisdomLinePlotter(self.vis)
        self.plotting_interval = plotting_interval
        self.plotting_step = 0
        self.loss_history_dict = {}
        self.image_dict = {}
        self.window_elements = []
    def log_image(self, image, name):
        """Store a [0, 1]-clamped numpy copy of `image` (torch tensor) for the next flush."""
        clamped = torch.clamp(image, 0, 1)
        self.image_dict[name] = clamped.cpu().detach().numpy()
        if name not in self.window_elements:
            self.window_elements.append(name)
    def log_loss(self, loss, name):
        """Append `loss` to a fixed-size rolling history (NaN-padded until full)."""
        history = self.loss_history_dict.get(name, [np.nan] * self.plotting_interval)
        self.loss_history_dict[name] = history[1:] + [loss]
        if name not in self.window_elements:
            self.window_elements.append(name)
    def update(self):
        """Every `plotting_interval` calls, push rolling loss means and images to Visdom."""
        if self.plotting_step % self.plotting_interval == 0:
            for name, history in self.loss_history_dict.items():
                rolling_mean = torch.tensor(history).mean().item()
                self.line_plotter.plot(name, name, name, self.plotting_step, rolling_mean, color=ORANGE)
            for name, image in self.image_dict.items():
                self.vis.image(image, opts=dict(title=name), win=self.window_elements.index(name))
        self.plotting_step += 1
class VisdomLinePlotter(object):
    """Thin wrapper around visdom line windows, one window per variable name."""
    def __init__(self, vis, env_name='main'):
        self.vis = vis
        self.env = env_name
        self.plots = {}
    def plot(self, var_name, split_name, title_name, x, y, color):
        """Create the window on the first call for `var_name`, append to it afterwards."""
        opts = dict(
            legend=[split_name],
            title=title_name,
            linecolor=color,
            xlabel='Training steps',
            ylabel=var_name
        )
        if var_name in self.plots:
            self.vis.line(X=np.array([x]), Y=np.array([y]), env=self.env, opts=opts,
                          win=self.plots[var_name], name=split_name, update='append')
        else:
            # Seed the window with a duplicated point so visdom draws a line.
            self.plots[var_name] = self.vis.line(X=np.array([x, x]), Y=np.array([y, y]), env=self.env, opts=opts)
CPFN | CPFN-master/Utils/merging_utils.py | # Importation of packages
import torch
import numba
import numpy as np
def similarity_soft(spfn_labels, predicted_labels, point_indices):
    """Gram matrix of soft primitive co-membership across patches and the global prediction.

    spfn_labels: NxK2 global soft labels; predicted_labels: BxnxK1 per-patch
    soft labels; point_indices: Bxn mapping each patch point to a global point.
    Returns the (B*K1+K2) x (B*K1+K2) similarity (scatter matrix) between all
    primitive columns.
    """
    num_points_per_object, max_label_per_object = spfn_labels.size()
    nb_patches, num_points_per_patch, max_label_per_patch = predicted_labels.size()
    point2primitive_prediction = torch.zeros([num_points_per_object, nb_patches*max_label_per_patch+max_label_per_object]).to(predicted_labels.device)
    for b in range(nb_patches):
        point2primitive_prediction[point_indices[b], b*max_label_per_patch:(b+1)*max_label_per_patch] += predicted_labels[b]
    # Bug fix: the global (SPFN) labels fill the trailing K2 columns and must be
    # written once after the patch loop; the original assigned them inside the
    # loop with slice (b+1)*K1:, whose width only matches spfn_labels on the
    # final iteration (shape mismatch for nb_patches > 1).
    point2primitive_prediction[:, nb_patches*max_label_per_patch:] = spfn_labels
    intersection_primitives = torch.mm(point2primitive_prediction.transpose(0,1), point2primitive_prediction)
    return intersection_primitives
@numba.jit(numba.int64[:](numba.int64[:,:], numba.int64[:], numba.float64[:]), nopython=True)
def heuristic_merging(pairs_id, patch_id, penalty_value):
    # Greedy agglomerative merging of primitive segments (numba nopython).
    # pairs_id: Px2 candidate segment pairs, patch_id: S patch index per segment,
    # penalty_value: P merge scores — the highest-scoring remaining pair is
    # merged first.  Returns the S merged segment ids.
    pairs_id1 = pairs_id[:,0]
    pairs_id2 = pairs_id[:,1]
    segment_id = np.arange(len(patch_id), dtype=numba.int64)
    # One-hot patch membership per segment; rows become group sums as segments merge.
    patch_1hot = np.eye(patch_id.max()+1)[patch_id]
    while len(pairs_id1) > 0:
        # Merge the best-scoring remaining pair: relabel pair_id2's group to pair_id1's.
        pair_id1 = pairs_id1[np.argmax(penalty_value)]
        pair_id2 = pairs_id2[np.argmax(penalty_value)]
        segment_id[segment_id==segment_id[pair_id2]] = segment_id[pair_id1]
        selection_row = segment_id==segment_id[pair_id1]
        patch_1hot[selection_row] = np.sum(patch_1hot[selection_row], axis=0)
        # Drop every candidate pair whose two groups now share a patch
        # (this also removes the pair that was just merged).
        intersection = np.sum(patch_1hot[pairs_id1] * patch_1hot[pairs_id2], axis=1)
        pairs_id1 = pairs_id1[intersection==0]
        pairs_id2 = pairs_id2[intersection==0]
        penalty_value = penalty_value[intersection==0]
    return segment_id
def run_heuristic_solver(similarity_matrix, nb_patches, max_label_per_object, max_label_per_patch, threshold=0):
    """Merge primitive segments greedily from a pairwise similarity matrix.

    Candidate pairs are the upper-triangular entries above `threshold`;
    segments whose diagonal similarity falls below `threshold` get fresh
    singleton labels.  Returns contiguous merged labels (one per segment).
    """
    rows, cols = np.where(similarity_matrix > threshold)
    penalty_array = np.stack((rows, cols, similarity_matrix[rows, cols]), axis=1)
    # Keep each unordered pair once (strict upper triangle).
    penalty_array = penalty_array[penalty_array[:, 0] < penalty_array[:, 1]]
    # Patch index of every segment column: nb_patches*K1 patch segments, then
    # the K2 global segments tagged with a sentinel patch id.
    patch_id = np.concatenate((np.repeat(np.arange(nb_patches), repeats=max_label_per_patch, axis=0),
                               nb_patches * np.ones([max_label_per_object], dtype=int)), axis=0)
    merged_labels = heuristic_merging(penalty_array[:, :2].astype(int), patch_id, penalty_array[:, 2])
    # Segments with weak self-similarity are detached into unique negative ids...
    diagonal = np.diag(similarity_matrix)
    fallback_ids = np.concatenate((np.tile(np.arange(-max_label_per_patch, 0), nb_patches),
                                   np.arange(-max_label_per_object, 0)), axis=0)
    merged_labels[diagonal < threshold] = fallback_ids[diagonal < threshold]
    # ...and all labels are remapped to a contiguous 0..M-1 range.
    _, merged_labels = np.unique(merged_labels, return_inverse=True)
    return merged_labels
def get_point_final(point2primitive_prediction, output_labels_heuristic):
    """Fold per-point primitive scores onto merged segment labels.

    point2primitive_prediction: NxS scores; output_labels_heuristic: S merged
    label per segment.  Segment columns are one-hot encoded, normalized by how
    many segments share each label, then applied as an NxS @ SxM product.
    """
    n_labels = int(output_labels_heuristic.max()) + 1
    one_hot = torch.eye(n_labels).to(output_labels_heuristic.device)[output_labels_heuristic.long()]
    column_mass = torch.sum(one_hot, dim=0, keepdim=True) + 1e-10  # avoid 0/0 for unused labels
    one_hot = one_hot / column_mass
    return torch.mm(point2primitive_prediction, one_hot)
CPFN | CPFN-master/Dataset/dataloaders.py | # Importation of packages
import os
import re
import h5py
import torch
import pickle
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch.utils.data as data
# Importing Utils files
from Utils import dataset_utils
class Dataset_PatchSelection(data.Dataset):
    """Dataset pairing low-res/high-res point clouds with patch-selection labels.

    All samples are read once from disk at construction time and then served
    from memory as torch tensors.
    """
    def __init__(self, csv_path, lowres_folder, highres_folder, scale, n_points=None, normalisation=True):
        self.lowres_folder = lowres_folder
        self.highres_folder = highres_folder
        self.scale = scale
        self.n_points = n_points
        self.normalisation = normalisation
        file_column = pd.read_csv(csv_path, delimiter=',', header=None)[0]
        self.hdf5_file_list = np.sort([name for name in file_column])
        self.hdf5_file_list_lowres = [os.path.join(self.lowres_folder, name.split('.')[0] + '.h5') for name in self.hdf5_file_list]
        self.hdf5_file_list_highres = [os.path.join(self.highres_folder, name.split('.')[0] + '.h5') for name in self.hdf5_file_list]
        self.n_data = len(self.hdf5_file_list)
        self.preload_dataset()
    def preload_dataset(self):
        """Load every (points, labels, shuffled indices) sample into memory once."""
        self.list_points = []
        self.list_output_labels = []
        self.list_shuffled_indices = []
        print('Preloading Dataset:')
        for sample_idx in tqdm(range(self.n_data)):
            pts, labels, shuffled = dataset_utils.create_unit_data_from_hdf5_patch_selection(
                self.hdf5_file_list_lowres[sample_idx], self.hdf5_file_list_highres[sample_idx],
                normalisation=self.normalisation, scale=self.scale, n_points=self.n_points)
            self.list_points.append(pts)
            self.list_output_labels.append(labels)
            self.list_shuffled_indices.append(shuffled)
    def __getitem__(self, index):
        """Return (points, output_labels, shuffled_indices) as torch tensors."""
        pts = torch.from_numpy(self.list_points[index]).float()
        labels = torch.from_numpy(self.list_output_labels[index]).long()
        shuffled = torch.from_numpy(self.list_shuffled_indices[index]).long()
        return pts, labels, shuffled
    def __len__(self):
        return self.n_data
class Dataset_GlobalSPFN(data.Dataset):
    """Dataset of whole-object point clouds for the global SPFN network.

    In training mode samples come from `lowres_folder` and are preloaded into
    memory; in test mode they are read lazily from `highres_folder` and each
    sample additionally carries the precomputed patch centers from `path_patches`.
    """
    def __init__(self, n_max_global_instances, csv_path, lowres_folder, highres_folder, path_patches, noisy, n_points=8192, test=False, first_n=-1, fixed_order=False):
        self.n_max_global_instances = n_max_global_instances
        self.lowres_folder = lowres_folder
        self.highres_folder = highres_folder
        # Training reads low-res clouds; testing reads high-res clouds and patch files.
        if not test:
            self.dir_files = self.lowres_folder
            self.path_patches = None
        else:
            self.dir_files = self.highres_folder
            self.path_patches = path_patches
        self.noisy = noisy
        self.n_points = n_points
        self.test = test
        self.first_n = first_n
        self.fixed_order = fixed_order
        csv_raw = pd.read_csv(csv_path, delimiter=',', header=None)[0]
        self.hdf5_file_list = np.sort(csv_raw)
        if not fixed_order:
            random.shuffle(self.hdf5_file_list)
        if first_n != -1:
            # Optionally restrict to the first n files (after the shuffle above).
            self.hdf5_file_list = self.hdf5_file_list[:first_n]
        self.n_data = len(self.hdf5_file_list)
        if not self.test:
            self.preload_dataset()
    def preload_dataset(self):
        """Read every sample once and pack all fields into self.data_matrix."""
        print(f'Preloading Dataset:')
        for index in tqdm(range(self.__len__())):
            data_elt = self.fetch_data_at_index(index)
            if not hasattr(self, 'data_matrix'):
                # Allocate one (n_data, ...) array per field, shaped after the first sample.
                self.data_matrix = {}
                for key in data_elt.keys():
                    trailing_ones = np.full([len(data_elt[key].shape)], 1, dtype=int)
                    self.data_matrix[key] = np.tile(np.expand_dims(np.zeros_like(data_elt[key]), axis=0), [self.n_data, *trailing_ones])
            for key in data_elt.keys():
                self.data_matrix[key][index, ...] = data_elt[key]
    def fetch_data_at_index(self, i):
        """Load and unpack one HDF5 object; in test mode attach its patch centers."""
        file_ = self.hdf5_file_list[i]
        with h5py.File(os.path.join(self.dir_files, file_), 'r') as f:
            data = dataset_utils.create_unit_data_from_hdf5_spfn(f, self.n_max_global_instances, self.noisy, n_points=self.n_points, use_glob_features=False, use_loc_features=False, fixed_order=self.fixed_order, shuffle=not self.fixed_order)
            assert data is not None # assume data are all clean
        if self.test:
            # Patch centers are the first column of the precomputed index file, if any.
            if os.path.isfile(os.path.join(self.path_patches, file_.replace('.h5','_indices.npy'))):
                data['patch_centers'] = np.load(os.path.join(self.path_patches, file_.replace('.h5','_indices.npy')))[:,0]
            else:
                data['patch_centers'] = np.array([])
        return data
    def __getitem__(self, index):
        # find shape that contains the point with given global index
        if not self.test:
            # Serve the preloaded copy.
            data = {}
            for key in self.data_matrix.keys():
                data[key] = self.data_matrix[key][index,...]
        else:
            # Lazy read at test time.
            data = self.fetch_data_at_index(index)
        # Convert each field to a torch tensor of the expected dtype.
        P = torch.from_numpy(data['P'].astype(np.float32))
        normal_gt = torch.from_numpy(data['normal_gt'].astype(np.float32))
        P_gt = torch.from_numpy(data['P_gt'].astype(np.float32))
        I_gt = torch.from_numpy(data['I_gt'].astype(np.int64))
        T_gt = torch.from_numpy(data['T_gt'].astype(np.int64))
        plane_n_gt = torch.from_numpy(data['plane_n_gt'].astype(np.float32))
        cylinder_axis_gt = torch.from_numpy(data['cylinder_axis_gt'].astype(np.float32))
        cone_axis_gt = torch.from_numpy(data['cone_axis_gt'].astype(np.float32))
        if self.test:
            patch_centers = torch.from_numpy(data['patch_centers'].astype(np.int64))
            return P, normal_gt, P_gt, I_gt, T_gt, plane_n_gt, cylinder_axis_gt, cone_axis_gt, patch_centers
        else:
            return P, normal_gt, P_gt, I_gt, T_gt, plane_n_gt, cylinder_axis_gt, cone_axis_gt
    def __len__(self):
        return self.n_data
class Dataset_TrainLocalSPFN(data.Dataset):
    """Training dataset over per-patch HDF5 files for the local SPFN network.

    Each object listed in the csv has a sibling directory under `patch_folder`
    containing one '<object>_patch<j>.h5' file per extracted patch; every such
    patch file is one training sample. With `lean=False` all samples are
    preloaded into memory; with `lean=True` they are read lazily from disk.
    """
    def __init__(self, n_max_local_instances, csv_path, patch_folder, noisy, first_n=-1, fixed_order=False, lean=False):
        self.n_max_local_instances = n_max_local_instances
        self.noisy = noisy
        self.first_n = first_n
        self.fixed_order = fixed_order
        self.lean = lean
        self.patch_folder = patch_folder
        csv_raw = pd.read_csv(csv_path, delimiter=',', header=None)[0]
        self.hdf5_file_list = np.sort(csv_raw)
        self.n_data = 0
        # check_dataset() keeps only objects that have a patch directory and,
        # as a side effect, accumulates the total patch count into self.n_data.
        self.hdf5_file_list = np.sort([elt for elt in self.hdf5_file_list if self.check_dataset(elt)])
        if not fixed_order:
            random.shuffle(self.hdf5_file_list)
        # Per-object patch counts: needed for lean indexing and to fix up
        # n_data when the file list is truncated by first_n.
        if self.lean or first_n != -1:
            nb_patch_file = np.zeros([len(self.hdf5_file_list)])
            for i, file_ in enumerate(self.hdf5_file_list):
                nb_patch_file[i] = self._count_patches(file_)
        else:
            nb_patch_file = None
        if first_n != -1:
            self.hdf5_file_list = self.hdf5_file_list[:first_n]
            nb_patch_file = nb_patch_file[:first_n]
            # Bug fix: n_data was accumulated over *all* csv entries above;
            # restrict it to the kept files so __len__ matches what is servable
            # (and so the lean cumsum indexing stays within hdf5_file_list).
            self.n_data = int(nb_patch_file.sum())
        if self.lean:
            self.nb_patch_file = nb_patch_file
        if not self.lean:
            self.preload_data()
    def _count_patches(self, file_):
        """Return the number of '.h5' patch files stored for one object."""
        object_dir = os.path.join(self.patch_folder, file_.split('.')[0])
        return len([name for name in os.listdir(object_dir) if name.split('.')[1] == 'h5'])
    def check_dataset(self, file_):
        """Return True if `file_` has a patch directory; accumulate its patch count."""
        cond = os.path.isdir(os.path.join(self.patch_folder, file_.split('.')[0]))
        if not cond:
            return False
        self.n_data += self._count_patches(file_)
        return True
    def preload_data(self):
        """Read every patch file once and pack all fields into self.data_matrix."""
        cpt = 0
        print('Preloading Dataset:')
        for i, file_ in tqdm(enumerate(self.hdf5_file_list)):
            if i%100==0: print('%d / %d'%(i, len(self.hdf5_file_list)))
            for j in range(self._count_patches(file_)):
                patch_file = os.path.join(self.patch_folder, file_.split('.')[0], file_.replace('.h5','_patch%d.h5'%j))
                data_elt = self.fetch_data_at_index(patch_file)
                if not hasattr(self, 'data_matrix'):
                    # Allocate one (n_data, ...) array per field on the first sample.
                    self.data_matrix = {}
                    for key in data_elt.keys():
                        trailing_ones = np.full([len(data_elt[key].shape)], 1, dtype=int)
                        self.data_matrix[key] = np.tile(np.expand_dims(np.zeros_like(data_elt[key]), axis=0), [self.n_data, *trailing_ones])
                for key in data_elt.keys():
                    self.data_matrix[key][cpt, ...] = data_elt[key]
                cpt += 1
    def fetch_data_at_index(self, patch_file):
        """Load and unpack one patch HDF5 file (with global/local SPFN features)."""
        with h5py.File(patch_file, 'r') as f:
            data = dataset_utils.create_unit_data_from_hdf5_spfn(f, self.n_max_local_instances, noisy=self.noisy, n_points=None, use_glob_features=True, use_loc_features=True, fixed_order=self.fixed_order, shuffle=not self.fixed_order)
            assert data is not None # assume data are all clean
        return data
    def __getitem__(self, index):
        """Return one patch sample as a tuple of torch tensors."""
        if not self.lean:
            data = {}
            for key in self.data_matrix.keys():
                data[key] = self.data_matrix[key][index, ...]
        else:
            # Lazy path: map the flat sample index to (object, patch) through the
            # cumulative per-object patch counts.
            cumsum = np.cumsum(self.nb_patch_file)
            index_ = np.where(index < cumsum)[0][0]
            file_ = self.hdf5_file_list[index_]
            if index_ == 0:
                j = index
            else:
                j = int(index - cumsum[index_ - 1])
            patch_file = os.path.join(self.patch_folder, file_.split('.')[0], file_.replace('.h5', '_patch%d.h5' % j))
            data = self.fetch_data_at_index(patch_file)
        P = torch.from_numpy(data['P'].astype(np.float32))
        normal_gt = torch.from_numpy(data['normal_gt'].astype(np.float32))
        P_gt = torch.from_numpy(data['P_gt'].astype(np.float32))
        I_gt = torch.from_numpy(data['I_gt'].astype(np.int64))
        T_gt = torch.from_numpy(data['T_gt'].astype(np.int64))
        plane_n_gt = torch.from_numpy(data['plane_n_gt'].astype(np.float32))
        cylinder_axis_gt = torch.from_numpy(data['cylinder_axis_gt'].astype(np.float32))
        cone_axis_gt = torch.from_numpy(data['cone_axis_gt'].astype(np.float32))
        glob_features = torch.from_numpy(data['glob_features'].astype(np.float32))
        loc_features = torch.from_numpy(data['loc_features'].astype(np.float32))
        output_tuple = (P, normal_gt, P_gt, I_gt, T_gt, plane_n_gt, cylinder_axis_gt, cone_axis_gt, glob_features, loc_features)
        return output_tuple
    def __len__(self):
        return self.n_data
class Dataset_TestLocalSPFN(data.Dataset):
    """Test-time dataset feeding the local SPFN network one whole object per item.

    For each object it returns the global (high-res) ground truth together with
    all of its normalised patches, the matching per-patch ground truth, the
    precomputed SPFN features/outputs, and the patch-to-object point indices.
    Objects without precomputed patch indices yield empty patch tensors.
    """
    def __init__(self, n_max_global_instances, n_max_local_instances, csv_path, dir_spfn, dir_lowres, dir_highres, dir_indices, noisy, first_n=-1, fixed_order=False):
        self.n_max_global_instances = n_max_global_instances
        self.n_max_local_instances = n_max_local_instances
        self.dir_spfn = dir_spfn
        self.dir_lowres = dir_lowres
        self.dir_highres = dir_highres
        self.dir_indices = dir_indices
        self.noisy = noisy
        self.first_n = first_n
        self.fixed_order = fixed_order
        csv_raw = pd.read_csv(csv_path, delimiter=',', header=None)[0]
        self.hdf5_file_list = np.sort(csv_raw)
        self.n_data = len(self.hdf5_file_list)
        # Objects that have precomputed patch indices take the patch-refinement path.
        self.hdf5_file_list_improvement = [elt for elt in self.hdf5_file_list if self.check_dataset(elt)]
    def check_dataset(self, file_):
        """Return True iff precomputed patch indices exist for this object."""
        cond = os.path.isfile(os.path.join(self.dir_indices, file_.split('.')[0] + '_indices.npy'))
        if not cond:
            return False
        return True
    def fetch_data_at_index(self, patch_file):
        """Load one HDF5 object in fixed order (no shuffling at test time)."""
        with h5py.File(patch_file, 'r') as f:
            data = dataset_utils.create_unit_data_from_hdf5_spfn(f, self.n_max_global_instances, self.noisy, n_points=None, fixed_order=True, shuffle=False)
            assert data is not None # assume data are all clean
        return data
    def __getitem__(self, index):
        # find shape that contains the point with given global index
        folder = self.hdf5_file_list[index]
        # Loading the highres file
        data_elt = self.fetch_data_at_index(os.path.join(self.dir_highres, folder))
        P_global = data_elt['P']
        normal_gt_global = data_elt['normal_gt']
        P_gt_global = data_elt['P_gt']
        I_gt_global = data_elt['I_gt']
        T_gt_global = data_elt['T_gt']
        plane_n_gt_global = data_elt['plane_n_gt']
        cylinder_axis_gt_global = data_elt['cylinder_axis_gt']
        cone_axis_gt_global = data_elt['cone_axis_gt']
        if (folder in self.hdf5_file_list_improvement):
            # Loading the patch indices
            patch_indices = np.load(os.path.join(self.dir_indices, folder.replace('.h5', '_indices.npy')))
            nb_patches, _ = patch_indices.shape
            # Normalise each patch: centre on its mean, scale to the unit sphere.
            P_unormalised = P_global[patch_indices]
            mean = np.mean(P_unormalised, axis=1, keepdims=True)
            P = P_unormalised - mean
            norm = np.linalg.norm(P, axis=2, keepdims=True).max(axis=1, keepdims=True)
            P = P / norm
            _, num_local_points, _ = P.shape
            normal_gt = normal_gt_global[patch_indices]
            I_gt = I_gt_global[patch_indices]
            # Per-patch ground-truth buffers, truncated to n_max_local_instances.
            P_gt = np.zeros((nb_patches,) + P_gt_global[:self.n_max_local_instances].shape)
            T_gt = np.zeros((nb_patches,) + T_gt_global[:self.n_max_local_instances].shape)
            plane_n_gt = np.zeros((nb_patches,) + plane_n_gt_global[:self.n_max_local_instances].shape)
            cylinder_axis_gt = np.zeros((nb_patches,) + cylinder_axis_gt_global[:self.n_max_local_instances].shape)
            cone_axis_gt = np.zeros((nb_patches,) + cone_axis_gt_global[:self.n_max_local_instances].shape)
            for i in range(nb_patches):
                # Relabel the patch's instances onto a local 0..k-1 range; when -1
                # (unassigned) is present it stays mapped to -1 after the shift.
                flag = -1 in I_gt[i]
                unique_values, inverse_values = np.unique(I_gt[i], return_inverse=True)
                if flag: inverse_values = inverse_values - 1
                I_gt[i] = inverse_values
                P_gt[i,np.arange(len(unique_values))] = P_gt_global[unique_values]
                T_gt[i, np.arange(len(unique_values))] = T_gt_global[unique_values]
                plane_n_gt[i, np.arange(len(unique_values))] = plane_n_gt_global[unique_values]
                cylinder_axis_gt[i, np.arange(len(unique_values))] = cylinder_axis_gt_global[unique_values]
                cone_axis_gt[i, np.arange(len(unique_values))] = cone_axis_gt_global[unique_values]
            # Loading the features
            glob_features = np.load(os.path.join(self.dir_spfn, folder.replace('.h5',''), 'global_feat.npy'))
            loc_features = np.load(os.path.join(self.dir_spfn, folder.replace('.h5',''), 'local_feat_full.npy'))
            # Replicate the global feature per patch; pick each patch's local feature column.
            list_glob_features = []
            list_loc_features = []
            for patch_id in range(nb_patches):
                list_glob_features.append(glob_features)
                list_loc_features.append(loc_features[:,patch_id])
            glob_features = np.stack(list_glob_features, axis=0)
            loc_features = np.stack(list_loc_features, axis=0)
        else:
            # No precomputed patches for this object: return empty patch tensors.
            nb_patches = 0
            P = np.zeros([0, 8192, 3]).astype(np.float32)
            normal_gt = np.zeros([0, 8192, 3]).astype(np.float32)
            I_gt = np.zeros([0, 8192]).astype(np.int64)
            glob_features = np.zeros([0, 1024]).astype(np.float32)
            loc_features = np.zeros([0, 128]).astype(np.float32)
            patch_indices = np.zeros([0, 8192]).astype(np.int64)
            P_unormalised = np.zeros([0, 8192, 3]).astype(np.float32)
            P_gt = np.zeros([0, 21, 512, 3]).astype(np.float32)
            T_gt = np.zeros([0, 21]).astype(np.int64)
            plane_n_gt = np.zeros([0, 21, 3]).astype(np.float32)
            cylinder_axis_gt = np.zeros([0, 21, 3]).astype(np.float32)
            cone_axis_gt = np.zeros([0, 21, 3]).astype(np.float32)
        # Loading the SPFN output
        spfn_labels = np.load(os.path.join(self.dir_spfn, folder.replace('.h5', ''), 'object_seg.npy'))
        spfn_normals = np.load(os.path.join(self.dir_spfn, folder.replace('.h5', ''), 'object_normals.npy'))
        spfn_type = np.load(os.path.join(self.dir_spfn, folder.replace('.h5', ''), 'object_type.npy'))
        # Shuffling the output
        for i in range(nb_patches):
            perm = np.random.permutation(num_local_points)
            P[i] = P[i, perm]
            P_unormalised[i] = P_unormalised[i, perm]
            normal_gt[i] = normal_gt[i, perm]
            I_gt[i] = I_gt[i, perm]
            patch_indices[i] = patch_indices[i, perm]
        # Exporting all the data
        P = torch.from_numpy(P.astype(np.float32))
        normal_gt = torch.from_numpy(normal_gt.astype(np.float32))
        P_gt = torch.from_numpy(P_gt.astype(np.float32))
        I_gt = torch.from_numpy(I_gt.astype(np.int64))
        T_gt = torch.from_numpy(T_gt.astype(np.int64))
        plane_n_gt = torch.from_numpy(plane_n_gt.astype(np.float32))
        cylinder_axis_gt = torch.from_numpy(cylinder_axis_gt.astype(np.float32))
        cone_axis_gt = torch.from_numpy(cone_axis_gt.astype(np.float32))
        # NOTE(review): patch_indices hold integer point ids but are cast to
        # float32 here — confirm downstream consumers expect float indices.
        patch_indices = torch.from_numpy(patch_indices.astype(np.float32))
        spfn_labels = torch.from_numpy(spfn_labels.astype(np.int64))
        spfn_normals = torch.from_numpy(spfn_normals.astype(np.float32))
        spfn_type = torch.from_numpy(spfn_type.astype(np.float32))
        glob_features = torch.from_numpy(glob_features.astype(np.float32))
        loc_features = torch.from_numpy(loc_features.astype(np.float32))
        I_gt_global = torch.from_numpy(I_gt_global.astype(np.int64))
        return P, normal_gt, P_gt_global, I_gt, T_gt_global, patch_indices, spfn_labels, spfn_normals, spfn_type, glob_features, loc_features, P_global, normal_gt_global, I_gt_global, plane_n_gt_global, cylinder_axis_gt_global, cone_axis_gt_global, P_unormalised, P_gt, T_gt, plane_n_gt, cylinder_axis_gt, cone_axis_gt
    def __len__(self):
        return self.n_data
class RandomSampler(data.sampler.Sampler):
    """Sampler yielding a random permutation of dataset indices each epoch.

    When `identical_epochs` is set the generator is re-seeded before every
    epoch, so each pass visits the samples in the same (random) order.
    """
    def __init__(self, data_source, seed=None, identical_epochs=False):
        self.data_source = data_source
        # Draw a seed when none is supplied so runs remain reproducible per sampler.
        self.seed = np.random.randint(0, 2 ** 32 - 1, 1)[0] if seed is None else seed
        self.identical_epochs = identical_epochs
        self.rng = np.random.RandomState(self.seed)
        self.total_samples_count = len(self.data_source)
    def __iter__(self):
        if self.identical_epochs:
            # Reset the generator so every epoch replays the same permutation.
            self.rng.seed(self.seed)
        permutation = self.rng.choice(self.total_samples_count, size=self.total_samples_count, replace=False)
        return iter(permutation)
    def __len__(self):
        return self.total_samples_count
class Sampler(data.sampler.Sampler):
    """Deterministic sampler: yields dataset indices 0..n-1 in sequential order."""
    def __init__(self, data_source):
        self.data_source = data_source
        self.total_samples_count = len(self.data_source)
    def __iter__(self):
        # In-order traversal of all indices.
        return iter(np.arange(0, self.total_samples_count))
    def __len__(self):
        return self.total_samples_count | 19,560 | 54.729345 | 318 | py |
CPFN | CPFN-master/PointNet2/pn2_network.py | # Importation of packages
import os
import sys
import torch
import numpy as np
from SPFN.losses_implementation import compute_all_losses
from PointNet2.pointnet2_ops.modules.pointset_abstraction import PointsetAbstraction
from PointNet2.pointnet2_ops.modules.pointset_feature_propagation import PointsetFeaturePropagation
class PointNet2(torch.nn.Module):
    """PointNet++ segmentation network with three set-abstraction levels.

    Depending on `features_extractor`, forward() either returns the per-point
    predictions (one head per entry of `output_sizes`) followed by the
    bottleneck and per-point features, or only the (bottleneck, per-point)
    feature pair.
    """
    def __init__(self, dim_input=3, dim_pos=3, output_sizes=[16], use_glob_features=False, use_loc_features=False, features_extractor=False):
        super(PointNet2, self).__init__()
        self.dim_pos = dim_pos
        self.use_glob_features = use_glob_features
        self.use_loc_features = use_loc_features
        self.features_extractor = features_extractor
        # Encoding stage: progressively abstract the point set (512 -> 128 -> global).
        self.sa1 = PointsetAbstraction(num_points=512, dim_pos=dim_pos, dim_feats=dim_input-dim_pos, radius_list=[0.2], num_samples_list=[64], mlp_list=[[64,64,128]], group_all=False)
        self.sa2 = PointsetAbstraction(num_points=128, dim_pos=dim_pos, dim_feats=128, radius_list=[0.4], num_samples_list=[64], mlp_list=[[128,128,256]], group_all=False)
        self.sa3 = PointsetAbstraction(num_points=None, dim_pos=dim_pos, dim_feats=256, radius_list=None, num_samples_list=None, mlp_list=[256, 512, 1024], group_all=True)
        # Decoding stage: feature propagation back to full resolution. The
        # bottleneck may be concatenated with externally supplied global (1024)
        # and/or local (128) feature vectors, widening the first propagation input.
        offset = 0
        if self.use_glob_features:
            offset += 1024
        if self.use_loc_features:
            offset += 128
        self.sfp1 = PointsetFeaturePropagation(dim_feats=1024+offset+256, mlp=[256,256])
        self.sfp2 = PointsetFeaturePropagation(dim_feats=256+128, mlp=[256,128])
        self.sfp3 = PointsetFeaturePropagation(dim_feats=128+dim_input-dim_pos, mlp=[128,128,128])
        # FC stage: shared 128-d per-point embedding, then one 1x1-conv head per output.
        self.fc1 = torch.nn.Conv1d(128, 128, 1)
        if not self.features_extractor:
            self.bn1 = torch.nn.BatchNorm1d(128)
            self.fc2 = torch.nn.ModuleList()
            for output_size in output_sizes:
                self.fc2.append(torch.nn.Conv1d(128, output_size, 1))
    def forward(self, x, glob_features=None, loc_features=None, fast=True):
        """Run the network on a batch of point clouds.

        Args:
            x: (batch, num_points, dim_input) tensor; the first `dim_pos`
                channels are positions, any remaining channels are features.
            glob_features: (batch, 1024) extra global features, required when
                `use_glob_features` is set.
            loc_features: (batch, 128) extra local features, required when
                `use_loc_features` is set.
            fast: forwarded to the pointnet2 CUDA ops.

        Returns:
            If `features_extractor` is False: a list with one
            (batch, num_points, output_size) tensor per head, followed by the
            bottleneck features and the per-point features.
            Otherwise: the (bottleneck, per-point) feature pair.
        """
        x = x.transpose(2,1)
        input_pos = x[:,:self.dim_pos,:]
        if x.shape[1] > self.dim_pos:
            input_feats = x[:,self.dim_pos:,:]
        else:
            input_feats = None
        # Encoding stage
        l1_pos, l1_feats = self.sa1(input_pos, input_feats, fast=fast)
        l2_pos, l2_feats = self.sa2(l1_pos, l1_feats, fast=fast)
        l3_pos, l3_feats = self.sa3(l2_pos, l2_feats, fast=fast)
        # Concatenate externally supplied features onto the bottleneck.
        if self.use_glob_features:
            l3_feats = torch.cat((l3_feats, glob_features.unsqueeze(2)), dim=1)
        if self.use_loc_features:
            l3_feats = torch.cat((l3_feats, loc_features.unsqueeze(2)), dim=1)
        # Decoding stage
        l4_feats = self.sfp1(l2_pos, l3_pos, l2_feats, l3_feats, fast=fast)
        l5_feats = self.sfp2(l1_pos, l2_pos, l1_feats, l4_feats, fast=fast)
        l6_feats = self.sfp3(input_pos, l1_pos, input_feats, l5_feats, fast=fast)
        # FC stage
        output_feat = self.fc1(l6_feats)
        if not self.features_extractor:
            output_feat = torch.nn.functional.relu(self.bn1(output_feat))
            # Bug fix: pass training=self.training. F.dropout defaults to
            # training=True, which kept dropout active even in eval mode.
            output_feat = torch.nn.functional.dropout(output_feat, p=0.5, training=self.training)
            results = []
            for fc2_layer in self.fc2:
                result = fc2_layer(output_feat)
                result = result.transpose(1,2)
                results.append(result)
            results.append(l3_feats)
            results.append(output_feat)
            return results
        else:
            return l3_feats, output_feat
CPFN | CPFN-master/PointNet2/pointnet2_ops/setup.py | import os
import glob
import setuptools
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
# Collect every C++/CUDA source file of the extension.
sources = glob.glob("cuda_ops/src/*.cpp") + glob.glob("cuda_ops/src/*.cu")
# Absolute path to the extension's header directory (passed via -I below).
headers = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cuda_ops/include')
setuptools.setup(
    name="pointnet2_ops",
    version="1.0",
    description="PointNet++ modules",
    packages=setuptools.find_packages(),
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.7',
    ext_modules=[
        # Single CUDA extension bundling all custom pointnet2 kernels.
        CUDAExtension(
            name='cuda_ops',
            sources=sources,
            extra_compile_args={
                "cxx": ["-O2", "-I{}".format(headers)],
                "nvcc": ["-O2", "-I{}".format(headers)],
            },
        )
    ],
    # Use torch's BuildExtension so nvcc flags and ABI settings are handled.
    cmdclass={"build_ext": BuildExtension},
) | 915 | 28.548387 | 86 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.