def get_children(self):
"""Gets a new grouped symbol whose output contains
inputs to output nodes of the original symbol.
Example
-------
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.Variable('z')
>>> a = y+z
>>> b = x+a
>>> b.get_children()
<Symbol Grouped>
>>> b.get_children().list_outputs()
['x', '_plus10_output']
>>> b.get_children().get_children().list_outputs()
['y', 'z']
Returns
-------
sgroup : Symbol or None
The children of the head node. If the symbol has no
inputs then ``None`` will be returned.
"""
handle = SymbolHandle()
check_call(_LIB.MXSymbolGetChildren(
self.handle, ctypes.byref(handle)))
ret = Symbol(handle=handle)
if len(ret.list_outputs()) == 0:
return None
return ret
def list_arguments(self):
"""Lists all the arguments in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_arguments()
['a', 'b']
Returns
-------
args : list of string
List containing the names of all the arguments required to compute the symbol.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListArguments(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def list_outputs(self):
"""Lists all the outputs in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_outputs()
['_plus12_output']
Returns
-------
list of str
List of all the outputs.
For most symbols, this list contains only the name of this symbol.
For symbol groups, this is a list with the names of all symbols
in the group.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListOutputs(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def list_auxiliary_states(self):
"""Lists all the auxiliary states in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_auxiliary_states()
[]
Example of auxiliary states in `BatchNorm`.
>>> data = mx.symbol.Variable('data')
>>> weight = mx.sym.Variable(name='fc1_weight')
>>> fc1 = mx.symbol.FullyConnected(data=data, weight=weight, name='fc1', num_hidden=128)
>>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')
>>> fc2.list_auxiliary_states()
['batchnorm0_moving_mean', 'batchnorm0_moving_var']
Returns
-------
aux_states : list of str
List of the auxiliary states in the input symbol.
Notes
-----
Auxiliary states are special states of symbols that do not correspond to an argument,
and are not updated by gradient descent. Common examples of auxiliary states
include the `moving_mean` and `moving_variance` in `BatchNorm`.
Most operators do not have auxiliary states.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListAuxiliaryStates(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def list_inputs(self):
"""Lists all arguments and auxiliary states of this Symbol.
Returns
-------
inputs : list of str
List of all inputs.
Examples
--------
>>> bn = mx.sym.BatchNorm(name='bn')
>>> bn.list_arguments()
['bn_data', 'bn_gamma', 'bn_beta']
>>> bn.list_auxiliary_states()
['bn_moving_mean', 'bn_moving_var']
>>> bn.list_inputs()
['bn_data', 'bn_gamma', 'bn_beta', 'bn_moving_mean', 'bn_moving_var']
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.NNSymbolListInputNames(
self.handle, 0, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)]
def infer_type(self, *args, **kwargs):
"""Infers the type of all arguments and all outputs, given the known types
for some arguments.
This function takes the known types of some arguments, given either positionally
or as keyword arguments. It returns a tuple of `None` values
if there is not enough information to deduce the missing types.
Inconsistencies in the known types will cause an error to be raised.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> arg_types, out_types, aux_types = c.infer_type(a='float32')
>>> arg_types
[<type 'numpy.float32'>, <type 'numpy.float32'>]
>>> out_types
[<type 'numpy.float32'>]
>>> aux_types
[]
Parameters
----------
*args :
Type of known arguments in a positional way.
Unknown type can be marked as None.
**kwargs :
Keyword arguments of known types.
Returns
-------
arg_types : list of numpy.dtype or None
List of argument types.
The order is the same as the order of list_arguments().
out_types : list of numpy.dtype or None
List of output types.
The order is the same as the order of list_outputs().
aux_types : list of numpy.dtype or None
List of auxiliary state types.
The order is the same as the order of list_auxiliary_states().
"""
try:
res = self._infer_type_impl(False, *args, **kwargs)
if res[1] is None:
arg_types, _, _ = self._infer_type_impl(True, *args, **kwargs)
arg_names = self.list_arguments()
unknowns = []
for name, dtype in zip(arg_names, arg_types):
if not dtype:
if len(unknowns) >= 10:
unknowns.append('...')
break
unknowns.append('%s: %s' % (name, str(dtype)))
warnings.warn(
"Cannot decide type for the following arguments. " +
"Consider providing them as input:\n\t" +
"\n\t".join(unknowns), stacklevel=2)
return res
except MXNetError:
print("infer_type error. Arguments:")
for i, arg in enumerate(args):
print(" #%d: %s" % (i, arg))
for k, v in kwargs.items():
print(" %s: %s" % (k, v))
raise
def _infer_type_impl(self, partial, *args, **kwargs):
"""The actual implementation for calling type inference API."""
# pylint: disable=too-many-locals
if len(args) != 0 and len(kwargs) != 0:
raise ValueError('Can only specify known argument types '
'either positionally or by keyword, not both.')
sdata = []
if len(args) != 0:
keys = c_array(ctypes.c_char_p, [])
for s in args:
if s is not None:
s = _numpy.dtype(s).type
if s not in _DTYPE_NP_TO_MX:
raise TypeError('Argument needs to be one of ' + str(_DTYPE_NP_TO_MX))
sdata.append(_DTYPE_NP_TO_MX[s])
else:
sdata.append(-1)
else:
str_keys = []
for k, v in kwargs.items():
v = _numpy.dtype(v).type
if v in _DTYPE_NP_TO_MX:
str_keys.append(k)
sdata.append(_DTYPE_NP_TO_MX[v])
keys = c_str_array(str_keys)
arg_type_size = mx_uint()
arg_type_data = ctypes.POINTER(ctypes.c_int)()
out_type_size = mx_uint()
out_type_data = ctypes.POINTER(ctypes.c_int)()
aux_type_size = mx_uint()
aux_type_data = ctypes.POINTER(ctypes.c_int)()
complete = ctypes.c_int()
if partial:
infer_func = _LIB.MXSymbolInferTypePartial
else:
infer_func = _LIB.MXSymbolInferType
check_call(infer_func(
self.handle,
mx_uint(len(sdata)),
keys,
c_array_buf(ctypes.c_int, array('i', sdata)),
ctypes.byref(arg_type_size),
ctypes.byref(arg_type_data),
ctypes.byref(out_type_size),
ctypes.byref(out_type_data),
ctypes.byref(aux_type_size),
ctypes.byref(aux_type_data),
ctypes.byref(complete)))
if complete.value != 0:
arg_types = [
_DTYPE_MX_TO_NP[arg_type_data[i]] for i in range(arg_type_size.value)]
out_types = [
_DTYPE_MX_TO_NP[out_type_data[i]] for i in range(out_type_size.value)]
aux_types = [
_DTYPE_MX_TO_NP[aux_type_data[i]] for i in range(aux_type_size.value)]
return (arg_types, out_types, aux_types)
else:
return (None, None, None)
def infer_shape(self, *args, **kwargs):
"""Infers the shapes of all arguments and all outputs given the known shapes of
some arguments.
This function takes the known shapes of some arguments, given either positionally
or as keyword arguments. It returns a tuple of `None` values
if there is not enough information to deduce the missing shapes.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=(3,3))
>>> arg_shapes
[(3L, 3L), (3L, 3L)]
>>> out_shapes
[(3L, 3L)]
>>> aux_shapes
[]
>>> c.infer_shape(a=(0,3)) # 0s in shape means unknown dimensions. So, returns None.
(None, None, None)
Inconsistencies in the known shapes will cause an error to be raised.
See the following example:
>>> data = mx.sym.Variable('data')
>>> out = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=1000)
>>> out = mx.sym.Activation(data=out, act_type='relu')
>>> out = mx.sym.FullyConnected(data=out, name='fc2', num_hidden=10)
>>> weight_shape = (1, 100)
>>> data_shape = (100, 100)
>>> out.infer_shape(data=data_shape, fc1_weight=weight_shape)
Error in operator fc1: Shape inconsistent, Provided=(1,100), inferred shape=(1000,100)
Parameters
----------
*args :
Shape of arguments in a positional way.
Unknown shape can be marked as None.
**kwargs :
Keyword arguments of the known shapes.
Returns
-------
arg_shapes : list of tuple or None
List of argument shapes.
The order is the same as the order of list_arguments().
out_shapes : list of tuple or None
List of output shapes.
The order is the same as the order of list_outputs().
aux_shapes : list of tuple or None
List of auxiliary state shapes.
The order is the same as the order of list_auxiliary_states().
"""
try:
res = self._infer_shape_impl(False, *args, **kwargs)
if res[1] is None:
arg_shapes, _, _ = self._infer_shape_impl(True, *args, **kwargs)
arg_names = self.list_arguments()
unknowns = []
for name, shape in zip(arg_names, arg_shapes):
if is_np_compat():
shape_is_none = not shape or -1 in shape
else:
shape_is_none = not shape or 0 in shape
if shape_is_none:
if len(unknowns) >= 10:
unknowns.append('...')
break
unknowns.append('%s: %s' % (name, str(shape)))
warnings.warn(
"Cannot decide shape for the following arguments " +
"(0s in shape means unknown dimensions). " +
"Consider providing them as input:\n\t" +
"\n\t".join(unknowns), stacklevel=2)
return res
except MXNetError:
print("infer_shape error. Arguments:")
for i, arg in enumerate(args):
print(" #%d: %s" % (i, arg))
for k, v in kwargs.items():
print(" %s: %s" % (k, v))
raise
def _infer_shape_impl(self, partial, *args, **kwargs):
"""The actual implementation for calling shape inference API."""
# pylint: disable=too-many-locals
if len(args) != 0 and len(kwargs) != 0:
raise ValueError('Can only specify known argument shapes '
'either positionally or by keyword, not both.')
sdata = []
indptr = [0]
if len(args) != 0:
keys = c_array(ctypes.c_char_p, [])
for i, s in enumerate(args):
if s is not None:
if not isinstance(s, tuple):
raise TypeError("Arguments need to be shapes (tuple), "
"but argument %d is %s." % (i, type(s)))
sdata.extend(s)
indptr.append(len(sdata))
else:
str_keys = []
for k, v in kwargs.items():
if not isinstance(v, tuple):
raise TypeError("Arguments need to be shapes (tuple), "
"but '%s' is %s." % (k, type(v)))
str_keys.append(k)
sdata.extend(v)
indptr.append(len(sdata))
keys = c_str_array(str_keys)
arg_shape_size = mx_uint()
arg_shape_ndim = ctypes.POINTER(mx_int)()
arg_shape_data = ctypes.POINTER(ctypes.POINTER(mx_int))()
out_shape_size = mx_uint()
out_shape_ndim = ctypes.POINTER(mx_int)()
out_shape_data = ctypes.POINTER(ctypes.POINTER(mx_int))()
aux_shape_size = mx_uint()
aux_shape_ndim = ctypes.POINTER(mx_int)()
aux_shape_data = ctypes.POINTER(ctypes.POINTER(mx_int))()
complete = ctypes.c_int()
if partial:
infer_func = _LIB.MXSymbolInferShapePartialEx
else:
infer_func = _LIB.MXSymbolInferShapeEx
check_call(infer_func(
self.handle,
mx_uint(len(indptr) - 1),
keys,
c_array_buf(mx_uint, array('I', indptr)),
c_array_buf(mx_int, array('i', sdata)),
ctypes.byref(arg_shape_size),
ctypes.byref(arg_shape_ndim),
ctypes.byref(arg_shape_data),
ctypes.byref(out_shape_size),
ctypes.byref(out_shape_ndim),
ctypes.byref(out_shape_data),
ctypes.byref(aux_shape_size),
ctypes.byref(aux_shape_ndim),
ctypes.byref(aux_shape_data),
ctypes.byref(complete)))
if complete.value != 0:
arg_shapes = [tuple(arg_shape_data[i][:arg_shape_ndim[i]])
if arg_shape_ndim[i] >= 0 else None
for i in range(arg_shape_size.value)]
out_shapes = [tuple(out_shape_data[i][:out_shape_ndim[i]])
if out_shape_ndim[i] >= 0 else None
for i in range(out_shape_size.value)]
aux_shapes = [tuple(aux_shape_data[i][:aux_shape_ndim[i]])
if aux_shape_ndim[i] >= 0 else None
for i in range(aux_shape_size.value)]
return (arg_shapes, out_shapes, aux_shapes)
else:
return (None, None, None)
def save(self, fname):
"""Saves symbol to a file.
You can also use pickle to do the job if you only work in Python.
The advantage of the `load`/`save` functions is that the file contents are language agnostic.
This means the model saved by one language binding can be loaded by a different
language binding of `MXNet`.
You also get the benefit of being able to directly load/save from cloud storage (S3, HDFS).
Parameters
----------
fname : str
The name of the file.
- "s3://my-bucket/path/my-s3-symbol"
- "hdfs://my-bucket/path/my-hdfs-symbol"
- "/path-to/my-local-symbol"
See Also
--------
symbol.load : Used to load symbol from file.
"""
if not isinstance(fname, string_types):
raise TypeError('fname needs to be a string')
check_call(_LIB.MXSymbolSaveToFile(self.handle, c_str(fname)))
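# A minimal round-trip sketch (not part of the original source), assuming
# mxnet is importable as `mx` and the working directory is writable:
#
#     >>> c = mx.sym.var('a') + mx.sym.var('b')
#     >>> c.save('plus-symbol.json')
#     >>> c2 = mx.sym.load('plus-symbol.json')
#     >>> c2.tojson() == c.tojson()   # should print True
#     True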
def tojson(self):
"""Saves symbol to a JSON string.
See Also
--------
symbol.load_json : Used to load symbol from JSON string.
"""
json_str = ctypes.c_char_p()
check_call(_LIB.MXSymbolSaveToJSON(self.handle, ctypes.byref(json_str)))
return py_str(json_str.value)
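# A hedged companion sketch: `tojson` pairs with `mx.sym.load_json` for
# in-memory round trips (assuming mxnet is importable as `mx`):
#
#     >>> c = mx.sym.var('a') + mx.sym.var('b')
#     >>> c2 = mx.sym.load_json(c.tojson())
#     >>> c2.list_arguments()
#     ['a', 'b']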
def _get_ndarray_inputs(arg_key, args, arg_names, allow_missing):
"""Helper function to get NDArray lists handles from various inputs.
Parameters
----------
arg_key : str
The name of argument, used for error message.
args : list of NDArray or dict of str to NDArray
Input arguments to the symbols.
If the type is a list of NDArray, the positions are in the same order as arg_names.
If the type is a dict of str to NDArray, then it maps the names of arguments
to the corresponding NDArrays.
arg_names : list of string
List of argument names.
allow_missing : boolean
Whether missing arguments are allowed.
When allowed, the missing handle will be set to None (null).
Returns
-------
handles : list of NDArrayHandle
The positional list of NDArrayHandles generated from input.
"""
# setup args
arg_handles = []
arg_arrays = []
if isinstance(args, list):
if len(args) != len(arg_names):
raise ValueError('Length of %s does not match the number of arguments' % arg_key)
for narr in args:
if narr is None and allow_missing:
arg_handles.append(None)
elif not isinstance(narr, NDArray):
raise TypeError('Only accept list of NDArrays or dict of str to NDArray')
else:
arg_handles.append(narr.handle)
arg_arrays = args
elif isinstance(args, dict):
for name in arg_names:
if name in args:
narr = args[name]
if not isinstance(narr, NDArray):
raise TypeError('Only accept list of NDArrays or dict of str to NDArray')
arg_handles.append(narr.handle)
arg_arrays.append(narr)
else:
if allow_missing:
arg_handles.append(None)
arg_arrays.append(None)
else:
raise ValueError('key `%s` is missing in `%s`' % (name, arg_key))
else:
raise TypeError('Only accept list of NDArrays or dict of str to NDArray')
return c_array(NDArrayHandle, arg_handles), arg_arrays
def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
group2ctx=None, shared_arg_names=None, shared_exec=None,
shared_buffer=None, **kwargs):
"""Bind current symbol to get an executor, allocate all the arguments needed.
Allows specifying data types.
This function simplifies the binding procedure. You need to specify only input data shapes.
Before binding the executor, the function allocates arguments and auxiliary states
that were not explicitly specified. Allows specifying data types.
Example
-------
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.FullyConnected(x, num_hidden=4)
>>> exe = y.simple_bind(mx.cpu(), x=(5,4), grad_req='null')
>>> exe.forward()
[<NDArray 5x4 @cpu(0)>]
>>> exe.outputs[0].asnumpy()
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
>>> exe.arg_arrays
[<NDArray 5x4 @cpu(0)>, <NDArray 4x4 @cpu(0)>, <NDArray 4 @cpu(0)>]
>>> exe.grad_arrays
[<NDArray 5x4 @cpu(0)>, <NDArray 4x4 @cpu(0)>, <NDArray 4 @cpu(0)>]
Parameters
----------
ctx : Context
The device context the generated executor to run on.
grad_req : {'write', 'add', 'null'}, or list of str or dict of str to str, optional
Specifies how the gradient is updated into `args_grad`.
- 'write' means the gradient is written to the specified `args_grad` NDArray every time.
- 'add' means the gradient is added to the specified NDArray every time.
- 'null' means no action is taken; the gradient may not be calculated.
type_dict : Dict of str->numpy.dtype
Input type dictionary, name->dtype
stype_dict : Dict of str->str
Input storage type dictionary, name->storage_type
group2ctx : Dict of string to mx.Context
The dict mapping the `ctx_group` attribute to the context assignment.
shared_arg_names : List of string
The argument names whose `NDArray` of shared_exec can be reused for initializing
the current executor.
shared_exec : Executor
The executor whose arg_arrays, grad_arrays, and aux_arrays can be
reused for initializing the current executor.
shared_buffer : Dict of string to `NDArray`
The dict mapping argument names to the `NDArray` that can be reused for initializing
the current executor. This buffer will be checked for reuse if one argument name
of the current executor is not found in `shared_arg_names`. The NDArrays are
expected to have default storage type.
kwargs : Dict of str->shape
Input shape dictionary, name->shape
Returns
-------
executor : mxnet.Executor
The generated executor
"""
# data types
num_provided_arg_types = 0
provided_arg_type_names = ctypes.POINTER(ctypes.c_char_p)() # provided type argument names
provided_arg_type_data = ctypes.POINTER(mx_uint)() # provided types
if type_dict is not None:
provided_arg_type_names = []
provided_arg_type_data = []
for k, v in type_dict.items():
v = _numpy.dtype(v).type
if v in _DTYPE_NP_TO_MX:
provided_arg_type_names.append(k)
provided_arg_type_data.append(_DTYPE_NP_TO_MX[v])
num_provided_arg_types = mx_uint(len(provided_arg_type_names))
provided_arg_type_names = c_str_array(provided_arg_type_names)
provided_arg_type_data = c_array_buf(ctypes.c_int, array('i', provided_arg_type_data))
# storage types
num_provided_arg_stypes = 0
# provided storage type argument names
provided_arg_stype_names = ctypes.POINTER(ctypes.c_char_p)()
provided_arg_stype_data = ctypes.POINTER(mx_uint)() # provided storage types
if stype_dict is not None:
provided_arg_stype_names = []
provided_arg_stype_data = []
for k, v in stype_dict.items():
if v in _STORAGE_TYPE_STR_TO_ID:
provided_arg_stype_names.append(k)
provided_arg_stype_data.append(_STORAGE_TYPE_STR_TO_ID[v])
num_provided_arg_stypes = mx_uint(len(provided_arg_stype_names))
provided_arg_stype_names = c_str_array(provided_arg_stype_names)
provided_arg_stype_data = c_array_buf(ctypes.c_int, array('i', provided_arg_stype_data))
provided_arg_shape_data = [] # shape data
# argument shape index in sdata,
# e.g. [sdata[indptr[0]], sdata[indptr[1]]) is the shape of the first arg
provided_arg_shape_idx = [0]
provided_arg_shape_names = [] # provided argument names
for k, v in kwargs.items():
# if k not in listed_arguments and k not in listed_aux_states:
# raise ValueError('arg name %s is not valid', k)
if isinstance(v, tuple):
provided_arg_shape_names.append(k)
provided_arg_shape_data.extend(v)
provided_arg_shape_idx.append(len(provided_arg_shape_data))
provided_req_type_list_len = 0
provided_grad_req_types = ctypes.POINTER(ctypes.c_char_p)()
provided_grad_req_names = ctypes.POINTER(ctypes.c_char_p)()
if grad_req is not None:
if isinstance(grad_req, string_types):
# use provided_req_type_list_len = 0 to indicate this situation
provided_req_type_list_len = 0
provided_grad_req_types = [grad_req]
elif isinstance(grad_req, list):
if len(grad_req) == 0:
raise RuntimeError('grad_req in simple_bind cannot be an empty list')
provided_grad_req_types = grad_req
provided_req_type_list_len = len(provided_grad_req_types)
elif isinstance(grad_req, dict):
if len(grad_req) == 0:
raise RuntimeError('grad_req in simple_bind cannot be an empty dict')
provided_grad_req_names = []
provided_grad_req_types = []
for k, v in grad_req.items():
provided_grad_req_names.append(k)
provided_grad_req_types.append(v)
provided_grad_req_names = c_str_array(provided_grad_req_names)
provided_req_type_list_len = len(provided_grad_req_types)
provided_grad_req_types = c_str_array(provided_grad_req_types)
num_ctx_map_keys = mx_uint(0)
ctx_map_keys = ctypes.POINTER(ctypes.c_char_p)()
ctx_map_dev_types = ctypes.POINTER(ctypes.c_int)()
ctx_map_dev_ids = ctypes.POINTER(ctypes.c_int)()
if group2ctx is not None:
ctx_map_keys = []
ctx_map_dev_types = []
ctx_map_dev_ids = []
for key, val in group2ctx.items():
ctx_map_keys.append(key)
ctx_map_dev_types.append(val.device_typeid)
ctx_map_dev_ids.append(val.device_id)
num_ctx_map_keys = mx_uint(len(ctx_map_keys))
ctx_map_keys = c_str_array(ctx_map_keys)
ctx_map_dev_types = c_array(ctypes.c_int, array('i', ctx_map_dev_types))
ctx_map_dev_ids = c_array(ctypes.c_int, array('i', ctx_map_dev_ids))
# prepare param names
shared_arg_name_list = []
if shared_arg_names is not None:
if not isinstance(shared_arg_names, list):
raise ValueError('shared_arg_names in simple_bind must be a list or None')
shared_arg_name_list = shared_arg_names
# prepare shared_buffer
if shared_buffer is None:
shared_buffer_len = ctypes.c_int(-1)
shared_buffer_names = ctypes.POINTER(ctypes.c_char_p)()
shared_buffer_handles = ctypes.POINTER(NDArrayHandle)()
else:
if not isinstance(shared_buffer, dict):
raise ValueError('shared_buffer in simple_bind must be dict or None')
buffer_names = shared_buffer.keys()
buffer_arrays = shared_buffer.values()
for v in buffer_arrays:
assert(v.stype == 'default'), \
"shared_buffer is expected to only contain NDArrays with default storage"
shared_buffer_names = c_str_array(buffer_names)
shared_buffer_len = ctypes.c_int(len(buffer_arrays))
shared_buffer_handles = c_handle_array(buffer_arrays)
updated_shared_buffer_names = ctypes.POINTER(ctypes.c_char_p)()
updated_shared_buffer_handles = ctypes.POINTER(NDArrayHandle)()
# prepare shared_exec_handle
shared_exec_handle = shared_exec.handle if shared_exec is not None else ExecutorHandle()
# prepare current executor handle
exe_handle = ExecutorHandle()
# prepare current executor's in_args, arg_grads, and aux_states
num_in_args = ctypes.c_uint()
in_arg_handles = ctypes.POINTER(NDArrayHandle)()
arg_grad_handles = ctypes.POINTER(NDArrayHandle)()
num_aux_states = ctypes.c_uint()
aux_state_handles = ctypes.POINTER(NDArrayHandle)()
try:
check_call(_LIB.MXExecutorSimpleBindEx(self.handle,
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
num_ctx_map_keys,
ctx_map_keys,
ctx_map_dev_types,
ctx_map_dev_ids,
mx_uint(provided_req_type_list_len),
provided_grad_req_names,
provided_grad_req_types,
mx_uint(len(provided_arg_shape_names)),
c_str_array(provided_arg_shape_names),
c_array_buf(mx_int,
array('i', provided_arg_shape_data)),
c_array_buf(mx_uint,
array('I', provided_arg_shape_idx)),
num_provided_arg_types,
provided_arg_type_names,
provided_arg_type_data,
num_provided_arg_stypes,
provided_arg_stype_names,
provided_arg_stype_data,
mx_uint(len(shared_arg_name_list)),
c_str_array(shared_arg_name_list),
ctypes.byref(shared_buffer_len),
shared_buffer_names,
shared_buffer_handles,
ctypes.byref(updated_shared_buffer_names),
ctypes.byref(updated_shared_buffer_handles),
ctypes.byref(num_in_args),
ctypes.byref(in_arg_handles),
ctypes.byref(arg_grad_handles),
ctypes.byref(num_aux_states),
ctypes.byref(aux_state_handles),
shared_exec_handle,
ctypes.byref(exe_handle)))
except MXNetError as e:
error_msg = "simple_bind error. Arguments:\n"
for k, v in kwargs.items():
error_msg += "%s: %s\n" % (k, v)
error_msg += "%s" % e
raise RuntimeError(error_msg)
# update shared_buffer
if shared_buffer is not None:
for i in range(shared_buffer_len.value):
k = py_str(updated_shared_buffer_names[i])
v = NDArray(NDArrayHandle(updated_shared_buffer_handles[i]))
shared_buffer[k] = v
# create in_args, arg_grads, and aux_states for the current executor
arg_arrays = [_ndarray_cls(NDArrayHandle(in_arg_handles[i]))
for i in range(num_in_args.value)]
grad_arrays = [_ndarray_cls(NDArrayHandle(arg_grad_handles[i]))
if arg_grad_handles[i] is not None
else None for i in range(num_in_args.value)]
aux_arrays = [_ndarray_cls(NDArrayHandle(aux_state_handles[i]))
for i in range(num_aux_states.value)]
executor = Executor(exe_handle, self, ctx, grad_req, group2ctx)
executor.arg_arrays = arg_arrays
executor.grad_arrays = grad_arrays
executor.aux_arrays = aux_arrays
return executor
def bind(self, ctx, args, args_grad=None, grad_req='write',
aux_states=None, group2ctx=None, shared_exec=None):
"""Binds the current symbol to an executor and returns it.
We first declare the computation and then bind to the data to run.
This function returns an executor which provides method `forward()` method for evaluation
and a `outputs()` method to get all the results.
Example
-------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = a + b
>>> c
<Symbol _plus1>
>>> ex = c.bind(ctx=mx.cpu(), args={'a' : mx.nd.ones([2,3]), 'b' : mx.nd.ones([2,3])})
>>> ex.forward()
[<NDArray 2x3 @cpu(0)>]
>>> ex.outputs[0].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
Parameters
----------
ctx : Context
The device context the generated executor to run on.
args : list of NDArray or dict of str to NDArray
Input arguments to the symbol.
- If the input type is a list of `NDArray`, the order should be the same as the order
of `list_arguments()`.
- If the input type is a dict of str to `NDArray`, then it maps the name of arguments
to the corresponding `NDArray`.
- In either case, all the arguments must be provided.
args_grad : list of NDArray or dict of str to `NDArray`, optional
When specified, `args_grad` provides NDArrays to hold
the result of gradient value in backward.
- If the input type is a list of `NDArray`, the order should be the same as the order
of `list_arguments()`.
- If the input type is a dict of str to `NDArray`, then it maps the name of arguments
to the corresponding NDArray.
- When the type is a dict of str to `NDArray`, one only needs to provide the
entries for the required argument gradients.
Only the specified argument gradients will be calculated.
grad_req : {'write', 'add', 'null'}, or list of str or dict of str to str, optional
To specify how we should update the gradient to the `args_grad`.
- 'write' means everytime gradient is write to specified `args_grad` `NDArray`.
- 'add' means everytime gradient is add to the specified NDArray.
- 'null' means no action is taken, the gradient may not be calculated.
aux_states : list of `NDArray`, or dict of str to `NDArray`, optional
Input auxiliary states to the symbol, only needed when the output of
`list_auxiliary_states()` is not empty.
- If the input type is a list of `NDArray`, the order should be the same as the order
of `list_auxiliary_states()`.
- If the input type is a dict of str to `NDArray`, then it maps the names of
`auxiliary_states` to the corresponding `NDArray`.
- In either case, all the auxiliary states need to be provided.
group2ctx : Dict of string to mx.Context
The dict mapping the `ctx_group` attribute to the context assignment.
shared_exec : mx.executor.Executor
Executor to share memory with. This is intended for runtime reshaping, variable length
sequences, etc. The returned executor shares state with `shared_exec`, and should not be
used in parallel with it.
Returns
-------
executor : Executor
The generated executor
Notes
-----
Auxiliary states are the special states of symbols that do not correspond
to an argument, and do not have gradients but are still useful
for specific operations. Common examples of auxiliary states include
the `moving_mean` and `moving_variance` states in `BatchNorm`.
Most operators do not have auxiliary states and in those cases,
this parameter can be safely ignored.
One can skip computing certain gradients by passing a dict as `args_grad`
and specifying only the gradients of interest.
"""
# pylint: disable=too-many-locals, too-many-branches
if not isinstance(ctx, Context):
raise TypeError("Context type error")
listed_arguments = self.list_arguments()
args_handle, args = self._get_ndarray_inputs('args', args, listed_arguments, False)
# setup args gradient
if args_grad is None:
args_grad_handle = c_array(NDArrayHandle, [None] * len(args))
else:
args_grad_handle, args_grad = self._get_ndarray_inputs(
'args_grad', args_grad, listed_arguments, True)
if aux_states is None:
aux_states = []
aux_args_handle, aux_states = self._get_ndarray_inputs(
'aux_states', aux_states, self.list_auxiliary_states(), False)
# setup requirements
if isinstance(grad_req, string_types):
if grad_req not in _GRAD_REQ_MAP:
raise ValueError('grad_req must be in %s' % str(_GRAD_REQ_MAP))
reqs_array = c_array_buf(mx_uint,
array('I', [_GRAD_REQ_MAP[grad_req]] * len(listed_arguments)))
elif isinstance(grad_req, list):
reqs_array = c_array_buf(mx_uint,
array('I', [_GRAD_REQ_MAP[item] for item in grad_req]))
elif isinstance(grad_req, dict):
req_array = []
for name in listed_arguments:
if name in grad_req:
req_array.append(_GRAD_REQ_MAP[grad_req[name]])
else:
req_array.append(0)
reqs_array = c_array_buf(mx_uint, array('I', req_array))
ctx_map_keys = []
ctx_map_dev_types = []
ctx_map_dev_ids = []
if group2ctx:
for key, val in group2ctx.items():
ctx_map_keys.append(key)
ctx_map_dev_types.append(val.device_typeid)
ctx_map_dev_ids.append(val.device_id)
handle = ExecutorHandle()
shared_handle = shared_exec.handle if shared_exec is not None else ExecutorHandle()
check_call(_LIB.MXExecutorBindEX(self.handle,
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
mx_uint(len(ctx_map_keys)),
c_str_array(ctx_map_keys),
c_array_buf(ctypes.c_int, array('i', ctx_map_dev_types)),
c_array_buf(ctypes.c_int, array('i', ctx_map_dev_ids)),
mx_uint(len(args)),
args_handle,
args_grad_handle,
reqs_array,
mx_uint(len(aux_states)),
aux_args_handle,
shared_handle,
ctypes.byref(handle)))
executor = Executor(handle, self, ctx, grad_req, group2ctx)
executor.arg_arrays = args
executor.grad_arrays = args_grad
executor.aux_arrays = aux_states
return executor
def gradient(self, wrt):
"""Gets the autodiff of current symbol.
This function can only be used if current symbol is a loss function.
.. note:: This function is currently not implemented.
Parameters
----------
wrt : Array of String
keyword arguments of the symbol that the gradients are taken.
Returns
-------
grad : Symbol
A gradient Symbol with returns to be the corresponding gradients.
"""
handle = SymbolHandle()
c_wrt = c_str_array(wrt)
check_call(_LIB.MXSymbolGrad(self.handle,
mx_uint(len(wrt)),
c_wrt,
ctypes.byref(handle)))
return Symbol(handle)
def eval(self, ctx=None, **kwargs):
"""Evaluates a symbol given arguments.
The `eval` method combines a call to `bind` (which returns an executor)
with a call to `forward` (executor method).
For the common use case, where you might repeatedly evaluate with the same arguments,
`eval` is slow.
In that case, you should call `bind` once and then repeatedly call `forward`.
This function allows simpler syntax for less cumbersome introspection.
Example
-------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = a + b
>>> ex = c.eval(ctx = mx.cpu(), a = mx.nd.ones([2,3]), b = mx.nd.ones([2,3]))
>>> ex
[<NDArray 2x3 @cpu(0)>]
>>> ex[0].asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
Parameters
----------
ctx : Context
The device context the generated executor to run on.
kwargs : Keyword arguments of type `NDArray`
Input arguments to the symbol. All the arguments must be provided.
Returns
-------
result : a list of NDArrays corresponding to the values taken by each symbol when
evaluated on given args. When called on a single symbol (not a group),
the result will be a list with one element.
"""
if ctx is None:
ctx = current_context()
return self.bind(ctx, kwargs).forward()
def get_backend_symbol(self, backend):
"""Return symbol for target backend.
Parameters
----------
backend : str
The backend name.
Returns
-------
out : Symbol
The created Symbol for target backend.
"""
out = SymbolHandle()
check_call(_LIB.MXGenBackendSubgraph(self.handle, c_str(backend), ctypes.byref(out)))
return Symbol(out)
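# Hypothetical usage sketch; the backend name 'MKLDNN' is an assumption and
# must match a subgraph backend available in your MXNet build:
#
#     >>> sym = mx.sym.FullyConnected(mx.sym.var('data'), num_hidden=10)
#     >>> partitioned = sym.get_backend_symbol('MKLDNN')   # a new Symbol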
def hybrid_forward(self, F, x):
"""Perform pixel-shuffling on the input."""
f = self._factor
# (N, C*f, W)
x = F.reshape(x, (0, -4, -1, f, 0)) # (N, C, f, W)
x = F.transpose(x, (0, 1, 3, 2)) # (N, C, W, f)
x = F.reshape(x, (0, 0, -3)) # (N, C, W*f)
return x
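# A shape sketch (not from the original source) replaying the 1-D chain above
# with F = mx.nd and a hypothetical factor f=2, so (N, C*f, W) -> (N, C, W*f):
#
#     >>> x = mx.nd.arange(48).reshape((2, 6, 4))    # (N=2, C*f=6, W=4)
#     >>> y = mx.nd.reshape(x, (0, -4, -1, 2, 0))    # (2, 3, 2, 4)
#     >>> y = mx.nd.transpose(y, (0, 1, 3, 2))       # (2, 3, 4, 2)
#     >>> mx.nd.reshape(y, (0, 0, -3)).shape
#     (2, 3, 8)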
def hybrid_forward(self, F, x):
"""Perform pixel-shuffling on the input."""
f1, f2 = self._factors
# (N, f1*f2*C, H, W)
x = F.reshape(x, (0, -4, -1, f1 * f2, 0, 0)) # (N, C, f1*f2, H, W)
x = F.reshape(x, (0, 0, -4, f1, f2, 0, 0)) # (N, C, f1, f2, H, W)
x = F.transpose(x, (0, 1, 4, 2, 5, 3)) # (N, C, H, f1, W, f2)
x = F.reshape(x, (0, 0, -3, -3)) # (N, C, H*f1, W*f2)
return x
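# The same idea in 2-D (hedged sketch, F = mx.nd, hypothetical factors
# f1=2, f2=3), mapping (N, f1*f2*C, H, W) -> (N, C, H*f1, W*f2):
#
#     >>> x = mx.nd.zeros((1, 24, 5, 7))                 # C=4, f1*f2=6
#     >>> y = mx.nd.reshape(x, (0, -4, -1, 6, 0, 0))     # (1, 4, 6, 5, 7)
#     >>> y = mx.nd.reshape(y, (0, 0, -4, 2, 3, 0, 0))   # (1, 4, 2, 3, 5, 7)
#     >>> y = mx.nd.transpose(y, (0, 1, 4, 2, 5, 3))     # (1, 4, 5, 2, 7, 3)
#     >>> mx.nd.reshape(y, (0, 0, -3, -3)).shape
#     (1, 4, 10, 21)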
def hybrid_forward(self, F, x):
"""Perform pixel-shuffling on the input."""
# `transpose` doesn't support 8D, need other implementation
f1, f2, f3 = self._factors
# (N, C*f1*f2*f3, D, H, W)
x = F.reshape(x, (0, -4, -1, f1 * f2 * f3, 0, 0, 0)) # (N, C, f1*f2*f3, D, H, W)
x = F.swapaxes(x, 2, 3) # (N, C, D, f1*f2*f3, H, W)
x = F.reshape(x, (0, 0, 0, -4, f1, f2*f3, 0, 0)) # (N, C, D, f1, f2*f3, H, W)
x = F.reshape(x, (0, 0, -3, 0, 0, 0)) # (N, C, D*f1, f2*f3, H, W)
x = F.swapaxes(x, 3, 4) # (N, C, D*f1, H, f2*f3, W)
x = F.reshape(x, (0, 0, 0, 0, -4, f2, f3, 0)) # (N, C, D*f1, H, f2, f3, W)
x = F.reshape(x, (0, 0, 0, -3, 0, 0)) # (N, C, D*f1, H*f2, f3, W)
x = F.swapaxes(x, 4, 5) # (N, C, D*f1, H*f2, W, f3)
x = F.reshape(x, (0, 0, 0, 0, -3)) # (N, C, D*f1, H*f2, W*f3)
return x
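# 3-D sketch (hedged): `transpose` cannot handle the 8-D intermediate, so the
# chain above interleaves `swapaxes` and `reshape`. With hypothetical factors
# f1, f2, f3 = 2, 2, 3 it maps (N, C*f1*f2*f3, D, H, W) -> (N, C, D*f1, H*f2, W*f3):
#
#     >>> x = mx.nd.zeros((1, 24, 3, 4, 5))              # C=2, f1*f2*f3=12
#     >>> y = mx.nd.reshape(x, (0, -4, -1, 12, 0, 0, 0)) # (1, 2, 12, 3, 4, 5)
#     >>> y = mx.nd.swapaxes(y, 2, 3)                    # (1, 2, 3, 12, 4, 5)
#     >>> # ...continuing the chain above yields shape (1, 2, 6, 8, 15)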
def retry(target_exception, tries=4, delay_s=1, backoff=2):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param target_exception: the exception to check. may be a tuple of
exceptions to check
:type target_exception: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay_s: initial delay between retries in seconds
:type delay_s: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
"""
import time
from functools import wraps
def decorated_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay_s
while mtries > 1:
try:
return f(*args, **kwargs)
except target_exception as e:
logging.warning("Exception: %s, Retrying in %d seconds...", str(e), mdelay)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return decorated_retry
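# Hypothetical usage of the decorator above: retry a flaky download for up to
# 4 attempts, doubling a 1-second delay after each failure. `fetch` and the
# use of urllib are illustrative, not part of the original source.
#
#     @retry(IOError, tries=4, delay_s=1, backoff=2)
#     def fetch(url):
#         import urllib.request
#         return urllib.request.urlopen(url).read()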
def load_model(model_name, epoch_num, data_shapes, label_shapes, label_names, gpus=''):
"""Returns a module loaded with the provided model.
Parameters
----------
model_name: str
Prefix of the MXNet model name as stored on the local directory.
epoch_num : int
Epoch number of model we would like to load.
data_shapes: list of tuples.
List of tuples where each tuple is a pair of input variable name and its shape.
label_shapes: list of (str, tuple)
Typically is ``data_iter.provide_label``.
label_names: list of str
Name of the output labels in the MXNet symbolic graph.
gpus: str
Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6.
If empty, we use CPU.
Returns
-------
MXNet module
"""
sym, arg_params, aux_params = mx.model.load_checkpoint(model_name, epoch_num)
mod = create_module(sym, data_shapes, label_shapes, label_names, gpus)
mod.set_params(
arg_params=arg_params,
aux_params=aux_params,
allow_missing=True
)
return mod
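# Hypothetical call (the checkpoint prefix 'resnet' and epoch 10 are
# assumptions): loads 'resnet-symbol.json' plus 'resnet-0010.params' from the
# working directory and binds for CPU inference with batch size 1.
#
#     mod = load_model('resnet', 10,
#                      data_shapes=[('data', (1, 3, 224, 224))],
#                      label_shapes=None, label_names=[], gpus='')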
def create_module(sym, data_shapes, label_shapes, label_names, gpus=''):
"""Creates a new MXNet module.
Parameters
----------
sym : Symbol
An MXNet symbol.
data_shapes: list of tuples.
List of tuples where each tuple is a pair of input variable name and its shape.
label_shapes: list of (str, tuple)
Typically is ``data_iter.provide_label``.
label_names: list of str
Name of the output labels in the MXNet symbolic graph.
gpus: str
Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6.
If empty, we use CPU.
Returns
-------
MXNet module
"""
if gpus == '':
devices = mx.cpu()
else:
devices = [mx.gpu(int(i)) for i in gpus.split(',')]
data_names = [data_shape[0] for data_shape in data_shapes]
mod = mx.mod.Module(
symbol=sym,
data_names=data_names,
context=devices,
label_names=label_names
)
mod.bind(
for_training=False,
data_shapes=data_shapes,
label_shapes=label_shapes
)
return mod
def evaluate_net(net, path_imgrec, num_classes, num_batch, mean_pixels, data_shape,
model_prefix, epoch, ctx=mx.cpu(), batch_size=32,
path_imglist="", nms_thresh=0.45, force_nms=False,
ovp_thresh=0.5, use_difficult=False, class_names=None,
voc07_metric=False):
"""
evalute network given validation record file
Parameters:
----------
net : str or None
Network name or use None to load from json without modifying
path_imgrec : str
path to the record validation file
path_imglist : str
path to the list file to replace labels in record file, optional
num_classes : int
number of classes, not including background
num_batch : int
number of batches to evaluate
mean_pixels : tuple
(mean_r, mean_g, mean_b)
data_shape : tuple or int
(3, height, width) or height/width
model_prefix : str
model prefix of saved checkpoint
epoch : int
load model epoch
ctx : mx.ctx
mx.gpu() or mx.cpu()
batch_size : int
validation batch size
nms_thresh : float
non-maximum suppression threshold
force_nms : boolean
whether to suppress objects of different classes during NMS
ovp_thresh : float
AP overlap threshold for true/false positives
use_difficult : boolean
whether to use difficult objects in evaluation if applicable
class_names : comma separated str
class names in a string; must correspond to num_classes if set
voc07_metric : boolean
whether to use 11-point evaluation as in the VOC07 competition
"""
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# args
if isinstance(data_shape, int):
data_shape = (3, data_shape, data_shape)
assert len(data_shape) == 3 and data_shape[0] == 3
model_prefix += '_' + str(data_shape[1])
# iterator
eval_iter = DetRecordIter(path_imgrec, batch_size, data_shape, mean_pixels=mean_pixels,
path_imglist=path_imglist, **cfg.valid)
# model params
load_net, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
# network
if net is None:
net = load_net
else:
net = get_symbol(net, data_shape[1], num_classes=num_classes,
nms_thresh=nms_thresh, force_suppress=force_nms)
if 'label' not in net.list_arguments():
label = mx.sym.Variable(name='label')
net = mx.sym.Group([net, label])
# init module
mod = mx.mod.Module(net, label_names=('label',), logger=logger, context=ctx,
fixed_param_names=net.list_arguments())
mod.bind(data_shapes=eval_iter.provide_data, label_shapes=eval_iter.provide_label)
mod.set_params(args, auxs, allow_missing=False, force_init=True)
# run evaluation
if voc07_metric:
metric = VOC07MApMetric(ovp_thresh, use_difficult, class_names)
else:
metric = MApMetric(ovp_thresh, use_difficult, class_names)
num = num_batch * batch_size
data = [mx.random.uniform(-1.0, 1.0, shape=shape, ctx=ctx) for _, shape in mod.data_shapes]
batch = mx.io.DataBatch(data, []) # empty label
dry_run = 5 # use 5 iterations to warm up
for i in range(dry_run):
mod.forward(batch, is_train=False)
for output in mod.get_outputs():
output.wait_to_read()
tic = time.time()
results = mod.score(eval_iter, metric, num_batch=num_batch)
speed = num / (time.time() - tic)
if logger is not None:
logger.info('Finished inference with %d images' % num)
logger.info('Finished with %f images per second', speed)
for k, v in results:
print("{}: {}".format(k, v)) |
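For reference, a minimal call sketch; the record file, checkpoint prefix, class count, and mean values below are hypothetical placeholders for a VOC-style SSD setup, not files shipped with the tool.

import mxnet as mx

# evaluate a saved checkpoint on a 300x300 input (placeholder paths)
evaluate_net(net='vgg16_reduced', path_imgrec='./data/val.rec',
             num_classes=20, num_batch=20,
             mean_pixels=(123, 117, 104), data_shape=300,
             model_prefix='./model/ssd', epoch=0,
             ctx=mx.cpu(), batch_size=32,
             ovp_thresh=0.5, voc07_metric=True)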
Initializes the parameters and auxiliary states. By default this function
does nothing. Subclasses should override this method if they contain parameters.
Parameters
----------
initializer : Initializer
Called to initialize parameters if needed.
arg_params : dict
If not ``None``, should be a dictionary of existing `arg_params`. Initialization
will be copied from that.
aux_params : dict
If not ``None``, should be a dictionary of existing `aux_params`. Initialization
will be copied from that.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
Whether to allow extra parameters that are not needed by the symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that are not needed by the executor.
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
"""Initializes the parameters and auxiliary states. By default this function
does nothing. Subclass should override this method if contains parameters.
Parameters
----------
initializer : Initializer
Called to initialize parameters if needed.
arg_params : dict
If not ``None``, should be a dictionary of existing `arg_params`. Initialization
will be copied from that.
aux_params : dict
If not ``None``, should be a dictionary of existing `aux_params`. Initialization
will be copied from that.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
Whether to allow extra parameters that are not needed by the symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that are not needed by the executor.
"""
pass |
Evaluates and accumulates evaluation metric on outputs of the last forward computation.
Subclass should override this method if needed.
Parameters
----------
eval_metric : EvalMetric
labels : list of NDArray
Typically ``data_batch.label``.
pre_sliced : bool
Whether the labels are already sliced per device. Not supported by this module.
def update_metric(self, eval_metric, labels, pre_sliced=False):
"""Evaluates and accumulates evaluation metric on outputs of the last forward computation.
Subclass should override this method if needed.
Parameters
----------
eval_metric : EvalMetric
labels : list of NDArray
Typically ``data_batch.label``.
pre_sliced : bool
Whether the labels are already sliced per device. Not supported by this module.
"""
if self._label_shapes is None:
# since we do not need labels, we are probably not a module with a loss
# function or predictions, so just ignore this call
return
if pre_sliced:
raise RuntimeError("PythonModule does not support presliced labels")
# by default we expect our outputs are some scores that could be evaluated
eval_metric.update(labels, self.get_outputs()) |
Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
for_training : bool
Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
for_training : bool
Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
"""
if self.binded and not force_rebind:
self.logger.warning('Already bound, ignoring bind()')
return
assert grad_req == 'write', "Python module only supports write gradient"
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
assert len(data_shapes) == len(self._data_names)
assert [x[0] for x in data_shapes] == self._data_names
self._data_shapes = data_shapes
self._label_shapes = label_shapes
if label_shapes is not None:
assert self._label_names is not None
assert len(self._label_names) == len(label_shapes)
assert [x[0] for x in label_shapes] == self._label_names
self._output_shapes = self._compute_output_shapes() |
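As context, a minimal sketch of the usual call order on a Module-like object; `mod` and `train_iter` are assumed to exist and to follow the standard Module API.

import mxnet as mx

# bind executors against the iterator's shapes, then initialize parameters
mod.bind(data_shapes=train_iter.provide_data,
         label_shapes=train_iter.provide_label,
         for_training=True)
mod.init_params(initializer=mx.init.Uniform(0.01))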
Forward computation. Here we do nothing but keep a reference to
the scores and the labels so that we can do backward computation.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means `is_train` takes the value of ``self.for_training``.
def forward(self, data_batch, is_train=None):
"""Forward computation. Here we do nothing but to keep a reference to
the scores and the labels so that we can do backward computation.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means `is_train` takes the value of ``self.for_training``.
"""
self._scores = data_batch.data[0]
if is_train is None:
is_train = self.for_training
if is_train:
self._labels = data_batch.label[0] |
Actual implementation of the backward computation. The computation
should take ``self._scores`` and ``self._labels``, compute the
gradients with respect to the scores, and store them as an `NDArray` in
``self._scores_grad``.
Instead of defining a subclass and overriding this function,
a more convenient way is to pass in a `grad_func` when constructing
the module object. Then it will be called to compute the gradients.
def _backward_impl(self):
"""Actual implementation of the backward computation. The computation
should take ``self._scores`` and ``self._labels`` and then compute the
gradients with respect to the scores, store it as an `NDArray` in
``self._scores_grad``.
Instead of defining a subclass and overriding this function,
a more convenient way is to pass in a `grad_func` when constructing
the module object. Then it will be called to compute the gradients.
"""
if self._grad_func is not None:
grad = self._grad_func(self._scores, self._labels)
if not isinstance(grad, nd.NDArray):
grad = nd.array(grad)
self._scores_grad = grad
else:
raise NotImplementedError() |
Encode sentences and (optionally) build a mapping
from string tokens to integer indices. Unknown keys
will be added to vocabulary.
Parameters
----------
sentences : list of list of str
A list of sentences to encode. Each sentence
should be a list of string tokens.
vocab : None or dict of str -> int
Optional input Vocabulary
invalid_label : int, default -1
Index for invalid token, like <end-of-sentence>
invalid_key : str, default '\\n'
Key for invalid token. Use '\\n' for end
of sentence by default.
start_label : int
lowest index.
unknown_token : str
Symbol to represent unknown token.
If not specified, an unknown token raises an error when a fixed vocabulary is used.
Returns
-------
result : list of list of int
encoded sentences
vocab : dict of str -> int
result vocabulary
def encode_sentences(sentences, vocab=None, invalid_label=-1, invalid_key='\n',
start_label=0, unknown_token=None):
"""Encode sentences and (optionally) build a mapping
from string tokens to integer indices. Unknown keys
will be added to vocabulary.
Parameters
----------
sentences : list of list of str
A list of sentences to encode. Each sentence
should be a list of string tokens.
vocab : None or dict of str -> int
Optional input Vocabulary
invalid_label : int, default -1
Index for invalid token, like <end-of-sentence>
invalid_key : str, default '\\n'
Key for invalid token. Use '\\n' for end
of sentence by default.
start_label : int
lowest index.
unknown_token : str
Symbol to represent unknown token.
If not specified, an unknown token raises an error when a fixed vocabulary is used.
Returns
-------
result : list of list of int
encoded sentences
vocab : dict of str -> int
result vocabulary
"""
idx = start_label
if vocab is None:
vocab = {invalid_key: invalid_label}
new_vocab = True
else:
new_vocab = False
res = []
for sent in sentences:
coded = []
for word in sent:
if word not in vocab:
assert (new_vocab or unknown_token), "Unknown token %s"%word
if idx == invalid_label:
idx += 1
if unknown_token:
word = unknown_token
vocab[word] = idx
idx += 1
coded.append(vocab[word])
res.append(coded)
return res, vocab |
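For illustration, a small doctest-style run with the default arguments; the token values below follow the logic above, with the vocabulary built on the fly:

>>> sents = [['the', 'cat'], ['the', 'dog']]
>>> coded, vocab = encode_sentences(sents)
>>> coded
[[0, 1], [0, 2]]
>>> sorted(vocab.items())
[('\n', -1), ('cat', 1), ('dog', 2), ('the', 0)]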
Resets the iterator to the beginning of the data.
def reset(self):
"""Resets the iterator to the beginning of the data."""
self.curr_idx = 0
random.shuffle(self.idx)
for buck in self.data:
np.random.shuffle(buck)
self.nddata = []
self.ndlabel = []
for buck in self.data:
label = np.empty_like(buck)
label[:, :-1] = buck[:, 1:]
label[:, -1] = self.invalid_label
self.nddata.append(ndarray.array(buck, dtype=self.dtype))
self.ndlabel.append(ndarray.array(label, dtype=self.dtype)) |
Returns the next batch of data.
def next(self):
"""Returns the next batch of data."""
if self.curr_idx == len(self.idx):
raise StopIteration
i, j = self.idx[self.curr_idx]
self.curr_idx += 1
if self.major_axis == 1:
data = self.nddata[i][j:j+self.batch_size].T
label = self.ndlabel[i][j:j+self.batch_size].T
else:
data = self.nddata[i][j:j+self.batch_size]
label = self.ndlabel[i][j:j+self.batch_size]
return DataBatch([data], [label], pad=0,
bucket_key=self.buckets[i],
provide_data=[DataDesc(
name=self.data_name, shape=data.shape,
layout=self.layout)],
provide_label=[DataDesc(
name=self.label_name, shape=label.shape,
layout=self.layout)]) |
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
def getInstance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance |
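For context, a sketch of the decorator class this method is assumed to belong to (the name `Singleton` is illustrative); the decorated class can then only be instantiated through `getInstance`:

class Singleton(object):
    def __init__(self, decorated):
        self._decorated = decorated

    def getInstance(self):
        try:
            return self._instance
        except AttributeError:
            self._instance = self._decorated()
            return self._instance

    def __call__(self):
        raise TypeError('Singletons must be accessed through getInstance().')

@Singleton
class Config(object):
    pass

assert Config.getInstance() is Config.getInstance()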
Description : run lipnet inference using argument info
def main():
"""
Description : run lipnet inference using argument info
"""
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--image_path', type=str, default='./data/datasets/')
parser.add_argument('--align_path', type=str, default='./data/align/')
parser.add_argument('--num_gpus', type=int, default=1)
parser.add_argument('--num_workers', type=int, default=0)
parser.add_argument('--data_type', type=str, default='valid')
parser.add_argument('--model_path', type=str, default=None)
config = parser.parse_args()
trainer = Train(config)
trainer.build_model(path=config.model_path)
trainer.load_dataloader()
if config.data_type == 'train':
data_loader = trainer.train_dataloader
elif config.data_type == 'valid':
    data_loader = trainer.valid_dataloader
else:
    raise ValueError("Unknown data_type '%s' (expected 'train' or 'valid')" % config.data_type)
trainer.infer_batch(data_loader) |
Get the variable given a name if one exists or create a new one if missing.
Parameters
----------
name : str
name of the variable
**kwargs :
more arguments that are passed to symbol.Variable
def get(self, name, **kwargs):
"""Get the variable given a name if one exists or create a new one if missing.
Parameters
----------
name : str
name of the variable
**kwargs :
more arguments that are passed to symbol.Variable
"""
name = self._prefix + name
if name not in self._params:
self._params[name] = symbol.Variable(name, **kwargs)
return self._params[name] |
Reset before re-using the cell for another graph.
def reset(self):
"""Reset before re-using the cell for another graph."""
self._init_counter = -1
self._counter = -1
if hasattr(self, '_cells'):
for cell in self._cells:
cell.reset() |
Initial state for this cell.
Parameters
----------
func : callable, default symbol.zeros
Function for creating initial state. Can be symbol.zeros,
symbol.uniform, symbol.Variable etc.
Use symbol.Variable if you want to directly
feed input as states.
**kwargs :
more keyword arguments passed to func. For example
mean, std, dtype, etc.
Returns
-------
states : nested list of Symbol
Starting states for the first RNN step.
def begin_state(self, func=symbol.zeros, **kwargs):
"""Initial state for this cell.
Parameters
----------
func : callable, default symbol.zeros
Function for creating initial state. Can be symbol.zeros,
symbol.uniform, symbol.Variable etc.
Use symbol.Variable if you want to directly
feed input as states.
**kwargs :
more keyword arguments passed to func. For example
mean, std, dtype, etc.
Returns
-------
states : nested list of Symbol
Starting states for the first RNN step.
"""
assert not self._modified, \
"After applying modifier cells (e.g. DropoutCell) the base " \
"cell cannot be called directly. Call the modifier cell instead."
states = []
for info in self.state_info:
self._init_counter += 1
if info is None:
state = func(name='%sbegin_state_%d'%(self._prefix, self._init_counter),
**kwargs)
else:
kwargs.update(info)
state = func(name='%sbegin_state_%d'%(self._prefix, self._init_counter),
**kwargs)
states.append(state)
return states |
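As a quick illustration, a sketch with a concrete cell from the standard mx.rnn API; state names follow the prefix convention above:

import mxnet as mx

cell = mx.rnn.LSTMCell(num_hidden=100, prefix='lstm_')
states = cell.begin_state()  # two Symbols for an LSTM: hidden state and cell state
inputs = mx.sym.Variable('data')
output, states = cell(inputs, states)  # one step, starting from the initial states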
Unpack fused weight matrices into separate
weight matrices.
For example, say you use a module object `mod` to run a network that has an lstm cell.
In `mod.get_params()[0]`, the lstm parameters are all represented as a single big vector.
`cell.unpack_weights(mod.get_params()[0])` will unpack this vector into a dictionary of
more readable lstm parameters - c, f, i, o gates for i2h (input to hidden) and
h2h (hidden to hidden) weights.
Parameters
----------
args : dict of str -> NDArray
Dictionary containing packed weights,
usually from `Module.get_params()[0]`.
Returns
-------
args : dict of str -> NDArray
Dictionary with unpacked weights associated with
this cell.
See Also
--------
pack_weights: Performs the reverse operation of this function.
def unpack_weights(self, args):
"""Unpack fused weight matrices into separate
weight matrices.
For example, say you use a module object `mod` to run a network that has an lstm cell.
In `mod.get_params()[0]`, the lstm parameters are all represented as a single big vector.
`cell.unpack_weights(mod.get_params()[0])` will unpack this vector into a dictionary of
more readable lstm parameters - c, f, i, o gates for i2h (input to hidden) and
h2h (hidden to hidden) weights.
Parameters
----------
args : dict of str -> NDArray
Dictionary containing packed weights,
usually from `Module.get_params()[0]`.
Returns
-------
args : dict of str -> NDArray
Dictionary with unpacked weights associated with
this cell.
See Also
--------
pack_weights: Performs the reverse operation of this function.
"""
args = args.copy()
if not self._gate_names:
return args
h = self._num_hidden
for group_name in ['i2h', 'h2h']:
weight = args.pop('%s%s_weight'%(self._prefix, group_name))
bias = args.pop('%s%s_bias' % (self._prefix, group_name))
for j, gate in enumerate(self._gate_names):
wname = '%s%s%s_weight' % (self._prefix, group_name, gate)
args[wname] = weight[j*h:(j+1)*h].copy()
bname = '%s%s%s_bias' % (self._prefix, group_name, gate)
args[bname] = bias[j*h:(j+1)*h].copy()
return args |
Pack separate weight matrices into a single packed
weight.
Parameters
----------
args : dict of str -> NDArray
Dictionary containing unpacked weights.
Returns
-------
args : dict of str -> NDArray
Dictionary with packed weights associated with
this cell.
def pack_weights(self, args):
"""Pack separate weight matrices into a single packed
weight.
Parameters
----------
args : dict of str -> NDArray
Dictionary containing unpacked weights.
Returns
-------
args : dict of str -> NDArray
Dictionary with packed weights associated with
this cell.
"""
args = args.copy()
if not self._gate_names:
return args
for group_name in ['i2h', 'h2h']:
weight = []
bias = []
for gate in self._gate_names:
wname = '%s%s%s_weight'%(self._prefix, group_name, gate)
weight.append(args.pop(wname))
bname = '%s%s%s_bias'%(self._prefix, group_name, gate)
bias.append(args.pop(bname))
args['%s%s_weight'%(self._prefix, group_name)] = ndarray.concatenate(weight)
args['%s%s_bias'%(self._prefix, group_name)] = ndarray.concatenate(bias)
return args |
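A hedged sketch of the round trip between the two representations; it assumes `cell` is an LSTMCell and `mod` a Module trained with it:

args, auxs = mod.get_params()
readable = cell.unpack_weights(args)  # one '*_weight'/'*_bias' entry per gate
packed = cell.pack_weights(readable)  # fuses the per-gate entries back together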
Unroll an RNN cell across time steps.
Parameters
----------
length : int
Number of steps to unroll.
inputs : Symbol, list of Symbol, or None
If `inputs` is a single Symbol (usually the output
of Embedding symbol), it should have shape
(batch_size, length, ...) if layout == 'NTC',
or (length, batch_size, ...) if layout == 'TNC'.
If `inputs` is a list of symbols (usually output of
previous unroll), they should all have shape
(batch_size, ...).
begin_state : nested list of Symbol, default None
Input states created by `begin_state()`
or output state of another cell.
Created from `begin_state()` if None.
layout : str, optional
`layout` of input symbol. Only used if inputs
is a single Symbol.
merge_outputs : bool, optional
If False, return outputs as a list of Symbols.
If True, concatenate output across time steps
and return a single symbol with shape
(batch_size, length, ...) if layout == 'NTC',
or (length, batch_size, ...) if layout == 'TNC'.
If None, output whatever is faster.
Returns
-------
outputs : list of Symbol or Symbol
Symbol (if `merge_outputs` is True) or list of Symbols
(if `merge_outputs` is False) corresponding to the output from
the RNN from this unrolling.
states : nested list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is the same as the output of begin_state().
def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None):
"""Unroll an RNN cell across time steps.
Parameters
----------
length : int
Number of steps to unroll.
inputs : Symbol, list of Symbol, or None
If `inputs` is a single Symbol (usually the output
of Embedding symbol), it should have shape
(batch_size, length, ...) if layout == 'NTC',
or (length, batch_size, ...) if layout == 'TNC'.
If `inputs` is a list of symbols (usually output of
previous unroll), they should all have shape
(batch_size, ...).
begin_state : nested list of Symbol, default None
Input states created by `begin_state()`
or output state of another cell.
Created from `begin_state()` if None.
layout : str, optional
`layout` of input symbol. Only used if inputs
is a single Symbol.
merge_outputs : bool, optional
If False, return outputs as a list of Symbols.
If True, concatenate output across time steps
and return a single symbol with shape
(batch_size, length, ...) if layout == 'NTC',
or (length, batch_size, ...) if layout == 'TNC'.
If None, output whatever is faster.
Returns
-------
outputs : list of Symbol or Symbol
Symbol (if `merge_outputs` is True) or list of Symbols
(if `merge_outputs` is False) corresponding to the output from
the RNN from this unrolling.
states : nested list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is the same as the output of begin_state().
"""
self.reset()
inputs, _ = _normalize_sequence(length, inputs, layout, False)
if begin_state is None:
begin_state = self.begin_state()
states = begin_state
outputs = []
for i in range(length):
output, states = self(inputs[i], states)
outputs.append(output)
outputs, _ = _normalize_sequence(length, outputs, layout, merge_outputs)
return outputs, states |
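A minimal sketch of unrolling over 10 steps, assuming the standard mx.rnn API and a batch-major ('NTC') embedding output:

import mxnet as mx

cell = mx.rnn.GRUCell(num_hidden=50, prefix='gru_')
data = mx.sym.Variable('data')  # expected shape: (batch_size, 10, feature_dim)
outputs, states = cell.unroll(length=10, inputs=data,
                              layout='NTC', merge_outputs=True)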
Get the activation function. Convert it if it is a string.
def _get_activation(self, inputs, activation, **kwargs):
"""Get activation function. Convert if is string"""
if isinstance(activation, string_types):
return symbol.Activation(inputs, act_type=activation, **kwargs)
else:
return activation(inputs, **kwargs) |
Slice fused RNN weights.
def _slice_weights(self, arr, li, lh):
"""slice fused rnn weights"""
args = {}
gate_names = self._gate_names
directions = self._directions
b = len(directions)
p = 0
for layer in range(self._num_layers):
for direction in directions:
for gate in gate_names:
name = '%s%s%d_i2h%s_weight'%(self._prefix, direction, layer, gate)
if layer > 0:
size = b*lh*lh
args[name] = arr[p:p+size].reshape((lh, b*lh))
else:
size = li*lh
args[name] = arr[p:p+size].reshape((lh, li))
p += size
for gate in gate_names:
name = '%s%s%d_h2h%s_weight'%(self._prefix, direction, layer, gate)
size = lh**2
args[name] = arr[p:p+size].reshape((lh, lh))
p += size
for layer in range(self._num_layers):
for direction in directions:
for gate in gate_names:
name = '%s%s%d_i2h%s_bias'%(self._prefix, direction, layer, gate)
args[name] = arr[p:p+lh]
p += lh
for gate in gate_names:
name = '%s%s%d_h2h%s_bias'%(self._prefix, direction, layer, gate)
args[name] = arr[p:p+lh]
p += lh
assert p == arr.size, "Invalid parameters size for FusedRNNCell"
return args |
Unfuse the fused RNN into a stack of RNN cells.
Returns
-------
cell : mxnet.rnn.SequentialRNNCell
unfused cell that can be used for stepping, and can run on CPU.
def unfuse(self):
"""Unfuse the fused RNN in to a stack of rnn cells.
Returns
-------
cell : mxnet.rnn.SequentialRNNCell
unfused cell that can be used for stepping, and can run on CPU.
"""
stack = SequentialRNNCell()
get_cell = {'rnn_relu': lambda cell_prefix: RNNCell(self._num_hidden,
activation='relu',
prefix=cell_prefix),
'rnn_tanh': lambda cell_prefix: RNNCell(self._num_hidden,
activation='tanh',
prefix=cell_prefix),
'lstm': lambda cell_prefix: LSTMCell(self._num_hidden,
prefix=cell_prefix),
'gru': lambda cell_prefix: GRUCell(self._num_hidden,
prefix=cell_prefix)}[self._mode]
for i in range(self._num_layers):
if self._bidirectional:
stack.add(BidirectionalCell(
get_cell('%sl%d_'%(self._prefix, i)),
get_cell('%sr%d_'%(self._prefix, i)),
output_prefix='%sbi_l%d_'%(self._prefix, i)))
else:
stack.add(get_cell('%sl%d_'%(self._prefix, i)))
if self._dropout > 0 and i != self._num_layers - 1:
stack.add(DropoutCell(self._dropout, prefix='%s_dropout%d_'%(self._prefix, i)))
return stack |
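A short sketch: a fused cell is typically trained on GPU and then unfused for CPU inference (assumes the standard mx.rnn API):

fused = mx.rnn.FusedRNNCell(num_hidden=100, num_layers=2, mode='lstm', prefix='lstm_')
stack = fused.unfuse()  # SequentialRNNCell of per-layer LSTMCells, usable on CPU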
Append a cell into the stack.
Parameters
----------
cell : BaseRNNCell
The cell to be appended. During unroll, previous cell's output (or raw inputs if
no previous cell) is used as the input to this cell.
def add(self, cell):
"""Append a cell into the stack.
Parameters
----------
cell : BaseRNNCell
The cell to be appended. During unroll, previous cell's output (or raw inputs if
no previous cell) is used as the input to this cell.
"""
self._cells.append(cell)
if self._override_cell_params:
assert cell._own_params, \
"Either specify params for SequentialRNNCell " \
"or child cells, not both."
cell.params._params.update(self.params._params)
self.params._params.update(cell.params._params) |
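For example, a two-layer stack with dropout in between (a sketch using the standard mx.rnn cells):

stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(num_hidden=100, prefix='l0_'))
stack.add(mx.rnn.DropoutCell(0.5, prefix='d0_'))
stack.add(mx.rnn.LSTMCell(num_hidden=100, prefix='l1_'))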
Reads an image from file path or URL, optionally resizing to given image dimensions and
subtracting mean.
:param img_path: path to file, or url to download
:param image_dims: image dimensions to resize to, or None
:param mean: mean file to subtract, or None
:return: loaded image, in RGB format
def read_image(img_path, image_dims=None, mean=None):
"""
Reads an image from file path or URL, optionally resizing to given image dimensions and
subtracting mean.
:param img_path: path to file, or url to download
:param image_dims: image dimensions to resize to, or None
:param mean: mean file to subtract, or None
:return: loaded image, in RGB format
"""
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2
filename = img_path.split("/")[-1]
if img_path.startswith('http'):
    urlretrieve(img_path, filename)
img = cv2.imread(filename)
else:
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if image_dims is not None:
img = cv2.resize(img, image_dims) # resize to image_dims to fit model
img = np.rollaxis(img, 2) # change to (c, h, w) order
img = img[np.newaxis, :] # extend to (n, c, h, w)
if mean is not None:
mean = np.array(mean)
if mean.shape == (3,):
mean = mean[np.newaxis, :, np.newaxis, np.newaxis] # extend to (n, c, 1, 1)
img = img.astype(np.float32) - mean # subtract mean
return img |
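A small usage sketch; the URL and the RGB mean values are illustrative placeholders:

# download, resize to 224x224, convert to (n, c, h, w) and subtract the mean
img = read_image('https://example.com/cat.jpg',
                 image_dims=(224, 224),
                 mean=(123.68, 116.779, 103.939))
print(img.shape)  # (1, 3, 224, 224)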
Changes device of given mxnet arguments
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param ctx: new device context
:return: arguments and auxiliary parameters on new device
def _ch_dev(arg_params, aux_params, ctx):
"""
Changes device of given mxnet arguments
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param ctx: new device context
:return: arguments and auxiliary parameters on new device
"""
new_args = dict()
new_auxs = dict()
for k, v in arg_params.items():
new_args[k] = v.as_in_context(ctx)
for k, v in aux_params.items():
new_auxs[k] = v.as_in_context(ctx)
return new_args, new_auxs |
Run the layer comparison on a caffe model, given its prototxt, weights and mean.
The comparison is done by inferring on a given image using both caffe and mxnet model
:param image_url: image file or url to run inference on
:param gpu: gpu to use, -1 for cpu
:param caffe_prototxt_path: path to caffe prototxt
:param caffe_model_path: path to caffe weights
:param caffe_mean: path to caffe mean file
:param mean_diff_allowed: mean difference allowed between caffe and mxnet blobs
:param max_diff_allowed: max difference allowed between caffe and mxnet blobs
def convert_and_compare_caffe_to_mxnet(image_url, gpu, caffe_prototxt_path, caffe_model_path,
caffe_mean, mean_diff_allowed, max_diff_allowed):
"""
Run the layer comparison on a caffe model, given its prototxt, weights and mean.
The comparison is done by inferring on a given image using both caffe and mxnet model
:param image_url: image file or url to run inference on
:param gpu: gpu to use, -1 for cpu
:param caffe_prototxt_path: path to caffe prototxt
:param caffe_model_path: path to caffe weights
:param caffe_mean: path to caffe mean file
:param mean_diff_allowed: mean difference allowed between caffe and mxnet blobs
:param max_diff_allowed: max difference allowed between caffe and mxnet blobs
"""
import caffe
from caffe_proto_utils import read_network_dag, process_network_proto, read_caffe_mean
from convert_model import convert_model
if isinstance(caffe_mean, str):
caffe_mean = read_caffe_mean(caffe_mean)
elif caffe_mean is None:
pass
elif len(caffe_mean) == 3:
# swap channels from Caffe BGR to RGB
caffe_mean = caffe_mean[::-1]
# get caffe root location, this is needed to run the upgrade network utility, so we only need
# to support parsing of latest caffe
caffe_root = os.path.dirname(os.path.dirname(caffe.__path__[0]))
caffe_prototxt_path = process_network_proto(caffe_root, caffe_prototxt_path)
_, layer_name_to_record, top_to_layers = read_network_dag(caffe_prototxt_path)
caffe.set_mode_cpu()
caffe_net = caffe.Net(caffe_prototxt_path, caffe_model_path, caffe.TEST)
image_dims = tuple(caffe_net.blobs['data'].shape)[2:4]
logging.info('getting image %s', image_url)
img_rgb = read_image(image_url, image_dims, caffe_mean)
img_bgr = img_rgb[:, ::-1, :, :]
caffe_net.blobs['data'].reshape(*img_bgr.shape)
caffe_net.blobs['data'].data[...] = img_bgr
_ = caffe_net.forward()
# read sym and add all outputs
sym, arg_params, aux_params, _ = convert_model(caffe_prototxt_path, caffe_model_path)
sym = sym.get_internals()
# now mxnet
if gpu < 0:
ctx = mx.cpu(0)
else:
ctx = mx.gpu(gpu)
arg_params, aux_params = _ch_dev(arg_params, aux_params, ctx)
arg_params["data"] = mx.nd.array(img_rgb, ctx)
arg_params["prob_label"] = mx.nd.empty((1,), ctx)
exe = sym.bind(ctx, arg_params, args_grad=None, grad_req="null", aux_states=aux_params)
exe.forward(is_train=False)
compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
top_to_layers, mean_diff_allowed, max_diff_allowed)
return |
Implementation of Breadth-first search (BFS) on caffe network DAG
:param root_node: root node of caffe network DAG
:param process_node: function to run on each node
def _bfs(root_node, process_node):
"""
Implementation of Breadth-first search (BFS) on caffe network DAG
:param root_node: root node of caffe network DAG
:param process_node: function to run on each node
"""
from collections import deque
seen_nodes = set()
next_nodes = deque()
seen_nodes.add(root_node)
next_nodes.append(root_node)
while next_nodes:
current_node = next_nodes.popleft()
# process current node
process_node(current_node)
for child_node in current_node.children:
if child_node not in seen_nodes:
seen_nodes.add(child_node)
next_nodes.append(child_node) |
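A self-contained toy run; nodes only need a `children` attribute, mirroring the records built by read_network_dag:

class Node(object):
    def __init__(self, name):
        self.name = name
        self.children = []

a, b, c = Node('a'), Node('b'), Node('c')
a.children = [b, c]
b.children = [c]  # diamond edge; c is still processed only once

def show(node):
    print(node.name)

_bfs(a, show)  # prints: a, b, c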
Compare layer by layer of a caffe network with mxnet network
:param caffe_net: loaded caffe network
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param exe: mxnet model
:param layer_name_to_record: map between caffe layer and information record
:param top_to_layers: map between caffe blob name to layers which outputs it (including inplace)
:param mean_diff_allowed: mean difference allowed between caffe blob and mxnet blob
:param max_diff_allowed: max difference allowed between caffe blob and mxnet blob
def compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
top_to_layers, mean_diff_allowed, max_diff_allowed):
"""
Compare layer by layer of a caffe network with mxnet network
:param caffe_net: loaded caffe network
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param exe: mxnet model
:param layer_name_to_record: map between caffe layer and information record
:param top_to_layers: map between caffe blob name to layers which outputs it (including inplace)
:param mean_diff_allowed: mean difference allowed between caffe blob and mxnet blob
:param max_diff_allowed: max difference allowed between caffe blob and mxnet blob
"""
import re
log_format = ' {0:<40} {1:<40} {2:<8} {3:>10} {4:>10} {5:<1}'
compare_layers_from_nets.is_first_convolution = True
def _compare_blob(caf_blob, mx_blob, caf_name, mx_name, blob_type, note):
diff = np.abs(mx_blob - caf_blob)
diff_mean = diff.mean()
diff_max = diff.max()
logging.info(log_format.format(caf_name, mx_name, blob_type, '%4.5f' % diff_mean,
'%4.5f' % diff_max, note))
assert diff_mean < mean_diff_allowed
assert diff_max < max_diff_allowed
def _process_layer_parameters(layer):
logging.debug('processing layer %s of type %s', layer.name, layer.type)
normalized_layer_name = re.sub('[-/]', '_', layer.name)
# handle weight and bias of convolution and fully-connected layers
if layer.name in caffe_net.params and layer.type in ['Convolution', 'InnerProduct',
'Deconvolution']:
has_bias = len(caffe_net.params[layer.name]) > 1
mx_name_weight = '{}_weight'.format(normalized_layer_name)
mx_beta = arg_params[mx_name_weight].asnumpy()
# first convolution should change from BGR to RGB
if layer.type == 'Convolution' and compare_layers_from_nets.is_first_convolution:
compare_layers_from_nets.is_first_convolution = False
# if RGB or RGBA
if mx_beta.shape[1] == 3 or mx_beta.shape[1] == 4:
# Swapping BGR of caffe into RGB in mxnet
mx_beta[:, [0, 2], :, :] = mx_beta[:, [2, 0], :, :]
caf_beta = caffe_net.params[layer.name][0].data
_compare_blob(caf_beta, mx_beta, layer.name, mx_name_weight, 'weight', '')
if has_bias:
mx_name_bias = '{}_bias'.format(normalized_layer_name)
mx_gamma = arg_params[mx_name_bias].asnumpy()
caf_gamma = caffe_net.params[layer.name][1].data
_compare_blob(caf_gamma, mx_gamma, layer.name, mx_name_bias, 'bias', '')
elif layer.name in caffe_net.params and layer.type == 'Scale':
if 'scale' in normalized_layer_name:
bn_name = normalized_layer_name.replace('scale', 'bn')
elif 'sc' in normalized_layer_name:
bn_name = normalized_layer_name.replace('sc', 'bn')
else:
assert False, 'Unknown name convention for bn/scale'
beta_name = '{}_beta'.format(bn_name)
gamma_name = '{}_gamma'.format(bn_name)
mx_beta = arg_params[beta_name].asnumpy()
caf_beta = caffe_net.params[layer.name][1].data
_compare_blob(caf_beta, mx_beta, layer.name, beta_name, 'beta', '')
mx_gamma = arg_params[gamma_name].asnumpy()
caf_gamma = caffe_net.params[layer.name][0].data
_compare_blob(caf_gamma, mx_gamma, layer.name, gamma_name, 'gamma', '')
elif layer.name in caffe_net.params and layer.type == 'BatchNorm':
mean_name = '{}_moving_mean'.format(normalized_layer_name)
var_name = '{}_moving_var'.format(normalized_layer_name)
caf_rescale_factor = caffe_net.params[layer.name][2].data
mx_mean = aux_params[mean_name].asnumpy()
caf_mean = caffe_net.params[layer.name][0].data / caf_rescale_factor
_compare_blob(caf_mean, mx_mean, layer.name, mean_name, 'mean', '')
mx_var = aux_params[var_name].asnumpy()
caf_var = caffe_net.params[layer.name][1].data / caf_rescale_factor
_compare_blob(caf_var, mx_var, layer.name, var_name, 'var',
'expect 1e-04 change due to cudnn eps')
elif layer.type in ['Input', 'Pooling', 'ReLU', 'Eltwise', 'Softmax', 'LRN', 'Concat',
'Dropout', 'Crop']:
# no parameters to check for these layers
pass
else:
warnings.warn('No handling for layer %s of type %s, should we ignore it?'
              % (layer.name, layer.type))
return
def _process_layer_output(caffe_blob_name):
logging.debug('processing blob %s', caffe_blob_name)
# skip blobs not originating from actual layers, e.g. artificial split layers added by caffe
if caffe_blob_name not in top_to_layers:
return
caf_blob = caffe_net.blobs[caffe_blob_name].data
# data should change from BGR to RGB
if caffe_blob_name == 'data':
# if RGB or RGBA
if caf_blob.shape[1] == 3 or caf_blob.shape[1] == 4:
# Swapping BGR of caffe into RGB in mxnet
caf_blob[:, [0, 2], :, :] = caf_blob[:, [2, 0], :, :]
mx_name = 'data'
else:
# get last layer name which outputs this blob name
last_layer_name = top_to_layers[caffe_blob_name][-1]
normalized_last_layer_name = re.sub('[-/]', '_', last_layer_name)
mx_name = '{}_output'.format(normalized_last_layer_name)
if 'scale' in mx_name:
mx_name = mx_name.replace('scale', 'bn')
elif 'sc' in mx_name:
mx_name = mx_name.replace('sc', 'bn')
if mx_name not in exe.output_dict:
logging.error('mxnet blob %s is missing, time to extend the compare tool..', mx_name)
return
mx_blob = exe.output_dict[mx_name].asnumpy()
_compare_blob(caf_blob, mx_blob, caffe_blob_name, mx_name, 'output', '')
return
# check layer parameters
logging.info('\n***** Network Parameters '.ljust(140, '*'))
logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
first_layer_name = list(layer_name_to_record.keys())[0]  # list() keeps this working on Python 3
_bfs(layer_name_to_record[first_layer_name], _process_layer_parameters)
# check layer output
logging.info('\n***** Network Outputs '.ljust(140, '*'))
logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
for caffe_blob_name in caffe_net.blobs.keys():
_process_layer_output(caffe_blob_name)
return |
Entrypoint for compare_layers
def main():
"""Entrypoint for compare_layers"""
parser = argparse.ArgumentParser(
description='Tool for testing caffe to mxnet conversion layer by layer')
parser.add_argument('--image_url', type=str,
default='https://github.com/dmlc/web-data/raw/master/mxnet/doc/'\
'tutorials/python/predict_image/cat.jpg',
help='input image to test inference, can be either file path or url')
parser.add_argument('--caffe_prototxt_path', type=str,
default='./model.prototxt',
help='path to caffe prototxt')
parser.add_argument('--caffe_model_path', type=str,
default='./model.caffemodel',
help='path to caffe weights')
parser.add_argument('--caffe_mean', type=str,
default='./model_mean.binaryproto',
help='path to caffe mean file')
parser.add_argument('--mean_diff_allowed', type=float, default=1e-03,
                    help='mean difference allowed between caffe blob and mxnet blob')
parser.add_argument('--max_diff_allowed', type=float, default=1e-01,
                    help='max difference allowed between caffe blob and mxnet blob')
parser.add_argument('--gpu', type=int, default=-1, help='the gpu id used for predict')
args = parser.parse_args()
convert_and_compare_caffe_to_mxnet(args.image_url, args.gpu, args.caffe_prototxt_path,
args.caffe_model_path, args.caffe_mean,
args.mean_diff_allowed, args.max_diff_allowed) |
Get executor to Stochastic Gradient Langevin Dynamics and/or Bayesian Dark Knowledge
def get_executor(sym, ctx, data_inputs, initializer=None):
"""Get executor to Stochastic Gradient Langevin Dynamics and/or Bayesian Dark Knowledge"""
data_shapes = {k: v.shape for k, v in data_inputs.items()}
arg_names = sym.list_arguments()
aux_names = sym.list_auxiliary_states()
param_names = list(set(arg_names) - set(data_inputs.keys()))
arg_shapes, output_shapes, aux_shapes = sym.infer_shape(**data_shapes)
arg_name_shape = {k: s for k, s in zip(arg_names, arg_shapes)}
params = {n: nd.empty(arg_name_shape[n], ctx=ctx) for n in param_names}
params_grad = {n: nd.empty(arg_name_shape[n], ctx=ctx) for n in param_names}
aux_states = {k: nd.empty(s, ctx=ctx) for k, s in zip(aux_names, aux_shapes)}
exe = sym.bind(ctx=ctx, args=dict(params, **data_inputs),
args_grad=params_grad,
aux_states=aux_states)
if initializer is not None:
for k, v in params.items():
initializer(k, v)
return exe, params, params_grad, aux_states |
Create copy of parameters
def copy_param(exe, new_param=None):
"""Create copy of parameters"""
if new_param is None:
new_param = {k: nd.empty(v.shape, ctx=mx.cpu()) for k, v in exe.arg_dict.items()}
for k, v in new_param.items():
exe.arg_dict[k].copyto(v)
return new_param |
Parse command line arguments
def parse_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument("font_path", help="Path to ttf font file or directory containing ttf files")
parser.add_argument("--loss", help="'ctc' or 'warpctc' loss [Default 'ctc']", default='ctc')
parser.add_argument("--cpu",
help="Number of CPUs for training [Default 8]. Ignored if --gpu is specified.",
type=int, default=8)
parser.add_argument("--gpu", help="Number of GPUs for training [Default 0]", type=int)
parser.add_argument("--num_proc", help="Number CAPTCHA generating processes [Default 4]", type=int, default=4)
parser.add_argument("--prefix", help="Checkpoint prefix [Default 'ocr']", default='ocr')
return parser.parse_args() |
Program entry point
def main():
"""Program entry point"""
args = parse_args()
if not any(args.loss == s for s in ['ctc', 'warpctc']):
raise ValueError("Invalid loss '{}' (must be 'ctc' or 'warpctc')".format(args.loss))
hp = Hyperparams()
# Start a multiprocessor captcha image generator
mp_captcha = MPDigitCaptcha(
font_paths=get_fonts(args.font_path), h=hp.seq_length, w=30,
num_digit_min=3, num_digit_max=4, num_processes=args.num_proc, max_queue_size=hp.batch_size * 2)
try:
# Must call start() before any call to mxnet module (https://github.com/apache/incubator-mxnet/issues/9213)
mp_captcha.start()
if args.gpu:
contexts = [mx.context.gpu(i) for i in range(args.gpu)]
else:
contexts = [mx.context.cpu(i) for i in range(args.cpu)]
init_states = lstm.init_states(hp.batch_size, hp.num_lstm_layer, hp.num_hidden)
data_train = OCRIter(
hp.train_epoch_size // hp.batch_size, hp.batch_size, init_states, captcha=mp_captcha, name='train')
data_val = OCRIter(
hp.eval_epoch_size // hp.batch_size, hp.batch_size, init_states, captcha=mp_captcha, name='val')
symbol = lstm.lstm_unroll(
num_lstm_layer=hp.num_lstm_layer,
seq_len=hp.seq_length,
num_hidden=hp.num_hidden,
num_label=hp.num_label,
loss_type=args.loss)
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
module = mx.mod.Module(
symbol,
data_names=['data', 'l0_init_c', 'l0_init_h', 'l1_init_c', 'l1_init_h'],
label_names=['label'],
context=contexts)
metrics = CtcMetrics(hp.seq_length)
module.fit(train_data=data_train,
eval_data=data_val,
# use metrics.accuracy or metrics.accuracy_lcs
eval_metric=mx.metric.np(metrics.accuracy, allow_extra_outputs=True),
optimizer='sgd',
optimizer_params={'learning_rate': hp.learning_rate,
'momentum': hp.momentum,
'wd': 0.00001,
},
initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
num_epoch=hp.num_epoch,
batch_end_callback=mx.callback.Speedometer(hp.batch_size, 50),
epoch_end_callback=mx.callback.do_checkpoint(args.prefix),
)
except KeyboardInterrupt:
print("W: interrupt received, stopping...")
finally:
# Reset multiprocessing captcha generator to stop processes
mp_captcha.reset() |
Gatys et al. CVPR 2016
ref: Image Style Transfer Using Convolutional Neural Networks
def optimize(args):
""" Gatys et al. CVPR 2017
ref: Image Style Transfer Using Convolutional Neural Networks
"""
if args.cuda:
ctx = mx.gpu(0)
else:
ctx = mx.cpu(0)
# load the content and style target
content_image = utils.tensor_load_rgbimage(args.content_image, ctx, size=args.content_size, keep_asp=True)
content_image = utils.subtract_imagenet_mean_preprocess_batch(content_image)
style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size)
style_image = utils.subtract_imagenet_mean_preprocess_batch(style_image)
# load the pre-trained vgg-16 and extract features
vgg = net.Vgg16()
utils.init_vgg_params(vgg, 'models', ctx=ctx)
# content feature
f_xc_c = vgg(content_image)[1]
# style feature
features_style = vgg(style_image)
gram_style = [net.gram_matrix(y) for y in features_style]
# output
output = Parameter('output', shape=content_image.shape)
output.initialize(ctx=ctx)
output.set_data(content_image)
# optimizer
trainer = gluon.Trainer([output], 'adam',
{'learning_rate': args.lr})
mse_loss = gluon.loss.L2Loss()
# optimizing the images
for e in range(args.iters):
utils.imagenet_clamp_batch(output.data(), 0, 255)
# fix BN for pre-trained vgg
with autograd.record():
features_y = vgg(output.data())
content_loss = 2 * args.content_weight * mse_loss(features_y[1], f_xc_c)
style_loss = 0.
for m in range(len(features_y)):
gram_y = net.gram_matrix(features_y[m])
gram_s = gram_style[m]
style_loss = style_loss + 2 * args.style_weight * mse_loss(gram_y, gram_s)
total_loss = content_loss + style_loss
total_loss.backward()
trainer.step(1)
if (e + 1) % args.log_interval == 0:
print('loss:{:.2f}'.format(total_loss.asnumpy()[0]))
# save the image
output = utils.add_imagenet_mean_batch(output.data())
utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda) |
Get symbol of mnist
def get_mnist_sym(output_op=None, num_hidden=400):
"""Get symbol of mnist"""
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='mnist_fc1', num_hidden=num_hidden)
net = mx.symbol.Activation(data=net, name='mnist_relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='mnist_fc2', num_hidden=num_hidden)
net = mx.symbol.Activation(data=net, name='mnist_relu2', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='mnist_fc3', num_hidden=10)
if output_op is None:
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
else:
net = output_op(data=net, name='softmax')
return net |
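As a quick sanity check (a sketch; shapes assume flattened 28x28 MNIST inputs):

net = get_mnist_sym()
arg_shapes, out_shapes, aux_shapes = net.infer_shape(data=(100, 784))
print(out_shapes)  # [(100, 10)]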
Get synthetic gradient value
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
"""Get synthetic gradient value"""
if grad is None:
grad = nd.empty(theta.shape, theta.context)
theta1 = theta.asnumpy()[0]
theta2 = theta.asnumpy()[1]
v1 = sigma1 ** 2
v2 = sigma2 ** 2
vx = sigmax ** 2
denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
-(X - theta1 - theta2) ** 2 / (2 * vx))
grad_npy = numpy.zeros(theta.shape)
grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
+ numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
(X - theta1 - theta2) / vx) / denominator).sum() + theta1 / v1
grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
(X - theta1 - theta2) / vx) / denominator).sum() + theta2 / v2
grad[:] = grad_npy
return grad |
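The expressions above appear to match the two-component Gaussian mixture example of Welling & Teh (2011); this reading is an assumption, since the model itself is defined elsewhere. Under it, the priors and likelihood are $\theta_1 \sim \mathcal{N}(0, \sigma_1^2)$, $\theta_2 \sim \mathcal{N}(0, \sigma_2^2)$, $x_i \sim \tfrac{1}{2}\mathcal{N}(\theta_1, \sigma_x^2) + \tfrac{1}{2}\mathcal{N}(\theta_1+\theta_2, \sigma_x^2)$ (the $\tfrac{1}{2}$ mixture weights cancel between numerator and denominator of the gradient), and the returned value is

$$-\,\mathrm{rescale\_grad} \cdot \nabla_\theta \sum_i \log p(x_i \mid \theta) \;+\; \nabla_\theta\!\left(\frac{\theta_1^2}{2\sigma_1^2} + \frac{\theta_2^2}{2\sigma_2^2}\right),$$

i.e. the gradient of the negative log posterior with the minibatch likelihood rescaled to the full dataset.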
Get toy symbol
def get_toy_sym(teacher=True, teacher_noise_precision=None):
"""Get toy symbol"""
if teacher:
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='teacher_fc1', num_hidden=100)
net = mx.symbol.Activation(data=net, name='teacher_relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='teacher_fc2', num_hidden=1)
net = mx.symbol.LinearRegressionOutput(data=net, name='teacher_output',
grad_scale=teacher_noise_precision)
else:
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='student_fc1', num_hidden=100)
net = mx.symbol.Activation(data=net, name='student_relu1', act_type="relu")
student_mean = mx.symbol.FullyConnected(data=net, name='student_mean', num_hidden=1)
student_var = mx.symbol.FullyConnected(data=net, name='student_var', num_hidden=1)
net = mx.symbol.Group([student_mean, student_var])
return net |
Run DistilledSGLD on mnist dataset
def run_mnist_DistilledSGLD(num_training=50000, gpu_id=None):
"""Run DistilledSGLD on mnist dataset"""
X, Y, X_test, Y_test = load_mnist(num_training)
minibatch_size = 100
if num_training >= 10000:
num_hidden = 800
total_iter_num = 1000000
teacher_learning_rate = 1E-6
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.1
else:
num_hidden = 400
total_iter_num = 20000
teacher_learning_rate = 4E-5
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.001
teacher_net = get_mnist_sym(num_hidden=num_hidden)
logsoftmax = LogSoftmax()
student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev(gpu_id))}
teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
student_initializer = BiasXavier(factor_type="in", magnitude=1)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num,
student_initializer=student_initializer,
teacher_initializer=teacher_initializer,
student_optimizing_algorithm="adam",
teacher_learning_rate=teacher_learning_rate,
student_learning_rate=student_learning_rate,
teacher_prior_precision=teacher_prior, student_prior_precision=student_prior,
perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev(gpu_id)) |
Run SGLD on toy dataset
def run_toy_SGLD(gpu_id=None):
"""Run SGLD on toy dataset"""
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0 / 9.0
net = get_toy_sym(True, teacher_noise_precision)
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
initializer = mx.init.Uniform(0.07)
exe, params, _ = SGLD(sym=net,
data_inputs=data_inputs,
X=X,
Y=Y,
X_test=X_test,
Y_test=Y_test,
total_iter_num=50000,
initializer=initializer,
learning_rate=1E-4,
# lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
prior_precision=0.1,
burn_in_iter_num=1000,
thin_interval=10,
task='regression',
minibatch_size=minibatch_size,
dev=dev(gpu_id)) |
Run DistilledSGLD on toy dataset
def run_toy_DistilledSGLD(gpu_id):
"""Run DistilledSGLD on toy dataset"""
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0
teacher_net = get_toy_sym(True, teacher_noise_precision)
student_net = get_toy_sym(False)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id))}
teacher_initializer = mx.init.Uniform(0.07)
student_initializer = mx.init.Uniform(0.07)
student_grad_f = lambda student_outputs, teacher_pred: \
regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
teacher_initializer=teacher_initializer,
student_initializer=student_initializer,
teacher_learning_rate=1E-4, student_learning_rate=0.01,
# teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
student_grad_f=student_grad_f,
teacher_prior_precision=0.1, student_prior_precision=0.001,
perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression',
dev=dev(gpu_id)) |
Run HMC on toy dataset
def run_toy_HMC(gpu_id=None):
"""Run HMC on toy dataset"""
X, Y, X_test, Y_test = load_toy()
minibatch_size = Y.shape[0]
noise_precision = 1 / 9.0
net = get_toy_sym(True, noise_precision)
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))}
initializer = mx.init.Uniform(0.07)
sample_pool = HMC(net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test,
sample_num=300000, initializer=initializer, prior_precision=1.0,
learning_rate=1E-3, L=10, dev=dev(gpu_id)) |
Run synthetic SGLD
def run_synthetic_SGLD():
"""Run synthetic SGLD"""
theta1 = 0
theta2 = 1
sigma1 = numpy.sqrt(10)
sigma2 = 1
sigmax = numpy.sqrt(2)
X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
minibatch_size = 1
total_iter_num = 1000000
lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
factor=0.55)
optimizer = mx.optimizer.create('sgld',
learning_rate=None,
rescale_grad=1.0,
lr_scheduler=lr_scheduler,
wd=0)
updater = mx.optimizer.get_updater(optimizer)
theta = mx.random.normal(0, 1, (2,), mx.cpu())
grad = nd.empty((2,), mx.cpu())
samples = numpy.zeros((2, total_iter_num))
start = time.time()
for i in range(total_iter_num):
if (i + 1) % 100000 == 0:
end = time.time()
print("Iter:%d, Time spent: %f" % (i + 1, end - start))
start = time.time()
ind = numpy.random.randint(0, X.shape[0])
synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax,
rescale_grad=X.shape[0] / float(minibatch_size), grad=grad)
updater('theta', grad, theta)
samples[:, i] = theta.asnumpy()
plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
plt.colorbar()
plt.show() |
Wrapper function for loading the Pascal VOC dataset.
Parameters
----------
image_set : str
train, trainval...
year : str
2007, 2012, or combinations separated by comma
devkit_path : str
root directory of dataset
shuffle : bool
whether to shuffle initial list
Returns
----------
Imdb
def load_pascal(image_set, year, devkit_path, shuffle=False):
"""
Wrapper function for loading the Pascal VOC dataset.
Parameters
----------
image_set : str
train, trainval...
year : str
2007, 2012, or combinations separated by comma
devkit_path : str
root directory of dataset
shuffle : bool
whether to shuffle initial list
Returns
----------
Imdb
"""
image_set = [y.strip() for y in image_set.split(',')]
assert image_set, "No image_set specified"
year = [y.strip() for y in year.split(',')]
assert year, "No year specified"
# make sure (# sets == # years)
if len(image_set) > 1 and len(year) == 1:
year = year * len(image_set)
if len(image_set) == 1 and len(year) > 1:
image_set = image_set * len(year)
assert len(image_set) == len(year), "Number of sets and year mismatch"
imdbs = []
for s, y in zip(image_set, year):
imdbs.append(PascalVoc(s, y, devkit_path, shuffle, is_train=True))
if len(imdbs) > 1:
return ConcatDB(imdbs, shuffle)
else:
return imdbs[0] |
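Hypothetical usage (the devkit path is a placeholder); mixing multiple sets and years builds a ConcatDB over all of them:

imdb = load_pascal('trainval,trainval', '2007,2012', './data/VOCdevkit', shuffle=True)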
Wrapper function for loading the MS COCO dataset.
Parameters
----------
image_set : str
train2014, val2014, valminusminival2014, minival2014
dirname: str
root dir for coco
shuffle : boolean
whether to shuffle the initial image list
def load_coco(image_set, dirname, shuffle=False):
"""
Wrapper function for loading the MS COCO dataset.
Parameters
----------
image_set : str
train2014, val2014, valminusminival2014, minival2014
dirname: str
root dir for coco
shuffle : boolean
whether to shuffle the initial image list
"""
anno_files = ['instances_' + y.strip() + '.json' for y in image_set.split(',')]
assert anno_files, "No image set specified"
imdbs = []
for af in anno_files:
af_path = os.path.join(dirname, 'annotations', af)
imdbs.append(Coco(af_path, dirname, shuffle=shuffle))
if len(imdbs) > 1:
return ConcatDB(imdbs, shuffle)
else:
return imdbs[0] |
Resets the iterator to the beginning of the data.
def reset(self):
"""Resets the iterator to the beginning of the data."""
self.curr_idx = 0
#shuffle data in each bucket
random.shuffle(self.idx)
for i, buck in enumerate(self.sentences):
self.indices[i], self.sentences[i], self.characters[i], self.label[i] = shuffle(self.indices[i],
self.sentences[i],
self.characters[i],
self.label[i])
self.ndindex = []
self.ndsent = []
self.ndchar = []
self.ndlabel = []
#for each bucket of data
for i, buck in enumerate(self.sentences):
#append the lists with an array
self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype))
self.ndsent.append(ndarray.array(self.sentences[i], dtype=self.dtype))
self.ndchar.append(ndarray.array(self.characters[i], dtype=self.dtype))
self.ndlabel.append(ndarray.array(self.label[i], dtype=self.dtype)) |
Returns the next batch of data.
def next(self):
"""Returns the next batch of data."""
if self.curr_idx == len(self.idx):
raise StopIteration
#i = batches index, j = starting record
i, j = self.idx[self.curr_idx]
self.curr_idx += 1
indices = self.ndindex[i][j:j + self.batch_size]
sentences = self.ndsent[i][j:j + self.batch_size]
characters = self.ndchar[i][j:j + self.batch_size]
label = self.ndlabel[i][j:j + self.batch_size]
return DataBatch([sentences, characters], [label], pad=0, index = indices, bucket_key=self.buckets[i],
provide_data=[DataDesc(name=self.data_names[0], shape=sentences.shape, layout=self.layout),
DataDesc(name=self.data_names[1], shape=characters.shape, layout=self.layout)],
provide_label=[DataDesc(name=self.label_name, shape=label.shape, layout=self.layout)]) |
Converts a reshape layer from mxnet to coreml.
This doesn't currently handle the deprecated parameters for the reshape layer.
Parameters
----------
network: net
An mxnet network object.
layer: node
Node to convert.
module: module
A module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
def convert_reshape(net, node, module, builder):
"""Converts a reshape layer from mxnet to coreml.
This doesn't currently handle the deprecated parameters for the reshape layer.
Parameters
----------
network: net
An mxnet network object.
layer: node
Node to convert.
module: module
A module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
target_shape = node['shape']
if any(item <= 0 for item in target_shape):
raise NotImplementedError('Special dimensional values less than or equal to 0 are not supported yet. '
                          'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.')
if 'reverse' in node and node['reverse'] == 'True':
raise NotImplementedError('"reverse" parameter is not supported yet. '
                          'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.')
mode = 0 # CHANNEL_FIRST
builder.add_reshape(name, input_name, output_name, target_shape, mode) |
Convert a transpose layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
def convert_transpose(net, node, module, builder):
"""Convert a transpose layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attrs(node)
axes = literal_eval(param['axes'])
builder.add_permute(name, axes, input_name, output_name) |
Convert a flatten layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
def convert_flatten(net, node, module, builder):
"""Convert a flatten layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
mode = 0 # CHANNEL_FIRST
builder.add_flatten(name, mode, input_name, output_name) |
Convert a softmax layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
def convert_softmax(net, node, module, builder):
"""Convert a softmax layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
builder.add_softmax(name=name,
input_name=input_name,
output_name=output_name) |
Convert an activation layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
def convert_activation(net, node, module, builder):
"""Convert an activation layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
mx_non_linearity = _get_attrs(node)['act_type']
#TODO add SCALED_TANH, SOFTPLUS, SOFTSIGN, SIGMOID_HARD, LEAKYRELU, PRELU, ELU, PARAMETRICSOFTPLUS, THRESHOLDEDRELU, LINEAR
if mx_non_linearity == 'relu':
non_linearity = 'RELU'
elif mx_non_linearity == 'tanh':
non_linearity = 'TANH'
elif mx_non_linearity == 'sigmoid':
non_linearity = 'SIGMOID'
else:
raise TypeError('Unknown activation type %s' % mx_non_linearity)
builder.add_activation(name=name,
non_linearity=non_linearity,
input_name=input_name,
output_name=output_name)
Convert a leakyrelu layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
def convert_leakyrelu(net, node, module, builder):
"""Convert a leakyrelu layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
inputs = node['inputs']
args, _ = module.get_params()
mx_non_linearity = _get_attrs(node)['act_type']
if mx_non_linearity == 'elu':
non_linearity = 'ELU'
slope = _get_attrs(node)['slope'] if 'slope' in _get_attrs(node) else 0.25
params = slope
elif mx_non_linearity == 'leaky':
non_linearity = 'LEAKYRELU'
slope = _get_attrs(node)['slope'] if 'slope' in _get_attrs(node) else 0.25
params = [slope]
elif mx_non_linearity == 'prelu':
non_linearity = 'PRELU'
params = args[_get_node_name(net, inputs[1][0])].asnumpy()
else:
raise TypeError('Unknown activation type %s' % mx_non_linearity)
builder.add_activation(name=name,
non_linearity=non_linearity,
input_name=input_name,
output_name=output_name,
params=params)
Convert an elementwise add layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
def convert_elementwise_add(net, node, module, builder):
"""Convert an elementwise add layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
"""
input_names, output_name = _get_input_output_name(net, node, [0, 1])
name = node['name']
builder.add_elementwise(name, input_names, output_name, 'ADD') |
Convert a convolution layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
def convert_convolution(net, node, module, builder):
"""Convert a convolution layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attrs(node)
inputs = node['inputs']
args, _ = module.get_params()
if 'no_bias' in param.keys():
has_bias = not literal_eval(param['no_bias'])
else:
has_bias = True
if 'pad' in param.keys() and literal_eval(param['pad']) != (0, 0):
pad = literal_eval(param['pad'])
builder.add_padding(
name=name+"_pad",
left=pad[1],
right=pad[1],
top=pad[0],
bottom=pad[0],
value=0,
input_name=input_name,
output_name=name+"_pad_output")
input_name = name+"_pad_output"
border_mode = "valid"
n_filters = int(param['num_filter'])
n_groups = int(param['num_group']) if 'num_group' in param else 1
W = args[_get_node_name(net, inputs[1][0])].asnumpy()
if has_bias:
Wb = args[_get_node_name(net, inputs[2][0])].asnumpy()
else:
Wb = None
channels = W.shape[1]
stride_height = 1
stride_width = 1
if 'stride' in param.keys():
stride_height, stride_width = literal_eval(param['stride'])
kernel_height, kernel_width = literal_eval(param['kernel'])
W = W.transpose((2, 3, 1, 0))
builder.add_convolution(
name=name,
kernel_channels=channels,
output_channels=n_filters,
height=kernel_height,
width=kernel_width,
stride_height=stride_height,
stride_width=stride_width,
border_mode=border_mode,
groups=n_groups,
W=W,
b=Wb,
has_bias=has_bias,
is_deconv=False,
output_shape=None,
input_name=input_name,
output_name=output_name) |
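The weight transpose above is the crux of the conversion; a small self-contained check of the axis mapping (NumPy only, an illustration rather than converter code):
>>> import numpy as np
>>> W = np.zeros((64, 3, 5, 5))  # MXNet layout: (num_filter, channel, kH, kW)
>>> W.transpose((2, 3, 1, 0)).shape  # CoreML layout: (kH, kW, channel, num_filter)
(5, 5, 3, 64) |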
Convert a pooling layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
def convert_pooling(net, node, module, builder):
"""Convert a pooling layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attrs(node)
layer_type_mx = param['pool_type']
if layer_type_mx == 'max':
layer_type = 'MAX'
elif layer_type_mx == 'avg':
layer_type = 'AVERAGE'
else:
raise TypeError("Pooling type %s not supported" % layer_type_mx)
# Add padding if there is any
if 'pad' in param.keys() and literal_eval(param['pad']) != (0, 0):
pad = literal_eval(param['pad'])
builder.add_padding(
name=name+"_pad",
left=pad[1],
right=pad[1],
top=pad[0],
bottom=pad[0],
value=0,
input_name=input_name,
output_name=name+"_pad_output")
input_name = name+"_pad_output"
stride_height = 1
stride_width = 1
if 'stride' in param.keys():
stride_height, stride_width = literal_eval(param['stride'])
# MXNet serializes 'kernel' as (height, width)
kernel_height, kernel_width = literal_eval(param['kernel'])
type_map = {'valid': 'VALID', 'full': 'INCLUDE_LAST_PIXEL'}
padding_type = param['pooling_convention'] if 'pooling_convention' in param else 'valid'
if padding_type not in type_map:
raise KeyError("%s type is not supported in this converter. It is a Github issue." % padding_type)
padding_type = type_map[padding_type]
if 'global_pool' in param.keys():
is_global = literal_eval(param['global_pool'])
else:
is_global = False
# For reasons why we are not using the standard builder but having our own implementation,
# see the function documentation.
_add_pooling.add_pooling_with_padding_types(
builder=builder,
name=name,
height=kernel_height,
width=kernel_width,
stride_height=stride_height,
stride_width=stride_width,
layer_type=layer_type,
padding_type=padding_type,
exclude_pad_area=False,
is_global=is_global,
input_name=input_name,
output_name=output_name
) |
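These converters lean on ast.literal_eval because MXNet serializes node attributes as strings; a minimal illustration:
>>> from ast import literal_eval
>>> literal_eval('(2, 2)')  # 'kernel', 'stride', 'pad' attributes
(2, 2)
>>> literal_eval('True')  # 'global_pool', 'no_bias' attributes
True |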
Convert a batchnorm layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
def convert_batchnorm(net, node, module, builder):
"""Convert a batchnorm layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
inputs = node['inputs']
eps = 1e-3 # Default value of eps for MXNet.
use_global_stats = False # Default value of use_global_stats for MXNet.
fix_gamma = True # Default value of fix_gamma for MXNet.
attrs = _get_attrs(node)
if 'eps' in attrs:
eps = literal_eval(attrs['eps'])
if 'fix_gamma' in attrs:
fix_gamma = literal_eval(attrs['fix_gamma'])
args, aux = module.get_params()
gamma = args[_get_node_name(net, inputs[1][0])].asnumpy()
beta = args[_get_node_name(net, inputs[2][0])].asnumpy()
mean = aux[_get_node_name(net, inputs[3][0])].asnumpy()
variance = aux[_get_node_name(net, inputs[4][0])].asnumpy()
nb_channels = gamma.shape[0]
if fix_gamma:
gamma.fill(1.)
builder.add_batchnorm(
name=name,
channels=nb_channels,
gamma=gamma,
beta=beta,
mean=mean,
variance=variance,
input_name=input_name,
output_name=output_name,
epsilon=eps) |
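For reference, the per-channel computation the resulting CoreML layer performs is y = gamma * (x - mean) / sqrt(variance + eps) + beta; a NumPy sketch (illustrative, not converter code):
>>> import numpy as np
>>> x = np.array([0.0, 1.0, 2.0])
>>> gamma, beta, mean, var, eps = 1.0, 0.0, 1.0, 1.0, 1e-3
>>> y = gamma * (x - mean) / np.sqrt(var + eps) + beta  # approximately [-1, 0, 1] |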
Convert concat layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
def convert_concat(net, node, module, builder):
"""Convert concat layer from mxnet to coreml.
Parameters
----------
net : network
An MXNet network object.
node : node
Node to convert.
module : module
A module for MXNet.
builder : NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
input_names, output_name = _get_input_output_name(net, node, 'all')
name = node['name']
mode = 'CONCAT'
builder.add_elementwise(name=name, input_names=input_names,
output_name=output_name, mode=mode)
Convert from MXNet's opts to dmlc's opts.
def dmlc_opts(opts):
"""Convert from MXNet's opts to dmlc's opts.
"""
args = ['--num-workers', str(opts.num_workers),
'--num-servers', str(opts.num_servers),
'--cluster', opts.launcher,
'--host-file', opts.hostfile,
'--sync-dst-dir', opts.sync_dst_dir]
# convert to dictionary
dopts = vars(opts)
for key in ['env_server', 'env_worker', 'env']:
for v in dopts[key]:
args.append('--' + key.replace("_","-"))
args.append(v)
args += opts.command
try:
from dmlc_tracker import opts
except ImportError:
print("Can't load dmlc_tracker package. Perhaps you need to run")
print(" git submodule update --init --recursive")
raise
dmlc_opts = opts.get_opts(args)
return dmlc_opts |
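A sketch of the fields dmlc_opts reads from its argument; the namespace below is hypothetical and only mirrors the attributes accessed above (actually running dmlc_opts still requires the dmlc_tracker submodule):
>>> import argparse
>>> opts = argparse.Namespace(
...     num_workers=2, num_servers=2, launcher='ssh', hostfile='hosts.txt',
...     sync_dst_dir='/tmp/mxnet', env_server=[], env_worker=[], env=[],
...     command=['python', 'train.py'])
>>> # dmlc_opts(opts) forwards these as '--num-workers 2 ...' to dmlc_tracker |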
Unfuses the fused RNN into a stack of RNN cells.
def _unfuse(self):
"""Unfuses the fused RNN into a stack of RNN cells."""
assert not self._projection_size, "_unfuse does not support projection layer yet!"
assert not self._lstm_state_clip_min and not self._lstm_state_clip_max, \
"_unfuse does not support state clipping yet!"
get_cell = {'rnn_relu': lambda **kwargs: rnn_cell.RNNCell(self._hidden_size,
activation='relu',
**kwargs),
'rnn_tanh': lambda **kwargs: rnn_cell.RNNCell(self._hidden_size,
activation='tanh',
**kwargs),
'lstm': lambda **kwargs: rnn_cell.LSTMCell(self._hidden_size,
**kwargs),
'gru': lambda **kwargs: rnn_cell.GRUCell(self._hidden_size,
**kwargs)}[self._mode]
stack = rnn_cell.HybridSequentialRNNCell(prefix=self.prefix, params=self.params)
with stack.name_scope():
ni = self._input_size
for i in range(self._num_layers):
kwargs = {'input_size': ni,
'i2h_weight_initializer': self._i2h_weight_initializer,
'h2h_weight_initializer': self._h2h_weight_initializer,
'i2h_bias_initializer': self._i2h_bias_initializer,
'h2h_bias_initializer': self._h2h_bias_initializer}
if self._dir == 2:
stack.add(rnn_cell.BidirectionalCell(
get_cell(prefix='l%d_'%i, **kwargs),
get_cell(prefix='r%d_'%i, **kwargs)))
else:
stack.add(get_cell(prefix='l%d_'%i, **kwargs))
if self._dropout > 0 and i != self._num_layers - 1:
stack.add(rnn_cell.DropoutCell(self._dropout))
ni = self._hidden_size * self._dir
return stack |
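A short usage sketch (this is a private helper on gluon's fused layers, so it may change between versions):
>>> from mxnet import gluon
>>> lstm = gluon.rnn.LSTM(hidden_size=16, num_layers=2, bidirectional=True)
>>> stack = lstm._unfuse()  # HybridSequentialRNNCell of BidirectionalCell(LSTMCell, LSTMCell) |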
Initial state for this cell.
Parameters
----------
batch_size: int
Only required for `NDArray` API. Size of the batch ('N' in layout).
Dimension of the input.
func : callable, default `ndarray.zeros`
Function for creating initial state.
For Symbol API, func can be `symbol.zeros`, `symbol.uniform`,
`symbol.var` etc. Use `symbol.var` if you want to directly
feed input as states.
For NDArray API, func can be `ndarray.zeros`, `ndarray.ones`, etc.
**kwargs :
Additional keyword arguments passed to func. For example
`mean`, `std`, `dtype`, etc.
Returns
-------
states : nested list of Symbol
Starting states for the first RNN step.
def begin_state(self, batch_size=0, func=ndarray.zeros, **kwargs):
"""Initial state for this cell.
Parameters
----------
batch_size: int
Only required for `NDArray` API. Size of the batch ('N' in layout).
Dimension of the input.
func : callable, default `ndarray.zeros`
Function for creating initial state.
For Symbol API, func can be `symbol.zeros`, `symbol.uniform`,
`symbol.var` etc. Use `symbol.var` if you want to directly
feed input as states.
For NDArray API, func can be `ndarray.zeros`, `ndarray.ones`, etc.
**kwargs :
Additional keyword arguments passed to func. For example
`mean`, `std`, `dtype`, etc.
Returns
-------
states : nested list of Symbol
Starting states for the first RNN step.
"""
states = []
for i, info in enumerate(self.state_info(batch_size)):
if info is not None:
info.update(kwargs)
else:
info = kwargs
states.append(func(name='%sh0_%d'%(self.prefix, i), **info))
return states |
Forward using the cuDNN or CPU kernel.
def _forward_kernel(self, F, inputs, states, **kwargs):
"""Forward using the cuDNN or CPU kernel."""
if self._layout == 'NTC':
inputs = F.swapaxes(inputs, dim1=0, dim2=1)
if self._projection_size is None:
params = (kwargs['{}{}_{}_{}'.format(d, l, g, t)].reshape(-1)
for t in ['weight', 'bias']
for l in range(self._num_layers)
for d in ['l', 'r'][:self._dir]
for g in ['i2h', 'h2h'])
else:
params = (kwargs['{}{}_{}_{}'.format(d, l, g, t)].reshape(-1)
for t in ['weight', 'bias']
for l in range(self._num_layers)
for d in ['l', 'r'][:self._dir]
for g in ['i2h', 'h2h', 'h2r']
if g != 'h2r' or t != 'bias')
params = F._internal._rnn_param_concat(*params, dim=0)
rnn = F.RNN(inputs, params, *states, state_size=self._hidden_size,
projection_size=self._projection_size,
num_layers=self._num_layers, bidirectional=self._dir == 2,
p=self._dropout, state_outputs=True, mode=self._mode,
lstm_state_clip_min=self._lstm_state_clip_min,
lstm_state_clip_max=self._lstm_state_clip_max,
lstm_state_clip_nan=self._lstm_state_clip_nan)
if self._mode == 'lstm':
outputs, states = rnn[0], [rnn[1], rnn[2]]
else:
outputs, states = rnn[0], [rnn[1]]
if self._layout == 'NTC':
outputs = F.swapaxes(outputs, dim1=0, dim2=1)
return outputs, states |
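The kwargs consumed above follow the fused-RNN parameter naming scheme {direction}{layer}_{gate}_{kind}; this generator reproduces the ordering for a 2-layer bidirectional network without projection (illustration only):
>>> names = ['{}{}_{}_{}'.format(d, l, g, t)
...          for t in ['weight', 'bias']
...          for l in range(2)        # num_layers
...          for d in ['l', 'r']      # both directions
...          for g in ['i2h', 'h2h']]
>>> names[:4]
['l0_i2h_weight', 'l0_h2h_weight', 'r0_i2h_weight', 'r0_h2h_weight'] |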
Wait for network service to appear
@param server: host to connect to (str)
@param port: port (int)
@param keep_waiting: optional callable polled each iteration; waiting stops
and False is returned once it evaluates to False
@param timeout: in seconds, if None or 0 wait forever
@return: True or False; if timeout is None may return only True or
throw an unhandled network exception
def wait_ssh_open(server, port, keep_waiting=None, timeout=None):
""" Wait for network service to appear
@param server: host to connect to (str)
@param port: port (int)
@param timeout: in seconds, if None or 0 wait forever
@return: True of False, if timeout is None may return only True or
throw unhandled network exception
"""
import socket
import errno
import time
log = logging.getLogger('wait_ssh_open')
sleep_s = 1
if timeout:
from time import time as now
# time module is needed to calc timeout shared between two exceptions
end = now() + timeout
while True:
log.debug("Sleeping for %s second(s)", sleep_s)
time.sleep(sleep_s)
s = socket.socket()
try:
if keep_waiting and not keep_waiting():
log.debug("keep_waiting() is set and evaluates to False")
return False
if timeout:
next_timeout = end - now()
if next_timeout < 0:
log.debug("connect time out")
return False
else:
log.debug("connect timeout %d s", next_timeout)
s.settimeout(next_timeout)
log.debug("connect %s:%d", server, port)
s.connect((server, port))
ret = s.recv(1024).decode()
if ret and ret.startswith('SSH'):
s.close()
log.info("wait_ssh_open: port %s:%s is open and ssh is ready", server, port)
return True
else:
log.debug("Didn't get the SSH banner")
s.close()
except ConnectionError as err:
log.debug("ConnectionError %s", err)
if sleep_s == 0:
sleep_s = 1
else:
sleep_s *= 2
except socket.gaierror as err:
log.debug("gaierror %s",err)
return False
except socket.timeout as err:
# this exception occurs only if timeout is set
if timeout:
return False
except TimeoutError as err:
# catch timeout exception from underlying network library
# this one is different from socket.timeout
raise |
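Typical use when provisioning hosts (the address below is a placeholder):
>>> import logging
>>> logging.basicConfig(level=logging.INFO)
>>> wait_ssh_open('10.0.0.5', 22, timeout=300)  # True once sshd answers with its banner |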
Wait for network service to appear
@param server: host to connect to (str)
@param port: port (int)
@param timeout: in seconds, if None or 0 wait forever
@return: True or False; if timeout is None may return only True or
throw an unhandled network exception
def wait_port_open(server, port, timeout=None):
""" Wait for network service to appear
@param server: host to connect to (str)
@param port: port (int)
@param timeout: in seconds, if None or 0 wait forever
@return: True or False; if timeout is None may return only True or
throw an unhandled network exception
"""
import socket
import errno
import time
sleep_s = 0
if timeout:
from time import time as now
# time module is needed to calc timeout shared between two exceptions
end = now() + timeout
while True:
logging.debug("Sleeping for %s second(s)", sleep_s)
time.sleep(sleep_s)
s = socket.socket()
try:
if timeout:
next_timeout = end - now()
if next_timeout < 0:
return False
else:
s.settimeout(next_timeout)
logging.info("connect %s %d", server, port)
s.connect((server, port))
except ConnectionError as err:
logging.debug("ConnectionError %s", err)
if sleep_s == 0:
sleep_s = 1
except socket.gaierror as err:
logging.debug("gaierror %s",err)
return False
except socket.timeout as err:
# this exception occurs only if timeout is set
if timeout:
return False
except TimeoutError as err:
# catch timeout exception from underlying network library
# this one is different from socket.timeout
raise
else:
s.close()
logging.info("wait_port_open: port %s:%s is open", server, port)
return True |
Print a summary table of the symbol: layers, output shapes and parameter counts.
Parameters
----------
symbol: Symbol
Symbol to be visualized.
shape: dict
A dict of shapes, str->shape (tuple), given input shapes.
line_length: int
Total length of printed lines
positions: list
Relative or absolute positions of log elements in each line.
Returns
------
None
Notes
-----
If ``mxnet`` is imported, the visualization module can be used in its short-form.
For example, if we ``import mxnet`` as follows::
import mxnet
this method in visualization module can be used in its short-form as::
mxnet.viz.print_summary(...)
def print_summary(symbol, shape=None, line_length=120, positions=[.44, .64, .74, 1.]):
"""Convert symbol for detail information.
Parameters
----------
symbol: Symbol
Symbol to be visualized.
shape: dict
A dict of shapes, str->shape (tuple), given input shapes.
line_length: int
Total length of printed lines
positions: list
Relative or absolute positions of log elements in each line.
Returns
------
None
Notes
-----
If ``mxnet`` is imported, the visualization module can be used in its short-form.
For example, if we ``import mxnet`` as follows::
import mxnet
this method in visualization module can be used in its short-form as::
mxnet.viz.print_summary(...)
"""
if not isinstance(symbol, Symbol):
raise TypeError("symbol must be Symbol")
show_shape = False
if shape is not None:
show_shape = True
internals = symbol.get_internals()
_, out_shapes, _ = internals.infer_shape(**shape)
if out_shapes is None:
raise ValueError("Input shape is incomplete")
shape_dict = dict(zip(internals.list_outputs(), out_shapes))
conf = json.loads(symbol.tojson())
nodes = conf["nodes"]
heads = set(conf["heads"][0])
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Previous Layer']
def print_row(fields, positions):
"""Print format row.
Parameters
----------
fields: list
Information field.
positions: list
Absolute end positions of each field within the line.
Returns
------
None
"""
line = ''
for i, field in enumerate(fields):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
print('_' * line_length)
print_row(to_display, positions)
print('=' * line_length)
def print_layer_summary(node, out_shape):
"""print layer information
Parameters
----------
node: dict
Node information.
out_shape: dict
Node shape information.
Returns
------
Node total parameters.
"""
op = node["op"]
pre_node = []
pre_filter = 0
if op != "null":
inputs = node["inputs"]
for item in inputs:
input_node = nodes[item[0]]
input_name = input_node["name"]
if input_node["op"] != "null" or item[0] in heads:
# add precede
pre_node.append(input_name)
if show_shape:
if input_node["op"] != "null":
key = input_name + "_output"
else:
key = input_name
if key in shape_dict:
shape = shape_dict[key][1:]
pre_filter = pre_filter + int(shape[0])
cur_param = 0
if op == 'Convolution':
if "no_bias" in node["attrs"] and node["attrs"]["no_bias"] == 'True':
num_group = int(node['attrs'].get('num_group', '1'))
cur_param = pre_filter * int(node["attrs"]["num_filter"]) \
// num_group
for k in _str2tuple(node["attrs"]["kernel"]):
cur_param *= int(k)
else:
num_group = int(node['attrs'].get('num_group', '1'))
cur_param = pre_filter * int(node["attrs"]["num_filter"]) \
// num_group
for k in _str2tuple(node["attrs"]["kernel"]):
cur_param *= int(k)
cur_param += int(node["attrs"]["num_filter"])
elif op == 'FullyConnected':
if "no_bias" in node["attrs"] and node["attrs"]["no_bias"] == 'True':
cur_param = pre_filter * int(node["attrs"]["num_hidden"])
else:
cur_param = (pre_filter+1) * int(node["attrs"]["num_hidden"])
elif op == 'BatchNorm':
key = node["name"] + "_output"
if show_shape:
num_filter = shape_dict[key][1]
cur_param = int(num_filter) * 2
elif op == 'Embedding':
cur_param = int(node["attrs"]['input_dim']) * int(node["attrs"]['output_dim'])
if not pre_node:
first_connection = ''
else:
first_connection = pre_node[0]
fields = [node['name'] + '(' + op + ')',
"x".join([str(x) for x in out_shape]),
cur_param,
first_connection]
print_row(fields, positions)
if len(pre_node) > 1:
for i in range(1, len(pre_node)):
fields = ['', '', '', pre_node[i]]
print_row(fields, positions)
return cur_param
total_params = 0
for i, node in enumerate(nodes):
out_shape = []
op = node["op"]
if op == "null" and i > 0:
continue
if op != "null" or i in heads:
if show_shape:
if op != "null":
key = node["name"] + "_output"
else:
key = node["name"]
if key in shape_dict:
out_shape = shape_dict[key][1:]
total_params += print_layer_summary(nodes[i], out_shape)
if i == len(nodes) - 1:
print('=' * line_length)
else:
print('_' * line_length)
print("Total params: {params}".format(params=total_params))
print('_' * line_length) |
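A usage sketch on a small perceptron (the printed table is abridged here):
>>> import mxnet as mx
>>> data = mx.sym.Variable('data')
>>> fc1 = mx.sym.FullyConnected(data, num_hidden=128, name='fc1')
>>> act = mx.sym.Activation(fc1, act_type='relu', name='relu1')
>>> out = mx.sym.SoftmaxOutput(mx.sym.FullyConnected(act, num_hidden=10, name='fc2'), name='out')
>>> mx.viz.print_summary(out, shape={'data': (1, 100)})  # prints the layer table |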
Creates a visualization (Graphviz digraph object) of the given computation graph.
Graphviz must be installed for this function to work.
Parameters
----------
title: str, optional
Title of the generated visualization.
symbol: Symbol
A symbol from the computation graph. The generated digraph will visualize the part
of the computation graph required to compute `symbol`.
shape: dict, optional
Specifies the shape of the input tensors. If specified, the visualization will include
the shape of the tensors between the nodes. `shape` is a dictionary mapping
input symbol names (str) to the corresponding tensor shape (tuple).
dtype: dict, optional
Specifies the type of the input tensors. If specified, the visualization will include
the type of the tensors between the nodes. `dtype` is a dictionary mapping
input symbol names (str) to the corresponding tensor type (e.g. `numpy.float32`).
node_attrs: dict, optional
Specifies the attributes for nodes in the generated visualization. `node_attrs` is
a dictionary of Graphviz attribute names and values. For example::
node_attrs={"shape":"oval","fixedsize":"false"}
will use oval shape for nodes and allow variable sized nodes in the visualization.
hide_weights: bool, optional
If True (default), then inputs with names of form *_weight* (corresponding to weight
tensors) or *_bias* (corresponding to bias vectors) will be hidden for a cleaner
visualization.
Returns
-------
dot: Digraph
A Graphviz digraph object visualizing the computation graph to compute `symbol`.
Example
-------
>>> net = mx.sym.Variable('data')
>>> net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=128)
>>> net = mx.sym.Activation(data=net, name='relu1', act_type="relu")
>>> net = mx.sym.FullyConnected(data=net, name='fc2', num_hidden=10)
>>> net = mx.sym.SoftmaxOutput(data=net, name='out')
>>> digraph = mx.viz.plot_network(net, shape={'data':(100,200)},
... node_attrs={"fixedsize":"false"})
>>> digraph.view()
Notes
-----
If ``mxnet`` is imported, the visualization module can be used in its short-form.
For example, if we ``import mxnet`` as follows::
import mxnet
this method in visualization module can be used in its short-form as::
mxnet.viz.plot_network(...)
def plot_network(symbol, title="plot", save_format='pdf', shape=None, dtype=None, node_attrs={},
hide_weights=True):
"""Creates a visualization (Graphviz digraph object) of the given computation graph.
Graphviz must be installed for this function to work.
Parameters
----------
title: str, optional
Title of the generated visualization.
symbol: Symbol
A symbol from the computation graph. The generated digraph will visualize the part
of the computation graph required to compute `symbol`.
shape: dict, optional
Specifies the shape of the input tensors. If specified, the visualization will include
the shape of the tensors between the nodes. `shape` is a dictionary mapping
input symbol names (str) to the corresponding tensor shape (tuple).
dtype: dict, optional
Specifies the type of the input tensors. If specified, the visualization will include
the type of the tensors between the nodes. `dtype` is a dictionary mapping
input symbol names (str) to the corresponding tensor type (e.g. `numpy.float32`).
node_attrs: dict, optional
Specifies the attributes for nodes in the generated visualization. `node_attrs` is
a dictionary of Graphviz attribute names and values. For example::
node_attrs={"shape":"oval","fixedsize":"false"}
will use oval shape for nodes and allow variable sized nodes in the visualization.
hide_weights: bool, optional
If True (default), then inputs with names of form *_weight* (corresponding to weight
tensors) or *_bias* (corresponding to bias vectors) will be hidden for a cleaner
visualization.
Returns
-------
dot: Digraph
A Graphviz digraph object visualizing the computation graph to compute `symbol`.
Example
-------
>>> net = mx.sym.Variable('data')
>>> net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=128)
>>> net = mx.sym.Activation(data=net, name='relu1', act_type="relu")
>>> net = mx.sym.FullyConnected(data=net, name='fc2', num_hidden=10)
>>> net = mx.sym.SoftmaxOutput(data=net, name='out')
>>> digraph = mx.viz.plot_network(net, shape={'data':(100,200)},
... node_attrs={"fixedsize":"false"})
>>> digraph.view()
Notes
-----
If ``mxnet`` is imported, the visualization module can be used in its short-form.
For example, if we ``import mxnet`` as follows::
import mxnet
this method in visualization module can be used in its short-form as::
mxnet.viz.plot_network(...)
"""
# todo add shape support
try:
from graphviz import Digraph
except ImportError:
raise ImportError("Draw network requires graphviz library")
if not isinstance(symbol, Symbol):
raise TypeError("symbol must be a Symbol")
internals = symbol.get_internals()
draw_shape = shape is not None
if draw_shape:
_, out_shapes, _ = internals.infer_shape(**shape)
if out_shapes is None:
raise ValueError("Input shape is incomplete")
shape_dict = dict(zip(internals.list_outputs(), out_shapes))
draw_type = dtype is not None
if draw_type:
_, out_types, _ = internals.infer_type(**dtype)
if out_types is None:
raise ValueError("Input type is incomplete")
type_dict = dict(zip(internals.list_outputs(), out_types))
conf = json.loads(symbol.tojson())
nodes = conf["nodes"]
# check if multiple nodes have the same name
if len(nodes) != len(set([node["name"] for node in nodes])):
seen_nodes = set()
# find all repeated names
repeated = set(node['name'] for node in nodes if node['name'] in seen_nodes
or seen_nodes.add(node['name']))
warning_message = "There are multiple variables with the same name in your graph, " \
"this may result in cyclic graph. Repeated names: " + ','.join(repeated)
warnings.warn(warning_message, RuntimeWarning)
# default attributes of node
node_attr = {"shape": "box", "fixedsize": "true",
"width": "1.3", "height": "0.8034", "style": "filled"}
# merge the dict provided by user and the default one
node_attr.update(node_attrs)
dot = Digraph(name=title, format=save_format)
# color map
cm = ("#8dd3c7", "#fb8072", "#ffffb3", "#bebada", "#80b1d3",
"#fdb462", "#b3de69", "#fccde5")
def looks_like_weight(name):
"""Internal helper to figure out if node should be hidden with `hide_weights`.
"""
weight_like = ('_weight', '_bias', '_beta', '_gamma',
'_moving_var', '_moving_mean', '_running_var', '_running_mean')
return name.endswith(weight_like)
# make nodes
hidden_nodes = set()
for node in nodes:
op = node["op"]
name = node["name"]
# input data
attr = copy.deepcopy(node_attr)
label = name
if op == "null":
if looks_like_weight(node["name"]):
if hide_weights:
hidden_nodes.add(node["name"])
# else we don't render a node, but
# don't add it to the hidden_nodes set
# so it gets rendered as an empty oval
continue
attr["shape"] = "oval" # inputs get their own shape
label = node["name"]
attr["fillcolor"] = cm[0]
elif op == "Convolution":
label = "Convolution\n{kernel}/{stride}, {filter}".format(
kernel="x".join(_str2tuple(node["attrs"]["kernel"])),
stride="x".join(_str2tuple(node["attrs"]["stride"]))
if "stride" in node["attrs"] else "1",
filter=node["attrs"]["num_filter"]
)
attr["fillcolor"] = cm[1]
elif op == "FullyConnected":
label = "FullyConnected\n{hidden}".format(hidden=node["attrs"]["num_hidden"])
attr["fillcolor"] = cm[1]
elif op == "BatchNorm":
attr["fillcolor"] = cm[3]
elif op == 'Activation':
act_type = node["attrs"]["act_type"]
label = 'Activation\n{activation}'.format(activation=act_type)
attr["fillcolor"] = cm[2]
elif op == 'LeakyReLU':
attrs = node.get("attrs")
act_type = attrs.get("act_type", "Leaky") if attrs else "Leaky"
label = 'LeakyReLU\n{activation}'.format(activation=act_type)
attr["fillcolor"] = cm[2]
elif op == "Pooling":
label = "Pooling\n{pooltype}, {kernel}/{stride}".format(pooltype=node["attrs"]["pool_type"],
kernel="x".join(_str2tuple(node["attrs"]["kernel"]))
if "kernel" in node["attrs"] else "[]",
stride="x".join(_str2tuple(node["attrs"]["stride"]))
if "stride" in node["attrs"] else "1")
attr["fillcolor"] = cm[4]
elif op in ("Concat", "Flatten", "Reshape"):
attr["fillcolor"] = cm[5]
elif op == "Softmax":
attr["fillcolor"] = cm[6]
else:
attr["fillcolor"] = cm[7]
if op == "Custom":
label = node["attrs"]["op_type"]
dot.node(name=name, label=label, **attr)
# add edges
for node in nodes: # pylint: disable=too-many-nested-blocks
op = node["op"]
name = node["name"]
if op == "null":
continue
else:
inputs = node["inputs"]
for item in inputs:
input_node = nodes[item[0]]
input_name = input_node["name"]
if input_name not in hidden_nodes:
attr = {"dir": "back", 'arrowtail':'open', 'label': ''}
# add shapes
if draw_shape:
if input_node["op"] != "null":
key = input_name + "_output"
if "attrs" in input_node:
params = input_node["attrs"]
if "num_outputs" in params:
key += str(int(params["num_outputs"]) - 1)
shape = shape_dict[key][1:]
label = "x".join([str(x) for x in shape])
attr["label"] = label
else:
key = input_name
shape = shape_dict[key][1:]
label = "x".join([str(x) for x in shape])
attr["label"] = label
if draw_type:
if input_node["op"] != "null":
key = input_name + "_output"
if "attrs" in input_node:
params = input_node["attrs"]
if "num_outputs" in params:
key += str(int(params["num_outputs"]) - 1)
dtype = type_dict[key]
attr["label"] += '(' + dtype.__name__ + ')'
else:
key = input_name
dtype = type_dict[key]
attr["label"] += '(' + dtype.__name__ + ')'
dot.edge(tail_name=name, head_name=input_name, **attr)
return dot |
Measure the accuracy of ResNet
Parameters
----------
data_iterator : DataLoader
iterator over (data, label) batches
network : gluon.Block
The ResNet network to evaluate
Returns
----------
accuracy : float
The evaluated classification accuracy
def evaluate_accuracy(data_iterator, network):
""" Measure the accuracy of ResNet
Parameters
----------
data_iterator : DataLoader
iterator over (data, label) batches
network : gluon.Block
The ResNet network to evaluate
Returns
----------
accuracy : float
The evaluated classification accuracy
"""
acc = mx.metric.Accuracy()
# Iterate through data and label
for i, (data, label) in enumerate(data_iterator):
# Get the data and label into the GPU
data = data.as_in_context(ctx[0])
label = label.as_in_context(ctx[0])
# Get network's output which is a probability distribution
# Apply argmax on the probability distribution to get network's classification.
output = network(data)
predictions = nd.argmax(output, axis=1)
# Give network's prediction and the correct label to update the metric
acc.update(preds=predictions, labels=label)
# Return the accuracy
return acc.get()[1] |
Training with multiple GPUs
Parameters
----------
batch_list : list
[data, label] NDArray pair for one batch
context : list
a list of all GPUs to be used for training
network : gluon.Block
The ResNet network to train
gluon_trainer : gluon.Trainer
The trainer that updates the network parameters
def train_batch(batch_list, context, network, gluon_trainer):
""" Training with multiple GPUs
Parameters
----------
batch_list : list
[data, label] NDArray pair for one batch
context : list
a list of all GPUs to be used for training
network : gluon.Block
The ResNet network to train
gluon_trainer : gluon.Trainer
The trainer that updates the network parameters
"""
# Split and load data into multiple GPUs
data = batch_list[0]
data = gluon.utils.split_and_load(data, context)
# Split and load label into multiple GPUs
label = batch_list[1]
label = gluon.utils.split_and_load(label, context)
# Run the forward and backward pass
forward_backward(network, data, label)
# Update the parameters
this_batch_size = batch_list[0].shape[0]
gluon_trainer.step(this_batch_size) |
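forward_backward is referenced above but defined elsewhere; a minimal sketch consistent with its use here, assuming a gluon loss object named loss (e.g. gluon.loss.SoftmaxCrossEntropyLoss()):
>>> from mxnet import autograd, gluon
>>> loss = gluon.loss.SoftmaxCrossEntropyLoss()
>>> def forward_backward(network, data, label):
...     with autograd.record():  # record the graph on every device slice
...         losses = [loss(network(X), Y) for X, Y in zip(data, label)]
...     for l in losses:  # one backward pass per GPU
...         l.backward() |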
Take an executor's underlying symbol graph and return its generated optimized version.
Parameters
----------
executor :
An executor for which you want to see an optimized symbol. Getting an optimized symbol
is useful to compare and verify the work TensorRT has done against a legacy behaviour.
Returns
-------
symbol : nnvm::Symbol
The nnvm symbol optimized.
def get_optimized_symbol(executor):
"""
Take an executor's underlying symbol graph and return its generated optimized version.
Parameters
----------
executor :
An executor for which you want to see an optimized symbol. Getting an optimized symbol
is useful to compare and verify the work TensorRT has done against a legacy behaviour.
Returns
-------
symbol : nnvm::Symbol
The nnvm symbol optimized.
"""
handle = SymbolHandle()
try:
check_call(_LIB.MXExecutorGetOptimizedSymbol(executor.handle, ctypes.byref(handle)))
result = sym.Symbol(handle=handle)
return result
except MXNetError:
logging.error('Error while trying to fetch TRT optimized symbol for graph. Please ensure '
'build was compiled with MXNET_USE_TENSORRT enabled.')
raise |
Bind current symbol to get an optimized trt executor.
Parameters
----------
symbol : Symbol
The symbol you wish to bind, and optimize with TensorRT.
ctx : Context
The device context the generated executor to run on.
all_params : Dict of str->ndarray
A dictionary of mappings from parameter names to parameter NDArrays.
type_dict : Dict of str->numpy.dtype
Input type dictionary, name->dtype
stype_dict : Dict of str->str
Input storage type dictionary, name->storage_type
group2ctx : Dict of string to mx.Context
The dict mapping the `ctx_group` attribute to the context assignment.
kwargs : Dict of str->shape
Input shape dictionary, name->shape
Returns
-------
executor : mxnet.Executor
An optimized TensorRT executor.
def tensorrt_bind(symbol, ctx, all_params, type_dict=None, stype_dict=None, group2ctx=None,
**kwargs):
"""Bind current symbol to get an optimized trt executor.
Parameters
----------
symbol : Symbol
The symbol you wish to bind, and optimize with TensorRT.
ctx : Context
The device context the generated executor to run on.
all_params : Dict of str->ndarray
A dictionary of mappings from parameter names to parameter NDArrays.
type_dict : Dict of str->numpy.dtype
Input type dictionary, name->dtype
stype_dict : Dict of str->str
Input storage type dictionary, name->storage_type
group2ctx : Dict of string to mx.Context
The dict mapping the `ctx_group` attribute to the context assignment.
kwargs : Dict of str->shape
Input shape dictionary, name->shape
Returns
-------
executor : mxnet.Executor
An optimized TensorRT executor.
"""
kwargs['shared_buffer'] = all_params
return symbol.simple_bind(ctx, type_dict=type_dict, stype_dict=stype_dict,
group2ctx=group2ctx, **kwargs) |
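A call sketch (requires an MXNet build with MXNET_USE_TENSORRT enabled; the names below are illustrative):
>>> # executor = tensorrt_bind(sym, ctx=mx.gpu(0), all_params=arg_params,
... #                          data=(32, 3, 224, 224))
>>> # outputs = executor.forward(is_train=False, data=batch) |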
Parameters
----------
num_classes : int, default 1000
Number of classification classes.
num_layers : int
Number of layers for the variant of VGG. Options are 11, 13, 16, 19.
batch_norm : bool, default False
Use batch normalization.
dtype: str, float32 or float16
Data precision.
def get_symbol(num_classes, num_layers=11, batch_norm=False, dtype='float32', **kwargs):
"""
Parameters
----------
num_classes : int, default 1000
Number of classification classes.
num_layers : int
Number of layers for the variant of VGG. Options are 11, 13, 16, 19.
batch_norm : bool, default False
Use batch normalization.
dtype: str, float32 or float16
Data precision.
"""
vgg_spec = {11: ([1, 1, 2, 2, 2], [64, 128, 256, 512, 512]),
13: ([2, 2, 2, 2, 2], [64, 128, 256, 512, 512]),
16: ([2, 2, 3, 3, 3], [64, 128, 256, 512, 512]),
19: ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512])}
if num_layers not in vgg_spec:
raise ValueError("Invalide num_layers {}. Possible choices are 11,13,16,19.".format(num_layers))
layers, filters = vgg_spec[num_layers]
data = mx.sym.Variable(name="data")
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
feature = get_feature(data, layers, filters, batch_norm)
classifier = get_classifier(feature, num_classes)
if dtype == 'float16':
classifier = mx.sym.Cast(data=classifier, dtype=np.float32)
symbol = mx.sym.SoftmaxOutput(data=classifier, name='softmax')
return symbol |
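For example, the 16-layer variant with batch normalization in float16 (assumes the get_feature/get_classifier helpers defined alongside this function):
>>> sym = get_symbol(num_classes=1000, num_layers=16, batch_norm=True, dtype='float16')
>>> sym.list_outputs()
['softmax_output'] |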
:param frame: a (w, h, channels) numpy array (image)
:return: DataBatch of (1,channels,data_shape,data_shape)
def create_batch(self, frame):
"""
:param frame: a (w, h, channels) numpy array (image)
:return: DataBatch of (1,channels,data_shape,data_shape)
"""
frame_resize = mx.nd.array(cv2.resize(frame, (self.data_shape[0], self.data_shape[1])))
#frame_resize = mx.img.imresize(frame, self.data_shape[0], self.data_shape[1], cv2.INTER_LINEAR)
# Change dimensions from (w,h,channels) to (channels, w, h)
frame_t = mx.nd.transpose(frame_resize, axes=(2,0,1))
frame_norm = frame_t - self.mean_pixels_nd
# Add dimension for batch, results in (1,channels,w,h)
batch_frame = [mx.nd.expand_dims(frame_norm, axis=0)]
batch_shape = [DataDesc('data', batch_frame[0].shape)]
batch = DataBatch(data=batch_frame, provide_data=batch_shape)
return batch |
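The axis shuffle is the key step: HWC frames become CHW and then gain a batch axis; a quick standalone check:
>>> import mxnet as mx
>>> frame = mx.nd.zeros((300, 300, 3))  # (w, h, channels)
>>> chw = mx.nd.transpose(frame, axes=(2, 0, 1))
>>> mx.nd.expand_dims(chw, axis=0).shape
(1, 3, 300, 300) |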
detect all images in iterator
Parameters:
----------
det_iter : DetIter
iterator for all testing images
show_timer : Boolean
whether to print out detection exec time
Returns:
----------
list of detection results
def detect_iter(self, det_iter, show_timer=False):
"""
detect all images in iterator
Parameters:
----------
det_iter : DetIter
iterator for all testing images
show_timer : Boolean
whether to print out detection exec time
Returns:
----------
list of detection results
"""
num_images = det_iter._size
if not isinstance(det_iter, mx.io.PrefetchingIter):
det_iter = mx.io.PrefetchingIter(det_iter)
start = timer()
detections = self.mod.predict(det_iter).asnumpy()
time_elapsed = timer() - start
if show_timer:
logging.info("Detection time for {} images: {:.4f} sec".format(
num_images, time_elapsed))
result = Detector.filter_positive_detections(detections)
return result |
Return detections for batch
:param batch: DataBatch of images to run detection on
:return: list of positive detections, one list per image
def detect_batch(self, batch):
"""
Return detections for batch
:param batch: DataBatch of images to run detection on
:return: list of positive detections, one list per image
"""
self.mod.forward(batch, is_train=False)
detections = self.mod.get_outputs()[0]
positive_detections = Detector.filter_positive_detections(detections)
return positive_detections |
wrapper for detecting multiple images
Parameters:
----------
im_list : list of str
image path or list of image paths
root_dir : str
directory of input images, optional if image path already
has full directory information
extension : str
image extension, eg. ".jpg", optional
Returns:
----------
list of detection results in format [det0, det1...], det is in
format np.array([id, score, xmin, ymin, xmax, ymax]...)
def im_detect(self, im_list, root_dir=None, extension=None, show_timer=False):
"""
wrapper for detecting multiple images
Parameters:
----------
im_list : list of str
image path or list of image paths
root_dir : str
directory of input images, optional if image path already
has full directory information
extension : str
image extension, eg. ".jpg", optional
Returns:
----------
list of detection results in format [det0, det1...], det is in
format np.array([id, score, xmin, ymin, xmax, ymax]...)
"""
test_db = TestDB(im_list, root_dir=root_dir, extension=extension)
test_iter = DetIter(test_db, 1, self.data_shape, self.mean_pixels,
is_train=False)
return self.detect_iter(test_iter, show_timer) |
visualize detections in one image
Parameters:
----------
img : numpy.array
image, in bgr format
dets : numpy.array
ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...])
each row is one object
classes : tuple or list of str
class names
thresh : float
score threshold
def visualize_detection(self, img, dets, classes=[], thresh=0.6):
"""
visualize detections in one image
Parameters:
----------
img : numpy.array
image, in bgr format
dets : numpy.array
ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...])
each row is one object
classes : tuple or list of str
class names
thresh : float
score threshold
"""
import matplotlib.pyplot as plt
import random
plt.imshow(img)
height = img.shape[0]
width = img.shape[1]
colors = dict()
for det in dets:
(klass, score, x0, y0, x1, y1) = det
if score < thresh:
continue
cls_id = int(klass)
if cls_id not in colors:
colors[cls_id] = (random.random(), random.random(), random.random())
xmin = int(x0 * width)
ymin = int(y0 * height)
xmax = int(x1 * width)
ymax = int(y1 * height)
rect = plt.Rectangle((xmin, ymin), xmax - xmin,
ymax - ymin, fill=False,
edgecolor=colors[cls_id],
linewidth=3.5)
plt.gca().add_patch(rect)
class_name = str(cls_id)
if classes and len(classes) > cls_id:
class_name = classes[cls_id]
plt.gca().text(xmin, ymin - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor=colors[cls_id], alpha=0.5),
fontsize=12, color='white')
plt.show() |
First column (class id) is -1 for negative detections
:param detections:
:return:
def filter_positive_detections(detections):
"""
First column (class id) is -1 for negative detections
:param detections:
:return:
"""
class_idx = 0
assert(isinstance(detections, mx.nd.NDArray) or isinstance(detections, np.ndarray))
detections_per_image = []
# for each image
for i in range(detections.shape[0]):
result = []
det = detections[i, :, :]
for obj in det:
if obj[class_idx] >= 0:
result.append(obj)
detections_per_image.append(result)
logging.info("%d positive detections", len(result))
return detections_per_image |
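A tiny worked example: one image containing one kept detection and one negative row (class id -1):
>>> import numpy as np
>>> dets = np.array([[[0., 0.9, 0.1, 0.1, 0.5, 0.5],
...                   [-1., 0., 0., 0., 0., 0.]]])
>>> kept = Detector.filter_positive_detections(dets)
>>> len(kept[0])
1 |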
wrapper for im_detect and visualize_detection
Parameters:
----------
im_list : list of str or str
image path or list of image paths
root_dir : str or None
directory of input images, optional if image path already
has full directory information
extension : str or None
image extension, eg. ".jpg", optional
Returns:
----------
None
def detect_and_visualize(self, im_list, root_dir=None, extension=None,
classes=[], thresh=0.6, show_timer=False):
"""
wrapper for im_detect and visualize_detection
Parameters:
----------
im_list : list of str or str
image path or list of image paths
root_dir : str or None
directory of input images, optional if image path already
has full directory information
extension : str or None
image extension, eg. ".jpg", optional
Returns:
----------
None
"""
dets = self.im_detect(im_list, root_dir, extension, show_timer=show_timer)
if not isinstance(im_list, list):
im_list = [im_list]
assert len(dets) == len(im_list)
for k, det in enumerate(dets):
img = cv2.imread(im_list[k])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.visualize_detection(img, det, classes, thresh) |
Runs the caffe upgrade tool on the prototxt to create a prototxt in the latest format.
This enables us to work only with the latest structures, instead of supporting all the variants
:param caffe_root: link to caffe root folder, where the upgrade tool is located
:param deploy_proto: name of the original prototxt file
:return: name of new processed prototxt file
def process_network_proto(caffe_root, deploy_proto):
"""
Runs the caffe upgrade tool on the prototxt to create a prototxt in the latest format.
This enables us to work only with the latest structures, instead of supporting all the variants
:param caffe_root: link to caffe root folder, where the upgrade tool is located
:param deploy_proto: name of the original prototxt file
:return: name of new processed prototxt file
"""
processed_deploy_proto = deploy_proto + ".processed"
from shutil import copyfile
copyfile(deploy_proto, processed_deploy_proto)
# run upgrade tool on new file name (same output file)
import os
upgrade_tool_command_line = caffe_root + '/build/tools/upgrade_net_proto_text.bin ' \
+ processed_deploy_proto + ' ' + processed_deploy_proto
os.system(upgrade_tool_command_line)
return processed_deploy_proto |
Reads from the caffe prototxt the network structure
:param processed_deploy_prototxt: name of prototxt to load, preferably the prototxt should
be processed before using a call to process_network_proto()
:return: network_def, layer_name_to_record, top_to_layers
network_def: caffe network structure, gives access to *all* the network information
layer_name_to_record: *ordered* dictionary which maps between layer name and a structure which
describes in a simple form the layer parameters
top_to_layers: dictionary which maps a blob name to an ordered list of layers which output it
when a top is used several times, like in in-place layers, the list will contain all the layers
by order of appearance
def read_network_dag(processed_deploy_prototxt):
"""
Reads from the caffe prototxt the network structure
:param processed_deploy_prototxt: name of prototxt to load, preferably the prototxt should
be processed before using a call to process_network_proto()
:return: network_def, layer_name_to_record, top_to_layers
network_def: caffe network structure, gives access to *all* the network information
layer_name_to_record: *ordered* dictionary which maps between layer name and a structure which
describes in a simple form the layer parameters
top_to_layers: dictionary which maps a blob name to an ordered list of layers which output it
when a top is used several times, like in in-place layers, the list will contain all the layers
by order of appearance
"""
from caffe.proto import caffe_pb2
from google.protobuf import text_format # pylint: disable=relative-import
from collections import OrderedDict
# load prototxt file
network_def = caffe_pb2.NetParameter()
with open(processed_deploy_prototxt, 'r') as proto_file:
text_format.Merge(str(proto_file.read()), network_def)
# map layer name to layer record
layer_name_to_record = OrderedDict()
for layer_def in network_def.layer:
if (len(layer_def.include) == 0) or \
(caffe_pb2.TEST in [item.phase for item in layer_def.include]):
layer_name_to_record[layer_def.name] = LayerRecord(layer_def)
top_to_layers = dict()
for layer in network_def.layer:
# no specific phase, or TEST phase is specifically asked for
if (len(layer.include) == 0) or (caffe_pb2.TEST in [item.phase for item in layer.include]):
for top in layer.top:
if top not in top_to_layers:
top_to_layers[top] = list()
top_to_layers[top].append(layer.name)
# find parents and children of all layers
for child_layer_name in layer_name_to_record.keys(): # pylint: disable=too-many-nested-blocks
child_layer_def = layer_name_to_record[child_layer_name]
for bottom in child_layer_def.bottoms:
if bottom in top_to_layers:
for parent_layer_name in top_to_layers[bottom]:
if parent_layer_name in layer_name_to_record:
parent_layer_def = layer_name_to_record[parent_layer_name]
if parent_layer_def not in child_layer_def.parents:
child_layer_def.parents.append(parent_layer_def)
if child_layer_def not in parent_layer_def.children:
parent_layer_def.children.append(child_layer_def)
# update filter, stride, pad for maxout "structures"
for layer_name in layer_name_to_record.keys():
layer_def = layer_name_to_record[layer_name]
if layer_def.type == 'Eltwise' and \
len(layer_def.parents) == 1 and \
layer_def.parents[0].type == 'Slice' and \
len(layer_def.parents[0].parents) == 1 and \
layer_def.parents[0].parents[0].type in ['Convolution', 'InnerProduct']:
layer_def.filter = layer_def.parents[0].parents[0].filter
layer_def.stride = layer_def.parents[0].parents[0].stride
layer_def.pad = layer_def.parents[0].parents[0].pad
return network_def, layer_name_to_record, top_to_layers |
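A call sketch (needs pycaffe on the PYTHONPATH; the file name is illustrative):
>>> # net_def, records, tops = read_network_dag('deploy.prototxt.processed')
>>> # list(records)[:3]   # first layer names, in network order
>>> # tops['conv1']       # layers that write the 'conv1' blob |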