repo
stringlengths 7
55
| path
stringlengths 4
223
| func_name
stringlengths 1
134
| original_string
stringlengths 75
104k
| language
stringclasses 1
value | code
stringlengths 75
104k
| code_tokens
listlengths 19
28.4k
| docstring
stringlengths 1
46.9k
| docstring_tokens
listlengths 1
1.97k
| sha
stringlengths 40
40
| url
stringlengths 87
315
| partition
stringclasses 1
value |
|---|---|---|---|---|---|---|---|---|---|---|---|
apache/incubator-mxnet
|
python/mxnet/autograd.py
|
is_recording
|
def is_recording():
"""Get status on recording/not recording.
Returns
-------
Current state of recording.
"""
curr = ctypes.c_bool()
check_call(_LIB.MXAutogradIsRecording(ctypes.byref(curr)))
return curr.value
|
python
|
def is_recording():
"""Get status on recording/not recording.
Returns
-------
Current state of recording.
"""
curr = ctypes.c_bool()
check_call(_LIB.MXAutogradIsRecording(ctypes.byref(curr)))
return curr.value
|
[
"def",
"is_recording",
"(",
")",
":",
"curr",
"=",
"ctypes",
".",
"c_bool",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXAutogradIsRecording",
"(",
"ctypes",
".",
"byref",
"(",
"curr",
")",
")",
")",
"return",
"curr",
".",
"value"
] |
Get status on recording/not recording.
Returns
-------
Current state of recording.
|
[
"Get",
"status",
"on",
"recording",
"/",
"not",
"recording",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L70-L79
|
train
|
apache/incubator-mxnet
|
python/mxnet/autograd.py
|
is_training
|
def is_training():
"""Get status on training/predicting.
Returns
-------
Current state of training/predicting.
"""
curr = ctypes.c_bool()
check_call(_LIB.MXAutogradIsTraining(ctypes.byref(curr)))
return curr.value
|
python
|
def is_training():
"""Get status on training/predicting.
Returns
-------
Current state of training/predicting.
"""
curr = ctypes.c_bool()
check_call(_LIB.MXAutogradIsTraining(ctypes.byref(curr)))
return curr.value
|
[
"def",
"is_training",
"(",
")",
":",
"curr",
"=",
"ctypes",
".",
"c_bool",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXAutogradIsTraining",
"(",
"ctypes",
".",
"byref",
"(",
"curr",
")",
")",
")",
"return",
"curr",
".",
"value"
] |
Get status on training/predicting.
Returns
-------
Current state of training/predicting.
|
[
"Get",
"status",
"on",
"training",
"/",
"predicting",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L81-L90
|
train
|
apache/incubator-mxnet
|
python/mxnet/autograd.py
|
mark_variables
|
def mark_variables(variables, gradients, grad_reqs='write'):
"""Mark NDArrays as variables to compute gradient for autograd.
Parameters
----------
variables: NDArray or list of NDArray
gradients: NDArray or list of NDArray
grad_reqs: str or list of str
"""
if isinstance(variables, NDArray):
assert isinstance(gradients, NDArray)
variables = [variables]
gradients = [gradients]
if isinstance(grad_reqs, string_types):
grad_reqs = [_GRAD_REQ_MAP[grad_reqs]]*len(variables)
else:
grad_reqs = [_GRAD_REQ_MAP[i] for i in grad_reqs]
check_call(_LIB.MXAutogradMarkVariables(
len(variables),
c_handle_array(variables),
c_array_buf(mx_uint, array('I', grad_reqs)),
c_handle_array(gradients)))
|
python
|
def mark_variables(variables, gradients, grad_reqs='write'):
"""Mark NDArrays as variables to compute gradient for autograd.
Parameters
----------
variables: NDArray or list of NDArray
gradients: NDArray or list of NDArray
grad_reqs: str or list of str
"""
if isinstance(variables, NDArray):
assert isinstance(gradients, NDArray)
variables = [variables]
gradients = [gradients]
if isinstance(grad_reqs, string_types):
grad_reqs = [_GRAD_REQ_MAP[grad_reqs]]*len(variables)
else:
grad_reqs = [_GRAD_REQ_MAP[i] for i in grad_reqs]
check_call(_LIB.MXAutogradMarkVariables(
len(variables),
c_handle_array(variables),
c_array_buf(mx_uint, array('I', grad_reqs)),
c_handle_array(gradients)))
|
[
"def",
"mark_variables",
"(",
"variables",
",",
"gradients",
",",
"grad_reqs",
"=",
"'write'",
")",
":",
"if",
"isinstance",
"(",
"variables",
",",
"NDArray",
")",
":",
"assert",
"isinstance",
"(",
"gradients",
",",
"NDArray",
")",
"variables",
"=",
"[",
"variables",
"]",
"gradients",
"=",
"[",
"gradients",
"]",
"if",
"isinstance",
"(",
"grad_reqs",
",",
"string_types",
")",
":",
"grad_reqs",
"=",
"[",
"_GRAD_REQ_MAP",
"[",
"grad_reqs",
"]",
"]",
"*",
"len",
"(",
"variables",
")",
"else",
":",
"grad_reqs",
"=",
"[",
"_GRAD_REQ_MAP",
"[",
"i",
"]",
"for",
"i",
"in",
"grad_reqs",
"]",
"check_call",
"(",
"_LIB",
".",
"MXAutogradMarkVariables",
"(",
"len",
"(",
"variables",
")",
",",
"c_handle_array",
"(",
"variables",
")",
",",
"c_array_buf",
"(",
"mx_uint",
",",
"array",
"(",
"'I'",
",",
"grad_reqs",
")",
")",
",",
"c_handle_array",
"(",
"gradients",
")",
")",
")"
] |
Mark NDArrays as variables to compute gradient for autograd.
Parameters
----------
variables: NDArray or list of NDArray
gradients: NDArray or list of NDArray
grad_reqs: str or list of str
|
[
"Mark",
"NDArrays",
"as",
"variables",
"to",
"compute",
"gradient",
"for",
"autograd",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L197-L220
|
train
|
apache/incubator-mxnet
|
python/mxnet/autograd.py
|
_parse_head
|
def _parse_head(heads, head_grads):
"""parse head gradient for backward and grad."""
if isinstance(heads, NDArray):
heads = [heads]
if isinstance(head_grads, NDArray):
head_grads = [head_grads]
head_handles = c_handle_array(heads)
if head_grads is None:
hgrad_handles = ctypes.c_void_p(0)
else:
assert len(heads) == len(head_grads), \
"heads and head_grads must be lists of the same length"
hgrad_handles = c_array(NDArrayHandle,
[i.handle if i is not None else NDArrayHandle(0)
for i in head_grads])
return head_handles, hgrad_handles
|
python
|
def _parse_head(heads, head_grads):
"""parse head gradient for backward and grad."""
if isinstance(heads, NDArray):
heads = [heads]
if isinstance(head_grads, NDArray):
head_grads = [head_grads]
head_handles = c_handle_array(heads)
if head_grads is None:
hgrad_handles = ctypes.c_void_p(0)
else:
assert len(heads) == len(head_grads), \
"heads and head_grads must be lists of the same length"
hgrad_handles = c_array(NDArrayHandle,
[i.handle if i is not None else NDArrayHandle(0)
for i in head_grads])
return head_handles, hgrad_handles
|
[
"def",
"_parse_head",
"(",
"heads",
",",
"head_grads",
")",
":",
"if",
"isinstance",
"(",
"heads",
",",
"NDArray",
")",
":",
"heads",
"=",
"[",
"heads",
"]",
"if",
"isinstance",
"(",
"head_grads",
",",
"NDArray",
")",
":",
"head_grads",
"=",
"[",
"head_grads",
"]",
"head_handles",
"=",
"c_handle_array",
"(",
"heads",
")",
"if",
"head_grads",
"is",
"None",
":",
"hgrad_handles",
"=",
"ctypes",
".",
"c_void_p",
"(",
"0",
")",
"else",
":",
"assert",
"len",
"(",
"heads",
")",
"==",
"len",
"(",
"head_grads",
")",
",",
"\"heads and head_grads must be lists of the same length\"",
"hgrad_handles",
"=",
"c_array",
"(",
"NDArrayHandle",
",",
"[",
"i",
".",
"handle",
"if",
"i",
"is",
"not",
"None",
"else",
"NDArrayHandle",
"(",
"0",
")",
"for",
"i",
"in",
"head_grads",
"]",
")",
"return",
"head_handles",
",",
"hgrad_handles"
] |
parse head gradient for backward and grad.
|
[
"parse",
"head",
"gradient",
"for",
"backward",
"and",
"grad",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L223-L240
|
train
|
apache/incubator-mxnet
|
python/mxnet/autograd.py
|
backward
|
def backward(heads, head_grads=None, retain_graph=False, train_mode=True): #pylint: disable=redefined-outer-name
"""Compute the gradients of heads w.r.t previously marked variables.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
train_mode: bool, optional
Whether to do backward for training or predicting.
"""
head_handles, hgrad_handles = _parse_head(heads, head_grads)
check_call(_LIB.MXAutogradBackwardEx(
len(head_handles),
head_handles,
hgrad_handles,
0,
ctypes.c_void_p(0),
ctypes.c_int(retain_graph),
ctypes.c_int(0),
ctypes.c_int(train_mode),
ctypes.c_void_p(0),
ctypes.c_void_p(0)))
|
python
|
def backward(heads, head_grads=None, retain_graph=False, train_mode=True): #pylint: disable=redefined-outer-name
"""Compute the gradients of heads w.r.t previously marked variables.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
train_mode: bool, optional
Whether to do backward for training or predicting.
"""
head_handles, hgrad_handles = _parse_head(heads, head_grads)
check_call(_LIB.MXAutogradBackwardEx(
len(head_handles),
head_handles,
hgrad_handles,
0,
ctypes.c_void_p(0),
ctypes.c_int(retain_graph),
ctypes.c_int(0),
ctypes.c_int(train_mode),
ctypes.c_void_p(0),
ctypes.c_void_p(0)))
|
[
"def",
"backward",
"(",
"heads",
",",
"head_grads",
"=",
"None",
",",
"retain_graph",
"=",
"False",
",",
"train_mode",
"=",
"True",
")",
":",
"#pylint: disable=redefined-outer-name",
"head_handles",
",",
"hgrad_handles",
"=",
"_parse_head",
"(",
"heads",
",",
"head_grads",
")",
"check_call",
"(",
"_LIB",
".",
"MXAutogradBackwardEx",
"(",
"len",
"(",
"head_handles",
")",
",",
"head_handles",
",",
"hgrad_handles",
",",
"0",
",",
"ctypes",
".",
"c_void_p",
"(",
"0",
")",
",",
"ctypes",
".",
"c_int",
"(",
"retain_graph",
")",
",",
"ctypes",
".",
"c_int",
"(",
"0",
")",
",",
"ctypes",
".",
"c_int",
"(",
"train_mode",
")",
",",
"ctypes",
".",
"c_void_p",
"(",
"0",
")",
",",
"ctypes",
".",
"c_void_p",
"(",
"0",
")",
")",
")"
] |
Compute the gradients of heads w.r.t previously marked variables.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
train_mode: bool, optional
Whether to do backward for training or predicting.
|
[
"Compute",
"the",
"gradients",
"of",
"heads",
"w",
".",
"r",
".",
"t",
"previously",
"marked",
"variables",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L243-L267
|
train
|
apache/incubator-mxnet
|
python/mxnet/autograd.py
|
grad
|
def grad(heads, variables, head_grads=None, retain_graph=None, create_graph=False,
train_mode=True): #pylint: disable=redefined-outer-name
"""Compute the gradients of heads w.r.t variables. Gradients will be
returned as new NDArrays instead of stored into `variable.grad`.
Supports recording gradient graph for computing higher order gradients.
.. note::
Currently only a very limited set of operators support higher order \
gradients.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
variables: NDArray or list of NDArray
Input variables to compute gradients for.
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
retain_graph: bool
Whether to keep computation graph to differentiate again, instead
of clearing history and release memory. Defaults to the same value
as create_graph.
create_graph: bool
Whether to record gradient graph for computing higher order
train_mode: bool, optional
Whether to do backward for training or prediction.
Returns
-------
NDArray or list of NDArray:
Gradients with respect to variables.
Examples
--------
>>> x = mx.nd.ones((1,))
>>> x.attach_grad()
>>> with mx.autograd.record():
... z = mx.nd.elemwise_add(mx.nd.exp(x), x)
>>> dx = mx.autograd.grad(z, [x], create_graph=True)
>>> print(dx)
[
[ 3.71828175]
<NDArray 1 @cpu(0)>]
"""
head_handles, hgrad_handles = _parse_head(heads, head_grads)
if isinstance(variables, NDArray):
variables = [variables]
else:
assert len(variables), "variables cannot be an empty list."
var_handles = c_handle_array(variables)
retain_graph = retain_graph if retain_graph is not None else create_graph
grad_vars = ctypes.POINTER(NDArrayHandle)()
grad_stypes = ctypes.POINTER(ctypes.c_int)()
check_call(_LIB.MXAutogradBackwardEx(
len(head_handles),
head_handles,
hgrad_handles,
len(var_handles),
var_handles,
ctypes.c_int(retain_graph),
ctypes.c_int(create_graph),
ctypes.c_int(train_mode),
ctypes.byref(grad_vars),
ctypes.byref(grad_stypes)))
ret = [_ndarray_cls(ctypes.cast(grad_vars[i], NDArrayHandle),
stype=grad_stypes[i])
for i in range(len(var_handles))]
if isinstance(variables, NDArray):
return ret[0]
return ret
|
python
|
def grad(heads, variables, head_grads=None, retain_graph=None, create_graph=False,
train_mode=True): #pylint: disable=redefined-outer-name
"""Compute the gradients of heads w.r.t variables. Gradients will be
returned as new NDArrays instead of stored into `variable.grad`.
Supports recording gradient graph for computing higher order gradients.
.. note::
Currently only a very limited set of operators support higher order \
gradients.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
variables: NDArray or list of NDArray
Input variables to compute gradients for.
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
retain_graph: bool
Whether to keep computation graph to differentiate again, instead
of clearing history and release memory. Defaults to the same value
as create_graph.
create_graph: bool
Whether to record gradient graph for computing higher order
train_mode: bool, optional
Whether to do backward for training or prediction.
Returns
-------
NDArray or list of NDArray:
Gradients with respect to variables.
Examples
--------
>>> x = mx.nd.ones((1,))
>>> x.attach_grad()
>>> with mx.autograd.record():
... z = mx.nd.elemwise_add(mx.nd.exp(x), x)
>>> dx = mx.autograd.grad(z, [x], create_graph=True)
>>> print(dx)
[
[ 3.71828175]
<NDArray 1 @cpu(0)>]
"""
head_handles, hgrad_handles = _parse_head(heads, head_grads)
if isinstance(variables, NDArray):
variables = [variables]
else:
assert len(variables), "variables cannot be an empty list."
var_handles = c_handle_array(variables)
retain_graph = retain_graph if retain_graph is not None else create_graph
grad_vars = ctypes.POINTER(NDArrayHandle)()
grad_stypes = ctypes.POINTER(ctypes.c_int)()
check_call(_LIB.MXAutogradBackwardEx(
len(head_handles),
head_handles,
hgrad_handles,
len(var_handles),
var_handles,
ctypes.c_int(retain_graph),
ctypes.c_int(create_graph),
ctypes.c_int(train_mode),
ctypes.byref(grad_vars),
ctypes.byref(grad_stypes)))
ret = [_ndarray_cls(ctypes.cast(grad_vars[i], NDArrayHandle),
stype=grad_stypes[i])
for i in range(len(var_handles))]
if isinstance(variables, NDArray):
return ret[0]
return ret
|
[
"def",
"grad",
"(",
"heads",
",",
"variables",
",",
"head_grads",
"=",
"None",
",",
"retain_graph",
"=",
"None",
",",
"create_graph",
"=",
"False",
",",
"train_mode",
"=",
"True",
")",
":",
"#pylint: disable=redefined-outer-name",
"head_handles",
",",
"hgrad_handles",
"=",
"_parse_head",
"(",
"heads",
",",
"head_grads",
")",
"if",
"isinstance",
"(",
"variables",
",",
"NDArray",
")",
":",
"variables",
"=",
"[",
"variables",
"]",
"else",
":",
"assert",
"len",
"(",
"variables",
")",
",",
"\"variables cannot be an empty list.\"",
"var_handles",
"=",
"c_handle_array",
"(",
"variables",
")",
"retain_graph",
"=",
"retain_graph",
"if",
"retain_graph",
"is",
"not",
"None",
"else",
"create_graph",
"grad_vars",
"=",
"ctypes",
".",
"POINTER",
"(",
"NDArrayHandle",
")",
"(",
")",
"grad_stypes",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_int",
")",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXAutogradBackwardEx",
"(",
"len",
"(",
"head_handles",
")",
",",
"head_handles",
",",
"hgrad_handles",
",",
"len",
"(",
"var_handles",
")",
",",
"var_handles",
",",
"ctypes",
".",
"c_int",
"(",
"retain_graph",
")",
",",
"ctypes",
".",
"c_int",
"(",
"create_graph",
")",
",",
"ctypes",
".",
"c_int",
"(",
"train_mode",
")",
",",
"ctypes",
".",
"byref",
"(",
"grad_vars",
")",
",",
"ctypes",
".",
"byref",
"(",
"grad_stypes",
")",
")",
")",
"ret",
"=",
"[",
"_ndarray_cls",
"(",
"ctypes",
".",
"cast",
"(",
"grad_vars",
"[",
"i",
"]",
",",
"NDArrayHandle",
")",
",",
"stype",
"=",
"grad_stypes",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"var_handles",
")",
")",
"]",
"if",
"isinstance",
"(",
"variables",
",",
"NDArray",
")",
":",
"return",
"ret",
"[",
"0",
"]",
"return",
"ret"
] |
Compute the gradients of heads w.r.t variables. Gradients will be
returned as new NDArrays instead of stored into `variable.grad`.
Supports recording gradient graph for computing higher order gradients.
.. note::
Currently only a very limited set of operators support higher order \
gradients.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
variables: NDArray or list of NDArray
Input variables to compute gradients for.
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
retain_graph: bool
Whether to keep computation graph to differentiate again, instead
of clearing history and release memory. Defaults to the same value
as create_graph.
create_graph: bool
Whether to record gradient graph for computing higher order
train_mode: bool, optional
Whether to do backward for training or prediction.
Returns
-------
NDArray or list of NDArray:
Gradients with respect to variables.
Examples
--------
>>> x = mx.nd.ones((1,))
>>> x.attach_grad()
>>> with mx.autograd.record():
... z = mx.nd.elemwise_add(mx.nd.exp(x), x)
>>> dx = mx.autograd.grad(z, [x], create_graph=True)
>>> print(dx)
[
[ 3.71828175]
<NDArray 1 @cpu(0)>]
|
[
"Compute",
"the",
"gradients",
"of",
"heads",
"w",
".",
"r",
".",
"t",
"variables",
".",
"Gradients",
"will",
"be",
"returned",
"as",
"new",
"NDArrays",
"instead",
"of",
"stored",
"into",
"variable",
".",
"grad",
".",
"Supports",
"recording",
"gradient",
"graph",
"for",
"computing",
"higher",
"order",
"gradients",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L270-L344
|
train
|
apache/incubator-mxnet
|
python/mxnet/autograd.py
|
get_symbol
|
def get_symbol(x):
"""Retrieve recorded computation history as `Symbol`.
Parameters
----------
x : NDArray
Array representing the head of computation graph.
Returns
-------
Symbol
The retrieved Symbol.
"""
hdl = SymbolHandle()
check_call(_LIB.MXAutogradGetSymbol(x.handle, ctypes.byref(hdl)))
return Symbol(hdl)
|
python
|
def get_symbol(x):
"""Retrieve recorded computation history as `Symbol`.
Parameters
----------
x : NDArray
Array representing the head of computation graph.
Returns
-------
Symbol
The retrieved Symbol.
"""
hdl = SymbolHandle()
check_call(_LIB.MXAutogradGetSymbol(x.handle, ctypes.byref(hdl)))
return Symbol(hdl)
|
[
"def",
"get_symbol",
"(",
"x",
")",
":",
"hdl",
"=",
"SymbolHandle",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXAutogradGetSymbol",
"(",
"x",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"hdl",
")",
")",
")",
"return",
"Symbol",
"(",
"hdl",
")"
] |
Retrieve recorded computation history as `Symbol`.
Parameters
----------
x : NDArray
Array representing the head of computation graph.
Returns
-------
Symbol
The retrieved Symbol.
|
[
"Retrieve",
"recorded",
"computation",
"history",
"as",
"Symbol",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/autograd.py#L347-L362
|
train
|
apache/incubator-mxnet
|
example/recommenders/movielens_data.py
|
load_mldataset
|
def load_mldataset(filename):
"""Not particularly fast code to parse the text file and load it into three NDArray's
and product an NDArrayIter
"""
user = []
item = []
score = []
with open(filename) as f:
for line in f:
tks = line.strip().split('\t')
if len(tks) != 4:
continue
user.append(int(tks[0]))
item.append(int(tks[1]))
score.append(float(tks[2]))
user = mx.nd.array(user)
item = mx.nd.array(item)
score = mx.nd.array(score)
return gluon.data.ArrayDataset(user, item, score)
|
python
|
def load_mldataset(filename):
"""Not particularly fast code to parse the text file and load it into three NDArray's
and product an NDArrayIter
"""
user = []
item = []
score = []
with open(filename) as f:
for line in f:
tks = line.strip().split('\t')
if len(tks) != 4:
continue
user.append(int(tks[0]))
item.append(int(tks[1]))
score.append(float(tks[2]))
user = mx.nd.array(user)
item = mx.nd.array(item)
score = mx.nd.array(score)
return gluon.data.ArrayDataset(user, item, score)
|
[
"def",
"load_mldataset",
"(",
"filename",
")",
":",
"user",
"=",
"[",
"]",
"item",
"=",
"[",
"]",
"score",
"=",
"[",
"]",
"with",
"open",
"(",
"filename",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"tks",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"if",
"len",
"(",
"tks",
")",
"!=",
"4",
":",
"continue",
"user",
".",
"append",
"(",
"int",
"(",
"tks",
"[",
"0",
"]",
")",
")",
"item",
".",
"append",
"(",
"int",
"(",
"tks",
"[",
"1",
"]",
")",
")",
"score",
".",
"append",
"(",
"float",
"(",
"tks",
"[",
"2",
"]",
")",
")",
"user",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"user",
")",
"item",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"item",
")",
"score",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"score",
")",
"return",
"gluon",
".",
"data",
".",
"ArrayDataset",
"(",
"user",
",",
"item",
",",
"score",
")"
] |
Not particularly fast code to parse the text file and load it into three NDArray's
and product an NDArrayIter
|
[
"Not",
"particularly",
"fast",
"code",
"to",
"parse",
"the",
"text",
"file",
"and",
"load",
"it",
"into",
"three",
"NDArray",
"s",
"and",
"product",
"an",
"NDArrayIter"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/recommenders/movielens_data.py#L25-L43
|
train
|
apache/incubator-mxnet
|
cpp-package/scripts/OpWrapperGenerator.py
|
ParseAllOps
|
def ParseAllOps():
"""
MXNET_DLL int MXSymbolListAtomicSymbolCreators(mx_uint *out_size,
AtomicSymbolCreator **out_array);
MXNET_DLL int MXSymbolGetAtomicSymbolInfo(AtomicSymbolCreator creator,
const char **name,
const char **description,
mx_uint *num_args,
const char ***arg_names,
const char ***arg_type_infos,
const char ***arg_descriptions,
const char **key_var_num_args);
"""
cdll.libmxnet = cdll.LoadLibrary(sys.argv[1])
ListOP = cdll.libmxnet.MXSymbolListAtomicSymbolCreators
GetOpInfo = cdll.libmxnet.MXSymbolGetAtomicSymbolInfo
ListOP.argtypes=[POINTER(c_int), POINTER(POINTER(c_void_p))]
GetOpInfo.argtypes=[c_void_p, \
POINTER(c_char_p), \
POINTER(c_char_p), \
POINTER(c_int), \
POINTER(POINTER(c_char_p)), \
POINTER(POINTER(c_char_p)), \
POINTER(POINTER(c_char_p)), \
POINTER(c_char_p), \
POINTER(c_char_p)
]
nOps = c_int()
opHandlers = POINTER(c_void_p)()
r = ListOP(byref(nOps), byref(opHandlers))
ret = ''
ret2 = ''
for i in range(0, nOps.value):
handler = opHandlers[i]
name = c_char_p()
description = c_char_p()
nArgs = c_int()
argNames = POINTER(c_char_p)()
argTypes = POINTER(c_char_p)()
argDescs = POINTER(c_char_p)()
varArgName = c_char_p()
return_type = c_char_p()
GetOpInfo(handler, byref(name), byref(description), \
byref(nArgs), byref(argNames), byref(argTypes), \
byref(argDescs), byref(varArgName), byref(return_type))
if name.value.decode('utf-8').startswith('_'): # get rid of functions like __init__
continue
args = []
for i in range(0, nArgs.value):
arg = Arg(name.value.decode('utf-8'),
argNames[i].decode('utf-8'),
argTypes[i].decode('utf-8'),
argDescs[i].decode('utf-8'))
args.append(arg)
op = Op(name.value.decode('utf-8'), description.value.decode('utf-8'), args)
ret = ret + op.GetOpDefinitionString(True) + "\n"
ret2 = ret2 + op.GetOpDefinitionString(False) + "\n"
return ret + ret2
|
python
|
def ParseAllOps():
"""
MXNET_DLL int MXSymbolListAtomicSymbolCreators(mx_uint *out_size,
AtomicSymbolCreator **out_array);
MXNET_DLL int MXSymbolGetAtomicSymbolInfo(AtomicSymbolCreator creator,
const char **name,
const char **description,
mx_uint *num_args,
const char ***arg_names,
const char ***arg_type_infos,
const char ***arg_descriptions,
const char **key_var_num_args);
"""
cdll.libmxnet = cdll.LoadLibrary(sys.argv[1])
ListOP = cdll.libmxnet.MXSymbolListAtomicSymbolCreators
GetOpInfo = cdll.libmxnet.MXSymbolGetAtomicSymbolInfo
ListOP.argtypes=[POINTER(c_int), POINTER(POINTER(c_void_p))]
GetOpInfo.argtypes=[c_void_p, \
POINTER(c_char_p), \
POINTER(c_char_p), \
POINTER(c_int), \
POINTER(POINTER(c_char_p)), \
POINTER(POINTER(c_char_p)), \
POINTER(POINTER(c_char_p)), \
POINTER(c_char_p), \
POINTER(c_char_p)
]
nOps = c_int()
opHandlers = POINTER(c_void_p)()
r = ListOP(byref(nOps), byref(opHandlers))
ret = ''
ret2 = ''
for i in range(0, nOps.value):
handler = opHandlers[i]
name = c_char_p()
description = c_char_p()
nArgs = c_int()
argNames = POINTER(c_char_p)()
argTypes = POINTER(c_char_p)()
argDescs = POINTER(c_char_p)()
varArgName = c_char_p()
return_type = c_char_p()
GetOpInfo(handler, byref(name), byref(description), \
byref(nArgs), byref(argNames), byref(argTypes), \
byref(argDescs), byref(varArgName), byref(return_type))
if name.value.decode('utf-8').startswith('_'): # get rid of functions like __init__
continue
args = []
for i in range(0, nArgs.value):
arg = Arg(name.value.decode('utf-8'),
argNames[i].decode('utf-8'),
argTypes[i].decode('utf-8'),
argDescs[i].decode('utf-8'))
args.append(arg)
op = Op(name.value.decode('utf-8'), description.value.decode('utf-8'), args)
ret = ret + op.GetOpDefinitionString(True) + "\n"
ret2 = ret2 + op.GetOpDefinitionString(False) + "\n"
return ret + ret2
|
[
"def",
"ParseAllOps",
"(",
")",
":",
"cdll",
".",
"libmxnet",
"=",
"cdll",
".",
"LoadLibrary",
"(",
"sys",
".",
"argv",
"[",
"1",
"]",
")",
"ListOP",
"=",
"cdll",
".",
"libmxnet",
".",
"MXSymbolListAtomicSymbolCreators",
"GetOpInfo",
"=",
"cdll",
".",
"libmxnet",
".",
"MXSymbolGetAtomicSymbolInfo",
"ListOP",
".",
"argtypes",
"=",
"[",
"POINTER",
"(",
"c_int",
")",
",",
"POINTER",
"(",
"POINTER",
"(",
"c_void_p",
")",
")",
"]",
"GetOpInfo",
".",
"argtypes",
"=",
"[",
"c_void_p",
",",
"POINTER",
"(",
"c_char_p",
")",
",",
"POINTER",
"(",
"c_char_p",
")",
",",
"POINTER",
"(",
"c_int",
")",
",",
"POINTER",
"(",
"POINTER",
"(",
"c_char_p",
")",
")",
",",
"POINTER",
"(",
"POINTER",
"(",
"c_char_p",
")",
")",
",",
"POINTER",
"(",
"POINTER",
"(",
"c_char_p",
")",
")",
",",
"POINTER",
"(",
"c_char_p",
")",
",",
"POINTER",
"(",
"c_char_p",
")",
"]",
"nOps",
"=",
"c_int",
"(",
")",
"opHandlers",
"=",
"POINTER",
"(",
"c_void_p",
")",
"(",
")",
"r",
"=",
"ListOP",
"(",
"byref",
"(",
"nOps",
")",
",",
"byref",
"(",
"opHandlers",
")",
")",
"ret",
"=",
"''",
"ret2",
"=",
"''",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"nOps",
".",
"value",
")",
":",
"handler",
"=",
"opHandlers",
"[",
"i",
"]",
"name",
"=",
"c_char_p",
"(",
")",
"description",
"=",
"c_char_p",
"(",
")",
"nArgs",
"=",
"c_int",
"(",
")",
"argNames",
"=",
"POINTER",
"(",
"c_char_p",
")",
"(",
")",
"argTypes",
"=",
"POINTER",
"(",
"c_char_p",
")",
"(",
")",
"argDescs",
"=",
"POINTER",
"(",
"c_char_p",
")",
"(",
")",
"varArgName",
"=",
"c_char_p",
"(",
")",
"return_type",
"=",
"c_char_p",
"(",
")",
"GetOpInfo",
"(",
"handler",
",",
"byref",
"(",
"name",
")",
",",
"byref",
"(",
"description",
")",
",",
"byref",
"(",
"nArgs",
")",
",",
"byref",
"(",
"argNames",
")",
",",
"byref",
"(",
"argTypes",
")",
",",
"byref",
"(",
"argDescs",
")",
",",
"byref",
"(",
"varArgName",
")",
",",
"byref",
"(",
"return_type",
")",
")",
"if",
"name",
".",
"value",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"startswith",
"(",
"'_'",
")",
":",
"# get rid of functions like __init__",
"continue",
"args",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"nArgs",
".",
"value",
")",
":",
"arg",
"=",
"Arg",
"(",
"name",
".",
"value",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"argNames",
"[",
"i",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"argTypes",
"[",
"i",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"argDescs",
"[",
"i",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"args",
".",
"append",
"(",
"arg",
")",
"op",
"=",
"Op",
"(",
"name",
".",
"value",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"description",
".",
"value",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"args",
")",
"ret",
"=",
"ret",
"+",
"op",
".",
"GetOpDefinitionString",
"(",
"True",
")",
"+",
"\"\\n\"",
"ret2",
"=",
"ret2",
"+",
"op",
".",
"GetOpDefinitionString",
"(",
"False",
")",
"+",
"\"\\n\"",
"return",
"ret",
"+",
"ret2"
] |
MXNET_DLL int MXSymbolListAtomicSymbolCreators(mx_uint *out_size,
AtomicSymbolCreator **out_array);
MXNET_DLL int MXSymbolGetAtomicSymbolInfo(AtomicSymbolCreator creator,
const char **name,
const char **description,
mx_uint *num_args,
const char ***arg_names,
const char ***arg_type_infos,
const char ***arg_descriptions,
const char **key_var_num_args);
|
[
"MXNET_DLL",
"int",
"MXSymbolListAtomicSymbolCreators",
"(",
"mx_uint",
"*",
"out_size",
"AtomicSymbolCreator",
"**",
"out_array",
")",
";"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/OpWrapperGenerator.py#L306-L371
|
train
|
apache/incubator-mxnet
|
tools/caffe_translator/scripts/convert_caffe_model.py
|
main
|
def main():
"""Read .caffemodel path and .params path as input from command line
and use CaffeModelConverter to do the conversion"""
parser = argparse.ArgumentParser(description='.caffemodel to MXNet .params converter.')
parser.add_argument('caffemodel', help='Path to the .caffemodel file to convert.')
parser.add_argument('output_file_name', help='Name of the output .params file.')
args = parser.parse_args()
converter = CaffeModelConverter()
converter.convert(args.caffemodel, args.output_file_name)
|
python
|
def main():
"""Read .caffemodel path and .params path as input from command line
and use CaffeModelConverter to do the conversion"""
parser = argparse.ArgumentParser(description='.caffemodel to MXNet .params converter.')
parser.add_argument('caffemodel', help='Path to the .caffemodel file to convert.')
parser.add_argument('output_file_name', help='Name of the output .params file.')
args = parser.parse_args()
converter = CaffeModelConverter()
converter.convert(args.caffemodel, args.output_file_name)
|
[
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'.caffemodel to MXNet .params converter.'",
")",
"parser",
".",
"add_argument",
"(",
"'caffemodel'",
",",
"help",
"=",
"'Path to the .caffemodel file to convert.'",
")",
"parser",
".",
"add_argument",
"(",
"'output_file_name'",
",",
"help",
"=",
"'Name of the output .params file.'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"converter",
"=",
"CaffeModelConverter",
"(",
")",
"converter",
".",
"convert",
"(",
"args",
".",
"caffemodel",
",",
"args",
".",
"output_file_name",
")"
] |
Read .caffemodel path and .params path as input from command line
and use CaffeModelConverter to do the conversion
|
[
"Read",
".",
"caffemodel",
"path",
"and",
".",
"params",
"path",
"as",
"input",
"from",
"command",
"line",
"and",
"use",
"CaffeModelConverter",
"to",
"do",
"the",
"conversion"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_translator/scripts/convert_caffe_model.py#L108-L118
|
train
|
apache/incubator-mxnet
|
tools/caffe_translator/scripts/convert_caffe_model.py
|
CaffeModelConverter.add_param
|
def add_param(self, param_name, layer_index, blob_index):
"""Add a param to the .params file"""
blobs = self.layers[layer_index].blobs
self.dict_param[param_name] = mx.nd.array(caffe.io.blobproto_to_array(blobs[blob_index]))
|
python
|
def add_param(self, param_name, layer_index, blob_index):
    """Store blob ``blob_index`` of layer ``layer_index`` in the .params dict.

    The Caffe blob is converted to a NumPy array and wrapped in an MXNet
    NDArray before being stored under ``param_name``.
    """
    blobs = self.layers[layer_index].blobs
    # blobproto_to_array yields a NumPy array; wrap it as an MXNet NDArray.
    self.dict_param[param_name] = mx.nd.array(caffe.io.blobproto_to_array(blobs[blob_index]))
|
[
"def",
"add_param",
"(",
"self",
",",
"param_name",
",",
"layer_index",
",",
"blob_index",
")",
":",
"blobs",
"=",
"self",
".",
"layers",
"[",
"layer_index",
"]",
".",
"blobs",
"self",
".",
"dict_param",
"[",
"param_name",
"]",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"caffe",
".",
"io",
".",
"blobproto_to_array",
"(",
"blobs",
"[",
"blob_index",
"]",
")",
")"
] |
Add a param to the .params file
|
[
"Add",
"a",
"param",
"to",
"the",
".",
"params",
"file"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_translator/scripts/convert_caffe_model.py#L33-L36
|
train
|
apache/incubator-mxnet
|
tools/caffe_translator/scripts/convert_caffe_model.py
|
CaffeModelConverter.add_arg_param
|
def add_arg_param(self, param_name, layer_index, blob_index):
    """Store a learnable parameter (e.g. weights of a fully connected layer).

    The name is prefixed with ``arg:`` as expected by MXNet's .params format.
    """
    prefixed_name = 'arg:%s' % param_name
    self.add_param(prefixed_name, layer_index, blob_index)
|
python
|
def add_arg_param(self, param_name, layer_index, blob_index):
    """Add a learnable ('arg:') param to the .params file.

    Example: weights of a fully connected layer.
    """
    # The 'arg:' prefix marks the entry as a trainable argument in MXNet's
    # .params naming scheme.
    self.add_param('arg:%s' % param_name, layer_index, blob_index)
|
[
"def",
"add_arg_param",
"(",
"self",
",",
"param_name",
",",
"layer_index",
",",
"blob_index",
")",
":",
"self",
".",
"add_param",
"(",
"'arg:%s'",
"%",
"param_name",
",",
"layer_index",
",",
"blob_index",
")"
] |
Add an arg param to .params file. Example: weights of a fully connected layer.
|
[
"Add",
"an",
"arg",
"param",
"to",
".",
"params",
"file",
".",
"Example",
":",
"weights",
"of",
"a",
"fully",
"connected",
"layer",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_translator/scripts/convert_caffe_model.py#L38-L40
|
train
|
apache/incubator-mxnet
|
tools/caffe_translator/scripts/convert_caffe_model.py
|
CaffeModelConverter.add_aux_param
|
def add_aux_param(self, param_name, layer_index, blob_index):
    """Store an auxiliary state (e.g. moving_mean of a BatchNorm layer).

    The name is prefixed with ``aux:`` as expected by MXNet's .params format.
    """
    prefixed_name = 'aux:%s' % param_name
    self.add_param(prefixed_name, layer_index, blob_index)
|
python
|
def add_aux_param(self, param_name, layer_index, blob_index):
    """Add an auxiliary ('aux:') param to the .params file.

    Example: moving_mean in a BatchNorm layer.
    """
    # The 'aux:' prefix marks the entry as a non-trainable auxiliary state
    # in MXNet's .params naming scheme.
    self.add_param('aux:%s' % param_name, layer_index, blob_index)
|
[
"def",
"add_aux_param",
"(",
"self",
",",
"param_name",
",",
"layer_index",
",",
"blob_index",
")",
":",
"self",
".",
"add_param",
"(",
"'aux:%s'",
"%",
"param_name",
",",
"layer_index",
",",
"blob_index",
")"
] |
Add an aux param to .params file. Example: moving_mean in BatchNorm layer
|
[
"Add",
"an",
"aux",
"param",
"to",
".",
"params",
"file",
".",
"Example",
":",
"moving_mean",
"in",
"BatchNorm",
"layer"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_translator/scripts/convert_caffe_model.py#L42-L44
|
train
|
apache/incubator-mxnet
|
tools/caffe_translator/scripts/convert_caffe_model.py
|
CaffeModelConverter.add_optional_arg_param
|
def add_optional_arg_param(self, param_name, layer_index, blob_index):
    """Store an arg param only if the layer actually has that blob.

    Some layers omit optional blobs (e.g. a bias term); when the requested
    blob index is absent from the .caffemodel file, do nothing.
    """
    available_blobs = self.layers[layer_index].blobs
    if blob_index >= len(available_blobs):
        return
    self.add_arg_param(param_name, layer_index, blob_index)
|
python
|
def add_optional_arg_param(self, param_name, layer_index, blob_index):
    """Add an arg param. If there is no such param in the .caffemodel file, silently ignore it."""
    blobs = self.layers[layer_index].blobs
    # Some layers legitimately omit optional blobs (e.g. bias); only add the
    # param when the requested blob index actually exists.
    if blob_index < len(blobs):
        self.add_arg_param(param_name, layer_index, blob_index)
|
[
"def",
"add_optional_arg_param",
"(",
"self",
",",
"param_name",
",",
"layer_index",
",",
"blob_index",
")",
":",
"blobs",
"=",
"self",
".",
"layers",
"[",
"layer_index",
"]",
".",
"blobs",
"if",
"blob_index",
"<",
"len",
"(",
"blobs",
")",
":",
"self",
".",
"add_arg_param",
"(",
"param_name",
",",
"layer_index",
",",
"blob_index",
")"
] |
Add an arg param. If there is no such param in .caffemodel file, silently ignore it.
|
[
"Add",
"an",
"arg",
"param",
".",
"If",
"there",
"is",
"no",
"such",
"param",
"in",
".",
"caffemodel",
"fie",
"silently",
"ignore",
"it",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_translator/scripts/convert_caffe_model.py#L46-L50
|
train
|
apache/incubator-mxnet
|
tools/caffe_translator/scripts/convert_caffe_model.py
|
CaffeModelConverter.convert
|
def convert(self, caffemodel_path, outmodel_path):
    """Convert a Caffe .caffemodel file to an MXNet .params file.

    Parameters
    ----------
    caffemodel_path : str
        Path to the input .caffemodel file.
    outmodel_path : str
        Path of the .params file to write.
    """
    net_param = caffe_pb2.NetParameter()
    with open(caffemodel_path, 'rb') as caffe_model_file:
        net_param.ParseFromString(caffe_model_file.read())

    layers = net_param.layer
    self.layers = layers

    for idx, layer in enumerate(layers):
        layer_name = str(layer.name)

        if layer.blobs:

            # If this is a layer that has only weight and bias as parameter
            if layer.type == 'Convolution' or layer.type == 'InnerProduct' \
                    or layer.type == 'Deconvolution':

                # Add weight and bias to the dictionary
                self.add_arg_param('%s_weight' % layer_name, layer_index=idx, blob_index=0)
                self.add_optional_arg_param('%s_bias' % layer_name, layer_index=idx,
                                            blob_index=1)

            elif layer.type == 'BatchNorm':

                gamma_param_name = '%s_gamma' % layer_name
                beta_param_name = '%s_beta' % layer_name

                # BUGFIX: indexing layers[idx + 1] unconditionally raised
                # IndexError when BatchNorm was the last layer of the net.
                next_layer = layers[idx + 1] if idx + 1 < len(layers) else None

                if next_layer is not None and next_layer.type == 'Scale':
                    # If next layer is a Scale layer, get gamma and beta from there
                    self.add_arg_param(gamma_param_name, layer_index=idx+1, blob_index=0)
                    self.add_arg_param(beta_param_name, layer_index=idx+1, blob_index=1)

                mean_param_name = '%s_moving_mean' % layer_name
                var_param_name = '%s_moving_var' % layer_name

                self.add_aux_param(mean_param_name, layer_index=idx, blob_index=0)
                self.add_aux_param(var_param_name, layer_index=idx, blob_index=1)

            elif layer.type == 'Scale':

                # BUGFIX: layers[idx - 1] with idx == 0 silently wrapped to the
                # LAST layer; a Scale layer appearing first must not be skipped
                # based on an unrelated layer's type.
                prev_layer = layers[idx - 1] if idx > 0 else None

                if prev_layer is not None and prev_layer.type == 'BatchNorm':
                    # Gamma/beta were already consumed by the preceding BatchNorm.
                    continue
                else:
                    # Use the naming convention used by CaffeOp
                    self.add_arg_param('%s_0_weight' % layer_name, layer_index=idx,
                                       blob_index=0)
                    self.add_optional_arg_param('%s_1_bias' % layer_name,
                                                layer_index=idx, blob_index=1)

    mx.nd.save(outmodel_path, self.dict_param)
|
python
|
def convert(self, caffemodel_path, outmodel_path):
    """Convert a Caffe .caffemodel file to an MXNet .params file.

    Reads the serialized NetParameter from ``caffemodel_path``, collects
    weights/biases, BatchNorm statistics and Scale parameters into
    ``self.dict_param``, then saves them with ``mx.nd.save``.
    """
    net_param = caffe_pb2.NetParameter()
    with open(caffemodel_path, 'rb') as caffe_model_file:
        net_param.ParseFromString(caffe_model_file.read())
    layers = net_param.layer
    self.layers = layers
    for idx, layer in enumerate(layers):
        layer_name = str(layer.name)
        if layer.blobs:
            # If this is a layer that has only weight and bias as parameter
            if layer.type == 'Convolution' or layer.type == 'InnerProduct' \
                    or layer.type == 'Deconvolution':
                # Add weight and bias to the dictionary
                self.add_arg_param('%s_weight' % layer_name, layer_index=idx, blob_index=0)
                self.add_optional_arg_param('%s_bias' % layer_name, layer_index=idx,
                                            blob_index=1)
            elif layer.type == 'BatchNorm':
                gamma_param_name = '%s_gamma' % layer_name
                beta_param_name = '%s_beta' % layer_name
                # NOTE(review): layers[idx + 1] raises IndexError if BatchNorm
                # is the last layer of the network -- confirm nets always end
                # with another layer after BatchNorm.
                next_layer = layers[idx + 1]
                if next_layer.type == 'Scale':
                    # If next layer is scale layer, get gamma and beta from there
                    self.add_arg_param(gamma_param_name, layer_index=idx+1, blob_index=0)
                    self.add_arg_param(beta_param_name, layer_index=idx+1, blob_index=1)
                mean_param_name = '%s_moving_mean' % layer_name
                var_param_name = '%s_moving_var' % layer_name
                self.add_aux_param(mean_param_name, layer_index=idx, blob_index=0)
                self.add_aux_param(var_param_name, layer_index=idx, blob_index=1)
            elif layer.type == 'Scale':
                # NOTE(review): with idx == 0 this wraps to layers[-1] (the
                # last layer) -- verify a Scale layer can never appear first.
                prev_layer = layers[idx - 1]
                if prev_layer.type == 'BatchNorm':
                    continue
                else:
                    # Use the naming convention used by CaffeOp
                    self.add_arg_param('%s_0_weight' % layer_name, layer_index=idx,
                                       blob_index=0)
                    self.add_optional_arg_param('%s_1_bias' % layer_name,
                                                layer_index=idx, blob_index=1)
    mx.nd.save(outmodel_path, self.dict_param)
|
[
"def",
"convert",
"(",
"self",
",",
"caffemodel_path",
",",
"outmodel_path",
")",
":",
"net_param",
"=",
"caffe_pb2",
".",
"NetParameter",
"(",
")",
"with",
"open",
"(",
"caffemodel_path",
",",
"'rb'",
")",
"as",
"caffe_model_file",
":",
"net_param",
".",
"ParseFromString",
"(",
"caffe_model_file",
".",
"read",
"(",
")",
")",
"layers",
"=",
"net_param",
".",
"layer",
"self",
".",
"layers",
"=",
"layers",
"for",
"idx",
",",
"layer",
"in",
"enumerate",
"(",
"layers",
")",
":",
"layer_name",
"=",
"str",
"(",
"layer",
".",
"name",
")",
"if",
"layer",
".",
"blobs",
":",
"# If this is a layer that has only weight and bias as parameter",
"if",
"layer",
".",
"type",
"==",
"'Convolution'",
"or",
"layer",
".",
"type",
"==",
"'InnerProduct'",
"or",
"layer",
".",
"type",
"==",
"'Deconvolution'",
":",
"# Add weight and bias to the dictionary",
"self",
".",
"add_arg_param",
"(",
"'%s_weight'",
"%",
"layer_name",
",",
"layer_index",
"=",
"idx",
",",
"blob_index",
"=",
"0",
")",
"self",
".",
"add_optional_arg_param",
"(",
"'%s_bias'",
"%",
"layer_name",
",",
"layer_index",
"=",
"idx",
",",
"blob_index",
"=",
"1",
")",
"elif",
"layer",
".",
"type",
"==",
"'BatchNorm'",
":",
"gamma_param_name",
"=",
"'%s_gamma'",
"%",
"layer_name",
"beta_param_name",
"=",
"'%s_beta'",
"%",
"layer_name",
"next_layer",
"=",
"layers",
"[",
"idx",
"+",
"1",
"]",
"if",
"next_layer",
".",
"type",
"==",
"'Scale'",
":",
"# If next layer is scale layer, get gamma and beta from there",
"self",
".",
"add_arg_param",
"(",
"gamma_param_name",
",",
"layer_index",
"=",
"idx",
"+",
"1",
",",
"blob_index",
"=",
"0",
")",
"self",
".",
"add_arg_param",
"(",
"beta_param_name",
",",
"layer_index",
"=",
"idx",
"+",
"1",
",",
"blob_index",
"=",
"1",
")",
"mean_param_name",
"=",
"'%s_moving_mean'",
"%",
"layer_name",
"var_param_name",
"=",
"'%s_moving_var'",
"%",
"layer_name",
"self",
".",
"add_aux_param",
"(",
"mean_param_name",
",",
"layer_index",
"=",
"idx",
",",
"blob_index",
"=",
"0",
")",
"self",
".",
"add_aux_param",
"(",
"var_param_name",
",",
"layer_index",
"=",
"idx",
",",
"blob_index",
"=",
"1",
")",
"elif",
"layer",
".",
"type",
"==",
"'Scale'",
":",
"prev_layer",
"=",
"layers",
"[",
"idx",
"-",
"1",
"]",
"if",
"prev_layer",
".",
"type",
"==",
"'BatchNorm'",
":",
"continue",
"else",
":",
"# Use the naming convention used by CaffeOp",
"self",
".",
"add_arg_param",
"(",
"'%s_0_weight'",
"%",
"layer_name",
",",
"layer_index",
"=",
"idx",
",",
"blob_index",
"=",
"0",
")",
"self",
".",
"add_optional_arg_param",
"(",
"'%s_1_bias'",
"%",
"layer_name",
",",
"layer_index",
"=",
"idx",
",",
"blob_index",
"=",
"1",
")",
"mx",
".",
"nd",
".",
"save",
"(",
"outmodel_path",
",",
"self",
".",
"dict_param",
")"
] |
Convert a Caffe .caffemodel file to MXNet .params file
|
[
"Convert",
"a",
"Caffe",
".",
"caffemodel",
"file",
"to",
"MXNet",
".",
"params",
"file"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_translator/scripts/convert_caffe_model.py#L52-L106
|
train
|
apache/incubator-mxnet
|
example/rcnn/symnet/proposal_target.py
|
sample_rois
|
def sample_rois(rois, gt_boxes, num_classes, rois_per_image, fg_rois_per_image, fg_overlap, box_stds):
    """
    generate random sample of ROIs comprising foreground and background examples
    :param rois: [n, 5] (batch_index, x1, y1, x2, y2)
    :param gt_boxes: [n, 5] (x1, y1, x2, y2, cls)
    :param num_classes: number of classes
    :param rois_per_image: total roi number
    :param fg_rois_per_image: foreground roi number
    :param fg_overlap: overlap threshold for fg rois
    :param box_stds: std var of bbox reg
    :return: (rois, labels, bbox_targets, bbox_weights)
    """
    overlaps = bbox_overlaps(rois[:, 1:], gt_boxes[:, :4])
    gt_assignment = overlaps.argmax(axis=1)
    labels = gt_boxes[gt_assignment, 4]
    max_overlaps = overlaps.max(axis=1)

    # select foreground RoI with FG_THRESH overlap
    fg_indexes = np.where(max_overlaps >= fg_overlap)[0]
    # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs
    fg_rois_this_image = min(fg_rois_per_image, len(fg_indexes))
    # sample foreground regions without replacement
    if len(fg_indexes) > fg_rois_this_image:
        fg_indexes = np.random.choice(fg_indexes, size=fg_rois_this_image, replace=False)

    # select background RoIs as those within [0, FG_THRESH)
    bg_indexes = np.where(max_overlaps < fg_overlap)[0]
    # compute number of background RoIs to take from this image
    # (guarding against there being fewer than desired)
    bg_rois_this_image = min(rois_per_image - fg_rois_this_image, len(bg_indexes))
    # sample bg rois without replacement
    if len(bg_indexes) > bg_rois_this_image:
        bg_indexes = np.random.choice(bg_indexes, size=bg_rois_this_image, replace=False)

    # indexes selected
    keep_indexes = np.append(fg_indexes, bg_indexes)

    # pad more rois to ensure a fixed minibatch size
    while len(keep_indexes) < rois_per_image:
        gap = rois_per_image - len(keep_indexes)
        if len(bg_indexes) > 0:
            gap = min(len(bg_indexes), gap)
            gap_indexes = np.random.choice(range(len(bg_indexes)), size=gap, replace=False)
            keep_indexes = np.append(keep_indexes, bg_indexes[gap_indexes])
        else:
            # BUGFIX: with zero background rois the original loop computed
            # gap = min(0, ...) = 0 and never terminated. Pad by re-sampling
            # already-kept rois (with replacement) instead.
            keep_indexes = np.append(keep_indexes,
                                     np.random.choice(keep_indexes, size=gap, replace=True))

    # sample rois and labels
    rois = rois[keep_indexes]
    labels = labels[keep_indexes]
    # set labels of bg rois to be 0
    labels[fg_rois_this_image:] = 0

    # load or compute bbox_target
    targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4], box_stds=box_stds)
    bbox_targets = np.zeros((rois_per_image, 4 * num_classes), dtype=np.float32)
    bbox_weights = np.zeros((rois_per_image, 4 * num_classes), dtype=np.float32)
    # only foreground rois carry regression targets; each class owns a 4-wide slot
    for i in range(fg_rois_this_image):
        cls_ind = int(labels[i])
        bbox_targets[i, cls_ind * 4:(cls_ind + 1) * 4] = targets[i]
        bbox_weights[i, cls_ind * 4:(cls_ind + 1) * 4] = 1
    return rois, labels, bbox_targets, bbox_weights
|
python
|
def sample_rois(rois, gt_boxes, num_classes, rois_per_image, fg_rois_per_image, fg_overlap, box_stds):
    """
    generate random sample of ROIs comprising foreground and background examples
    :param rois: [n, 5] (batch_index, x1, y1, x2, y2)
    :param gt_boxes: [n, 5] (x1, y1, x2, y2, cls)
    :param num_classes: number of classes
    :param rois_per_image: total roi number
    :param fg_rois_per_image: foreground roi number
    :param fg_overlap: overlap threshold for fg rois
    :param box_stds: std var of bbox reg
    :return: (rois, labels, bbox_targets, bbox_weights)
    """
    # assign each roi to the ground-truth box it overlaps most
    overlaps = bbox_overlaps(rois[:, 1:], gt_boxes[:, :4])
    gt_assignment = overlaps.argmax(axis=1)
    labels = gt_boxes[gt_assignment, 4]
    max_overlaps = overlaps.max(axis=1)
    # select foreground RoI with FG_THRESH overlap
    fg_indexes = np.where(max_overlaps >= fg_overlap)[0]
    # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs
    fg_rois_this_image = min(fg_rois_per_image, len(fg_indexes))
    # sample foreground regions without replacement
    if len(fg_indexes) > fg_rois_this_image:
        fg_indexes = np.random.choice(fg_indexes, size=fg_rois_this_image, replace=False)
    # select background RoIs as those within [0, FG_THRESH)
    bg_indexes = np.where(max_overlaps < fg_overlap)[0]
    # compute number of background RoIs to take from this image (guarding against there being fewer than desired)
    bg_rois_this_image = rois_per_image - fg_rois_this_image
    bg_rois_this_image = min(bg_rois_this_image, len(bg_indexes))
    # sample bg rois without replacement
    if len(bg_indexes) > bg_rois_this_image:
        bg_indexes = np.random.choice(bg_indexes, size=bg_rois_this_image, replace=False)
    # indexes selected
    keep_indexes = np.append(fg_indexes, bg_indexes)
    # pad more bg rois to ensure a fixed minibatch size
    # NOTE(review): if bg_indexes is empty while keep_indexes is still short,
    # gap stays 0 and this loop never terminates -- confirm callers guarantee
    # at least one background roi per image.
    while len(keep_indexes) < rois_per_image:
        gap = min(len(bg_indexes), rois_per_image - len(keep_indexes))
        gap_indexes = np.random.choice(range(len(bg_indexes)), size=gap, replace=False)
        keep_indexes = np.append(keep_indexes, bg_indexes[gap_indexes])
    # sample rois and labels
    rois = rois[keep_indexes]
    labels = labels[keep_indexes]
    # set labels of bg rois to be 0
    labels[fg_rois_this_image:] = 0
    # load or compute bbox_target
    targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4], box_stds=box_stds)
    bbox_targets = np.zeros((rois_per_image, 4 * num_classes), dtype=np.float32)
    bbox_weights = np.zeros((rois_per_image, 4 * num_classes), dtype=np.float32)
    # only foreground rois carry regression targets; each class owns a 4-wide slot
    for i in range(fg_rois_this_image):
        cls_ind = int(labels[i])
        bbox_targets[i, cls_ind * 4:(cls_ind + 1) * 4] = targets[i]
        bbox_weights[i, cls_ind * 4:(cls_ind + 1) * 4] = 1
    return rois, labels, bbox_targets, bbox_weights
|
[
"def",
"sample_rois",
"(",
"rois",
",",
"gt_boxes",
",",
"num_classes",
",",
"rois_per_image",
",",
"fg_rois_per_image",
",",
"fg_overlap",
",",
"box_stds",
")",
":",
"overlaps",
"=",
"bbox_overlaps",
"(",
"rois",
"[",
":",
",",
"1",
":",
"]",
",",
"gt_boxes",
"[",
":",
",",
":",
"4",
"]",
")",
"gt_assignment",
"=",
"overlaps",
".",
"argmax",
"(",
"axis",
"=",
"1",
")",
"labels",
"=",
"gt_boxes",
"[",
"gt_assignment",
",",
"4",
"]",
"max_overlaps",
"=",
"overlaps",
".",
"max",
"(",
"axis",
"=",
"1",
")",
"# select foreground RoI with FG_THRESH overlap",
"fg_indexes",
"=",
"np",
".",
"where",
"(",
"max_overlaps",
">=",
"fg_overlap",
")",
"[",
"0",
"]",
"# guard against the case when an image has fewer than fg_rois_per_image foreground RoIs",
"fg_rois_this_image",
"=",
"min",
"(",
"fg_rois_per_image",
",",
"len",
"(",
"fg_indexes",
")",
")",
"# sample foreground regions without replacement",
"if",
"len",
"(",
"fg_indexes",
")",
">",
"fg_rois_this_image",
":",
"fg_indexes",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"fg_indexes",
",",
"size",
"=",
"fg_rois_this_image",
",",
"replace",
"=",
"False",
")",
"# select background RoIs as those within [0, FG_THRESH)",
"bg_indexes",
"=",
"np",
".",
"where",
"(",
"max_overlaps",
"<",
"fg_overlap",
")",
"[",
"0",
"]",
"# compute number of background RoIs to take from this image (guarding against there being fewer than desired)",
"bg_rois_this_image",
"=",
"rois_per_image",
"-",
"fg_rois_this_image",
"bg_rois_this_image",
"=",
"min",
"(",
"bg_rois_this_image",
",",
"len",
"(",
"bg_indexes",
")",
")",
"# sample bg rois without replacement",
"if",
"len",
"(",
"bg_indexes",
")",
">",
"bg_rois_this_image",
":",
"bg_indexes",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"bg_indexes",
",",
"size",
"=",
"bg_rois_this_image",
",",
"replace",
"=",
"False",
")",
"# indexes selected",
"keep_indexes",
"=",
"np",
".",
"append",
"(",
"fg_indexes",
",",
"bg_indexes",
")",
"# pad more bg rois to ensure a fixed minibatch size",
"while",
"len",
"(",
"keep_indexes",
")",
"<",
"rois_per_image",
":",
"gap",
"=",
"min",
"(",
"len",
"(",
"bg_indexes",
")",
",",
"rois_per_image",
"-",
"len",
"(",
"keep_indexes",
")",
")",
"gap_indexes",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"range",
"(",
"len",
"(",
"bg_indexes",
")",
")",
",",
"size",
"=",
"gap",
",",
"replace",
"=",
"False",
")",
"keep_indexes",
"=",
"np",
".",
"append",
"(",
"keep_indexes",
",",
"bg_indexes",
"[",
"gap_indexes",
"]",
")",
"# sample rois and labels",
"rois",
"=",
"rois",
"[",
"keep_indexes",
"]",
"labels",
"=",
"labels",
"[",
"keep_indexes",
"]",
"# set labels of bg rois to be 0",
"labels",
"[",
"fg_rois_this_image",
":",
"]",
"=",
"0",
"# load or compute bbox_target",
"targets",
"=",
"bbox_transform",
"(",
"rois",
"[",
":",
",",
"1",
":",
"]",
",",
"gt_boxes",
"[",
"gt_assignment",
"[",
"keep_indexes",
"]",
",",
":",
"4",
"]",
",",
"box_stds",
"=",
"box_stds",
")",
"bbox_targets",
"=",
"np",
".",
"zeros",
"(",
"(",
"rois_per_image",
",",
"4",
"*",
"num_classes",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"bbox_weights",
"=",
"np",
".",
"zeros",
"(",
"(",
"rois_per_image",
",",
"4",
"*",
"num_classes",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"i",
"in",
"range",
"(",
"fg_rois_this_image",
")",
":",
"cls_ind",
"=",
"int",
"(",
"labels",
"[",
"i",
"]",
")",
"bbox_targets",
"[",
"i",
",",
"cls_ind",
"*",
"4",
":",
"(",
"cls_ind",
"+",
"1",
")",
"*",
"4",
"]",
"=",
"targets",
"[",
"i",
"]",
"bbox_weights",
"[",
"i",
",",
"cls_ind",
"*",
"4",
":",
"(",
"cls_ind",
"+",
"1",
")",
"*",
"4",
"]",
"=",
"1",
"return",
"rois",
",",
"labels",
",",
"bbox_targets",
",",
"bbox_weights"
] |
generate random sample of ROIs comprising foreground and background examples
:param rois: [n, 5] (batch_index, x1, y1, x2, y2)
:param gt_boxes: [n, 5] (x1, y1, x2, y2, cls)
:param num_classes: number of classes
:param rois_per_image: total roi number
:param fg_rois_per_image: foreground roi number
:param fg_overlap: overlap threshold for fg rois
:param box_stds: std var of bbox reg
:return: (rois, labels, bbox_targets, bbox_weights)
|
[
"generate",
"random",
"sample",
"of",
"ROIs",
"comprising",
"foreground",
"and",
"background",
"examples",
":",
"param",
"rois",
":",
"[",
"n",
"5",
"]",
"(",
"batch_index",
"x1",
"y1",
"x2",
"y2",
")",
":",
"param",
"gt_boxes",
":",
"[",
"n",
"5",
"]",
"(",
"x1",
"y1",
"x2",
"y2",
"cls",
")",
":",
"param",
"num_classes",
":",
"number",
"of",
"classes",
":",
"param",
"rois_per_image",
":",
"total",
"roi",
"number",
":",
"param",
"fg_rois_per_image",
":",
"foreground",
"roi",
"number",
":",
"param",
"fg_overlap",
":",
"overlap",
"threshold",
"for",
"fg",
"rois",
":",
"param",
"box_stds",
":",
"std",
"var",
"of",
"bbox",
"reg",
":",
"return",
":",
"(",
"rois",
"labels",
"bbox_targets",
"bbox_weights",
")"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rcnn/symnet/proposal_target.py#L28-L85
|
train
|
apache/incubator-mxnet
|
python/mxnet/operator.py
|
register
|
def register(reg_name):
"""Register a subclass of CustomOpProp to the registry with name reg_name."""
def do_register(prop_cls):
"""Register a subclass of CustomOpProp to the registry."""
fb_functype = CFUNCTYPE(c_int, c_int, POINTER(c_void_p), POINTER(c_int),
POINTER(c_int), c_int, c_void_p)
del_functype = CFUNCTYPE(c_int, c_void_p)
infershape_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int),
POINTER(POINTER(mx_int)), c_void_p)
infertype_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), c_void_p)
inferstorage_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), c_void_p)
inferstorage_backward_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), \
POINTER(c_int), c_void_p)
list_functype = CFUNCTYPE(c_int, POINTER(POINTER(POINTER(c_char))), c_void_p)
deps_functype = CFUNCTYPE(c_int, c_int_p, c_int_p, c_int_p,
c_int_p, POINTER(c_int_p), c_void_p)
createop_functype = CFUNCTYPE(c_int, c_char_p, c_int, POINTER(POINTER(mx_uint)),
POINTER(c_int), POINTER(c_int),
POINTER(MXCallbackList), c_void_p)
req_enum = ('null', 'write', 'inplace', 'add')
def creator(op_type, argc, keys, vals, ret):
"""internal function"""
assert py_str(op_type) == reg_name
kwargs = dict([(py_str(keys[i]), py_str(vals[i])) for i in range(argc)])
op_prop = prop_cls(**kwargs)
def infer_shape_entry(num_tensor, tensor_dims,
tensor_shapes, _):
"""C Callback for ``CustomOpProp::InferShape``."""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
shapes = [[tensor_shapes[i][j] for j in range(tensor_dims[i])]
for i in range(n_in)]
ret = op_prop.infer_shape(shapes)
if len(ret) == 2:
ishape, oshape = ret
ashape = []
elif len(ret) == 3:
ishape, oshape, ashape = ret
else:
raise AssertionError("infer_shape must return 2 or 3 lists")
assert len(oshape) == n_out, \
"InferShape Error: expecting %d entries in returned output " \
"shapes, got %d."%(n_out, len(oshape))
assert len(ishape) == n_in, \
"InferShape Error: expecting %d entries in returned input " \
"shapes, got %d."%(n_in, len(ishape))
assert len(ashape) == n_aux, \
"InferShape Error: expecting %d entries in returned aux state " \
"shapes, got %d."%(n_aux, len(ashape))
rshape = list(ishape) + list(oshape) + list(ashape)
for i in range(n_in+n_out+n_aux):
tensor_shapes[i] = cast(c_array_buf(mx_int,
array('i', rshape[i])),
POINTER(mx_int))
tensor_dims[i] = len(rshape[i])
infer_shape_entry._ref_holder = [tensor_shapes]
except Exception:
print('Error in %s.infer_shape: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_storage_type_backward_entry(num_tensor, tensor_stypes, tags, _):
# pylint: disable=C0301
"""C Callback for CustomOpProp::InferStorageTypeBackward"""
try:
tensors = [[] for i in range(5)]
for i in range(num_tensor):
tensors[tags[i]].append(_STORAGE_TYPE_ID_TO_STR[tensor_stypes[i]])
# Ordering of stypes: ograd, input, output, igrad, aux
tensors = [tensors[3], tensors[0], tensors[1], tensors[2], tensors[4]]
ret = op_prop.infer_storage_type_backward(tensors[0],
tensors[1],
tensors[2],
tensors[3],
tensors[4])
if len(ret) == 4:
ret += []
elif len(ret) == 5:
pass
else:
raise AssertionError("infer_storage_type_backward must return 4 or 5 lists")
assert len(ret[0]) == len(tensors[0]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned output gradient " \
"stypes, got %d."%(len(tensors[0]), len(ret[0]))
assert len(ret[1]) == len(tensors[1]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned input stypes, " \
"got %d."%(len(tensors[1]), len(ret[1]))
assert len(ret[2]) == len(tensors[2]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned output stypes, " \
"got %d."%(len(tensors[2]), len(ret[2]))
assert len(ret[3]) == len(tensors[3]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned input gradient stypes, " \
"got %d."%(len(tensors[3]), len(ret[3]))
assert len(ret[4]) == len(tensors[4]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned aux stypes, " \
"got %d."%(len(tensors[4]), len(ret[4]))
rstype = []
for i, ret_list in enumerate(ret):
rstype.extend(ret_list)
for i, stype in enumerate(rstype):
assert stype != _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_UNDEFINED], \
"stype should not be undefined"
assert stype in _STORAGE_TYPE_STR_TO_ID, \
"Provided stype: %s is not valid " \
"valid stypes are %s, %s, %s"%(stype,
_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT],
_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_ROW_SPARSE],
_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_CSR])
tensor_stypes[i] = _STORAGE_TYPE_STR_TO_ID[stype]
infer_storage_type_backward_entry._ref_holder = [tensor_stypes]
except Exception:
print('Error in %s.infer_type: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_storage_type_entry(num_tensor, tensor_stypes, _):
"""C Callback for CustomOpProp::InferStorageType"""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
stypes = [_STORAGE_TYPE_ID_TO_STR[tensor_stypes[i]] for i in range(n_in)]
ret = op_prop.infer_storage_type(stypes)
if len(ret) == 2:
istype, ostype = ret
astype = []
elif len(ret) == 3:
istype, ostype, astype = ret
else:
raise AssertionError("infer_storage_type must return 2 or 3 lists")
assert len(ostype) == n_out, \
"InferStorageType Error: expecting %d entries in returned output " \
"stypes, got %d."%(n_out, len(ostype))
assert len(istype) == n_in, \
"InferStorageType Error: expecting %d entries in returned input " \
"stypes, got %d."%(n_in, len(istype))
assert len(astype) == n_aux, \
"InferStorageType Error: expecting %d entries in returned aux state " \
"stypes, got %d."%(n_aux, len(astype))
rtype = list(istype) + list(ostype) + list(astype)
for i, dtype in enumerate(rtype):
tensor_stypes[i] = _STORAGE_TYPE_STR_TO_ID[dtype]
infer_storage_type_entry._ref_holder = [tensor_stypes]
except Exception:
print('Error in %s.infer_type: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_type_entry(num_tensor, tensor_types, _):
"""C Callback for CustomOpProp::InferType"""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
types = [_DTYPE_MX_TO_NP[tensor_types[i]] for i in range(n_in)]
ret = op_prop.infer_type(types)
if len(ret) == 2:
itype, otype = ret
atype = []
elif len(ret) == 3:
itype, otype, atype = ret
else:
raise AssertionError("infer_type must return 2 or 3 lists")
assert len(otype) == n_out, \
"InferType Error: expecting %d entries in returned output " \
"types, got %d."%(n_out, len(otype))
assert len(itype) == n_in, \
"InferType Error: expecting %d entries in returned input " \
"types, got %d."%(n_in, len(itype))
assert len(atype) == n_aux, \
"InferType Error: expecting %d entries in returned aux state " \
"types, got %d."%(n_aux, len(atype))
rtype = list(itype) + list(otype) + list(atype)
for i, dtype in enumerate(rtype):
tensor_types[i] = _DTYPE_NP_TO_MX[dtype]
infer_type_entry._ref_holder = [tensor_types]
except Exception:
print('Error in %s.infer_type: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_outputs_entry(out, _):
"""C Callback for CustomOpProp::ListOutputs"""
try:
ret = op_prop.list_outputs()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_outputs_entry._ref_holder = [out]
except Exception:
print('Error in %s.list_outputs: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_arguments_entry(out, _):
"""C Callback for CustomOpProp::ListArguments"""
try:
ret = op_prop.list_arguments()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_arguments_entry._ref_holder = [out]
except Exception:
print('Error in %s.list_arguments: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_auxiliary_states_entry(out, _):
"""C Callback for CustomOpProp::ListAuxiliaryStates"""
try:
ret = op_prop.list_auxiliary_states()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_auxiliary_states_entry._ref_holder = [out]
except Exception:
tb = traceback.format_exc()
print('Error in %s.list_auxiliary_states: %s' % (reg_name, tb))
return False
return True
def declare_backward_dependency_entry(out_grad, in_data, out_data, num_dep, deps, _):
"""C Callback for CustomOpProp::DeclareBacwardDependency"""
try:
out_grad = [out_grad[i] for i in range(len(op_prop.list_outputs()))]
in_data = [in_data[i] for i in range(len(op_prop.list_arguments()))]
out_data = [out_data[i] for i in range(len(op_prop.list_outputs()))]
rdeps = op_prop.declare_backward_dependency(out_grad, in_data, out_data)
num_dep[0] = len(rdeps)
_registry.result_deps = set()
for dep in rdeps:
_registry.result_deps.add(dep)
rdeps = cast(c_array_buf(c_int, array('i', rdeps)), c_int_p)
deps[0] = rdeps
declare_backward_dependency_entry._ref_holder = [deps]
except Exception:
tb = traceback.format_exc()
print('Error in %s.declare_backward_dependency: %s' % (reg_name, tb))
return False
return True
def create_operator_entry(ctx, num_inputs, shapes, ndims, dtypes, ret, _):
"""C Callback for CustomOpProp::CreateOperator"""
try:
ctx = py_str(ctx)
sep = ctx.find('(')
ctx = context.Context(ctx[:sep], int(ctx[sep+1:-1]))
ndims = [ndims[i] for i in range(num_inputs)]
shapes = [[shapes[i][j] for j in range(ndims[i])] for i in range(num_inputs)]
dtypes = [dtypes[i] for i in range(num_inputs)]
op = op_prop.create_operator(ctx, shapes, dtypes)
def forward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
"""C Callback for CustomOp::Forward"""
try:
tensors = [[] for i in range(5)]
for i in range(num_ndarray):
if tags[i] == 1 or tags[i] == 4:
tensors[tags[i]].append(_ndarray_cls(cast(ndarraies[i],
NDArrayHandle),
writable=True))
else:
tensors[tags[i]].append(_ndarray_cls(cast(ndarraies[i],
NDArrayHandle),
writable=False))
reqs = [req_enum[reqs[i]] for i in range(len(tensors[1]))]
with ctx:
op.forward(is_train=is_train, req=reqs,
in_data=tensors[0], out_data=tensors[1],
aux=tensors[4])
except Exception:
print('Error in CustomOp.forward: %s' % traceback.format_exc())
return False
return True
def backward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
"""C Callback for CustomOp::Backward"""
# pylint: disable=W0613
try:
tensors = [[] for i in range(5)]
num_outputs = len(op_prop.list_outputs())
num_args = len(op_prop.list_arguments())
for i in range(num_ndarray):
if i in _registry.result_deps or i >= (num_outputs * 2 + num_args):
# If it is a backward dependency or output or aux:
# Set stype as undefined so that it returns
# ndarray based on existing stype
stype = _STORAGE_TYPE_UNDEFINED
else:
# If it is some input, output or out grad ndarray not part of
# backward dependency it is empty and thus the ndarray should
# be set to default
stype = _STORAGE_TYPE_DEFAULT
if tags[i] == 2 or tags[i] == 4:
tensors[tags[i]].append(_ndarray_cls(cast(ndarraies[i],
NDArrayHandle),
writable=True,
stype=stype))
else:
tensors[tags[i]].append(_ndarray_cls(cast(ndarraies[i],
NDArrayHandle),
writable=False,
stype=stype))
reqs = [req_enum[reqs[i]] for i in range(len(tensors[2]))]
with ctx:
op.backward(req=reqs,
in_data=tensors[0], out_data=tensors[1],
in_grad=tensors[2], out_grad=tensors[3],
aux=tensors[4])
except Exception:
print('Error in CustomOp.backward: %s' % traceback.format_exc())
return False
return True
cur = _registry.inc()
def delete_entry(_):
"""C Callback for CustomOp::del"""
try:
del _registry.ref_holder[cur]
except Exception:
print('Error in CustomOp.delete: %s' % traceback.format_exc())
return False
return True
callbacks = [del_functype(delete_entry),
fb_functype(forward_entry),
fb_functype(backward_entry)]
callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
contexts = [None, None, None]
ret[0] = MXCallbackList(c_int(len(callbacks)),
cast(c_array(CFUNCTYPE(c_int), callbacks),
POINTER(CFUNCTYPE(c_int))),
cast(c_array(c_void_p, contexts),
POINTER(c_void_p)))
op._ref_holder = [ret]
_registry.ref_holder[cur] = op
except Exception:
print('Error in %s.create_operator: %s' % (reg_name, traceback.format_exc()))
return False
return True
cur = _registry.inc()
def delete_entry(_):
"""C Callback for CustomOpProp::del"""
try:
del _registry.ref_holder[cur]
except Exception:
print('Error in CustomOpProp.delete: %s' % traceback.format_exc())
return False
return True
callbacks = [del_functype(delete_entry),
list_functype(list_arguments_entry),
list_functype(list_outputs_entry),
list_functype(list_auxiliary_states_entry),
infershape_functype(infer_shape_entry),
deps_functype(declare_backward_dependency_entry),
createop_functype(create_operator_entry),
infertype_functype(infer_type_entry),
inferstorage_functype(infer_storage_type_entry),
inferstorage_backward_functype(infer_storage_type_backward_entry)]
callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
contexts = [None]*len(callbacks)
ret[0] = MXCallbackList(c_int(len(callbacks)),
cast(c_array(CFUNCTYPE(c_int), callbacks),
POINTER(CFUNCTYPE(c_int))),
cast(c_array(c_void_p, contexts),
POINTER(c_void_p)))
op_prop._ref_holder = [ret]
_registry.ref_holder[cur] = op_prop
return True
creator_functype = CFUNCTYPE(c_int, c_char_p, c_int, POINTER(c_char_p),
POINTER(c_char_p), POINTER(MXCallbackList))
creator_func = creator_functype(creator)
check_call(_LIB.MXCustomOpRegister(c_str(reg_name), creator_func))
cur = _registry.inc()
_registry.ref_holder[cur] = creator_func
return prop_cls
return do_register
|
python
|
def register(reg_name):
"""Register a subclass of CustomOpProp to the registry with name reg_name."""
def do_register(prop_cls):
"""Register a subclass of CustomOpProp to the registry."""
fb_functype = CFUNCTYPE(c_int, c_int, POINTER(c_void_p), POINTER(c_int),
POINTER(c_int), c_int, c_void_p)
del_functype = CFUNCTYPE(c_int, c_void_p)
infershape_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int),
POINTER(POINTER(mx_int)), c_void_p)
infertype_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), c_void_p)
inferstorage_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), c_void_p)
inferstorage_backward_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), \
POINTER(c_int), c_void_p)
list_functype = CFUNCTYPE(c_int, POINTER(POINTER(POINTER(c_char))), c_void_p)
deps_functype = CFUNCTYPE(c_int, c_int_p, c_int_p, c_int_p,
c_int_p, POINTER(c_int_p), c_void_p)
createop_functype = CFUNCTYPE(c_int, c_char_p, c_int, POINTER(POINTER(mx_uint)),
POINTER(c_int), POINTER(c_int),
POINTER(MXCallbackList), c_void_p)
req_enum = ('null', 'write', 'inplace', 'add')
def creator(op_type, argc, keys, vals, ret):
"""internal function"""
assert py_str(op_type) == reg_name
kwargs = dict([(py_str(keys[i]), py_str(vals[i])) for i in range(argc)])
op_prop = prop_cls(**kwargs)
def infer_shape_entry(num_tensor, tensor_dims,
tensor_shapes, _):
"""C Callback for ``CustomOpProp::InferShape``."""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
shapes = [[tensor_shapes[i][j] for j in range(tensor_dims[i])]
for i in range(n_in)]
ret = op_prop.infer_shape(shapes)
if len(ret) == 2:
ishape, oshape = ret
ashape = []
elif len(ret) == 3:
ishape, oshape, ashape = ret
else:
raise AssertionError("infer_shape must return 2 or 3 lists")
assert len(oshape) == n_out, \
"InferShape Error: expecting %d entries in returned output " \
"shapes, got %d."%(n_out, len(oshape))
assert len(ishape) == n_in, \
"InferShape Error: expecting %d entries in returned input " \
"shapes, got %d."%(n_in, len(ishape))
assert len(ashape) == n_aux, \
"InferShape Error: expecting %d entries in returned aux state " \
"shapes, got %d."%(n_aux, len(ashape))
rshape = list(ishape) + list(oshape) + list(ashape)
for i in range(n_in+n_out+n_aux):
tensor_shapes[i] = cast(c_array_buf(mx_int,
array('i', rshape[i])),
POINTER(mx_int))
tensor_dims[i] = len(rshape[i])
infer_shape_entry._ref_holder = [tensor_shapes]
except Exception:
print('Error in %s.infer_shape: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_storage_type_backward_entry(num_tensor, tensor_stypes, tags, _):
# pylint: disable=C0301
"""C Callback for CustomOpProp::InferStorageTypeBackward"""
try:
tensors = [[] for i in range(5)]
for i in range(num_tensor):
tensors[tags[i]].append(_STORAGE_TYPE_ID_TO_STR[tensor_stypes[i]])
# Ordering of stypes: ograd, input, output, igrad, aux
tensors = [tensors[3], tensors[0], tensors[1], tensors[2], tensors[4]]
ret = op_prop.infer_storage_type_backward(tensors[0],
tensors[1],
tensors[2],
tensors[3],
tensors[4])
if len(ret) == 4:
ret += []
elif len(ret) == 5:
pass
else:
raise AssertionError("infer_storage_type_backward must return 4 or 5 lists")
assert len(ret[0]) == len(tensors[0]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned output gradient " \
"stypes, got %d."%(len(tensors[0]), len(ret[0]))
assert len(ret[1]) == len(tensors[1]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned input stypes, " \
"got %d."%(len(tensors[1]), len(ret[1]))
assert len(ret[2]) == len(tensors[2]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned output stypes, " \
"got %d."%(len(tensors[2]), len(ret[2]))
assert len(ret[3]) == len(tensors[3]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned input gradient stypes, " \
"got %d."%(len(tensors[3]), len(ret[3]))
assert len(ret[4]) == len(tensors[4]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned aux stypes, " \
"got %d."%(len(tensors[4]), len(ret[4]))
rstype = []
for i, ret_list in enumerate(ret):
rstype.extend(ret_list)
for i, stype in enumerate(rstype):
assert stype != _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_UNDEFINED], \
"stype should not be undefined"
assert stype in _STORAGE_TYPE_STR_TO_ID, \
"Provided stype: %s is not valid " \
"valid stypes are %s, %s, %s"%(stype,
_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT],
_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_ROW_SPARSE],
_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_CSR])
tensor_stypes[i] = _STORAGE_TYPE_STR_TO_ID[stype]
infer_storage_type_backward_entry._ref_holder = [tensor_stypes]
except Exception:
print('Error in %s.infer_type: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_storage_type_entry(num_tensor, tensor_stypes, _):
"""C Callback for CustomOpProp::InferStorageType"""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
stypes = [_STORAGE_TYPE_ID_TO_STR[tensor_stypes[i]] for i in range(n_in)]
ret = op_prop.infer_storage_type(stypes)
if len(ret) == 2:
istype, ostype = ret
astype = []
elif len(ret) == 3:
istype, ostype, astype = ret
else:
raise AssertionError("infer_storage_type must return 2 or 3 lists")
assert len(ostype) == n_out, \
"InferStorageType Error: expecting %d entries in returned output " \
"stypes, got %d."%(n_out, len(ostype))
assert len(istype) == n_in, \
"InferStorageType Error: expecting %d entries in returned input " \
"stypes, got %d."%(n_in, len(istype))
assert len(astype) == n_aux, \
"InferStorageType Error: expecting %d entries in returned aux state " \
"stypes, got %d."%(n_aux, len(astype))
rtype = list(istype) + list(ostype) + list(astype)
for i, dtype in enumerate(rtype):
tensor_stypes[i] = _STORAGE_TYPE_STR_TO_ID[dtype]
infer_storage_type_entry._ref_holder = [tensor_stypes]
except Exception:
print('Error in %s.infer_type: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_type_entry(num_tensor, tensor_types, _):
"""C Callback for CustomOpProp::InferType"""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
types = [_DTYPE_MX_TO_NP[tensor_types[i]] for i in range(n_in)]
ret = op_prop.infer_type(types)
if len(ret) == 2:
itype, otype = ret
atype = []
elif len(ret) == 3:
itype, otype, atype = ret
else:
raise AssertionError("infer_type must return 2 or 3 lists")
assert len(otype) == n_out, \
"InferType Error: expecting %d entries in returned output " \
"types, got %d."%(n_out, len(otype))
assert len(itype) == n_in, \
"InferType Error: expecting %d entries in returned input " \
"types, got %d."%(n_in, len(itype))
assert len(atype) == n_aux, \
"InferType Error: expecting %d entries in returned aux state " \
"types, got %d."%(n_aux, len(atype))
rtype = list(itype) + list(otype) + list(atype)
for i, dtype in enumerate(rtype):
tensor_types[i] = _DTYPE_NP_TO_MX[dtype]
infer_type_entry._ref_holder = [tensor_types]
except Exception:
print('Error in %s.infer_type: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_outputs_entry(out, _):
"""C Callback for CustomOpProp::ListOutputs"""
try:
ret = op_prop.list_outputs()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_outputs_entry._ref_holder = [out]
except Exception:
print('Error in %s.list_outputs: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_arguments_entry(out, _):
"""C Callback for CustomOpProp::ListArguments"""
try:
ret = op_prop.list_arguments()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_arguments_entry._ref_holder = [out]
except Exception:
print('Error in %s.list_arguments: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_auxiliary_states_entry(out, _):
"""C Callback for CustomOpProp::ListAuxiliaryStates"""
try:
ret = op_prop.list_auxiliary_states()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_auxiliary_states_entry._ref_holder = [out]
except Exception:
tb = traceback.format_exc()
print('Error in %s.list_auxiliary_states: %s' % (reg_name, tb))
return False
return True
def declare_backward_dependency_entry(out_grad, in_data, out_data, num_dep, deps, _):
"""C Callback for CustomOpProp::DeclareBacwardDependency"""
try:
out_grad = [out_grad[i] for i in range(len(op_prop.list_outputs()))]
in_data = [in_data[i] for i in range(len(op_prop.list_arguments()))]
out_data = [out_data[i] for i in range(len(op_prop.list_outputs()))]
rdeps = op_prop.declare_backward_dependency(out_grad, in_data, out_data)
num_dep[0] = len(rdeps)
_registry.result_deps = set()
for dep in rdeps:
_registry.result_deps.add(dep)
rdeps = cast(c_array_buf(c_int, array('i', rdeps)), c_int_p)
deps[0] = rdeps
declare_backward_dependency_entry._ref_holder = [deps]
except Exception:
tb = traceback.format_exc()
print('Error in %s.declare_backward_dependency: %s' % (reg_name, tb))
return False
return True
def create_operator_entry(ctx, num_inputs, shapes, ndims, dtypes, ret, _):
"""C Callback for CustomOpProp::CreateOperator"""
try:
ctx = py_str(ctx)
sep = ctx.find('(')
ctx = context.Context(ctx[:sep], int(ctx[sep+1:-1]))
ndims = [ndims[i] for i in range(num_inputs)]
shapes = [[shapes[i][j] for j in range(ndims[i])] for i in range(num_inputs)]
dtypes = [dtypes[i] for i in range(num_inputs)]
op = op_prop.create_operator(ctx, shapes, dtypes)
def forward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
"""C Callback for CustomOp::Forward"""
try:
tensors = [[] for i in range(5)]
for i in range(num_ndarray):
if tags[i] == 1 or tags[i] == 4:
tensors[tags[i]].append(_ndarray_cls(cast(ndarraies[i],
NDArrayHandle),
writable=True))
else:
tensors[tags[i]].append(_ndarray_cls(cast(ndarraies[i],
NDArrayHandle),
writable=False))
reqs = [req_enum[reqs[i]] for i in range(len(tensors[1]))]
with ctx:
op.forward(is_train=is_train, req=reqs,
in_data=tensors[0], out_data=tensors[1],
aux=tensors[4])
except Exception:
print('Error in CustomOp.forward: %s' % traceback.format_exc())
return False
return True
def backward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
"""C Callback for CustomOp::Backward"""
# pylint: disable=W0613
try:
tensors = [[] for i in range(5)]
num_outputs = len(op_prop.list_outputs())
num_args = len(op_prop.list_arguments())
for i in range(num_ndarray):
if i in _registry.result_deps or i >= (num_outputs * 2 + num_args):
# If it is a backward dependency or output or aux:
# Set stype as undefined so that it returns
# ndarray based on existing stype
stype = _STORAGE_TYPE_UNDEFINED
else:
# If it is some input, output or out grad ndarray not part of
# backward dependency it is empty and thus the ndarray should
# be set to default
stype = _STORAGE_TYPE_DEFAULT
if tags[i] == 2 or tags[i] == 4:
tensors[tags[i]].append(_ndarray_cls(cast(ndarraies[i],
NDArrayHandle),
writable=True,
stype=stype))
else:
tensors[tags[i]].append(_ndarray_cls(cast(ndarraies[i],
NDArrayHandle),
writable=False,
stype=stype))
reqs = [req_enum[reqs[i]] for i in range(len(tensors[2]))]
with ctx:
op.backward(req=reqs,
in_data=tensors[0], out_data=tensors[1],
in_grad=tensors[2], out_grad=tensors[3],
aux=tensors[4])
except Exception:
print('Error in CustomOp.backward: %s' % traceback.format_exc())
return False
return True
cur = _registry.inc()
def delete_entry(_):
"""C Callback for CustomOp::del"""
try:
del _registry.ref_holder[cur]
except Exception:
print('Error in CustomOp.delete: %s' % traceback.format_exc())
return False
return True
callbacks = [del_functype(delete_entry),
fb_functype(forward_entry),
fb_functype(backward_entry)]
callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
contexts = [None, None, None]
ret[0] = MXCallbackList(c_int(len(callbacks)),
cast(c_array(CFUNCTYPE(c_int), callbacks),
POINTER(CFUNCTYPE(c_int))),
cast(c_array(c_void_p, contexts),
POINTER(c_void_p)))
op._ref_holder = [ret]
_registry.ref_holder[cur] = op
except Exception:
print('Error in %s.create_operator: %s' % (reg_name, traceback.format_exc()))
return False
return True
cur = _registry.inc()
def delete_entry(_):
"""C Callback for CustomOpProp::del"""
try:
del _registry.ref_holder[cur]
except Exception:
print('Error in CustomOpProp.delete: %s' % traceback.format_exc())
return False
return True
callbacks = [del_functype(delete_entry),
list_functype(list_arguments_entry),
list_functype(list_outputs_entry),
list_functype(list_auxiliary_states_entry),
infershape_functype(infer_shape_entry),
deps_functype(declare_backward_dependency_entry),
createop_functype(create_operator_entry),
infertype_functype(infer_type_entry),
inferstorage_functype(infer_storage_type_entry),
inferstorage_backward_functype(infer_storage_type_backward_entry)]
callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
contexts = [None]*len(callbacks)
ret[0] = MXCallbackList(c_int(len(callbacks)),
cast(c_array(CFUNCTYPE(c_int), callbacks),
POINTER(CFUNCTYPE(c_int))),
cast(c_array(c_void_p, contexts),
POINTER(c_void_p)))
op_prop._ref_holder = [ret]
_registry.ref_holder[cur] = op_prop
return True
creator_functype = CFUNCTYPE(c_int, c_char_p, c_int, POINTER(c_char_p),
POINTER(c_char_p), POINTER(MXCallbackList))
creator_func = creator_functype(creator)
check_call(_LIB.MXCustomOpRegister(c_str(reg_name), creator_func))
cur = _registry.inc()
_registry.ref_holder[cur] = creator_func
return prop_cls
return do_register
|
[
"def",
"register",
"(",
"reg_name",
")",
":",
"def",
"do_register",
"(",
"prop_cls",
")",
":",
"\"\"\"Register a subclass of CustomOpProp to the registry.\"\"\"",
"fb_functype",
"=",
"CFUNCTYPE",
"(",
"c_int",
",",
"c_int",
",",
"POINTER",
"(",
"c_void_p",
")",
",",
"POINTER",
"(",
"c_int",
")",
",",
"POINTER",
"(",
"c_int",
")",
",",
"c_int",
",",
"c_void_p",
")",
"del_functype",
"=",
"CFUNCTYPE",
"(",
"c_int",
",",
"c_void_p",
")",
"infershape_functype",
"=",
"CFUNCTYPE",
"(",
"c_int",
",",
"c_int",
",",
"POINTER",
"(",
"c_int",
")",
",",
"POINTER",
"(",
"POINTER",
"(",
"mx_int",
")",
")",
",",
"c_void_p",
")",
"infertype_functype",
"=",
"CFUNCTYPE",
"(",
"c_int",
",",
"c_int",
",",
"POINTER",
"(",
"c_int",
")",
",",
"c_void_p",
")",
"inferstorage_functype",
"=",
"CFUNCTYPE",
"(",
"c_int",
",",
"c_int",
",",
"POINTER",
"(",
"c_int",
")",
",",
"c_void_p",
")",
"inferstorage_backward_functype",
"=",
"CFUNCTYPE",
"(",
"c_int",
",",
"c_int",
",",
"POINTER",
"(",
"c_int",
")",
",",
"POINTER",
"(",
"c_int",
")",
",",
"c_void_p",
")",
"list_functype",
"=",
"CFUNCTYPE",
"(",
"c_int",
",",
"POINTER",
"(",
"POINTER",
"(",
"POINTER",
"(",
"c_char",
")",
")",
")",
",",
"c_void_p",
")",
"deps_functype",
"=",
"CFUNCTYPE",
"(",
"c_int",
",",
"c_int_p",
",",
"c_int_p",
",",
"c_int_p",
",",
"c_int_p",
",",
"POINTER",
"(",
"c_int_p",
")",
",",
"c_void_p",
")",
"createop_functype",
"=",
"CFUNCTYPE",
"(",
"c_int",
",",
"c_char_p",
",",
"c_int",
",",
"POINTER",
"(",
"POINTER",
"(",
"mx_uint",
")",
")",
",",
"POINTER",
"(",
"c_int",
")",
",",
"POINTER",
"(",
"c_int",
")",
",",
"POINTER",
"(",
"MXCallbackList",
")",
",",
"c_void_p",
")",
"req_enum",
"=",
"(",
"'null'",
",",
"'write'",
",",
"'inplace'",
",",
"'add'",
")",
"def",
"creator",
"(",
"op_type",
",",
"argc",
",",
"keys",
",",
"vals",
",",
"ret",
")",
":",
"\"\"\"internal function\"\"\"",
"assert",
"py_str",
"(",
"op_type",
")",
"==",
"reg_name",
"kwargs",
"=",
"dict",
"(",
"[",
"(",
"py_str",
"(",
"keys",
"[",
"i",
"]",
")",
",",
"py_str",
"(",
"vals",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"argc",
")",
"]",
")",
"op_prop",
"=",
"prop_cls",
"(",
"*",
"*",
"kwargs",
")",
"def",
"infer_shape_entry",
"(",
"num_tensor",
",",
"tensor_dims",
",",
"tensor_shapes",
",",
"_",
")",
":",
"\"\"\"C Callback for ``CustomOpProp::InferShape``.\"\"\"",
"try",
":",
"n_in",
"=",
"len",
"(",
"op_prop",
".",
"list_arguments",
"(",
")",
")",
"n_out",
"=",
"len",
"(",
"op_prop",
".",
"list_outputs",
"(",
")",
")",
"n_aux",
"=",
"len",
"(",
"op_prop",
".",
"list_auxiliary_states",
"(",
")",
")",
"assert",
"num_tensor",
"==",
"n_in",
"+",
"n_out",
"+",
"n_aux",
"shapes",
"=",
"[",
"[",
"tensor_shapes",
"[",
"i",
"]",
"[",
"j",
"]",
"for",
"j",
"in",
"range",
"(",
"tensor_dims",
"[",
"i",
"]",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"n_in",
")",
"]",
"ret",
"=",
"op_prop",
".",
"infer_shape",
"(",
"shapes",
")",
"if",
"len",
"(",
"ret",
")",
"==",
"2",
":",
"ishape",
",",
"oshape",
"=",
"ret",
"ashape",
"=",
"[",
"]",
"elif",
"len",
"(",
"ret",
")",
"==",
"3",
":",
"ishape",
",",
"oshape",
",",
"ashape",
"=",
"ret",
"else",
":",
"raise",
"AssertionError",
"(",
"\"infer_shape must return 2 or 3 lists\"",
")",
"assert",
"len",
"(",
"oshape",
")",
"==",
"n_out",
",",
"\"InferShape Error: expecting %d entries in returned output \"",
"\"shapes, got %d.\"",
"%",
"(",
"n_out",
",",
"len",
"(",
"oshape",
")",
")",
"assert",
"len",
"(",
"ishape",
")",
"==",
"n_in",
",",
"\"InferShape Error: expecting %d entries in returned input \"",
"\"shapes, got %d.\"",
"%",
"(",
"n_in",
",",
"len",
"(",
"ishape",
")",
")",
"assert",
"len",
"(",
"ashape",
")",
"==",
"n_aux",
",",
"\"InferShape Error: expecting %d entries in returned aux state \"",
"\"shapes, got %d.\"",
"%",
"(",
"n_aux",
",",
"len",
"(",
"ashape",
")",
")",
"rshape",
"=",
"list",
"(",
"ishape",
")",
"+",
"list",
"(",
"oshape",
")",
"+",
"list",
"(",
"ashape",
")",
"for",
"i",
"in",
"range",
"(",
"n_in",
"+",
"n_out",
"+",
"n_aux",
")",
":",
"tensor_shapes",
"[",
"i",
"]",
"=",
"cast",
"(",
"c_array_buf",
"(",
"mx_int",
",",
"array",
"(",
"'i'",
",",
"rshape",
"[",
"i",
"]",
")",
")",
",",
"POINTER",
"(",
"mx_int",
")",
")",
"tensor_dims",
"[",
"i",
"]",
"=",
"len",
"(",
"rshape",
"[",
"i",
"]",
")",
"infer_shape_entry",
".",
"_ref_holder",
"=",
"[",
"tensor_shapes",
"]",
"except",
"Exception",
":",
"print",
"(",
"'Error in %s.infer_shape: %s'",
"%",
"(",
"reg_name",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"return",
"False",
"return",
"True",
"def",
"infer_storage_type_backward_entry",
"(",
"num_tensor",
",",
"tensor_stypes",
",",
"tags",
",",
"_",
")",
":",
"# pylint: disable=C0301",
"\"\"\"C Callback for CustomOpProp::InferStorageTypeBackward\"\"\"",
"try",
":",
"tensors",
"=",
"[",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"5",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"num_tensor",
")",
":",
"tensors",
"[",
"tags",
"[",
"i",
"]",
"]",
".",
"append",
"(",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"tensor_stypes",
"[",
"i",
"]",
"]",
")",
"# Ordering of stypes: ograd, input, output, igrad, aux",
"tensors",
"=",
"[",
"tensors",
"[",
"3",
"]",
",",
"tensors",
"[",
"0",
"]",
",",
"tensors",
"[",
"1",
"]",
",",
"tensors",
"[",
"2",
"]",
",",
"tensors",
"[",
"4",
"]",
"]",
"ret",
"=",
"op_prop",
".",
"infer_storage_type_backward",
"(",
"tensors",
"[",
"0",
"]",
",",
"tensors",
"[",
"1",
"]",
",",
"tensors",
"[",
"2",
"]",
",",
"tensors",
"[",
"3",
"]",
",",
"tensors",
"[",
"4",
"]",
")",
"if",
"len",
"(",
"ret",
")",
"==",
"4",
":",
"ret",
"+=",
"[",
"]",
"elif",
"len",
"(",
"ret",
")",
"==",
"5",
":",
"pass",
"else",
":",
"raise",
"AssertionError",
"(",
"\"infer_storage_type_backward must return 4 or 5 lists\"",
")",
"assert",
"len",
"(",
"ret",
"[",
"0",
"]",
")",
"==",
"len",
"(",
"tensors",
"[",
"0",
"]",
")",
",",
"\"InferStorageTypeBackward Error: expecting == %d \"",
"\"entries in returned output gradient \"",
"\"stypes, got %d.\"",
"%",
"(",
"len",
"(",
"tensors",
"[",
"0",
"]",
")",
",",
"len",
"(",
"ret",
"[",
"0",
"]",
")",
")",
"assert",
"len",
"(",
"ret",
"[",
"1",
"]",
")",
"==",
"len",
"(",
"tensors",
"[",
"1",
"]",
")",
",",
"\"InferStorageTypeBackward Error: expecting == %d \"",
"\"entries in returned input stypes, \"",
"\"got %d.\"",
"%",
"(",
"len",
"(",
"tensors",
"[",
"1",
"]",
")",
",",
"len",
"(",
"ret",
"[",
"1",
"]",
")",
")",
"assert",
"len",
"(",
"ret",
"[",
"2",
"]",
")",
"==",
"len",
"(",
"tensors",
"[",
"2",
"]",
")",
",",
"\"InferStorageTypeBackward Error: expecting == %d \"",
"\"entries in returned output stypes, \"",
"\"got %d.\"",
"%",
"(",
"len",
"(",
"tensors",
"[",
"2",
"]",
")",
",",
"len",
"(",
"ret",
"[",
"2",
"]",
")",
")",
"assert",
"len",
"(",
"ret",
"[",
"3",
"]",
")",
"==",
"len",
"(",
"tensors",
"[",
"3",
"]",
")",
",",
"\"InferStorageTypeBackward Error: expecting == %d \"",
"\"entries in returned input gradient stypes, \"",
"\"got %d.\"",
"%",
"(",
"len",
"(",
"tensors",
"[",
"3",
"]",
")",
",",
"len",
"(",
"ret",
"[",
"3",
"]",
")",
")",
"assert",
"len",
"(",
"ret",
"[",
"4",
"]",
")",
"==",
"len",
"(",
"tensors",
"[",
"4",
"]",
")",
",",
"\"InferStorageTypeBackward Error: expecting == %d \"",
"\"entries in returned aux stypes, \"",
"\"got %d.\"",
"%",
"(",
"len",
"(",
"tensors",
"[",
"4",
"]",
")",
",",
"len",
"(",
"ret",
"[",
"4",
"]",
")",
")",
"rstype",
"=",
"[",
"]",
"for",
"i",
",",
"ret_list",
"in",
"enumerate",
"(",
"ret",
")",
":",
"rstype",
".",
"extend",
"(",
"ret_list",
")",
"for",
"i",
",",
"stype",
"in",
"enumerate",
"(",
"rstype",
")",
":",
"assert",
"stype",
"!=",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"_STORAGE_TYPE_UNDEFINED",
"]",
",",
"\"stype should not be undefined\"",
"assert",
"stype",
"in",
"_STORAGE_TYPE_STR_TO_ID",
",",
"\"Provided stype: %s is not valid \"",
"\"valid stypes are %s, %s, %s\"",
"%",
"(",
"stype",
",",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"_STORAGE_TYPE_DEFAULT",
"]",
",",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"_STORAGE_TYPE_ROW_SPARSE",
"]",
",",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"_STORAGE_TYPE_CSR",
"]",
")",
"tensor_stypes",
"[",
"i",
"]",
"=",
"_STORAGE_TYPE_STR_TO_ID",
"[",
"stype",
"]",
"infer_storage_type_backward_entry",
".",
"_ref_holder",
"=",
"[",
"tensor_stypes",
"]",
"except",
"Exception",
":",
"print",
"(",
"'Error in %s.infer_type: %s'",
"%",
"(",
"reg_name",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"return",
"False",
"return",
"True",
"def",
"infer_storage_type_entry",
"(",
"num_tensor",
",",
"tensor_stypes",
",",
"_",
")",
":",
"\"\"\"C Callback for CustomOpProp::InferStorageType\"\"\"",
"try",
":",
"n_in",
"=",
"len",
"(",
"op_prop",
".",
"list_arguments",
"(",
")",
")",
"n_out",
"=",
"len",
"(",
"op_prop",
".",
"list_outputs",
"(",
")",
")",
"n_aux",
"=",
"len",
"(",
"op_prop",
".",
"list_auxiliary_states",
"(",
")",
")",
"assert",
"num_tensor",
"==",
"n_in",
"+",
"n_out",
"+",
"n_aux",
"stypes",
"=",
"[",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"tensor_stypes",
"[",
"i",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"n_in",
")",
"]",
"ret",
"=",
"op_prop",
".",
"infer_storage_type",
"(",
"stypes",
")",
"if",
"len",
"(",
"ret",
")",
"==",
"2",
":",
"istype",
",",
"ostype",
"=",
"ret",
"astype",
"=",
"[",
"]",
"elif",
"len",
"(",
"ret",
")",
"==",
"3",
":",
"istype",
",",
"ostype",
",",
"astype",
"=",
"ret",
"else",
":",
"raise",
"AssertionError",
"(",
"\"infer_storage_type must return 2 or 3 lists\"",
")",
"assert",
"len",
"(",
"ostype",
")",
"==",
"n_out",
",",
"\"InferStorageType Error: expecting %d entries in returned output \"",
"\"stypes, got %d.\"",
"%",
"(",
"n_out",
",",
"len",
"(",
"ostype",
")",
")",
"assert",
"len",
"(",
"istype",
")",
"==",
"n_in",
",",
"\"InferStorageType Error: expecting %d entries in returned input \"",
"\"stypes, got %d.\"",
"%",
"(",
"n_in",
",",
"len",
"(",
"istype",
")",
")",
"assert",
"len",
"(",
"astype",
")",
"==",
"n_aux",
",",
"\"InferStorageType Error: expecting %d entries in returned aux state \"",
"\"stypes, got %d.\"",
"%",
"(",
"n_aux",
",",
"len",
"(",
"astype",
")",
")",
"rtype",
"=",
"list",
"(",
"istype",
")",
"+",
"list",
"(",
"ostype",
")",
"+",
"list",
"(",
"astype",
")",
"for",
"i",
",",
"dtype",
"in",
"enumerate",
"(",
"rtype",
")",
":",
"tensor_stypes",
"[",
"i",
"]",
"=",
"_STORAGE_TYPE_STR_TO_ID",
"[",
"dtype",
"]",
"infer_storage_type_entry",
".",
"_ref_holder",
"=",
"[",
"tensor_stypes",
"]",
"except",
"Exception",
":",
"print",
"(",
"'Error in %s.infer_type: %s'",
"%",
"(",
"reg_name",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"return",
"False",
"return",
"True",
"def",
"infer_type_entry",
"(",
"num_tensor",
",",
"tensor_types",
",",
"_",
")",
":",
"\"\"\"C Callback for CustomOpProp::InferType\"\"\"",
"try",
":",
"n_in",
"=",
"len",
"(",
"op_prop",
".",
"list_arguments",
"(",
")",
")",
"n_out",
"=",
"len",
"(",
"op_prop",
".",
"list_outputs",
"(",
")",
")",
"n_aux",
"=",
"len",
"(",
"op_prop",
".",
"list_auxiliary_states",
"(",
")",
")",
"assert",
"num_tensor",
"==",
"n_in",
"+",
"n_out",
"+",
"n_aux",
"types",
"=",
"[",
"_DTYPE_MX_TO_NP",
"[",
"tensor_types",
"[",
"i",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"n_in",
")",
"]",
"ret",
"=",
"op_prop",
".",
"infer_type",
"(",
"types",
")",
"if",
"len",
"(",
"ret",
")",
"==",
"2",
":",
"itype",
",",
"otype",
"=",
"ret",
"atype",
"=",
"[",
"]",
"elif",
"len",
"(",
"ret",
")",
"==",
"3",
":",
"itype",
",",
"otype",
",",
"atype",
"=",
"ret",
"else",
":",
"raise",
"AssertionError",
"(",
"\"infer_type must return 2 or 3 lists\"",
")",
"assert",
"len",
"(",
"otype",
")",
"==",
"n_out",
",",
"\"InferType Error: expecting %d entries in returned output \"",
"\"types, got %d.\"",
"%",
"(",
"n_out",
",",
"len",
"(",
"otype",
")",
")",
"assert",
"len",
"(",
"itype",
")",
"==",
"n_in",
",",
"\"InferType Error: expecting %d entries in returned input \"",
"\"types, got %d.\"",
"%",
"(",
"n_in",
",",
"len",
"(",
"itype",
")",
")",
"assert",
"len",
"(",
"atype",
")",
"==",
"n_aux",
",",
"\"InferType Error: expecting %d entries in returned aux state \"",
"\"types, got %d.\"",
"%",
"(",
"n_aux",
",",
"len",
"(",
"atype",
")",
")",
"rtype",
"=",
"list",
"(",
"itype",
")",
"+",
"list",
"(",
"otype",
")",
"+",
"list",
"(",
"atype",
")",
"for",
"i",
",",
"dtype",
"in",
"enumerate",
"(",
"rtype",
")",
":",
"tensor_types",
"[",
"i",
"]",
"=",
"_DTYPE_NP_TO_MX",
"[",
"dtype",
"]",
"infer_type_entry",
".",
"_ref_holder",
"=",
"[",
"tensor_types",
"]",
"except",
"Exception",
":",
"print",
"(",
"'Error in %s.infer_type: %s'",
"%",
"(",
"reg_name",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"return",
"False",
"return",
"True",
"def",
"list_outputs_entry",
"(",
"out",
",",
"_",
")",
":",
"\"\"\"C Callback for CustomOpProp::ListOutputs\"\"\"",
"try",
":",
"ret",
"=",
"op_prop",
".",
"list_outputs",
"(",
")",
"ret",
"=",
"[",
"c_str",
"(",
"i",
")",
"for",
"i",
"in",
"ret",
"]",
"+",
"[",
"c_char_p",
"(",
"0",
")",
"]",
"ret",
"=",
"c_array",
"(",
"c_char_p",
",",
"ret",
")",
"out",
"[",
"0",
"]",
"=",
"cast",
"(",
"ret",
",",
"POINTER",
"(",
"POINTER",
"(",
"c_char",
")",
")",
")",
"list_outputs_entry",
".",
"_ref_holder",
"=",
"[",
"out",
"]",
"except",
"Exception",
":",
"print",
"(",
"'Error in %s.list_outputs: %s'",
"%",
"(",
"reg_name",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"return",
"False",
"return",
"True",
"def",
"list_arguments_entry",
"(",
"out",
",",
"_",
")",
":",
"\"\"\"C Callback for CustomOpProp::ListArguments\"\"\"",
"try",
":",
"ret",
"=",
"op_prop",
".",
"list_arguments",
"(",
")",
"ret",
"=",
"[",
"c_str",
"(",
"i",
")",
"for",
"i",
"in",
"ret",
"]",
"+",
"[",
"c_char_p",
"(",
"0",
")",
"]",
"ret",
"=",
"c_array",
"(",
"c_char_p",
",",
"ret",
")",
"out",
"[",
"0",
"]",
"=",
"cast",
"(",
"ret",
",",
"POINTER",
"(",
"POINTER",
"(",
"c_char",
")",
")",
")",
"list_arguments_entry",
".",
"_ref_holder",
"=",
"[",
"out",
"]",
"except",
"Exception",
":",
"print",
"(",
"'Error in %s.list_arguments: %s'",
"%",
"(",
"reg_name",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"return",
"False",
"return",
"True",
"def",
"list_auxiliary_states_entry",
"(",
"out",
",",
"_",
")",
":",
"\"\"\"C Callback for CustomOpProp::ListAuxiliaryStates\"\"\"",
"try",
":",
"ret",
"=",
"op_prop",
".",
"list_auxiliary_states",
"(",
")",
"ret",
"=",
"[",
"c_str",
"(",
"i",
")",
"for",
"i",
"in",
"ret",
"]",
"+",
"[",
"c_char_p",
"(",
"0",
")",
"]",
"ret",
"=",
"c_array",
"(",
"c_char_p",
",",
"ret",
")",
"out",
"[",
"0",
"]",
"=",
"cast",
"(",
"ret",
",",
"POINTER",
"(",
"POINTER",
"(",
"c_char",
")",
")",
")",
"list_auxiliary_states_entry",
".",
"_ref_holder",
"=",
"[",
"out",
"]",
"except",
"Exception",
":",
"tb",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"print",
"(",
"'Error in %s.list_auxiliary_states: %s'",
"%",
"(",
"reg_name",
",",
"tb",
")",
")",
"return",
"False",
"return",
"True",
"def",
"declare_backward_dependency_entry",
"(",
"out_grad",
",",
"in_data",
",",
"out_data",
",",
"num_dep",
",",
"deps",
",",
"_",
")",
":",
"\"\"\"C Callback for CustomOpProp::DeclareBacwardDependency\"\"\"",
"try",
":",
"out_grad",
"=",
"[",
"out_grad",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"op_prop",
".",
"list_outputs",
"(",
")",
")",
")",
"]",
"in_data",
"=",
"[",
"in_data",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"op_prop",
".",
"list_arguments",
"(",
")",
")",
")",
"]",
"out_data",
"=",
"[",
"out_data",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"op_prop",
".",
"list_outputs",
"(",
")",
")",
")",
"]",
"rdeps",
"=",
"op_prop",
".",
"declare_backward_dependency",
"(",
"out_grad",
",",
"in_data",
",",
"out_data",
")",
"num_dep",
"[",
"0",
"]",
"=",
"len",
"(",
"rdeps",
")",
"_registry",
".",
"result_deps",
"=",
"set",
"(",
")",
"for",
"dep",
"in",
"rdeps",
":",
"_registry",
".",
"result_deps",
".",
"add",
"(",
"dep",
")",
"rdeps",
"=",
"cast",
"(",
"c_array_buf",
"(",
"c_int",
",",
"array",
"(",
"'i'",
",",
"rdeps",
")",
")",
",",
"c_int_p",
")",
"deps",
"[",
"0",
"]",
"=",
"rdeps",
"declare_backward_dependency_entry",
".",
"_ref_holder",
"=",
"[",
"deps",
"]",
"except",
"Exception",
":",
"tb",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"print",
"(",
"'Error in %s.declare_backward_dependency: %s'",
"%",
"(",
"reg_name",
",",
"tb",
")",
")",
"return",
"False",
"return",
"True",
"def",
"create_operator_entry",
"(",
"ctx",
",",
"num_inputs",
",",
"shapes",
",",
"ndims",
",",
"dtypes",
",",
"ret",
",",
"_",
")",
":",
"\"\"\"C Callback for CustomOpProp::CreateOperator\"\"\"",
"try",
":",
"ctx",
"=",
"py_str",
"(",
"ctx",
")",
"sep",
"=",
"ctx",
".",
"find",
"(",
"'('",
")",
"ctx",
"=",
"context",
".",
"Context",
"(",
"ctx",
"[",
":",
"sep",
"]",
",",
"int",
"(",
"ctx",
"[",
"sep",
"+",
"1",
":",
"-",
"1",
"]",
")",
")",
"ndims",
"=",
"[",
"ndims",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"num_inputs",
")",
"]",
"shapes",
"=",
"[",
"[",
"shapes",
"[",
"i",
"]",
"[",
"j",
"]",
"for",
"j",
"in",
"range",
"(",
"ndims",
"[",
"i",
"]",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"num_inputs",
")",
"]",
"dtypes",
"=",
"[",
"dtypes",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"num_inputs",
")",
"]",
"op",
"=",
"op_prop",
".",
"create_operator",
"(",
"ctx",
",",
"shapes",
",",
"dtypes",
")",
"def",
"forward_entry",
"(",
"num_ndarray",
",",
"ndarraies",
",",
"tags",
",",
"reqs",
",",
"is_train",
",",
"_",
")",
":",
"\"\"\"C Callback for CustomOp::Forward\"\"\"",
"try",
":",
"tensors",
"=",
"[",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"5",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"num_ndarray",
")",
":",
"if",
"tags",
"[",
"i",
"]",
"==",
"1",
"or",
"tags",
"[",
"i",
"]",
"==",
"4",
":",
"tensors",
"[",
"tags",
"[",
"i",
"]",
"]",
".",
"append",
"(",
"_ndarray_cls",
"(",
"cast",
"(",
"ndarraies",
"[",
"i",
"]",
",",
"NDArrayHandle",
")",
",",
"writable",
"=",
"True",
")",
")",
"else",
":",
"tensors",
"[",
"tags",
"[",
"i",
"]",
"]",
".",
"append",
"(",
"_ndarray_cls",
"(",
"cast",
"(",
"ndarraies",
"[",
"i",
"]",
",",
"NDArrayHandle",
")",
",",
"writable",
"=",
"False",
")",
")",
"reqs",
"=",
"[",
"req_enum",
"[",
"reqs",
"[",
"i",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"tensors",
"[",
"1",
"]",
")",
")",
"]",
"with",
"ctx",
":",
"op",
".",
"forward",
"(",
"is_train",
"=",
"is_train",
",",
"req",
"=",
"reqs",
",",
"in_data",
"=",
"tensors",
"[",
"0",
"]",
",",
"out_data",
"=",
"tensors",
"[",
"1",
"]",
",",
"aux",
"=",
"tensors",
"[",
"4",
"]",
")",
"except",
"Exception",
":",
"print",
"(",
"'Error in CustomOp.forward: %s'",
"%",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"False",
"return",
"True",
"def",
"backward_entry",
"(",
"num_ndarray",
",",
"ndarraies",
",",
"tags",
",",
"reqs",
",",
"is_train",
",",
"_",
")",
":",
"\"\"\"C Callback for CustomOp::Backward\"\"\"",
"# pylint: disable=W0613",
"try",
":",
"tensors",
"=",
"[",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"5",
")",
"]",
"num_outputs",
"=",
"len",
"(",
"op_prop",
".",
"list_outputs",
"(",
")",
")",
"num_args",
"=",
"len",
"(",
"op_prop",
".",
"list_arguments",
"(",
")",
")",
"for",
"i",
"in",
"range",
"(",
"num_ndarray",
")",
":",
"if",
"i",
"in",
"_registry",
".",
"result_deps",
"or",
"i",
">=",
"(",
"num_outputs",
"*",
"2",
"+",
"num_args",
")",
":",
"# If it is a backward dependency or output or aux:",
"# Set stype as undefined so that it returns",
"# ndarray based on existing stype",
"stype",
"=",
"_STORAGE_TYPE_UNDEFINED",
"else",
":",
"# If it is some input, output or out grad ndarray not part of",
"# backward dependency it is empty and thus the ndarray should",
"# be set to default",
"stype",
"=",
"_STORAGE_TYPE_DEFAULT",
"if",
"tags",
"[",
"i",
"]",
"==",
"2",
"or",
"tags",
"[",
"i",
"]",
"==",
"4",
":",
"tensors",
"[",
"tags",
"[",
"i",
"]",
"]",
".",
"append",
"(",
"_ndarray_cls",
"(",
"cast",
"(",
"ndarraies",
"[",
"i",
"]",
",",
"NDArrayHandle",
")",
",",
"writable",
"=",
"True",
",",
"stype",
"=",
"stype",
")",
")",
"else",
":",
"tensors",
"[",
"tags",
"[",
"i",
"]",
"]",
".",
"append",
"(",
"_ndarray_cls",
"(",
"cast",
"(",
"ndarraies",
"[",
"i",
"]",
",",
"NDArrayHandle",
")",
",",
"writable",
"=",
"False",
",",
"stype",
"=",
"stype",
")",
")",
"reqs",
"=",
"[",
"req_enum",
"[",
"reqs",
"[",
"i",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"tensors",
"[",
"2",
"]",
")",
")",
"]",
"with",
"ctx",
":",
"op",
".",
"backward",
"(",
"req",
"=",
"reqs",
",",
"in_data",
"=",
"tensors",
"[",
"0",
"]",
",",
"out_data",
"=",
"tensors",
"[",
"1",
"]",
",",
"in_grad",
"=",
"tensors",
"[",
"2",
"]",
",",
"out_grad",
"=",
"tensors",
"[",
"3",
"]",
",",
"aux",
"=",
"tensors",
"[",
"4",
"]",
")",
"except",
"Exception",
":",
"print",
"(",
"'Error in CustomOp.backward: %s'",
"%",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"False",
"return",
"True",
"cur",
"=",
"_registry",
".",
"inc",
"(",
")",
"def",
"delete_entry",
"(",
"_",
")",
":",
"\"\"\"C Callback for CustomOp::del\"\"\"",
"try",
":",
"del",
"_registry",
".",
"ref_holder",
"[",
"cur",
"]",
"except",
"Exception",
":",
"print",
"(",
"'Error in CustomOp.delete: %s'",
"%",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"False",
"return",
"True",
"callbacks",
"=",
"[",
"del_functype",
"(",
"delete_entry",
")",
",",
"fb_functype",
"(",
"forward_entry",
")",
",",
"fb_functype",
"(",
"backward_entry",
")",
"]",
"callbacks",
"=",
"[",
"cast",
"(",
"i",
",",
"CFUNCTYPE",
"(",
"c_int",
")",
")",
"for",
"i",
"in",
"callbacks",
"]",
"contexts",
"=",
"[",
"None",
",",
"None",
",",
"None",
"]",
"ret",
"[",
"0",
"]",
"=",
"MXCallbackList",
"(",
"c_int",
"(",
"len",
"(",
"callbacks",
")",
")",
",",
"cast",
"(",
"c_array",
"(",
"CFUNCTYPE",
"(",
"c_int",
")",
",",
"callbacks",
")",
",",
"POINTER",
"(",
"CFUNCTYPE",
"(",
"c_int",
")",
")",
")",
",",
"cast",
"(",
"c_array",
"(",
"c_void_p",
",",
"contexts",
")",
",",
"POINTER",
"(",
"c_void_p",
")",
")",
")",
"op",
".",
"_ref_holder",
"=",
"[",
"ret",
"]",
"_registry",
".",
"ref_holder",
"[",
"cur",
"]",
"=",
"op",
"except",
"Exception",
":",
"print",
"(",
"'Error in %s.create_operator: %s'",
"%",
"(",
"reg_name",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"return",
"False",
"return",
"True",
"cur",
"=",
"_registry",
".",
"inc",
"(",
")",
"def",
"delete_entry",
"(",
"_",
")",
":",
"\"\"\"C Callback for CustomOpProp::del\"\"\"",
"try",
":",
"del",
"_registry",
".",
"ref_holder",
"[",
"cur",
"]",
"except",
"Exception",
":",
"print",
"(",
"'Error in CustomOpProp.delete: %s'",
"%",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"False",
"return",
"True",
"callbacks",
"=",
"[",
"del_functype",
"(",
"delete_entry",
")",
",",
"list_functype",
"(",
"list_arguments_entry",
")",
",",
"list_functype",
"(",
"list_outputs_entry",
")",
",",
"list_functype",
"(",
"list_auxiliary_states_entry",
")",
",",
"infershape_functype",
"(",
"infer_shape_entry",
")",
",",
"deps_functype",
"(",
"declare_backward_dependency_entry",
")",
",",
"createop_functype",
"(",
"create_operator_entry",
")",
",",
"infertype_functype",
"(",
"infer_type_entry",
")",
",",
"inferstorage_functype",
"(",
"infer_storage_type_entry",
")",
",",
"inferstorage_backward_functype",
"(",
"infer_storage_type_backward_entry",
")",
"]",
"callbacks",
"=",
"[",
"cast",
"(",
"i",
",",
"CFUNCTYPE",
"(",
"c_int",
")",
")",
"for",
"i",
"in",
"callbacks",
"]",
"contexts",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"callbacks",
")",
"ret",
"[",
"0",
"]",
"=",
"MXCallbackList",
"(",
"c_int",
"(",
"len",
"(",
"callbacks",
")",
")",
",",
"cast",
"(",
"c_array",
"(",
"CFUNCTYPE",
"(",
"c_int",
")",
",",
"callbacks",
")",
",",
"POINTER",
"(",
"CFUNCTYPE",
"(",
"c_int",
")",
")",
")",
",",
"cast",
"(",
"c_array",
"(",
"c_void_p",
",",
"contexts",
")",
",",
"POINTER",
"(",
"c_void_p",
")",
")",
")",
"op_prop",
".",
"_ref_holder",
"=",
"[",
"ret",
"]",
"_registry",
".",
"ref_holder",
"[",
"cur",
"]",
"=",
"op_prop",
"return",
"True",
"creator_functype",
"=",
"CFUNCTYPE",
"(",
"c_int",
",",
"c_char_p",
",",
"c_int",
",",
"POINTER",
"(",
"c_char_p",
")",
",",
"POINTER",
"(",
"c_char_p",
")",
",",
"POINTER",
"(",
"MXCallbackList",
")",
")",
"creator_func",
"=",
"creator_functype",
"(",
"creator",
")",
"check_call",
"(",
"_LIB",
".",
"MXCustomOpRegister",
"(",
"c_str",
"(",
"reg_name",
")",
",",
"creator_func",
")",
")",
"cur",
"=",
"_registry",
".",
"inc",
"(",
")",
"_registry",
".",
"ref_holder",
"[",
"cur",
"]",
"=",
"creator_func",
"return",
"prop_cls",
"return",
"do_register"
] |
Register a subclass of CustomOpProp to the registry with name reg_name.
|
[
"Register",
"a",
"subclass",
"of",
"CustomOpProp",
"to",
"the",
"registry",
"with",
"name",
"reg_name",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L692-L1099
|
train
|
apache/incubator-mxnet
|
python/mxnet/operator.py
|
NDArrayOp.declare_backward_dependency
|
def declare_backward_dependency(self, out_grad, in_data, out_data):
"""Declare dependencies of this operator for backward pass.
Parameters
----------
out_grad : list of int
ids of out_grad blobs.
in_data : list of int
ids of in_data blobs.
out_data: list of int
ids of out_data blobs.
Returns
-------
deps : list of int
ids of the needed blobs.
"""
deps = []
if self.need_top_grad():
deps.extend(out_grad)
deps.extend(in_data)
deps.extend(out_data)
return deps
|
python
|
def declare_backward_dependency(self, out_grad, in_data, out_data):
"""Declare dependencies of this operator for backward pass.
Parameters
----------
out_grad : list of int
ids of out_grad blobs.
in_data : list of int
ids of in_data blobs.
out_data: list of int
ids of out_data blobs.
Returns
-------
deps : list of int
ids of the needed blobs.
"""
deps = []
if self.need_top_grad():
deps.extend(out_grad)
deps.extend(in_data)
deps.extend(out_data)
return deps
|
[
"def",
"declare_backward_dependency",
"(",
"self",
",",
"out_grad",
",",
"in_data",
",",
"out_data",
")",
":",
"deps",
"=",
"[",
"]",
"if",
"self",
".",
"need_top_grad",
"(",
")",
":",
"deps",
".",
"extend",
"(",
"out_grad",
")",
"deps",
".",
"extend",
"(",
"in_data",
")",
"deps",
".",
"extend",
"(",
"out_data",
")",
"return",
"deps"
] |
Declare dependencies of this operator for backward pass.
Parameters
----------
out_grad : list of int
ids of out_grad blobs.
in_data : list of int
ids of in_data blobs.
out_data: list of int
ids of out_data blobs.
Returns
-------
deps : list of int
ids of the needed blobs.
|
[
"Declare",
"dependencies",
"of",
"this",
"operator",
"for",
"backward",
"pass",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L402-L424
|
train
|
apache/incubator-mxnet
|
python/mxnet/operator.py
|
CustomOp.assign
|
def assign(self, dst, req, src):
"""Helper function for assigning into dst depending on requirements."""
if req == 'null':
return
elif req in ('write', 'inplace'):
dst[:] = src
elif req == 'add':
dst[:] += src
|
python
|
def assign(self, dst, req, src):
"""Helper function for assigning into dst depending on requirements."""
if req == 'null':
return
elif req in ('write', 'inplace'):
dst[:] = src
elif req == 'add':
dst[:] += src
|
[
"def",
"assign",
"(",
"self",
",",
"dst",
",",
"req",
",",
"src",
")",
":",
"if",
"req",
"==",
"'null'",
":",
"return",
"elif",
"req",
"in",
"(",
"'write'",
",",
"'inplace'",
")",
":",
"dst",
"[",
":",
"]",
"=",
"src",
"elif",
"req",
"==",
"'add'",
":",
"dst",
"[",
":",
"]",
"+=",
"src"
] |
Helper function for assigning into dst depending on requirements.
|
[
"Helper",
"function",
"for",
"assigning",
"into",
"dst",
"depending",
"on",
"requirements",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L463-L470
|
train
|
apache/incubator-mxnet
|
python/mxnet/operator.py
|
CustomOpProp.infer_type
|
def infer_type(self, in_type):
"""infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
"""
return in_type, [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states())
|
python
|
def infer_type(self, in_type):
"""infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
"""
return in_type, [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states())
|
[
"def",
"infer_type",
"(",
"self",
",",
"in_type",
")",
":",
"return",
"in_type",
",",
"[",
"in_type",
"[",
"0",
"]",
"]",
"*",
"len",
"(",
"self",
".",
"list_outputs",
"(",
")",
")",
",",
"[",
"in_type",
"[",
"0",
"]",
"]",
"*",
"len",
"(",
"self",
".",
"list_auxiliary_states",
"(",
")",
")"
] |
infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
|
[
"infer_type",
"interface",
".",
"override",
"to",
"create",
"new",
"operators"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L506-L527
|
train
|
apache/incubator-mxnet
|
python/mxnet/operator.py
|
CustomOpProp.infer_storage_type
|
def infer_storage_type(self, in_stype):
"""infer_storage_type interface. Used to infer storage type of
inputs and outputs in the forward pass. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
in_stype : list of stypes, valid stypes are default, row_sparse and
csr
Returns
-------
in_stype : list
list of argument stypes.
out_stype : list
list of output types calculated from in_stype,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_stype,
in the same order as declared in list_auxiliary_states.
"""
for i, stype in enumerate(in_stype):
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type implementation doesnt allow non default stypes: " \
"found non default stype '%s' for in_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default input/output stypes" % (stype, i)
return in_stype, \
[_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]*len(self.list_outputs()), \
[_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]*len(self.list_auxiliary_states())
|
python
|
def infer_storage_type(self, in_stype):
"""infer_storage_type interface. Used to infer storage type of
inputs and outputs in the forward pass. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
in_stype : list of stypes, valid stypes are default, row_sparse and
csr
Returns
-------
in_stype : list
list of argument stypes.
out_stype : list
list of output types calculated from in_stype,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_stype,
in the same order as declared in list_auxiliary_states.
"""
for i, stype in enumerate(in_stype):
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type implementation doesnt allow non default stypes: " \
"found non default stype '%s' for in_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default input/output stypes" % (stype, i)
return in_stype, \
[_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]*len(self.list_outputs()), \
[_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]*len(self.list_auxiliary_states())
|
[
"def",
"infer_storage_type",
"(",
"self",
",",
"in_stype",
")",
":",
"for",
"i",
",",
"stype",
"in",
"enumerate",
"(",
"in_stype",
")",
":",
"assert",
"stype",
"==",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"_STORAGE_TYPE_DEFAULT",
"]",
",",
"\"Default infer_storage_type implementation doesnt allow non default stypes: \"",
"\"found non default stype '%s' for in_stype[%d]. Please implement \"",
"\"infer_storage_type and infer_storage_type_backward interface \"",
"\"in your custom operator if you have non-default input/output stypes\"",
"%",
"(",
"stype",
",",
"i",
")",
"return",
"in_stype",
",",
"[",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"_STORAGE_TYPE_DEFAULT",
"]",
"]",
"*",
"len",
"(",
"self",
".",
"list_outputs",
"(",
")",
")",
",",
"[",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"_STORAGE_TYPE_DEFAULT",
"]",
"]",
"*",
"len",
"(",
"self",
".",
"list_auxiliary_states",
"(",
")",
")"
] |
infer_storage_type interface. Used to infer storage type of
inputs and outputs in the forward pass. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
in_stype : list of stypes, valid stypes are default, row_sparse and
csr
Returns
-------
in_stype : list
list of argument stypes.
out_stype : list
list of output types calculated from in_stype,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_stype,
in the same order as declared in list_auxiliary_states.
|
[
"infer_storage_type",
"interface",
".",
"Used",
"to",
"infer",
"storage",
"type",
"of",
"inputs",
"and",
"outputs",
"in",
"the",
"forward",
"pass",
".",
"When",
"this",
"interface",
"is",
"not",
"implemented",
"all",
"stypes",
"will",
"be",
"inferred",
"as",
"default",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L529-L558
|
train
|
apache/incubator-mxnet
|
python/mxnet/operator.py
|
CustomOpProp.infer_storage_type_backward
|
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
"""infer_storage_type_backward interface. Used to infer storage
type of inputs and outputs in the backward pass.
Will raise an error if undefined storage type is returned.
Returned lists have to be the same size as the input lists to infer_storage_type_backward,
otherwise an exception will be thrown. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
ograd_stype : list
list of output gradient storage types
in_stype : list
list of input storage types
out_stype : list
list of output storage types
igrad_stype : list
list of input gradient storage types
aux_stype : list
list of auxiliary storage types
Returns
-------
ograd_stype : list
list of inferred output gradient storage types
in_stype : list
list of inferred input storage types
out_stype : list
list of inferred output storage types
igrad_stype : list
list of inferred input gradient storage types
aux_stype : list
list of inferred storage types for auxiliary states
"""
for i, stype in enumerate(ograd_stype):
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type_backward implementation doesnt allow non default stypes: " \
"found non default stype '%s' for ograd_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default output gradient stypes" % (stype, i)
for i, stype in enumerate(igrad_stype):
if stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_UNDEFINED]:
stype = _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type_backward implementation doesnt allow non default stypes: " \
"found non default stype '%s' for igrad_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default input gradient stypes" % (stype, i)
stype_lists = [ograd_stype, in_stype, out_stype, igrad_stype, aux_stype]
for stype_list in stype_lists:
stype_list[:] = len(stype_list) * [_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]
return stype_lists[0], stype_lists[1], stype_lists[2], stype_lists[3], stype_lists[4]
|
python
|
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
"""infer_storage_type_backward interface. Used to infer storage
type of inputs and outputs in the backward pass.
Will raise an error if undefined storage type is returned.
Returned lists have to be the same size as the input lists to infer_storage_type_backward,
otherwise an exception will be thrown. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
ograd_stype : list
list of output gradient storage types
in_stype : list
list of input storage types
out_stype : list
list of output storage types
igrad_stype : list
list of input gradient storage types
aux_stype : list
list of auxiliary storage types
Returns
-------
ograd_stype : list
list of inferred output gradient storage types
in_stype : list
list of inferred input storage types
out_stype : list
list of inferred output storage types
igrad_stype : list
list of inferred input gradient storage types
aux_stype : list
list of inferred storage types for auxiliary states
"""
for i, stype in enumerate(ograd_stype):
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type_backward implementation doesnt allow non default stypes: " \
"found non default stype '%s' for ograd_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default output gradient stypes" % (stype, i)
for i, stype in enumerate(igrad_stype):
if stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_UNDEFINED]:
stype = _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type_backward implementation doesnt allow non default stypes: " \
"found non default stype '%s' for igrad_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default input gradient stypes" % (stype, i)
stype_lists = [ograd_stype, in_stype, out_stype, igrad_stype, aux_stype]
for stype_list in stype_lists:
stype_list[:] = len(stype_list) * [_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]
return stype_lists[0], stype_lists[1], stype_lists[2], stype_lists[3], stype_lists[4]
|
[
"def",
"infer_storage_type_backward",
"(",
"self",
",",
"ograd_stype",
",",
"in_stype",
",",
"out_stype",
",",
"igrad_stype",
",",
"aux_stype",
")",
":",
"for",
"i",
",",
"stype",
"in",
"enumerate",
"(",
"ograd_stype",
")",
":",
"assert",
"stype",
"==",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"_STORAGE_TYPE_DEFAULT",
"]",
",",
"\"Default infer_storage_type_backward implementation doesnt allow non default stypes: \"",
"\"found non default stype '%s' for ograd_stype[%d]. Please implement \"",
"\"infer_storage_type and infer_storage_type_backward interface \"",
"\"in your custom operator if you have non-default output gradient stypes\"",
"%",
"(",
"stype",
",",
"i",
")",
"for",
"i",
",",
"stype",
"in",
"enumerate",
"(",
"igrad_stype",
")",
":",
"if",
"stype",
"==",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"_STORAGE_TYPE_UNDEFINED",
"]",
":",
"stype",
"=",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"_STORAGE_TYPE_DEFAULT",
"]",
"assert",
"stype",
"==",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"_STORAGE_TYPE_DEFAULT",
"]",
",",
"\"Default infer_storage_type_backward implementation doesnt allow non default stypes: \"",
"\"found non default stype '%s' for igrad_stype[%d]. Please implement \"",
"\"infer_storage_type and infer_storage_type_backward interface \"",
"\"in your custom operator if you have non-default input gradient stypes\"",
"%",
"(",
"stype",
",",
"i",
")",
"stype_lists",
"=",
"[",
"ograd_stype",
",",
"in_stype",
",",
"out_stype",
",",
"igrad_stype",
",",
"aux_stype",
"]",
"for",
"stype_list",
"in",
"stype_lists",
":",
"stype_list",
"[",
":",
"]",
"=",
"len",
"(",
"stype_list",
")",
"*",
"[",
"_STORAGE_TYPE_ID_TO_STR",
"[",
"_STORAGE_TYPE_DEFAULT",
"]",
"]",
"return",
"stype_lists",
"[",
"0",
"]",
",",
"stype_lists",
"[",
"1",
"]",
",",
"stype_lists",
"[",
"2",
"]",
",",
"stype_lists",
"[",
"3",
"]",
",",
"stype_lists",
"[",
"4",
"]"
] |
infer_storage_type_backward interface. Used to infer storage
type of inputs and outputs in the backward pass.
Will raise an error if undefined storage type is returned.
Returned lists have to be the same size as the input lists to infer_storage_type_backward,
otherwise an exception will be thrown. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
ograd_stype : list
list of output gradient storage types
in_stype : list
list of input storage types
out_stype : list
list of output storage types
igrad_stype : list
list of input gradient storage types
aux_stype : list
list of auxiliary storage types
Returns
-------
ograd_stype : list
list of inferred output gradient storage types
in_stype : list
list of inferred input storage types
out_stype : list
list of inferred output storage types
igrad_stype : list
list of inferred input gradient storage types
aux_stype : list
list of inferred storage types for auxiliary states
|
[
"infer_storage_type_backward",
"interface",
".",
"Used",
"to",
"infer",
"storage",
"type",
"of",
"inputs",
"and",
"outputs",
"in",
"the",
"backward",
"pass",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L560-L612
|
train
|
apache/incubator-mxnet
|
python/mxnet/operator.py
|
CustomOpProp.declare_backward_dependency
|
def declare_backward_dependency(self, out_grad, in_data, out_data):
"""Declare dependencies of this operator for backward pass.
Parameters
----------
out_grad : list of int
ids of out_grad blobs.
in_data : list of int
ids of in_data blobs.
out_data: list of int
ids of out_data blobs.
Returns
-------
deps : list of int
ids of the needed blobs.
"""
deps = []
if self.need_top_grad_:
deps.extend(out_grad)
deps.extend(in_data)
deps.extend(out_data)
return deps
|
python
|
def declare_backward_dependency(self, out_grad, in_data, out_data):
"""Declare dependencies of this operator for backward pass.
Parameters
----------
out_grad : list of int
ids of out_grad blobs.
in_data : list of int
ids of in_data blobs.
out_data: list of int
ids of out_data blobs.
Returns
-------
deps : list of int
ids of the needed blobs.
"""
deps = []
if self.need_top_grad_:
deps.extend(out_grad)
deps.extend(in_data)
deps.extend(out_data)
return deps
|
[
"def",
"declare_backward_dependency",
"(",
"self",
",",
"out_grad",
",",
"in_data",
",",
"out_data",
")",
":",
"deps",
"=",
"[",
"]",
"if",
"self",
".",
"need_top_grad_",
":",
"deps",
".",
"extend",
"(",
"out_grad",
")",
"deps",
".",
"extend",
"(",
"in_data",
")",
"deps",
".",
"extend",
"(",
"out_data",
")",
"return",
"deps"
] |
Declare dependencies of this operator for backward pass.
Parameters
----------
out_grad : list of int
ids of out_grad blobs.
in_data : list of int
ids of in_data blobs.
out_data: list of int
ids of out_data blobs.
Returns
-------
deps : list of int
ids of the needed blobs.
|
[
"Declare",
"dependencies",
"of",
"this",
"operator",
"for",
"backward",
"pass",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L644-L666
|
train
|
apache/incubator-mxnet
|
python/mxnet/operator.py
|
_Registry.inc
|
def inc(self):
"""Get index for new entry."""
self.lock.acquire()
cur = self.counter
self.counter += 1
self.lock.release()
return cur
|
python
|
def inc(self):
"""Get index for new entry."""
self.lock.acquire()
cur = self.counter
self.counter += 1
self.lock.release()
return cur
|
[
"def",
"inc",
"(",
"self",
")",
":",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
"cur",
"=",
"self",
".",
"counter",
"self",
".",
"counter",
"+=",
"1",
"self",
".",
"lock",
".",
"release",
"(",
")",
"return",
"cur"
] |
Get index for new entry.
|
[
"Get",
"index",
"for",
"new",
"entry",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L682-L688
|
train
|
apache/incubator-mxnet
|
tools/rec2idx.py
|
IndexCreator.close
|
def close(self):
"""Closes the record and index files."""
if not self.is_open:
return
super(IndexCreator, self).close()
self.fidx.close()
|
python
|
def close(self):
"""Closes the record and index files."""
if not self.is_open:
return
super(IndexCreator, self).close()
self.fidx.close()
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_open",
":",
"return",
"super",
"(",
"IndexCreator",
",",
"self",
")",
".",
"close",
"(",
")",
"self",
".",
"fidx",
".",
"close",
"(",
")"
] |
Closes the record and index files.
|
[
"Closes",
"the",
"record",
"and",
"index",
"files",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/rec2idx.py#L58-L63
|
train
|
apache/incubator-mxnet
|
tools/rec2idx.py
|
IndexCreator.tell
|
def tell(self):
"""Returns the current position of read head.
"""
pos = ctypes.c_size_t()
check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos)))
return pos.value
|
python
|
def tell(self):
"""Returns the current position of read head.
"""
pos = ctypes.c_size_t()
check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos)))
return pos.value
|
[
"def",
"tell",
"(",
"self",
")",
":",
"pos",
"=",
"ctypes",
".",
"c_size_t",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXRecordIOReaderTell",
"(",
"self",
".",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"pos",
")",
")",
")",
"return",
"pos",
".",
"value"
] |
Returns the current position of read head.
|
[
"Returns",
"the",
"current",
"position",
"of",
"read",
"head",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/rec2idx.py#L65-L70
|
train
|
apache/incubator-mxnet
|
tools/rec2idx.py
|
IndexCreator.create_index
|
def create_index(self):
"""Creates the index file from open record file
"""
self.reset()
counter = 0
pre_time = time.time()
while True:
if counter % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', counter)
pos = self.tell()
cont = self.read()
if cont is None:
break
key = self.key_type(counter)
self.fidx.write('%s\t%d\n'%(str(key), pos))
counter = counter + 1
|
python
|
def create_index(self):
"""Creates the index file from open record file
"""
self.reset()
counter = 0
pre_time = time.time()
while True:
if counter % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', counter)
pos = self.tell()
cont = self.read()
if cont is None:
break
key = self.key_type(counter)
self.fidx.write('%s\t%d\n'%(str(key), pos))
counter = counter + 1
|
[
"def",
"create_index",
"(",
"self",
")",
":",
"self",
".",
"reset",
"(",
")",
"counter",
"=",
"0",
"pre_time",
"=",
"time",
".",
"time",
"(",
")",
"while",
"True",
":",
"if",
"counter",
"%",
"1000",
"==",
"0",
":",
"cur_time",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"'time:'",
",",
"cur_time",
"-",
"pre_time",
",",
"' count:'",
",",
"counter",
")",
"pos",
"=",
"self",
".",
"tell",
"(",
")",
"cont",
"=",
"self",
".",
"read",
"(",
")",
"if",
"cont",
"is",
"None",
":",
"break",
"key",
"=",
"self",
".",
"key_type",
"(",
"counter",
")",
"self",
".",
"fidx",
".",
"write",
"(",
"'%s\\t%d\\n'",
"%",
"(",
"str",
"(",
"key",
")",
",",
"pos",
")",
")",
"counter",
"=",
"counter",
"+",
"1"
] |
Creates the index file from open record file
|
[
"Creates",
"the",
"index",
"file",
"from",
"open",
"record",
"file"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/rec2idx.py#L72-L88
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
_run_cmd
|
def _run_cmd(cmds):
"""Run commands, raise exception if failed"""
if not isinstance(cmds, str):
cmds = "".join(cmds)
print("Execute \"%s\"" % cmds)
try:
subprocess.check_call(cmds, shell=True)
except subprocess.CalledProcessError as err:
print(err)
raise err
|
python
|
def _run_cmd(cmds):
"""Run commands, raise exception if failed"""
if not isinstance(cmds, str):
cmds = "".join(cmds)
print("Execute \"%s\"" % cmds)
try:
subprocess.check_call(cmds, shell=True)
except subprocess.CalledProcessError as err:
print(err)
raise err
|
[
"def",
"_run_cmd",
"(",
"cmds",
")",
":",
"if",
"not",
"isinstance",
"(",
"cmds",
",",
"str",
")",
":",
"cmds",
"=",
"\"\"",
".",
"join",
"(",
"cmds",
")",
"print",
"(",
"\"Execute \\\"%s\\\"\"",
"%",
"cmds",
")",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"cmds",
",",
"shell",
"=",
"True",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"err",
":",
"print",
"(",
"err",
")",
"raise",
"err"
] |
Run commands, raise exception if failed
|
[
"Run",
"commands",
"raise",
"exception",
"if",
"failed"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L73-L82
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
generate_doxygen
|
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
|
python
|
def generate_doxygen(app):
"""Run the doxygen make commands"""
_run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir)
_run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
|
[
"def",
"generate_doxygen",
"(",
"app",
")",
":",
"_run_cmd",
"(",
"\"cd %s/.. && make doxygen\"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")",
"_run_cmd",
"(",
"\"cp -rf doxygen/html %s/doxygen\"",
"%",
"app",
".",
"builder",
".",
"outdir",
")"
] |
Run the doxygen make commands
|
[
"Run",
"the",
"doxygen",
"make",
"commands"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L84-L87
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
build_mxnet
|
def build_mxnet(app):
"""Build mxnet .so lib"""
if not os.path.exists(os.path.join(app.builder.srcdir, '..', 'config.mk')):
_run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " %
app.builder.srcdir)
else:
_run_cmd("cd %s/.. && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " %
app.builder.srcdir)
|
python
|
def build_mxnet(app):
"""Build mxnet .so lib"""
if not os.path.exists(os.path.join(app.builder.srcdir, '..', 'config.mk')):
_run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " %
app.builder.srcdir)
else:
_run_cmd("cd %s/.. && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " %
app.builder.srcdir)
|
[
"def",
"build_mxnet",
"(",
"app",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"app",
".",
"builder",
".",
"srcdir",
",",
"'..'",
",",
"'config.mk'",
")",
")",
":",
"_run_cmd",
"(",
"\"cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 \"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")",
"else",
":",
"_run_cmd",
"(",
"\"cd %s/.. && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 \"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")"
] |
Build mxnet .so lib
|
[
"Build",
"mxnet",
".",
"so",
"lib"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L89-L96
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
build_r_docs
|
def build_r_docs(app):
"""build r pdf"""
r_root = app.builder.srcdir + '/../R-package'
pdf_path = app.builder.srcdir + '/api/r/mxnet-r-reference-manual.pdf'
_run_cmd('cd ' + r_root +
'; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
dest_path = app.builder.outdir + '/api/r/'
_run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
|
python
|
def build_r_docs(app):
"""build r pdf"""
r_root = app.builder.srcdir + '/../R-package'
pdf_path = app.builder.srcdir + '/api/r/mxnet-r-reference-manual.pdf'
_run_cmd('cd ' + r_root +
'; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
dest_path = app.builder.outdir + '/api/r/'
_run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
|
[
"def",
"build_r_docs",
"(",
"app",
")",
":",
"r_root",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/../R-package'",
"pdf_path",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/api/r/mxnet-r-reference-manual.pdf'",
"_run_cmd",
"(",
"'cd '",
"+",
"r_root",
"+",
"'; R -e \"roxygen2::roxygenize()\"; R CMD Rd2pdf . --no-preview -o '",
"+",
"pdf_path",
")",
"dest_path",
"=",
"app",
".",
"builder",
".",
"outdir",
"+",
"'/api/r/'",
"_run_cmd",
"(",
"'mkdir -p '",
"+",
"dest_path",
"+",
"'; mv '",
"+",
"pdf_path",
"+",
"' '",
"+",
"dest_path",
")"
] |
build r pdf
|
[
"build",
"r",
"pdf"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L98-L105
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
build_scala
|
def build_scala(app):
"""build scala for scala docs, java docs, and clojure docs to use"""
if any(v in _BUILD_VER for v in ['1.2.', '1.3.', '1.4.']):
_run_cmd("cd %s/.. && make scalapkg" % app.builder.srcdir)
_run_cmd("cd %s/.. && make scalainstall" % app.builder.srcdir)
else:
_run_cmd("cd %s/../scala-package && mvn -B install -DskipTests" % app.builder.srcdir)
|
python
|
def build_scala(app):
"""build scala for scala docs, java docs, and clojure docs to use"""
if any(v in _BUILD_VER for v in ['1.2.', '1.3.', '1.4.']):
_run_cmd("cd %s/.. && make scalapkg" % app.builder.srcdir)
_run_cmd("cd %s/.. && make scalainstall" % app.builder.srcdir)
else:
_run_cmd("cd %s/../scala-package && mvn -B install -DskipTests" % app.builder.srcdir)
|
[
"def",
"build_scala",
"(",
"app",
")",
":",
"if",
"any",
"(",
"v",
"in",
"_BUILD_VER",
"for",
"v",
"in",
"[",
"'1.2.'",
",",
"'1.3.'",
",",
"'1.4.'",
"]",
")",
":",
"_run_cmd",
"(",
"\"cd %s/.. && make scalapkg\"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")",
"_run_cmd",
"(",
"\"cd %s/.. && make scalainstall\"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")",
"else",
":",
"_run_cmd",
"(",
"\"cd %s/../scala-package && mvn -B install -DskipTests\"",
"%",
"app",
".",
"builder",
".",
"srcdir",
")"
] |
build scala for scala docs, java docs, and clojure docs to use
|
[
"build",
"scala",
"for",
"scala",
"docs",
"java",
"docs",
"and",
"clojure",
"docs",
"to",
"use"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L107-L113
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
build_scala_docs
|
def build_scala_docs(app):
"""build scala doc and then move the outdir"""
scala_path = app.builder.srcdir + '/../scala-package'
scala_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep -v \"\/javaapi\" | egrep -v \"Suite\"'
scala_doc_classpath = ':'.join([
'`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `',
'`find macros -name "*.jar" | tr "\\n" ":" `',
'`find core -name "*.jar" | tr "\\n" ":" `',
'`find infer -name "*.jar" | tr "\\n" ":" `'
])
# There are unresolvable errors on mxnet 1.2.x. We are ignoring those errors while aborting the ci on newer versions
scala_ignore_errors = '; exit 0' if any(v in _BUILD_VER for v in ['1.2.', '1.3.']) else ''
_run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation {}'
.format(scala_path, scala_doc_sources, scala_doc_classpath, scala_ignore_errors))
dest_path = app.builder.outdir + '/api/scala/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
# 'index' and 'package.html' do not exist in later versions of scala; delete these after upgrading scala>2.12.x
scaladocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html']
for doc_file in scaladocs:
_run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0')
|
python
|
def build_scala_docs(app):
"""build scala doc and then move the outdir"""
scala_path = app.builder.srcdir + '/../scala-package'
scala_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep -v \"\/javaapi\" | egrep -v \"Suite\"'
scala_doc_classpath = ':'.join([
'`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `',
'`find macros -name "*.jar" | tr "\\n" ":" `',
'`find core -name "*.jar" | tr "\\n" ":" `',
'`find infer -name "*.jar" | tr "\\n" ":" `'
])
# There are unresolvable errors on mxnet 1.2.x. We are ignoring those errors while aborting the ci on newer versions
scala_ignore_errors = '; exit 0' if any(v in _BUILD_VER for v in ['1.2.', '1.3.']) else ''
_run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation {}'
.format(scala_path, scala_doc_sources, scala_doc_classpath, scala_ignore_errors))
dest_path = app.builder.outdir + '/api/scala/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
# 'index' and 'package.html' do not exist in later versions of scala; delete these after upgrading scala>2.12.x
scaladocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html']
for doc_file in scaladocs:
_run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0')
|
[
"def",
"build_scala_docs",
"(",
"app",
")",
":",
"scala_path",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/../scala-package'",
"scala_doc_sources",
"=",
"'find . -type f -name \"*.scala\" | egrep \\\"\\.\\/core|\\.\\/infer\\\" | egrep -v \\\"\\/javaapi\\\" | egrep -v \\\"Suite\\\"'",
"scala_doc_classpath",
"=",
"':'",
".",
"join",
"(",
"[",
"'`find native -name \"*.jar\" | grep \"target/lib/\" | tr \"\\\\n\" \":\" `'",
",",
"'`find macros -name \"*.jar\" | tr \"\\\\n\" \":\" `'",
",",
"'`find core -name \"*.jar\" | tr \"\\\\n\" \":\" `'",
",",
"'`find infer -name \"*.jar\" | tr \"\\\\n\" \":\" `'",
"]",
")",
"# There are unresolvable errors on mxnet 1.2.x. We are ignoring those errors while aborting the ci on newer versions",
"scala_ignore_errors",
"=",
"'; exit 0'",
"if",
"any",
"(",
"v",
"in",
"_BUILD_VER",
"for",
"v",
"in",
"[",
"'1.2.'",
",",
"'1.3.'",
"]",
")",
"else",
"''",
"_run_cmd",
"(",
"'cd {}; scaladoc `{}` -classpath {} -feature -deprecation {}'",
".",
"format",
"(",
"scala_path",
",",
"scala_doc_sources",
",",
"scala_doc_classpath",
",",
"scala_ignore_errors",
")",
")",
"dest_path",
"=",
"app",
".",
"builder",
".",
"outdir",
"+",
"'/api/scala/docs'",
"_run_cmd",
"(",
"'rm -rf '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'mkdir -p '",
"+",
"dest_path",
")",
"# 'index' and 'package.html' do not exist in later versions of scala; delete these after upgrading scala>2.12.x",
"scaladocs",
"=",
"[",
"'index'",
",",
"'index.html'",
",",
"'org'",
",",
"'lib'",
",",
"'index.js'",
",",
"'package.html'",
"]",
"for",
"doc_file",
"in",
"scaladocs",
":",
"_run_cmd",
"(",
"'cd '",
"+",
"scala_path",
"+",
"' && mv -f '",
"+",
"doc_file",
"+",
"' '",
"+",
"dest_path",
"+",
"'; exit 0'",
")"
] |
build scala doc and then move the outdir
|
[
"build",
"scala",
"doc",
"and",
"then",
"move",
"the",
"outdir"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L115-L135
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
build_java_docs
|
def build_java_docs(app):
"""build java docs and then move the outdir"""
java_path = app.builder.srcdir + '/../scala-package'
java_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep \"\/javaapi\" | egrep -v \"Suite\"'
java_doc_classpath = ':'.join([
'`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `',
'`find macros -name "*.jar" | tr "\\n" ":" `',
'`find core -name "*.jar" | tr "\\n" ":" `',
'`find infer -name "*.jar" | tr "\\n" ":" `'
])
_run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation'
.format(java_path, java_doc_sources, java_doc_classpath))
dest_path = app.builder.outdir + '/api/java/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
javadocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html']
for doc_file in javadocs:
_run_cmd('cd ' + java_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0')
|
python
|
def build_java_docs(app):
"""build java docs and then move the outdir"""
java_path = app.builder.srcdir + '/../scala-package'
java_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep \"\/javaapi\" | egrep -v \"Suite\"'
java_doc_classpath = ':'.join([
'`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `',
'`find macros -name "*.jar" | tr "\\n" ":" `',
'`find core -name "*.jar" | tr "\\n" ":" `',
'`find infer -name "*.jar" | tr "\\n" ":" `'
])
_run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation'
.format(java_path, java_doc_sources, java_doc_classpath))
dest_path = app.builder.outdir + '/api/java/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
javadocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html']
for doc_file in javadocs:
_run_cmd('cd ' + java_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0')
|
[
"def",
"build_java_docs",
"(",
"app",
")",
":",
"java_path",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/../scala-package'",
"java_doc_sources",
"=",
"'find . -type f -name \"*.scala\" | egrep \\\"\\.\\/core|\\.\\/infer\\\" | egrep \\\"\\/javaapi\\\" | egrep -v \\\"Suite\\\"'",
"java_doc_classpath",
"=",
"':'",
".",
"join",
"(",
"[",
"'`find native -name \"*.jar\" | grep \"target/lib/\" | tr \"\\\\n\" \":\" `'",
",",
"'`find macros -name \"*.jar\" | tr \"\\\\n\" \":\" `'",
",",
"'`find core -name \"*.jar\" | tr \"\\\\n\" \":\" `'",
",",
"'`find infer -name \"*.jar\" | tr \"\\\\n\" \":\" `'",
"]",
")",
"_run_cmd",
"(",
"'cd {}; scaladoc `{}` -classpath {} -feature -deprecation'",
".",
"format",
"(",
"java_path",
",",
"java_doc_sources",
",",
"java_doc_classpath",
")",
")",
"dest_path",
"=",
"app",
".",
"builder",
".",
"outdir",
"+",
"'/api/java/docs'",
"_run_cmd",
"(",
"'rm -rf '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'mkdir -p '",
"+",
"dest_path",
")",
"javadocs",
"=",
"[",
"'index'",
",",
"'index.html'",
",",
"'org'",
",",
"'lib'",
",",
"'index.js'",
",",
"'package.html'",
"]",
"for",
"doc_file",
"in",
"javadocs",
":",
"_run_cmd",
"(",
"'cd '",
"+",
"java_path",
"+",
"' && mv -f '",
"+",
"doc_file",
"+",
"' '",
"+",
"dest_path",
"+",
"'; exit 0'",
")"
] |
build java docs and then move the outdir
|
[
"build",
"java",
"docs",
"and",
"then",
"move",
"the",
"outdir"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L137-L154
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
build_clojure_docs
|
def build_clojure_docs(app):
"""build clojure doc and then move the outdir"""
clojure_path = app.builder.srcdir + '/../contrib/clojure-package'
_run_cmd('cd ' + clojure_path + '; lein codox')
dest_path = app.builder.outdir + '/api/clojure/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
clojure_doc_path = app.builder.srcdir + '/../contrib/clojure-package/target/doc'
_run_cmd('cd ' + clojure_doc_path + ' && cp -r * ' + dest_path + '; exit 0')
|
python
|
def build_clojure_docs(app):
"""build clojure doc and then move the outdir"""
clojure_path = app.builder.srcdir + '/../contrib/clojure-package'
_run_cmd('cd ' + clojure_path + '; lein codox')
dest_path = app.builder.outdir + '/api/clojure/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
clojure_doc_path = app.builder.srcdir + '/../contrib/clojure-package/target/doc'
_run_cmd('cd ' + clojure_doc_path + ' && cp -r * ' + dest_path + '; exit 0')
|
[
"def",
"build_clojure_docs",
"(",
"app",
")",
":",
"clojure_path",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/../contrib/clojure-package'",
"_run_cmd",
"(",
"'cd '",
"+",
"clojure_path",
"+",
"'; lein codox'",
")",
"dest_path",
"=",
"app",
".",
"builder",
".",
"outdir",
"+",
"'/api/clojure/docs'",
"_run_cmd",
"(",
"'rm -rf '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'mkdir -p '",
"+",
"dest_path",
")",
"clojure_doc_path",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/../contrib/clojure-package/target/doc'",
"_run_cmd",
"(",
"'cd '",
"+",
"clojure_doc_path",
"+",
"' && cp -r * '",
"+",
"dest_path",
"+",
"'; exit 0'",
")"
] |
build clojure doc and then move the outdir
|
[
"build",
"clojure",
"doc",
"and",
"then",
"move",
"the",
"outdir"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L156-L164
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
_convert_md_table_to_rst
|
def _convert_md_table_to_rst(table):
"""Convert a markdown table to rst format"""
if len(table) < 3:
return ''
out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
for i,l in enumerate(table):
cols = l.split('|')[1:-1]
if i == 0:
ncol = len(cols)
else:
if len(cols) != ncol:
return ''
if i == 1:
for c in cols:
if len(c) is not 0 and '---' not in c:
return ''
else:
for j,c in enumerate(cols):
out += ' * - ' if j == 0 else ' - '
out += pypandoc.convert_text(
c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
out += '```\n'
return out
|
python
|
def _convert_md_table_to_rst(table):
"""Convert a markdown table to rst format"""
if len(table) < 3:
return ''
out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
for i,l in enumerate(table):
cols = l.split('|')[1:-1]
if i == 0:
ncol = len(cols)
else:
if len(cols) != ncol:
return ''
if i == 1:
for c in cols:
if len(c) is not 0 and '---' not in c:
return ''
else:
for j,c in enumerate(cols):
out += ' * - ' if j == 0 else ' - '
out += pypandoc.convert_text(
c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
out += '```\n'
return out
|
[
"def",
"_convert_md_table_to_rst",
"(",
"table",
")",
":",
"if",
"len",
"(",
"table",
")",
"<",
"3",
":",
"return",
"''",
"out",
"=",
"'```eval_rst\\n.. list-table::\\n :header-rows: 1\\n\\n'",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"table",
")",
":",
"cols",
"=",
"l",
".",
"split",
"(",
"'|'",
")",
"[",
"1",
":",
"-",
"1",
"]",
"if",
"i",
"==",
"0",
":",
"ncol",
"=",
"len",
"(",
"cols",
")",
"else",
":",
"if",
"len",
"(",
"cols",
")",
"!=",
"ncol",
":",
"return",
"''",
"if",
"i",
"==",
"1",
":",
"for",
"c",
"in",
"cols",
":",
"if",
"len",
"(",
"c",
")",
"is",
"not",
"0",
"and",
"'---'",
"not",
"in",
"c",
":",
"return",
"''",
"else",
":",
"for",
"j",
",",
"c",
"in",
"enumerate",
"(",
"cols",
")",
":",
"out",
"+=",
"' * - '",
"if",
"j",
"==",
"0",
"else",
"' - '",
"out",
"+=",
"pypandoc",
".",
"convert_text",
"(",
"c",
",",
"'rst'",
",",
"format",
"=",
"'md'",
")",
".",
"replace",
"(",
"'\\n'",
",",
"' '",
")",
".",
"replace",
"(",
"'\\r'",
",",
"''",
")",
"+",
"'\\n'",
"out",
"+=",
"'```\\n'",
"return",
"out"
] |
Convert a markdown table to rst format
|
[
"Convert",
"a",
"markdown",
"table",
"to",
"rst",
"format"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L166-L188
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
convert_table
|
def convert_table(app, docname, source):
"""Find tables in a markdown and then convert them into the rst format"""
num_tables = 0
for i,j in enumerate(source):
table = []
output = ''
in_table = False
for l in j.split('\n'):
r = l.strip()
if r.startswith('|'):
table.append(r)
in_table = True
else:
if in_table is True:
converted = _convert_md_table_to_rst(table)
if converted is '':
print("Failed to convert the markdown table")
print(table)
else:
num_tables += 1
output += converted
in_table = False
table = []
output += l + '\n'
source[i] = output
if num_tables > 0:
print('Converted %d tables in %s' % (num_tables, docname))
|
python
|
def convert_table(app, docname, source):
"""Find tables in a markdown and then convert them into the rst format"""
num_tables = 0
for i,j in enumerate(source):
table = []
output = ''
in_table = False
for l in j.split('\n'):
r = l.strip()
if r.startswith('|'):
table.append(r)
in_table = True
else:
if in_table is True:
converted = _convert_md_table_to_rst(table)
if converted is '':
print("Failed to convert the markdown table")
print(table)
else:
num_tables += 1
output += converted
in_table = False
table = []
output += l + '\n'
source[i] = output
if num_tables > 0:
print('Converted %d tables in %s' % (num_tables, docname))
|
[
"def",
"convert_table",
"(",
"app",
",",
"docname",
",",
"source",
")",
":",
"num_tables",
"=",
"0",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"source",
")",
":",
"table",
"=",
"[",
"]",
"output",
"=",
"''",
"in_table",
"=",
"False",
"for",
"l",
"in",
"j",
".",
"split",
"(",
"'\\n'",
")",
":",
"r",
"=",
"l",
".",
"strip",
"(",
")",
"if",
"r",
".",
"startswith",
"(",
"'|'",
")",
":",
"table",
".",
"append",
"(",
"r",
")",
"in_table",
"=",
"True",
"else",
":",
"if",
"in_table",
"is",
"True",
":",
"converted",
"=",
"_convert_md_table_to_rst",
"(",
"table",
")",
"if",
"converted",
"is",
"''",
":",
"print",
"(",
"\"Failed to convert the markdown table\"",
")",
"print",
"(",
"table",
")",
"else",
":",
"num_tables",
"+=",
"1",
"output",
"+=",
"converted",
"in_table",
"=",
"False",
"table",
"=",
"[",
"]",
"output",
"+=",
"l",
"+",
"'\\n'",
"source",
"[",
"i",
"]",
"=",
"output",
"if",
"num_tables",
">",
"0",
":",
"print",
"(",
"'Converted %d tables in %s'",
"%",
"(",
"num_tables",
",",
"docname",
")",
")"
] |
Find tables in a markdown and then convert them into the rst format
|
[
"Find",
"tables",
"in",
"a",
"markdown",
"and",
"then",
"convert",
"them",
"into",
"the",
"rst",
"format"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L191-L217
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
_parse_code_lines
|
def _parse_code_lines(lines):
"""A iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
- lang: the code block langunage
- indent: the code indent
"""
in_code = False
lang = None
indent = None
for l in lines:
m = _CODE_MARK.match(l)
if m is not None:
if not in_code:
if m.groups()[1].lower() in _LANGS:
lang = m.groups()[1].lower()
indent = len(m.groups()[0])
in_code = True
yield (l, in_code, lang, indent)
else:
yield (l, in_code, lang, indent)
lang = None
indent = None
in_code = False
else:
yield (l, in_code, lang, indent)
|
python
|
def _parse_code_lines(lines):
"""A iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
- lang: the code block langunage
- indent: the code indent
"""
in_code = False
lang = None
indent = None
for l in lines:
m = _CODE_MARK.match(l)
if m is not None:
if not in_code:
if m.groups()[1].lower() in _LANGS:
lang = m.groups()[1].lower()
indent = len(m.groups()[0])
in_code = True
yield (l, in_code, lang, indent)
else:
yield (l, in_code, lang, indent)
lang = None
indent = None
in_code = False
else:
yield (l, in_code, lang, indent)
|
[
"def",
"_parse_code_lines",
"(",
"lines",
")",
":",
"in_code",
"=",
"False",
"lang",
"=",
"None",
"indent",
"=",
"None",
"for",
"l",
"in",
"lines",
":",
"m",
"=",
"_CODE_MARK",
".",
"match",
"(",
"l",
")",
"if",
"m",
"is",
"not",
"None",
":",
"if",
"not",
"in_code",
":",
"if",
"m",
".",
"groups",
"(",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"in",
"_LANGS",
":",
"lang",
"=",
"m",
".",
"groups",
"(",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"indent",
"=",
"len",
"(",
"m",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
"in_code",
"=",
"True",
"yield",
"(",
"l",
",",
"in_code",
",",
"lang",
",",
"indent",
")",
"else",
":",
"yield",
"(",
"l",
",",
"in_code",
",",
"lang",
",",
"indent",
")",
"lang",
"=",
"None",
"indent",
"=",
"None",
"in_code",
"=",
"False",
"else",
":",
"yield",
"(",
"l",
",",
"in_code",
",",
"lang",
",",
"indent",
")"
] |
A iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
- lang: the code block langunage
- indent: the code indent
|
[
"A",
"iterator",
"that",
"returns",
"if",
"a",
"line",
"is",
"within",
"a",
"code",
"block"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L219-L248
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
_get_blocks
|
def _get_blocks(lines):
"""split lines into code and non-code blocks
Returns
-------
iterator of (bool, str, list of str)
- if it is a code block
- source language
- lines of source
"""
cur_block = []
pre_lang = None
pre_in_code = None
for (l, in_code, cur_lang, _) in _parse_code_lines(lines):
if in_code != pre_in_code:
if pre_in_code and len(cur_block) >= 2:
cur_block = cur_block[1:-1] # remove ```
# remove empty lines at head
while len(cur_block) > 0:
if len(cur_block[0]) == 0:
cur_block.pop(0)
else:
break
# remove empty lines at tail
while len(cur_block) > 0:
if len(cur_block[-1]) == 0:
cur_block.pop()
else:
break
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
cur_block = []
cur_block.append(l)
pre_lang = cur_lang
pre_in_code = in_code
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
|
python
|
def _get_blocks(lines):
"""split lines into code and non-code blocks
Returns
-------
iterator of (bool, str, list of str)
- if it is a code block
- source language
- lines of source
"""
cur_block = []
pre_lang = None
pre_in_code = None
for (l, in_code, cur_lang, _) in _parse_code_lines(lines):
if in_code != pre_in_code:
if pre_in_code and len(cur_block) >= 2:
cur_block = cur_block[1:-1] # remove ```
# remove empty lines at head
while len(cur_block) > 0:
if len(cur_block[0]) == 0:
cur_block.pop(0)
else:
break
# remove empty lines at tail
while len(cur_block) > 0:
if len(cur_block[-1]) == 0:
cur_block.pop()
else:
break
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
cur_block = []
cur_block.append(l)
pre_lang = cur_lang
pre_in_code = in_code
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
|
[
"def",
"_get_blocks",
"(",
"lines",
")",
":",
"cur_block",
"=",
"[",
"]",
"pre_lang",
"=",
"None",
"pre_in_code",
"=",
"None",
"for",
"(",
"l",
",",
"in_code",
",",
"cur_lang",
",",
"_",
")",
"in",
"_parse_code_lines",
"(",
"lines",
")",
":",
"if",
"in_code",
"!=",
"pre_in_code",
":",
"if",
"pre_in_code",
"and",
"len",
"(",
"cur_block",
")",
">=",
"2",
":",
"cur_block",
"=",
"cur_block",
"[",
"1",
":",
"-",
"1",
"]",
"# remove ```",
"# remove empty lines at head",
"while",
"len",
"(",
"cur_block",
")",
">",
"0",
":",
"if",
"len",
"(",
"cur_block",
"[",
"0",
"]",
")",
"==",
"0",
":",
"cur_block",
".",
"pop",
"(",
"0",
")",
"else",
":",
"break",
"# remove empty lines at tail",
"while",
"len",
"(",
"cur_block",
")",
">",
"0",
":",
"if",
"len",
"(",
"cur_block",
"[",
"-",
"1",
"]",
")",
"==",
"0",
":",
"cur_block",
".",
"pop",
"(",
")",
"else",
":",
"break",
"if",
"len",
"(",
"cur_block",
")",
":",
"yield",
"(",
"pre_in_code",
",",
"pre_lang",
",",
"cur_block",
")",
"cur_block",
"=",
"[",
"]",
"cur_block",
".",
"append",
"(",
"l",
")",
"pre_lang",
"=",
"cur_lang",
"pre_in_code",
"=",
"in_code",
"if",
"len",
"(",
"cur_block",
")",
":",
"yield",
"(",
"pre_in_code",
",",
"pre_lang",
",",
"cur_block",
")"
] |
split lines into code and non-code blocks
Returns
-------
iterator of (bool, str, list of str)
- if it is a code block
- source language
- lines of source
|
[
"split",
"lines",
"into",
"code",
"and",
"non",
"-",
"code",
"blocks"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L260-L296
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
_get_python_block_output
|
def _get_python_block_output(src, global_dict, local_dict):
"""Evaluate python source codes
Returns
(bool, str):
- True if success
- output
"""
src = '\n'.join([l for l in src.split('\n')
if not l.startswith('%') and not 'plt.show()' in l])
ret_status = True
err = ''
with _string_io() as s:
try:
exec(src, global_dict, global_dict)
except Exception as e:
err = str(e)
ret_status = False
return (ret_status, s.getvalue()+err)
|
python
|
def _get_python_block_output(src, global_dict, local_dict):
"""Evaluate python source codes
Returns
(bool, str):
- True if success
- output
"""
src = '\n'.join([l for l in src.split('\n')
if not l.startswith('%') and not 'plt.show()' in l])
ret_status = True
err = ''
with _string_io() as s:
try:
exec(src, global_dict, global_dict)
except Exception as e:
err = str(e)
ret_status = False
return (ret_status, s.getvalue()+err)
|
[
"def",
"_get_python_block_output",
"(",
"src",
",",
"global_dict",
",",
"local_dict",
")",
":",
"src",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"l",
"for",
"l",
"in",
"src",
".",
"split",
"(",
"'\\n'",
")",
"if",
"not",
"l",
".",
"startswith",
"(",
"'%'",
")",
"and",
"not",
"'plt.show()'",
"in",
"l",
"]",
")",
"ret_status",
"=",
"True",
"err",
"=",
"''",
"with",
"_string_io",
"(",
")",
"as",
"s",
":",
"try",
":",
"exec",
"(",
"src",
",",
"global_dict",
",",
"global_dict",
")",
"except",
"Exception",
"as",
"e",
":",
"err",
"=",
"str",
"(",
"e",
")",
"ret_status",
"=",
"False",
"return",
"(",
"ret_status",
",",
"s",
".",
"getvalue",
"(",
")",
"+",
"err",
")"
] |
Evaluate python source codes
Returns
(bool, str):
- True if success
- output
|
[
"Evaluate",
"python",
"source",
"codes"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L321-L339
|
train
|
apache/incubator-mxnet
|
docs/mxdoc.py
|
copy_artifacts
|
def copy_artifacts(app):
"""Copies artifacts needed for website presentation"""
dest_path = app.builder.outdir + '/error'
source_path = app.builder.srcdir + '/build_version_doc/artifacts'
_run_cmd('cd ' + app.builder.srcdir)
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
_run_cmd('cp ' + source_path + '/404.html ' + dest_path)
_run_cmd('cp ' + source_path + '/api.html ' + dest_path)
dest_path = app.builder.outdir + '/_static'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
_run_cmd('cp ' + app.builder.srcdir + '/_static/mxnet.css ' + dest_path)
|
python
|
def copy_artifacts(app):
"""Copies artifacts needed for website presentation"""
dest_path = app.builder.outdir + '/error'
source_path = app.builder.srcdir + '/build_version_doc/artifacts'
_run_cmd('cd ' + app.builder.srcdir)
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
_run_cmd('cp ' + source_path + '/404.html ' + dest_path)
_run_cmd('cp ' + source_path + '/api.html ' + dest_path)
dest_path = app.builder.outdir + '/_static'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
_run_cmd('cp ' + app.builder.srcdir + '/_static/mxnet.css ' + dest_path)
|
[
"def",
"copy_artifacts",
"(",
"app",
")",
":",
"dest_path",
"=",
"app",
".",
"builder",
".",
"outdir",
"+",
"'/error'",
"source_path",
"=",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/build_version_doc/artifacts'",
"_run_cmd",
"(",
"'cd '",
"+",
"app",
".",
"builder",
".",
"srcdir",
")",
"_run_cmd",
"(",
"'rm -rf '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'mkdir -p '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'cp '",
"+",
"source_path",
"+",
"'/404.html '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'cp '",
"+",
"source_path",
"+",
"'/api.html '",
"+",
"dest_path",
")",
"dest_path",
"=",
"app",
".",
"builder",
".",
"outdir",
"+",
"'/_static'",
"_run_cmd",
"(",
"'rm -rf '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'mkdir -p '",
"+",
"dest_path",
")",
"_run_cmd",
"(",
"'cp '",
"+",
"app",
".",
"builder",
".",
"srcdir",
"+",
"'/_static/mxnet.css '",
"+",
"dest_path",
")"
] |
Copies artifacts needed for website presentation
|
[
"Copies",
"artifacts",
"needed",
"for",
"website",
"presentation"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/docs/mxdoc.py#L443-L455
|
train
|
apache/incubator-mxnet
|
tools/caffe_converter/convert_caffe_modelzoo.py
|
download_caffe_model
|
def download_caffe_model(model_name, meta_info, dst_dir='./model'):
"""Download caffe model into disk by the given meta info """
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
model_name = os.path.join(dst_dir, model_name)
assert 'prototxt' in meta_info, "missing prototxt url"
proto_url, proto_sha1 = meta_info['prototxt']
prototxt = mx.gluon.utils.download(proto_url,
model_name+'_deploy.prototxt',
sha1_hash=proto_sha1)
assert 'caffemodel' in meta_info, "mssing caffemodel url"
caffemodel_url, caffemodel_sha1 = meta_info['caffemodel']
caffemodel = mx.gluon.utils.download(caffemodel_url,
model_name+'.caffemodel',
sha1_hash=caffemodel_sha1)
assert 'mean' in meta_info, 'no mean info'
mean = meta_info['mean']
if isinstance(mean[0], str):
mean_url, mean_sha1 = mean
mean = mx.gluon.utils.download(mean_url,
model_name+'_mean.binaryproto',
sha1_hash=mean_sha1)
return (prototxt, caffemodel, mean)
|
python
|
def download_caffe_model(model_name, meta_info, dst_dir='./model'):
"""Download caffe model into disk by the given meta info """
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
model_name = os.path.join(dst_dir, model_name)
assert 'prototxt' in meta_info, "missing prototxt url"
proto_url, proto_sha1 = meta_info['prototxt']
prototxt = mx.gluon.utils.download(proto_url,
model_name+'_deploy.prototxt',
sha1_hash=proto_sha1)
assert 'caffemodel' in meta_info, "mssing caffemodel url"
caffemodel_url, caffemodel_sha1 = meta_info['caffemodel']
caffemodel = mx.gluon.utils.download(caffemodel_url,
model_name+'.caffemodel',
sha1_hash=caffemodel_sha1)
assert 'mean' in meta_info, 'no mean info'
mean = meta_info['mean']
if isinstance(mean[0], str):
mean_url, mean_sha1 = mean
mean = mx.gluon.utils.download(mean_url,
model_name+'_mean.binaryproto',
sha1_hash=mean_sha1)
return (prototxt, caffemodel, mean)
|
[
"def",
"download_caffe_model",
"(",
"model_name",
",",
"meta_info",
",",
"dst_dir",
"=",
"'./model'",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dst_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"dst_dir",
")",
"model_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst_dir",
",",
"model_name",
")",
"assert",
"'prototxt'",
"in",
"meta_info",
",",
"\"missing prototxt url\"",
"proto_url",
",",
"proto_sha1",
"=",
"meta_info",
"[",
"'prototxt'",
"]",
"prototxt",
"=",
"mx",
".",
"gluon",
".",
"utils",
".",
"download",
"(",
"proto_url",
",",
"model_name",
"+",
"'_deploy.prototxt'",
",",
"sha1_hash",
"=",
"proto_sha1",
")",
"assert",
"'caffemodel'",
"in",
"meta_info",
",",
"\"mssing caffemodel url\"",
"caffemodel_url",
",",
"caffemodel_sha1",
"=",
"meta_info",
"[",
"'caffemodel'",
"]",
"caffemodel",
"=",
"mx",
".",
"gluon",
".",
"utils",
".",
"download",
"(",
"caffemodel_url",
",",
"model_name",
"+",
"'.caffemodel'",
",",
"sha1_hash",
"=",
"caffemodel_sha1",
")",
"assert",
"'mean'",
"in",
"meta_info",
",",
"'no mean info'",
"mean",
"=",
"meta_info",
"[",
"'mean'",
"]",
"if",
"isinstance",
"(",
"mean",
"[",
"0",
"]",
",",
"str",
")",
":",
"mean_url",
",",
"mean_sha1",
"=",
"mean",
"mean",
"=",
"mx",
".",
"gluon",
".",
"utils",
".",
"download",
"(",
"mean_url",
",",
"model_name",
"+",
"'_mean.binaryproto'",
",",
"sha1_hash",
"=",
"mean_sha1",
")",
"return",
"(",
"prototxt",
",",
"caffemodel",
",",
"mean",
")"
] |
Download caffe model into disk by the given meta info
|
[
"Download",
"caffe",
"model",
"into",
"disk",
"by",
"the",
"given",
"meta",
"info"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_converter/convert_caffe_modelzoo.py#L118-L142
|
train
|
apache/incubator-mxnet
|
tools/caffe_converter/convert_caffe_modelzoo.py
|
convert_caffe_model
|
def convert_caffe_model(model_name, meta_info, dst_dir='./model'):
"""Download, convert and save a caffe model"""
(prototxt, caffemodel, mean) = download_caffe_model(model_name, meta_info, dst_dir)
model_name = os.path.join(dst_dir, model_name)
convert_model(prototxt, caffemodel, model_name)
if isinstance(mean, str):
mx_mean = model_name + '-mean.nd'
convert_mean(mean, mx_mean)
mean = mx_mean
return (model_name, mean)
|
python
|
def convert_caffe_model(model_name, meta_info, dst_dir='./model'):
"""Download, convert and save a caffe model"""
(prototxt, caffemodel, mean) = download_caffe_model(model_name, meta_info, dst_dir)
model_name = os.path.join(dst_dir, model_name)
convert_model(prototxt, caffemodel, model_name)
if isinstance(mean, str):
mx_mean = model_name + '-mean.nd'
convert_mean(mean, mx_mean)
mean = mx_mean
return (model_name, mean)
|
[
"def",
"convert_caffe_model",
"(",
"model_name",
",",
"meta_info",
",",
"dst_dir",
"=",
"'./model'",
")",
":",
"(",
"prototxt",
",",
"caffemodel",
",",
"mean",
")",
"=",
"download_caffe_model",
"(",
"model_name",
",",
"meta_info",
",",
"dst_dir",
")",
"model_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst_dir",
",",
"model_name",
")",
"convert_model",
"(",
"prototxt",
",",
"caffemodel",
",",
"model_name",
")",
"if",
"isinstance",
"(",
"mean",
",",
"str",
")",
":",
"mx_mean",
"=",
"model_name",
"+",
"'-mean.nd'",
"convert_mean",
"(",
"mean",
",",
"mx_mean",
")",
"mean",
"=",
"mx_mean",
"return",
"(",
"model_name",
",",
"mean",
")"
] |
Download, convert and save a caffe model
|
[
"Download",
"convert",
"and",
"save",
"a",
"caffe",
"model"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/tools/caffe_converter/convert_caffe_modelzoo.py#L144-L154
|
train
|
apache/incubator-mxnet
|
example/gluon/lipnet/utils/multi.py
|
multi_p_run
|
def multi_p_run(tot_num, _func, worker, params, n_process):
"""
Run _func with multi-process using params.
"""
from multiprocessing import Process, Queue
out_q = Queue()
procs = []
split_num = split_seq(list(range(0, tot_num)), n_process)
print(tot_num, ">>", split_num)
split_len = len(split_num)
if n_process > split_len:
n_process = split_len
for i in range(n_process):
_p = Process(target=_func,
args=(worker, split_num[i][0], split_num[i][1],
params, out_q))
_p.daemon = True
procs.append(_p)
_p.start()
try:
result = []
for i in range(n_process):
result.append(out_q.get())
for i in procs:
i.join()
except KeyboardInterrupt:
print('Killing all the children in the pool.')
for i in procs:
i.terminate()
i.join()
return -1
while not out_q.empty():
print(out_q.get(block=False))
return result
|
python
|
def multi_p_run(tot_num, _func, worker, params, n_process):
"""
Run _func with multi-process using params.
"""
from multiprocessing import Process, Queue
out_q = Queue()
procs = []
split_num = split_seq(list(range(0, tot_num)), n_process)
print(tot_num, ">>", split_num)
split_len = len(split_num)
if n_process > split_len:
n_process = split_len
for i in range(n_process):
_p = Process(target=_func,
args=(worker, split_num[i][0], split_num[i][1],
params, out_q))
_p.daemon = True
procs.append(_p)
_p.start()
try:
result = []
for i in range(n_process):
result.append(out_q.get())
for i in procs:
i.join()
except KeyboardInterrupt:
print('Killing all the children in the pool.')
for i in procs:
i.terminate()
i.join()
return -1
while not out_q.empty():
print(out_q.get(block=False))
return result
|
[
"def",
"multi_p_run",
"(",
"tot_num",
",",
"_func",
",",
"worker",
",",
"params",
",",
"n_process",
")",
":",
"from",
"multiprocessing",
"import",
"Process",
",",
"Queue",
"out_q",
"=",
"Queue",
"(",
")",
"procs",
"=",
"[",
"]",
"split_num",
"=",
"split_seq",
"(",
"list",
"(",
"range",
"(",
"0",
",",
"tot_num",
")",
")",
",",
"n_process",
")",
"print",
"(",
"tot_num",
",",
"\">>\"",
",",
"split_num",
")",
"split_len",
"=",
"len",
"(",
"split_num",
")",
"if",
"n_process",
">",
"split_len",
":",
"n_process",
"=",
"split_len",
"for",
"i",
"in",
"range",
"(",
"n_process",
")",
":",
"_p",
"=",
"Process",
"(",
"target",
"=",
"_func",
",",
"args",
"=",
"(",
"worker",
",",
"split_num",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"split_num",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"params",
",",
"out_q",
")",
")",
"_p",
".",
"daemon",
"=",
"True",
"procs",
".",
"append",
"(",
"_p",
")",
"_p",
".",
"start",
"(",
")",
"try",
":",
"result",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_process",
")",
":",
"result",
".",
"append",
"(",
"out_q",
".",
"get",
"(",
")",
")",
"for",
"i",
"in",
"procs",
":",
"i",
".",
"join",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"'Killing all the children in the pool.'",
")",
"for",
"i",
"in",
"procs",
":",
"i",
".",
"terminate",
"(",
")",
"i",
".",
"join",
"(",
")",
"return",
"-",
"1",
"while",
"not",
"out_q",
".",
"empty",
"(",
")",
":",
"print",
"(",
"out_q",
".",
"get",
"(",
"block",
"=",
"False",
")",
")",
"return",
"result"
] |
Run _func with multi-process using params.
|
[
"Run",
"_func",
"with",
"multi",
"-",
"process",
"using",
"params",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/utils/multi.py#L23-L63
|
train
|
apache/incubator-mxnet
|
example/gluon/lipnet/utils/multi.py
|
split_seq
|
def split_seq(sam_num, n_tile):
"""
Split the number(sam_num) into numbers by n_tile
"""
import math
print(sam_num)
print(n_tile)
start_num = sam_num[0::int(math.ceil(len(sam_num) / (n_tile)))]
end_num = start_num[1::]
end_num.append(len(sam_num))
return [[i, j] for i, j in zip(start_num, end_num)]
|
python
|
def split_seq(sam_num, n_tile):
"""
Split the number(sam_num) into numbers by n_tile
"""
import math
print(sam_num)
print(n_tile)
start_num = sam_num[0::int(math.ceil(len(sam_num) / (n_tile)))]
end_num = start_num[1::]
end_num.append(len(sam_num))
return [[i, j] for i, j in zip(start_num, end_num)]
|
[
"def",
"split_seq",
"(",
"sam_num",
",",
"n_tile",
")",
":",
"import",
"math",
"print",
"(",
"sam_num",
")",
"print",
"(",
"n_tile",
")",
"start_num",
"=",
"sam_num",
"[",
"0",
":",
":",
"int",
"(",
"math",
".",
"ceil",
"(",
"len",
"(",
"sam_num",
")",
"/",
"(",
"n_tile",
")",
")",
")",
"]",
"end_num",
"=",
"start_num",
"[",
"1",
":",
":",
"]",
"end_num",
".",
"append",
"(",
"len",
"(",
"sam_num",
")",
")",
"return",
"[",
"[",
"i",
",",
"j",
"]",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"start_num",
",",
"end_num",
")",
"]"
] |
Split the number(sam_num) into numbers by n_tile
|
[
"Split",
"the",
"number",
"(",
"sam_num",
")",
"into",
"numbers",
"by",
"n_tile"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/utils/multi.py#L66-L76
|
train
|
apache/incubator-mxnet
|
example/gluon/lipnet/utils/multi.py
|
put_worker
|
def put_worker(func, from_idx, to_idx, params, out_q):
"""
put worker
"""
succ, fail = func(from_idx, to_idx, params)
return out_q.put({'succ': succ, 'fail': fail})
|
python
|
def put_worker(func, from_idx, to_idx, params, out_q):
"""
put worker
"""
succ, fail = func(from_idx, to_idx, params)
return out_q.put({'succ': succ, 'fail': fail})
|
[
"def",
"put_worker",
"(",
"func",
",",
"from_idx",
",",
"to_idx",
",",
"params",
",",
"out_q",
")",
":",
"succ",
",",
"fail",
"=",
"func",
"(",
"from_idx",
",",
"to_idx",
",",
"params",
")",
"return",
"out_q",
".",
"put",
"(",
"{",
"'succ'",
":",
"succ",
",",
"'fail'",
":",
"fail",
"}",
")"
] |
put worker
|
[
"put",
"worker"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/gluon/lipnet/utils/multi.py#L79-L84
|
train
|
apache/incubator-mxnet
|
example/ssd/config/utils.py
|
namedtuple_with_defaults
|
def namedtuple_with_defaults(typename, field_names, default_values=()):
""" create a namedtuple with default values """
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None, ) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
|
python
|
def namedtuple_with_defaults(typename, field_names, default_values=()):
""" create a namedtuple with default values """
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None, ) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
|
[
"def",
"namedtuple_with_defaults",
"(",
"typename",
",",
"field_names",
",",
"default_values",
"=",
"(",
")",
")",
":",
"T",
"=",
"collections",
".",
"namedtuple",
"(",
"typename",
",",
"field_names",
")",
"T",
".",
"__new__",
".",
"__defaults__",
"=",
"(",
"None",
",",
")",
"*",
"len",
"(",
"T",
".",
"_fields",
")",
"if",
"isinstance",
"(",
"default_values",
",",
"collections",
".",
"Mapping",
")",
":",
"prototype",
"=",
"T",
"(",
"*",
"*",
"default_values",
")",
"else",
":",
"prototype",
"=",
"T",
"(",
"*",
"default_values",
")",
"T",
".",
"__new__",
".",
"__defaults__",
"=",
"tuple",
"(",
"prototype",
")",
"return",
"T"
] |
create a namedtuple with default values
|
[
"create",
"a",
"namedtuple",
"with",
"default",
"values"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/config/utils.py#L61-L70
|
train
|
apache/incubator-mxnet
|
example/ssd/config/utils.py
|
merge_dict
|
def merge_dict(a, b):
""" merge dict a, b, with b overriding keys in a """
c = a.copy()
c.update(b)
return c
|
python
|
def merge_dict(a, b):
""" merge dict a, b, with b overriding keys in a """
c = a.copy()
c.update(b)
return c
|
[
"def",
"merge_dict",
"(",
"a",
",",
"b",
")",
":",
"c",
"=",
"a",
".",
"copy",
"(",
")",
"c",
".",
"update",
"(",
"b",
")",
"return",
"c"
] |
merge dict a, b, with b overriding keys in a
|
[
"merge",
"dict",
"a",
"b",
"with",
"b",
"overriding",
"keys",
"in",
"a"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/config/utils.py#L72-L76
|
train
|
apache/incubator-mxnet
|
example/ssd/config/utils.py
|
zip_namedtuple
|
def zip_namedtuple(nt_list):
""" accept list of namedtuple, return a dict of zipped fields """
if not nt_list:
return dict()
if not isinstance(nt_list, list):
nt_list = [nt_list]
for nt in nt_list:
assert type(nt) == type(nt_list[0])
ret = {k : [v] for k, v in nt_list[0]._asdict().items()}
for nt in nt_list[1:]:
for k, v in nt._asdict().items():
ret[k].append(v)
return ret
|
python
|
def zip_namedtuple(nt_list):
""" accept list of namedtuple, return a dict of zipped fields """
if not nt_list:
return dict()
if not isinstance(nt_list, list):
nt_list = [nt_list]
for nt in nt_list:
assert type(nt) == type(nt_list[0])
ret = {k : [v] for k, v in nt_list[0]._asdict().items()}
for nt in nt_list[1:]:
for k, v in nt._asdict().items():
ret[k].append(v)
return ret
|
[
"def",
"zip_namedtuple",
"(",
"nt_list",
")",
":",
"if",
"not",
"nt_list",
":",
"return",
"dict",
"(",
")",
"if",
"not",
"isinstance",
"(",
"nt_list",
",",
"list",
")",
":",
"nt_list",
"=",
"[",
"nt_list",
"]",
"for",
"nt",
"in",
"nt_list",
":",
"assert",
"type",
"(",
"nt",
")",
"==",
"type",
"(",
"nt_list",
"[",
"0",
"]",
")",
"ret",
"=",
"{",
"k",
":",
"[",
"v",
"]",
"for",
"k",
",",
"v",
"in",
"nt_list",
"[",
"0",
"]",
".",
"_asdict",
"(",
")",
".",
"items",
"(",
")",
"}",
"for",
"nt",
"in",
"nt_list",
"[",
"1",
":",
"]",
":",
"for",
"k",
",",
"v",
"in",
"nt",
".",
"_asdict",
"(",
")",
".",
"items",
"(",
")",
":",
"ret",
"[",
"k",
"]",
".",
"append",
"(",
"v",
")",
"return",
"ret"
] |
accept list of namedtuple, return a dict of zipped fields
|
[
"accept",
"list",
"of",
"namedtuple",
"return",
"a",
"dict",
"of",
"zipped",
"fields"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/config/utils.py#L78-L90
|
train
|
apache/incubator-mxnet
|
example/ssd/config/utils.py
|
config_as_dict
|
def config_as_dict(cfg):
""" convert raw configuration to unified dictionary """
ret = cfg.__dict__.copy()
# random cropping params
del ret['rand_crop_samplers']
assert isinstance(cfg.rand_crop_samplers, list)
ret = merge_dict(ret, zip_namedtuple(cfg.rand_crop_samplers))
num_crop_sampler = len(cfg.rand_crop_samplers)
ret['num_crop_sampler'] = num_crop_sampler # must specify the #
ret['rand_crop_prob'] = 1.0 / (num_crop_sampler + 1) * num_crop_sampler
# random padding params
del ret['rand_pad']
ret = merge_dict(ret, cfg.rand_pad._asdict())
# color jitter
del ret['color_jitter']
ret = merge_dict(ret, cfg.color_jitter._asdict())
return ret
|
python
|
def config_as_dict(cfg):
""" convert raw configuration to unified dictionary """
ret = cfg.__dict__.copy()
# random cropping params
del ret['rand_crop_samplers']
assert isinstance(cfg.rand_crop_samplers, list)
ret = merge_dict(ret, zip_namedtuple(cfg.rand_crop_samplers))
num_crop_sampler = len(cfg.rand_crop_samplers)
ret['num_crop_sampler'] = num_crop_sampler # must specify the #
ret['rand_crop_prob'] = 1.0 / (num_crop_sampler + 1) * num_crop_sampler
# random padding params
del ret['rand_pad']
ret = merge_dict(ret, cfg.rand_pad._asdict())
# color jitter
del ret['color_jitter']
ret = merge_dict(ret, cfg.color_jitter._asdict())
return ret
|
[
"def",
"config_as_dict",
"(",
"cfg",
")",
":",
"ret",
"=",
"cfg",
".",
"__dict__",
".",
"copy",
"(",
")",
"# random cropping params",
"del",
"ret",
"[",
"'rand_crop_samplers'",
"]",
"assert",
"isinstance",
"(",
"cfg",
".",
"rand_crop_samplers",
",",
"list",
")",
"ret",
"=",
"merge_dict",
"(",
"ret",
",",
"zip_namedtuple",
"(",
"cfg",
".",
"rand_crop_samplers",
")",
")",
"num_crop_sampler",
"=",
"len",
"(",
"cfg",
".",
"rand_crop_samplers",
")",
"ret",
"[",
"'num_crop_sampler'",
"]",
"=",
"num_crop_sampler",
"# must specify the #",
"ret",
"[",
"'rand_crop_prob'",
"]",
"=",
"1.0",
"/",
"(",
"num_crop_sampler",
"+",
"1",
")",
"*",
"num_crop_sampler",
"# random padding params",
"del",
"ret",
"[",
"'rand_pad'",
"]",
"ret",
"=",
"merge_dict",
"(",
"ret",
",",
"cfg",
".",
"rand_pad",
".",
"_asdict",
"(",
")",
")",
"# color jitter",
"del",
"ret",
"[",
"'color_jitter'",
"]",
"ret",
"=",
"merge_dict",
"(",
"ret",
",",
"cfg",
".",
"color_jitter",
".",
"_asdict",
"(",
")",
")",
"return",
"ret"
] |
convert raw configuration to unified dictionary
|
[
"convert",
"raw",
"configuration",
"to",
"unified",
"dictionary"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/config/utils.py#L92-L108
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/onnx/onnx2mx/import_model.py
|
import_model
|
def import_model(model_file):
"""Imports the ONNX model file, passed as a parameter, into MXNet symbol and parameters.
Operator support and coverage -
https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration
Parameters
----------
model_file : str
ONNX model file name
Returns
-------
sym : :class:`~mxnet.symbol.Symbol`
MXNet symbol object
arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
"""
graph = GraphProto()
try:
import onnx
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
# loads model file and returns ONNX protobuf object
model_proto = onnx.load_model(model_file)
sym, arg_params, aux_params = graph.from_onnx(model_proto.graph)
return sym, arg_params, aux_params
|
python
|
def import_model(model_file):
"""Imports the ONNX model file, passed as a parameter, into MXNet symbol and parameters.
Operator support and coverage -
https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration
Parameters
----------
model_file : str
ONNX model file name
Returns
-------
sym : :class:`~mxnet.symbol.Symbol`
MXNet symbol object
arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
"""
graph = GraphProto()
try:
import onnx
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
# loads model file and returns ONNX protobuf object
model_proto = onnx.load_model(model_file)
sym, arg_params, aux_params = graph.from_onnx(model_proto.graph)
return sym, arg_params, aux_params
|
[
"def",
"import_model",
"(",
"model_file",
")",
":",
"graph",
"=",
"GraphProto",
"(",
")",
"try",
":",
"import",
"onnx",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Onnx and protobuf need to be installed. \"",
"+",
"\"Instructions to install - https://github.com/onnx/onnx\"",
")",
"# loads model file and returns ONNX protobuf object",
"model_proto",
"=",
"onnx",
".",
"load_model",
"(",
"model_file",
")",
"sym",
",",
"arg_params",
",",
"aux_params",
"=",
"graph",
".",
"from_onnx",
"(",
"model_proto",
".",
"graph",
")",
"return",
"sym",
",",
"arg_params",
",",
"aux_params"
] |
Imports the ONNX model file, passed as a parameter, into MXNet symbol and parameters.
Operator support and coverage -
https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration
Parameters
----------
model_file : str
ONNX model file name
Returns
-------
sym : :class:`~mxnet.symbol.Symbol`
MXNet symbol object
arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
|
[
"Imports",
"the",
"ONNX",
"model",
"file",
"passed",
"as",
"a",
"parameter",
"into",
"MXNet",
"symbol",
"and",
"parameters",
".",
"Operator",
"support",
"and",
"coverage",
"-",
"https",
":",
"//",
"cwiki",
".",
"apache",
".",
"org",
"/",
"confluence",
"/",
"display",
"/",
"MXNET",
"/",
"MXNet",
"-",
"ONNX",
"+",
"Integration"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/import_model.py#L24-L60
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/onnx/onnx2mx/import_model.py
|
get_model_metadata
|
def get_model_metadata(model_file):
"""
Returns the name and shape information of input and output tensors of the given ONNX model file.
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
Parameters
----------
model_file : str
ONNX model file name
Returns
-------
model_metadata : dict
A dictionary object mapping various metadata to its corresponding value.
The dictionary will have the following template::
'input_tensor_data' : list of tuples representing the shape of the input paramters
'output_tensor_data' : list of tuples representing the shape of the output of the model
"""
graph = GraphProto()
try:
import onnx
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
model_proto = onnx.load_model(model_file)
metadata = graph.get_graph_metadata(model_proto.graph)
return metadata
|
python
|
def get_model_metadata(model_file):
"""
Returns the name and shape information of input and output tensors of the given ONNX model file.
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
Parameters
----------
model_file : str
ONNX model file name
Returns
-------
model_metadata : dict
A dictionary object mapping various metadata to its corresponding value.
The dictionary will have the following template::
'input_tensor_data' : list of tuples representing the shape of the input paramters
'output_tensor_data' : list of tuples representing the shape of the output of the model
"""
graph = GraphProto()
try:
import onnx
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
model_proto = onnx.load_model(model_file)
metadata = graph.get_graph_metadata(model_proto.graph)
return metadata
|
[
"def",
"get_model_metadata",
"(",
"model_file",
")",
":",
"graph",
"=",
"GraphProto",
"(",
")",
"try",
":",
"import",
"onnx",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"\"Onnx and protobuf need to be installed. \"",
"+",
"\"Instructions to install - https://github.com/onnx/onnx\"",
")",
"model_proto",
"=",
"onnx",
".",
"load_model",
"(",
"model_file",
")",
"metadata",
"=",
"graph",
".",
"get_graph_metadata",
"(",
"model_proto",
".",
"graph",
")",
"return",
"metadata"
] |
Returns the name and shape information of input and output tensors of the given ONNX model file.
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
Parameters
----------
model_file : str
ONNX model file name
Returns
-------
model_metadata : dict
A dictionary object mapping various metadata to its corresponding value.
The dictionary will have the following template::
'input_tensor_data' : list of tuples representing the shape of the input paramters
'output_tensor_data' : list of tuples representing the shape of the output of the model
|
[
"Returns",
"the",
"name",
"and",
"shape",
"information",
"of",
"input",
"and",
"output",
"tensors",
"of",
"the",
"given",
"ONNX",
"model",
"file",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/onnx2mx/import_model.py#L62-L93
|
train
|
apache/incubator-mxnet
|
example/ssd/symbol/common.py
|
conv_act_layer
|
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False):
"""
wrapper for a small Convolution group
Parameters:
----------
from_layer : mx.symbol
continue on which layer
name : str
base name of the new layers
num_filter : int
how many filters to use in Convolution layer
kernel : tuple (int, int)
kernel size (h, w)
pad : tuple (int, int)
padding size (h, w)
stride : tuple (int, int)
stride size (h, w)
act_type : str
activation type, can be relu...
use_batchnorm : bool
whether to use batch normalization
Returns:
----------
(conv, relu) mx.Symbols
"""
conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
stride=stride, num_filter=num_filter, name="{}_conv".format(name))
if use_batchnorm:
conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name))
relu = mx.symbol.Activation(data=conv, act_type=act_type, \
name="{}_{}".format(name, act_type))
return relu
|
python
|
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False):
"""
wrapper for a small Convolution group
Parameters:
----------
from_layer : mx.symbol
continue on which layer
name : str
base name of the new layers
num_filter : int
how many filters to use in Convolution layer
kernel : tuple (int, int)
kernel size (h, w)
pad : tuple (int, int)
padding size (h, w)
stride : tuple (int, int)
stride size (h, w)
act_type : str
activation type, can be relu...
use_batchnorm : bool
whether to use batch normalization
Returns:
----------
(conv, relu) mx.Symbols
"""
conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
stride=stride, num_filter=num_filter, name="{}_conv".format(name))
if use_batchnorm:
conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name))
relu = mx.symbol.Activation(data=conv, act_type=act_type, \
name="{}_{}".format(name, act_type))
return relu
|
[
"def",
"conv_act_layer",
"(",
"from_layer",
",",
"name",
",",
"num_filter",
",",
"kernel",
"=",
"(",
"1",
",",
"1",
")",
",",
"pad",
"=",
"(",
"0",
",",
"0",
")",
",",
"stride",
"=",
"(",
"1",
",",
"1",
")",
",",
"act_type",
"=",
"\"relu\"",
",",
"use_batchnorm",
"=",
"False",
")",
":",
"conv",
"=",
"mx",
".",
"symbol",
".",
"Convolution",
"(",
"data",
"=",
"from_layer",
",",
"kernel",
"=",
"kernel",
",",
"pad",
"=",
"pad",
",",
"stride",
"=",
"stride",
",",
"num_filter",
"=",
"num_filter",
",",
"name",
"=",
"\"{}_conv\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"use_batchnorm",
":",
"conv",
"=",
"mx",
".",
"symbol",
".",
"BatchNorm",
"(",
"data",
"=",
"conv",
",",
"name",
"=",
"\"{}_bn\"",
".",
"format",
"(",
"name",
")",
")",
"relu",
"=",
"mx",
".",
"symbol",
".",
"Activation",
"(",
"data",
"=",
"conv",
",",
"act_type",
"=",
"act_type",
",",
"name",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"name",
",",
"act_type",
")",
")",
"return",
"relu"
] |
wrapper for a small Convolution group
Parameters:
----------
from_layer : mx.symbol
continue on which layer
name : str
base name of the new layers
num_filter : int
how many filters to use in Convolution layer
kernel : tuple (int, int)
kernel size (h, w)
pad : tuple (int, int)
padding size (h, w)
stride : tuple (int, int)
stride size (h, w)
act_type : str
activation type, can be relu...
use_batchnorm : bool
whether to use batch normalization
Returns:
----------
(conv, relu) mx.Symbols
|
[
"wrapper",
"for",
"a",
"small",
"Convolution",
"group"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/symbol/common.py#L21-L55
|
train
|
apache/incubator-mxnet
|
example/ssd/symbol/common.py
|
legacy_conv_act_layer
|
def legacy_conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False):
"""
wrapper for a small Convolution group
Parameters:
----------
from_layer : mx.symbol
continue on which layer
name : str
base name of the new layers
num_filter : int
how many filters to use in Convolution layer
kernel : tuple (int, int)
kernel size (h, w)
pad : tuple (int, int)
padding size (h, w)
stride : tuple (int, int)
stride size (h, w)
act_type : str
activation type, can be relu...
use_batchnorm : bool
whether to use batch normalization
Returns:
----------
(conv, relu) mx.Symbols
"""
assert not use_batchnorm, "batchnorm not yet supported"
bias = mx.symbol.Variable(name="conv{}_bias".format(name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad, \
stride=stride, num_filter=num_filter, name="conv{}".format(name))
relu = mx.symbol.Activation(data=conv, act_type=act_type, \
name="{}{}".format(act_type, name))
if use_batchnorm:
relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
return conv, relu
|
python
|
def legacy_conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False):
"""
wrapper for a small Convolution group
Parameters:
----------
from_layer : mx.symbol
continue on which layer
name : str
base name of the new layers
num_filter : int
how many filters to use in Convolution layer
kernel : tuple (int, int)
kernel size (h, w)
pad : tuple (int, int)
padding size (h, w)
stride : tuple (int, int)
stride size (h, w)
act_type : str
activation type, can be relu...
use_batchnorm : bool
whether to use batch normalization
Returns:
----------
(conv, relu) mx.Symbols
"""
assert not use_batchnorm, "batchnorm not yet supported"
bias = mx.symbol.Variable(name="conv{}_bias".format(name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad, \
stride=stride, num_filter=num_filter, name="conv{}".format(name))
relu = mx.symbol.Activation(data=conv, act_type=act_type, \
name="{}{}".format(act_type, name))
if use_batchnorm:
relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
return conv, relu
|
[
"def",
"legacy_conv_act_layer",
"(",
"from_layer",
",",
"name",
",",
"num_filter",
",",
"kernel",
"=",
"(",
"1",
",",
"1",
")",
",",
"pad",
"=",
"(",
"0",
",",
"0",
")",
",",
"stride",
"=",
"(",
"1",
",",
"1",
")",
",",
"act_type",
"=",
"\"relu\"",
",",
"use_batchnorm",
"=",
"False",
")",
":",
"assert",
"not",
"use_batchnorm",
",",
"\"batchnorm not yet supported\"",
"bias",
"=",
"mx",
".",
"symbol",
".",
"Variable",
"(",
"name",
"=",
"\"conv{}_bias\"",
".",
"format",
"(",
"name",
")",
",",
"init",
"=",
"mx",
".",
"init",
".",
"Constant",
"(",
"0.0",
")",
",",
"attr",
"=",
"{",
"'__lr_mult__'",
":",
"'2.0'",
"}",
")",
"conv",
"=",
"mx",
".",
"symbol",
".",
"Convolution",
"(",
"data",
"=",
"from_layer",
",",
"bias",
"=",
"bias",
",",
"kernel",
"=",
"kernel",
",",
"pad",
"=",
"pad",
",",
"stride",
"=",
"stride",
",",
"num_filter",
"=",
"num_filter",
",",
"name",
"=",
"\"conv{}\"",
".",
"format",
"(",
"name",
")",
")",
"relu",
"=",
"mx",
".",
"symbol",
".",
"Activation",
"(",
"data",
"=",
"conv",
",",
"act_type",
"=",
"act_type",
",",
"name",
"=",
"\"{}{}\"",
".",
"format",
"(",
"act_type",
",",
"name",
")",
")",
"if",
"use_batchnorm",
":",
"relu",
"=",
"mx",
".",
"symbol",
".",
"BatchNorm",
"(",
"data",
"=",
"relu",
",",
"name",
"=",
"\"bn{}\"",
".",
"format",
"(",
"name",
")",
")",
"return",
"conv",
",",
"relu"
] |
wrapper for a small Convolution group
Parameters:
----------
from_layer : mx.symbol
continue on which layer
name : str
base name of the new layers
num_filter : int
how many filters to use in Convolution layer
kernel : tuple (int, int)
kernel size (h, w)
pad : tuple (int, int)
padding size (h, w)
stride : tuple (int, int)
stride size (h, w)
act_type : str
activation type, can be relu...
use_batchnorm : bool
whether to use batch normalization
Returns:
----------
(conv, relu) mx.Symbols
|
[
"wrapper",
"for",
"a",
"small",
"Convolution",
"group"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/symbol/common.py#L57-L94
|
train
|
apache/incubator-mxnet
|
example/ssd/symbol/common.py
|
multi_layer_feature
|
def multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=128):
"""Wrapper function to extract features from base network, attaching extra
layers and SSD specific layers
Parameters
----------
from_layers : list of str
feature extraction layers, use '' for add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
however, if normalization and scale is applied, the number of filter for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
min_filter : int
minimum number of filters used in 1x1 convolution
Returns
-------
list of mx.Symbols
"""
# arguments check
assert len(from_layers) > 0
assert isinstance(from_layers[0], str) and len(from_layers[0].strip()) > 0
assert len(from_layers) == len(num_filters) == len(strides) == len(pads)
internals = body.get_internals()
layers = []
for k, params in enumerate(zip(from_layers, num_filters, strides, pads)):
from_layer, num_filter, s, p = params
if from_layer.strip():
# extract from base network
layer = internals[from_layer.strip() + '_output']
layers.append(layer)
else:
# attach from last feature layer
assert len(layers) > 0
assert num_filter > 0
layer = layers[-1]
num_1x1 = max(min_filter, num_filter // 2)
conv_1x1 = conv_act_layer(layer, 'multi_feat_%d_conv_1x1' % (k),
num_1x1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='relu')
conv_3x3 = conv_act_layer(conv_1x1, 'multi_feat_%d_conv_3x3' % (k),
num_filter, kernel=(3, 3), pad=(p, p), stride=(s, s), act_type='relu')
layers.append(conv_3x3)
return layers
|
python
|
def multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=128):
"""Wrapper function to extract features from base network, attaching extra
layers and SSD specific layers
Parameters
----------
from_layers : list of str
feature extraction layers, use '' for add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
however, if normalization and scale is applied, the number of filter for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
min_filter : int
minimum number of filters used in 1x1 convolution
Returns
-------
list of mx.Symbols
"""
# arguments check
assert len(from_layers) > 0
assert isinstance(from_layers[0], str) and len(from_layers[0].strip()) > 0
assert len(from_layers) == len(num_filters) == len(strides) == len(pads)
internals = body.get_internals()
layers = []
for k, params in enumerate(zip(from_layers, num_filters, strides, pads)):
from_layer, num_filter, s, p = params
if from_layer.strip():
# extract from base network
layer = internals[from_layer.strip() + '_output']
layers.append(layer)
else:
# attach from last feature layer
assert len(layers) > 0
assert num_filter > 0
layer = layers[-1]
num_1x1 = max(min_filter, num_filter // 2)
conv_1x1 = conv_act_layer(layer, 'multi_feat_%d_conv_1x1' % (k),
num_1x1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='relu')
conv_3x3 = conv_act_layer(conv_1x1, 'multi_feat_%d_conv_3x3' % (k),
num_filter, kernel=(3, 3), pad=(p, p), stride=(s, s), act_type='relu')
layers.append(conv_3x3)
return layers
|
[
"def",
"multi_layer_feature",
"(",
"body",
",",
"from_layers",
",",
"num_filters",
",",
"strides",
",",
"pads",
",",
"min_filter",
"=",
"128",
")",
":",
"# arguments check",
"assert",
"len",
"(",
"from_layers",
")",
">",
"0",
"assert",
"isinstance",
"(",
"from_layers",
"[",
"0",
"]",
",",
"str",
")",
"and",
"len",
"(",
"from_layers",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
">",
"0",
"assert",
"len",
"(",
"from_layers",
")",
"==",
"len",
"(",
"num_filters",
")",
"==",
"len",
"(",
"strides",
")",
"==",
"len",
"(",
"pads",
")",
"internals",
"=",
"body",
".",
"get_internals",
"(",
")",
"layers",
"=",
"[",
"]",
"for",
"k",
",",
"params",
"in",
"enumerate",
"(",
"zip",
"(",
"from_layers",
",",
"num_filters",
",",
"strides",
",",
"pads",
")",
")",
":",
"from_layer",
",",
"num_filter",
",",
"s",
",",
"p",
"=",
"params",
"if",
"from_layer",
".",
"strip",
"(",
")",
":",
"# extract from base network",
"layer",
"=",
"internals",
"[",
"from_layer",
".",
"strip",
"(",
")",
"+",
"'_output'",
"]",
"layers",
".",
"append",
"(",
"layer",
")",
"else",
":",
"# attach from last feature layer",
"assert",
"len",
"(",
"layers",
")",
">",
"0",
"assert",
"num_filter",
">",
"0",
"layer",
"=",
"layers",
"[",
"-",
"1",
"]",
"num_1x1",
"=",
"max",
"(",
"min_filter",
",",
"num_filter",
"//",
"2",
")",
"conv_1x1",
"=",
"conv_act_layer",
"(",
"layer",
",",
"'multi_feat_%d_conv_1x1'",
"%",
"(",
"k",
")",
",",
"num_1x1",
",",
"kernel",
"=",
"(",
"1",
",",
"1",
")",
",",
"pad",
"=",
"(",
"0",
",",
"0",
")",
",",
"stride",
"=",
"(",
"1",
",",
"1",
")",
",",
"act_type",
"=",
"'relu'",
")",
"conv_3x3",
"=",
"conv_act_layer",
"(",
"conv_1x1",
",",
"'multi_feat_%d_conv_3x3'",
"%",
"(",
"k",
")",
",",
"num_filter",
",",
"kernel",
"=",
"(",
"3",
",",
"3",
")",
",",
"pad",
"=",
"(",
"p",
",",
"p",
")",
",",
"stride",
"=",
"(",
"s",
",",
"s",
")",
",",
"act_type",
"=",
"'relu'",
")",
"layers",
".",
"append",
"(",
"conv_3x3",
")",
"return",
"layers"
] |
Wrapper function to extract features from base network, attaching extra
layers and SSD specific layers
Parameters
----------
from_layers : list of str
feature extraction layers, use '' for add extra layers
For example:
from_layers = ['relu4_3', 'fc7', '', '', '', '']
which means extract feature from relu4_3 and fc7, adding 4 extra layers
on top of fc7
num_filters : list of int
number of filters for extra layers, you can use -1 for extracted features,
however, if normalization and scale is applied, the number of filter for
that layer must be provided.
For example:
num_filters = [512, -1, 512, 256, 256, 256]
strides : list of int
strides for the 3x3 convolution appended, -1 can be used for extracted
feature layers
pads : list of int
paddings for the 3x3 convolution, -1 can be used for extracted layers
min_filter : int
minimum number of filters used in 1x1 convolution
Returns
-------
list of mx.Symbols
|
[
"Wrapper",
"function",
"to",
"extract",
"features",
"from",
"base",
"network",
"attaching",
"extra",
"layers",
"and",
"SSD",
"specific",
"layers"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/symbol/common.py#L96-L151
|
train
|
apache/incubator-mxnet
|
example/ssd/symbol/common.py
|
multibox_layer
|
def multibox_layer(from_layers, num_classes, sizes=[.2, .95],
ratios=[1], normalization=-1, num_channels=[],
clip=False, interm_layer=0, steps=[]):
"""
the basic aggregation module for SSD detection. Takes in multiple layers,
generate multiple object detection targets by customized layers
Parameters:
----------
from_layers : list of mx.symbol
generate multibox detection from layers
num_classes : int
number of classes excluding background, will automatically handle
background in this function
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
num_channels : list of int
number of input layer channels, used when normalization is enabled, the
length of list should equals to number of normalization layers
clip : bool
whether to clip out-of-image boxes
interm_layer : int
if > 0, will add a intermediate Convolution layer
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
Returns:
----------
list of outputs, as [loc_preds, cls_preds, anchor_boxes]
loc_preds : localization regression prediction
cls_preds : classification prediction
anchor_boxes : generated anchor boxes
"""
assert len(from_layers) > 0, "from_layers must not be empty list"
assert num_classes > 0, \
"num_classes {} must be larger than 0".format(num_classes)
assert len(ratios) > 0, "aspect ratios must not be empty list"
if not isinstance(ratios[0], list):
# provided only one ratio list, broadcast to all from_layers
ratios = [ratios] * len(from_layers)
assert len(ratios) == len(from_layers), \
"ratios and from_layers must have same length"
assert len(sizes) > 0, "sizes must not be empty list"
if len(sizes) == 2 and not isinstance(sizes[0], list):
# provided size range, we need to compute the sizes for each layer
assert sizes[0] > 0 and sizes[0] < 1
assert sizes[1] > 0 and sizes[1] < 1 and sizes[1] > sizes[0]
tmp = np.linspace(sizes[0], sizes[1], num=(len(from_layers)-1))
# Ref for start_offset value:
# https://arxiv.org/abs/1512.02325
start_offset = 0.1
min_sizes = [start_offset] + tmp.tolist()
max_sizes = tmp.tolist() + [tmp[-1]+start_offset]
sizes = zip(min_sizes, max_sizes)
assert len(sizes) == len(from_layers), \
"sizes and from_layers must have same length"
if not isinstance(normalization, list):
normalization = [normalization] * len(from_layers)
assert len(normalization) == len(from_layers)
assert sum(x > 0 for x in normalization) <= len(num_channels), \
"must provide number of channels for each normalized layer"
if steps:
assert len(steps) == len(from_layers), "provide steps for all layers or leave empty"
loc_pred_layers = []
cls_pred_layers = []
anchor_layers = []
num_classes += 1 # always use background as label 0
for k, from_layer in enumerate(from_layers):
from_name = from_layer.name
# normalize
if normalization[k] > 0:
from_layer = mx.symbol.L2Normalization(data=from_layer, \
mode="channel", name="{}_norm".format(from_name))
scale = mx.symbol.Variable(name="{}_scale".format(from_name),
shape=(1, num_channels.pop(0), 1, 1),
init=mx.init.Constant(normalization[k]),
attr={'__wd_mult__': '0.1'})
from_layer = mx.symbol.broadcast_mul(lhs=scale, rhs=from_layer)
if interm_layer > 0:
from_layer = mx.symbol.Convolution(data=from_layer, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=interm_layer, \
name="{}_inter_conv".format(from_name))
from_layer = mx.symbol.Activation(data=from_layer, act_type="relu", \
name="{}_inter_relu".format(from_name))
# estimate number of anchors per location
# here I follow the original version in caffe
# TODO: better way to shape the anchors??
size = sizes[k]
assert len(size) > 0, "must provide at least one size"
size_str = "(" + ",".join([str(x) for x in size]) + ")"
ratio = ratios[k]
assert len(ratio) > 0, "must provide at least one ratio"
ratio_str = "(" + ",".join([str(x) for x in ratio]) + ")"
num_anchors = len(size) -1 + len(ratio)
# create location prediction layer
num_loc_pred = num_anchors * 4
bias = mx.symbol.Variable(name="{}_loc_pred_conv_bias".format(from_name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
loc_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=num_loc_pred, \
name="{}_loc_pred_conv".format(from_name))
loc_pred = mx.symbol.transpose(loc_pred, axes=(0,2,3,1))
loc_pred = mx.symbol.Flatten(data=loc_pred)
loc_pred_layers.append(loc_pred)
# create class prediction layer
num_cls_pred = num_anchors * num_classes
bias = mx.symbol.Variable(name="{}_cls_pred_conv_bias".format(from_name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
cls_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=num_cls_pred, \
name="{}_cls_pred_conv".format(from_name))
cls_pred = mx.symbol.transpose(cls_pred, axes=(0,2,3,1))
cls_pred = mx.symbol.Flatten(data=cls_pred)
cls_pred_layers.append(cls_pred)
# create anchor generation layer
if steps:
step = (steps[k], steps[k])
else:
step = '(-1.0, -1.0)'
anchors = mx.symbol.contrib.MultiBoxPrior(from_layer, sizes=size_str, ratios=ratio_str,
clip=clip, name="{}_anchors".format(from_name),
steps=step)
anchors = mx.symbol.Flatten(data=anchors)
anchor_layers.append(anchors)
loc_preds = mx.symbol.Concat(*loc_pred_layers, num_args=len(loc_pred_layers), \
dim=1, name="multibox_loc_pred")
cls_preds = mx.symbol.Concat(*cls_pred_layers, num_args=len(cls_pred_layers), \
dim=1)
cls_preds = mx.symbol.Reshape(data=cls_preds, shape=(0, -1, num_classes))
cls_preds = mx.symbol.transpose(cls_preds, axes=(0, 2, 1), name="multibox_cls_pred")
anchor_boxes = mx.symbol.Concat(*anchor_layers, \
num_args=len(anchor_layers), dim=1)
anchor_boxes = mx.symbol.Reshape(data=anchor_boxes, shape=(0, -1, 4), name="multibox_anchors")
return [loc_preds, cls_preds, anchor_boxes]
|
python
|
def multibox_layer(from_layers, num_classes, sizes=[.2, .95],
ratios=[1], normalization=-1, num_channels=[],
clip=False, interm_layer=0, steps=[]):
"""
the basic aggregation module for SSD detection. Takes in multiple layers,
generate multiple object detection targets by customized layers
Parameters:
----------
from_layers : list of mx.symbol
generate multibox detection from layers
num_classes : int
number of classes excluding background, will automatically handle
background in this function
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
num_channels : list of int
number of input layer channels, used when normalization is enabled, the
length of list should equals to number of normalization layers
clip : bool
whether to clip out-of-image boxes
interm_layer : int
if > 0, will add a intermediate Convolution layer
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
Returns:
----------
list of outputs, as [loc_preds, cls_preds, anchor_boxes]
loc_preds : localization regression prediction
cls_preds : classification prediction
anchor_boxes : generated anchor boxes
"""
assert len(from_layers) > 0, "from_layers must not be empty list"
assert num_classes > 0, \
"num_classes {} must be larger than 0".format(num_classes)
assert len(ratios) > 0, "aspect ratios must not be empty list"
if not isinstance(ratios[0], list):
# provided only one ratio list, broadcast to all from_layers
ratios = [ratios] * len(from_layers)
assert len(ratios) == len(from_layers), \
"ratios and from_layers must have same length"
assert len(sizes) > 0, "sizes must not be empty list"
if len(sizes) == 2 and not isinstance(sizes[0], list):
# provided size range, we need to compute the sizes for each layer
assert sizes[0] > 0 and sizes[0] < 1
assert sizes[1] > 0 and sizes[1] < 1 and sizes[1] > sizes[0]
tmp = np.linspace(sizes[0], sizes[1], num=(len(from_layers)-1))
# Ref for start_offset value:
# https://arxiv.org/abs/1512.02325
start_offset = 0.1
min_sizes = [start_offset] + tmp.tolist()
max_sizes = tmp.tolist() + [tmp[-1]+start_offset]
sizes = zip(min_sizes, max_sizes)
assert len(sizes) == len(from_layers), \
"sizes and from_layers must have same length"
if not isinstance(normalization, list):
normalization = [normalization] * len(from_layers)
assert len(normalization) == len(from_layers)
assert sum(x > 0 for x in normalization) <= len(num_channels), \
"must provide number of channels for each normalized layer"
if steps:
assert len(steps) == len(from_layers), "provide steps for all layers or leave empty"
loc_pred_layers = []
cls_pred_layers = []
anchor_layers = []
num_classes += 1 # always use background as label 0
for k, from_layer in enumerate(from_layers):
from_name = from_layer.name
# normalize
if normalization[k] > 0:
from_layer = mx.symbol.L2Normalization(data=from_layer, \
mode="channel", name="{}_norm".format(from_name))
scale = mx.symbol.Variable(name="{}_scale".format(from_name),
shape=(1, num_channels.pop(0), 1, 1),
init=mx.init.Constant(normalization[k]),
attr={'__wd_mult__': '0.1'})
from_layer = mx.symbol.broadcast_mul(lhs=scale, rhs=from_layer)
if interm_layer > 0:
from_layer = mx.symbol.Convolution(data=from_layer, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=interm_layer, \
name="{}_inter_conv".format(from_name))
from_layer = mx.symbol.Activation(data=from_layer, act_type="relu", \
name="{}_inter_relu".format(from_name))
# estimate number of anchors per location
# here I follow the original version in caffe
# TODO: better way to shape the anchors??
size = sizes[k]
assert len(size) > 0, "must provide at least one size"
size_str = "(" + ",".join([str(x) for x in size]) + ")"
ratio = ratios[k]
assert len(ratio) > 0, "must provide at least one ratio"
ratio_str = "(" + ",".join([str(x) for x in ratio]) + ")"
num_anchors = len(size) -1 + len(ratio)
# create location prediction layer
num_loc_pred = num_anchors * 4
bias = mx.symbol.Variable(name="{}_loc_pred_conv_bias".format(from_name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
loc_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=num_loc_pred, \
name="{}_loc_pred_conv".format(from_name))
loc_pred = mx.symbol.transpose(loc_pred, axes=(0,2,3,1))
loc_pred = mx.symbol.Flatten(data=loc_pred)
loc_pred_layers.append(loc_pred)
# create class prediction layer
num_cls_pred = num_anchors * num_classes
bias = mx.symbol.Variable(name="{}_cls_pred_conv_bias".format(from_name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
cls_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \
stride=(1,1), pad=(1,1), num_filter=num_cls_pred, \
name="{}_cls_pred_conv".format(from_name))
cls_pred = mx.symbol.transpose(cls_pred, axes=(0,2,3,1))
cls_pred = mx.symbol.Flatten(data=cls_pred)
cls_pred_layers.append(cls_pred)
# create anchor generation layer
if steps:
step = (steps[k], steps[k])
else:
step = '(-1.0, -1.0)'
anchors = mx.symbol.contrib.MultiBoxPrior(from_layer, sizes=size_str, ratios=ratio_str,
clip=clip, name="{}_anchors".format(from_name),
steps=step)
anchors = mx.symbol.Flatten(data=anchors)
anchor_layers.append(anchors)
loc_preds = mx.symbol.Concat(*loc_pred_layers, num_args=len(loc_pred_layers), \
dim=1, name="multibox_loc_pred")
cls_preds = mx.symbol.Concat(*cls_pred_layers, num_args=len(cls_pred_layers), \
dim=1)
cls_preds = mx.symbol.Reshape(data=cls_preds, shape=(0, -1, num_classes))
cls_preds = mx.symbol.transpose(cls_preds, axes=(0, 2, 1), name="multibox_cls_pred")
anchor_boxes = mx.symbol.Concat(*anchor_layers, \
num_args=len(anchor_layers), dim=1)
anchor_boxes = mx.symbol.Reshape(data=anchor_boxes, shape=(0, -1, 4), name="multibox_anchors")
return [loc_preds, cls_preds, anchor_boxes]
|
[
"def",
"multibox_layer",
"(",
"from_layers",
",",
"num_classes",
",",
"sizes",
"=",
"[",
".2",
",",
".95",
"]",
",",
"ratios",
"=",
"[",
"1",
"]",
",",
"normalization",
"=",
"-",
"1",
",",
"num_channels",
"=",
"[",
"]",
",",
"clip",
"=",
"False",
",",
"interm_layer",
"=",
"0",
",",
"steps",
"=",
"[",
"]",
")",
":",
"assert",
"len",
"(",
"from_layers",
")",
">",
"0",
",",
"\"from_layers must not be empty list\"",
"assert",
"num_classes",
">",
"0",
",",
"\"num_classes {} must be larger than 0\"",
".",
"format",
"(",
"num_classes",
")",
"assert",
"len",
"(",
"ratios",
")",
">",
"0",
",",
"\"aspect ratios must not be empty list\"",
"if",
"not",
"isinstance",
"(",
"ratios",
"[",
"0",
"]",
",",
"list",
")",
":",
"# provided only one ratio list, broadcast to all from_layers",
"ratios",
"=",
"[",
"ratios",
"]",
"*",
"len",
"(",
"from_layers",
")",
"assert",
"len",
"(",
"ratios",
")",
"==",
"len",
"(",
"from_layers",
")",
",",
"\"ratios and from_layers must have same length\"",
"assert",
"len",
"(",
"sizes",
")",
">",
"0",
",",
"\"sizes must not be empty list\"",
"if",
"len",
"(",
"sizes",
")",
"==",
"2",
"and",
"not",
"isinstance",
"(",
"sizes",
"[",
"0",
"]",
",",
"list",
")",
":",
"# provided size range, we need to compute the sizes for each layer",
"assert",
"sizes",
"[",
"0",
"]",
">",
"0",
"and",
"sizes",
"[",
"0",
"]",
"<",
"1",
"assert",
"sizes",
"[",
"1",
"]",
">",
"0",
"and",
"sizes",
"[",
"1",
"]",
"<",
"1",
"and",
"sizes",
"[",
"1",
"]",
">",
"sizes",
"[",
"0",
"]",
"tmp",
"=",
"np",
".",
"linspace",
"(",
"sizes",
"[",
"0",
"]",
",",
"sizes",
"[",
"1",
"]",
",",
"num",
"=",
"(",
"len",
"(",
"from_layers",
")",
"-",
"1",
")",
")",
"# Ref for start_offset value:",
"# https://arxiv.org/abs/1512.02325",
"start_offset",
"=",
"0.1",
"min_sizes",
"=",
"[",
"start_offset",
"]",
"+",
"tmp",
".",
"tolist",
"(",
")",
"max_sizes",
"=",
"tmp",
".",
"tolist",
"(",
")",
"+",
"[",
"tmp",
"[",
"-",
"1",
"]",
"+",
"start_offset",
"]",
"sizes",
"=",
"zip",
"(",
"min_sizes",
",",
"max_sizes",
")",
"assert",
"len",
"(",
"sizes",
")",
"==",
"len",
"(",
"from_layers",
")",
",",
"\"sizes and from_layers must have same length\"",
"if",
"not",
"isinstance",
"(",
"normalization",
",",
"list",
")",
":",
"normalization",
"=",
"[",
"normalization",
"]",
"*",
"len",
"(",
"from_layers",
")",
"assert",
"len",
"(",
"normalization",
")",
"==",
"len",
"(",
"from_layers",
")",
"assert",
"sum",
"(",
"x",
">",
"0",
"for",
"x",
"in",
"normalization",
")",
"<=",
"len",
"(",
"num_channels",
")",
",",
"\"must provide number of channels for each normalized layer\"",
"if",
"steps",
":",
"assert",
"len",
"(",
"steps",
")",
"==",
"len",
"(",
"from_layers",
")",
",",
"\"provide steps for all layers or leave empty\"",
"loc_pred_layers",
"=",
"[",
"]",
"cls_pred_layers",
"=",
"[",
"]",
"anchor_layers",
"=",
"[",
"]",
"num_classes",
"+=",
"1",
"# always use background as label 0",
"for",
"k",
",",
"from_layer",
"in",
"enumerate",
"(",
"from_layers",
")",
":",
"from_name",
"=",
"from_layer",
".",
"name",
"# normalize",
"if",
"normalization",
"[",
"k",
"]",
">",
"0",
":",
"from_layer",
"=",
"mx",
".",
"symbol",
".",
"L2Normalization",
"(",
"data",
"=",
"from_layer",
",",
"mode",
"=",
"\"channel\"",
",",
"name",
"=",
"\"{}_norm\"",
".",
"format",
"(",
"from_name",
")",
")",
"scale",
"=",
"mx",
".",
"symbol",
".",
"Variable",
"(",
"name",
"=",
"\"{}_scale\"",
".",
"format",
"(",
"from_name",
")",
",",
"shape",
"=",
"(",
"1",
",",
"num_channels",
".",
"pop",
"(",
"0",
")",
",",
"1",
",",
"1",
")",
",",
"init",
"=",
"mx",
".",
"init",
".",
"Constant",
"(",
"normalization",
"[",
"k",
"]",
")",
",",
"attr",
"=",
"{",
"'__wd_mult__'",
":",
"'0.1'",
"}",
")",
"from_layer",
"=",
"mx",
".",
"symbol",
".",
"broadcast_mul",
"(",
"lhs",
"=",
"scale",
",",
"rhs",
"=",
"from_layer",
")",
"if",
"interm_layer",
">",
"0",
":",
"from_layer",
"=",
"mx",
".",
"symbol",
".",
"Convolution",
"(",
"data",
"=",
"from_layer",
",",
"kernel",
"=",
"(",
"3",
",",
"3",
")",
",",
"stride",
"=",
"(",
"1",
",",
"1",
")",
",",
"pad",
"=",
"(",
"1",
",",
"1",
")",
",",
"num_filter",
"=",
"interm_layer",
",",
"name",
"=",
"\"{}_inter_conv\"",
".",
"format",
"(",
"from_name",
")",
")",
"from_layer",
"=",
"mx",
".",
"symbol",
".",
"Activation",
"(",
"data",
"=",
"from_layer",
",",
"act_type",
"=",
"\"relu\"",
",",
"name",
"=",
"\"{}_inter_relu\"",
".",
"format",
"(",
"from_name",
")",
")",
"# estimate number of anchors per location",
"# here I follow the original version in caffe",
"# TODO: better way to shape the anchors??",
"size",
"=",
"sizes",
"[",
"k",
"]",
"assert",
"len",
"(",
"size",
")",
">",
"0",
",",
"\"must provide at least one size\"",
"size_str",
"=",
"\"(\"",
"+",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"size",
"]",
")",
"+",
"\")\"",
"ratio",
"=",
"ratios",
"[",
"k",
"]",
"assert",
"len",
"(",
"ratio",
")",
">",
"0",
",",
"\"must provide at least one ratio\"",
"ratio_str",
"=",
"\"(\"",
"+",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"ratio",
"]",
")",
"+",
"\")\"",
"num_anchors",
"=",
"len",
"(",
"size",
")",
"-",
"1",
"+",
"len",
"(",
"ratio",
")",
"# create location prediction layer",
"num_loc_pred",
"=",
"num_anchors",
"*",
"4",
"bias",
"=",
"mx",
".",
"symbol",
".",
"Variable",
"(",
"name",
"=",
"\"{}_loc_pred_conv_bias\"",
".",
"format",
"(",
"from_name",
")",
",",
"init",
"=",
"mx",
".",
"init",
".",
"Constant",
"(",
"0.0",
")",
",",
"attr",
"=",
"{",
"'__lr_mult__'",
":",
"'2.0'",
"}",
")",
"loc_pred",
"=",
"mx",
".",
"symbol",
".",
"Convolution",
"(",
"data",
"=",
"from_layer",
",",
"bias",
"=",
"bias",
",",
"kernel",
"=",
"(",
"3",
",",
"3",
")",
",",
"stride",
"=",
"(",
"1",
",",
"1",
")",
",",
"pad",
"=",
"(",
"1",
",",
"1",
")",
",",
"num_filter",
"=",
"num_loc_pred",
",",
"name",
"=",
"\"{}_loc_pred_conv\"",
".",
"format",
"(",
"from_name",
")",
")",
"loc_pred",
"=",
"mx",
".",
"symbol",
".",
"transpose",
"(",
"loc_pred",
",",
"axes",
"=",
"(",
"0",
",",
"2",
",",
"3",
",",
"1",
")",
")",
"loc_pred",
"=",
"mx",
".",
"symbol",
".",
"Flatten",
"(",
"data",
"=",
"loc_pred",
")",
"loc_pred_layers",
".",
"append",
"(",
"loc_pred",
")",
"# create class prediction layer",
"num_cls_pred",
"=",
"num_anchors",
"*",
"num_classes",
"bias",
"=",
"mx",
".",
"symbol",
".",
"Variable",
"(",
"name",
"=",
"\"{}_cls_pred_conv_bias\"",
".",
"format",
"(",
"from_name",
")",
",",
"init",
"=",
"mx",
".",
"init",
".",
"Constant",
"(",
"0.0",
")",
",",
"attr",
"=",
"{",
"'__lr_mult__'",
":",
"'2.0'",
"}",
")",
"cls_pred",
"=",
"mx",
".",
"symbol",
".",
"Convolution",
"(",
"data",
"=",
"from_layer",
",",
"bias",
"=",
"bias",
",",
"kernel",
"=",
"(",
"3",
",",
"3",
")",
",",
"stride",
"=",
"(",
"1",
",",
"1",
")",
",",
"pad",
"=",
"(",
"1",
",",
"1",
")",
",",
"num_filter",
"=",
"num_cls_pred",
",",
"name",
"=",
"\"{}_cls_pred_conv\"",
".",
"format",
"(",
"from_name",
")",
")",
"cls_pred",
"=",
"mx",
".",
"symbol",
".",
"transpose",
"(",
"cls_pred",
",",
"axes",
"=",
"(",
"0",
",",
"2",
",",
"3",
",",
"1",
")",
")",
"cls_pred",
"=",
"mx",
".",
"symbol",
".",
"Flatten",
"(",
"data",
"=",
"cls_pred",
")",
"cls_pred_layers",
".",
"append",
"(",
"cls_pred",
")",
"# create anchor generation layer",
"if",
"steps",
":",
"step",
"=",
"(",
"steps",
"[",
"k",
"]",
",",
"steps",
"[",
"k",
"]",
")",
"else",
":",
"step",
"=",
"'(-1.0, -1.0)'",
"anchors",
"=",
"mx",
".",
"symbol",
".",
"contrib",
".",
"MultiBoxPrior",
"(",
"from_layer",
",",
"sizes",
"=",
"size_str",
",",
"ratios",
"=",
"ratio_str",
",",
"clip",
"=",
"clip",
",",
"name",
"=",
"\"{}_anchors\"",
".",
"format",
"(",
"from_name",
")",
",",
"steps",
"=",
"step",
")",
"anchors",
"=",
"mx",
".",
"symbol",
".",
"Flatten",
"(",
"data",
"=",
"anchors",
")",
"anchor_layers",
".",
"append",
"(",
"anchors",
")",
"loc_preds",
"=",
"mx",
".",
"symbol",
".",
"Concat",
"(",
"*",
"loc_pred_layers",
",",
"num_args",
"=",
"len",
"(",
"loc_pred_layers",
")",
",",
"dim",
"=",
"1",
",",
"name",
"=",
"\"multibox_loc_pred\"",
")",
"cls_preds",
"=",
"mx",
".",
"symbol",
".",
"Concat",
"(",
"*",
"cls_pred_layers",
",",
"num_args",
"=",
"len",
"(",
"cls_pred_layers",
")",
",",
"dim",
"=",
"1",
")",
"cls_preds",
"=",
"mx",
".",
"symbol",
".",
"Reshape",
"(",
"data",
"=",
"cls_preds",
",",
"shape",
"=",
"(",
"0",
",",
"-",
"1",
",",
"num_classes",
")",
")",
"cls_preds",
"=",
"mx",
".",
"symbol",
".",
"transpose",
"(",
"cls_preds",
",",
"axes",
"=",
"(",
"0",
",",
"2",
",",
"1",
")",
",",
"name",
"=",
"\"multibox_cls_pred\"",
")",
"anchor_boxes",
"=",
"mx",
".",
"symbol",
".",
"Concat",
"(",
"*",
"anchor_layers",
",",
"num_args",
"=",
"len",
"(",
"anchor_layers",
")",
",",
"dim",
"=",
"1",
")",
"anchor_boxes",
"=",
"mx",
".",
"symbol",
".",
"Reshape",
"(",
"data",
"=",
"anchor_boxes",
",",
"shape",
"=",
"(",
"0",
",",
"-",
"1",
",",
"4",
")",
",",
"name",
"=",
"\"multibox_anchors\"",
")",
"return",
"[",
"loc_preds",
",",
"cls_preds",
",",
"anchor_boxes",
"]"
] |
the basic aggregation module for SSD detection. Takes in multiple layers,
generate multiple object detection targets by customized layers
Parameters:
----------
from_layers : list of mx.symbol
generate multibox detection from layers
num_classes : int
number of classes excluding background, will automatically handle
background in this function
sizes : list or list of list
[min_size, max_size] for all layers or [[], [], []...] for specific layers
ratios : list or list of list
[ratio1, ratio2...] for all layers or [[], [], ...] for specific layers
normalizations : int or list of int
use normalizations value for all layers or [...] for specific layers,
-1 indicate no normalizations and scales
num_channels : list of int
number of input layer channels, used when normalization is enabled, the
length of list should equals to number of normalization layers
clip : bool
whether to clip out-of-image boxes
interm_layer : int
if > 0, will add a intermediate Convolution layer
steps : list
specify steps for each MultiBoxPrior layer, leave empty, it will calculate
according to layer dimensions
Returns:
----------
list of outputs, as [loc_preds, cls_preds, anchor_boxes]
loc_preds : localization regression prediction
cls_preds : classification prediction
anchor_boxes : generated anchor boxes
|
[
"the",
"basic",
"aggregation",
"module",
"for",
"SSD",
"detection",
".",
"Takes",
"in",
"multiple",
"layers",
"generate",
"multiple",
"object",
"detection",
"targets",
"by",
"customized",
"layers"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/symbol/common.py#L153-L304
|
train
|
apache/incubator-mxnet
|
python/mxnet/gluon/loss.py
|
_apply_weighting
|
def _apply_weighting(F, loss, weight=None, sample_weight=None):
"""Apply weighting to loss.
Parameters
----------
loss : Symbol
The loss to be weighted.
weight : float or None
Global scalar weight for loss.
sample_weight : Symbol or None
Per sample weighting. Must be broadcastable to
the same shape as loss. For example, if loss has
shape (64, 10) and you want to weight each sample
in the batch separately, `sample_weight` should have
shape (64, 1).
Returns
-------
loss : Symbol
Weighted loss
"""
if sample_weight is not None:
loss = F.broadcast_mul(loss, sample_weight)
if weight is not None:
assert isinstance(weight, numeric_types), "weight must be a number"
loss = loss * weight
return loss
|
python
|
def _apply_weighting(F, loss, weight=None, sample_weight=None):
"""Apply weighting to loss.
Parameters
----------
loss : Symbol
The loss to be weighted.
weight : float or None
Global scalar weight for loss.
sample_weight : Symbol or None
Per sample weighting. Must be broadcastable to
the same shape as loss. For example, if loss has
shape (64, 10) and you want to weight each sample
in the batch separately, `sample_weight` should have
shape (64, 1).
Returns
-------
loss : Symbol
Weighted loss
"""
if sample_weight is not None:
loss = F.broadcast_mul(loss, sample_weight)
if weight is not None:
assert isinstance(weight, numeric_types), "weight must be a number"
loss = loss * weight
return loss
|
[
"def",
"_apply_weighting",
"(",
"F",
",",
"loss",
",",
"weight",
"=",
"None",
",",
"sample_weight",
"=",
"None",
")",
":",
"if",
"sample_weight",
"is",
"not",
"None",
":",
"loss",
"=",
"F",
".",
"broadcast_mul",
"(",
"loss",
",",
"sample_weight",
")",
"if",
"weight",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"weight",
",",
"numeric_types",
")",
",",
"\"weight must be a number\"",
"loss",
"=",
"loss",
"*",
"weight",
"return",
"loss"
] |
Apply weighting to loss.
Parameters
----------
loss : Symbol
The loss to be weighted.
weight : float or None
Global scalar weight for loss.
sample_weight : Symbol or None
Per sample weighting. Must be broadcastable to
the same shape as loss. For example, if loss has
shape (64, 10) and you want to weight each sample
in the batch separately, `sample_weight` should have
shape (64, 1).
Returns
-------
loss : Symbol
Weighted loss
|
[
"Apply",
"weighting",
"to",
"loss",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/loss.py#L34-L62
|
train
|
apache/incubator-mxnet
|
python/mxnet/gluon/loss.py
|
_reshape_like
|
def _reshape_like(F, x, y):
"""Reshapes x to the same shape as y."""
return x.reshape(y.shape) if F is ndarray else F.reshape_like(x, y)
|
python
|
def _reshape_like(F, x, y):
"""Reshapes x to the same shape as y."""
return x.reshape(y.shape) if F is ndarray else F.reshape_like(x, y)
|
[
"def",
"_reshape_like",
"(",
"F",
",",
"x",
",",
"y",
")",
":",
"return",
"x",
".",
"reshape",
"(",
"y",
".",
"shape",
")",
"if",
"F",
"is",
"ndarray",
"else",
"F",
".",
"reshape_like",
"(",
"x",
",",
"y",
")"
] |
Reshapes x to the same shape as y.
|
[
"Reshapes",
"x",
"to",
"the",
"same",
"shape",
"as",
"y",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/loss.py#L65-L67
|
train
|
apache/incubator-mxnet
|
example/neural-style/nstyle.py
|
get_tv_grad_executor
|
def get_tv_grad_executor(img, ctx, tv_weight):
"""create TV gradient executor with input binded on img
"""
if tv_weight <= 0.0:
return None
nchannel = img.shape[1]
simg = mx.sym.Variable("img")
skernel = mx.sym.Variable("kernel")
channels = mx.sym.SliceChannel(simg, num_outputs=nchannel)
out = mx.sym.Concat(*[
mx.sym.Convolution(data=channels[i], weight=skernel,
num_filter=1,
kernel=(3, 3), pad=(1,1),
no_bias=True, stride=(1,1))
for i in range(nchannel)])
kernel = mx.nd.array(np.array([[0, -1, 0],
[-1, 4, -1],
[0, -1, 0]])
.reshape((1, 1, 3, 3)),
ctx) / 8.0
out = out * tv_weight
return out.bind(ctx, args={"img": img,
"kernel": kernel})
|
python
|
def get_tv_grad_executor(img, ctx, tv_weight):
"""create TV gradient executor with input binded on img
"""
if tv_weight <= 0.0:
return None
nchannel = img.shape[1]
simg = mx.sym.Variable("img")
skernel = mx.sym.Variable("kernel")
channels = mx.sym.SliceChannel(simg, num_outputs=nchannel)
out = mx.sym.Concat(*[
mx.sym.Convolution(data=channels[i], weight=skernel,
num_filter=1,
kernel=(3, 3), pad=(1,1),
no_bias=True, stride=(1,1))
for i in range(nchannel)])
kernel = mx.nd.array(np.array([[0, -1, 0],
[-1, 4, -1],
[0, -1, 0]])
.reshape((1, 1, 3, 3)),
ctx) / 8.0
out = out * tv_weight
return out.bind(ctx, args={"img": img,
"kernel": kernel})
|
[
"def",
"get_tv_grad_executor",
"(",
"img",
",",
"ctx",
",",
"tv_weight",
")",
":",
"if",
"tv_weight",
"<=",
"0.0",
":",
"return",
"None",
"nchannel",
"=",
"img",
".",
"shape",
"[",
"1",
"]",
"simg",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"\"img\"",
")",
"skernel",
"=",
"mx",
".",
"sym",
".",
"Variable",
"(",
"\"kernel\"",
")",
"channels",
"=",
"mx",
".",
"sym",
".",
"SliceChannel",
"(",
"simg",
",",
"num_outputs",
"=",
"nchannel",
")",
"out",
"=",
"mx",
".",
"sym",
".",
"Concat",
"(",
"*",
"[",
"mx",
".",
"sym",
".",
"Convolution",
"(",
"data",
"=",
"channels",
"[",
"i",
"]",
",",
"weight",
"=",
"skernel",
",",
"num_filter",
"=",
"1",
",",
"kernel",
"=",
"(",
"3",
",",
"3",
")",
",",
"pad",
"=",
"(",
"1",
",",
"1",
")",
",",
"no_bias",
"=",
"True",
",",
"stride",
"=",
"(",
"1",
",",
"1",
")",
")",
"for",
"i",
"in",
"range",
"(",
"nchannel",
")",
"]",
")",
"kernel",
"=",
"mx",
".",
"nd",
".",
"array",
"(",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"-",
"1",
",",
"0",
"]",
",",
"[",
"-",
"1",
",",
"4",
",",
"-",
"1",
"]",
",",
"[",
"0",
",",
"-",
"1",
",",
"0",
"]",
"]",
")",
".",
"reshape",
"(",
"(",
"1",
",",
"1",
",",
"3",
",",
"3",
")",
")",
",",
"ctx",
")",
"/",
"8.0",
"out",
"=",
"out",
"*",
"tv_weight",
"return",
"out",
".",
"bind",
"(",
"ctx",
",",
"args",
"=",
"{",
"\"img\"",
":",
"img",
",",
"\"kernel\"",
":",
"kernel",
"}",
")"
] |
create TV gradient executor with input binded on img
|
[
"create",
"TV",
"gradient",
"executor",
"with",
"input",
"binded",
"on",
"img"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/neural-style/nstyle.py#L143-L165
|
train
|
apache/incubator-mxnet
|
example/neural-style/nstyle.py
|
train_nstyle
|
def train_nstyle(args, callback=None):
"""Train a neural style network.
Args are from argparse and control input, output, hyper-parameters.
callback allows for display of training progress.
"""
# input
dev = mx.gpu(args.gpu) if args.gpu >= 0 else mx.cpu()
content_np = PreprocessContentImage(args.content_image, args.max_long_edge)
style_np = PreprocessStyleImage(args.style_image, shape=content_np.shape)
size = content_np.shape[2:]
# model
Executor = namedtuple('Executor', ['executor', 'data', 'data_grad'])
model_module = importlib.import_module('model_' + args.model)
style, content = model_module.get_symbol()
gram, gscale = style_gram_symbol(size, style)
model_executor = model_module.get_executor(gram, content, size, dev)
model_executor.data[:] = style_np
model_executor.executor.forward()
style_array = []
for i in range(len(model_executor.style)):
style_array.append(model_executor.style[i].copyto(mx.cpu()))
model_executor.data[:] = content_np
model_executor.executor.forward()
content_array = model_executor.content.copyto(mx.cpu())
# delete the executor
del model_executor
style_loss, content_loss = get_loss(gram, content)
model_executor = model_module.get_executor(
style_loss, content_loss, size, dev)
grad_array = []
for i in range(len(style_array)):
style_array[i].copyto(model_executor.arg_dict["target_gram_%d" % i])
grad_array.append(mx.nd.ones((1,), dev) * (float(args.style_weight) / gscale[i]))
grad_array.append(mx.nd.ones((1,), dev) * (float(args.content_weight)))
print([x.asscalar() for x in grad_array])
content_array.copyto(model_executor.arg_dict["target_content"])
# train
# initialize img with random noise
img = mx.nd.zeros(content_np.shape, ctx=dev)
img[:] = mx.rnd.uniform(-0.1, 0.1, img.shape)
lr = mx.lr_scheduler.FactorScheduler(step=args.lr_sched_delay,
factor=args.lr_sched_factor)
optimizer = mx.optimizer.NAG(
learning_rate = args.lr,
wd = 0.0001,
momentum=0.95,
lr_scheduler = lr)
optim_state = optimizer.create_state(0, img)
logging.info('start training arguments %s', args)
old_img = img.copyto(dev)
clip_norm = 1 * np.prod(img.shape)
tv_grad_executor = get_tv_grad_executor(img, dev, args.tv_weight)
for e in range(args.max_num_epochs):
img.copyto(model_executor.data)
model_executor.executor.forward()
model_executor.executor.backward(grad_array)
gnorm = mx.nd.norm(model_executor.data_grad).asscalar()
if gnorm > clip_norm:
model_executor.data_grad[:] *= clip_norm / gnorm
if tv_grad_executor is not None:
tv_grad_executor.forward()
optimizer.update(0, img,
model_executor.data_grad + tv_grad_executor.outputs[0],
optim_state)
else:
optimizer.update(0, img, model_executor.data_grad, optim_state)
new_img = img
eps = (mx.nd.norm(old_img - new_img) / mx.nd.norm(new_img)).asscalar()
old_img = new_img.copyto(dev)
logging.info('epoch %d, relative change %f', e, eps)
if eps < args.stop_eps:
logging.info('eps < args.stop_eps, training finished')
break
if callback:
cbdata = {
'eps': eps,
'epoch': e+1,
}
if (e+1) % args.save_epochs == 0:
outfn = args.output_dir + 'e_'+str(e+1)+'.jpg'
npimg = new_img.asnumpy()
SaveImage(npimg, outfn, args.remove_noise)
if callback:
cbdata['filename'] = outfn
cbdata['img'] = npimg
if callback:
callback(cbdata)
final_fn = args.output_dir + '/final.jpg'
SaveImage(new_img.asnumpy(), final_fn)
|
python
|
def train_nstyle(args, callback=None):
"""Train a neural style network.
Args are from argparse and control input, output, hyper-parameters.
callback allows for display of training progress.
"""
# input
dev = mx.gpu(args.gpu) if args.gpu >= 0 else mx.cpu()
content_np = PreprocessContentImage(args.content_image, args.max_long_edge)
style_np = PreprocessStyleImage(args.style_image, shape=content_np.shape)
size = content_np.shape[2:]
# model
Executor = namedtuple('Executor', ['executor', 'data', 'data_grad'])
model_module = importlib.import_module('model_' + args.model)
style, content = model_module.get_symbol()
gram, gscale = style_gram_symbol(size, style)
model_executor = model_module.get_executor(gram, content, size, dev)
model_executor.data[:] = style_np
model_executor.executor.forward()
style_array = []
for i in range(len(model_executor.style)):
style_array.append(model_executor.style[i].copyto(mx.cpu()))
model_executor.data[:] = content_np
model_executor.executor.forward()
content_array = model_executor.content.copyto(mx.cpu())
# delete the executor
del model_executor
style_loss, content_loss = get_loss(gram, content)
model_executor = model_module.get_executor(
style_loss, content_loss, size, dev)
grad_array = []
for i in range(len(style_array)):
style_array[i].copyto(model_executor.arg_dict["target_gram_%d" % i])
grad_array.append(mx.nd.ones((1,), dev) * (float(args.style_weight) / gscale[i]))
grad_array.append(mx.nd.ones((1,), dev) * (float(args.content_weight)))
print([x.asscalar() for x in grad_array])
content_array.copyto(model_executor.arg_dict["target_content"])
# train
# initialize img with random noise
img = mx.nd.zeros(content_np.shape, ctx=dev)
img[:] = mx.rnd.uniform(-0.1, 0.1, img.shape)
lr = mx.lr_scheduler.FactorScheduler(step=args.lr_sched_delay,
factor=args.lr_sched_factor)
optimizer = mx.optimizer.NAG(
learning_rate = args.lr,
wd = 0.0001,
momentum=0.95,
lr_scheduler = lr)
optim_state = optimizer.create_state(0, img)
logging.info('start training arguments %s', args)
old_img = img.copyto(dev)
clip_norm = 1 * np.prod(img.shape)
tv_grad_executor = get_tv_grad_executor(img, dev, args.tv_weight)
for e in range(args.max_num_epochs):
img.copyto(model_executor.data)
model_executor.executor.forward()
model_executor.executor.backward(grad_array)
gnorm = mx.nd.norm(model_executor.data_grad).asscalar()
if gnorm > clip_norm:
model_executor.data_grad[:] *= clip_norm / gnorm
if tv_grad_executor is not None:
tv_grad_executor.forward()
optimizer.update(0, img,
model_executor.data_grad + tv_grad_executor.outputs[0],
optim_state)
else:
optimizer.update(0, img, model_executor.data_grad, optim_state)
new_img = img
eps = (mx.nd.norm(old_img - new_img) / mx.nd.norm(new_img)).asscalar()
old_img = new_img.copyto(dev)
logging.info('epoch %d, relative change %f', e, eps)
if eps < args.stop_eps:
logging.info('eps < args.stop_eps, training finished')
break
if callback:
cbdata = {
'eps': eps,
'epoch': e+1,
}
if (e+1) % args.save_epochs == 0:
outfn = args.output_dir + 'e_'+str(e+1)+'.jpg'
npimg = new_img.asnumpy()
SaveImage(npimg, outfn, args.remove_noise)
if callback:
cbdata['filename'] = outfn
cbdata['img'] = npimg
if callback:
callback(cbdata)
final_fn = args.output_dir + '/final.jpg'
SaveImage(new_img.asnumpy(), final_fn)
|
[
"def",
"train_nstyle",
"(",
"args",
",",
"callback",
"=",
"None",
")",
":",
"# input",
"dev",
"=",
"mx",
".",
"gpu",
"(",
"args",
".",
"gpu",
")",
"if",
"args",
".",
"gpu",
">=",
"0",
"else",
"mx",
".",
"cpu",
"(",
")",
"content_np",
"=",
"PreprocessContentImage",
"(",
"args",
".",
"content_image",
",",
"args",
".",
"max_long_edge",
")",
"style_np",
"=",
"PreprocessStyleImage",
"(",
"args",
".",
"style_image",
",",
"shape",
"=",
"content_np",
".",
"shape",
")",
"size",
"=",
"content_np",
".",
"shape",
"[",
"2",
":",
"]",
"# model",
"Executor",
"=",
"namedtuple",
"(",
"'Executor'",
",",
"[",
"'executor'",
",",
"'data'",
",",
"'data_grad'",
"]",
")",
"model_module",
"=",
"importlib",
".",
"import_module",
"(",
"'model_'",
"+",
"args",
".",
"model",
")",
"style",
",",
"content",
"=",
"model_module",
".",
"get_symbol",
"(",
")",
"gram",
",",
"gscale",
"=",
"style_gram_symbol",
"(",
"size",
",",
"style",
")",
"model_executor",
"=",
"model_module",
".",
"get_executor",
"(",
"gram",
",",
"content",
",",
"size",
",",
"dev",
")",
"model_executor",
".",
"data",
"[",
":",
"]",
"=",
"style_np",
"model_executor",
".",
"executor",
".",
"forward",
"(",
")",
"style_array",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"model_executor",
".",
"style",
")",
")",
":",
"style_array",
".",
"append",
"(",
"model_executor",
".",
"style",
"[",
"i",
"]",
".",
"copyto",
"(",
"mx",
".",
"cpu",
"(",
")",
")",
")",
"model_executor",
".",
"data",
"[",
":",
"]",
"=",
"content_np",
"model_executor",
".",
"executor",
".",
"forward",
"(",
")",
"content_array",
"=",
"model_executor",
".",
"content",
".",
"copyto",
"(",
"mx",
".",
"cpu",
"(",
")",
")",
"# delete the executor",
"del",
"model_executor",
"style_loss",
",",
"content_loss",
"=",
"get_loss",
"(",
"gram",
",",
"content",
")",
"model_executor",
"=",
"model_module",
".",
"get_executor",
"(",
"style_loss",
",",
"content_loss",
",",
"size",
",",
"dev",
")",
"grad_array",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"style_array",
")",
")",
":",
"style_array",
"[",
"i",
"]",
".",
"copyto",
"(",
"model_executor",
".",
"arg_dict",
"[",
"\"target_gram_%d\"",
"%",
"i",
"]",
")",
"grad_array",
".",
"append",
"(",
"mx",
".",
"nd",
".",
"ones",
"(",
"(",
"1",
",",
")",
",",
"dev",
")",
"*",
"(",
"float",
"(",
"args",
".",
"style_weight",
")",
"/",
"gscale",
"[",
"i",
"]",
")",
")",
"grad_array",
".",
"append",
"(",
"mx",
".",
"nd",
".",
"ones",
"(",
"(",
"1",
",",
")",
",",
"dev",
")",
"*",
"(",
"float",
"(",
"args",
".",
"content_weight",
")",
")",
")",
"print",
"(",
"[",
"x",
".",
"asscalar",
"(",
")",
"for",
"x",
"in",
"grad_array",
"]",
")",
"content_array",
".",
"copyto",
"(",
"model_executor",
".",
"arg_dict",
"[",
"\"target_content\"",
"]",
")",
"# train",
"# initialize img with random noise",
"img",
"=",
"mx",
".",
"nd",
".",
"zeros",
"(",
"content_np",
".",
"shape",
",",
"ctx",
"=",
"dev",
")",
"img",
"[",
":",
"]",
"=",
"mx",
".",
"rnd",
".",
"uniform",
"(",
"-",
"0.1",
",",
"0.1",
",",
"img",
".",
"shape",
")",
"lr",
"=",
"mx",
".",
"lr_scheduler",
".",
"FactorScheduler",
"(",
"step",
"=",
"args",
".",
"lr_sched_delay",
",",
"factor",
"=",
"args",
".",
"lr_sched_factor",
")",
"optimizer",
"=",
"mx",
".",
"optimizer",
".",
"NAG",
"(",
"learning_rate",
"=",
"args",
".",
"lr",
",",
"wd",
"=",
"0.0001",
",",
"momentum",
"=",
"0.95",
",",
"lr_scheduler",
"=",
"lr",
")",
"optim_state",
"=",
"optimizer",
".",
"create_state",
"(",
"0",
",",
"img",
")",
"logging",
".",
"info",
"(",
"'start training arguments %s'",
",",
"args",
")",
"old_img",
"=",
"img",
".",
"copyto",
"(",
"dev",
")",
"clip_norm",
"=",
"1",
"*",
"np",
".",
"prod",
"(",
"img",
".",
"shape",
")",
"tv_grad_executor",
"=",
"get_tv_grad_executor",
"(",
"img",
",",
"dev",
",",
"args",
".",
"tv_weight",
")",
"for",
"e",
"in",
"range",
"(",
"args",
".",
"max_num_epochs",
")",
":",
"img",
".",
"copyto",
"(",
"model_executor",
".",
"data",
")",
"model_executor",
".",
"executor",
".",
"forward",
"(",
")",
"model_executor",
".",
"executor",
".",
"backward",
"(",
"grad_array",
")",
"gnorm",
"=",
"mx",
".",
"nd",
".",
"norm",
"(",
"model_executor",
".",
"data_grad",
")",
".",
"asscalar",
"(",
")",
"if",
"gnorm",
">",
"clip_norm",
":",
"model_executor",
".",
"data_grad",
"[",
":",
"]",
"*=",
"clip_norm",
"/",
"gnorm",
"if",
"tv_grad_executor",
"is",
"not",
"None",
":",
"tv_grad_executor",
".",
"forward",
"(",
")",
"optimizer",
".",
"update",
"(",
"0",
",",
"img",
",",
"model_executor",
".",
"data_grad",
"+",
"tv_grad_executor",
".",
"outputs",
"[",
"0",
"]",
",",
"optim_state",
")",
"else",
":",
"optimizer",
".",
"update",
"(",
"0",
",",
"img",
",",
"model_executor",
".",
"data_grad",
",",
"optim_state",
")",
"new_img",
"=",
"img",
"eps",
"=",
"(",
"mx",
".",
"nd",
".",
"norm",
"(",
"old_img",
"-",
"new_img",
")",
"/",
"mx",
".",
"nd",
".",
"norm",
"(",
"new_img",
")",
")",
".",
"asscalar",
"(",
")",
"old_img",
"=",
"new_img",
".",
"copyto",
"(",
"dev",
")",
"logging",
".",
"info",
"(",
"'epoch %d, relative change %f'",
",",
"e",
",",
"eps",
")",
"if",
"eps",
"<",
"args",
".",
"stop_eps",
":",
"logging",
".",
"info",
"(",
"'eps < args.stop_eps, training finished'",
")",
"break",
"if",
"callback",
":",
"cbdata",
"=",
"{",
"'eps'",
":",
"eps",
",",
"'epoch'",
":",
"e",
"+",
"1",
",",
"}",
"if",
"(",
"e",
"+",
"1",
")",
"%",
"args",
".",
"save_epochs",
"==",
"0",
":",
"outfn",
"=",
"args",
".",
"output_dir",
"+",
"'e_'",
"+",
"str",
"(",
"e",
"+",
"1",
")",
"+",
"'.jpg'",
"npimg",
"=",
"new_img",
".",
"asnumpy",
"(",
")",
"SaveImage",
"(",
"npimg",
",",
"outfn",
",",
"args",
".",
"remove_noise",
")",
"if",
"callback",
":",
"cbdata",
"[",
"'filename'",
"]",
"=",
"outfn",
"cbdata",
"[",
"'img'",
"]",
"=",
"npimg",
"if",
"callback",
":",
"callback",
"(",
"cbdata",
")",
"final_fn",
"=",
"args",
".",
"output_dir",
"+",
"'/final.jpg'",
"SaveImage",
"(",
"new_img",
".",
"asnumpy",
"(",
")",
",",
"final_fn",
")"
] |
Train a neural style network.
Args are from argparse and control input, output, hyper-parameters.
callback allows for display of training progress.
|
[
"Train",
"a",
"neural",
"style",
"network",
".",
"Args",
"are",
"from",
"argparse",
"and",
"control",
"input",
"output",
"hyper",
"-",
"parameters",
".",
"callback",
"allows",
"for",
"display",
"of",
"training",
"progress",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/neural-style/nstyle.py#L167-L271
|
train
|
apache/incubator-mxnet
|
example/ssd/dataset/iterator.py
|
DetIter._get_batch
|
def _get_batch(self):
"""
Load data/label from dataset
"""
batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1]))
batch_label = []
for i in range(self.batch_size):
if (self._current + i) >= self._size:
if not self.is_train:
continue
# use padding from middle in each epoch
idx = (self._current + i + self._size // 2) % self._size
index = self._index[idx]
else:
index = self._index[self._current + i]
# index = self.debug_index
im_path = self._imdb.image_path_from_index(index)
with open(im_path, 'rb') as fp:
img_content = fp.read()
img = mx.img.imdecode(img_content)
gt = self._imdb.label_from_index(index).copy() if self.is_train else None
data, label = self._data_augmentation(img, gt)
batch_data[i] = data
if self.is_train:
batch_label.append(label)
self._data = {'data': batch_data}
if self.is_train:
self._label = {'label': mx.nd.array(np.array(batch_label))}
else:
self._label = {'label': None}
|
python
|
def _get_batch(self):
"""
Load data/label from dataset
"""
batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1]))
batch_label = []
for i in range(self.batch_size):
if (self._current + i) >= self._size:
if not self.is_train:
continue
# use padding from middle in each epoch
idx = (self._current + i + self._size // 2) % self._size
index = self._index[idx]
else:
index = self._index[self._current + i]
# index = self.debug_index
im_path = self._imdb.image_path_from_index(index)
with open(im_path, 'rb') as fp:
img_content = fp.read()
img = mx.img.imdecode(img_content)
gt = self._imdb.label_from_index(index).copy() if self.is_train else None
data, label = self._data_augmentation(img, gt)
batch_data[i] = data
if self.is_train:
batch_label.append(label)
self._data = {'data': batch_data}
if self.is_train:
self._label = {'label': mx.nd.array(np.array(batch_label))}
else:
self._label = {'label': None}
|
[
"def",
"_get_batch",
"(",
"self",
")",
":",
"batch_data",
"=",
"mx",
".",
"nd",
".",
"zeros",
"(",
"(",
"self",
".",
"batch_size",
",",
"3",
",",
"self",
".",
"_data_shape",
"[",
"0",
"]",
",",
"self",
".",
"_data_shape",
"[",
"1",
"]",
")",
")",
"batch_label",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"batch_size",
")",
":",
"if",
"(",
"self",
".",
"_current",
"+",
"i",
")",
">=",
"self",
".",
"_size",
":",
"if",
"not",
"self",
".",
"is_train",
":",
"continue",
"# use padding from middle in each epoch",
"idx",
"=",
"(",
"self",
".",
"_current",
"+",
"i",
"+",
"self",
".",
"_size",
"//",
"2",
")",
"%",
"self",
".",
"_size",
"index",
"=",
"self",
".",
"_index",
"[",
"idx",
"]",
"else",
":",
"index",
"=",
"self",
".",
"_index",
"[",
"self",
".",
"_current",
"+",
"i",
"]",
"# index = self.debug_index",
"im_path",
"=",
"self",
".",
"_imdb",
".",
"image_path_from_index",
"(",
"index",
")",
"with",
"open",
"(",
"im_path",
",",
"'rb'",
")",
"as",
"fp",
":",
"img_content",
"=",
"fp",
".",
"read",
"(",
")",
"img",
"=",
"mx",
".",
"img",
".",
"imdecode",
"(",
"img_content",
")",
"gt",
"=",
"self",
".",
"_imdb",
".",
"label_from_index",
"(",
"index",
")",
".",
"copy",
"(",
")",
"if",
"self",
".",
"is_train",
"else",
"None",
"data",
",",
"label",
"=",
"self",
".",
"_data_augmentation",
"(",
"img",
",",
"gt",
")",
"batch_data",
"[",
"i",
"]",
"=",
"data",
"if",
"self",
".",
"is_train",
":",
"batch_label",
".",
"append",
"(",
"label",
")",
"self",
".",
"_data",
"=",
"{",
"'data'",
":",
"batch_data",
"}",
"if",
"self",
".",
"is_train",
":",
"self",
".",
"_label",
"=",
"{",
"'label'",
":",
"mx",
".",
"nd",
".",
"array",
"(",
"np",
".",
"array",
"(",
"batch_label",
")",
")",
"}",
"else",
":",
"self",
".",
"_label",
"=",
"{",
"'label'",
":",
"None",
"}"
] |
Load data/label from dataset
|
[
"Load",
"data",
"/",
"label",
"from",
"dataset"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/dataset/iterator.py#L228-L257
|
train
|
apache/incubator-mxnet
|
example/ssd/dataset/iterator.py
|
DetIter._data_augmentation
|
def _data_augmentation(self, data, label):
"""
perform data augmentations: crop, mirror, resize, sub mean, swap channels...
"""
if self.is_train and self._rand_samplers:
rand_crops = []
for rs in self._rand_samplers:
rand_crops += rs.sample(label)
num_rand_crops = len(rand_crops)
# randomly pick up one as input data
if num_rand_crops > 0:
index = int(np.random.uniform(0, 1) * num_rand_crops)
width = data.shape[1]
height = data.shape[0]
crop = rand_crops[index][0]
xmin = int(crop[0] * width)
ymin = int(crop[1] * height)
xmax = int(crop[2] * width)
ymax = int(crop[3] * height)
if xmin >= 0 and ymin >= 0 and xmax <= width and ymax <= height:
data = mx.img.fixed_crop(data, xmin, ymin, xmax-xmin, ymax-ymin)
else:
# padding mode
new_width = xmax - xmin
new_height = ymax - ymin
offset_x = 0 - xmin
offset_y = 0 - ymin
data_bak = data
data = mx.nd.full((new_height, new_width, 3), 128, dtype='uint8')
data[offset_y:offset_y+height, offset_x:offset_x + width, :] = data_bak
label = rand_crops[index][1]
if self.is_train:
interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, \
cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
else:
interp_methods = [cv2.INTER_LINEAR]
interp_method = interp_methods[int(np.random.uniform(0, 1) * len(interp_methods))]
data = mx.img.imresize(data, self._data_shape[1], self._data_shape[0], interp_method)
if self.is_train and self._rand_mirror:
if np.random.uniform(0, 1) > 0.5:
data = mx.nd.flip(data, axis=1)
valid_mask = np.where(label[:, 0] > -1)[0]
tmp = 1.0 - label[valid_mask, 1]
label[valid_mask, 1] = 1.0 - label[valid_mask, 3]
label[valid_mask, 3] = tmp
data = mx.nd.transpose(data, (2,0,1))
data = data.astype('float32')
data = data - self._mean_pixels
return data, label
|
python
|
def _data_augmentation(self, data, label):
"""
perform data augmentations: crop, mirror, resize, sub mean, swap channels...
"""
if self.is_train and self._rand_samplers:
rand_crops = []
for rs in self._rand_samplers:
rand_crops += rs.sample(label)
num_rand_crops = len(rand_crops)
# randomly pick up one as input data
if num_rand_crops > 0:
index = int(np.random.uniform(0, 1) * num_rand_crops)
width = data.shape[1]
height = data.shape[0]
crop = rand_crops[index][0]
xmin = int(crop[0] * width)
ymin = int(crop[1] * height)
xmax = int(crop[2] * width)
ymax = int(crop[3] * height)
if xmin >= 0 and ymin >= 0 and xmax <= width and ymax <= height:
data = mx.img.fixed_crop(data, xmin, ymin, xmax-xmin, ymax-ymin)
else:
# padding mode
new_width = xmax - xmin
new_height = ymax - ymin
offset_x = 0 - xmin
offset_y = 0 - ymin
data_bak = data
data = mx.nd.full((new_height, new_width, 3), 128, dtype='uint8')
data[offset_y:offset_y+height, offset_x:offset_x + width, :] = data_bak
label = rand_crops[index][1]
if self.is_train:
interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, \
cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
else:
interp_methods = [cv2.INTER_LINEAR]
interp_method = interp_methods[int(np.random.uniform(0, 1) * len(interp_methods))]
data = mx.img.imresize(data, self._data_shape[1], self._data_shape[0], interp_method)
if self.is_train and self._rand_mirror:
if np.random.uniform(0, 1) > 0.5:
data = mx.nd.flip(data, axis=1)
valid_mask = np.where(label[:, 0] > -1)[0]
tmp = 1.0 - label[valid_mask, 1]
label[valid_mask, 1] = 1.0 - label[valid_mask, 3]
label[valid_mask, 3] = tmp
data = mx.nd.transpose(data, (2,0,1))
data = data.astype('float32')
data = data - self._mean_pixels
return data, label
|
[
"def",
"_data_augmentation",
"(",
"self",
",",
"data",
",",
"label",
")",
":",
"if",
"self",
".",
"is_train",
"and",
"self",
".",
"_rand_samplers",
":",
"rand_crops",
"=",
"[",
"]",
"for",
"rs",
"in",
"self",
".",
"_rand_samplers",
":",
"rand_crops",
"+=",
"rs",
".",
"sample",
"(",
"label",
")",
"num_rand_crops",
"=",
"len",
"(",
"rand_crops",
")",
"# randomly pick up one as input data",
"if",
"num_rand_crops",
">",
"0",
":",
"index",
"=",
"int",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
")",
"*",
"num_rand_crops",
")",
"width",
"=",
"data",
".",
"shape",
"[",
"1",
"]",
"height",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"crop",
"=",
"rand_crops",
"[",
"index",
"]",
"[",
"0",
"]",
"xmin",
"=",
"int",
"(",
"crop",
"[",
"0",
"]",
"*",
"width",
")",
"ymin",
"=",
"int",
"(",
"crop",
"[",
"1",
"]",
"*",
"height",
")",
"xmax",
"=",
"int",
"(",
"crop",
"[",
"2",
"]",
"*",
"width",
")",
"ymax",
"=",
"int",
"(",
"crop",
"[",
"3",
"]",
"*",
"height",
")",
"if",
"xmin",
">=",
"0",
"and",
"ymin",
">=",
"0",
"and",
"xmax",
"<=",
"width",
"and",
"ymax",
"<=",
"height",
":",
"data",
"=",
"mx",
".",
"img",
".",
"fixed_crop",
"(",
"data",
",",
"xmin",
",",
"ymin",
",",
"xmax",
"-",
"xmin",
",",
"ymax",
"-",
"ymin",
")",
"else",
":",
"# padding mode",
"new_width",
"=",
"xmax",
"-",
"xmin",
"new_height",
"=",
"ymax",
"-",
"ymin",
"offset_x",
"=",
"0",
"-",
"xmin",
"offset_y",
"=",
"0",
"-",
"ymin",
"data_bak",
"=",
"data",
"data",
"=",
"mx",
".",
"nd",
".",
"full",
"(",
"(",
"new_height",
",",
"new_width",
",",
"3",
")",
",",
"128",
",",
"dtype",
"=",
"'uint8'",
")",
"data",
"[",
"offset_y",
":",
"offset_y",
"+",
"height",
",",
"offset_x",
":",
"offset_x",
"+",
"width",
",",
":",
"]",
"=",
"data_bak",
"label",
"=",
"rand_crops",
"[",
"index",
"]",
"[",
"1",
"]",
"if",
"self",
".",
"is_train",
":",
"interp_methods",
"=",
"[",
"cv2",
".",
"INTER_LINEAR",
",",
"cv2",
".",
"INTER_CUBIC",
",",
"cv2",
".",
"INTER_AREA",
",",
"cv2",
".",
"INTER_NEAREST",
",",
"cv2",
".",
"INTER_LANCZOS4",
"]",
"else",
":",
"interp_methods",
"=",
"[",
"cv2",
".",
"INTER_LINEAR",
"]",
"interp_method",
"=",
"interp_methods",
"[",
"int",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
")",
"*",
"len",
"(",
"interp_methods",
")",
")",
"]",
"data",
"=",
"mx",
".",
"img",
".",
"imresize",
"(",
"data",
",",
"self",
".",
"_data_shape",
"[",
"1",
"]",
",",
"self",
".",
"_data_shape",
"[",
"0",
"]",
",",
"interp_method",
")",
"if",
"self",
".",
"is_train",
"and",
"self",
".",
"_rand_mirror",
":",
"if",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
")",
">",
"0.5",
":",
"data",
"=",
"mx",
".",
"nd",
".",
"flip",
"(",
"data",
",",
"axis",
"=",
"1",
")",
"valid_mask",
"=",
"np",
".",
"where",
"(",
"label",
"[",
":",
",",
"0",
"]",
">",
"-",
"1",
")",
"[",
"0",
"]",
"tmp",
"=",
"1.0",
"-",
"label",
"[",
"valid_mask",
",",
"1",
"]",
"label",
"[",
"valid_mask",
",",
"1",
"]",
"=",
"1.0",
"-",
"label",
"[",
"valid_mask",
",",
"3",
"]",
"label",
"[",
"valid_mask",
",",
"3",
"]",
"=",
"tmp",
"data",
"=",
"mx",
".",
"nd",
".",
"transpose",
"(",
"data",
",",
"(",
"2",
",",
"0",
",",
"1",
")",
")",
"data",
"=",
"data",
".",
"astype",
"(",
"'float32'",
")",
"data",
"=",
"data",
"-",
"self",
".",
"_mean_pixels",
"return",
"data",
",",
"label"
] |
perform data augmentations: crop, mirror, resize, sub mean, swap channels...
|
[
"perform",
"data",
"augmentations",
":",
"crop",
"mirror",
"resize",
"sub",
"mean",
"swap",
"channels",
"..."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/dataset/iterator.py#L259-L307
|
train
|
apache/incubator-mxnet
|
example/deep-embedded-clustering/data.py
|
get_mnist
|
def get_mnist():
""" Gets MNIST dataset """
np.random.seed(1234) # set seed for deterministic ordering
mnist_data = mx.test_utils.get_mnist()
X = np.concatenate([mnist_data['train_data'], mnist_data['test_data']])
Y = np.concatenate([mnist_data['train_label'], mnist_data['test_label']])
p = np.random.permutation(X.shape[0])
X = X[p].reshape((X.shape[0], -1)).astype(np.float32)*5
Y = Y[p]
return X, Y
|
python
|
def get_mnist():
""" Gets MNIST dataset """
np.random.seed(1234) # set seed for deterministic ordering
mnist_data = mx.test_utils.get_mnist()
X = np.concatenate([mnist_data['train_data'], mnist_data['test_data']])
Y = np.concatenate([mnist_data['train_label'], mnist_data['test_label']])
p = np.random.permutation(X.shape[0])
X = X[p].reshape((X.shape[0], -1)).astype(np.float32)*5
Y = Y[p]
return X, Y
|
[
"def",
"get_mnist",
"(",
")",
":",
"np",
".",
"random",
".",
"seed",
"(",
"1234",
")",
"# set seed for deterministic ordering",
"mnist_data",
"=",
"mx",
".",
"test_utils",
".",
"get_mnist",
"(",
")",
"X",
"=",
"np",
".",
"concatenate",
"(",
"[",
"mnist_data",
"[",
"'train_data'",
"]",
",",
"mnist_data",
"[",
"'test_data'",
"]",
"]",
")",
"Y",
"=",
"np",
".",
"concatenate",
"(",
"[",
"mnist_data",
"[",
"'train_label'",
"]",
",",
"mnist_data",
"[",
"'test_label'",
"]",
"]",
")",
"p",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
")",
"X",
"=",
"X",
"[",
"p",
"]",
".",
"reshape",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"*",
"5",
"Y",
"=",
"Y",
"[",
"p",
"]",
"return",
"X",
",",
"Y"
] |
Gets MNIST dataset
|
[
"Gets",
"MNIST",
"dataset"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/deep-embedded-clustering/data.py#L25-L35
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor_manager.py
|
_split_input_slice
|
def _split_input_slice(batch_size, work_load_list):
"""Get input slice from the input shape.
Parameters
----------
batch_size : int
The number of samples in a mini-batch.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Returns
-------
slices : list of slice
The split slices to get a specific slice.
Raises
------
ValueError
In case of too many splits, leading to some empty slices.
"""
total_work_load = sum(work_load_list)
batch_num_list = [round(work_load * batch_size / total_work_load)
for work_load in work_load_list]
batch_num_sum = sum(batch_num_list)
if batch_num_sum < batch_size:
batch_num_list[-1] += batch_size - batch_num_sum
slices = []
end = 0
for batch_num in batch_num_list:
begin = int(min((end, batch_size)))
end = int(min((begin + batch_num, batch_size)))
if begin >= end:
raise ValueError('Too many slices. Some splits are empty.')
slices.append(slice(begin, end))
return slices
|
python
|
def _split_input_slice(batch_size, work_load_list):
"""Get input slice from the input shape.
Parameters
----------
batch_size : int
The number of samples in a mini-batch.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Returns
-------
slices : list of slice
The split slices to get a specific slice.
Raises
------
ValueError
In case of too many splits, leading to some empty slices.
"""
total_work_load = sum(work_load_list)
batch_num_list = [round(work_load * batch_size / total_work_load)
for work_load in work_load_list]
batch_num_sum = sum(batch_num_list)
if batch_num_sum < batch_size:
batch_num_list[-1] += batch_size - batch_num_sum
slices = []
end = 0
for batch_num in batch_num_list:
begin = int(min((end, batch_size)))
end = int(min((begin + batch_num, batch_size)))
if begin >= end:
raise ValueError('Too many slices. Some splits are empty.')
slices.append(slice(begin, end))
return slices
|
[
"def",
"_split_input_slice",
"(",
"batch_size",
",",
"work_load_list",
")",
":",
"total_work_load",
"=",
"sum",
"(",
"work_load_list",
")",
"batch_num_list",
"=",
"[",
"round",
"(",
"work_load",
"*",
"batch_size",
"/",
"total_work_load",
")",
"for",
"work_load",
"in",
"work_load_list",
"]",
"batch_num_sum",
"=",
"sum",
"(",
"batch_num_list",
")",
"if",
"batch_num_sum",
"<",
"batch_size",
":",
"batch_num_list",
"[",
"-",
"1",
"]",
"+=",
"batch_size",
"-",
"batch_num_sum",
"slices",
"=",
"[",
"]",
"end",
"=",
"0",
"for",
"batch_num",
"in",
"batch_num_list",
":",
"begin",
"=",
"int",
"(",
"min",
"(",
"(",
"end",
",",
"batch_size",
")",
")",
")",
"end",
"=",
"int",
"(",
"min",
"(",
"(",
"begin",
"+",
"batch_num",
",",
"batch_size",
")",
")",
")",
"if",
"begin",
">=",
"end",
":",
"raise",
"ValueError",
"(",
"'Too many slices. Some splits are empty.'",
")",
"slices",
".",
"append",
"(",
"slice",
"(",
"begin",
",",
"end",
")",
")",
"return",
"slices"
] |
Get input slice from the input shape.
Parameters
----------
batch_size : int
The number of samples in a mini-batch.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Returns
-------
slices : list of slice
The split slices to get a specific slice.
Raises
------
ValueError
In case of too many splits, leading to some empty slices.
|
[
"Get",
"input",
"slice",
"from",
"the",
"input",
"shape",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L31-L66
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor_manager.py
|
_check_arguments
|
def _check_arguments(symbol):
"""Check the argument names of symbol.
This function checks the duplication of arguments in Symbol.
The check is done for feedforward net for now.
Parameters
----------
symbol : Symbol
The network configuration.
"""
arg_set = set()
arg_names = symbol.list_arguments()
for name in arg_names:
if name in arg_set:
raise ValueError(('Find duplicated argument name \"%s\", ' +
'please make the weight name non-duplicated(using name arguments), ' +
'arguments are %s') % (name, str(arg_names)))
arg_set.add(name)
aux_set = set()
aux_names = symbol.list_auxiliary_states()
for name in aux_names:
if name in aux_set:
raise ValueError(
('Find duplicated auxiliary param name \"%s\", ' +
'please make the weight name non-duplicated(using name arguments), ' +
'arguments are %s, auxiliary params are %s'
) % (name, str(arg_names), str(aux_names)))
aux_set.add(name)
|
python
|
def _check_arguments(symbol):
"""Check the argument names of symbol.
This function checks the duplication of arguments in Symbol.
The check is done for feedforward net for now.
Parameters
----------
symbol : Symbol
The network configuration.
"""
arg_set = set()
arg_names = symbol.list_arguments()
for name in arg_names:
if name in arg_set:
raise ValueError(('Find duplicated argument name \"%s\", ' +
'please make the weight name non-duplicated(using name arguments), ' +
'arguments are %s') % (name, str(arg_names)))
arg_set.add(name)
aux_set = set()
aux_names = symbol.list_auxiliary_states()
for name in aux_names:
if name in aux_set:
raise ValueError(
('Find duplicated auxiliary param name \"%s\", ' +
'please make the weight name non-duplicated(using name arguments), ' +
'arguments are %s, auxiliary params are %s'
) % (name, str(arg_names), str(aux_names)))
aux_set.add(name)
|
[
"def",
"_check_arguments",
"(",
"symbol",
")",
":",
"arg_set",
"=",
"set",
"(",
")",
"arg_names",
"=",
"symbol",
".",
"list_arguments",
"(",
")",
"for",
"name",
"in",
"arg_names",
":",
"if",
"name",
"in",
"arg_set",
":",
"raise",
"ValueError",
"(",
"(",
"'Find duplicated argument name \\\"%s\\\", '",
"+",
"'please make the weight name non-duplicated(using name arguments), '",
"+",
"'arguments are %s'",
")",
"%",
"(",
"name",
",",
"str",
"(",
"arg_names",
")",
")",
")",
"arg_set",
".",
"add",
"(",
"name",
")",
"aux_set",
"=",
"set",
"(",
")",
"aux_names",
"=",
"symbol",
".",
"list_auxiliary_states",
"(",
")",
"for",
"name",
"in",
"aux_names",
":",
"if",
"name",
"in",
"aux_set",
":",
"raise",
"ValueError",
"(",
"(",
"'Find duplicated auxiliary param name \\\"%s\\\", '",
"+",
"'please make the weight name non-duplicated(using name arguments), '",
"+",
"'arguments are %s, auxiliary params are %s'",
")",
"%",
"(",
"name",
",",
"str",
"(",
"arg_names",
")",
",",
"str",
"(",
"aux_names",
")",
")",
")",
"aux_set",
".",
"add",
"(",
"name",
")"
] |
Check the argument names of symbol.
This function checks the duplication of arguments in Symbol.
The check is done for feedforward net for now.
Parameters
----------
symbol : Symbol
The network configuration.
|
[
"Check",
"the",
"argument",
"names",
"of",
"symbol",
".",
"This",
"function",
"checks",
"the",
"duplication",
"of",
"arguments",
"in",
"Symbol",
".",
"The",
"check",
"is",
"done",
"for",
"feedforward",
"net",
"for",
"now",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L68-L96
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor_manager.py
|
_load_general
|
def _load_general(data, targets):
"""Load a list of arrays into a list of arrays specified by slices."""
for d_src, d_targets in zip(data, targets):
if isinstance(d_targets, nd.NDArray):
d_src.copyto(d_targets)
else:
assert d_targets[-1][0].stop == d_src.shape[0], \
"Batch size miss match. Expected %d, got %d"%( \
d_targets[-1][0].stop, d_src.shape[0])
for slice_idx, d_dst in d_targets:
d_src[slice_idx].copyto(d_dst)
|
python
|
def _load_general(data, targets):
"""Load a list of arrays into a list of arrays specified by slices."""
for d_src, d_targets in zip(data, targets):
if isinstance(d_targets, nd.NDArray):
d_src.copyto(d_targets)
else:
assert d_targets[-1][0].stop == d_src.shape[0], \
"Batch size miss match. Expected %d, got %d"%( \
d_targets[-1][0].stop, d_src.shape[0])
for slice_idx, d_dst in d_targets:
d_src[slice_idx].copyto(d_dst)
|
[
"def",
"_load_general",
"(",
"data",
",",
"targets",
")",
":",
"for",
"d_src",
",",
"d_targets",
"in",
"zip",
"(",
"data",
",",
"targets",
")",
":",
"if",
"isinstance",
"(",
"d_targets",
",",
"nd",
".",
"NDArray",
")",
":",
"d_src",
".",
"copyto",
"(",
"d_targets",
")",
"else",
":",
"assert",
"d_targets",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
".",
"stop",
"==",
"d_src",
".",
"shape",
"[",
"0",
"]",
",",
"\"Batch size miss match. Expected %d, got %d\"",
"%",
"(",
"d_targets",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
".",
"stop",
",",
"d_src",
".",
"shape",
"[",
"0",
"]",
")",
"for",
"slice_idx",
",",
"d_dst",
"in",
"d_targets",
":",
"d_src",
"[",
"slice_idx",
"]",
".",
"copyto",
"(",
"d_dst",
")"
] |
Load a list of arrays into a list of arrays specified by slices.
|
[
"Load",
"a",
"list",
"of",
"arrays",
"into",
"a",
"list",
"of",
"arrays",
"specified",
"by",
"slices",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L98-L108
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor_manager.py
|
_bind_exec
|
def _bind_exec(sym, ctx, input_shapes, param_names, need_grad=False,
base_exec=None, shared_data_arrays=None, input_types=None, logger=logging):
"""bind executor for bucketing, potentially sharing data with an existing executor."""
arg_shape, _, aux_shape = sym.infer_shape(**input_shapes)
assert(arg_shape is not None)
if input_types is None:
input_types = {k: mx_real_t for k in input_shapes.keys()}
arg_types, _, aux_types = sym.infer_type(**input_types)
assert(arg_types is not None)
arg_arrays = []
grad_arrays = {} if need_grad is not False else None
arg_names = sym.list_arguments()
if need_grad is False:
need_grad = set()
elif need_grad is True:
need_grad = set(arg_names) - set(input_shapes.keys())
elif isinstance(need_grad, set):
pass
else:
raise AssertionError("need_grad must be boolean or set.")
grad_req = {name:('write' if name in need_grad else 'null') for name in arg_names}
# create or borrow arguments and gradients
for i, name in enumerate(arg_names):
if not name in param_names:
# data or label
if shared_data_arrays is not None and \
name in shared_data_arrays:
arg_arr = shared_data_arrays[name]
if np.prod(arg_arr.shape) >= np.prod(arg_shape[i]):
# good, we can share this memory
assert(arg_types[i] == arg_arr.dtype)
arg_arr = arg_arr.reshape(arg_shape[i])
else:
logger.warning(('bucketing: data "%s" has a shape %s' % (name, arg_shape[i])) +
(', which is larger than already allocated ') +
('shape %s' % (arg_arr.shape,)) +
('. Need to re-allocate. Consider putting ') +
('default_bucket_key to be the bucket taking the largest ') +
('input for better memory sharing.'))
arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
# replace existing shared array because the new one is bigger
shared_data_arrays[name] = arg_arr
else:
arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
if shared_data_arrays is not None:
shared_data_arrays[name] = arg_arr
arg_arrays.append(arg_arr)
else:
# model parameter
if base_exec is None:
arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
if name in need_grad:
grad_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
grad_arrays[name] = grad_arr
else:
arg_arr = base_exec.arg_dict[name]
assert arg_arr.shape == arg_shape[i]
assert arg_arr.dtype == arg_types[i]
if name in need_grad:
grad_arrays[name] = base_exec.grad_dict[name]
arg_arrays.append(arg_arr)
# create or borrow aux variables
if base_exec is None:
aux_arrays = [nd.zeros(s, ctx, dtype=t) for s, t in zip(aux_shape, aux_types)]
else:
for i, a in enumerate(base_exec.aux_arrays):
assert aux_shape[i] == a.shape
assert aux_types[i] == a.dtype
aux_arrays = [a for a in base_exec.aux_arrays]
executor = sym.bind(ctx=ctx, args=arg_arrays, args_grad=grad_arrays,
aux_states=aux_arrays,
grad_req=grad_req, shared_exec=base_exec)
return executor
|
python
|
def _bind_exec(sym, ctx, input_shapes, param_names, need_grad=False,
base_exec=None, shared_data_arrays=None, input_types=None, logger=logging):
"""bind executor for bucketing, potentially sharing data with an existing executor."""
arg_shape, _, aux_shape = sym.infer_shape(**input_shapes)
assert(arg_shape is not None)
if input_types is None:
input_types = {k: mx_real_t for k in input_shapes.keys()}
arg_types, _, aux_types = sym.infer_type(**input_types)
assert(arg_types is not None)
arg_arrays = []
grad_arrays = {} if need_grad is not False else None
arg_names = sym.list_arguments()
if need_grad is False:
need_grad = set()
elif need_grad is True:
need_grad = set(arg_names) - set(input_shapes.keys())
elif isinstance(need_grad, set):
pass
else:
raise AssertionError("need_grad must be boolean or set.")
grad_req = {name:('write' if name in need_grad else 'null') for name in arg_names}
# create or borrow arguments and gradients
for i, name in enumerate(arg_names):
if not name in param_names:
# data or label
if shared_data_arrays is not None and \
name in shared_data_arrays:
arg_arr = shared_data_arrays[name]
if np.prod(arg_arr.shape) >= np.prod(arg_shape[i]):
# good, we can share this memory
assert(arg_types[i] == arg_arr.dtype)
arg_arr = arg_arr.reshape(arg_shape[i])
else:
logger.warning(('bucketing: data "%s" has a shape %s' % (name, arg_shape[i])) +
(', which is larger than already allocated ') +
('shape %s' % (arg_arr.shape,)) +
('. Need to re-allocate. Consider putting ') +
('default_bucket_key to be the bucket taking the largest ') +
('input for better memory sharing.'))
arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
# replace existing shared array because the new one is bigger
shared_data_arrays[name] = arg_arr
else:
arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
if shared_data_arrays is not None:
shared_data_arrays[name] = arg_arr
arg_arrays.append(arg_arr)
else:
# model parameter
if base_exec is None:
arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
if name in need_grad:
grad_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
grad_arrays[name] = grad_arr
else:
arg_arr = base_exec.arg_dict[name]
assert arg_arr.shape == arg_shape[i]
assert arg_arr.dtype == arg_types[i]
if name in need_grad:
grad_arrays[name] = base_exec.grad_dict[name]
arg_arrays.append(arg_arr)
# create or borrow aux variables
if base_exec is None:
aux_arrays = [nd.zeros(s, ctx, dtype=t) for s, t in zip(aux_shape, aux_types)]
else:
for i, a in enumerate(base_exec.aux_arrays):
assert aux_shape[i] == a.shape
assert aux_types[i] == a.dtype
aux_arrays = [a for a in base_exec.aux_arrays]
executor = sym.bind(ctx=ctx, args=arg_arrays, args_grad=grad_arrays,
aux_states=aux_arrays,
grad_req=grad_req, shared_exec=base_exec)
return executor
|
[
"def",
"_bind_exec",
"(",
"sym",
",",
"ctx",
",",
"input_shapes",
",",
"param_names",
",",
"need_grad",
"=",
"False",
",",
"base_exec",
"=",
"None",
",",
"shared_data_arrays",
"=",
"None",
",",
"input_types",
"=",
"None",
",",
"logger",
"=",
"logging",
")",
":",
"arg_shape",
",",
"_",
",",
"aux_shape",
"=",
"sym",
".",
"infer_shape",
"(",
"*",
"*",
"input_shapes",
")",
"assert",
"(",
"arg_shape",
"is",
"not",
"None",
")",
"if",
"input_types",
"is",
"None",
":",
"input_types",
"=",
"{",
"k",
":",
"mx_real_t",
"for",
"k",
"in",
"input_shapes",
".",
"keys",
"(",
")",
"}",
"arg_types",
",",
"_",
",",
"aux_types",
"=",
"sym",
".",
"infer_type",
"(",
"*",
"*",
"input_types",
")",
"assert",
"(",
"arg_types",
"is",
"not",
"None",
")",
"arg_arrays",
"=",
"[",
"]",
"grad_arrays",
"=",
"{",
"}",
"if",
"need_grad",
"is",
"not",
"False",
"else",
"None",
"arg_names",
"=",
"sym",
".",
"list_arguments",
"(",
")",
"if",
"need_grad",
"is",
"False",
":",
"need_grad",
"=",
"set",
"(",
")",
"elif",
"need_grad",
"is",
"True",
":",
"need_grad",
"=",
"set",
"(",
"arg_names",
")",
"-",
"set",
"(",
"input_shapes",
".",
"keys",
"(",
")",
")",
"elif",
"isinstance",
"(",
"need_grad",
",",
"set",
")",
":",
"pass",
"else",
":",
"raise",
"AssertionError",
"(",
"\"need_grad must be boolean or set.\"",
")",
"grad_req",
"=",
"{",
"name",
":",
"(",
"'write'",
"if",
"name",
"in",
"need_grad",
"else",
"'null'",
")",
"for",
"name",
"in",
"arg_names",
"}",
"# create or borrow arguments and gradients",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"arg_names",
")",
":",
"if",
"not",
"name",
"in",
"param_names",
":",
"# data or label",
"if",
"shared_data_arrays",
"is",
"not",
"None",
"and",
"name",
"in",
"shared_data_arrays",
":",
"arg_arr",
"=",
"shared_data_arrays",
"[",
"name",
"]",
"if",
"np",
".",
"prod",
"(",
"arg_arr",
".",
"shape",
")",
">=",
"np",
".",
"prod",
"(",
"arg_shape",
"[",
"i",
"]",
")",
":",
"# good, we can share this memory",
"assert",
"(",
"arg_types",
"[",
"i",
"]",
"==",
"arg_arr",
".",
"dtype",
")",
"arg_arr",
"=",
"arg_arr",
".",
"reshape",
"(",
"arg_shape",
"[",
"i",
"]",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"(",
"'bucketing: data \"%s\" has a shape %s'",
"%",
"(",
"name",
",",
"arg_shape",
"[",
"i",
"]",
")",
")",
"+",
"(",
"', which is larger than already allocated '",
")",
"+",
"(",
"'shape %s'",
"%",
"(",
"arg_arr",
".",
"shape",
",",
")",
")",
"+",
"(",
"'. Need to re-allocate. Consider putting '",
")",
"+",
"(",
"'default_bucket_key to be the bucket taking the largest '",
")",
"+",
"(",
"'input for better memory sharing.'",
")",
")",
"arg_arr",
"=",
"nd",
".",
"zeros",
"(",
"arg_shape",
"[",
"i",
"]",
",",
"ctx",
",",
"dtype",
"=",
"arg_types",
"[",
"i",
"]",
")",
"# replace existing shared array because the new one is bigger",
"shared_data_arrays",
"[",
"name",
"]",
"=",
"arg_arr",
"else",
":",
"arg_arr",
"=",
"nd",
".",
"zeros",
"(",
"arg_shape",
"[",
"i",
"]",
",",
"ctx",
",",
"dtype",
"=",
"arg_types",
"[",
"i",
"]",
")",
"if",
"shared_data_arrays",
"is",
"not",
"None",
":",
"shared_data_arrays",
"[",
"name",
"]",
"=",
"arg_arr",
"arg_arrays",
".",
"append",
"(",
"arg_arr",
")",
"else",
":",
"# model parameter",
"if",
"base_exec",
"is",
"None",
":",
"arg_arr",
"=",
"nd",
".",
"zeros",
"(",
"arg_shape",
"[",
"i",
"]",
",",
"ctx",
",",
"dtype",
"=",
"arg_types",
"[",
"i",
"]",
")",
"if",
"name",
"in",
"need_grad",
":",
"grad_arr",
"=",
"nd",
".",
"zeros",
"(",
"arg_shape",
"[",
"i",
"]",
",",
"ctx",
",",
"dtype",
"=",
"arg_types",
"[",
"i",
"]",
")",
"grad_arrays",
"[",
"name",
"]",
"=",
"grad_arr",
"else",
":",
"arg_arr",
"=",
"base_exec",
".",
"arg_dict",
"[",
"name",
"]",
"assert",
"arg_arr",
".",
"shape",
"==",
"arg_shape",
"[",
"i",
"]",
"assert",
"arg_arr",
".",
"dtype",
"==",
"arg_types",
"[",
"i",
"]",
"if",
"name",
"in",
"need_grad",
":",
"grad_arrays",
"[",
"name",
"]",
"=",
"base_exec",
".",
"grad_dict",
"[",
"name",
"]",
"arg_arrays",
".",
"append",
"(",
"arg_arr",
")",
"# create or borrow aux variables",
"if",
"base_exec",
"is",
"None",
":",
"aux_arrays",
"=",
"[",
"nd",
".",
"zeros",
"(",
"s",
",",
"ctx",
",",
"dtype",
"=",
"t",
")",
"for",
"s",
",",
"t",
"in",
"zip",
"(",
"aux_shape",
",",
"aux_types",
")",
"]",
"else",
":",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"base_exec",
".",
"aux_arrays",
")",
":",
"assert",
"aux_shape",
"[",
"i",
"]",
"==",
"a",
".",
"shape",
"assert",
"aux_types",
"[",
"i",
"]",
"==",
"a",
".",
"dtype",
"aux_arrays",
"=",
"[",
"a",
"for",
"a",
"in",
"base_exec",
".",
"aux_arrays",
"]",
"executor",
"=",
"sym",
".",
"bind",
"(",
"ctx",
"=",
"ctx",
",",
"args",
"=",
"arg_arrays",
",",
"args_grad",
"=",
"grad_arrays",
",",
"aux_states",
"=",
"aux_arrays",
",",
"grad_req",
"=",
"grad_req",
",",
"shared_exec",
"=",
"base_exec",
")",
"return",
"executor"
] |
bind executor for bucketing, potentially sharing data with an existing executor.
|
[
"bind",
"executor",
"for",
"bucketing",
"potentially",
"sharing",
"data",
"with",
"an",
"existing",
"executor",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L119-L202
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor_manager.py
|
DataParallelExecutorGroup.load_data_batch
|
def load_data_batch(self, data_batch):
"""Load data and labels into arrays."""
_load_data(data_batch, self.data_arrays)
_load_label(data_batch, self.label_arrays)
|
python
|
def load_data_batch(self, data_batch):
"""Load data and labels into arrays."""
_load_data(data_batch, self.data_arrays)
_load_label(data_batch, self.label_arrays)
|
[
"def",
"load_data_batch",
"(",
"self",
",",
"data_batch",
")",
":",
"_load_data",
"(",
"data_batch",
",",
"self",
".",
"data_arrays",
")",
"_load_label",
"(",
"data_batch",
",",
"self",
".",
"label_arrays",
")"
] |
Load data and labels into arrays.
|
[
"Load",
"data",
"and",
"labels",
"into",
"arrays",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L274-L277
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor_manager.py
|
DataParallelExecutorGroup.forward
|
def forward(self, is_train=False):
"""Perform a forward pass on each executor."""
for texec in self.train_execs:
texec.forward(is_train=is_train)
|
python
|
def forward(self, is_train=False):
"""Perform a forward pass on each executor."""
for texec in self.train_execs:
texec.forward(is_train=is_train)
|
[
"def",
"forward",
"(",
"self",
",",
"is_train",
"=",
"False",
")",
":",
"for",
"texec",
"in",
"self",
".",
"train_execs",
":",
"texec",
".",
"forward",
"(",
"is_train",
"=",
"is_train",
")"
] |
Perform a forward pass on each executor.
|
[
"Perform",
"a",
"forward",
"pass",
"on",
"each",
"executor",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L279-L282
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor_manager.py
|
DataParallelExecutorGroup.update_metric
|
def update_metric(self, metric, labels, pre_sliced=False):
"""Update evaluation metric with label and current outputs."""
for current_exec, (texec, islice) in enumerate(zip(self.train_execs, self.slices)):
if not pre_sliced:
labels_slice = [label[islice] for label in labels]
else:
labels_slice = labels[current_exec]
metric.update(labels_slice, texec.outputs)
|
python
|
def update_metric(self, metric, labels, pre_sliced=False):
"""Update evaluation metric with label and current outputs."""
for current_exec, (texec, islice) in enumerate(zip(self.train_execs, self.slices)):
if not pre_sliced:
labels_slice = [label[islice] for label in labels]
else:
labels_slice = labels[current_exec]
metric.update(labels_slice, texec.outputs)
|
[
"def",
"update_metric",
"(",
"self",
",",
"metric",
",",
"labels",
",",
"pre_sliced",
"=",
"False",
")",
":",
"for",
"current_exec",
",",
"(",
"texec",
",",
"islice",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"self",
".",
"train_execs",
",",
"self",
".",
"slices",
")",
")",
":",
"if",
"not",
"pre_sliced",
":",
"labels_slice",
"=",
"[",
"label",
"[",
"islice",
"]",
"for",
"label",
"in",
"labels",
"]",
"else",
":",
"labels_slice",
"=",
"labels",
"[",
"current_exec",
"]",
"metric",
".",
"update",
"(",
"labels_slice",
",",
"texec",
".",
"outputs",
")"
] |
Update evaluation metric with label and current outputs.
|
[
"Update",
"evaluation",
"metric",
"with",
"label",
"and",
"current",
"outputs",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L289-L296
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor_manager.py
|
DataParallelExecutorManager.install_monitor
|
def install_monitor(self, monitor):
"""Install monitor on all executors."""
if self.sym_gen is not None:
raise NotImplementedError("Monitoring is not implemented for bucketing")
for train_exec in self.execgrp.train_execs:
monitor.install(train_exec)
|
python
|
def install_monitor(self, monitor):
"""Install monitor on all executors."""
if self.sym_gen is not None:
raise NotImplementedError("Monitoring is not implemented for bucketing")
for train_exec in self.execgrp.train_execs:
monitor.install(train_exec)
|
[
"def",
"install_monitor",
"(",
"self",
",",
"monitor",
")",
":",
"if",
"self",
".",
"sym_gen",
"is",
"not",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Monitoring is not implemented for bucketing\"",
")",
"for",
"train_exec",
"in",
"self",
".",
"execgrp",
".",
"train_execs",
":",
"monitor",
".",
"install",
"(",
"train_exec",
")"
] |
Install monitor on all executors.
|
[
"Install",
"monitor",
"on",
"all",
"executors",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L355-L361
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor_manager.py
|
DataParallelExecutorManager.set_params
|
def set_params(self, arg_params, aux_params):
"""Set parameter and aux values.
Parameters
----------
arg_params : list of NDArray
Source parameter arrays
aux_params : list of NDArray
Source aux arrays.
"""
for texec in self.execgrp.train_execs:
texec.copy_params_from(arg_params, aux_params)
|
python
|
def set_params(self, arg_params, aux_params):
"""Set parameter and aux values.
Parameters
----------
arg_params : list of NDArray
Source parameter arrays
aux_params : list of NDArray
Source aux arrays.
"""
for texec in self.execgrp.train_execs:
texec.copy_params_from(arg_params, aux_params)
|
[
"def",
"set_params",
"(",
"self",
",",
"arg_params",
",",
"aux_params",
")",
":",
"for",
"texec",
"in",
"self",
".",
"execgrp",
".",
"train_execs",
":",
"texec",
".",
"copy_params_from",
"(",
"arg_params",
",",
"aux_params",
")"
] |
Set parameter and aux values.
Parameters
----------
arg_params : list of NDArray
Source parameter arrays
aux_params : list of NDArray
Source aux arrays.
|
[
"Set",
"parameter",
"and",
"aux",
"values",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L363-L375
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor_manager.py
|
DataParallelExecutorManager.load_data_batch
|
def load_data_batch(self, data_batch):
"""Load data and labels into arrays."""
if self.sym_gen is not None:
key = data_batch.bucket_key
if key not in self.execgrp_bucket:
# create new bucket entry
symbol = self.sym_gen(key)
execgrp = DataParallelExecutorGroup(symbol, self.arg_names,
self.param_names, self.ctx,
self.slices, data_batch,
shared_group=self.execgrp)
self.execgrp_bucket[key] = execgrp
self.curr_execgrp = self.execgrp_bucket[key]
else:
self.curr_execgrp = self.execgrp
self.curr_execgrp.load_data_batch(data_batch)
|
python
|
def load_data_batch(self, data_batch):
"""Load data and labels into arrays."""
if self.sym_gen is not None:
key = data_batch.bucket_key
if key not in self.execgrp_bucket:
# create new bucket entry
symbol = self.sym_gen(key)
execgrp = DataParallelExecutorGroup(symbol, self.arg_names,
self.param_names, self.ctx,
self.slices, data_batch,
shared_group=self.execgrp)
self.execgrp_bucket[key] = execgrp
self.curr_execgrp = self.execgrp_bucket[key]
else:
self.curr_execgrp = self.execgrp
self.curr_execgrp.load_data_batch(data_batch)
|
[
"def",
"load_data_batch",
"(",
"self",
",",
"data_batch",
")",
":",
"if",
"self",
".",
"sym_gen",
"is",
"not",
"None",
":",
"key",
"=",
"data_batch",
".",
"bucket_key",
"if",
"key",
"not",
"in",
"self",
".",
"execgrp_bucket",
":",
"# create new bucket entry",
"symbol",
"=",
"self",
".",
"sym_gen",
"(",
"key",
")",
"execgrp",
"=",
"DataParallelExecutorGroup",
"(",
"symbol",
",",
"self",
".",
"arg_names",
",",
"self",
".",
"param_names",
",",
"self",
".",
"ctx",
",",
"self",
".",
"slices",
",",
"data_batch",
",",
"shared_group",
"=",
"self",
".",
"execgrp",
")",
"self",
".",
"execgrp_bucket",
"[",
"key",
"]",
"=",
"execgrp",
"self",
".",
"curr_execgrp",
"=",
"self",
".",
"execgrp_bucket",
"[",
"key",
"]",
"else",
":",
"self",
".",
"curr_execgrp",
"=",
"self",
".",
"execgrp",
"self",
".",
"curr_execgrp",
".",
"load_data_batch",
"(",
"data_batch",
")"
] |
Load data and labels into arrays.
|
[
"Load",
"data",
"and",
"labels",
"into",
"arrays",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L415-L432
|
train
|
apache/incubator-mxnet
|
python/mxnet/executor_manager.py
|
DataParallelExecutorManager.update_metric
|
def update_metric(self, metric, labels, pre_sliced=False):
"""Update metric with the current executor."""
self.curr_execgrp.update_metric(metric, labels, pre_sliced)
|
python
|
def update_metric(self, metric, labels, pre_sliced=False):
"""Update metric with the current executor."""
self.curr_execgrp.update_metric(metric, labels, pre_sliced)
|
[
"def",
"update_metric",
"(",
"self",
",",
"metric",
",",
"labels",
",",
"pre_sliced",
"=",
"False",
")",
":",
"self",
".",
"curr_execgrp",
".",
"update_metric",
"(",
"metric",
",",
"labels",
",",
"pre_sliced",
")"
] |
Update metric with the current executor.
|
[
"Update",
"metric",
"with",
"the",
"current",
"executor",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/executor_manager.py#L442-L444
|
train
|
apache/incubator-mxnet
|
example/reinforcement-learning/dqn/replay_memory.py
|
ReplayMemory.clear
|
def clear(self):
"""
Clear all contents in the relay memory
"""
self.states[:] = 0
self.actions[:] = 0
self.rewards[:] = 0
self.terminate_flags[:] = 0
self.top = 0
self.size = 0
|
python
|
def clear(self):
"""
Clear all contents in the relay memory
"""
self.states[:] = 0
self.actions[:] = 0
self.rewards[:] = 0
self.terminate_flags[:] = 0
self.top = 0
self.size = 0
|
[
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"states",
"[",
":",
"]",
"=",
"0",
"self",
".",
"actions",
"[",
":",
"]",
"=",
"0",
"self",
".",
"rewards",
"[",
":",
"]",
"=",
"0",
"self",
".",
"terminate_flags",
"[",
":",
"]",
"=",
"0",
"self",
".",
"top",
"=",
"0",
"self",
".",
"size",
"=",
"0"
] |
Clear all contents in the relay memory
|
[
"Clear",
"all",
"contents",
"in",
"the",
"relay",
"memory"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/reinforcement-learning/dqn/replay_memory.py#L63-L72
|
train
|
apache/incubator-mxnet
|
cpp-package/scripts/lint.py
|
get_header_guard_dmlc
|
def get_header_guard_dmlc(filename):
"""Get Header Guard Convention for DMLC Projects.
For headers in include, directly use the path
For headers in src, use project name plus path
Examples: with project-name = dmlc
include/dmlc/timer.h -> DMLC_TIMTER_H_
src/io/libsvm_parser.h -> DMLC_IO_LIBSVM_PARSER_H_
"""
fileinfo = cpplint.FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
inc_list = ['include', 'api', 'wrapper']
if file_path_from_root.find('src/') != -1 and _HELPER.project_name is not None:
idx = file_path_from_root.find('src/')
file_path_from_root = _HELPER.project_name + file_path_from_root[idx + 3:]
else:
for spath in inc_list:
prefix = spath + os.sep
if file_path_from_root.startswith(prefix):
file_path_from_root = re.sub('^' + prefix, '', file_path_from_root)
break
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
|
python
|
def get_header_guard_dmlc(filename):
"""Get Header Guard Convention for DMLC Projects.
For headers in include, directly use the path
For headers in src, use project name plus path
Examples: with project-name = dmlc
include/dmlc/timer.h -> DMLC_TIMTER_H_
src/io/libsvm_parser.h -> DMLC_IO_LIBSVM_PARSER_H_
"""
fileinfo = cpplint.FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
inc_list = ['include', 'api', 'wrapper']
if file_path_from_root.find('src/') != -1 and _HELPER.project_name is not None:
idx = file_path_from_root.find('src/')
file_path_from_root = _HELPER.project_name + file_path_from_root[idx + 3:]
else:
for spath in inc_list:
prefix = spath + os.sep
if file_path_from_root.startswith(prefix):
file_path_from_root = re.sub('^' + prefix, '', file_path_from_root)
break
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
|
[
"def",
"get_header_guard_dmlc",
"(",
"filename",
")",
":",
"fileinfo",
"=",
"cpplint",
".",
"FileInfo",
"(",
"filename",
")",
"file_path_from_root",
"=",
"fileinfo",
".",
"RepositoryName",
"(",
")",
"inc_list",
"=",
"[",
"'include'",
",",
"'api'",
",",
"'wrapper'",
"]",
"if",
"file_path_from_root",
".",
"find",
"(",
"'src/'",
")",
"!=",
"-",
"1",
"and",
"_HELPER",
".",
"project_name",
"is",
"not",
"None",
":",
"idx",
"=",
"file_path_from_root",
".",
"find",
"(",
"'src/'",
")",
"file_path_from_root",
"=",
"_HELPER",
".",
"project_name",
"+",
"file_path_from_root",
"[",
"idx",
"+",
"3",
":",
"]",
"else",
":",
"for",
"spath",
"in",
"inc_list",
":",
"prefix",
"=",
"spath",
"+",
"os",
".",
"sep",
"if",
"file_path_from_root",
".",
"startswith",
"(",
"prefix",
")",
":",
"file_path_from_root",
"=",
"re",
".",
"sub",
"(",
"'^'",
"+",
"prefix",
",",
"''",
",",
"file_path_from_root",
")",
"break",
"return",
"re",
".",
"sub",
"(",
"r'[-./\\s]'",
",",
"'_'",
",",
"file_path_from_root",
")",
".",
"upper",
"(",
")",
"+",
"'_'"
] |
Get Header Guard Convention for DMLC Projects.
For headers in include, directly use the path
For headers in src, use project name plus path
Examples: with project-name = dmlc
include/dmlc/timer.h -> DMLC_TIMTER_H_
src/io/libsvm_parser.h -> DMLC_IO_LIBSVM_PARSER_H_
|
[
"Get",
"Header",
"Guard",
"Convention",
"for",
"DMLC",
"Projects",
".",
"For",
"headers",
"in",
"include",
"directly",
"use",
"the",
"path",
"For",
"headers",
"in",
"src",
"use",
"project",
"name",
"plus",
"path",
"Examples",
":",
"with",
"project",
"-",
"name",
"=",
"dmlc",
"include",
"/",
"dmlc",
"/",
"timer",
".",
"h",
"-",
">",
"DMLC_TIMTER_H_",
"src",
"/",
"io",
"/",
"libsvm_parser",
".",
"h",
"-",
">",
"DMLC_IO_LIBSVM_PARSER_H_"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/lint.py#L123-L144
|
train
|
apache/incubator-mxnet
|
cpp-package/scripts/lint.py
|
process
|
def process(fname, allow_type):
"""Process a file."""
fname = str(fname)
# HACK: ignore op.h which is automatically generated
if fname.endswith('op.h'):
return
arr = fname.rsplit('.', 1)
if fname.find('#') != -1 or arr[-1] not in allow_type:
return
if arr[-1] in CXX_SUFFIX:
_HELPER.process_cpp(fname, arr[-1])
if arr[-1] in PYTHON_SUFFIX:
_HELPER.process_python(fname)
|
python
|
def process(fname, allow_type):
"""Process a file."""
fname = str(fname)
# HACK: ignore op.h which is automatically generated
if fname.endswith('op.h'):
return
arr = fname.rsplit('.', 1)
if fname.find('#') != -1 or arr[-1] not in allow_type:
return
if arr[-1] in CXX_SUFFIX:
_HELPER.process_cpp(fname, arr[-1])
if arr[-1] in PYTHON_SUFFIX:
_HELPER.process_python(fname)
|
[
"def",
"process",
"(",
"fname",
",",
"allow_type",
")",
":",
"fname",
"=",
"str",
"(",
"fname",
")",
"# HACK: ignore op.h which is automatically generated",
"if",
"fname",
".",
"endswith",
"(",
"'op.h'",
")",
":",
"return",
"arr",
"=",
"fname",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"if",
"fname",
".",
"find",
"(",
"'#'",
")",
"!=",
"-",
"1",
"or",
"arr",
"[",
"-",
"1",
"]",
"not",
"in",
"allow_type",
":",
"return",
"if",
"arr",
"[",
"-",
"1",
"]",
"in",
"CXX_SUFFIX",
":",
"_HELPER",
".",
"process_cpp",
"(",
"fname",
",",
"arr",
"[",
"-",
"1",
"]",
")",
"if",
"arr",
"[",
"-",
"1",
"]",
"in",
"PYTHON_SUFFIX",
":",
"_HELPER",
".",
"process_python",
"(",
"fname",
")"
] |
Process a file.
|
[
"Process",
"a",
"file",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/lint.py#L148-L160
|
train
|
apache/incubator-mxnet
|
cpp-package/scripts/lint.py
|
main
|
def main():
"""Main entry function."""
if len(sys.argv) < 3:
print('Usage: <project-name> <filetype> <list-of-path to traverse>')
print('\tfiletype can be python/cpp/all')
exit(-1)
_HELPER.project_name = sys.argv[1]
file_type = sys.argv[2]
allow_type = []
if file_type == 'python' or file_type == 'all':
allow_type += [x for x in PYTHON_SUFFIX]
if file_type == 'cpp' or file_type == 'all':
allow_type += [x for x in CXX_SUFFIX]
allow_type = set(allow_type)
if os.name != 'nt':
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
for path in sys.argv[3:]:
if os.path.isfile(path):
process(path, allow_type)
else:
for root, dirs, files in os.walk(path):
for name in files:
process(os.path.join(root, name), allow_type)
nerr = _HELPER.print_summary(sys.stderr)
sys.exit(nerr > 0)
|
python
|
def main():
"""Main entry function."""
if len(sys.argv) < 3:
print('Usage: <project-name> <filetype> <list-of-path to traverse>')
print('\tfiletype can be python/cpp/all')
exit(-1)
_HELPER.project_name = sys.argv[1]
file_type = sys.argv[2]
allow_type = []
if file_type == 'python' or file_type == 'all':
allow_type += [x for x in PYTHON_SUFFIX]
if file_type == 'cpp' or file_type == 'all':
allow_type += [x for x in CXX_SUFFIX]
allow_type = set(allow_type)
if os.name != 'nt':
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
for path in sys.argv[3:]:
if os.path.isfile(path):
process(path, allow_type)
else:
for root, dirs, files in os.walk(path):
for name in files:
process(os.path.join(root, name), allow_type)
nerr = _HELPER.print_summary(sys.stderr)
sys.exit(nerr > 0)
|
[
"def",
"main",
"(",
")",
":",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"<",
"3",
":",
"print",
"(",
"'Usage: <project-name> <filetype> <list-of-path to traverse>'",
")",
"print",
"(",
"'\\tfiletype can be python/cpp/all'",
")",
"exit",
"(",
"-",
"1",
")",
"_HELPER",
".",
"project_name",
"=",
"sys",
".",
"argv",
"[",
"1",
"]",
"file_type",
"=",
"sys",
".",
"argv",
"[",
"2",
"]",
"allow_type",
"=",
"[",
"]",
"if",
"file_type",
"==",
"'python'",
"or",
"file_type",
"==",
"'all'",
":",
"allow_type",
"+=",
"[",
"x",
"for",
"x",
"in",
"PYTHON_SUFFIX",
"]",
"if",
"file_type",
"==",
"'cpp'",
"or",
"file_type",
"==",
"'all'",
":",
"allow_type",
"+=",
"[",
"x",
"for",
"x",
"in",
"CXX_SUFFIX",
"]",
"allow_type",
"=",
"set",
"(",
"allow_type",
")",
"if",
"os",
".",
"name",
"!=",
"'nt'",
":",
"sys",
".",
"stderr",
"=",
"codecs",
".",
"StreamReaderWriter",
"(",
"sys",
".",
"stderr",
",",
"codecs",
".",
"getreader",
"(",
"'utf8'",
")",
",",
"codecs",
".",
"getwriter",
"(",
"'utf8'",
")",
",",
"'replace'",
")",
"for",
"path",
"in",
"sys",
".",
"argv",
"[",
"3",
":",
"]",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"process",
"(",
"path",
",",
"allow_type",
")",
"else",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"for",
"name",
"in",
"files",
":",
"process",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"name",
")",
",",
"allow_type",
")",
"nerr",
"=",
"_HELPER",
".",
"print_summary",
"(",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"nerr",
">",
"0",
")"
] |
Main entry function.
|
[
"Main",
"entry",
"function",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/lint.py#L162-L190
|
train
|
apache/incubator-mxnet
|
cpp-package/scripts/lint.py
|
LintHelper._print_summary_map
|
def _print_summary_map(strm, result_map, ftype):
"""Print summary of certain result map."""
if len(result_map) == 0:
return 0
npass = len([x for k, x in result_map.iteritems() if len(x) == 0])
strm.write('=====%d/%d %s files passed check=====\n' % (npass, len(result_map), ftype))
for fname, emap in result_map.iteritems():
if len(emap) == 0:
continue
strm.write('%s: %d Errors of %d Categories map=%s\n' % (
fname, sum(emap.values()), len(emap), str(emap)))
return len(result_map) - npass
|
python
|
def _print_summary_map(strm, result_map, ftype):
"""Print summary of certain result map."""
if len(result_map) == 0:
return 0
npass = len([x for k, x in result_map.iteritems() if len(x) == 0])
strm.write('=====%d/%d %s files passed check=====\n' % (npass, len(result_map), ftype))
for fname, emap in result_map.iteritems():
if len(emap) == 0:
continue
strm.write('%s: %d Errors of %d Categories map=%s\n' % (
fname, sum(emap.values()), len(emap), str(emap)))
return len(result_map) - npass
|
[
"def",
"_print_summary_map",
"(",
"strm",
",",
"result_map",
",",
"ftype",
")",
":",
"if",
"len",
"(",
"result_map",
")",
"==",
"0",
":",
"return",
"0",
"npass",
"=",
"len",
"(",
"[",
"x",
"for",
"k",
",",
"x",
"in",
"result_map",
".",
"iteritems",
"(",
")",
"if",
"len",
"(",
"x",
")",
"==",
"0",
"]",
")",
"strm",
".",
"write",
"(",
"'=====%d/%d %s files passed check=====\\n'",
"%",
"(",
"npass",
",",
"len",
"(",
"result_map",
")",
",",
"ftype",
")",
")",
"for",
"fname",
",",
"emap",
"in",
"result_map",
".",
"iteritems",
"(",
")",
":",
"if",
"len",
"(",
"emap",
")",
"==",
"0",
":",
"continue",
"strm",
".",
"write",
"(",
"'%s: %d Errors of %d Categories map=%s\\n'",
"%",
"(",
"fname",
",",
"sum",
"(",
"emap",
".",
"values",
"(",
")",
")",
",",
"len",
"(",
"emap",
")",
",",
"str",
"(",
"emap",
")",
")",
")",
"return",
"len",
"(",
"result_map",
")",
"-",
"npass"
] |
Print summary of certain result map.
|
[
"Print",
"summary",
"of",
"certain",
"result",
"map",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/lint.py#L40-L51
|
train
|
apache/incubator-mxnet
|
cpp-package/scripts/lint.py
|
LintHelper.process_cpp
|
def process_cpp(self, path, suffix):
"""Process a cpp file."""
_cpplint_state.ResetErrorCounts()
cpplint.ProcessFile(str(path), _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
errors = _cpplint_state.errors_by_category.copy()
if suffix == 'h':
self.cpp_header_map[str(path)] = errors
else:
self.cpp_src_map[str(path)] = errors
|
python
|
def process_cpp(self, path, suffix):
"""Process a cpp file."""
_cpplint_state.ResetErrorCounts()
cpplint.ProcessFile(str(path), _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
errors = _cpplint_state.errors_by_category.copy()
if suffix == 'h':
self.cpp_header_map[str(path)] = errors
else:
self.cpp_src_map[str(path)] = errors
|
[
"def",
"process_cpp",
"(",
"self",
",",
"path",
",",
"suffix",
")",
":",
"_cpplint_state",
".",
"ResetErrorCounts",
"(",
")",
"cpplint",
".",
"ProcessFile",
"(",
"str",
"(",
"path",
")",
",",
"_cpplint_state",
".",
"verbose_level",
")",
"_cpplint_state",
".",
"PrintErrorCounts",
"(",
")",
"errors",
"=",
"_cpplint_state",
".",
"errors_by_category",
".",
"copy",
"(",
")",
"if",
"suffix",
"==",
"'h'",
":",
"self",
".",
"cpp_header_map",
"[",
"str",
"(",
"path",
")",
"]",
"=",
"errors",
"else",
":",
"self",
".",
"cpp_src_map",
"[",
"str",
"(",
"path",
")",
"]",
"=",
"errors"
] |
Process a cpp file.
|
[
"Process",
"a",
"cpp",
"file",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/lint.py#L78-L88
|
train
|
apache/incubator-mxnet
|
cpp-package/scripts/lint.py
|
LintHelper.process_python
|
def process_python(self, path):
"""Process a python file."""
(pylint_stdout, pylint_stderr) = epylint.py_run(
' '.join([str(path)] + self.pylint_opts), return_std=True)
emap = {}
print(pylint_stderr.read())
for line in pylint_stdout:
sys.stderr.write(line)
key = line.split(':')[-1].split('(')[0].strip()
if key not in self.pylint_cats:
continue
if key not in emap:
emap[key] = 1
else:
emap[key] += 1
sys.stderr.write('\n')
self.python_map[str(path)] = emap
|
python
|
def process_python(self, path):
"""Process a python file."""
(pylint_stdout, pylint_stderr) = epylint.py_run(
' '.join([str(path)] + self.pylint_opts), return_std=True)
emap = {}
print(pylint_stderr.read())
for line in pylint_stdout:
sys.stderr.write(line)
key = line.split(':')[-1].split('(')[0].strip()
if key not in self.pylint_cats:
continue
if key not in emap:
emap[key] = 1
else:
emap[key] += 1
sys.stderr.write('\n')
self.python_map[str(path)] = emap
|
[
"def",
"process_python",
"(",
"self",
",",
"path",
")",
":",
"(",
"pylint_stdout",
",",
"pylint_stderr",
")",
"=",
"epylint",
".",
"py_run",
"(",
"' '",
".",
"join",
"(",
"[",
"str",
"(",
"path",
")",
"]",
"+",
"self",
".",
"pylint_opts",
")",
",",
"return_std",
"=",
"True",
")",
"emap",
"=",
"{",
"}",
"print",
"(",
"pylint_stderr",
".",
"read",
"(",
")",
")",
"for",
"line",
"in",
"pylint_stdout",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"line",
")",
"key",
"=",
"line",
".",
"split",
"(",
"':'",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"'('",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"if",
"key",
"not",
"in",
"self",
".",
"pylint_cats",
":",
"continue",
"if",
"key",
"not",
"in",
"emap",
":",
"emap",
"[",
"key",
"]",
"=",
"1",
"else",
":",
"emap",
"[",
"key",
"]",
"+=",
"1",
"sys",
".",
"stderr",
".",
"write",
"(",
"'\\n'",
")",
"self",
".",
"python_map",
"[",
"str",
"(",
"path",
")",
"]",
"=",
"emap"
] |
Process a python file.
|
[
"Process",
"a",
"python",
"file",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/lint.py#L90-L106
|
train
|
apache/incubator-mxnet
|
cpp-package/scripts/lint.py
|
LintHelper.print_summary
|
def print_summary(self, strm):
"""Print summary of lint."""
nerr = 0
nerr += LintHelper._print_summary_map(strm, self.cpp_header_map, 'cpp-header')
nerr += LintHelper._print_summary_map(strm, self.cpp_src_map, 'cpp-soruce')
nerr += LintHelper._print_summary_map(strm, self.python_map, 'python')
if nerr == 0:
strm.write('All passed!\n')
else:
strm.write('%d files failed lint\n' % nerr)
return nerr
|
python
|
def print_summary(self, strm):
"""Print summary of lint."""
nerr = 0
nerr += LintHelper._print_summary_map(strm, self.cpp_header_map, 'cpp-header')
nerr += LintHelper._print_summary_map(strm, self.cpp_src_map, 'cpp-soruce')
nerr += LintHelper._print_summary_map(strm, self.python_map, 'python')
if nerr == 0:
strm.write('All passed!\n')
else:
strm.write('%d files failed lint\n' % nerr)
return nerr
|
[
"def",
"print_summary",
"(",
"self",
",",
"strm",
")",
":",
"nerr",
"=",
"0",
"nerr",
"+=",
"LintHelper",
".",
"_print_summary_map",
"(",
"strm",
",",
"self",
".",
"cpp_header_map",
",",
"'cpp-header'",
")",
"nerr",
"+=",
"LintHelper",
".",
"_print_summary_map",
"(",
"strm",
",",
"self",
".",
"cpp_src_map",
",",
"'cpp-soruce'",
")",
"nerr",
"+=",
"LintHelper",
".",
"_print_summary_map",
"(",
"strm",
",",
"self",
".",
"python_map",
",",
"'python'",
")",
"if",
"nerr",
"==",
"0",
":",
"strm",
".",
"write",
"(",
"'All passed!\\n'",
")",
"else",
":",
"strm",
".",
"write",
"(",
"'%d files failed lint\\n'",
"%",
"nerr",
")",
"return",
"nerr"
] |
Print summary of lint.
|
[
"Print",
"summary",
"of",
"lint",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/cpp-package/scripts/lint.py#L108-L118
|
train
|
apache/incubator-mxnet
|
python/mxnet/kvstore_server.py
|
_init_kvstore_server_module
|
def _init_kvstore_server_module():
"""Start server/scheduler."""
is_worker = ctypes.c_int()
check_call(_LIB.MXKVStoreIsWorkerNode(ctypes.byref(is_worker)))
if is_worker.value == 0:
kvstore = create('dist')
server = KVStoreServer(kvstore)
server.run()
sys.exit()
|
python
|
def _init_kvstore_server_module():
"""Start server/scheduler."""
is_worker = ctypes.c_int()
check_call(_LIB.MXKVStoreIsWorkerNode(ctypes.byref(is_worker)))
if is_worker.value == 0:
kvstore = create('dist')
server = KVStoreServer(kvstore)
server.run()
sys.exit()
|
[
"def",
"_init_kvstore_server_module",
"(",
")",
":",
"is_worker",
"=",
"ctypes",
".",
"c_int",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXKVStoreIsWorkerNode",
"(",
"ctypes",
".",
"byref",
"(",
"is_worker",
")",
")",
")",
"if",
"is_worker",
".",
"value",
"==",
"0",
":",
"kvstore",
"=",
"create",
"(",
"'dist'",
")",
"server",
"=",
"KVStoreServer",
"(",
"kvstore",
")",
"server",
".",
"run",
"(",
")",
"sys",
".",
"exit",
"(",
")"
] |
Start server/scheduler.
|
[
"Start",
"server",
"/",
"scheduler",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/kvstore_server.py#L75-L83
|
train
|
apache/incubator-mxnet
|
python/mxnet/kvstore_server.py
|
KVStoreServer._controller
|
def _controller(self):
"""Return the server controller."""
def server_controller(cmd_id, cmd_body, _):
"""Server controler."""
if not self.init_logginig:
# the reason put the codes here is because we cannot get
# kvstore.rank earlier
head = '%(asctime)-15s Server[' + str(
self.kvstore.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
self.init_logginig = True
if cmd_id == 0:
try:
optimizer = pickle.loads(cmd_body)
except:
raise
self.kvstore.set_optimizer(optimizer)
else:
print("server %d, unknown command (%d, %s)" % (
self.kvstore.rank, cmd_id, cmd_body))
return server_controller
|
python
|
def _controller(self):
"""Return the server controller."""
def server_controller(cmd_id, cmd_body, _):
"""Server controler."""
if not self.init_logginig:
# the reason put the codes here is because we cannot get
# kvstore.rank earlier
head = '%(asctime)-15s Server[' + str(
self.kvstore.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
self.init_logginig = True
if cmd_id == 0:
try:
optimizer = pickle.loads(cmd_body)
except:
raise
self.kvstore.set_optimizer(optimizer)
else:
print("server %d, unknown command (%d, %s)" % (
self.kvstore.rank, cmd_id, cmd_body))
return server_controller
|
[
"def",
"_controller",
"(",
"self",
")",
":",
"def",
"server_controller",
"(",
"cmd_id",
",",
"cmd_body",
",",
"_",
")",
":",
"\"\"\"Server controler.\"\"\"",
"if",
"not",
"self",
".",
"init_logginig",
":",
"# the reason put the codes here is because we cannot get",
"# kvstore.rank earlier",
"head",
"=",
"'%(asctime)-15s Server['",
"+",
"str",
"(",
"self",
".",
"kvstore",
".",
"rank",
")",
"+",
"'] %(message)s'",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"format",
"=",
"head",
")",
"self",
".",
"init_logginig",
"=",
"True",
"if",
"cmd_id",
"==",
"0",
":",
"try",
":",
"optimizer",
"=",
"pickle",
".",
"loads",
"(",
"cmd_body",
")",
"except",
":",
"raise",
"self",
".",
"kvstore",
".",
"set_optimizer",
"(",
"optimizer",
")",
"else",
":",
"print",
"(",
"\"server %d, unknown command (%d, %s)\"",
"%",
"(",
"self",
".",
"kvstore",
".",
"rank",
",",
"cmd_id",
",",
"cmd_body",
")",
")",
"return",
"server_controller"
] |
Return the server controller.
|
[
"Return",
"the",
"server",
"controller",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/kvstore_server.py#L41-L62
|
train
|
apache/incubator-mxnet
|
python/mxnet/kvstore_server.py
|
KVStoreServer.run
|
def run(self):
"""Run the server, whose behavior is like.
>>> while receive(x):
... if is_command x: controller(x)
... else if is_key_value x: updater(x)
"""
_ctrl_proto = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p)
check_call(_LIB.MXKVStoreRunServer(self.handle, _ctrl_proto(self._controller()), None))
|
python
|
def run(self):
"""Run the server, whose behavior is like.
>>> while receive(x):
... if is_command x: controller(x)
... else if is_key_value x: updater(x)
"""
_ctrl_proto = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p)
check_call(_LIB.MXKVStoreRunServer(self.handle, _ctrl_proto(self._controller()), None))
|
[
"def",
"run",
"(",
"self",
")",
":",
"_ctrl_proto",
"=",
"ctypes",
".",
"CFUNCTYPE",
"(",
"None",
",",
"ctypes",
".",
"c_int",
",",
"ctypes",
".",
"c_char_p",
",",
"ctypes",
".",
"c_void_p",
")",
"check_call",
"(",
"_LIB",
".",
"MXKVStoreRunServer",
"(",
"self",
".",
"handle",
",",
"_ctrl_proto",
"(",
"self",
".",
"_controller",
"(",
")",
")",
",",
"None",
")",
")"
] |
Run the server, whose behavior is like.
>>> while receive(x):
... if is_command x: controller(x)
... else if is_key_value x: updater(x)
|
[
"Run",
"the",
"server",
"whose",
"behavior",
"is",
"like",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/kvstore_server.py#L64-L73
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/register.py
|
_generate_ndarray_function_code
|
def _generate_ndarray_function_code(handle, name, func_name, signature_only=False):
"""Generate function for ndarray op by handle and function name."""
real_name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
key_var_num_args = ctypes.c_char_p()
ret_type = ctypes.c_char_p()
check_call(_LIB.MXSymbolGetAtomicSymbolInfo(
handle, ctypes.byref(real_name), ctypes.byref(desc),
ctypes.byref(num_args),
ctypes.byref(arg_names),
ctypes.byref(arg_types),
ctypes.byref(arg_descs),
ctypes.byref(key_var_num_args),
ctypes.byref(ret_type)))
narg = int(num_args.value)
arg_names = [py_str(arg_names[i]) for i in range(narg)]
arg_types = [py_str(arg_types[i]) for i in range(narg)]
key_var_num_args = py_str(key_var_num_args.value)
ret_type = py_str(ret_type.value) if ret_type.value is not None else ''
doc_str = _build_doc(name,
py_str(desc.value),
arg_names,
arg_types,
[py_str(arg_descs[i]) for i in range(narg)],
key_var_num_args,
ret_type)
dtype_name = None
arr_name = None
ndsignature = []
signature = []
ndarg_names = []
kwarg_names = []
for i in range(narg):
name, atype = arg_names[i], arg_types[i]
if name == 'dtype':
dtype_name = name
signature.append('%s=_Null'%name)
elif atype.startswith('NDArray') or atype.startswith('Symbol'):
assert not arr_name, \
"Op can only have one argument with variable " \
"size and it must be the last argument."
if atype.endswith('[]'):
ndsignature.append('*%s'%name)
arr_name = name
else:
ndsignature.append('%s=None'%name)
ndarg_names.append(name)
else:
signature.append('%s=_Null'%name)
kwarg_names.append(name)
signature.append('out=None')
signature.append('name=None')
signature.append('**kwargs')
signature = ndsignature + signature
code = []
if arr_name:
code.append("""
def %s(*%s, **kwargs):"""%(func_name, arr_name))
if not signature_only:
code.append("""
ndargs = []
for i in {}:
assert isinstance(i, NDArrayBase), \\
"Positional arguments must have NDArray type, " \\
"but got %s"%str(i)
ndargs.append(i)""".format(arr_name))
if dtype_name is not None:
code.append("""
if '%s' in kwargs:
kwargs['%s'] = _np.dtype(kwargs['%s']).name"""%(
dtype_name, dtype_name, dtype_name))
code.append("""
_ = kwargs.pop('name', None)
out = kwargs.pop('out', None)
keys = list(kwargs.keys())
vals = list(kwargs.values())""")
else:
code.append("""
def %s(%s):"""%(func_name, ', '.join(signature)))
if not signature_only:
code.append("""
ndargs = []
keys = list(kwargs.keys())
vals = list(kwargs.values())""")
# NDArray args
for name in ndarg_names: # pylint: disable=redefined-argument-from-local
code.append("""
if {name} is not None:
assert isinstance({name}, NDArrayBase), \\
"Argument {name} must have NDArray type, but got %s"%str({name})
ndargs.append({name})""".format(name=name))
# kwargs
for name in kwarg_names: # pylint: disable=redefined-argument-from-local
code.append("""
if %s is not _Null:
keys.append('%s')
vals.append(%s)"""%(name, name, name))
# dtype
if dtype_name is not None:
code.append("""
if %s is not _Null:
keys.append('%s')
vals.append(_np.dtype(%s).name)"""%(dtype_name, dtype_name, dtype_name))
if not signature_only:
code.append("""
return _imperative_invoke(%d, ndargs, keys, vals, out)"""%(
handle.value))
else:
code.append("""
return (0,)""")
doc_str_lines = _os.linesep+''.join([' '+s if s.strip() else s
for s in 'r"""{doc_str}"""'.format(doc_str=doc_str)
.splitlines(True)])
code.insert(1, doc_str_lines)
return ''.join(code), doc_str
|
python
|
def _generate_ndarray_function_code(handle, name, func_name, signature_only=False):
"""Generate function for ndarray op by handle and function name."""
real_name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
key_var_num_args = ctypes.c_char_p()
ret_type = ctypes.c_char_p()
check_call(_LIB.MXSymbolGetAtomicSymbolInfo(
handle, ctypes.byref(real_name), ctypes.byref(desc),
ctypes.byref(num_args),
ctypes.byref(arg_names),
ctypes.byref(arg_types),
ctypes.byref(arg_descs),
ctypes.byref(key_var_num_args),
ctypes.byref(ret_type)))
narg = int(num_args.value)
arg_names = [py_str(arg_names[i]) for i in range(narg)]
arg_types = [py_str(arg_types[i]) for i in range(narg)]
key_var_num_args = py_str(key_var_num_args.value)
ret_type = py_str(ret_type.value) if ret_type.value is not None else ''
doc_str = _build_doc(name,
py_str(desc.value),
arg_names,
arg_types,
[py_str(arg_descs[i]) for i in range(narg)],
key_var_num_args,
ret_type)
dtype_name = None
arr_name = None
ndsignature = []
signature = []
ndarg_names = []
kwarg_names = []
for i in range(narg):
name, atype = arg_names[i], arg_types[i]
if name == 'dtype':
dtype_name = name
signature.append('%s=_Null'%name)
elif atype.startswith('NDArray') or atype.startswith('Symbol'):
assert not arr_name, \
"Op can only have one argument with variable " \
"size and it must be the last argument."
if atype.endswith('[]'):
ndsignature.append('*%s'%name)
arr_name = name
else:
ndsignature.append('%s=None'%name)
ndarg_names.append(name)
else:
signature.append('%s=_Null'%name)
kwarg_names.append(name)
signature.append('out=None')
signature.append('name=None')
signature.append('**kwargs')
signature = ndsignature + signature
code = []
if arr_name:
code.append("""
def %s(*%s, **kwargs):"""%(func_name, arr_name))
if not signature_only:
code.append("""
ndargs = []
for i in {}:
assert isinstance(i, NDArrayBase), \\
"Positional arguments must have NDArray type, " \\
"but got %s"%str(i)
ndargs.append(i)""".format(arr_name))
if dtype_name is not None:
code.append("""
if '%s' in kwargs:
kwargs['%s'] = _np.dtype(kwargs['%s']).name"""%(
dtype_name, dtype_name, dtype_name))
code.append("""
_ = kwargs.pop('name', None)
out = kwargs.pop('out', None)
keys = list(kwargs.keys())
vals = list(kwargs.values())""")
else:
code.append("""
def %s(%s):"""%(func_name, ', '.join(signature)))
if not signature_only:
code.append("""
ndargs = []
keys = list(kwargs.keys())
vals = list(kwargs.values())""")
# NDArray args
for name in ndarg_names: # pylint: disable=redefined-argument-from-local
code.append("""
if {name} is not None:
assert isinstance({name}, NDArrayBase), \\
"Argument {name} must have NDArray type, but got %s"%str({name})
ndargs.append({name})""".format(name=name))
# kwargs
for name in kwarg_names: # pylint: disable=redefined-argument-from-local
code.append("""
if %s is not _Null:
keys.append('%s')
vals.append(%s)"""%(name, name, name))
# dtype
if dtype_name is not None:
code.append("""
if %s is not _Null:
keys.append('%s')
vals.append(_np.dtype(%s).name)"""%(dtype_name, dtype_name, dtype_name))
if not signature_only:
code.append("""
return _imperative_invoke(%d, ndargs, keys, vals, out)"""%(
handle.value))
else:
code.append("""
return (0,)""")
doc_str_lines = _os.linesep+''.join([' '+s if s.strip() else s
for s in 'r"""{doc_str}"""'.format(doc_str=doc_str)
.splitlines(True)])
code.insert(1, doc_str_lines)
return ''.join(code), doc_str
|
[
"def",
"_generate_ndarray_function_code",
"(",
"handle",
",",
"name",
",",
"func_name",
",",
"signature_only",
"=",
"False",
")",
":",
"real_name",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"desc",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"num_args",
"=",
"mx_uint",
"(",
")",
"arg_names",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"arg_types",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"arg_descs",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"key_var_num_args",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"ret_type",
"=",
"ctypes",
".",
"c_char_p",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXSymbolGetAtomicSymbolInfo",
"(",
"handle",
",",
"ctypes",
".",
"byref",
"(",
"real_name",
")",
",",
"ctypes",
".",
"byref",
"(",
"desc",
")",
",",
"ctypes",
".",
"byref",
"(",
"num_args",
")",
",",
"ctypes",
".",
"byref",
"(",
"arg_names",
")",
",",
"ctypes",
".",
"byref",
"(",
"arg_types",
")",
",",
"ctypes",
".",
"byref",
"(",
"arg_descs",
")",
",",
"ctypes",
".",
"byref",
"(",
"key_var_num_args",
")",
",",
"ctypes",
".",
"byref",
"(",
"ret_type",
")",
")",
")",
"narg",
"=",
"int",
"(",
"num_args",
".",
"value",
")",
"arg_names",
"=",
"[",
"py_str",
"(",
"arg_names",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"narg",
")",
"]",
"arg_types",
"=",
"[",
"py_str",
"(",
"arg_types",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"narg",
")",
"]",
"key_var_num_args",
"=",
"py_str",
"(",
"key_var_num_args",
".",
"value",
")",
"ret_type",
"=",
"py_str",
"(",
"ret_type",
".",
"value",
")",
"if",
"ret_type",
".",
"value",
"is",
"not",
"None",
"else",
"''",
"doc_str",
"=",
"_build_doc",
"(",
"name",
",",
"py_str",
"(",
"desc",
".",
"value",
")",
",",
"arg_names",
",",
"arg_types",
",",
"[",
"py_str",
"(",
"arg_descs",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"narg",
")",
"]",
",",
"key_var_num_args",
",",
"ret_type",
")",
"dtype_name",
"=",
"None",
"arr_name",
"=",
"None",
"ndsignature",
"=",
"[",
"]",
"signature",
"=",
"[",
"]",
"ndarg_names",
"=",
"[",
"]",
"kwarg_names",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"narg",
")",
":",
"name",
",",
"atype",
"=",
"arg_names",
"[",
"i",
"]",
",",
"arg_types",
"[",
"i",
"]",
"if",
"name",
"==",
"'dtype'",
":",
"dtype_name",
"=",
"name",
"signature",
".",
"append",
"(",
"'%s=_Null'",
"%",
"name",
")",
"elif",
"atype",
".",
"startswith",
"(",
"'NDArray'",
")",
"or",
"atype",
".",
"startswith",
"(",
"'Symbol'",
")",
":",
"assert",
"not",
"arr_name",
",",
"\"Op can only have one argument with variable \"",
"\"size and it must be the last argument.\"",
"if",
"atype",
".",
"endswith",
"(",
"'[]'",
")",
":",
"ndsignature",
".",
"append",
"(",
"'*%s'",
"%",
"name",
")",
"arr_name",
"=",
"name",
"else",
":",
"ndsignature",
".",
"append",
"(",
"'%s=None'",
"%",
"name",
")",
"ndarg_names",
".",
"append",
"(",
"name",
")",
"else",
":",
"signature",
".",
"append",
"(",
"'%s=_Null'",
"%",
"name",
")",
"kwarg_names",
".",
"append",
"(",
"name",
")",
"signature",
".",
"append",
"(",
"'out=None'",
")",
"signature",
".",
"append",
"(",
"'name=None'",
")",
"signature",
".",
"append",
"(",
"'**kwargs'",
")",
"signature",
"=",
"ndsignature",
"+",
"signature",
"code",
"=",
"[",
"]",
"if",
"arr_name",
":",
"code",
".",
"append",
"(",
"\"\"\"\ndef %s(*%s, **kwargs):\"\"\"",
"%",
"(",
"func_name",
",",
"arr_name",
")",
")",
"if",
"not",
"signature_only",
":",
"code",
".",
"append",
"(",
"\"\"\"\n ndargs = []\n for i in {}:\n assert isinstance(i, NDArrayBase), \\\\\n \"Positional arguments must have NDArray type, \" \\\\\n \"but got %s\"%str(i)\n ndargs.append(i)\"\"\"",
".",
"format",
"(",
"arr_name",
")",
")",
"if",
"dtype_name",
"is",
"not",
"None",
":",
"code",
".",
"append",
"(",
"\"\"\"\n if '%s' in kwargs:\n kwargs['%s'] = _np.dtype(kwargs['%s']).name\"\"\"",
"%",
"(",
"dtype_name",
",",
"dtype_name",
",",
"dtype_name",
")",
")",
"code",
".",
"append",
"(",
"\"\"\"\n _ = kwargs.pop('name', None)\n out = kwargs.pop('out', None)\n keys = list(kwargs.keys())\n vals = list(kwargs.values())\"\"\"",
")",
"else",
":",
"code",
".",
"append",
"(",
"\"\"\"\ndef %s(%s):\"\"\"",
"%",
"(",
"func_name",
",",
"', '",
".",
"join",
"(",
"signature",
")",
")",
")",
"if",
"not",
"signature_only",
":",
"code",
".",
"append",
"(",
"\"\"\"\n ndargs = []\n keys = list(kwargs.keys())\n vals = list(kwargs.values())\"\"\"",
")",
"# NDArray args",
"for",
"name",
"in",
"ndarg_names",
":",
"# pylint: disable=redefined-argument-from-local",
"code",
".",
"append",
"(",
"\"\"\"\n if {name} is not None:\n assert isinstance({name}, NDArrayBase), \\\\\n \"Argument {name} must have NDArray type, but got %s\"%str({name})\n ndargs.append({name})\"\"\"",
".",
"format",
"(",
"name",
"=",
"name",
")",
")",
"# kwargs",
"for",
"name",
"in",
"kwarg_names",
":",
"# pylint: disable=redefined-argument-from-local",
"code",
".",
"append",
"(",
"\"\"\"\n if %s is not _Null:\n keys.append('%s')\n vals.append(%s)\"\"\"",
"%",
"(",
"name",
",",
"name",
",",
"name",
")",
")",
"# dtype",
"if",
"dtype_name",
"is",
"not",
"None",
":",
"code",
".",
"append",
"(",
"\"\"\"\n if %s is not _Null:\n keys.append('%s')\n vals.append(_np.dtype(%s).name)\"\"\"",
"%",
"(",
"dtype_name",
",",
"dtype_name",
",",
"dtype_name",
")",
")",
"if",
"not",
"signature_only",
":",
"code",
".",
"append",
"(",
"\"\"\"\n return _imperative_invoke(%d, ndargs, keys, vals, out)\"\"\"",
"%",
"(",
"handle",
".",
"value",
")",
")",
"else",
":",
"code",
".",
"append",
"(",
"\"\"\"\n return (0,)\"\"\"",
")",
"doc_str_lines",
"=",
"_os",
".",
"linesep",
"+",
"''",
".",
"join",
"(",
"[",
"' '",
"+",
"s",
"if",
"s",
".",
"strip",
"(",
")",
"else",
"s",
"for",
"s",
"in",
"'r\"\"\"{doc_str}\"\"\"'",
".",
"format",
"(",
"doc_str",
"=",
"doc_str",
")",
".",
"splitlines",
"(",
"True",
")",
"]",
")",
"code",
".",
"insert",
"(",
"1",
",",
"doc_str_lines",
")",
"return",
"''",
".",
"join",
"(",
"code",
")",
",",
"doc_str"
] |
Generate function for ndarray op by handle and function name.
|
[
"Generate",
"function",
"for",
"ndarray",
"op",
"by",
"handle",
"and",
"function",
"name",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/register.py#L31-L154
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/register.py
|
_make_ndarray_function
|
def _make_ndarray_function(handle, name, func_name):
"""Create a NDArray function from the FunctionHandle."""
code, doc_str = _generate_ndarray_function_code(handle, name, func_name)
local = {}
exec(code, None, local) # pylint: disable=exec-used
ndarray_function = local[func_name]
ndarray_function.__name__ = func_name
ndarray_function.__doc__ = doc_str
ndarray_function.__module__ = 'mxnet.ndarray'
return ndarray_function
|
python
|
def _make_ndarray_function(handle, name, func_name):
"""Create a NDArray function from the FunctionHandle."""
code, doc_str = _generate_ndarray_function_code(handle, name, func_name)
local = {}
exec(code, None, local) # pylint: disable=exec-used
ndarray_function = local[func_name]
ndarray_function.__name__ = func_name
ndarray_function.__doc__ = doc_str
ndarray_function.__module__ = 'mxnet.ndarray'
return ndarray_function
|
[
"def",
"_make_ndarray_function",
"(",
"handle",
",",
"name",
",",
"func_name",
")",
":",
"code",
",",
"doc_str",
"=",
"_generate_ndarray_function_code",
"(",
"handle",
",",
"name",
",",
"func_name",
")",
"local",
"=",
"{",
"}",
"exec",
"(",
"code",
",",
"None",
",",
"local",
")",
"# pylint: disable=exec-used",
"ndarray_function",
"=",
"local",
"[",
"func_name",
"]",
"ndarray_function",
".",
"__name__",
"=",
"func_name",
"ndarray_function",
".",
"__doc__",
"=",
"doc_str",
"ndarray_function",
".",
"__module__",
"=",
"'mxnet.ndarray'",
"return",
"ndarray_function"
] |
Create a NDArray function from the FunctionHandle.
|
[
"Create",
"a",
"NDArray",
"function",
"from",
"the",
"FunctionHandle",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/register.py#L158-L168
|
train
|
apache/incubator-mxnet
|
python/mxnet/contrib/text/utils.py
|
count_tokens_from_str
|
def count_tokens_from_str(source_str, token_delim=' ', seq_delim='\n',
to_lower=False, counter_to_update=None):
"""Counts tokens in the specified string.
For token_delim=\'<td>\' and seq_delim=\'<sd>\', a specified string of two sequences of
tokens may look like::
<td>token1<td>token2<td>token3<td><sd><td>token4<td>token5<td><sd>
<td> and <sd> are regular expressions. Make use of \\\\ to allow special characters as
delimiters. The list of
special characters can be found at https://docs.python.org/3/library/re.html.
Parameters
----------
source_str : str
A source string of tokens.
token_delim : str, default ' '
A token delimiter.
seq_delim : str, default '\\\\n'
A sequence delimiter.
to_lower : bool, default False
Whether to convert the source source_str to the lower case.
counter_to_update : collections.Counter or None, default None
The collections.Counter instance to be updated with the token counts of `source_str`. If
None, return a new collections.Counter instance counting tokens from `source_str`.
Returns
-------
collections.Counter
The `counter_to_update` collections.Counter instance after being updated with the token
counts of `source_str`. If `counter_to_update` is None, return a new collections.Counter
instance counting tokens from `source_str`.
Examples
--------
>>> source_str = ' Life is great ! \\n life is good . \\n'
>>> count_tokens_from_str(token_line, ' ', '\\n', True)
Counter({'!': 1, '.': 1, 'good': 1, 'great': 1, 'is': 2, 'life': 2})
>>> source_str = '*Life*is*great*!*\\n*life*is*good*.*\\n'
>>> count_tokens_from_str(token_line, '\\*', '\\n', True)
Counter({'is': 2, 'life': 2, '!': 1, 'great': 1, 'good': 1, '.': 1})
"""
source_str = filter(None,
re.split(token_delim + '|' + seq_delim, source_str))
if to_lower:
source_str = [t.lower() for t in source_str]
if counter_to_update is None:
return collections.Counter(source_str)
else:
counter_to_update.update(source_str)
return counter_to_update
|
python
|
def count_tokens_from_str(source_str, token_delim=' ', seq_delim='\n',
to_lower=False, counter_to_update=None):
"""Counts tokens in the specified string.
For token_delim=\'<td>\' and seq_delim=\'<sd>\', a specified string of two sequences of
tokens may look like::
<td>token1<td>token2<td>token3<td><sd><td>token4<td>token5<td><sd>
<td> and <sd> are regular expressions. Make use of \\\\ to allow special characters as
delimiters. The list of
special characters can be found at https://docs.python.org/3/library/re.html.
Parameters
----------
source_str : str
A source string of tokens.
token_delim : str, default ' '
A token delimiter.
seq_delim : str, default '\\\\n'
A sequence delimiter.
to_lower : bool, default False
Whether to convert the source source_str to the lower case.
counter_to_update : collections.Counter or None, default None
The collections.Counter instance to be updated with the token counts of `source_str`. If
None, return a new collections.Counter instance counting tokens from `source_str`.
Returns
-------
collections.Counter
The `counter_to_update` collections.Counter instance after being updated with the token
counts of `source_str`. If `counter_to_update` is None, return a new collections.Counter
instance counting tokens from `source_str`.
Examples
--------
>>> source_str = ' Life is great ! \\n life is good . \\n'
>>> count_tokens_from_str(token_line, ' ', '\\n', True)
Counter({'!': 1, '.': 1, 'good': 1, 'great': 1, 'is': 2, 'life': 2})
>>> source_str = '*Life*is*great*!*\\n*life*is*good*.*\\n'
>>> count_tokens_from_str(token_line, '\\*', '\\n', True)
Counter({'is': 2, 'life': 2, '!': 1, 'great': 1, 'good': 1, '.': 1})
"""
source_str = filter(None,
re.split(token_delim + '|' + seq_delim, source_str))
if to_lower:
source_str = [t.lower() for t in source_str]
if counter_to_update is None:
return collections.Counter(source_str)
else:
counter_to_update.update(source_str)
return counter_to_update
|
[
"def",
"count_tokens_from_str",
"(",
"source_str",
",",
"token_delim",
"=",
"' '",
",",
"seq_delim",
"=",
"'\\n'",
",",
"to_lower",
"=",
"False",
",",
"counter_to_update",
"=",
"None",
")",
":",
"source_str",
"=",
"filter",
"(",
"None",
",",
"re",
".",
"split",
"(",
"token_delim",
"+",
"'|'",
"+",
"seq_delim",
",",
"source_str",
")",
")",
"if",
"to_lower",
":",
"source_str",
"=",
"[",
"t",
".",
"lower",
"(",
")",
"for",
"t",
"in",
"source_str",
"]",
"if",
"counter_to_update",
"is",
"None",
":",
"return",
"collections",
".",
"Counter",
"(",
"source_str",
")",
"else",
":",
"counter_to_update",
".",
"update",
"(",
"source_str",
")",
"return",
"counter_to_update"
] |
Counts tokens in the specified string.
For token_delim=\'<td>\' and seq_delim=\'<sd>\', a specified string of two sequences of
tokens may look like::
<td>token1<td>token2<td>token3<td><sd><td>token4<td>token5<td><sd>
<td> and <sd> are regular expressions. Make use of \\\\ to allow special characters as
delimiters. The list of
special characters can be found at https://docs.python.org/3/library/re.html.
Parameters
----------
source_str : str
A source string of tokens.
token_delim : str, default ' '
A token delimiter.
seq_delim : str, default '\\\\n'
A sequence delimiter.
to_lower : bool, default False
Whether to convert the source source_str to the lower case.
counter_to_update : collections.Counter or None, default None
The collections.Counter instance to be updated with the token counts of `source_str`. If
None, return a new collections.Counter instance counting tokens from `source_str`.
Returns
-------
collections.Counter
The `counter_to_update` collections.Counter instance after being updated with the token
counts of `source_str`. If `counter_to_update` is None, return a new collections.Counter
instance counting tokens from `source_str`.
Examples
--------
>>> source_str = ' Life is great ! \\n life is good . \\n'
>>> count_tokens_from_str(token_line, ' ', '\\n', True)
Counter({'!': 1, '.': 1, 'good': 1, 'great': 1, 'is': 2, 'life': 2})
>>> source_str = '*Life*is*great*!*\\n*life*is*good*.*\\n'
>>> count_tokens_from_str(token_line, '\\*', '\\n', True)
Counter({'is': 2, 'life': 2, '!': 1, 'great': 1, 'good': 1, '.': 1})
|
[
"Counts",
"tokens",
"in",
"the",
"specified",
"string",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/text/utils.py#L28-L85
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/utils.py
|
zeros
|
def zeros(shape, ctx=None, dtype=None, stype=None, **kwargs):
"""Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or tuple of int
The shape of the empty array
ctx : Context, optional
An optional device context (default is the current default context)
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`)
stype: string, optional
The storage type of the empty array, such as 'row_sparse', 'csr', etc.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
A created array
Examples
--------
>>> mx.nd.zeros((1,2), mx.cpu(), stype='csr')
<CSRNDArray 1x2 @cpu(0)>
>>> mx.nd.zeros((1,2), mx.cpu(), 'float16', stype='row_sparse').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
if stype is None or stype == 'default':
return _zeros_ndarray(shape, ctx, dtype, **kwargs)
else:
return _zeros_sparse_ndarray(stype, shape, ctx, dtype, **kwargs)
|
python
|
def zeros(shape, ctx=None, dtype=None, stype=None, **kwargs):
"""Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or tuple of int
The shape of the empty array
ctx : Context, optional
An optional device context (default is the current default context)
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`)
stype: string, optional
The storage type of the empty array, such as 'row_sparse', 'csr', etc.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
A created array
Examples
--------
>>> mx.nd.zeros((1,2), mx.cpu(), stype='csr')
<CSRNDArray 1x2 @cpu(0)>
>>> mx.nd.zeros((1,2), mx.cpu(), 'float16', stype='row_sparse').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
if stype is None or stype == 'default':
return _zeros_ndarray(shape, ctx, dtype, **kwargs)
else:
return _zeros_sparse_ndarray(stype, shape, ctx, dtype, **kwargs)
|
[
"def",
"zeros",
"(",
"shape",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"stype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"stype",
"is",
"None",
"or",
"stype",
"==",
"'default'",
":",
"return",
"_zeros_ndarray",
"(",
"shape",
",",
"ctx",
",",
"dtype",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"_zeros_sparse_ndarray",
"(",
"stype",
",",
"shape",
",",
"ctx",
",",
"dtype",
",",
"*",
"*",
"kwargs",
")"
] |
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or tuple of int
The shape of the empty array
ctx : Context, optional
An optional device context (default is the current default context)
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`)
stype: string, optional
The storage type of the empty array, such as 'row_sparse', 'csr', etc.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
A created array
Examples
--------
>>> mx.nd.zeros((1,2), mx.cpu(), stype='csr')
<CSRNDArray 1x2 @cpu(0)>
>>> mx.nd.zeros((1,2), mx.cpu(), 'float16', stype='row_sparse').asnumpy()
array([[ 0., 0.]], dtype=float16)
|
[
"Return",
"a",
"new",
"array",
"of",
"given",
"shape",
"and",
"type",
"filled",
"with",
"zeros",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/utils.py#L40-L69
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/utils.py
|
empty
|
def empty(shape, ctx=None, dtype=None, stype=None):
"""Returns a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
stype : str, optional
An optional storage type (default is `default`).
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
A created array.
Examples
--------
>>> mx.nd.empty(1)
<NDArray 1 @cpu(0)>
>>> mx.nd.empty((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.empty((1,2), mx.gpu(0), 'float16')
<NDArray 1x2 @gpu(0)>
>>> mx.nd.empty((1,2), stype='csr')
<CSRNDArray 1x2 @cpu(0)>
"""
if stype is None or stype == 'default':
return _empty_ndarray(shape, ctx, dtype)
else:
return _empty_sparse_ndarray(stype, shape, ctx, dtype)
|
python
|
def empty(shape, ctx=None, dtype=None, stype=None):
"""Returns a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
stype : str, optional
An optional storage type (default is `default`).
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
A created array.
Examples
--------
>>> mx.nd.empty(1)
<NDArray 1 @cpu(0)>
>>> mx.nd.empty((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.empty((1,2), mx.gpu(0), 'float16')
<NDArray 1x2 @gpu(0)>
>>> mx.nd.empty((1,2), stype='csr')
<CSRNDArray 1x2 @cpu(0)>
"""
if stype is None or stype == 'default':
return _empty_ndarray(shape, ctx, dtype)
else:
return _empty_sparse_ndarray(stype, shape, ctx, dtype)
|
[
"def",
"empty",
"(",
"shape",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"stype",
"=",
"None",
")",
":",
"if",
"stype",
"is",
"None",
"or",
"stype",
"==",
"'default'",
":",
"return",
"_empty_ndarray",
"(",
"shape",
",",
"ctx",
",",
"dtype",
")",
"else",
":",
"return",
"_empty_sparse_ndarray",
"(",
"stype",
",",
"shape",
",",
"ctx",
",",
"dtype",
")"
] |
Returns a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
stype : str, optional
An optional storage type (default is `default`).
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
A created array.
Examples
--------
>>> mx.nd.empty(1)
<NDArray 1 @cpu(0)>
>>> mx.nd.empty((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.empty((1,2), mx.gpu(0), 'float16')
<NDArray 1x2 @gpu(0)>
>>> mx.nd.empty((1,2), stype='csr')
<CSRNDArray 1x2 @cpu(0)>
|
[
"Returns",
"a",
"new",
"array",
"of",
"given",
"shape",
"and",
"type",
"without",
"initializing",
"entries",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/utils.py#L72-L105
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/utils.py
|
array
|
def array(source_array, ctx=None, dtype=None):
"""Creates an array from any object exposing the array interface.
Parameters
----------
source_array : array_like
An object exposing the array interface, an object whose `__array__`
method returns an array, or any (nested) sequence.
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `float32` otherwise.
Returns
-------
NDArray, RowSparseNDArray or CSRNDArray
An array with the same contents as the `source_array`.
Examples
--------
>>> import numpy as np
>>> mx.nd.array([1, 2, 3])
<NDArray 3 @cpu(0)>
>>> mx.nd.array([[1, 2], [3, 4]])
<NDArray 2x2 @cpu(0)>
>>> mx.nd.array(np.zeros((3, 2)))
<NDArray 3x2 @cpu(0)>
>>> mx.nd.array(np.zeros((3, 2)), mx.gpu(0))
<NDArray 3x2 @gpu(0)>
>>> mx.nd.array(mx.nd.zeros((3, 2), stype='row_sparse'))
<RowSparseNDArray 3x2 @cpu(0)>
"""
if spsp is not None and isinstance(source_array, spsp.csr.csr_matrix):
return _sparse_array(source_array, ctx=ctx, dtype=dtype)
elif isinstance(source_array, NDArray) and source_array.stype != 'default':
return _sparse_array(source_array, ctx=ctx, dtype=dtype)
else:
return _array(source_array, ctx=ctx, dtype=dtype)
|
python
|
def array(source_array, ctx=None, dtype=None):
"""Creates an array from any object exposing the array interface.
Parameters
----------
source_array : array_like
An object exposing the array interface, an object whose `__array__`
method returns an array, or any (nested) sequence.
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `float32` otherwise.
Returns
-------
NDArray, RowSparseNDArray or CSRNDArray
An array with the same contents as the `source_array`.
Examples
--------
>>> import numpy as np
>>> mx.nd.array([1, 2, 3])
<NDArray 3 @cpu(0)>
>>> mx.nd.array([[1, 2], [3, 4]])
<NDArray 2x2 @cpu(0)>
>>> mx.nd.array(np.zeros((3, 2)))
<NDArray 3x2 @cpu(0)>
>>> mx.nd.array(np.zeros((3, 2)), mx.gpu(0))
<NDArray 3x2 @gpu(0)>
>>> mx.nd.array(mx.nd.zeros((3, 2), stype='row_sparse'))
<RowSparseNDArray 3x2 @cpu(0)>
"""
if spsp is not None and isinstance(source_array, spsp.csr.csr_matrix):
return _sparse_array(source_array, ctx=ctx, dtype=dtype)
elif isinstance(source_array, NDArray) and source_array.stype != 'default':
return _sparse_array(source_array, ctx=ctx, dtype=dtype)
else:
return _array(source_array, ctx=ctx, dtype=dtype)
|
[
"def",
"array",
"(",
"source_array",
",",
"ctx",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"spsp",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"source_array",
",",
"spsp",
".",
"csr",
".",
"csr_matrix",
")",
":",
"return",
"_sparse_array",
"(",
"source_array",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")",
"elif",
"isinstance",
"(",
"source_array",
",",
"NDArray",
")",
"and",
"source_array",
".",
"stype",
"!=",
"'default'",
":",
"return",
"_sparse_array",
"(",
"source_array",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"return",
"_array",
"(",
"source_array",
",",
"ctx",
"=",
"ctx",
",",
"dtype",
"=",
"dtype",
")"
] |
Creates an array from any object exposing the array interface.
Parameters
----------
source_array : array_like
An object exposing the array interface, an object whose `__array__`
method returns an array, or any (nested) sequence.
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `float32` otherwise.
Returns
-------
NDArray, RowSparseNDArray or CSRNDArray
An array with the same contents as the `source_array`.
Examples
--------
>>> import numpy as np
>>> mx.nd.array([1, 2, 3])
<NDArray 3 @cpu(0)>
>>> mx.nd.array([[1, 2], [3, 4]])
<NDArray 2x2 @cpu(0)>
>>> mx.nd.array(np.zeros((3, 2)))
<NDArray 3x2 @cpu(0)>
>>> mx.nd.array(np.zeros((3, 2)), mx.gpu(0))
<NDArray 3x2 @gpu(0)>
>>> mx.nd.array(mx.nd.zeros((3, 2), stype='row_sparse'))
<RowSparseNDArray 3x2 @cpu(0)>
|
[
"Creates",
"an",
"array",
"from",
"any",
"object",
"exposing",
"the",
"array",
"interface",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/utils.py#L108-L146
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/utils.py
|
load
|
def load(fname):
"""Loads an array from file.
See more details in ``save``.
Parameters
----------
fname : str
The filename.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
"""
if not isinstance(fname, string_types):
raise TypeError('fname required to be a string')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoad(c_str(fname),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
for i in range(out_size.value))
|
python
|
def load(fname):
"""Loads an array from file.
See more details in ``save``.
Parameters
----------
fname : str
The filename.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
"""
if not isinstance(fname, string_types):
raise TypeError('fname required to be a string')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoad(c_str(fname),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
for i in range(out_size.value))
|
[
"def",
"load",
"(",
"fname",
")",
":",
"if",
"not",
"isinstance",
"(",
"fname",
",",
"string_types",
")",
":",
"raise",
"TypeError",
"(",
"'fname required to be a string'",
")",
"out_size",
"=",
"mx_uint",
"(",
")",
"out_name_size",
"=",
"mx_uint",
"(",
")",
"handles",
"=",
"ctypes",
".",
"POINTER",
"(",
"NDArrayHandle",
")",
"(",
")",
"names",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayLoad",
"(",
"c_str",
"(",
"fname",
")",
",",
"ctypes",
".",
"byref",
"(",
"out_size",
")",
",",
"ctypes",
".",
"byref",
"(",
"handles",
")",
",",
"ctypes",
".",
"byref",
"(",
"out_name_size",
")",
",",
"ctypes",
".",
"byref",
"(",
"names",
")",
")",
")",
"if",
"out_name_size",
".",
"value",
"==",
"0",
":",
"return",
"[",
"_ndarray_cls",
"(",
"NDArrayHandle",
"(",
"handles",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"out_size",
".",
"value",
")",
"]",
"else",
":",
"assert",
"out_name_size",
".",
"value",
"==",
"out_size",
".",
"value",
"return",
"dict",
"(",
"(",
"py_str",
"(",
"names",
"[",
"i",
"]",
")",
",",
"_ndarray_cls",
"(",
"NDArrayHandle",
"(",
"handles",
"[",
"i",
"]",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"out_size",
".",
"value",
")",
")"
] |
Loads an array from file.
See more details in ``save``.
Parameters
----------
fname : str
The filename.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
|
[
"Loads",
"an",
"array",
"from",
"file",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/utils.py#L149-L182
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/utils.py
|
load_frombuffer
|
def load_frombuffer(buf):
"""Loads an array dictionary or list from a buffer
See more details in ``save``.
Parameters
----------
buf : str
Buffer containing contents of a file as a string or bytes.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
"""
if not isinstance(buf, string_types + tuple([bytes])):
raise TypeError('buf required to be a string or bytes')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoadFromBuffer(buf,
mx_uint(len(buf)),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
for i in range(out_size.value))
|
python
|
def load_frombuffer(buf):
"""Loads an array dictionary or list from a buffer
See more details in ``save``.
Parameters
----------
buf : str
Buffer containing contents of a file as a string or bytes.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
"""
if not isinstance(buf, string_types + tuple([bytes])):
raise TypeError('buf required to be a string or bytes')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoadFromBuffer(buf,
mx_uint(len(buf)),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
for i in range(out_size.value))
|
[
"def",
"load_frombuffer",
"(",
"buf",
")",
":",
"if",
"not",
"isinstance",
"(",
"buf",
",",
"string_types",
"+",
"tuple",
"(",
"[",
"bytes",
"]",
")",
")",
":",
"raise",
"TypeError",
"(",
"'buf required to be a string or bytes'",
")",
"out_size",
"=",
"mx_uint",
"(",
")",
"out_name_size",
"=",
"mx_uint",
"(",
")",
"handles",
"=",
"ctypes",
".",
"POINTER",
"(",
"NDArrayHandle",
")",
"(",
")",
"names",
"=",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_char_p",
")",
"(",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArrayLoadFromBuffer",
"(",
"buf",
",",
"mx_uint",
"(",
"len",
"(",
"buf",
")",
")",
",",
"ctypes",
".",
"byref",
"(",
"out_size",
")",
",",
"ctypes",
".",
"byref",
"(",
"handles",
")",
",",
"ctypes",
".",
"byref",
"(",
"out_name_size",
")",
",",
"ctypes",
".",
"byref",
"(",
"names",
")",
")",
")",
"if",
"out_name_size",
".",
"value",
"==",
"0",
":",
"return",
"[",
"_ndarray_cls",
"(",
"NDArrayHandle",
"(",
"handles",
"[",
"i",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"out_size",
".",
"value",
")",
"]",
"else",
":",
"assert",
"out_name_size",
".",
"value",
"==",
"out_size",
".",
"value",
"return",
"dict",
"(",
"(",
"py_str",
"(",
"names",
"[",
"i",
"]",
")",
",",
"_ndarray_cls",
"(",
"NDArrayHandle",
"(",
"handles",
"[",
"i",
"]",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"out_size",
".",
"value",
")",
")"
] |
Loads an array dictionary or list from a buffer
See more details in ``save``.
Parameters
----------
buf : str
Buffer containing contents of a file as a string or bytes.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
|
[
"Loads",
"an",
"array",
"dictionary",
"or",
"list",
"from",
"a",
"buffer"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/utils.py#L185-L219
|
train
|
apache/incubator-mxnet
|
python/mxnet/ndarray/utils.py
|
save
|
def save(fname, data):
"""Saves a list of arrays or a dict of str->array to file.
Examples of filenames:
- ``/path/to/file``
- ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)
- ``hdfs://path/to/file`` (if compiled with HDFS supports)
Parameters
----------
fname : str
The filename.
data : NDArray, RowSparseNDArray or CSRNDArray, \
or list of NDArray, RowSparseNDArray or CSRNDArray, \
or dict of str to NDArray, RowSparseNDArray or CSRNDArray
The data to save.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> y = mx.nd.ones((1,4))
>>> mx.nd.save('my_list', [x,y])
>>> mx.nd.save('my_dict', {'x':x, 'y':y})
>>> mx.nd.load('my_list')
[<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
>>> mx.nd.load('my_dict')
{'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
"""
if isinstance(data, NDArray):
data = [data]
handles = c_array(NDArrayHandle, [])
if isinstance(data, dict):
str_keys = data.keys()
nd_vals = data.values()
if any(not isinstance(k, string_types) for k in str_keys) or \
any(not isinstance(v, NDArray) for v in nd_vals):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys = c_str_array(str_keys)
handles = c_handle_array(nd_vals)
elif isinstance(data, list):
if any(not isinstance(v, NDArray) for v in data):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys = None
handles = c_handle_array(data)
else:
raise ValueError("data needs to either be a NDArray, dict of str, NDArray pairs "
"or a list of NDarrays.")
check_call(_LIB.MXNDArraySave(c_str(fname),
mx_uint(len(handles)),
handles,
keys))
|
python
|
def save(fname, data):
"""Saves a list of arrays or a dict of str->array to file.
Examples of filenames:
- ``/path/to/file``
- ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)
- ``hdfs://path/to/file`` (if compiled with HDFS supports)
Parameters
----------
fname : str
The filename.
data : NDArray, RowSparseNDArray or CSRNDArray, \
or list of NDArray, RowSparseNDArray or CSRNDArray, \
or dict of str to NDArray, RowSparseNDArray or CSRNDArray
The data to save.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> y = mx.nd.ones((1,4))
>>> mx.nd.save('my_list', [x,y])
>>> mx.nd.save('my_dict', {'x':x, 'y':y})
>>> mx.nd.load('my_list')
[<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
>>> mx.nd.load('my_dict')
{'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
"""
if isinstance(data, NDArray):
data = [data]
handles = c_array(NDArrayHandle, [])
if isinstance(data, dict):
str_keys = data.keys()
nd_vals = data.values()
if any(not isinstance(k, string_types) for k in str_keys) or \
any(not isinstance(v, NDArray) for v in nd_vals):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys = c_str_array(str_keys)
handles = c_handle_array(nd_vals)
elif isinstance(data, list):
if any(not isinstance(v, NDArray) for v in data):
raise TypeError('save only accept dict str->NDArray or list of NDArray')
keys = None
handles = c_handle_array(data)
else:
raise ValueError("data needs to either be a NDArray, dict of str, NDArray pairs "
"or a list of NDarrays.")
check_call(_LIB.MXNDArraySave(c_str(fname),
mx_uint(len(handles)),
handles,
keys))
|
[
"def",
"save",
"(",
"fname",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"NDArray",
")",
":",
"data",
"=",
"[",
"data",
"]",
"handles",
"=",
"c_array",
"(",
"NDArrayHandle",
",",
"[",
"]",
")",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"str_keys",
"=",
"data",
".",
"keys",
"(",
")",
"nd_vals",
"=",
"data",
".",
"values",
"(",
")",
"if",
"any",
"(",
"not",
"isinstance",
"(",
"k",
",",
"string_types",
")",
"for",
"k",
"in",
"str_keys",
")",
"or",
"any",
"(",
"not",
"isinstance",
"(",
"v",
",",
"NDArray",
")",
"for",
"v",
"in",
"nd_vals",
")",
":",
"raise",
"TypeError",
"(",
"'save only accept dict str->NDArray or list of NDArray'",
")",
"keys",
"=",
"c_str_array",
"(",
"str_keys",
")",
"handles",
"=",
"c_handle_array",
"(",
"nd_vals",
")",
"elif",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"if",
"any",
"(",
"not",
"isinstance",
"(",
"v",
",",
"NDArray",
")",
"for",
"v",
"in",
"data",
")",
":",
"raise",
"TypeError",
"(",
"'save only accept dict str->NDArray or list of NDArray'",
")",
"keys",
"=",
"None",
"handles",
"=",
"c_handle_array",
"(",
"data",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"data needs to either be a NDArray, dict of str, NDArray pairs \"",
"\"or a list of NDarrays.\"",
")",
"check_call",
"(",
"_LIB",
".",
"MXNDArraySave",
"(",
"c_str",
"(",
"fname",
")",
",",
"mx_uint",
"(",
"len",
"(",
"handles",
")",
")",
",",
"handles",
",",
"keys",
")",
")"
] |
Saves a list of arrays or a dict of str->array to file.
Examples of filenames:
- ``/path/to/file``
- ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)
- ``hdfs://path/to/file`` (if compiled with HDFS supports)
Parameters
----------
fname : str
The filename.
data : NDArray, RowSparseNDArray or CSRNDArray, \
or list of NDArray, RowSparseNDArray or CSRNDArray, \
or dict of str to NDArray, RowSparseNDArray or CSRNDArray
The data to save.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> y = mx.nd.ones((1,4))
>>> mx.nd.save('my_list', [x,y])
>>> mx.nd.save('my_dict', {'x':x, 'y':y})
>>> mx.nd.load('my_list')
[<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
>>> mx.nd.load('my_dict')
{'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
|
[
"Saves",
"a",
"list",
"of",
"arrays",
"or",
"a",
"dict",
"of",
"str",
"-",
">",
"array",
"to",
"file",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/utils.py#L222-L273
|
train
|
apache/incubator-mxnet
|
python/mxnet/gluon/block.py
|
_common_prefix
|
def _common_prefix(names):
"""Get the common prefix for all names"""
if not names:
return ''
prefix = names[0]
for name in names:
i = 0
while i < len(prefix) and i < len(name) and prefix[i] == name[i]:
i += 1
prefix = prefix[:i]
return prefix
|
python
|
def _common_prefix(names):
"""Get the common prefix for all names"""
if not names:
return ''
prefix = names[0]
for name in names:
i = 0
while i < len(prefix) and i < len(name) and prefix[i] == name[i]:
i += 1
prefix = prefix[:i]
return prefix
|
[
"def",
"_common_prefix",
"(",
"names",
")",
":",
"if",
"not",
"names",
":",
"return",
"''",
"prefix",
"=",
"names",
"[",
"0",
"]",
"for",
"name",
"in",
"names",
":",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"prefix",
")",
"and",
"i",
"<",
"len",
"(",
"name",
")",
"and",
"prefix",
"[",
"i",
"]",
"==",
"name",
"[",
"i",
"]",
":",
"i",
"+=",
"1",
"prefix",
"=",
"prefix",
"[",
":",
"i",
"]",
"return",
"prefix"
] |
Get the common prefix for all names
|
[
"Get",
"the",
"common",
"prefix",
"for",
"all",
"names"
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L939-L949
|
train
|
apache/incubator-mxnet
|
python/mxnet/gluon/block.py
|
_infer_param_types
|
def _infer_param_types(in_params, out_params, arg_params, aux_params, default_dtype=mx_real_t):
"""Utility function that helps in inferring DType of args and auxs params
from given input param.
Parameters
----------
in_params: List of Symbol
List of input symbol variables.
out_params: Symbol
Output symbol variable.
arg_params: List of Str
List of names of argument parametrs.
aux_params: List of Str
List of names of auxiliary parameters.
default_dtype: numpy.dtype or str, default 'float32'
Default data type for arg_params and aux_params, if unable to infer the type.
Returns
-------
arg_types: List of numpy.dtype
List of arg_params type. Order is same as arg_params.
Defaults to 'float32', if unable to infer type.
aux_types: List of numpy.dtype
List of aux_params type. Order is same as aux_params.
Defaults to 'float32', if unable to infer type.
"""
arg_types = None
aux_types = None
# Get Input symbol details. This will be used to infer types of
# other parameters.
input_sym_names = [in_param.name for in_param in in_params]
# Try to infer input types. If not successful, we will set default dtype.
# If successful, we will try to infer other params in the graph.
input_sym_arg_types = []
can_infer_input_type = True
for in_param in in_params:
input_sym_arg_type = in_param.infer_type()[0]
if not input_sym_arg_type or len(input_sym_arg_type) < 1:
can_infer_input_type = False
break
else:
input_sym_arg_types.append(in_param.infer_type()[0][0])
# Try to infer types of other parameters.
if can_infer_input_type:
params = {k:v for k, v in zip(input_sym_names, input_sym_arg_types)}
arg_types, _, aux_types = out_params.infer_type(**params)
if arg_types is None or len(arg_types) != len(arg_params):
arg_types = []
for _ in arg_params:
arg_types.append(default_dtype)
if aux_types is None or len(aux_types) != len(aux_params):
aux_types = []
for _ in aux_params:
aux_types.append(default_dtype)
return (arg_types, aux_types)
|
python
|
def _infer_param_types(in_params, out_params, arg_params, aux_params, default_dtype=mx_real_t):
"""Utility function that helps in inferring DType of args and auxs params
from given input param.
Parameters
----------
in_params: List of Symbol
List of input symbol variables.
out_params: Symbol
Output symbol variable.
arg_params: List of Str
List of names of argument parametrs.
aux_params: List of Str
List of names of auxiliary parameters.
default_dtype: numpy.dtype or str, default 'float32'
Default data type for arg_params and aux_params, if unable to infer the type.
Returns
-------
arg_types: List of numpy.dtype
List of arg_params type. Order is same as arg_params.
Defaults to 'float32', if unable to infer type.
aux_types: List of numpy.dtype
List of aux_params type. Order is same as aux_params.
Defaults to 'float32', if unable to infer type.
"""
arg_types = None
aux_types = None
# Get Input symbol details. This will be used to infer types of
# other parameters.
input_sym_names = [in_param.name for in_param in in_params]
# Try to infer input types. If not successful, we will set default dtype.
# If successful, we will try to infer other params in the graph.
input_sym_arg_types = []
can_infer_input_type = True
for in_param in in_params:
input_sym_arg_type = in_param.infer_type()[0]
if not input_sym_arg_type or len(input_sym_arg_type) < 1:
can_infer_input_type = False
break
else:
input_sym_arg_types.append(in_param.infer_type()[0][0])
# Try to infer types of other parameters.
if can_infer_input_type:
params = {k:v for k, v in zip(input_sym_names, input_sym_arg_types)}
arg_types, _, aux_types = out_params.infer_type(**params)
if arg_types is None or len(arg_types) != len(arg_params):
arg_types = []
for _ in arg_params:
arg_types.append(default_dtype)
if aux_types is None or len(aux_types) != len(aux_params):
aux_types = []
for _ in aux_params:
aux_types.append(default_dtype)
return (arg_types, aux_types)
|
[
"def",
"_infer_param_types",
"(",
"in_params",
",",
"out_params",
",",
"arg_params",
",",
"aux_params",
",",
"default_dtype",
"=",
"mx_real_t",
")",
":",
"arg_types",
"=",
"None",
"aux_types",
"=",
"None",
"# Get Input symbol details. This will be used to infer types of",
"# other parameters.",
"input_sym_names",
"=",
"[",
"in_param",
".",
"name",
"for",
"in_param",
"in",
"in_params",
"]",
"# Try to infer input types. If not successful, we will set default dtype.",
"# If successful, we will try to infer other params in the graph.",
"input_sym_arg_types",
"=",
"[",
"]",
"can_infer_input_type",
"=",
"True",
"for",
"in_param",
"in",
"in_params",
":",
"input_sym_arg_type",
"=",
"in_param",
".",
"infer_type",
"(",
")",
"[",
"0",
"]",
"if",
"not",
"input_sym_arg_type",
"or",
"len",
"(",
"input_sym_arg_type",
")",
"<",
"1",
":",
"can_infer_input_type",
"=",
"False",
"break",
"else",
":",
"input_sym_arg_types",
".",
"append",
"(",
"in_param",
".",
"infer_type",
"(",
")",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"# Try to infer types of other parameters.",
"if",
"can_infer_input_type",
":",
"params",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"input_sym_names",
",",
"input_sym_arg_types",
")",
"}",
"arg_types",
",",
"_",
",",
"aux_types",
"=",
"out_params",
".",
"infer_type",
"(",
"*",
"*",
"params",
")",
"if",
"arg_types",
"is",
"None",
"or",
"len",
"(",
"arg_types",
")",
"!=",
"len",
"(",
"arg_params",
")",
":",
"arg_types",
"=",
"[",
"]",
"for",
"_",
"in",
"arg_params",
":",
"arg_types",
".",
"append",
"(",
"default_dtype",
")",
"if",
"aux_types",
"is",
"None",
"or",
"len",
"(",
"aux_types",
")",
"!=",
"len",
"(",
"aux_params",
")",
":",
"aux_types",
"=",
"[",
"]",
"for",
"_",
"in",
"aux_params",
":",
"aux_types",
".",
"append",
"(",
"default_dtype",
")",
"return",
"(",
"arg_types",
",",
"aux_types",
")"
] |
Utility function that helps in inferring DType of args and auxs params
from given input param.
Parameters
----------
in_params: List of Symbol
List of input symbol variables.
out_params: Symbol
Output symbol variable.
arg_params: List of Str
List of names of argument parametrs.
aux_params: List of Str
List of names of auxiliary parameters.
default_dtype: numpy.dtype or str, default 'float32'
Default data type for arg_params and aux_params, if unable to infer the type.
Returns
-------
arg_types: List of numpy.dtype
List of arg_params type. Order is same as arg_params.
Defaults to 'float32', if unable to infer type.
aux_types: List of numpy.dtype
List of aux_params type. Order is same as aux_params.
Defaults to 'float32', if unable to infer type.
|
[
"Utility",
"function",
"that",
"helps",
"in",
"inferring",
"DType",
"of",
"args",
"and",
"auxs",
"params",
"from",
"given",
"input",
"param",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L1108-L1168
|
train
|
apache/incubator-mxnet
|
python/mxnet/gluon/block.py
|
_BlockScope.create
|
def create(prefix, params, hint):
"""Creates prefix and params for new `Block`."""
current = getattr(_BlockScope._current, "value", None)
if current is None:
if prefix is None:
if not hasattr(_name.NameManager._current, "value"):
_name.NameManager._current.value = _name.NameManager()
prefix = _name.NameManager._current.value.get(None, hint) + '_'
if params is None:
params = ParameterDict(prefix)
else:
params = ParameterDict(params.prefix, params)
return prefix, params
if prefix is None:
count = current._counter.get(hint, 0)
prefix = '%s%d_'%(hint, count)
current._counter[hint] = count + 1
if params is None:
parent = current._block.params
params = ParameterDict(parent.prefix+prefix, parent._shared)
else:
params = ParameterDict(params.prefix, params)
return current._block.prefix+prefix, params
|
python
|
def create(prefix, params, hint):
"""Creates prefix and params for new `Block`."""
current = getattr(_BlockScope._current, "value", None)
if current is None:
if prefix is None:
if not hasattr(_name.NameManager._current, "value"):
_name.NameManager._current.value = _name.NameManager()
prefix = _name.NameManager._current.value.get(None, hint) + '_'
if params is None:
params = ParameterDict(prefix)
else:
params = ParameterDict(params.prefix, params)
return prefix, params
if prefix is None:
count = current._counter.get(hint, 0)
prefix = '%s%d_'%(hint, count)
current._counter[hint] = count + 1
if params is None:
parent = current._block.params
params = ParameterDict(parent.prefix+prefix, parent._shared)
else:
params = ParameterDict(params.prefix, params)
return current._block.prefix+prefix, params
|
[
"def",
"create",
"(",
"prefix",
",",
"params",
",",
"hint",
")",
":",
"current",
"=",
"getattr",
"(",
"_BlockScope",
".",
"_current",
",",
"\"value\"",
",",
"None",
")",
"if",
"current",
"is",
"None",
":",
"if",
"prefix",
"is",
"None",
":",
"if",
"not",
"hasattr",
"(",
"_name",
".",
"NameManager",
".",
"_current",
",",
"\"value\"",
")",
":",
"_name",
".",
"NameManager",
".",
"_current",
".",
"value",
"=",
"_name",
".",
"NameManager",
"(",
")",
"prefix",
"=",
"_name",
".",
"NameManager",
".",
"_current",
".",
"value",
".",
"get",
"(",
"None",
",",
"hint",
")",
"+",
"'_'",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"ParameterDict",
"(",
"prefix",
")",
"else",
":",
"params",
"=",
"ParameterDict",
"(",
"params",
".",
"prefix",
",",
"params",
")",
"return",
"prefix",
",",
"params",
"if",
"prefix",
"is",
"None",
":",
"count",
"=",
"current",
".",
"_counter",
".",
"get",
"(",
"hint",
",",
"0",
")",
"prefix",
"=",
"'%s%d_'",
"%",
"(",
"hint",
",",
"count",
")",
"current",
".",
"_counter",
"[",
"hint",
"]",
"=",
"count",
"+",
"1",
"if",
"params",
"is",
"None",
":",
"parent",
"=",
"current",
".",
"_block",
".",
"params",
"params",
"=",
"ParameterDict",
"(",
"parent",
".",
"prefix",
"+",
"prefix",
",",
"parent",
".",
"_shared",
")",
"else",
":",
"params",
"=",
"ParameterDict",
"(",
"params",
".",
"prefix",
",",
"params",
")",
"return",
"current",
".",
"_block",
".",
"prefix",
"+",
"prefix",
",",
"params"
] |
Creates prefix and params for new `Block`.
|
[
"Creates",
"prefix",
"and",
"params",
"for",
"new",
"Block",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L49-L72
|
train
|
apache/incubator-mxnet
|
python/mxnet/gluon/block.py
|
Block.collect_params
|
def collect_params(self, select=None):
"""Returns a :py:class:`ParameterDict` containing this :py:class:`Block` and all of its
children's Parameters(default), also can returns the select :py:class:`ParameterDict`
which match some given regular expressions.
For example, collect the specified parameters in ['conv1_weight', 'conv1_bias', 'fc_weight',
'fc_bias']::
model.collect_params('conv1_weight|conv1_bias|fc_weight|fc_bias')
or collect all parameters whose names end with 'weight' or 'bias', this can be done
using regular expressions::
model.collect_params('.*weight|.*bias')
Parameters
----------
select : str
regular expressions
Returns
-------
The selected :py:class:`ParameterDict`
"""
# We need to check here because blocks inside containers are not supported.
self._check_container_with_block()
ret = ParameterDict(self._params.prefix)
if not select:
ret.update(self.params)
else:
pattern = re.compile(select)
ret.update({name:value for name, value in self.params.items() if pattern.match(name)})
for cld in self._children.values():
ret.update(cld.collect_params(select=select))
return ret
|
python
|
def collect_params(self, select=None):
"""Returns a :py:class:`ParameterDict` containing this :py:class:`Block` and all of its
children's Parameters(default), also can returns the select :py:class:`ParameterDict`
which match some given regular expressions.
For example, collect the specified parameters in ['conv1_weight', 'conv1_bias', 'fc_weight',
'fc_bias']::
model.collect_params('conv1_weight|conv1_bias|fc_weight|fc_bias')
or collect all parameters whose names end with 'weight' or 'bias', this can be done
using regular expressions::
model.collect_params('.*weight|.*bias')
Parameters
----------
select : str
regular expressions
Returns
-------
The selected :py:class:`ParameterDict`
"""
# We need to check here because blocks inside containers are not supported.
self._check_container_with_block()
ret = ParameterDict(self._params.prefix)
if not select:
ret.update(self.params)
else:
pattern = re.compile(select)
ret.update({name:value for name, value in self.params.items() if pattern.match(name)})
for cld in self._children.values():
ret.update(cld.collect_params(select=select))
return ret
|
[
"def",
"collect_params",
"(",
"self",
",",
"select",
"=",
"None",
")",
":",
"# We need to check here because blocks inside containers are not supported.",
"self",
".",
"_check_container_with_block",
"(",
")",
"ret",
"=",
"ParameterDict",
"(",
"self",
".",
"_params",
".",
"prefix",
")",
"if",
"not",
"select",
":",
"ret",
".",
"update",
"(",
"self",
".",
"params",
")",
"else",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"select",
")",
"ret",
".",
"update",
"(",
"{",
"name",
":",
"value",
"for",
"name",
",",
"value",
"in",
"self",
".",
"params",
".",
"items",
"(",
")",
"if",
"pattern",
".",
"match",
"(",
"name",
")",
"}",
")",
"for",
"cld",
"in",
"self",
".",
"_children",
".",
"values",
"(",
")",
":",
"ret",
".",
"update",
"(",
"cld",
".",
"collect_params",
"(",
"select",
"=",
"select",
")",
")",
"return",
"ret"
] |
Returns a :py:class:`ParameterDict` containing this :py:class:`Block` and all of its
children's Parameters(default), also can returns the select :py:class:`ParameterDict`
which match some given regular expressions.
For example, collect the specified parameters in ['conv1_weight', 'conv1_bias', 'fc_weight',
'fc_bias']::
model.collect_params('conv1_weight|conv1_bias|fc_weight|fc_bias')
or collect all parameters whose names end with 'weight' or 'bias', this can be done
using regular expressions::
model.collect_params('.*weight|.*bias')
Parameters
----------
select : str
regular expressions
Returns
-------
The selected :py:class:`ParameterDict`
|
[
"Returns",
"a",
":",
"py",
":",
"class",
":",
"ParameterDict",
"containing",
"this",
":",
"py",
":",
"class",
":",
"Block",
"and",
"all",
"of",
"its",
"children",
"s",
"Parameters",
"(",
"default",
")",
"also",
"can",
"returns",
"the",
"select",
":",
"py",
":",
"class",
":",
"ParameterDict",
"which",
"match",
"some",
"given",
"regular",
"expressions",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L271-L305
|
train
|
apache/incubator-mxnet
|
python/mxnet/gluon/block.py
|
Block.save_params
|
def save_params(self, filename):
"""[Deprecated] Please use save_parameters. Note that if you want load
from SymbolBlock later, please use export instead.
Save parameters to file.
filename : str
Path to file.
"""
warnings.warn("save_params is deprecated. Please use save_parameters. "
"Note that if you want load from SymbolBlock later, please "
"use export instead. For details, see "
"https://mxnet.incubator.apache.org/tutorials/gluon/save_lo"
"ad_params.html")
try:
self.collect_params().save(filename, strip_prefix=self.prefix)
except ValueError as e:
raise ValueError('%s\nsave_params is deprecated. Using ' \
'save_parameters may resolve this error.'%e.message)
|
python
|
def save_params(self, filename):
"""[Deprecated] Please use save_parameters. Note that if you want load
from SymbolBlock later, please use export instead.
Save parameters to file.
filename : str
Path to file.
"""
warnings.warn("save_params is deprecated. Please use save_parameters. "
"Note that if you want load from SymbolBlock later, please "
"use export instead. For details, see "
"https://mxnet.incubator.apache.org/tutorials/gluon/save_lo"
"ad_params.html")
try:
self.collect_params().save(filename, strip_prefix=self.prefix)
except ValueError as e:
raise ValueError('%s\nsave_params is deprecated. Using ' \
'save_parameters may resolve this error.'%e.message)
|
[
"def",
"save_params",
"(",
"self",
",",
"filename",
")",
":",
"warnings",
".",
"warn",
"(",
"\"save_params is deprecated. Please use save_parameters. \"",
"\"Note that if you want load from SymbolBlock later, please \"",
"\"use export instead. For details, see \"",
"\"https://mxnet.incubator.apache.org/tutorials/gluon/save_lo\"",
"\"ad_params.html\"",
")",
"try",
":",
"self",
".",
"collect_params",
"(",
")",
".",
"save",
"(",
"filename",
",",
"strip_prefix",
"=",
"self",
".",
"prefix",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"'%s\\nsave_params is deprecated. Using '",
"'save_parameters may resolve this error.'",
"%",
"e",
".",
"message",
")"
] |
[Deprecated] Please use save_parameters. Note that if you want load
from SymbolBlock later, please use export instead.
Save parameters to file.
filename : str
Path to file.
|
[
"[",
"Deprecated",
"]",
"Please",
"use",
"save_parameters",
".",
"Note",
"that",
"if",
"you",
"want",
"load",
"from",
"SymbolBlock",
"later",
"please",
"use",
"export",
"instead",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L336-L354
|
train
|
apache/incubator-mxnet
|
python/mxnet/gluon/block.py
|
Block.load_parameters
|
def load_parameters(self, filename, ctx=None, allow_missing=False,
ignore_extra=False):
"""Load parameters from file previously saved by `save_parameters`.
Parameters
----------
filename : str
Path to parameter file.
ctx : Context or list of Context, default cpu()
Context(s) to initialize loaded parameters on.
allow_missing : bool, default False
Whether to silently skip loading parameters not represents in the file.
ignore_extra : bool, default False
Whether to silently ignore parameters from the file that are not
present in this Block.
References
----------
`Saving and Loading Gluon Models \
<https://mxnet.incubator.apache.org/tutorials/gluon/save_load_params.html>`_
"""
loaded = ndarray.load(filename)
params = self._collect_params_with_prefix()
if not loaded and not params:
return
if not any('.' in i for i in loaded.keys()):
# legacy loading
del loaded
self.collect_params().load(
filename, ctx, allow_missing, ignore_extra, self.prefix)
return
if not allow_missing:
for name in params.keys():
assert name in loaded, \
"Parameter '%s' is missing in file '%s', which contains parameters: %s. " \
"Set allow_missing=True to ignore missing parameters."%(
name, filename, _brief_print_list(loaded.keys()))
for name in loaded:
if not ignore_extra and name not in params:
raise ValueError(
"Parameter '%s' loaded from file '%s' is not present in ParameterDict, " \
"which contains parameters %s. Set ignore_extra=True to ignore. "%(
name, filename, _brief_print_list(self._params.keys())))
if name in params:
params[name]._load_init(loaded[name], ctx)
|
python
|
def load_parameters(self, filename, ctx=None, allow_missing=False,
ignore_extra=False):
"""Load parameters from file previously saved by `save_parameters`.
Parameters
----------
filename : str
Path to parameter file.
ctx : Context or list of Context, default cpu()
Context(s) to initialize loaded parameters on.
allow_missing : bool, default False
Whether to silently skip loading parameters not represents in the file.
ignore_extra : bool, default False
Whether to silently ignore parameters from the file that are not
present in this Block.
References
----------
`Saving and Loading Gluon Models \
<https://mxnet.incubator.apache.org/tutorials/gluon/save_load_params.html>`_
"""
loaded = ndarray.load(filename)
params = self._collect_params_with_prefix()
if not loaded and not params:
return
if not any('.' in i for i in loaded.keys()):
# legacy loading
del loaded
self.collect_params().load(
filename, ctx, allow_missing, ignore_extra, self.prefix)
return
if not allow_missing:
for name in params.keys():
assert name in loaded, \
"Parameter '%s' is missing in file '%s', which contains parameters: %s. " \
"Set allow_missing=True to ignore missing parameters."%(
name, filename, _brief_print_list(loaded.keys()))
for name in loaded:
if not ignore_extra and name not in params:
raise ValueError(
"Parameter '%s' loaded from file '%s' is not present in ParameterDict, " \
"which contains parameters %s. Set ignore_extra=True to ignore. "%(
name, filename, _brief_print_list(self._params.keys())))
if name in params:
params[name]._load_init(loaded[name], ctx)
|
[
"def",
"load_parameters",
"(",
"self",
",",
"filename",
",",
"ctx",
"=",
"None",
",",
"allow_missing",
"=",
"False",
",",
"ignore_extra",
"=",
"False",
")",
":",
"loaded",
"=",
"ndarray",
".",
"load",
"(",
"filename",
")",
"params",
"=",
"self",
".",
"_collect_params_with_prefix",
"(",
")",
"if",
"not",
"loaded",
"and",
"not",
"params",
":",
"return",
"if",
"not",
"any",
"(",
"'.'",
"in",
"i",
"for",
"i",
"in",
"loaded",
".",
"keys",
"(",
")",
")",
":",
"# legacy loading",
"del",
"loaded",
"self",
".",
"collect_params",
"(",
")",
".",
"load",
"(",
"filename",
",",
"ctx",
",",
"allow_missing",
",",
"ignore_extra",
",",
"self",
".",
"prefix",
")",
"return",
"if",
"not",
"allow_missing",
":",
"for",
"name",
"in",
"params",
".",
"keys",
"(",
")",
":",
"assert",
"name",
"in",
"loaded",
",",
"\"Parameter '%s' is missing in file '%s', which contains parameters: %s. \"",
"\"Set allow_missing=True to ignore missing parameters.\"",
"%",
"(",
"name",
",",
"filename",
",",
"_brief_print_list",
"(",
"loaded",
".",
"keys",
"(",
")",
")",
")",
"for",
"name",
"in",
"loaded",
":",
"if",
"not",
"ignore_extra",
"and",
"name",
"not",
"in",
"params",
":",
"raise",
"ValueError",
"(",
"\"Parameter '%s' loaded from file '%s' is not present in ParameterDict, \"",
"\"which contains parameters %s. Set ignore_extra=True to ignore. \"",
"%",
"(",
"name",
",",
"filename",
",",
"_brief_print_list",
"(",
"self",
".",
"_params",
".",
"keys",
"(",
")",
")",
")",
")",
"if",
"name",
"in",
"params",
":",
"params",
"[",
"name",
"]",
".",
"_load_init",
"(",
"loaded",
"[",
"name",
"]",
",",
"ctx",
")"
] |
Load parameters from file previously saved by `save_parameters`.
Parameters
----------
filename : str
Path to parameter file.
ctx : Context or list of Context, default cpu()
Context(s) to initialize loaded parameters on.
allow_missing : bool, default False
Whether to silently skip loading parameters not represents in the file.
ignore_extra : bool, default False
Whether to silently ignore parameters from the file that are not
present in this Block.
References
----------
`Saving and Loading Gluon Models \
<https://mxnet.incubator.apache.org/tutorials/gluon/save_load_params.html>`_
|
[
"Load",
"parameters",
"from",
"file",
"previously",
"saved",
"by",
"save_parameters",
"."
] |
1af29e9c060a4c7d60eeaacba32afdb9a7775ba7
|
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/block.py#L356-L402
|
train
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.