repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
sony/nnabla | python/src/nnabla/utils/image_utils/pypng_utils.py | imsave | def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True):
"""
Save image by pypng module.
Args:
path (str): output filename
img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.
channel_first:
This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
Default value is False, which means the img shape is (height, width, channel)
as_uint16 (bool):
If True, save image as uint16.
auto_scale (bool) :
Whether upscale pixel values or not.
If you want to save float image, this argument must be True.
In pypng backend, all below are supported.
- float ([0, 1]) to uint8 ([0, 255]) (if img.dtype==float and upscale==True and as_uint16==False)
- float to uint16 ([0, 65535]) (if img.dtype==float and upscale==True and as_uint16==True)
- uint8 to uint16 are supported (if img.dtype==np.uint8 and upscale==True and as_uint16==True)
"""
img = _imsave_before(img, channel_first, auto_scale)
if auto_scale:
img = upscale_pixel_intensity(img, as_uint16)
img = check_type_and_cast_if_necessary(img, as_uint16)
bitdepth = 8 if img.dtype == np.uint8 else 16
grayscale = True if len(img.shape) == 2 or (
len(img.shape) == 3 and img.shape[-1] == 1) else False
writer = png.Writer(img.shape[1], img.shape[0],
greyscale=grayscale, bitdepth=bitdepth)
writer.write(open(path, "wb"), img.reshape(img.shape[0], -1)) | python | def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True):
"""
Save image by pypng module.
Args:
path (str): output filename
img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.
channel_first:
This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
Default value is False, which means the img shape is (height, width, channel)
as_uint16 (bool):
If True, save image as uint16.
auto_scale (bool) :
Whether upscale pixel values or not.
If you want to save float image, this argument must be True.
In pypng backend, all below are supported.
- float ([0, 1]) to uint8 ([0, 255]) (if img.dtype==float and upscale==True and as_uint16==False)
- float to uint16 ([0, 65535]) (if img.dtype==float and upscale==True and as_uint16==True)
- uint8 to uint16 are supported (if img.dtype==np.uint8 and upscale==True and as_uint16==True)
"""
img = _imsave_before(img, channel_first, auto_scale)
if auto_scale:
img = upscale_pixel_intensity(img, as_uint16)
img = check_type_and_cast_if_necessary(img, as_uint16)
bitdepth = 8 if img.dtype == np.uint8 else 16
grayscale = True if len(img.shape) == 2 or (
len(img.shape) == 3 and img.shape[-1] == 1) else False
writer = png.Writer(img.shape[1], img.shape[0],
greyscale=grayscale, bitdepth=bitdepth)
writer.write(open(path, "wb"), img.reshape(img.shape[0], -1)) | [
"def",
"imsave",
"(",
"path",
",",
"img",
",",
"channel_first",
"=",
"False",
",",
"as_uint16",
"=",
"False",
",",
"auto_scale",
"=",
"True",
")",
":",
"img",
"=",
"_imsave_before",
"(",
"img",
",",
"channel_first",
",",
"auto_scale",
")",
"if",
"auto_sc... | Save image by pypng module.
Args:
path (str): output filename
img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.
channel_first:
This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
Default value is False, which means the img shape is (height, width, channel)
as_uint16 (bool):
If True, save image as uint16.
auto_scale (bool) :
Whether upscale pixel values or not.
If you want to save float image, this argument must be True.
In pypng backend, all below are supported.
- float ([0, 1]) to uint8 ([0, 255]) (if img.dtype==float and upscale==True and as_uint16==False)
- float to uint16 ([0, 65535]) (if img.dtype==float and upscale==True and as_uint16==True)
- uint8 to uint16 are supported (if img.dtype==np.uint8 and upscale==True and as_uint16==True) | [
"Save",
"image",
"by",
"pypng",
"module",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/image_utils/pypng_utils.py#L125-L160 | train | 223,700 |
sony/nnabla | python/src/nnabla/context.py | context_scope | def context_scope(ctx):
"""
Context as Python context.
.. code-block:: python
import nnabla as nn
import nnabla.functions as F
x = nn.Variable([2, 3 ,4])
ctx = nnabla_ext.cuda.context('0')
with context_scope(ctx):
# Inside with scope, the specified context is used.
with parameter_scope('w1'):
l1 = F.relu(F.affine(x, 64))
with parameter_scope('w2'):
l2 = F.relu(F.affine(x, 64))
"""
global current_ctx
global context_level
context_level += 1
prev_context = current_ctx
current_ctx = ctx
try:
yield
finally:
context_level -= 1
current_ctx = prev_context | python | def context_scope(ctx):
"""
Context as Python context.
.. code-block:: python
import nnabla as nn
import nnabla.functions as F
x = nn.Variable([2, 3 ,4])
ctx = nnabla_ext.cuda.context('0')
with context_scope(ctx):
# Inside with scope, the specified context is used.
with parameter_scope('w1'):
l1 = F.relu(F.affine(x, 64))
with parameter_scope('w2'):
l2 = F.relu(F.affine(x, 64))
"""
global current_ctx
global context_level
context_level += 1
prev_context = current_ctx
current_ctx = ctx
try:
yield
finally:
context_level -= 1
current_ctx = prev_context | [
"def",
"context_scope",
"(",
"ctx",
")",
":",
"global",
"current_ctx",
"global",
"context_level",
"context_level",
"+=",
"1",
"prev_context",
"=",
"current_ctx",
"current_ctx",
"=",
"ctx",
"try",
":",
"yield",
"finally",
":",
"context_level",
"-=",
"1",
"current... | Context as Python context.
.. code-block:: python
import nnabla as nn
import nnabla.functions as F
x = nn.Variable([2, 3 ,4])
ctx = nnabla_ext.cuda.context('0')
with context_scope(ctx):
# Inside with scope, the specified context is used.
with parameter_scope('w1'):
l1 = F.relu(F.affine(x, 64))
with parameter_scope('w2'):
l2 = F.relu(F.affine(x, 64)) | [
"Context",
"as",
"Python",
"context",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/context.py#L29-L56 | train | 223,701 |
sony/nnabla | python/src/nnabla/utils/converter/onnx/exporter.py | generate_scalar_constant | def generate_scalar_constant(output_name, tensor_name, scalar):
"""Convert a scalar value to a Constant buffer.
This is mainly used for xxScalar operators."""
t = onnx.helper.make_tensor(tensor_name,
data_type=TensorProto.FLOAT,
dims=[1], vals=[scalar])
c = onnx.helper.make_node("Constant",
[],
[output_name],
value=t)
return c | python | def generate_scalar_constant(output_name, tensor_name, scalar):
"""Convert a scalar value to a Constant buffer.
This is mainly used for xxScalar operators."""
t = onnx.helper.make_tensor(tensor_name,
data_type=TensorProto.FLOAT,
dims=[1], vals=[scalar])
c = onnx.helper.make_node("Constant",
[],
[output_name],
value=t)
return c | [
"def",
"generate_scalar_constant",
"(",
"output_name",
",",
"tensor_name",
",",
"scalar",
")",
":",
"t",
"=",
"onnx",
".",
"helper",
".",
"make_tensor",
"(",
"tensor_name",
",",
"data_type",
"=",
"TensorProto",
".",
"FLOAT",
",",
"dims",
"=",
"[",
"1",
"]"... | Convert a scalar value to a Constant buffer.
This is mainly used for xxScalar operators. | [
"Convert",
"a",
"scalar",
"value",
"to",
"a",
"Constant",
"buffer",
".",
"This",
"is",
"mainly",
"used",
"for",
"xxScalar",
"operators",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/exporter.py#L42-L52 | train | 223,702 |
sony/nnabla | python/src/nnabla/utils/converter/onnx/exporter.py | replace_negative_size_with_batch_size | def replace_negative_size_with_batch_size(shape, batch_size):
"""Replace all dimensions with negative values to batch size"""
sl = []
for d in shape.dim:
if d < 0:
# Negative size means batch size
sl.append(batch_size)
else:
sl.append(d)
out_shape = nnabla_pb2.Shape()
out_shape.dim.extend(sl)
return out_shape | python | def replace_negative_size_with_batch_size(shape, batch_size):
"""Replace all dimensions with negative values to batch size"""
sl = []
for d in shape.dim:
if d < 0:
# Negative size means batch size
sl.append(batch_size)
else:
sl.append(d)
out_shape = nnabla_pb2.Shape()
out_shape.dim.extend(sl)
return out_shape | [
"def",
"replace_negative_size_with_batch_size",
"(",
"shape",
",",
"batch_size",
")",
":",
"sl",
"=",
"[",
"]",
"for",
"d",
"in",
"shape",
".",
"dim",
":",
"if",
"d",
"<",
"0",
":",
"# Negative size means batch size",
"sl",
".",
"append",
"(",
"batch_size",
... | Replace all dimensions with negative values to batch size | [
"Replace",
"all",
"dimensions",
"with",
"negative",
"values",
"to",
"batch",
"size"
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/exporter.py#L121-L132 | train | 223,703 |
sony/nnabla | python/src/nnabla/utils/converter/onnx/exporter.py | OnnxExporter.BinarySigmoid | def BinarySigmoid(self, func):
'''
Currently, caffe2 does not support this function.
'''
n = onnx.helper.make_node(
'HardSigmoid',
func.input,
func.output,
alpha=1.0,
beta=0.0
)
return [n] | python | def BinarySigmoid(self, func):
'''
Currently, caffe2 does not support this function.
'''
n = onnx.helper.make_node(
'HardSigmoid',
func.input,
func.output,
alpha=1.0,
beta=0.0
)
return [n] | [
"def",
"BinarySigmoid",
"(",
"self",
",",
"func",
")",
":",
"n",
"=",
"onnx",
".",
"helper",
".",
"make_node",
"(",
"'HardSigmoid'",
",",
"func",
".",
"input",
",",
"func",
".",
"output",
",",
"alpha",
"=",
"1.0",
",",
"beta",
"=",
"0.0",
")",
"ret... | Currently, caffe2 does not support this function. | [
"Currently",
"caffe2",
"does",
"not",
"support",
"this",
"function",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/onnx/exporter.py#L392-L403 | train | 223,704 |
sony/nnabla | python/src/nnabla/experimental/graph_converters/sequential.py | SequentialConverter.convert | def convert(self, vroot, entry_variables):
"""Convert a given graph.
Convert a given graph using the `converters` in the order of the registeration, i.e., sequentially.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
"""
for converter in self.converters:
vroot = converter.convert(vroot, entry_variables)
return vroot | python | def convert(self, vroot, entry_variables):
"""Convert a given graph.
Convert a given graph using the `converters` in the order of the registeration, i.e., sequentially.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
"""
for converter in self.converters:
vroot = converter.convert(vroot, entry_variables)
return vroot | [
"def",
"convert",
"(",
"self",
",",
"vroot",
",",
"entry_variables",
")",
":",
"for",
"converter",
"in",
"self",
".",
"converters",
":",
"vroot",
"=",
"converter",
".",
"convert",
"(",
"vroot",
",",
"entry_variables",
")",
"return",
"vroot"
] | Convert a given graph.
Convert a given graph using the `converters` in the order of the registeration, i.e., sequentially.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | [
"Convert",
"a",
"given",
"graph",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/experimental/graph_converters/sequential.py#L17-L29 | train | 223,705 |
sony/nnabla | python/src/nnabla/initializer.py | calc_normal_std_he_forward | def calc_normal_std_he_forward(inmaps, outmaps, kernel=(1, 1)):
r"""Calculates the standard deviation proposed by He et al.
.. math::
\sigma = \sqrt{\frac{2}{NK}}
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
s = I.calc_normal_std_he_forward(x.shape[1],64)
w = I.NormalInitializer(s)
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `He, et al. Delving Deep into Rectifiers: Surpassing Human-Level
Performance on ImageNet Classification.
<https://arxiv.org/abs/1502.01852>`_
"""
return np.sqrt(2. / (np.prod(kernel) * inmaps)) | python | def calc_normal_std_he_forward(inmaps, outmaps, kernel=(1, 1)):
r"""Calculates the standard deviation proposed by He et al.
.. math::
\sigma = \sqrt{\frac{2}{NK}}
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
s = I.calc_normal_std_he_forward(x.shape[1],64)
w = I.NormalInitializer(s)
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `He, et al. Delving Deep into Rectifiers: Surpassing Human-Level
Performance on ImageNet Classification.
<https://arxiv.org/abs/1502.01852>`_
"""
return np.sqrt(2. / (np.prod(kernel) * inmaps)) | [
"def",
"calc_normal_std_he_forward",
"(",
"inmaps",
",",
"outmaps",
",",
"kernel",
"=",
"(",
"1",
",",
"1",
")",
")",
":",
"return",
"np",
".",
"sqrt",
"(",
"2.",
"/",
"(",
"np",
".",
"prod",
"(",
"kernel",
")",
"*",
"inmaps",
")",
")"
] | r"""Calculates the standard deviation proposed by He et al.
.. math::
\sigma = \sqrt{\frac{2}{NK}}
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
s = I.calc_normal_std_he_forward(x.shape[1],64)
w = I.NormalInitializer(s)
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `He, et al. Delving Deep into Rectifiers: Surpassing Human-Level
Performance on ImageNet Classification.
<https://arxiv.org/abs/1502.01852>`_ | [
"r",
"Calculates",
"the",
"standard",
"deviation",
"proposed",
"by",
"He",
"et",
"al",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/initializer.py#L216-L249 | train | 223,706 |
sony/nnabla | python/src/nnabla/initializer.py | calc_normal_std_glorot | def calc_normal_std_glorot(inmaps, outmaps, kernel=(1, 1)):
r"""Calculates the standard deviation proposed by Glorot et al.
.. math::
\sigma = \sqrt{\frac{2}{NK + M}}
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
s = I.calc_normal_std_glorot(x.shape[1],64)
w = I.NormalInitializer(s)
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
"""
return np.sqrt(2. / (np.prod(kernel) * inmaps + outmaps)) | python | def calc_normal_std_glorot(inmaps, outmaps, kernel=(1, 1)):
r"""Calculates the standard deviation proposed by Glorot et al.
.. math::
\sigma = \sqrt{\frac{2}{NK + M}}
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
s = I.calc_normal_std_glorot(x.shape[1],64)
w = I.NormalInitializer(s)
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
"""
return np.sqrt(2. / (np.prod(kernel) * inmaps + outmaps)) | [
"def",
"calc_normal_std_glorot",
"(",
"inmaps",
",",
"outmaps",
",",
"kernel",
"=",
"(",
"1",
",",
"1",
")",
")",
":",
"return",
"np",
".",
"sqrt",
"(",
"2.",
"/",
"(",
"np",
".",
"prod",
"(",
"kernel",
")",
"*",
"inmaps",
"+",
"outmaps",
")",
")... | r"""Calculates the standard deviation proposed by Glorot et al.
.. math::
\sigma = \sqrt{\frac{2}{NK + M}}
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
s = I.calc_normal_std_glorot(x.shape[1],64)
w = I.NormalInitializer(s)
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_ | [
"r",
"Calculates",
"the",
"standard",
"deviation",
"proposed",
"by",
"Glorot",
"et",
"al",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/initializer.py#L288-L321 | train | 223,707 |
sony/nnabla | python/src/nnabla/initializer.py | calc_uniform_lim_glorot | def calc_uniform_lim_glorot(inmaps, outmaps, kernel=(1, 1)):
r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al.
.. math::
b &= \sqrt{\frac{6}{NK + M}}\\
a &= -b
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64)
w = I.UniformInitializer((lb,ub))
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
"""
d = np.sqrt(6. / (np.prod(kernel) * inmaps + outmaps))
return -d, d | python | def calc_uniform_lim_glorot(inmaps, outmaps, kernel=(1, 1)):
r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al.
.. math::
b &= \sqrt{\frac{6}{NK + M}}\\
a &= -b
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64)
w = I.UniformInitializer((lb,ub))
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
"""
d = np.sqrt(6. / (np.prod(kernel) * inmaps + outmaps))
return -d, d | [
"def",
"calc_uniform_lim_glorot",
"(",
"inmaps",
",",
"outmaps",
",",
"kernel",
"=",
"(",
"1",
",",
"1",
")",
")",
":",
"d",
"=",
"np",
".",
"sqrt",
"(",
"6.",
"/",
"(",
"np",
".",
"prod",
"(",
"kernel",
")",
"*",
"inmaps",
"+",
"outmaps",
")",
... | r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al.
.. math::
b &= \sqrt{\frac{6}{NK + M}}\\
a &= -b
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64)
w = I.UniformInitializer((lb,ub))
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_ | [
"r",
"Calculates",
"the",
"lower",
"bound",
"and",
"the",
"upper",
"bound",
"of",
"the",
"uniform",
"distribution",
"proposed",
"by",
"Glorot",
"et",
"al",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/initializer.py#L324-L360 | train | 223,708 |
sony/nnabla | python/src/nnabla/utils/save.py | _get_unique_function_name | def _get_unique_function_name(function_type, functions):
'''Get a unique function name.
Args:
function_type(str): Name of Function. Ex) Convolution, Affine
functions(OrderedDict of (str, Function)
Returns: str
A unique function name
'''
function_name = function_name_base = function_type
count = 2
while function_name in functions:
function_name = '{}_{}'.format(function_name_base, count)
count += 1
return function_name | python | def _get_unique_function_name(function_type, functions):
'''Get a unique function name.
Args:
function_type(str): Name of Function. Ex) Convolution, Affine
functions(OrderedDict of (str, Function)
Returns: str
A unique function name
'''
function_name = function_name_base = function_type
count = 2
while function_name in functions:
function_name = '{}_{}'.format(function_name_base, count)
count += 1
return function_name | [
"def",
"_get_unique_function_name",
"(",
"function_type",
",",
"functions",
")",
":",
"function_name",
"=",
"function_name_base",
"=",
"function_type",
"count",
"=",
"2",
"while",
"function_name",
"in",
"functions",
":",
"function_name",
"=",
"'{}_{}'",
".",
"format... | Get a unique function name.
Args:
function_type(str): Name of Function. Ex) Convolution, Affine
functions(OrderedDict of (str, Function)
Returns: str
A unique function name | [
"Get",
"a",
"unique",
"function",
"name",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/save.py#L41-L56 | train | 223,709 |
sony/nnabla | python/src/nnabla/utils/save.py | _get_unique_variable_name | def _get_unique_variable_name(vname, variables):
'''Get a unique variable name.
Args:
vname(str): A candidate name.
variable(OrderedDict of str and Variable)
Returns: str
A unique variable name
'''
count = 2
vname_base = vname
while vname in variables:
vname = '{}_{}'.format(vname_base, count)
count += 1
return vname | python | def _get_unique_variable_name(vname, variables):
'''Get a unique variable name.
Args:
vname(str): A candidate name.
variable(OrderedDict of str and Variable)
Returns: str
A unique variable name
'''
count = 2
vname_base = vname
while vname in variables:
vname = '{}_{}'.format(vname_base, count)
count += 1
return vname | [
"def",
"_get_unique_variable_name",
"(",
"vname",
",",
"variables",
")",
":",
"count",
"=",
"2",
"vname_base",
"=",
"vname",
"while",
"vname",
"in",
"variables",
":",
"vname",
"=",
"'{}_{}'",
".",
"format",
"(",
"vname_base",
",",
"count",
")",
"count",
"+... | Get a unique variable name.
Args:
vname(str): A candidate name.
variable(OrderedDict of str and Variable)
Returns: str
A unique variable name | [
"Get",
"a",
"unique",
"variable",
"name",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/save.py#L59-L74 | train | 223,710 |
sony/nnabla | python/src/nnabla/functions.py | sum | def sum(x, axis=None, keepdims=False):
"""Reduction along axes with sum operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which the sum is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array.
"""
from .function_bases import sum as sum_base
if axis is None:
axis = range(x.ndim)
elif not hasattr(axis, '__iter__'):
axis = [axis]
return sum_base(x, axis, keepdims) | python | def sum(x, axis=None, keepdims=False):
"""Reduction along axes with sum operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which the sum is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array.
"""
from .function_bases import sum as sum_base
if axis is None:
axis = range(x.ndim)
elif not hasattr(axis, '__iter__'):
axis = [axis]
return sum_base(x, axis, keepdims) | [
"def",
"sum",
"(",
"x",
",",
"axis",
"=",
"None",
",",
"keepdims",
"=",
"False",
")",
":",
"from",
".",
"function_bases",
"import",
"sum",
"as",
"sum_base",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"range",
"(",
"x",
".",
"ndim",
")",
"elif",
... | Reduction along axes with sum operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which the sum is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array. | [
"Reduction",
"along",
"axes",
"with",
"sum",
"operation",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L21-L38 | train | 223,711 |
sony/nnabla | python/src/nnabla/functions.py | mean | def mean(x, axis=None, keepdims=False):
"""Reduction along axes with mean operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which mean is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array.
"""
from .function_bases import mean as mean_base
if axis is None:
axis = range(x.ndim)
elif not hasattr(axis, '__iter__'):
axis = [axis]
return mean_base(x, axis, keepdims) | python | def mean(x, axis=None, keepdims=False):
"""Reduction along axes with mean operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which mean is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array.
"""
from .function_bases import mean as mean_base
if axis is None:
axis = range(x.ndim)
elif not hasattr(axis, '__iter__'):
axis = [axis]
return mean_base(x, axis, keepdims) | [
"def",
"mean",
"(",
"x",
",",
"axis",
"=",
"None",
",",
"keepdims",
"=",
"False",
")",
":",
"from",
".",
"function_bases",
"import",
"mean",
"as",
"mean_base",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"range",
"(",
"x",
".",
"ndim",
")",
"elif... | Reduction along axes with mean operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which mean is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array. | [
"Reduction",
"along",
"axes",
"with",
"mean",
"operation",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L41-L59 | train | 223,712 |
sony/nnabla | python/src/nnabla/functions.py | prod | def prod(x, axis=None, keepdims=False):
"""Reduction along axes with product operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which product is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array.
Note:
Backward computation is not accurate in a zero value input.
"""
from .function_bases import prod as prod_base
if axis is None:
axis = range(x.ndim)
elif not hasattr(axis, '__iter__'):
axis = [axis]
return prod_base(x, axis, keepdims) | python | def prod(x, axis=None, keepdims=False):
"""Reduction along axes with product operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which product is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array.
Note:
Backward computation is not accurate in a zero value input.
"""
from .function_bases import prod as prod_base
if axis is None:
axis = range(x.ndim)
elif not hasattr(axis, '__iter__'):
axis = [axis]
return prod_base(x, axis, keepdims) | [
"def",
"prod",
"(",
"x",
",",
"axis",
"=",
"None",
",",
"keepdims",
"=",
"False",
")",
":",
"from",
".",
"function_bases",
"import",
"prod",
"as",
"prod_base",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"range",
"(",
"x",
".",
"ndim",
")",
"elif... | Reduction along axes with product operation.
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which product is
calculated. Passing the default value `None` will reduce all dimensions.
keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element.
Returns:
~nnabla.Variable: N-D array.
Note:
Backward computation is not accurate in a zero value input. | [
"Reduction",
"along",
"axes",
"with",
"product",
"operation",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L162-L183 | train | 223,713 |
sony/nnabla | python/src/nnabla/functions.py | reduce | def reduce(x, op='sum'):
"""Reduction function with given operation.
Args:
x (Variable): An input.
op (str): 'sum' or 'mean'.
Note:
This is deprecated. Use ``mean`` or ``sum`` instead.
"""
import warnings
warnings.warn(
"Deprecated API. Use ``sum`` or ``mean`` instead.", DeprecationWarning)
from .function_bases import reduce_sum, reduce_mean
if op == 'sum':
return reduce_sum(x)
elif op == 'mean':
return reduce_mean(x)
raise ValueError() | python | def reduce(x, op='sum'):
"""Reduction function with given operation.
Args:
x (Variable): An input.
op (str): 'sum' or 'mean'.
Note:
This is deprecated. Use ``mean`` or ``sum`` instead.
"""
import warnings
warnings.warn(
"Deprecated API. Use ``sum`` or ``mean`` instead.", DeprecationWarning)
from .function_bases import reduce_sum, reduce_mean
if op == 'sum':
return reduce_sum(x)
elif op == 'mean':
return reduce_mean(x)
raise ValueError() | [
"def",
"reduce",
"(",
"x",
",",
"op",
"=",
"'sum'",
")",
":",
"import",
"warnings",
"warnings",
".",
"warn",
"(",
"\"Deprecated API. Use ``sum`` or ``mean`` instead.\"",
",",
"DeprecationWarning",
")",
"from",
".",
"function_bases",
"import",
"reduce_sum",
",",
"r... | Reduction function with given operation.
Args:
x (Variable): An input.
op (str): 'sum' or 'mean'.
Note:
This is deprecated. Use ``mean`` or ``sum`` instead. | [
"Reduction",
"function",
"with",
"given",
"operation",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L186-L205 | train | 223,714 |
sony/nnabla | python/src/nnabla/functions.py | split | def split(x, axis=0):
"""
Split arrays at the specified axis.
It returns a number corresponding the size of the given
axis (i.e ``x.shape[axis]``) of :obj:`~nnabla.Variable` s.
Args:
x(~nnabla.Variable): N-D array
axis(int): Axis
Returns: A :obj:`tuple` of :obj:`~nnabla.Variable` s
See Also:
:func:`nnabla.function_bases.split`.
"""
from .function_bases import split as split_base
return split_base(x, axis, x.shape[axis]) | python | def split(x, axis=0):
"""
Split arrays at the specified axis.
It returns a number corresponding the size of the given
axis (i.e ``x.shape[axis]``) of :obj:`~nnabla.Variable` s.
Args:
x(~nnabla.Variable): N-D array
axis(int): Axis
Returns: A :obj:`tuple` of :obj:`~nnabla.Variable` s
See Also:
:func:`nnabla.function_bases.split`.
"""
from .function_bases import split as split_base
return split_base(x, axis, x.shape[axis]) | [
"def",
"split",
"(",
"x",
",",
"axis",
"=",
"0",
")",
":",
"from",
".",
"function_bases",
"import",
"split",
"as",
"split_base",
"return",
"split_base",
"(",
"x",
",",
"axis",
",",
"x",
".",
"shape",
"[",
"axis",
"]",
")"
] | Split arrays at the specified axis.
It returns a number corresponding the size of the given
axis (i.e ``x.shape[axis]``) of :obj:`~nnabla.Variable` s.
Args:
x(~nnabla.Variable): N-D array
axis(int): Axis
Returns: A :obj:`tuple` of :obj:`~nnabla.Variable` s
See Also:
:func:`nnabla.function_bases.split`. | [
"Split",
"arrays",
"at",
"the",
"specified",
"axis",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L208-L226 | train | 223,715 |
sony/nnabla | python/src/nnabla/functions.py | batch_normalization | def batch_normalization(x, beta, gamma, mean, variance, axes=[1], decay_rate=0.9, eps=1e-05, batch_stat=True, output_stat=False, n_outputs=None):
r"""
Batch normalization.
.. math::
\begin{eqnarray}
\mu &=& \frac{1}{M} \sum x_i \\
\sigma^2 &=& \frac{1}{M} \sum \left(x_i - \mu\right)^2 \\
\hat{x}_i &=& \frac{x_i - \mu}{\sqrt{\sigma^2 + \epsilon}} \\
y_i &=& \hat{x}_i \gamma + \beta.
\end{eqnarray}
At testing time, the mean and variance values used are those that were computed during training by moving average.
References:
* `Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.
<https://arxiv.org/abs/1502.03167>`_
Args:
x(~nnabla.Variable): N-D array of input.
beta(~nnabla.Variable): N-D array of beta which is learned.
gamma(~nnabla.Variable): N-D array of gamma which is learned.
mean(~nnabla.Variable): N-D array of running mean (modified during forward execution).
variance(~nnabla.Variable): N-D array of running variance (modified during forward execution).
axes(repeated int64): Axes mean and variance are taken.
decay_rate(float): Decay rate of running mean and variance.
eps(float): Tiny value to avoid zero division by std.
batch_stat(bool): Use mini-batch statistics rather than running ones.
output_stat(bool): It true, the batch statistics of mean and variance,
will be returned as Variables. They are also differentiable.
Returns:
Returns batch normalization output as :obj:`~nnabla.Variable`.
If ``output_stat=True``, it also returns the mean and variance
of the mini-batch
* :obj:`~nnabla.Variable`: Output of the batch normalization
* :obj:`~nnabla.Variable`: Mean (if ``output_stat=True`)
* :obj:`~nnabla.Variable`: Variance (if ``output_stat=True`)
See Also:
``nnabla.function_bases.batch_normalization``.
"""
from .function_bases import batch_normalization as batch_normalization_base
n_outputs = 3 if output_stat else 1
assert batch_stat or (not output_stat)
if batch_stat and (mean.parent or variance.parent) is not None:
raise ValueError(
"if batch_stat is True, mean and variable must not have a parent function")
if len(axes) == 1:
return batch_normalization_base(x, beta, gamma, mean, variance,
axes=axes,
decay_rate=decay_rate,
eps=eps,
batch_stat=batch_stat,
n_outputs=n_outputs)
def transpose_and_reshape(x, axes):
transposed = transpose(x, transpose_axes)
return reshape(transposed, [rd(lambda x, y: x * y, transposed.shape[:len(axes)])] + list(
transposed.shape[len(axes):])), transposed.shape
def inverse_transpose_and_reshape(x, axes, variable_shape):
un_reshaped = reshape(
x, list(variable_shape[:len(axes)] + variable_shape[len(axes):]))
return transpose(un_reshaped, inv_transpose_axes)
def get_tranpose_args(ndim, axes):
transpose_axes = [i for i in list(
axes)] + [i for i in range(ndim) if i not in list(axes)]
inv_transpose_axes = np.argsort(transpose_axes).tolist()
return transpose_axes, inv_transpose_axes
transpose_axes, inv_transpose_axes = get_tranpose_args(len(x.shape), axes)
inp, transposed_inp_shape = transpose_and_reshape(x, axes)
beta, transposed_beta_shape = transpose_and_reshape(beta, axes)
gamma, transposed_gamma_shape = transpose_and_reshape(gamma, axes)
mean, transposed_mean_shape = transpose_and_reshape(mean, axes)
variance, transposed_variance_shape = transpose_and_reshape(variance, axes)
if n_outputs == 1:
out = batch_normalization_base(inp, beta, gamma, mean, variance,
axes=[0],
decay_rate=decay_rate,
eps=eps,
batch_stat=batch_stat,
n_outputs=n_outputs)
return inverse_transpose_and_reshape(out, axes, transposed_inp_shape)
out, mean, variance = batch_normalization_base(inp, beta, gamma, mean, variance,
axes=[0],
decay_rate=decay_rate,
eps=eps,
batch_stat=batch_stat,
n_outputs=n_outputs)
out = inverse_transpose_and_reshape(out, axes, transposed_inp_shape)
mean = inverse_transpose_and_reshape(mean, axes, transposed_mean_shape)
variance = inverse_transpose_and_reshape(
variance, axes, transposed_variance_shape)
return out, mean, variance | python | def batch_normalization(x, beta, gamma, mean, variance, axes=[1], decay_rate=0.9, eps=1e-05, batch_stat=True, output_stat=False, n_outputs=None):
r"""
Batch normalization.
.. math::
\begin{eqnarray}
\mu &=& \frac{1}{M} \sum x_i \\
\sigma^2 &=& \frac{1}{M} \sum \left(x_i - \mu\right)^2 \\
\hat{x}_i &=& \frac{x_i - \mu}{\sqrt{\sigma^2 + \epsilon}} \\
y_i &=& \hat{x}_i \gamma + \beta.
\end{eqnarray}
At testing time, the mean and variance values used are those that were computed during training by moving average.
References:
* `Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.
<https://arxiv.org/abs/1502.03167>`_
Args:
x(~nnabla.Variable): N-D array of input.
beta(~nnabla.Variable): N-D array of beta which is learned.
gamma(~nnabla.Variable): N-D array of gamma which is learned.
mean(~nnabla.Variable): N-D array of running mean (modified during forward execution).
variance(~nnabla.Variable): N-D array of running variance (modified during forward execution).
axes(repeated int64): Axes mean and variance are taken.
decay_rate(float): Decay rate of running mean and variance.
eps(float): Tiny value to avoid zero division by std.
batch_stat(bool): Use mini-batch statistics rather than running ones.
output_stat(bool): It true, the batch statistics of mean and variance,
will be returned as Variables. They are also differentiable.
Returns:
Returns batch normalization output as :obj:`~nnabla.Variable`.
If ``output_stat=True``, it also returns the mean and variance
of the mini-batch
* :obj:`~nnabla.Variable`: Output of the batch normalization
* :obj:`~nnabla.Variable`: Mean (if ``output_stat=True`)
* :obj:`~nnabla.Variable`: Variance (if ``output_stat=True`)
See Also:
``nnabla.function_bases.batch_normalization``.
"""
from .function_bases import batch_normalization as batch_normalization_base
n_outputs = 3 if output_stat else 1
assert batch_stat or (not output_stat)
if batch_stat and (mean.parent or variance.parent) is not None:
raise ValueError(
"if batch_stat is True, mean and variable must not have a parent function")
if len(axes) == 1:
return batch_normalization_base(x, beta, gamma, mean, variance,
axes=axes,
decay_rate=decay_rate,
eps=eps,
batch_stat=batch_stat,
n_outputs=n_outputs)
def transpose_and_reshape(x, axes):
transposed = transpose(x, transpose_axes)
return reshape(transposed, [rd(lambda x, y: x * y, transposed.shape[:len(axes)])] + list(
transposed.shape[len(axes):])), transposed.shape
def inverse_transpose_and_reshape(x, axes, variable_shape):
un_reshaped = reshape(
x, list(variable_shape[:len(axes)] + variable_shape[len(axes):]))
return transpose(un_reshaped, inv_transpose_axes)
def get_tranpose_args(ndim, axes):
transpose_axes = [i for i in list(
axes)] + [i for i in range(ndim) if i not in list(axes)]
inv_transpose_axes = np.argsort(transpose_axes).tolist()
return transpose_axes, inv_transpose_axes
transpose_axes, inv_transpose_axes = get_tranpose_args(len(x.shape), axes)
inp, transposed_inp_shape = transpose_and_reshape(x, axes)
beta, transposed_beta_shape = transpose_and_reshape(beta, axes)
gamma, transposed_gamma_shape = transpose_and_reshape(gamma, axes)
mean, transposed_mean_shape = transpose_and_reshape(mean, axes)
variance, transposed_variance_shape = transpose_and_reshape(variance, axes)
if n_outputs == 1:
out = batch_normalization_base(inp, beta, gamma, mean, variance,
axes=[0],
decay_rate=decay_rate,
eps=eps,
batch_stat=batch_stat,
n_outputs=n_outputs)
return inverse_transpose_and_reshape(out, axes, transposed_inp_shape)
out, mean, variance = batch_normalization_base(inp, beta, gamma, mean, variance,
axes=[0],
decay_rate=decay_rate,
eps=eps,
batch_stat=batch_stat,
n_outputs=n_outputs)
out = inverse_transpose_and_reshape(out, axes, transposed_inp_shape)
mean = inverse_transpose_and_reshape(mean, axes, transposed_mean_shape)
variance = inverse_transpose_and_reshape(
variance, axes, transposed_variance_shape)
return out, mean, variance | [
"def",
"batch_normalization",
"(",
"x",
",",
"beta",
",",
"gamma",
",",
"mean",
",",
"variance",
",",
"axes",
"=",
"[",
"1",
"]",
",",
"decay_rate",
"=",
"0.9",
",",
"eps",
"=",
"1e-05",
",",
"batch_stat",
"=",
"True",
",",
"output_stat",
"=",
"False... | r"""
Batch normalization.
.. math::
\begin{eqnarray}
\mu &=& \frac{1}{M} \sum x_i \\
\sigma^2 &=& \frac{1}{M} \sum \left(x_i - \mu\right)^2 \\
\hat{x}_i &=& \frac{x_i - \mu}{\sqrt{\sigma^2 + \epsilon}} \\
y_i &=& \hat{x}_i \gamma + \beta.
\end{eqnarray}
At testing time, the mean and variance values used are those that were computed during training by moving average.
References:
* `Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift.
<https://arxiv.org/abs/1502.03167>`_
Args:
x(~nnabla.Variable): N-D array of input.
beta(~nnabla.Variable): N-D array of beta which is learned.
gamma(~nnabla.Variable): N-D array of gamma which is learned.
mean(~nnabla.Variable): N-D array of running mean (modified during forward execution).
variance(~nnabla.Variable): N-D array of running variance (modified during forward execution).
axes(repeated int64): Axes mean and variance are taken.
decay_rate(float): Decay rate of running mean and variance.
eps(float): Tiny value to avoid zero division by std.
batch_stat(bool): Use mini-batch statistics rather than running ones.
output_stat(bool): It true, the batch statistics of mean and variance,
will be returned as Variables. They are also differentiable.
Returns:
Returns batch normalization output as :obj:`~nnabla.Variable`.
If ``output_stat=True``, it also returns the mean and variance
of the mini-batch
* :obj:`~nnabla.Variable`: Output of the batch normalization
* :obj:`~nnabla.Variable`: Mean (if ``output_stat=True`)
* :obj:`~nnabla.Variable`: Variance (if ``output_stat=True`)
See Also:
``nnabla.function_bases.batch_normalization``. | [
"r",
"Batch",
"normalization",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L278-L380 | train | 223,716 |
sony/nnabla | python/src/nnabla/functions.py | fixed_point_quantize | def fixed_point_quantize(x, sign=True, n=8, delta=2**-4, quantize=True, ste_fine_grained=True, outputs=None):
r"""Fixed Point Quantize
Args:
x (Variable): An input variable.
sign (bool): Indicate the signed number or the unsigned number. Default is true.
n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case.
delta (float): Step size.
quantize (bool): If true, quantize input, otherwise not.
ste_fine_grained (bool): If true, STE is not 1.
Returns:
~nnabla.Variable: N-D array.
See Also:
``nnabla.function_bases.fixed_point_quantize``.
In the forward pass,
.. math::
\begin{equation}
q_i= \left\{
\begin{array}{ll}
max & if \ \ \ x_i > max \\
sign(x_i) \times floor(|x_i| \delta^{-1} + 2^{-1}) \times \delta & if \ \ min \le x_i \le max \\
min & if \ \ x_i < min \\
\end{array} \right.,
\end{equation}
where :math:`\delta` is the step size,
:math:`(min, max) :=(- (2^{n-1} - 1)\delta, (2^{n-1} - 1)\delta)` if :math:`sign` is true,
:math:`(min, max) := (0, (2^n - 1) \delta)` otherwise, and
:math:`n` is the total bit-width used.
In the backward pass when using `ste_fine_grained` as false,
.. math::
\begin{equation}
\frac{\partial q_i}{\partial x_i} = 1.
\end{equation}
In the backward pass when using `ste_fine_grained` as true,
.. math::
\begin{equation}
\frac{\partial q_i}{\partial x_i}= \left\{
\begin{array}{ll}
0 & if \ \ \ x_i > max \\
1 & if \ \ min \le x_i \le max \\
0 & if \ \ x_i < min \\
\end{array} \right..
\end{equation}
.. note::
Quantized values are stored as floating point number, since this function is for simulation purposes.
"""
from .function_bases import fixed_point_quantize as fixed_point_quantize_base
if not quantize:
return x
return fixed_point_quantize_base(x, sign, n, delta, ste_fine_grained, outputs=outputs) | python | def fixed_point_quantize(x, sign=True, n=8, delta=2**-4, quantize=True, ste_fine_grained=True, outputs=None):
r"""Fixed Point Quantize
Args:
x (Variable): An input variable.
sign (bool): Indicate the signed number or the unsigned number. Default is true.
n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case.
delta (float): Step size.
quantize (bool): If true, quantize input, otherwise not.
ste_fine_grained (bool): If true, STE is not 1.
Returns:
~nnabla.Variable: N-D array.
See Also:
``nnabla.function_bases.fixed_point_quantize``.
In the forward pass,
.. math::
\begin{equation}
q_i= \left\{
\begin{array}{ll}
max & if \ \ \ x_i > max \\
sign(x_i) \times floor(|x_i| \delta^{-1} + 2^{-1}) \times \delta & if \ \ min \le x_i \le max \\
min & if \ \ x_i < min \\
\end{array} \right.,
\end{equation}
where :math:`\delta` is the step size,
:math:`(min, max) :=(- (2^{n-1} - 1)\delta, (2^{n-1} - 1)\delta)` if :math:`sign` is true,
:math:`(min, max) := (0, (2^n - 1) \delta)` otherwise, and
:math:`n` is the total bit-width used.
In the backward pass when using `ste_fine_grained` as false,
.. math::
\begin{equation}
\frac{\partial q_i}{\partial x_i} = 1.
\end{equation}
In the backward pass when using `ste_fine_grained` as true,
.. math::
\begin{equation}
\frac{\partial q_i}{\partial x_i}= \left\{
\begin{array}{ll}
0 & if \ \ \ x_i > max \\
1 & if \ \ min \le x_i \le max \\
0 & if \ \ x_i < min \\
\end{array} \right..
\end{equation}
.. note::
Quantized values are stored as floating point number, since this function is for simulation purposes.
"""
from .function_bases import fixed_point_quantize as fixed_point_quantize_base
if not quantize:
return x
return fixed_point_quantize_base(x, sign, n, delta, ste_fine_grained, outputs=outputs) | [
"def",
"fixed_point_quantize",
"(",
"x",
",",
"sign",
"=",
"True",
",",
"n",
"=",
"8",
",",
"delta",
"=",
"2",
"**",
"-",
"4",
",",
"quantize",
"=",
"True",
",",
"ste_fine_grained",
"=",
"True",
",",
"outputs",
"=",
"None",
")",
":",
"from",
".",
... | r"""Fixed Point Quantize
Args:
x (Variable): An input variable.
sign (bool): Indicate the signed number or the unsigned number. Default is true.
n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case.
delta (float): Step size.
quantize (bool): If true, quantize input, otherwise not.
ste_fine_grained (bool): If true, STE is not 1.
Returns:
~nnabla.Variable: N-D array.
See Also:
``nnabla.function_bases.fixed_point_quantize``.
In the forward pass,
.. math::
\begin{equation}
q_i= \left\{
\begin{array}{ll}
max & if \ \ \ x_i > max \\
sign(x_i) \times floor(|x_i| \delta^{-1} + 2^{-1}) \times \delta & if \ \ min \le x_i \le max \\
min & if \ \ x_i < min \\
\end{array} \right.,
\end{equation}
where :math:`\delta` is the step size,
:math:`(min, max) :=(- (2^{n-1} - 1)\delta, (2^{n-1} - 1)\delta)` if :math:`sign` is true,
:math:`(min, max) := (0, (2^n - 1) \delta)` otherwise, and
:math:`n` is the total bit-width used.
In the backward pass when using `ste_fine_grained` as false,
.. math::
\begin{equation}
\frac{\partial q_i}{\partial x_i} = 1.
\end{equation}
In the backward pass when using `ste_fine_grained` as true,
.. math::
\begin{equation}
\frac{\partial q_i}{\partial x_i}= \left\{
\begin{array}{ll}
0 & if \ \ \ x_i > max \\
1 & if \ \ min \le x_i \le max \\
0 & if \ \ x_i < min \\
\end{array} \right..
\end{equation}
.. note::
Quantized values are stored as floating point number, since this function is for simulation purposes. | [
"r",
"Fixed",
"Point",
"Quantize"
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L424-L488 | train | 223,717 |
sony/nnabla | python/src/nnabla/functions.py | pow2_quantize | def pow2_quantize(x, sign=True, with_zero=True, n=8, m=1, quantize=True, ste_fine_grained=True, outputs=None):
r"""Pow2 Quantize
Args:
x (Variable): An input variable.
sign (bool): Indicate the signed number or the unsigned number. Default is true.
with_zero (bool): Indicate using zero as a quantized value. Default is true. Note that `zero` consumes one bit.
n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case. Default is 8.
m (int): :math:`2^m` is the upper bound of the dynamic range and :math:`-2^m` is the lower bound, :math:`m \in \mathcal{Z}`. Default is 1.
quantize (bool): If true, quantize input, otherwise not.
ste_fine_grained (bool): If true, STE is not 1.
Returns:
~nnabla.Variable: N-D array.
See Also:
``nnabla.function_bases.pow2_quantize``.
In the forward pass of `signed` case,
.. math::
q_i= \left\{
\begin{array}{ll}
max_{+} & if \ \ \overline{q_i} > max_{+} \\
\overline{q_i} & if \ \ min_{+} \le \overline{q_i} \le max_{+} \\
min_{+} & if \ \ 0 \le \overline{q_i} < min_{+} \\
min_{-} & if \ \ min_{-} < \overline{q_i} < 0 \\
\overline{q_i} & if \ \ max_{-} \le \overline{q_i} \le min_{-}\\
max_{-} & if \ \ \overline{q_i} < max_{-} \\
\end{array} \right.,
where
.. math::
&& max_{+} = 2^{m}, min_{+} = 2^{m - (2^{n-1} - 1)},\\
&& max_{-} = -2^{m}, min_{-} = -2^{m - (2^{n-1} - 1)},\\
&& \overline{q_i} = sign(x_i) \times 2^{round(\log_2 |x_i|)}.
This quantization uses the geometric mean between two power-of-two numbers
as quantization threshold.
In the forward pass of `unsigned` case,
.. math::
q_i= \left\{
\begin{array}{ll}
max & if \ \ \overline{q_i} > max \\
\overline{q_i} & if \ \ min \le \overline{q_i} \le max \\
min & if \ \ 0 < \overline{q_i} < min \\
\end{array} \right.,
where
.. math::
&& max = 2^{m}, min = 2^{m - (2^{n} - 1)},\\
&& \overline{q_i} = 2^{int(\log_2 |x_i|)}.
When using `with_zero` as true, a pruning threshold is used to round an input to
0 or :math:`min`. The pruning threshold is defined in this function as the following,
.. math::
pruning\ threshold = min \times 2^{-\frac{1}{2}}.
If an absolute value of the input is lesser than this value, the input is rounded to 0, otherwise :math:`min`.
In the backward pass when using ste_fine_grained as false,
.. math::
\frac{\partial q_i}{\partial x_i} = 1.
In the backward pass when using ste_fine_grained as true,
.. math::
\frac{\partial q_i}{\partial x_i}= \left\{
\begin{array}{ll}
0 & if \ \ \overline{q_i} > max_{+} \\
1 & if \ \ otherwise \\
0 & if \ \ \overline{q_i} < max_{-} \\
\end{array} \right..
"""
from .function_bases import pow2_quantize as pow2_quantize_base
if not quantize:
return x
return pow2_quantize_base(x, sign, with_zero, n, m, ste_fine_grained, outputs=outputs) | python | def pow2_quantize(x, sign=True, with_zero=True, n=8, m=1, quantize=True, ste_fine_grained=True, outputs=None):
r"""Pow2 Quantize
Args:
x (Variable): An input variable.
sign (bool): Indicate the signed number or the unsigned number. Default is true.
with_zero (bool): Indicate using zero as a quantized value. Default is true. Note that `zero` consumes one bit.
n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case. Default is 8.
m (int): :math:`2^m` is the upper bound of the dynamic range and :math:`-2^m` is the lower bound, :math:`m \in \mathcal{Z}`. Default is 1.
quantize (bool): If true, quantize input, otherwise not.
ste_fine_grained (bool): If true, STE is not 1.
Returns:
~nnabla.Variable: N-D array.
See Also:
``nnabla.function_bases.pow2_quantize``.
In the forward pass of `signed` case,
.. math::
q_i= \left\{
\begin{array}{ll}
max_{+} & if \ \ \overline{q_i} > max_{+} \\
\overline{q_i} & if \ \ min_{+} \le \overline{q_i} \le max_{+} \\
min_{+} & if \ \ 0 \le \overline{q_i} < min_{+} \\
min_{-} & if \ \ min_{-} < \overline{q_i} < 0 \\
\overline{q_i} & if \ \ max_{-} \le \overline{q_i} \le min_{-}\\
max_{-} & if \ \ \overline{q_i} < max_{-} \\
\end{array} \right.,
where
.. math::
&& max_{+} = 2^{m}, min_{+} = 2^{m - (2^{n-1} - 1)},\\
&& max_{-} = -2^{m}, min_{-} = -2^{m - (2^{n-1} - 1)},\\
&& \overline{q_i} = sign(x_i) \times 2^{round(\log_2 |x_i|)}.
This quantization uses the geometric mean between two power-of-two numbers
as quantization threshold.
In the forward pass of `unsigned` case,
.. math::
q_i= \left\{
\begin{array}{ll}
max & if \ \ \overline{q_i} > max \\
\overline{q_i} & if \ \ min \le \overline{q_i} \le max \\
min & if \ \ 0 < \overline{q_i} < min \\
\end{array} \right.,
where
.. math::
&& max = 2^{m}, min = 2^{m - (2^{n} - 1)},\\
&& \overline{q_i} = 2^{int(\log_2 |x_i|)}.
When using `with_zero` as true, a pruning threshold is used to round an input to
0 or :math:`min`. The pruning threshold is defined in this function as the following,
.. math::
pruning\ threshold = min \times 2^{-\frac{1}{2}}.
If an absolute value of the input is lesser than this value, the input is rounded to 0, otherwise :math:`min`.
In the backward pass when using ste_fine_grained as false,
.. math::
\frac{\partial q_i}{\partial x_i} = 1.
In the backward pass when using ste_fine_grained as true,
.. math::
\frac{\partial q_i}{\partial x_i}= \left\{
\begin{array}{ll}
0 & if \ \ \overline{q_i} > max_{+} \\
1 & if \ \ otherwise \\
0 & if \ \ \overline{q_i} < max_{-} \\
\end{array} \right..
"""
from .function_bases import pow2_quantize as pow2_quantize_base
if not quantize:
return x
return pow2_quantize_base(x, sign, with_zero, n, m, ste_fine_grained, outputs=outputs) | [
"def",
"pow2_quantize",
"(",
"x",
",",
"sign",
"=",
"True",
",",
"with_zero",
"=",
"True",
",",
"n",
"=",
"8",
",",
"m",
"=",
"1",
",",
"quantize",
"=",
"True",
",",
"ste_fine_grained",
"=",
"True",
",",
"outputs",
"=",
"None",
")",
":",
"from",
... | r"""Pow2 Quantize
Args:
x (Variable): An input variable.
sign (bool): Indicate the signed number or the unsigned number. Default is true.
with_zero (bool): Indicate using zero as a quantized value. Default is true. Note that `zero` consumes one bit.
n (int): Bit width used. Note that `sign` consumes one bit. :math:`n-1` is used for number representation in `signed` case. Default is 8.
m (int): :math:`2^m` is the upper bound of the dynamic range and :math:`-2^m` is the lower bound, :math:`m \in \mathcal{Z}`. Default is 1.
quantize (bool): If true, quantize input, otherwise not.
ste_fine_grained (bool): If true, STE is not 1.
Returns:
~nnabla.Variable: N-D array.
See Also:
``nnabla.function_bases.pow2_quantize``.
In the forward pass of `signed` case,
.. math::
q_i= \left\{
\begin{array}{ll}
max_{+} & if \ \ \overline{q_i} > max_{+} \\
\overline{q_i} & if \ \ min_{+} \le \overline{q_i} \le max_{+} \\
min_{+} & if \ \ 0 \le \overline{q_i} < min_{+} \\
min_{-} & if \ \ min_{-} < \overline{q_i} < 0 \\
\overline{q_i} & if \ \ max_{-} \le \overline{q_i} \le min_{-}\\
max_{-} & if \ \ \overline{q_i} < max_{-} \\
\end{array} \right.,
where
.. math::
&& max_{+} = 2^{m}, min_{+} = 2^{m - (2^{n-1} - 1)},\\
&& max_{-} = -2^{m}, min_{-} = -2^{m - (2^{n-1} - 1)},\\
&& \overline{q_i} = sign(x_i) \times 2^{round(\log_2 |x_i|)}.
This quantization uses the geometric mean between two power-of-two numbers
as quantization threshold.
In the forward pass of `unsigned` case,
.. math::
q_i= \left\{
\begin{array}{ll}
max & if \ \ \overline{q_i} > max \\
\overline{q_i} & if \ \ min \le \overline{q_i} \le max \\
min & if \ \ 0 < \overline{q_i} < min \\
\end{array} \right.,
where
.. math::
&& max = 2^{m}, min = 2^{m - (2^{n} - 1)},\\
&& \overline{q_i} = 2^{int(\log_2 |x_i|)}.
When using `with_zero` as true, a pruning threshold is used to round an input to
0 or :math:`min`. The pruning threshold is defined in this function as the following,
.. math::
pruning\ threshold = min \times 2^{-\frac{1}{2}}.
If an absolute value of the input is lesser than this value, the input is rounded to 0, otherwise :math:`min`.
In the backward pass when using ste_fine_grained as false,
.. math::
\frac{\partial q_i}{\partial x_i} = 1.
In the backward pass when using ste_fine_grained as true,
.. math::
\frac{\partial q_i}{\partial x_i}= \left\{
\begin{array}{ll}
0 & if \ \ \overline{q_i} > max_{+} \\
1 & if \ \ otherwise \\
0 & if \ \ \overline{q_i} < max_{-} \\
\end{array} \right.. | [
"r",
"Pow2",
"Quantize"
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L491-L584 | train | 223,718 |
sony/nnabla | python/src/nnabla/functions.py | clip_by_value | def clip_by_value(x, min, max):
r"""Clip inputs by values.
.. math::
y = \begin{cases}
max & (x > max) \\
x & (otherwise) \\
min & (x < min)
\end{cases}.
Args:
x (Variable): An input variable.
min (Variable): A min variable by which `x` is clipped. Note that the shape of `min` must be the same as `x`'s.
max (Variable): A max variable by which `x` is clipped. Note that the shape of `max` must be the same as `x`'s
Returns:
~nnabla.Variable: N-D array.
"""
from .function_bases import maximum2 as maximum2_base
from .function_bases import minimum2 as minimum2_base
return minimum2_base(maximum2_base(x, min), max) | python | def clip_by_value(x, min, max):
r"""Clip inputs by values.
.. math::
y = \begin{cases}
max & (x > max) \\
x & (otherwise) \\
min & (x < min)
\end{cases}.
Args:
x (Variable): An input variable.
min (Variable): A min variable by which `x` is clipped. Note that the shape of `min` must be the same as `x`'s.
max (Variable): A max variable by which `x` is clipped. Note that the shape of `max` must be the same as `x`'s
Returns:
~nnabla.Variable: N-D array.
"""
from .function_bases import maximum2 as maximum2_base
from .function_bases import minimum2 as minimum2_base
return minimum2_base(maximum2_base(x, min), max) | [
"def",
"clip_by_value",
"(",
"x",
",",
"min",
",",
"max",
")",
":",
"from",
".",
"function_bases",
"import",
"maximum2",
"as",
"maximum2_base",
"from",
".",
"function_bases",
"import",
"minimum2",
"as",
"minimum2_base",
"return",
"minimum2_base",
"(",
"maximum2_... | r"""Clip inputs by values.
.. math::
y = \begin{cases}
max & (x > max) \\
x & (otherwise) \\
min & (x < min)
\end{cases}.
Args:
x (Variable): An input variable.
min (Variable): A min variable by which `x` is clipped. Note that the shape of `min` must be the same as `x`'s.
max (Variable): A max variable by which `x` is clipped. Note that the shape of `max` must be the same as `x`'s
Returns:
~nnabla.Variable: N-D array. | [
"r",
"Clip",
"inputs",
"by",
"values",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L587-L609 | train | 223,719 |
def interpolate(x, scale=None, output_size=None, mode='linear', align_corners=None):
    '''Resize an N-D array with interpolation.

    The number of resized spatial dimensions is ``len(scale)`` (or
    ``len(output_size)``), and those dimensions are taken from the
    trailing axes of ``x``.  When ``scale`` is given, the output size of
    each spatial axis is ``floor(scale[i] * x.shape[i - len(scale)])``.

    Example:

    .. code-block:: python

        import numpy as np
        import nnabla as nn
        import nnabla.functions as F

        x_data = np.random.rand(64, 3, 224, 224)
        x = nn.Variable.from_numpy_array(x_data)

        # Resize by scales
        y = F.interpolate(x, scale=(2, 2), mode='linear')
        print(y.shape)  # (64, 3, 448, 448)

        # Resize to a size
        y2 = F.interpolate(x, output_size=(320, 257), mode='linear')
        print(y2.shape)  # (64, 3, 320, 257)

    Args:
        x(~nnabla.Variable): N-D array with an arbitrary number of dimensions.
        scale(tuple of ints): Scale factors along axes. If omitted,
            ``output_size`` must be specified instead.
        output_size(tuple of ints): Output sizes for the spatial axes. If
            omitted, ``scale`` must be specified instead and the sizes are
            derived from it.
        mode(str): Interpolation mode chosen from ('linear'|'nearest').
            The default is 'linear'.
        align_corners(bool): If true, the corner pixels of the input and
            output arrays are aligned so that they share the same values.
            Defaults to ``True`` when mode is 'linear', ``False`` otherwise.

    Returns:
        ~nnabla.Variable: N-D array.
    '''
    import math
    from .function_bases import interpolate as interpolate_base

    if scale is None and output_size is None:
        raise ValueError('Either scale or output_size must be given')
    if output_size is None:
        # Derive per-axis output sizes from the trailing spatial axes.
        spatial_shape = x.shape[-len(scale):]
        output_size = [int(math.floor(factor * dim))
                       for dim, factor in zip(spatial_shape, scale)]
    if align_corners is None:
        # Corner alignment only makes sense for linear interpolation.
        align_corners = (mode == 'linear')
    return interpolate_base(x, output_size, mode, align_corners)
'''
Resize an ND array with interpolation.
Scaling factors for spatial dimensions are determined by either
``scale`` or ``output_size``.
``nd = len(scale)`` or ``nd = len(output_size)`` determines the number of
spatial dimensions, and the last ``nd`` dimensions of the input ``x`` are
considered as the spatial dimensions to be resized.
If ``scale`` is given, the ``output_size`` is calculated by
``output_size[i] = floor(scale[i] * x.shape[i - len(scale)])``.
Example:
.. code-block:: python
import numpy as np
import nnabla as nn
import nnabla.functions as F
x_data = np.random.rand(64, 3, 224, 224)
x = nn.Variable.from_numpy_array(x_data)
# Resize by scales
y = F.interpolate(x, scale=(2, 2), mode='linear')
print(y.shape) # (64, 3, 448, 448)
y.forward()
print(y.d) # Print output
# Resize to a size
y2 = F.interpolate(x, output_size=(320, 257), mode='linear')
print(y2.shape) # (64, 3, 320, 257)
y2.forward()
print(y2.d) # Print output
Args:
x(~nnabla.Variable): N-D array with an arbitrary number of dimensions.
scale(tuple of ints): Scale factors along axes. The default is
``None``, and if this is omitted, ``output_size`` must be specified.
output_size(tuple of ints): The output sizes for axes. If this is
given, the scale factors are determined by the output sizes and the
input sizes. The default is ``None``, and if this is omitted,
``scale`` must be specified.
mode(str): Interpolation mode chosen from ('linear'|'nearest').
The default is 'linear'.
align_corners(bool): If true, the corner pixels of input and output
arrays are aligned, such that the output corner pixels have the
same values with the input corner pixels.
The default is ``None``, and it becomes ``True`` if mode is
'linear', otherwise ``False``.
Returns:
~nnabla.Variable: N-D array.
'''
from .function_bases import interpolate as interpolate_base
import math
if scale is None and output_size is None:
raise ValueError('Either scale or output_size must be given')
elif output_size is None:
output_size = [int(math.floor(s * d))
for d, s in zip(x.shape[-len(scale):], scale)]
if align_corners is None:
if mode == 'linear':
align_corners = True
else:
align_corners = False
return interpolate_base(x, output_size, mode, align_corners) | [
"def",
"interpolate",
"(",
"x",
",",
"scale",
"=",
"None",
",",
"output_size",
"=",
"None",
",",
"mode",
"=",
"'linear'",
",",
"align_corners",
"=",
"None",
")",
":",
"from",
".",
"function_bases",
"import",
"interpolate",
"as",
"interpolate_base",
"import",... | Resize an ND array with interpolation.
Scaling factors for spatial dimensions are determined by either
``scale`` or ``output_size``.
``nd = len(scale)`` or ``nd = len(output_size)`` determines the number of
spatial dimensions, and the last ``nd`` dimensions of the input ``x`` are
considered as the spatial dimensions to be resized.
If ``scale`` is given, the ``output_size`` is calculated by
``output_size[i] = floor(scale[i] * x.shape[i - len(scale)])``.
Example:
.. code-block:: python
import numpy as np
import nnabla as nn
import nnabla.functions as F
x_data = np.random.rand(64, 3, 224, 224)
x = nn.Variable.from_numpy_array(x_data)
# Resize by scales
y = F.interpolate(x, scale=(2, 2), mode='linear')
print(y.shape) # (64, 3, 448, 448)
y.forward()
print(y.d) # Print output
# Resize to a size
y2 = F.interpolate(x, output_size=(320, 257), mode='linear')
print(y2.shape) # (64, 3, 320, 257)
y2.forward()
print(y2.d) # Print output
Args:
x(~nnabla.Variable): N-D array with an arbitrary number of dimensions.
scale(tuple of ints): Scale factors along axes. The default is
``None``, and if this is omitted, ``output_size`` must be specified.
output_size(tuple of ints): The output sizes for axes. If this is
given, the scale factors are determined by the output sizes and the
input sizes. The default is ``None``, and if this is omitted,
``scale`` must be specified.
mode(str): Interpolation mode chosen from ('linear'|'nearest').
The default is 'linear'.
align_corners(bool): If true, the corner pixels of input and output
arrays are aligned, such that the output corner pixels have the
same values with the input corner pixels.
The default is ``None``, and it becomes ``True`` if mode is
'linear', otherwise ``False``.
Returns:
~nnabla.Variable: N-D array. | [
"Resize",
"an",
"ND",
"array",
"with",
"interpolation",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L654-L724 | train | 223,720 |
def sort(x, axis=-1, reverse=False, with_index=False, only_index=False):
    """Sort the elements of `x` along a given `axis` in ascending order
    by value. A negative `axis` counts from the last dimension of `x`, so
    the default of -1 sorts along the last dimension. If `reverse` is
    True, the elements are sorted in descending order instead.

    If `with_index` is True, the result is a tuple ``(sorted, indices)``,
    or only ``indices`` if `only_index` is True. Setting `only_index` to
    True implies that `with_index` is also True.

    .. code-block:: python

        import numpy as np
        import nnabla as nn
        import nnabla.functions as F

        nn.set_auto_forward(True)
        x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4))

        sorted = F.sort(x)
        assert np.allclose(sorted.d, np.sort(x.d))

        sorted, indices = F.sort(x, with_index=True)
        assert np.allclose(sorted.d, np.sort(x.d))
        assert np.all(indices.d == np.argsort(x.d))

        indices = F.sort(x, only_index=True)
        assert np.all(indices.d == np.argsort(x.d))

    Args:
        x(~nnabla.Variable): N-D array
        axis(int): Axis along which to sort.
        reverse(bool): Sort in descending order.
        with_index(bool): Return sorted values and indices.
        only_index(bool): Return only the sort indices.

    Returns: :obj:`~nnabla.Variable` `sorted` or :obj:`~nnabla.Variable` `indices` or (:obj:`~nnabla.Variable` `sorted`, :obj:`~nnabla.Variable` `indices`)
    """
    from .function_bases import sort as sort_base
    # Two outputs (values + indices) only when both are requested.
    if with_index and not only_index:
        n_outputs = 2
    else:
        n_outputs = 1
    return sort_base(x, axis, reverse, with_index, only_index, n_outputs)
"""Sorts the elements of `x` along a given `axis` in ascending order
by value. A negative `axis` counts from the last dimension of `x`,
so the default of -1 sorts along the last dimension. If `reverse`
is True, then the elements are sorted in descending order.
If `with_index` is True, result is a tuple ``(sorted, indices)``
or only ``indices`` if `only_index` is True. Setting `only_index`
to True implies that `with_index` is also True.
.. code-block:: python
import numpy as np
import nnabla as nn
import nnabla.functions as F
nn.set_auto_forward(True)
x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4))
sorted = F.sort(x)
assert np.allclose(sorted.d, np.sort(x.d))
sorted, indices = F.sort(x, with_index=True)
assert np.allclose(sorted.d, np.sort(x.d))
assert np.all(indices.d == np.argsort(x.d))
indices = F.sort(x, only_index=True)
assert np.all(indices.d == np.argsort(x.d))
Args:
x(~nnabla.Variable): N-D array
axis(int): Axis along which to sort.
reverse(bool): Sort in descending order.
with_index(bool): Return sorted values and index.
only_index(bool): Return only the sort index.
Returns: :obj:`~nnabla.Variable` `sorted` or :obj:`~nnabla.Variable` `indices` or (:obj:`~nnabla.Variable` `sorted`, :obj:`~nnabla.Variable` `indices`)
"""
from .function_bases import sort as sort_base
n_outputs = 2 if with_index and not only_index else 1
return sort_base(x, axis, reverse, with_index, only_index, n_outputs) | [
"def",
"sort",
"(",
"x",
",",
"axis",
"=",
"-",
"1",
",",
"reverse",
"=",
"False",
",",
"with_index",
"=",
"False",
",",
"only_index",
"=",
"False",
")",
":",
"from",
".",
"function_bases",
"import",
"sort",
"as",
"sort_base",
"n_outputs",
"=",
"2",
... | Sorts the elements of `x` along a given `axis` in ascending order
by value. A negative `axis` counts from the last dimension of `x`,
so the default of -1 sorts along the last dimension. If `reverse`
is True, then the elements are sorted in descending order.
If `with_index` is True, result is a tuple ``(sorted, indices)``
or only ``indices`` if `only_index` is True. Setting `only_index`
to True implies that `with_index` is also True.
.. code-block:: python
import numpy as np
import nnabla as nn
import nnabla.functions as F
nn.set_auto_forward(True)
x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4))
sorted = F.sort(x)
assert np.allclose(sorted.d, np.sort(x.d))
sorted, indices = F.sort(x, with_index=True)
assert np.allclose(sorted.d, np.sort(x.d))
assert np.all(indices.d == np.argsort(x.d))
indices = F.sort(x, only_index=True)
assert np.all(indices.d == np.argsort(x.d))
Args:
x(~nnabla.Variable): N-D array
axis(int): Axis along which to sort.
reverse(bool): Sort in descending order.
with_index(bool): Return sorted values and index.
only_index(bool): Return only the sort index.
Returns: :obj:`~nnabla.Variable` `sorted` or :obj:`~nnabla.Variable` `indices` or (:obj:`~nnabla.Variable` `sorted`, :obj:`~nnabla.Variable` `indices`) | [
"Sorts",
"the",
"elements",
"of",
"x",
"along",
"a",
"given",
"axis",
"in",
"ascending",
"order",
"by",
"value",
".",
"A",
"negative",
"axis",
"counts",
"from",
"the",
"last",
"dimension",
"of",
"x",
"so",
"the",
"default",
"of",
"-",
"1",
"sorts",
"al... | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/functions.py#L727-L768 | train | 223,721 |
def download(url, output_file=None, open_file=True, allow_overwrite=False):
    '''Download a file from URL.

    The payload is streamed to disk in chunks (via a temporary
    ``<cache>.part`` file that is renamed on success), so a partially
    downloaded file never masquerades as a complete cache entry and the
    whole file is never held in memory at once.

    Args:
        url (str): URL.
        output_file (str, optional): If given, the downloaded file is written to the given path.
        open_file (bool): If True, it returns an opened file stream of the downloaded file.
        allow_overwrite (bool): If True, it overwrites an existing file.

    Returns:
        Returns file object if open_file is True, otherwise None.
    '''
    filename = url.split('/')[-1]
    if output_file is None:
        cache = os.path.join(get_data_home(), filename)
    else:
        cache = output_file
    if os.path.exists(cache) and not allow_overwrite:
        logger.info("> {} already exists.".format(cache))
        logger.info("> If you have any issue when using this file, ")
        logger.info("> manually remove the file and try download again.")
    else:
        r = request.urlopen(url)
        try:
            try:
                if six.PY2:
                    content_length = int(r.info().dict['content-length'])
                elif six.PY3:
                    content_length = int(r.info()['Content-Length'])
            except Exception:
                # Content-Length missing or malformed; progress total unknown.
                content_length = 0
            chunk_size = 1000000
            # Stream chunks straight to disk. The previous implementation
            # accumulated the whole payload with ``content += data``, which
            # is quadratic in the number of chunks and holds the entire
            # file in RAM before writing it out.
            tmp_path = cache + '.part'
            with tqdm(total=content_length, desc=filename, unit='B',
                      unit_scale=True, unit_divisor=1024) as t:
                with open(tmp_path, 'wb') as f:
                    while True:
                        data = r.read(chunk_size)
                        if not data:
                            break
                        f.write(data)
                        t.update(len(data))
            # Atomically publish the finished download so a failed run
            # never leaves a truncated file at the cache path.
            if os.path.exists(cache):
                os.remove(cache)
            os.rename(tmp_path, cache)
        finally:
            r.close()
    if not open_file:
        return
    return open(cache, 'rb')
'''Download a file from URL.
Args:
url (str): URL.
output_file (str, optional): If given, the downloaded file is written to the given path.
open_file (bool): If True, it returns an opened file stream of the downloaded file.
allow_overwrite (bool): If True, it overwrites an existing file.
Returns:
Returns file object if open_file is True, otherwise None.
'''
filename = url.split('/')[-1]
if output_file is None:
cache = os.path.join(get_data_home(), filename)
else:
cache = output_file
if os.path.exists(cache) and not allow_overwrite:
logger.info("> {} already exists.".format(cache))
logger.info("> If you have any issue when using this file, ")
logger.info("> manually remove the file and try download again.")
else:
r = request.urlopen(url)
try:
if six.PY2:
content_length = int(r.info().dict['content-length'])
elif six.PY3:
content_length = int(r.info()['Content-Length'])
except:
content_length = 0
unit = 1000000
content = b''
with tqdm(total=content_length, desc=filename, unit='B', unit_scale=True, unit_divisor=1024) as t:
while True:
data = r.read(unit)
l = len(data)
t.update(l)
if l == 0:
break
content += data
with open(cache, 'wb') as f:
f.write(content)
if not open_file:
return
return open(cache, 'rb') | [
"def",
"download",
"(",
"url",
",",
"output_file",
"=",
"None",
",",
"open_file",
"=",
"True",
",",
"allow_overwrite",
"=",
"False",
")",
":",
"filename",
"=",
"url",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"if",
"output_file",
"is",
"None... | Download a file from URL.
Args:
url (str): URL.
output_file (str, optional): If given, the downloaded file is written to the given path.
open_file (bool): If True, it returns an opened file stream of the downloaded file.
allow_overwrite (bool): If True, it overwrites an existing file.
Returns:
Returns file object if open_file is True, otherwise None. | [
"Download",
"a",
"file",
"from",
"URL",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/download.py#L35-L80 | train | 223,722 |
def imread(path, grayscale=False, size=None, interpolate="bilinear",
           channel_first=False, as_uint16=False, num_channels=-1):
    """Read an image using the cv2 backend.

    Args:
        path (str or 'file object'): File path or object to read.
        grayscale (bool): If True, the image is read as grayscale.
        size (tuple of int): (width, height). If None, the output shape
            depends on the file being read.
        interpolate (str): Must be one of ["nearest", "box", "bilinear",
            "hamming", "bicubic", "lanczos"].
        channel_first (bool): Specifies whether the output shape is
            (channel, height, width) or, by default (False),
            (height, width, channel).
        as_uint16 (bool): If True, the image is read as uint16.
        num_channels (int): Channel size of the output array.
            Default is -1, which preserves the raw image shape.

    Returns:
        numpy.ndarray
    """
    _imread_before(grayscale, num_channels)

    if grayscale:
        read_mode = cv2.IMREAD_GRAYSCALE
    else:
        read_mode = cv2.IMREAD_UNCHANGED
    img = _imread_helper(path, read_mode)

    if as_uint16 and img.dtype != np.uint16:
        # Only uint8 -> uint16 promotion is considered safe.
        if img.dtype != np.uint8:
            raise ValueError(
                "casting {} to uint16 is not safe.".format(img.dtype))
        logger.warning("You want to read image as uint16, but the original bit-depth is 8 bit."
                       "All pixel values are simply increased by 256 times.")
        img = img.astype(np.uint16) * 256

    img = _cvtColor_helper(img, num_channels)
    return _imread_after(img, size, interpolate, channel_first, imresize)
channel_first=False, as_uint16=False, num_channels=-1):
"""
Read image by cv2 module.
Args:
path (str or 'file object'): File path or object to read.
grayscale (bool):
size (tuple of int):
(width, height).
If None, output img shape depends on the files to read.
channel_first (bool):
This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
Default value is False, which means the img shape is (height, width, channel).
interpolate (str):
must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"].
as_uint16 (bool):
If True, this function reads image as uint16.
num_channels (int):
channel size of output array.
Default is -1 which preserves raw image shape.
Returns:
numpy.ndarray
"""
_imread_before(grayscale, num_channels)
r_mode = cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_UNCHANGED
img = _imread_helper(path, r_mode)
if as_uint16 and img.dtype != np.uint16:
if img.dtype == np.uint8:
logger.warning("You want to read image as uint16, but the original bit-depth is 8 bit."
"All pixel values are simply increased by 256 times.")
img = img.astype(np.uint16) * 256
else:
raise ValueError(
"casting {} to uint16 is not safe.".format(img.dtype))
img = _cvtColor_helper(img, num_channels)
img = _imread_after(img, size, interpolate, channel_first, imresize)
return img | [
"def",
"imread",
"(",
"path",
",",
"grayscale",
"=",
"False",
",",
"size",
"=",
"None",
",",
"interpolate",
"=",
"\"bilinear\"",
",",
"channel_first",
"=",
"False",
",",
"as_uint16",
"=",
"False",
",",
"num_channels",
"=",
"-",
"1",
")",
":",
"_imread_be... | Read image by cv2 module.
Args:
path (str or 'file object'): File path or object to read.
grayscale (bool):
size (tuple of int):
(width, height).
If None, output img shape depends on the files to read.
channel_first (bool):
This argument specifies the shape of img is whether (height, width, channel) or (channel, height, width).
Default value is False, which means the img shape is (height, width, channel).
interpolate (str):
must be one of ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"].
as_uint16 (bool):
If True, this function reads image as uint16.
num_channels (int):
channel size of output array.
Default is -1 which preserves raw image shape.
Returns:
numpy.ndarray | [
"Read",
"image",
"by",
"cv2",
"module",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/image_utils/cv2_utils.py#L105-L149 | train | 223,723 |
sony/nnabla | python/src/nnabla/utils/learning_rate_scheduler.py | PolynomialScheduler.get_learning_rate | def get_learning_rate(self, iter):
'''
Get learning rate with polymomial decay based on current iteration.
Args:
iter (int): current iteration (starting with 0).
Returns:
float: Learning rate
'''
return self.init_lr * ((1.0 - iter * 1.0 / self.max_iter) ** self.power) | python | def get_learning_rate(self, iter):
'''
Get learning rate with polynomial decay based on current iteration.
Args:
iter (int): current iteration (starting with 0).
Returns:
float: Learning rate
'''
return self.init_lr * ((1.0 - iter * 1.0 / self.max_iter) ** self.power) | [
"def",
"get_learning_rate",
"(",
"self",
",",
"iter",
")",
":",
"return",
"self",
".",
"init_lr",
"*",
"(",
"(",
"1.0",
"-",
"iter",
"*",
"1.0",
"/",
"self",
".",
"max_iter",
")",
"**",
"self",
".",
"power",
")"
] | Get learning rate with polymomial decay based on current iteration.
Args:
iter (int): current iteration (starting with 0).
Returns:
float: Learning rate | [
"Get",
"learning",
"rate",
"with",
"polymomial",
"decay",
"based",
"on",
"current",
"iteration",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/learning_rate_scheduler.py#L59-L69 | train | 223,724 |
sony/nnabla | python/src/nnabla/utils/learning_rate_scheduler.py | CosineScheduler.get_learning_rate | def get_learning_rate(self, iter):
'''
Get learning rate with cosine decay based on current iteration.
Args:
iter (int): Current iteration (starting with 0).
Returns:
float: Learning rate
'''
return self.init_lr * ((math.cos(iter * 1.0 / (self.max_iter) * math.pi) + 1.0) * 0.5) | python | def get_learning_rate(self, iter):
'''
Get learning rate with cosine decay based on current iteration.
Args:
iter (int): Current iteration (starting with 0).
Returns:
float: Learning rate
'''
return self.init_lr * ((math.cos(iter * 1.0 / (self.max_iter) * math.pi) + 1.0) * 0.5) | [
"def",
"get_learning_rate",
"(",
"self",
",",
"iter",
")",
":",
"return",
"self",
".",
"init_lr",
"*",
"(",
"(",
"math",
".",
"cos",
"(",
"iter",
"*",
"1.0",
"/",
"(",
"self",
".",
"max_iter",
")",
"*",
"math",
".",
"pi",
")",
"+",
"1.0",
")",
... | Get learning rate with cosine decay based on current iteration.
Args:
iter (int): Current iteration (starting with 0).
Returns:
float: Learning rate | [
"Get",
"learning",
"rate",
"with",
"cosine",
"decay",
"based",
"on",
"current",
"iteration",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/learning_rate_scheduler.py#L87-L97 | train | 223,725 |
def affine(inp, n_outmaps,
           base_axis=1,
           w_init=None, b_init=None,
           fix_parameters=False, rng=None, with_bias=True,
           apply_w=None, apply_b=None):
    """The affine layer, also known as the fully connected layer.

    Computes

    .. math::
        {\\mathbf y} = {\\mathbf A} {\\mathbf x} + {\\mathbf b}.

    where :math:`{\\mathbf x}, {\\mathbf y}` are the input and output
    respectively, and :math:`{\\mathbf A}, {\\mathbf b}` are constants.

    Args:
        inp (~nnabla.Variable): Input N-D array with shape
            (:math:`M_0 \\times \\ldots \\times M_{B-1} \\times D_B \\times \\ldots \\times D_N`).
            Dimensions before and after base_axis are flattened as if it is a matrix.
        n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
        base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
        w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`):
            Initializer for the weight. Defaults to a Glorot-style
            :obj:`nnabla.initializer.UniformInitializer`.
        b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`):
            Initializer for the bias. Defaults to zeros when `with_bias` is `True`.
        fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
        rng (numpy.random.RandomState): Random generator for Initializer.
        with_bias (bool): Specify whether to include the bias term.
        apply_w (function): Lambda, function, or callable object applied to the weights.
        apply_b (function): Lambda, function, or callable object applied to the bias.

    Returns:
        :class:`~nnabla.Variable`: :math:`(B + 1)`-D array.
        (:math:`M_0 \\times \\ldots \\times M_{B-1} \\times L`)
    """
    # Normalize n_outmaps to a list; a bare int means a single output axis.
    if not hasattr(n_outmaps, '__iter__'):
        n_outmaps = [n_outmaps]
    n_outmaps = list(n_outmaps)
    out_size = int(np.prod(n_outmaps))
    in_size = int(np.prod(inp.shape[base_axis:]))

    if w_init is None:
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(in_size, out_size), rng=rng)
    w = get_parameter_or_create(
        "W", [in_size] + n_outmaps, w_init, True, not fix_parameters)
    if apply_w is not None:
        w = apply_w(w)

    b = None
    if with_bias:
        if b_init is None:
            b_init = ConstantInitializer()
        b = get_parameter_or_create(
            "b", n_outmaps, b_init, True, not fix_parameters)
        if apply_b is not None:
            b = apply_b(b)
    return F.affine(inp, w, b, base_axis)
base_axis=1,
w_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True,
apply_w=None, apply_b=None):
"""
The affine layer, also known as the fully connected layer. Computes
.. math::
{\\mathbf y} = {\\mathbf A} {\\mathbf x} + {\\mathbf b}.
where :math:`{\\mathbf x}, {\\mathbf y}` are the inputs and outputs respectively,
and :math:`{\\mathbf A}, {\\mathbf b}` are constants.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
apply_w (function): Lambda, function, or callable object applied to the weights.
apply_b (function): Lambda, function, or callable object applied to the bias.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)f
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
inmaps = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, True, not fix_parameters)
if apply_w is not None:
w = apply_w(w)
b = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, True, not fix_parameters)
if apply_b is not None:
b = apply_b(b)
return F.affine(inp, w, b, base_axis) | [
"def",
"affine",
"(",
"inp",
",",
"n_outmaps",
",",
"base_axis",
"=",
"1",
",",
"w_init",
"=",
"None",
",",
"b_init",
"=",
"None",
",",
"fix_parameters",
"=",
"False",
",",
"rng",
"=",
"None",
",",
"with_bias",
"=",
"True",
",",
"apply_w",
"=",
"None... | The affine layer, also known as the fully connected layer. Computes
.. math::
{\\mathbf y} = {\\mathbf A} {\\mathbf x} + {\\mathbf b}.
where :math:`{\\mathbf x}, {\\mathbf y}` are the inputs and outputs respectively,
and :math:`{\\mathbf A}, {\\mathbf b}` are constants.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
apply_w (function): Lambda, function, or callable object applied to the weights.
apply_b (function): Lambda, function, or callable object applied to the bias.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)f | [
"The",
"affine",
"layer",
"also",
"known",
"as",
"the",
"fully",
"connected",
"layer",
".",
"Computes"
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L132-L183 | train | 223,726 |
sony/nnabla | python/src/nnabla/parametric_functions.py | binary_weight_affine | def binary_weight_affine(inp, n_outmaps,
base_axis=1, quantize_zero_to=1.0,
w_init=None, wb_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True):
"""Binary Weight Affine, multiplier-less inner-product with a scale factor.
Binary Weight Affine is the affine function, but the inner product
in this function is the following,
.. math::
y_j = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}} \sum_{i} sign(w_{ji}) x_i
Therefore :math:`sign(w_{ji})` is either :math:`1` or :math:`-1` and the inner product
simplifies to addition followed by scaling factor :math:`\\alpha = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}}`.
The number of ::math:`\\alpha` is the outmaps of the affine function.
References:
Rastegari, Mohammad, et al. "XNOR-Net: ImageNet Classification Using
Binary Convolutional Neural Networks." arXiv preprint
arXiv:1603.05279 (2016).
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the binarized weights (`binary_weight`)
2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the binary weights will not be in sync.
3) Quantized values are stored as floating point number for `binary_weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
quantize_zero_to (float): Input value at zero is quantized to this value.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By defalut, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
fan_in = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng)
if wb_init is None:
fan_in = np.prod(inp.shape[base_axis:])
wb_init = UniformInitializer(
calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng)
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, True, not fix_parameters)
wb = get_parameter_or_create(
"Wb", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
wb_init, False)
alpha = get_parameter_or_create(
"alpha", n_outmaps, ConstantInitializer(0), False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, True, not fix_parameters)
return F.binary_weight_affine(inp, w, wb, alpha, b, base_axis, quantize_zero_to) | python | def binary_weight_affine(inp, n_outmaps,
base_axis=1, quantize_zero_to=1.0,
w_init=None, wb_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True):
"""Binary Weight Affine, multiplier-less inner-product with a scale factor.
Binary Weight Affine is the affine function, but the inner product
in this function is the following,
.. math::
y_j = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}} \sum_{i} sign(w_{ji}) x_i
Therefore :math:`sign(w_{ji})` is either :math:`1` or :math:`-1` and the inner product
simplifies to addition followed by scaling factor :math:`\\alpha = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}}`.
The number of ::math:`\\alpha` is the outmaps of the affine function.
References:
Rastegari, Mohammad, et al. "XNOR-Net: ImageNet Classification Using
Binary Convolutional Neural Networks." arXiv preprint
arXiv:1603.05279 (2016).
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the binarized weights (`binary_weight`)
2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the binary weights will not be in sync.
3) Quantized values are stored as floating point number for `binary_weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
quantize_zero_to (float): Input value at zero is quantized to this value.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By defalut, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
fan_in = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng)
if wb_init is None:
fan_in = np.prod(inp.shape[base_axis:])
wb_init = UniformInitializer(
calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng)
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, True, not fix_parameters)
wb = get_parameter_or_create(
"Wb", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
wb_init, False)
alpha = get_parameter_or_create(
"alpha", n_outmaps, ConstantInitializer(0), False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, True, not fix_parameters)
return F.binary_weight_affine(inp, w, wb, alpha, b, base_axis, quantize_zero_to) | [
"def",
"binary_weight_affine",
"(",
"inp",
",",
"n_outmaps",
",",
"base_axis",
"=",
"1",
",",
"quantize_zero_to",
"=",
"1.0",
",",
"w_init",
"=",
"None",
",",
"wb_init",
"=",
"None",
",",
"b_init",
"=",
"None",
",",
"fix_parameters",
"=",
"False",
",",
"... | Binary Weight Affine, multiplier-less inner-product with a scale factor.
Binary Weight Affine is the affine function, but the inner product
in this function is the following,
.. math::
y_j = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}} \sum_{i} sign(w_{ji}) x_i
Therefore :math:`sign(w_{ji})` is either :math:`1` or :math:`-1` and the inner product
simplifies to addition followed by scaling factor :math:`\\alpha = \\frac{1}{\\|\\mathbf{w}_j\\|_{\\ell_1}}`.
The number of ::math:`\\alpha` is the outmaps of the affine function.
References:
Rastegari, Mohammad, et al. "XNOR-Net: ImageNet Classification Using
Binary Convolutional Neural Networks." arXiv preprint
arXiv:1603.05279 (2016).
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the binarized weights (`binary_weight`)
2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the binary weights will not be in sync.
3) Quantized values are stored as floating point number for `binary_weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
quantize_zero_to (float): Input value at zero is quantized to this value.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By defalut, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable` | [
"Binary",
"Weight",
"Affine",
"multiplier",
"-",
"less",
"inner",
"-",
"product",
"with",
"a",
"scale",
"factor",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L409-L488 | train | 223,727 |
sony/nnabla | python/src/nnabla/parametric_functions.py | inq_affine | def inq_affine(inp, n_outmaps, base_axis=1, num_bits=4,
inq_iterations=(), selection_algorithm='random',
seed=-1, w_init=None, i_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True):
"""Incremental Network Quantization Affine Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
quantize_zero_to (float): Input value at zero is quantized to this value.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
fan_in = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng)
if i_init is None:
fan_in = np.prod(inp.shape[base_axis:])
i_init = ConstantInitializer()
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, True, not fix_parameters)
i = get_parameter_or_create(
"I", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
i_init, False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, True, not fix_parameters)
return F.inq_affine(inp, w, i, b, base_axis, num_bits, inq_iterations, selection_algorithm, seed) | python | def inq_affine(inp, n_outmaps, base_axis=1, num_bits=4,
inq_iterations=(), selection_algorithm='random',
seed=-1, w_init=None, i_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True):
"""Incremental Network Quantization Affine Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
quantize_zero_to (float): Input value at zero is quantized to this value.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
fan_in = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(fan_in, n_outmap), rng=rng)
if i_init is None:
fan_in = np.prod(inp.shape[base_axis:])
i_init = ConstantInitializer()
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, True, not fix_parameters)
i = get_parameter_or_create(
"I", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
i_init, False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, True, not fix_parameters)
return F.inq_affine(inp, w, i, b, base_axis, num_bits, inq_iterations, selection_algorithm, seed) | [
"def",
"inq_affine",
"(",
"inp",
",",
"n_outmaps",
",",
"base_axis",
"=",
"1",
",",
"num_bits",
"=",
"4",
",",
"inq_iterations",
"=",
"(",
")",
",",
"selection_algorithm",
"=",
"'random'",
",",
"seed",
"=",
"-",
"1",
",",
"w_init",
"=",
"None",
",",
... | Incremental Network Quantization Affine Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
quantize_zero_to (float): Input value at zero is quantized to this value.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable` | [
"Incremental",
"Network",
"Quantization",
"Affine",
"Layer"
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L496-L559 | train | 223,728 |
sony/nnabla | python/src/nnabla/parametric_functions.py | binary_connect_convolution | def binary_connect_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
quantize_zero_to=1.0,
w_init=None, wb_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None,
with_bias=True):
"""Binary Connect Convolution, multiplier-less inner-product.
Binary Connect Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j}.
Therefore :math:`sign(w_i)` is either :math:`1` or :math:`-1` and the inner product
simplifies to addition.
This function should be used together with BatchNormalization.
References:
M. Courbariaux, Y. Bengio, and J.-P. David. "BinaryConnect:
Training Deep Neural Networks with binary weights during propagations."
Advances in Neural Information Processing Systems. 2015.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the binarized weights (`binary_weight`)
2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the binary weights will not be in sync.
3) Quantized values are stored as floating point number for `binary_weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction.
quantize_zero_to (float): Input value at zero is quantized to this value.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if wb_init is None:
wb_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis]) + tuple(kernel),
w_init, True, not fix_parameters)
wb = get_parameter_or_create(
"Wb", (outmaps, inp.shape[base_axis]) + tuple(kernel),
wb_init, False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
return F.binary_connect_convolution(inp, w, wb, b, base_axis, pad, stride, dilation, group, quantize_zero_to) | python | def binary_connect_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
quantize_zero_to=1.0,
w_init=None, wb_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None,
with_bias=True):
"""Binary Connect Convolution, multiplier-less inner-product.
Binary Connect Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j}.
Therefore :math:`sign(w_i)` is either :math:`1` or :math:`-1` and the inner product
simplifies to addition.
This function should be used together with BatchNormalization.
References:
M. Courbariaux, Y. Bengio, and J.-P. David. "BinaryConnect:
Training Deep Neural Networks with binary weights during propagations."
Advances in Neural Information Processing Systems. 2015.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the binarized weights (`binary_weight`)
2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the binary weights will not be in sync.
3) Quantized values are stored as floating point number for `binary_weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction.
quantize_zero_to (float): Input value at zero is quantized to this value.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if wb_init is None:
wb_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis]) + tuple(kernel),
w_init, True, not fix_parameters)
wb = get_parameter_or_create(
"Wb", (outmaps, inp.shape[base_axis]) + tuple(kernel),
wb_init, False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
return F.binary_connect_convolution(inp, w, wb, b, base_axis, pad, stride, dilation, group, quantize_zero_to) | [
"def",
"binary_connect_convolution",
"(",
"inp",
",",
"outmaps",
",",
"kernel",
",",
"pad",
"=",
"None",
",",
"stride",
"=",
"None",
",",
"dilation",
"=",
"None",
",",
"group",
"=",
"1",
",",
"quantize_zero_to",
"=",
"1.0",
",",
"w_init",
"=",
"None",
... | Binary Connect Convolution, multiplier-less inner-product.
Binary Connect Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j}.
Therefore :math:`sign(w_i)` is either :math:`1` or :math:`-1` and the inner product
simplifies to addition.
This function should be used together with BatchNormalization.
References:
M. Courbariaux, Y. Bengio, and J.-P. David. "BinaryConnect:
Training Deep Neural Networks with binary weights during propagations."
Advances in Neural Information Processing Systems. 2015.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the binarized weights (`binary_weight`)
2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the binary weights will not be in sync.
3) Quantized values are stored as floating point number for `binary_weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction.
quantize_zero_to (float): Input value at zero is quantized to this value.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable` | [
"Binary",
"Connect",
"Convolution",
"multiplier",
"-",
"less",
"inner",
"-",
"product",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L942-L1022 | train | 223,729 |
sony/nnabla | python/src/nnabla/parametric_functions.py | inq_convolution | def inq_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
num_bits=4, inq_iterations=(), selection_algorithm='random',
seed=-1, w_init=None, i_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None,
with_bias=True):
"""Incremental Network Quantization Convolution Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if i_init is None:
i_init = ConstantInitializer()
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis]) + tuple(kernel),
w_init, True, not fix_parameters)
i = get_parameter_or_create(
"I", (outmaps, inp.shape[base_axis]) + tuple(kernel),
i_init, False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
return F.inq_convolution(inp, w, i, b, base_axis, pad, stride, dilation, group, num_bits, inq_iterations, selection_algorithm, seed) | python | def inq_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
num_bits=4, inq_iterations=(), selection_algorithm='random',
seed=-1, w_init=None, i_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None,
with_bias=True):
"""Incremental Network Quantization Convolution Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if i_init is None:
i_init = ConstantInitializer()
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis]) + tuple(kernel),
w_init, True, not fix_parameters)
i = get_parameter_or_create(
"I", (outmaps, inp.shape[base_axis]) + tuple(kernel),
i_init, False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
return F.inq_convolution(inp, w, i, b, base_axis, pad, stride, dilation, group, num_bits, inq_iterations, selection_algorithm, seed) | [
"def",
"inq_convolution",
"(",
"inp",
",",
"outmaps",
",",
"kernel",
",",
"pad",
"=",
"None",
",",
"stride",
"=",
"None",
",",
"dilation",
"=",
"None",
",",
"group",
"=",
"1",
",",
"num_bits",
"=",
"4",
",",
"inq_iterations",
"=",
"(",
")",
",",
"s... | Incremental Network Quantization Convolution Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable` | [
"Incremental",
"Network",
"Quantization",
"Convolution",
"Layer"
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1122-L1180 | train | 223,730 |
sony/nnabla | python/src/nnabla/parametric_functions.py | depthwise_convolution | def depthwise_convolution(inp, kernel, pad=None, stride=None, dilation=None,
multiplier=1, w_init=None, b_init=None, base_axis=1,
fix_parameters=False, rng=None, with_bias=True):
"""
N-D Depthwise Convolution with a bias term.
Reference:
- F. Chollet: Chollet, Francois. "Xception: Deep Learning with Depthwise Separable Convolutions. https://arxiv.org/abs/1610.02357
Args:
inp (~nnabla.Variable): N-D array.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
multiplier (:obj:`int`): Number of output feature maps per input feature map.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`: N-D array. See :obj:`~nnabla.functions.depthwise_convolution` for the output shape.
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(
inp.shape[base_axis] * multiplier,
inp.shape[base_axis],
tuple(kernel)),
rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", (inp.shape[base_axis] * multiplier,) + tuple(kernel),
w_init, True, not fix_parameters)
b = None
if with_bias:
b = get_parameter_or_create(
"b", (inp.shape[base_axis] * multiplier,),
b_init, True, not fix_parameters)
return F.depthwise_convolution(inp, w, b, base_axis, pad, stride, dilation,
multiplier) | python | def depthwise_convolution(inp, kernel, pad=None, stride=None, dilation=None,
multiplier=1, w_init=None, b_init=None, base_axis=1,
fix_parameters=False, rng=None, with_bias=True):
"""
N-D Depthwise Convolution with a bias term.
Reference:
- F. Chollet: Chollet, Francois. "Xception: Deep Learning with Depthwise Separable Convolutions. https://arxiv.org/abs/1610.02357
Args:
inp (~nnabla.Variable): N-D array.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
multiplier (:obj:`int`): Number of output feature maps per input feature map.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`: N-D array. See :obj:`~nnabla.functions.depthwise_convolution` for the output shape.
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(
inp.shape[base_axis] * multiplier,
inp.shape[base_axis],
tuple(kernel)),
rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", (inp.shape[base_axis] * multiplier,) + tuple(kernel),
w_init, True, not fix_parameters)
b = None
if with_bias:
b = get_parameter_or_create(
"b", (inp.shape[base_axis] * multiplier,),
b_init, True, not fix_parameters)
return F.depthwise_convolution(inp, w, b, base_axis, pad, stride, dilation,
multiplier) | [
"def",
"depthwise_convolution",
"(",
"inp",
",",
"kernel",
",",
"pad",
"=",
"None",
",",
"stride",
"=",
"None",
",",
"dilation",
"=",
"None",
",",
"multiplier",
"=",
"1",
",",
"w_init",
"=",
"None",
",",
"b_init",
"=",
"None",
",",
"base_axis",
"=",
... | N-D Depthwise Convolution with a bias term.
Reference:
- F. Chollet: Chollet, Francois. "Xception: Deep Learning with Depthwise Separable Convolutions. https://arxiv.org/abs/1610.02357
Args:
inp (~nnabla.Variable): N-D array.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
multiplier (:obj:`int`): Number of output feature maps per input feature map.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`: N-D array. See :obj:`~nnabla.functions.depthwise_convolution` for the output shape. | [
"N",
"-",
"D",
"Depthwise",
"Convolution",
"with",
"a",
"bias",
"term",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1187-L1233 | train | 223,731 |
sony/nnabla | python/src/nnabla/parametric_functions.py | batch_normalization | def batch_normalization(inp, axes=[1], decay_rate=0.9, eps=1e-5,
batch_stat=True, output_stat=False, fix_parameters=False,
param_init=None):
"""
Batch normalization layer.
.. math::
\\begin{array}{lcl}
\\mu &=& \\frac{1}{M} \\sum x_i\\\\
\\sigma^2 &=& \\frac{1}{M} \\sum \\left(x_i - \\mu\\right)^2\\\\
\\hat{x}_i &=& \\frac{x_i - \\mu}{\\sqrt{\\sigma^2 + \\epsilon }}\\\\
y_i &= & \\hat{x}_i \\gamma + \\beta.
\\end{array}
where :math:`x_i, y_i` are the inputs.
In testing, the mean and variance computed by moving average calculated during training are used.
Args:
inp (~nnabla.Variable): N-D array of input.
axes (:obj:`tuple` of :obj:`int`):
Mean and variance for each element in ``axes`` are calculated using
elements on the rest axes. For example, if an input is 4 dimensions,
and ``axes`` is ``[1]``, batch mean is calculated as
``np.mean(inp.d, axis=(0, 2, 3), keepdims=True)``
(using numpy expression as an example).
decay_rate (float): Decay rate of running mean and variance.
eps (float): Tiny value to avoid zero division by std.
batch_stat (bool): Use mini-batch statistics rather than running ones.
output_stat (bool): Output batch mean and variance.
fix_parameters (bool): When set to `True`, the beta and gamma will not be updated.
param_init (dict):
Parameter initializers can be set with a dict. A key of the dict must
be ``'beta'``, ``'gamma'``, ``'mean'`` or ``'var'``.
A value of the dict must be an :obj:`~nnabla.initializer.Initializer`
or a :obj:`numpy.ndarray`.
E.g. ``{'beta': ConstantIntializer(0), 'gamma': np.ones(gamma_shape) * 2}``.
Returns:
:class:`~nnabla.Variable`: N-D array.
References:
- Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. https://arxiv.org/abs/1502.03167
The shape of parameters has the same number of dimensions with the input
data, and the shapes in ``axes`` has the same dimensions with the input, while the rest has ``1``.
If an input is 4-dim and ``axes=[1]``, the parameter shape will be
``param_shape = np.mean(inp.d, axis=(0, 2, 3), keepdims=True).shape``
(using numpy expression as an example).
"""
shape_stat = [1 for _ in inp.shape]
for i in range(len(axes)):
shape_stat[axes[i]] = inp.shape[axes[i]]
if param_init is None:
param_init = {}
beta_init = param_init.get('beta', ConstantInitializer(0))
gamma_init = param_init.get('gamma', ConstantInitializer(1))
mean_init = param_init.get('mean', ConstantInitializer(0))
var_init = param_init.get('var', ConstantInitializer(1))
beta = get_parameter_or_create(
"beta", shape_stat, beta_init, True, not fix_parameters)
gamma = get_parameter_or_create(
"gamma", shape_stat, gamma_init, True, not fix_parameters)
mean = get_parameter_or_create(
"mean", shape_stat, mean_init, False)
var = get_parameter_or_create(
"var", shape_stat, var_init, False)
return F.batch_normalization(inp, beta, gamma, mean, var, axes,
decay_rate, eps, batch_stat, output_stat) | python | def batch_normalization(inp, axes=[1], decay_rate=0.9, eps=1e-5,
batch_stat=True, output_stat=False, fix_parameters=False,
param_init=None):
"""
Batch normalization layer.
.. math::
\\begin{array}{lcl}
\\mu &=& \\frac{1}{M} \\sum x_i\\\\
\\sigma^2 &=& \\frac{1}{M} \\sum \\left(x_i - \\mu\\right)^2\\\\
\\hat{x}_i &=& \\frac{x_i - \\mu}{\\sqrt{\\sigma^2 + \\epsilon }}\\\\
y_i &= & \\hat{x}_i \\gamma + \\beta.
\\end{array}
where :math:`x_i, y_i` are the inputs.
In testing, the mean and variance computed by moving average calculated during training are used.
Args:
inp (~nnabla.Variable): N-D array of input.
axes (:obj:`tuple` of :obj:`int`):
Mean and variance for each element in ``axes`` are calculated using
elements on the rest axes. For example, if an input is 4 dimensions,
and ``axes`` is ``[1]``, batch mean is calculated as
``np.mean(inp.d, axis=(0, 2, 3), keepdims=True)``
(using numpy expression as an example).
decay_rate (float): Decay rate of running mean and variance.
eps (float): Tiny value to avoid zero division by std.
batch_stat (bool): Use mini-batch statistics rather than running ones.
output_stat (bool): Output batch mean and variance.
fix_parameters (bool): When set to `True`, the beta and gamma will not be updated.
param_init (dict):
Parameter initializers can be set with a dict. A key of the dict must
be ``'beta'``, ``'gamma'``, ``'mean'`` or ``'var'``.
A value of the dict must be an :obj:`~nnabla.initializer.Initializer`
or a :obj:`numpy.ndarray`.
E.g. ``{'beta': ConstantIntializer(0), 'gamma': np.ones(gamma_shape) * 2}``.
Returns:
:class:`~nnabla.Variable`: N-D array.
References:
- Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. https://arxiv.org/abs/1502.03167
The shape of parameters has the same number of dimensions with the input
data, and the shapes in ``axes`` has the same dimensions with the input, while the rest has ``1``.
If an input is 4-dim and ``axes=[1]``, the parameter shape will be
``param_shape = np.mean(inp.d, axis=(0, 2, 3), keepdims=True).shape``
(using numpy expression as an example).
"""
shape_stat = [1 for _ in inp.shape]
for i in range(len(axes)):
shape_stat[axes[i]] = inp.shape[axes[i]]
if param_init is None:
param_init = {}
beta_init = param_init.get('beta', ConstantInitializer(0))
gamma_init = param_init.get('gamma', ConstantInitializer(1))
mean_init = param_init.get('mean', ConstantInitializer(0))
var_init = param_init.get('var', ConstantInitializer(1))
beta = get_parameter_or_create(
"beta", shape_stat, beta_init, True, not fix_parameters)
gamma = get_parameter_or_create(
"gamma", shape_stat, gamma_init, True, not fix_parameters)
mean = get_parameter_or_create(
"mean", shape_stat, mean_init, False)
var = get_parameter_or_create(
"var", shape_stat, var_init, False)
return F.batch_normalization(inp, beta, gamma, mean, var, axes,
decay_rate, eps, batch_stat, output_stat) | [
"def",
"batch_normalization",
"(",
"inp",
",",
"axes",
"=",
"[",
"1",
"]",
",",
"decay_rate",
"=",
"0.9",
",",
"eps",
"=",
"1e-5",
",",
"batch_stat",
"=",
"True",
",",
"output_stat",
"=",
"False",
",",
"fix_parameters",
"=",
"False",
",",
"param_init",
... | Batch normalization layer.
.. math::
\\begin{array}{lcl}
\\mu &=& \\frac{1}{M} \\sum x_i\\\\
\\sigma^2 &=& \\frac{1}{M} \\sum \\left(x_i - \\mu\\right)^2\\\\
\\hat{x}_i &=& \\frac{x_i - \\mu}{\\sqrt{\\sigma^2 + \\epsilon }}\\\\
y_i &= & \\hat{x}_i \\gamma + \\beta.
\\end{array}
where :math:`x_i, y_i` are the inputs.
In testing, the mean and variance computed by moving average calculated during training are used.
Args:
inp (~nnabla.Variable): N-D array of input.
axes (:obj:`tuple` of :obj:`int`):
Mean and variance for each element in ``axes`` are calculated using
elements on the rest axes. For example, if an input is 4 dimensions,
and ``axes`` is ``[1]``, batch mean is calculated as
``np.mean(inp.d, axis=(0, 2, 3), keepdims=True)``
(using numpy expression as an example).
decay_rate (float): Decay rate of running mean and variance.
eps (float): Tiny value to avoid zero division by std.
batch_stat (bool): Use mini-batch statistics rather than running ones.
output_stat (bool): Output batch mean and variance.
fix_parameters (bool): When set to `True`, the beta and gamma will not be updated.
param_init (dict):
Parameter initializers can be set with a dict. A key of the dict must
be ``'beta'``, ``'gamma'``, ``'mean'`` or ``'var'``.
A value of the dict must be an :obj:`~nnabla.initializer.Initializer`
or a :obj:`numpy.ndarray`.
E.g. ``{'beta': ConstantIntializer(0), 'gamma': np.ones(gamma_shape) * 2}``.
Returns:
:class:`~nnabla.Variable`: N-D array.
References:
- Ioffe and Szegedy, Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. https://arxiv.org/abs/1502.03167
The shape of parameters has the same number of dimensions with the input
data, and the shapes in ``axes`` has the same dimensions with the input, while the rest has ``1``.
If an input is 4-dim and ``axes=[1]``, the parameter shape will be
``param_shape = np.mean(inp.d, axis=(0, 2, 3), keepdims=True).shape``
(using numpy expression as an example). | [
"Batch",
"normalization",
"layer",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1611-L1682 | train | 223,732 |
sony/nnabla | python/src/nnabla/parametric_functions.py | mean_subtraction | def mean_subtraction(inp, base_axis=1, update_running_mean=True, fix_parameters=False):
"""
Mean subtraction layer.
It subtracts the mean of the elements of the input array,
and normalizes it to :math:`0`. Preprocessing arrays with this function has the effect of improving accuracy
in various tasks such as image classification.
At training time, this function is defined as
.. math::
\\begin{array}{lcl}
\\mu &=& \\frac{1}{M} \\sum x_i \\\\
y_i &=& x_i - \\mu
\\end{array}
At testing time, the mean values used are those that were computed during training by moving average.
Note:
The backward performs an approximated differentiation that takes into account only the latest mini-batch.
Args:
inp (~nnabla.Variable): N-D array of input.
base_axis (int): Base axis of Mean Subtraction operation. Dimensions up to base_axis is treated as sample dimension.
update_running_mean (bool): When set to `True`, the running mean will not be updated.
fix_parameters (bool): dummy parameter. This argument dose not affect anything.
Returns:
~nnabla.Variable: N-D array.
"""
assert len(inp.shape) >= base_axis
shape = inp.shape[base_axis:]
mean = get_parameter_or_create(
"mean", shape, ConstantInitializer(0), False)
t = get_parameter_or_create(
"t", (1, ), ConstantInitializer(0), False)
return F.mean_subtraction(inp, mean, t, base_axis=base_axis, update_running_mean=update_running_mean) | python | def mean_subtraction(inp, base_axis=1, update_running_mean=True, fix_parameters=False):
"""
Mean subtraction layer.
It subtracts the mean of the elements of the input array,
and normalizes it to :math:`0`. Preprocessing arrays with this function has the effect of improving accuracy
in various tasks such as image classification.
At training time, this function is defined as
.. math::
\\begin{array}{lcl}
\\mu &=& \\frac{1}{M} \\sum x_i \\\\
y_i &=& x_i - \\mu
\\end{array}
At testing time, the mean values used are those that were computed during training by moving average.
Note:
The backward performs an approximated differentiation that takes into account only the latest mini-batch.
Args:
inp (~nnabla.Variable): N-D array of input.
base_axis (int): Base axis of Mean Subtraction operation. Dimensions up to base_axis is treated as sample dimension.
update_running_mean (bool): When set to `True`, the running mean will not be updated.
fix_parameters (bool): dummy parameter. This argument dose not affect anything.
Returns:
~nnabla.Variable: N-D array.
"""
assert len(inp.shape) >= base_axis
shape = inp.shape[base_axis:]
mean = get_parameter_or_create(
"mean", shape, ConstantInitializer(0), False)
t = get_parameter_or_create(
"t", (1, ), ConstantInitializer(0), False)
return F.mean_subtraction(inp, mean, t, base_axis=base_axis, update_running_mean=update_running_mean) | [
"def",
"mean_subtraction",
"(",
"inp",
",",
"base_axis",
"=",
"1",
",",
"update_running_mean",
"=",
"True",
",",
"fix_parameters",
"=",
"False",
")",
":",
"assert",
"len",
"(",
"inp",
".",
"shape",
")",
">=",
"base_axis",
"shape",
"=",
"inp",
".",
"shape... | Mean subtraction layer.
It subtracts the mean of the elements of the input array,
and normalizes it to :math:`0`. Preprocessing arrays with this function has the effect of improving accuracy
in various tasks such as image classification.
At training time, this function is defined as
.. math::
\\begin{array}{lcl}
\\mu &=& \\frac{1}{M} \\sum x_i \\\\
y_i &=& x_i - \\mu
\\end{array}
At testing time, the mean values used are those that were computed during training by moving average.
Note:
The backward performs an approximated differentiation that takes into account only the latest mini-batch.
Args:
inp (~nnabla.Variable): N-D array of input.
base_axis (int): Base axis of Mean Subtraction operation. Dimensions up to base_axis is treated as sample dimension.
update_running_mean (bool): When set to `True`, the running mean will not be updated.
fix_parameters (bool): dummy parameter. This argument dose not affect anything.
Returns:
~nnabla.Variable: N-D array. | [
"Mean",
"subtraction",
"layer",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1689-L1726 | train | 223,733 |
sony/nnabla | python/src/nnabla/parametric_functions.py | prelu | def prelu(inp, base_axis=1, shared=True, fix_parameters=False):
"""
Parametrized Rectified Linear Unit function defined as
.. math::
y_i = \max(0, x_i) + w_i \min(0, -x_i)
where negative slope :math:`w` is learned and can vary across channels (an
axis specified with base_axis). Weights are initialized with :math:`-1`.
Args:
x(~nnabla.Variable): N-D array as input
base_axis(int): Dimensions up to base_axis is treated as sample dimension.
shared(bool): Use shared weight value or not
fix_parameters (bool): When set to `True`, the negative slope values
will not be updated.
Returns:
~nnabla.Variable: N-D array.
"""
shape = tuple() if shared else (inp.shape[base_axis],)
w = get_parameter_or_create("slope", shape,
ConstantInitializer(-1), True, not fix_parameters)
return F.prelu(inp, w, base_axis) | python | def prelu(inp, base_axis=1, shared=True, fix_parameters=False):
"""
Parametrized Rectified Linear Unit function defined as
.. math::
y_i = \max(0, x_i) + w_i \min(0, -x_i)
where negative slope :math:`w` is learned and can vary across channels (an
axis specified with base_axis). Weights are initialized with :math:`-1`.
Args:
x(~nnabla.Variable): N-D array as input
base_axis(int): Dimensions up to base_axis is treated as sample dimension.
shared(bool): Use shared weight value or not
fix_parameters (bool): When set to `True`, the negative slope values
will not be updated.
Returns:
~nnabla.Variable: N-D array.
"""
shape = tuple() if shared else (inp.shape[base_axis],)
w = get_parameter_or_create("slope", shape,
ConstantInitializer(-1), True, not fix_parameters)
return F.prelu(inp, w, base_axis) | [
"def",
"prelu",
"(",
"inp",
",",
"base_axis",
"=",
"1",
",",
"shared",
"=",
"True",
",",
"fix_parameters",
"=",
"False",
")",
":",
"shape",
"=",
"tuple",
"(",
")",
"if",
"shared",
"else",
"(",
"inp",
".",
"shape",
"[",
"base_axis",
"]",
",",
")",
... | Parametrized Rectified Linear Unit function defined as
.. math::
y_i = \max(0, x_i) + w_i \min(0, -x_i)
where negative slope :math:`w` is learned and can vary across channels (an
axis specified with base_axis). Weights are initialized with :math:`-1`.
Args:
x(~nnabla.Variable): N-D array as input
base_axis(int): Dimensions up to base_axis is treated as sample dimension.
shared(bool): Use shared weight value or not
fix_parameters (bool): When set to `True`, the negative slope values
will not be updated.
Returns:
~nnabla.Variable: N-D array. | [
"Parametrized",
"Rectified",
"Linear",
"Unit",
"function",
"defined",
"as"
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1762-L1786 | train | 223,734 |
sony/nnabla | python/src/nnabla/parametric_functions.py | fixed_point_quantized_affine | def fixed_point_quantized_affine(inp, n_outmaps,
base_axis=1,
w_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True,
quantize_w=True, sign_w=True, n_w=8, delta_w=2**-4, ste_fine_grained_w=True,
quantize_b=True, sign_b=True, n_b=8, delta_b=2**-4, ste_fine_grained_b=True):
"""Fixed-Point Quantized Affine.
Fixed-Point Quantized Affine is the affine function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_j = \sum_{i} Q(w_{ji}) x_i,
where :math:`Q(w_{ji})` is the fixed-point quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) CPU and GPU implementations now use float value for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
sign_w (bool): Use signed quantization if `True`.
n_w (int): Bit width used for weight.
delta_w (float): Step size for weight.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
n_b (int): Bit width used for bias.
delta_w (float): Step size for bias.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
inmaps = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, True, not fix_parameters)
# Quantized Weight
if quantize_w:
w_q = get_parameter_or_create(
"W_q", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, False)
# Link computation graph
real_w_q = F.fixed_point_quantize(w, quantize=quantize_w,
sign=sign_w, n=n_w, delta=delta_w,
ste_fine_grained=ste_fine_grained_w,
outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
b = None
b_q = None
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, True, not fix_parameters)
if quantize_b:
b_q = get_parameter_or_create(
"b_q", n_outmaps, b_init, False)
# Link computation graph
real_b_q = F.fixed_point_quantize(b, quantize=quantize_b,
sign=sign_b, n=n_b, delta=delta_b,
ste_fine_grained=ste_fine_grained_b,
outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.affine(inp, real_w_q, real_b_q, base_axis) | python | def fixed_point_quantized_affine(inp, n_outmaps,
base_axis=1,
w_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True,
quantize_w=True, sign_w=True, n_w=8, delta_w=2**-4, ste_fine_grained_w=True,
quantize_b=True, sign_b=True, n_b=8, delta_b=2**-4, ste_fine_grained_b=True):
"""Fixed-Point Quantized Affine.
Fixed-Point Quantized Affine is the affine function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_j = \sum_{i} Q(w_{ji}) x_i,
where :math:`Q(w_{ji})` is the fixed-point quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) CPU and GPU implementations now use float value for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
sign_w (bool): Use signed quantization if `True`.
n_w (int): Bit width used for weight.
delta_w (float): Step size for weight.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
n_b (int): Bit width used for bias.
delta_w (float): Step size for bias.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
inmaps = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, True, not fix_parameters)
# Quantized Weight
if quantize_w:
w_q = get_parameter_or_create(
"W_q", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, False)
# Link computation graph
real_w_q = F.fixed_point_quantize(w, quantize=quantize_w,
sign=sign_w, n=n_w, delta=delta_w,
ste_fine_grained=ste_fine_grained_w,
outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
b = None
b_q = None
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, True, not fix_parameters)
if quantize_b:
b_q = get_parameter_or_create(
"b_q", n_outmaps, b_init, False)
# Link computation graph
real_b_q = F.fixed_point_quantize(b, quantize=quantize_b,
sign=sign_b, n=n_b, delta=delta_b,
ste_fine_grained=ste_fine_grained_b,
outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.affine(inp, real_w_q, real_b_q, base_axis) | [
"def",
"fixed_point_quantized_affine",
"(",
"inp",
",",
"n_outmaps",
",",
"base_axis",
"=",
"1",
",",
"w_init",
"=",
"None",
",",
"b_init",
"=",
"None",
",",
"fix_parameters",
"=",
"False",
",",
"rng",
"=",
"None",
",",
"with_bias",
"=",
"True",
",",
"qu... | Fixed-Point Quantized Affine.
Fixed-Point Quantized Affine is the affine function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_j = \sum_{i} Q(w_{ji}) x_i,
where :math:`Q(w_{ji})` is the fixed-point quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) CPU and GPU implementations now use float value for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
sign_w (bool): Use signed quantization if `True`.
n_w (int): Bit width used for weight.
delta_w (float): Step size for weight.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
n_b (int): Bit width used for bias.
delta_w (float): Step size for bias.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`) | [
"Fixed",
"-",
"Point",
"Quantized",
"Affine",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1795-L1901 | train | 223,735 |
sony/nnabla | python/src/nnabla/parametric_functions.py | fixed_point_quantized_convolution | def fixed_point_quantized_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
w_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None, with_bias=True,
quantize_w=True, sign_w=True, n_w=8, delta_w=2**-4, ste_fine_grained_w=True,
quantize_b=True, sign_b=True, n_b=8, delta_b=2**-4, ste_fine_grained_b=True,):
"""Fixed-Point Quantized Convolution.
Fixed-Point Quantized Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j},
where :math:`Q(w_{n, m, i, j})` is the fixed-point quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) CPU and GPU implementations now use float value for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
quantize_bias (bool): Quantize bias if `True`.
sign_w (bool): Use signed quantization if `True`.
n_w (int): Bit width used for weight.
delta_w (float): Step size for weight.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
n_b (int): Bit width used for bias.
delta_w (float): Step size for bias.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: N-D array.
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
w_init, True, not fix_parameters)
# Quantized Weight
if quantize_w:
w_q = get_parameter_or_create(
"W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
w_init, False)
# Link computation graph
real_w_q = F.fixed_point_quantize(w, quantize=quantize_w,
sign=sign_w, n=n_w, delta=delta_w,
ste_fine_grained=ste_fine_grained_w,
outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
b = None
b_q = None
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
if quantize_b:
b_q = get_parameter_or_create(
"b_q", (outmaps,), b_init, False)
# Link computation graph
real_b_q = F.fixed_point_quantize(b, quantize=quantize_b,
sign=sign_b, n=n_b, delta=delta_b,
ste_fine_grained=ste_fine_grained_b,
outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.convolution(inp, real_w_q, real_b_q, base_axis, pad, stride, dilation, group) | python | def fixed_point_quantized_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
w_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None, with_bias=True,
quantize_w=True, sign_w=True, n_w=8, delta_w=2**-4, ste_fine_grained_w=True,
quantize_b=True, sign_b=True, n_b=8, delta_b=2**-4, ste_fine_grained_b=True,):
"""Fixed-Point Quantized Convolution.
Fixed-Point Quantized Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j},
where :math:`Q(w_{n, m, i, j})` is the fixed-point quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) CPU and GPU implementations now use float value for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
quantize_bias (bool): Quantize bias if `True`.
sign_w (bool): Use signed quantization if `True`.
n_w (int): Bit width used for weight.
delta_w (float): Step size for weight.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
n_b (int): Bit width used for bias.
delta_w (float): Step size for bias.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: N-D array.
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
w_init, True, not fix_parameters)
# Quantized Weight
if quantize_w:
w_q = get_parameter_or_create(
"W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
w_init, False)
# Link computation graph
real_w_q = F.fixed_point_quantize(w, quantize=quantize_w,
sign=sign_w, n=n_w, delta=delta_w,
ste_fine_grained=ste_fine_grained_w,
outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
b = None
b_q = None
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
if quantize_b:
b_q = get_parameter_or_create(
"b_q", (outmaps,), b_init, False)
# Link computation graph
real_b_q = F.fixed_point_quantize(b, quantize=quantize_b,
sign=sign_b, n=n_b, delta=delta_b,
ste_fine_grained=ste_fine_grained_b,
outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.convolution(inp, real_w_q, real_b_q, base_axis, pad, stride, dilation, group) | [
"def",
"fixed_point_quantized_convolution",
"(",
"inp",
",",
"outmaps",
",",
"kernel",
",",
"pad",
"=",
"None",
",",
"stride",
"=",
"None",
",",
"dilation",
"=",
"None",
",",
"group",
"=",
"1",
",",
"w_init",
"=",
"None",
",",
"b_init",
"=",
"None",
",... | Fixed-Point Quantized Convolution.
Fixed-Point Quantized Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j},
where :math:`Q(w_{n, m, i, j})` is the fixed-point quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) CPU and GPU implementations now use float value for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
quantize_bias (bool): Quantize bias if `True`.
sign_w (bool): Use signed quantization if `True`.
n_w (int): Bit width used for weight.
delta_w (float): Step size for weight.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
n_b (int): Bit width used for bias.
delta_w (float): Step size for bias.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: N-D array. | [
"Fixed",
"-",
"Point",
"Quantized",
"Convolution",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L1910-L2017 | train | 223,736 |
sony/nnabla | python/src/nnabla/parametric_functions.py | pow2_quantized_affine | def pow2_quantized_affine(inp, n_outmaps,
base_axis=1,
w_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True,
quantize_w=True, sign_w=True, with_zero_w=False, n_w=8, m_w=2, ste_fine_grained_w=True,
quantize_b=True, sign_b=True, with_zero_b=False, n_b=8, m_b=2, ste_fine_grained_b=True):
"""Pow2 Quantized Affine.
Pow2 Quantized Affine is the affine function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_j = \sum_{i} Q(w_{ji}) x_i,
where :math:`Q(w_{ji})` is the power-of-2 quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) Quantized values are stored as floating point number for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
sign_w (bool): Use signed quantization if `True`.
with_zero_w (bool): Indicate using zero as a quantized value. Default is false.
n_w (int): Bit width used for weight.
m_w (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for weights. Default is 2.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
with_zero_b (bool): Indicate using zero as a quantized value. Default is false.
n_b (int): Bit width used for bias.
m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
inmaps = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, True, not fix_parameters)
# Quantized Weight
if quantize_w:
w_q = get_parameter_or_create(
"W_q", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, False)
# Link computation graph
real_w_q = F.pow2_quantize(w, quantize=quantize_w,
sign=sign_w, with_zero=with_zero_w,
n=n_w, m=m_w, ste_fine_grained=ste_fine_grained_w,
outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
b = None
b_q = None
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, True, not fix_parameters)
if quantize_b:
b_q = get_parameter_or_create(
"b_q", n_outmaps, b_init, False)
real_b_q = F.pow2_quantize(b, quantize=quantize_b,
sign=sign_b, with_zero=with_zero_b,
n=n_b, m=m_b, ste_fine_grained=ste_fine_grained_b,
outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.affine(inp, real_w_q, real_b_q, base_axis) | python | def pow2_quantized_affine(inp, n_outmaps,
base_axis=1,
w_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True,
quantize_w=True, sign_w=True, with_zero_w=False, n_w=8, m_w=2, ste_fine_grained_w=True,
quantize_b=True, sign_b=True, with_zero_b=False, n_b=8, m_b=2, ste_fine_grained_b=True):
"""Pow2 Quantized Affine.
Pow2 Quantized Affine is the affine function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_j = \sum_{i} Q(w_{ji}) x_i,
where :math:`Q(w_{ji})` is the power-of-2 quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) Quantized values are stored as floating point number for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
sign_w (bool): Use signed quantization if `True`.
with_zero_w (bool): Indicate using zero as a quantized value. Default is false.
n_w (int): Bit width used for weight.
m_w (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for weights. Default is 2.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
with_zero_b (bool): Indicate using zero as a quantized value. Default is false.
n_b (int): Bit width used for bias.
m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
inmaps = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, True, not fix_parameters)
# Quantized Weight
if quantize_w:
w_q = get_parameter_or_create(
"W_q", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, False)
# Link computation graph
real_w_q = F.pow2_quantize(w, quantize=quantize_w,
sign=sign_w, with_zero=with_zero_w,
n=n_w, m=m_w, ste_fine_grained=ste_fine_grained_w,
outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
b = None
b_q = None
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, True, not fix_parameters)
if quantize_b:
b_q = get_parameter_or_create(
"b_q", n_outmaps, b_init, False)
real_b_q = F.pow2_quantize(b, quantize=quantize_b,
sign=sign_b, with_zero=with_zero_b,
n=n_b, m=m_b, ste_fine_grained=ste_fine_grained_b,
outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.affine(inp, real_w_q, real_b_q, base_axis) | [
"def",
"pow2_quantized_affine",
"(",
"inp",
",",
"n_outmaps",
",",
"base_axis",
"=",
"1",
",",
"w_init",
"=",
"None",
",",
"b_init",
"=",
"None",
",",
"fix_parameters",
"=",
"False",
",",
"rng",
"=",
"None",
",",
"with_bias",
"=",
"True",
",",
"quantize_... | Pow2 Quantized Affine.
Pow2 Quantized Affine is the affine function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_j = \sum_{i} Q(w_{ji}) x_i,
where :math:`Q(w_{ji})` is the power-of-2 quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) Quantized values are stored as floating point number for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
sign_w (bool): Use signed quantization if `True`.
with_zero_w (bool): Indicate using zero as a quantized value. Default is false.
n_w (int): Bit width used for weight.
m_w (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for weights. Default is 2.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
with_zero_b (bool): Indicate using zero as a quantized value. Default is false.
n_b (int): Bit width used for bias.
m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`) | [
"Pow2",
"Quantized",
"Affine",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L2026-L2132 | train | 223,737 |
sony/nnabla | python/src/nnabla/parametric_functions.py | pow2_quantized_convolution | def pow2_quantized_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
w_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None, with_bias=True,
quantize_w=True, with_zero_w=False, sign_w=True, n_w=8, m_w=2, ste_fine_grained_w=True,
quantize_b=True, with_zero_b=False, sign_b=True, n_b=8, m_b=2, ste_fine_grained_b=True,):
"""Pow2 Quantized Convolution.
Pow2 Quantized Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j},
where :math:`Q(w_{n, m, i, j})` is the power-of-2 quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) Quantized values are stored as floating point number for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
sign_w (bool): Use signed quantization if `True`.
n_w (int): Bit width used for weight.
m_w (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for weights. Default is 2.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
sign_b (bool): Use signed quantization if `True`.
n_b (int): Bit width used for bias.
m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: N-D array.
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
w_init, True, not fix_parameters)
# Quantized Weight
if quantize_w:
w_q = get_parameter_or_create(
"W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
w_init, False)
# Link computation graph
real_w_q = F.pow2_quantize(w, quantize=quantize_w,
sign=sign_w, with_zero=with_zero_w,
n=n_w, m=m_w, ste_fine_grained=ste_fine_grained_w,
outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
b = None
b_q = None
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
if quantize_b:
b_q = get_parameter_or_create(
"b_q", (outmaps,), b_init, False)
# Link computation graph
real_b_q = F.pow2_quantize(b, quantize=quantize_b,
sign=sign_b, with_zero=with_zero_b,
n=n_b, m=m_b, ste_fine_grained=ste_fine_grained_b,
outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.convolution(inp, real_w_q, real_b_q, base_axis, pad, stride, dilation, group) | python | def pow2_quantized_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
w_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None, with_bias=True,
quantize_w=True, with_zero_w=False, sign_w=True, n_w=8, m_w=2, ste_fine_grained_w=True,
quantize_b=True, with_zero_b=False, sign_b=True, n_b=8, m_b=2, ste_fine_grained_b=True,):
"""Pow2 Quantized Convolution.
Pow2 Quantized Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j},
where :math:`Q(w_{n, m, i, j})` is the power-of-2 quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) Quantized values are stored as floating point number for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
sign_w (bool): Use signed quantization if `True`.
n_w (int): Bit width used for weight.
m_w (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for weights. Default is 2.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
sign_b (bool): Use signed quantization if `True`.
n_b (int): Bit width used for bias.
m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: N-D array.
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
w_init, True, not fix_parameters)
# Quantized Weight
if quantize_w:
w_q = get_parameter_or_create(
"W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
w_init, False)
# Link computation graph
real_w_q = F.pow2_quantize(w, quantize=quantize_w,
sign=sign_w, with_zero=with_zero_w,
n=n_w, m=m_w, ste_fine_grained=ste_fine_grained_w,
outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
b = None
b_q = None
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
if quantize_b:
b_q = get_parameter_or_create(
"b_q", (outmaps,), b_init, False)
# Link computation graph
real_b_q = F.pow2_quantize(b, quantize=quantize_b,
sign=sign_b, with_zero=with_zero_b,
n=n_b, m=m_b, ste_fine_grained=ste_fine_grained_b,
outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.convolution(inp, real_w_q, real_b_q, base_axis, pad, stride, dilation, group) | [
"def",
"pow2_quantized_convolution",
"(",
"inp",
",",
"outmaps",
",",
"kernel",
",",
"pad",
"=",
"None",
",",
"stride",
"=",
"None",
",",
"dilation",
"=",
"None",
",",
"group",
"=",
"1",
",",
"w_init",
"=",
"None",
",",
"b_init",
"=",
"None",
",",
"b... | Pow2 Quantized Convolution.
Pow2 Quantized Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j},
where :math:`Q(w_{n, m, i, j})` is the power-of-2 quantization function.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) Quantized values are stored as floating point number for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
quantize_w (bool): Quantize weights if `True`.
sign_w (bool): Use signed quantization if `True`.
n_w (int): Bit width used for weight.
m_w (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for weights. Default is 2.
ste_fine_grained_w (bool): STE is fine-grained if `True`.
quantize_b (bool): Quantize bias if `True`.
sign_b (bool): Use signed quantization if `True`.
n_b (int): Bit width used for bias.
m_b (int): :math:`2^m` is upper bound and :math:`-2^m` is lower bound for bias. Default is 2.
ste_fine_grained_b (bool): STE is fine-grained if `True`.
Returns:
:class:`~nnabla.Variable`: N-D array. | [
"Pow2",
"Quantized",
"Convolution",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L2141-L2249 | train | 223,738 |
sony/nnabla | python/src/nnabla/parametric_functions.py | pruned_affine | def pruned_affine(inp, n_outmaps,
base_axis=1,
w_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True,
prune_w=True, rate_w=0.9, prune_b=True, rate_b=0.9):
"""Pruned Affine.
Pruned Affine is the affine function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_j = \sum_{i} Q(w_{ji}) x_i,
where :math:`Q(w_{ji})` is the pruning function, i.e., `F.prune`.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) CPU and GPU implementations now use float value for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
prune_w (bool): Quantize weights if `True`.
rate_w (float): Pruning rate for weights.
prune_b (bool): Quantize bias if `True`.
rate_b (float): Pruning rate for bias.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
inmaps = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, True, not fix_parameters)
# sparsed Weight
if prune_w:
w_q = get_parameter_or_create(
"W_q", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, False)
# Link computation graph
real_w_q = F.prune(w, rate=rate_w, outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, True, not fix_parameters)
if prune_b:
b_q = get_parameter_or_create(
"b_q", n_outmaps, b_init, False)
# Link computation graph
real_b_q = F.prune(b, rate=rate_b, outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.affine(inp, real_w_q, real_b_q, base_axis) | python | def pruned_affine(inp, n_outmaps,
base_axis=1,
w_init=None, b_init=None,
fix_parameters=False, rng=None, with_bias=True,
prune_w=True, rate_w=0.9, prune_b=True, rate_b=0.9):
"""Pruned Affine.
Pruned Affine is the affine function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_j = \sum_{i} Q(w_{ji}) x_i,
where :math:`Q(w_{ji})` is the pruning function, i.e., `F.prune`.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) CPU and GPU implementations now use float value for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
prune_w (bool): Quantize weights if `True`.
rate_w (float): Pruning rate for weights.
prune_b (bool): Quantize bias if `True`.
rate_b (float): Pruning rate for bias.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
"""
if not hasattr(n_outmaps, '__iter__'):
n_outmaps = [n_outmaps]
n_outmaps = list(n_outmaps)
n_outmap = int(np.prod(n_outmaps))
if w_init is None:
inmaps = np.prod(inp.shape[base_axis:])
w_init = UniformInitializer(
calc_uniform_lim_glorot(inmaps, n_outmap), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, True, not fix_parameters)
# sparsed Weight
if prune_w:
w_q = get_parameter_or_create(
"W_q", [int(np.prod(inp.shape[base_axis:]))] + n_outmaps,
w_init, False)
# Link computation graph
real_w_q = F.prune(w, rate=rate_w, outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", n_outmaps, b_init, True, not fix_parameters)
if prune_b:
b_q = get_parameter_or_create(
"b_q", n_outmaps, b_init, False)
# Link computation graph
real_b_q = F.prune(b, rate=rate_b, outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.affine(inp, real_w_q, real_b_q, base_axis) | [
"def",
"pruned_affine",
"(",
"inp",
",",
"n_outmaps",
",",
"base_axis",
"=",
"1",
",",
"w_init",
"=",
"None",
",",
"b_init",
"=",
"None",
",",
"fix_parameters",
"=",
"False",
",",
"rng",
"=",
"None",
",",
"with_bias",
"=",
"True",
",",
"prune_w",
"=",
... | Pruned Affine.
Pruned Affine is the affine function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_j = \sum_{i} Q(w_{ji}) x_i,
where :math:`Q(w_{ji})` is the pruning function, i.e., `F.prune`.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) CPU and GPU implementations now use float value for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it is a matrix.
n_outmaps (:obj:`int` or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
prune_w (bool): Quantize weights if `True`.
rate_w (float): Pruning rate for weights.
prune_b (bool): Quantize bias if `True`.
rate_b (float): Pruning rate for bias.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array. (:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`) | [
"Pruned",
"Affine",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L2258-L2351 | train | 223,739 |
sony/nnabla | python/src/nnabla/parametric_functions.py | pruned_convolution | def pruned_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
w_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None, with_bias=True,
prune_w=True, rate_w=0.9, prune_b=True, rate_b=0.9):
"""Pruned Convolution.
Pruned Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j},
where :math:`Q(w_{ji})` is the pruning function, i.e., `F.prune`.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) CPU and GPU implementations now use float value for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
prune_w (bool): Quantize weights if `True`.
rate_w (float): Pruning rate for weights.
prune_b (bool): Quantize bias if `True`.
rate_b (float): Pruning rate for bias.
Returns:
:class:`~nnabla.Variable`: N-D array.
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
w_init, True, not fix_parameters)
# Quantized Weight
if prune_w:
w_q = get_parameter_or_create(
"W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
w_init, False)
# Link computation graph
real_w_q = F.prune(w, rate=rate_w, outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
if prune_b:
b_q = get_parameter_or_create(
"b_q", (outmaps,), b_init, False)
# Link computation graph
real_b_q = F.prune(b, rate=rate_b, outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.convolution(inp, real_w_q, real_b_q, base_axis, pad, stride, dilation, group) | python | def pruned_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
w_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None, with_bias=True,
prune_w=True, rate_w=0.9, prune_b=True, rate_b=0.9):
"""Pruned Convolution.
Pruned Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j},
where :math:`Q(w_{ji})` is the pruning function, i.e., `F.prune`.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) CPU and GPU implementations now use float value for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
prune_w (bool): Quantize weights if `True`.
rate_w (float): Pruning rate for weights.
prune_b (bool): Quantize bias if `True`.
rate_b (float): Pruning rate for bias.
Returns:
:class:`~nnabla.Variable`: N-D array.
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
# Floating Weight
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
w_init, True, not fix_parameters)
# Quantized Weight
if prune_w:
w_q = get_parameter_or_create(
"W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel),
w_init, False)
# Link computation graph
real_w_q = F.prune(w, rate=rate_w, outputs=[w_q.data])
real_w_q.persistent = True
else:
real_w_q = w
# Bias
# Floating
real_b_q = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
if prune_b:
b_q = get_parameter_or_create(
"b_q", (outmaps,), b_init, False)
# Link computation graph
real_b_q = F.prune(b, rate=rate_b, outputs=[b_q.data])
real_b_q.persistent = True
else:
real_b_q = b
return F.convolution(inp, real_w_q, real_b_q, base_axis, pad, stride, dilation, group) | [
"def",
"pruned_convolution",
"(",
"inp",
",",
"outmaps",
",",
"kernel",
",",
"pad",
"=",
"None",
",",
"stride",
"=",
"None",
",",
"dilation",
"=",
"None",
",",
"group",
"=",
"1",
",",
"w_init",
"=",
"None",
",",
"b_init",
"=",
"None",
",",
"base_axis... | Pruned Convolution.
Pruned Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n, m, i, j}) x_{m, a + i, b + j},
where :math:`Q(w_{ji})` is the pruning function, i.e., `F.prune`.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the quantized weights (`quantized weight`)
2) The weights and the quantized weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the quantized weights will not be in sync.
3) CPU and GPU implementations now use float value for `quantized weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels more sparse by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
prune_w (bool): Quantize weights if `True`.
rate_w (float): Pruning rate for weights.
prune_b (bool): Quantize bias if `True`.
rate_b (float): Pruning rate for bias.
Returns:
:class:`~nnabla.Variable`: N-D array. | [
"Pruned",
"Convolution",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L2360-L2451 | train | 223,740 |
sony/nnabla | python/src/nnabla/parametric_functions.py | lstm_cell | def lstm_cell(x, h, c, state_size, w_init=None, b_init=None, fix_parameters=False):
"""Long Short-Term Memory.
Long Short-Term Memory, or LSTM, is a building block for recurrent neural networks (RNN) layers.
LSTM unit consists of a cell and input, output, forget gates whose functions are defined as following:
.. math::
f_t&&=\\sigma(W_fx_t+U_fh_{t-1}+b_f) \\\\
i_t&&=\\sigma(W_ix_t+U_ih_{t-1}+b_i) \\\\
o_t&&=\\sigma(W_ox_t+U_oh_{t-1}+b_o) \\\\
c_t&&=f_t\\odot c_{t-1}+i_t\\odot\\tanh(W_cx_t+U_ch_{t-1}+b_c) \\\\
h_t&&=o_t\\odot\\tanh(c_t).
References:
S. Hochreiter, and J. Schmidhuber. "Long Short-Term Memory."
Neural Computation. 1997.
Args:
x (~nnabla.Variable): Input N-D array with shape (batch_size, input_size).
h (~nnabla.Variable): Input N-D array with shape (batch_size, state_size).
c (~nnabla.Variable): Input N-D array with shape (batch_size, state_size).
state_size (int): Internal state size is set to `state_size`.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`, optional): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`, optional): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
Returns:
:class:`~nnabla.Variable`
"""
xh = F.concatenate(*(x, h), axis=1)
iofc = affine(xh, (4, state_size), w_init=w_init,
b_init=b_init, fix_parameters=fix_parameters)
i_t, o_t, f_t, gate = F.split(iofc, axis=1)
c_t = F.sigmoid(f_t) * c + F.sigmoid(i_t) * F.tanh(gate)
h_t = F.sigmoid(o_t) * F.tanh(c_t)
return h_t, c_t | python | def lstm_cell(x, h, c, state_size, w_init=None, b_init=None, fix_parameters=False):
"""Long Short-Term Memory.
Long Short-Term Memory, or LSTM, is a building block for recurrent neural networks (RNN) layers.
LSTM unit consists of a cell and input, output, forget gates whose functions are defined as following:
.. math::
f_t&&=\\sigma(W_fx_t+U_fh_{t-1}+b_f) \\\\
i_t&&=\\sigma(W_ix_t+U_ih_{t-1}+b_i) \\\\
o_t&&=\\sigma(W_ox_t+U_oh_{t-1}+b_o) \\\\
c_t&&=f_t\\odot c_{t-1}+i_t\\odot\\tanh(W_cx_t+U_ch_{t-1}+b_c) \\\\
h_t&&=o_t\\odot\\tanh(c_t).
References:
S. Hochreiter, and J. Schmidhuber. "Long Short-Term Memory."
Neural Computation. 1997.
Args:
x (~nnabla.Variable): Input N-D array with shape (batch_size, input_size).
h (~nnabla.Variable): Input N-D array with shape (batch_size, state_size).
c (~nnabla.Variable): Input N-D array with shape (batch_size, state_size).
state_size (int): Internal state size is set to `state_size`.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`, optional): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`, optional): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
Returns:
:class:`~nnabla.Variable`
"""
xh = F.concatenate(*(x, h), axis=1)
iofc = affine(xh, (4, state_size), w_init=w_init,
b_init=b_init, fix_parameters=fix_parameters)
i_t, o_t, f_t, gate = F.split(iofc, axis=1)
c_t = F.sigmoid(f_t) * c + F.sigmoid(i_t) * F.tanh(gate)
h_t = F.sigmoid(o_t) * F.tanh(c_t)
return h_t, c_t | [
"def",
"lstm_cell",
"(",
"x",
",",
"h",
",",
"c",
",",
"state_size",
",",
"w_init",
"=",
"None",
",",
"b_init",
"=",
"None",
",",
"fix_parameters",
"=",
"False",
")",
":",
"xh",
"=",
"F",
".",
"concatenate",
"(",
"*",
"(",
"x",
",",
"h",
")",
"... | Long Short-Term Memory.
Long Short-Term Memory, or LSTM, is a building block for recurrent neural networks (RNN) layers.
LSTM unit consists of a cell and input, output, forget gates whose functions are defined as following:
.. math::
f_t&&=\\sigma(W_fx_t+U_fh_{t-1}+b_f) \\\\
i_t&&=\\sigma(W_ix_t+U_ih_{t-1}+b_i) \\\\
o_t&&=\\sigma(W_ox_t+U_oh_{t-1}+b_o) \\\\
c_t&&=f_t\\odot c_{t-1}+i_t\\odot\\tanh(W_cx_t+U_ch_{t-1}+b_c) \\\\
h_t&&=o_t\\odot\\tanh(c_t).
References:
S. Hochreiter, and J. Schmidhuber. "Long Short-Term Memory."
Neural Computation. 1997.
Args:
x (~nnabla.Variable): Input N-D array with shape (batch_size, input_size).
h (~nnabla.Variable): Input N-D array with shape (batch_size, state_size).
c (~nnabla.Variable): Input N-D array with shape (batch_size, state_size).
state_size (int): Internal state size is set to `state_size`.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`, optional): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`, optional): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
Returns:
:class:`~nnabla.Variable` | [
"Long",
"Short",
"-",
"Term",
"Memory",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L2459-L2497 | train | 223,741 |
sony/nnabla | python/src/nnabla/parametric_functions.py | spectral_norm | def spectral_norm(w, dim=0, itr=1, eps=1e-12, test=False, u_init=None, fix_parameters=True):
"""Spectral Normalization.
.. math::
W_{sn} = \\frac{W}{\\sigma(W)}.
where :math:`W` is the input matrix, and the :math:`\\sigma(W)` is the spectral norm of :math:`W`. The spectral norm is approximately computed by the power iteration.
References:
Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida,
"Spectral Normalization for Generative Adversarial Networks",
International Conference on Learning Representations. 2018.
Args:
W (~nnabla.Variable): Input N-D array with shape. This is normally network parameter.
dim (`int`): Output dimension. Default is 0. If the dimension is not 0, then the specified dimension becomes the most-left dimension by transposing.
itr (`int`): Number of iterations. Default is 1.
eps (`float`): Epsilon for the normalization. Default is 1e-12.
test (`bool`): Use test mode. Default is False.
Returns:
~nnabla.Variable: Spectrally normalized :math:`W_{sn}` with the same shape as :math:`W`.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
b, c, h, w = 4, 64, 32, 32
# Spectrally normalized convolution
apply_w = lambda w: PF.spectral_norm(w, dim=0)
h = nn.Variable.from_numpy_array(np.random.randn(b, c, h, w))
h = PF.convolution(h, with_bias=False, apply_w=apply_w)
# Spectrally normalized affine
apply_w = lambda w: PF.spectral_norm(w, dim=1)
h = nn.Variable.from_numpy_array(np.random.randn(b, c))
h = PF.affine(h, with_bias=False, apply_w=apply_w)
# Spectrally normalized embed
apply_w = lambda w: PF.spectral_norm(w, dim=1)
h = nn.Variable.from_numpy_array(np.random.randn(b, c))
h = PF.embed(h, c, apply_w=apply_w)
"""
assert (0 <= dim and dim < len(w.shape)
), "`dim` must be `0 <= dim and dim < len(w.shape)`."
assert 0 < itr, "`itr` must be greater than 0."
assert 0 < eps, "`eps` must be greater than 0."
if dim == len(w.shape) - 1:
w_sn = _spectral_norm_outer_most_dim(w, dim=dim, itr=itr, eps=eps, test=test,
u_init=u_init, fix_parameters=fix_parameters)
else:
w_sn = _spectral_norm(w, dim=dim, itr=itr, eps=eps, test=test,
u_init=u_init, fix_parameters=fix_parameters)
return w_sn | python | def spectral_norm(w, dim=0, itr=1, eps=1e-12, test=False, u_init=None, fix_parameters=True):
"""Spectral Normalization.
.. math::
W_{sn} = \\frac{W}{\\sigma(W)}.
where :math:`W` is the input matrix, and the :math:`\\sigma(W)` is the spectral norm of :math:`W`. The spectral norm is approximately computed by the power iteration.
References:
Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida,
"Spectral Normalization for Generative Adversarial Networks",
International Conference on Learning Representations. 2018.
Args:
W (~nnabla.Variable): Input N-D array with shape. This is normally network parameter.
dim (`int`): Output dimension. Default is 0. If the dimension is not 0, then the specified dimension becomes the most-left dimension by transposing.
itr (`int`): Number of iterations. Default is 1.
eps (`float`): Epsilon for the normalization. Default is 1e-12.
test (`bool`): Use test mode. Default is False.
Returns:
~nnabla.Variable: Spectrally normalized :math:`W_{sn}` with the same shape as :math:`W`.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
b, c, h, w = 4, 64, 32, 32
# Spectrally normalized convolution
apply_w = lambda w: PF.spectral_norm(w, dim=0)
h = nn.Variable.from_numpy_array(np.random.randn(b, c, h, w))
h = PF.convolution(h, with_bias=False, apply_w=apply_w)
# Spectrally normalized affine
apply_w = lambda w: PF.spectral_norm(w, dim=1)
h = nn.Variable.from_numpy_array(np.random.randn(b, c))
h = PF.affine(h, with_bias=False, apply_w=apply_w)
# Spectrally normalized embed
apply_w = lambda w: PF.spectral_norm(w, dim=1)
h = nn.Variable.from_numpy_array(np.random.randn(b, c))
h = PF.embed(h, c, apply_w=apply_w)
"""
assert (0 <= dim and dim < len(w.shape)
), "`dim` must be `0 <= dim and dim < len(w.shape)`."
assert 0 < itr, "`itr` must be greater than 0."
assert 0 < eps, "`eps` must be greater than 0."
if dim == len(w.shape) - 1:
w_sn = _spectral_norm_outer_most_dim(w, dim=dim, itr=itr, eps=eps, test=test,
u_init=u_init, fix_parameters=fix_parameters)
else:
w_sn = _spectral_norm(w, dim=dim, itr=itr, eps=eps, test=test,
u_init=u_init, fix_parameters=fix_parameters)
return w_sn | [
"def",
"spectral_norm",
"(",
"w",
",",
"dim",
"=",
"0",
",",
"itr",
"=",
"1",
",",
"eps",
"=",
"1e-12",
",",
"test",
"=",
"False",
",",
"u_init",
"=",
"None",
",",
"fix_parameters",
"=",
"True",
")",
":",
"assert",
"(",
"0",
"<=",
"dim",
"and",
... | Spectral Normalization.
.. math::
W_{sn} = \\frac{W}{\\sigma(W)}.
where :math:`W` is the input matrix, and the :math:`\\sigma(W)` is the spectral norm of :math:`W`. The spectral norm is approximately computed by the power iteration.
References:
Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida,
"Spectral Normalization for Generative Adversarial Networks",
International Conference on Learning Representations. 2018.
Args:
W (~nnabla.Variable): Input N-D array with shape. This is normally network parameter.
dim (`int`): Output dimension. Default is 0. If the dimension is not 0, then the specified dimension becomes the most-left dimension by transposing.
itr (`int`): Number of iterations. Default is 1.
eps (`float`): Epsilon for the normalization. Default is 1e-12.
test (`bool`): Use test mode. Default is False.
Returns:
~nnabla.Variable: Spectrally normalized :math:`W_{sn}` with the same shape as :math:`W`.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
b, c, h, w = 4, 64, 32, 32
# Spectrally normalized convolution
apply_w = lambda w: PF.spectral_norm(w, dim=0)
h = nn.Variable.from_numpy_array(np.random.randn(b, c, h, w))
h = PF.convolution(h, with_bias=False, apply_w=apply_w)
# Spectrally normalized affine
apply_w = lambda w: PF.spectral_norm(w, dim=1)
h = nn.Variable.from_numpy_array(np.random.randn(b, c))
h = PF.affine(h, with_bias=False, apply_w=apply_w)
# Spectrally normalized embed
apply_w = lambda w: PF.spectral_norm(w, dim=1)
h = nn.Variable.from_numpy_array(np.random.randn(b, c))
h = PF.embed(h, c, apply_w=apply_w) | [
"Spectral",
"Normalization",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L2556-L2618 | train | 223,742 |
sony/nnabla | python/src/nnabla/parametric_functions.py | LSTMCell.reset_state | def reset_state(self):
"""
Resets states h and c to zero.
"""
self.h.data.zero()
self.c.data.zero() | python | def reset_state(self):
"""
Resets states h and c to zero.
"""
self.h.data.zero()
self.c.data.zero() | [
"def",
"reset_state",
"(",
"self",
")",
":",
"self",
".",
"h",
".",
"data",
".",
"zero",
"(",
")",
"self",
".",
"c",
".",
"data",
".",
"zero",
"(",
")"
] | Resets states h and c to zero. | [
"Resets",
"states",
"h",
"and",
"c",
"to",
"zero",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parametric_functions.py#L2526-L2532 | train | 223,743 |
sony/nnabla | python/benchmark/function/function_benchmark.py | Timer.lap | def lap(self):
"""Calculate lap time.
Returns:
float: Lap time. The duration from the previous call of ``lap()``
or initialization at first call.
float: Total time. The duration from initialization.
"""
now = time.time()
lap_time = now - self.lap_time
total_time = now - self.start
self.lap_time = now
return lap_time, total_time | python | def lap(self):
"""Calculate lap time.
Returns:
float: Lap time. The duration from the previous call of ``lap()``
or initialization at first call.
float: Total time. The duration from initialization.
"""
now = time.time()
lap_time = now - self.lap_time
total_time = now - self.start
self.lap_time = now
return lap_time, total_time | [
"def",
"lap",
"(",
"self",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"lap_time",
"=",
"now",
"-",
"self",
".",
"lap_time",
"total_time",
"=",
"now",
"-",
"self",
".",
"start",
"self",
".",
"lap_time",
"=",
"now",
"return",
"lap_time",
",... | Calculate lap time.
Returns:
float: Lap time. The duration from the previous call of ``lap()``
or initialization at first call.
float: Total time. The duration from initialization. | [
"Calculate",
"lap",
"time",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/benchmark/function/function_benchmark.py#L45-L58 | train | 223,744 |
sony/nnabla | python/benchmark/function/function_benchmark.py | FunctionBenchmarkWriter.write | def write(self, fb):
"""Write a single function benchmark.
Args:
fb (FunctionBenchmark): FunctionBenchmark class instance.
Before passing to this, you should call ``fb.benchmark()``.
"""
print('[{}.{}]'.format(fb.module, fb.func.__name__), file=self.file)
print('class = {}'.format(fb.func_ins.name), file=self.file)
print('inspecs = {}'.format(repr(fb.inspecs)), file=self.file)
print('func_args = {}'.format(repr(fb.func_args)), file=self.file)
print('func_kwargs = {}'.format(repr(fb.func_kwargs)), file=self.file)
print('ext = ({}, {})'.format(
repr(fb.ext), repr(fb.ext_kwargs)), file=self.file)
if self.setup_stat is not None:
self._write_a_stat('setup', self.setup_stat)
if self.foward_stat is not None:
self._write_a_stat('forward', self.forward_stat)
if self.backward_stat is not None:
self._write_a_stat('backward', self.backward_stat) | python | def write(self, fb):
"""Write a single function benchmark.
Args:
fb (FunctionBenchmark): FunctionBenchmark class instance.
Before passing to this, you should call ``fb.benchmark()``.
"""
print('[{}.{}]'.format(fb.module, fb.func.__name__), file=self.file)
print('class = {}'.format(fb.func_ins.name), file=self.file)
print('inspecs = {}'.format(repr(fb.inspecs)), file=self.file)
print('func_args = {}'.format(repr(fb.func_args)), file=self.file)
print('func_kwargs = {}'.format(repr(fb.func_kwargs)), file=self.file)
print('ext = ({}, {})'.format(
repr(fb.ext), repr(fb.ext_kwargs)), file=self.file)
if self.setup_stat is not None:
self._write_a_stat('setup', self.setup_stat)
if self.foward_stat is not None:
self._write_a_stat('forward', self.forward_stat)
if self.backward_stat is not None:
self._write_a_stat('backward', self.backward_stat) | [
"def",
"write",
"(",
"self",
",",
"fb",
")",
":",
"print",
"(",
"'[{}.{}]'",
".",
"format",
"(",
"fb",
".",
"module",
",",
"fb",
".",
"func",
".",
"__name__",
")",
",",
"file",
"=",
"self",
".",
"file",
")",
"print",
"(",
"'class = {}'",
".",
"fo... | Write a single function benchmark.
Args:
fb (FunctionBenchmark): FunctionBenchmark class instance.
Before passing to this, you should call ``fb.benchmark()``. | [
"Write",
"a",
"single",
"function",
"benchmark",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/benchmark/function/function_benchmark.py#L87-L107 | train | 223,745 |
sony/nnabla | python/benchmark/function/function_benchmark.py | FunctionBenchmark._setup | def _setup(self, delete=True):
"""Create a function instance and execute setup.
Args:
delete (bool): Delete buffered variables.
"""
if delete:
self.clear()
with nn.context_scope(self.ctx):
outputs = self.func(
*(self.inputs_f + self.func_args), **self.func_kwargs)
if not hasattr(outputs, '__iter__'):
self.outputs = [outputs]
else:
self.outputs = outputs
self.func_ins = self.outputs[0].parent
self.inputs = self.func_ins.inputs | python | def _setup(self, delete=True):
"""Create a function instance and execute setup.
Args:
delete (bool): Delete buffered variables.
"""
if delete:
self.clear()
with nn.context_scope(self.ctx):
outputs = self.func(
*(self.inputs_f + self.func_args), **self.func_kwargs)
if not hasattr(outputs, '__iter__'):
self.outputs = [outputs]
else:
self.outputs = outputs
self.func_ins = self.outputs[0].parent
self.inputs = self.func_ins.inputs | [
"def",
"_setup",
"(",
"self",
",",
"delete",
"=",
"True",
")",
":",
"if",
"delete",
":",
"self",
".",
"clear",
"(",
")",
"with",
"nn",
".",
"context_scope",
"(",
"self",
".",
"ctx",
")",
":",
"outputs",
"=",
"self",
".",
"func",
"(",
"*",
"(",
... | Create a function instance and execute setup.
Args:
delete (bool): Delete buffered variables. | [
"Create",
"a",
"function",
"instance",
"and",
"execute",
"setup",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/benchmark/function/function_benchmark.py#L243-L260 | train | 223,746 |
sony/nnabla | python/benchmark/function/function_benchmark.py | FunctionBenchmark.benchmark_setup | def benchmark_setup(self):
"""Benchmark setup execution.
"""
def f():
self._setup()
self.mod_ext.synchronize(**self.ext_kwargs)
f() # Ignore first
self.setup_stat = self._calc_benchmark_stat(f) | python | def benchmark_setup(self):
"""Benchmark setup execution.
"""
def f():
self._setup()
self.mod_ext.synchronize(**self.ext_kwargs)
f() # Ignore first
self.setup_stat = self._calc_benchmark_stat(f) | [
"def",
"benchmark_setup",
"(",
"self",
")",
":",
"def",
"f",
"(",
")",
":",
"self",
".",
"_setup",
"(",
")",
"self",
".",
"mod_ext",
".",
"synchronize",
"(",
"*",
"*",
"self",
".",
"ext_kwargs",
")",
"f",
"(",
")",
"# Ignore first",
"self",
".",
"s... | Benchmark setup execution. | [
"Benchmark",
"setup",
"execution",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/benchmark/function/function_benchmark.py#L276-L283 | train | 223,747 |
sony/nnabla | python/benchmark/function/function_benchmark.py | FunctionBenchmark.benchmark_forward | def benchmark_forward(self):
"""Benchmark forward execution.
"""
self._setup()
def f():
self._forward()
self.mod_ext.synchronize(**self.ext_kwargs)
f() # Ignore first
self.forward_stat = self._calc_benchmark_stat(f) | python | def benchmark_forward(self):
"""Benchmark forward execution.
"""
self._setup()
def f():
self._forward()
self.mod_ext.synchronize(**self.ext_kwargs)
f() # Ignore first
self.forward_stat = self._calc_benchmark_stat(f) | [
"def",
"benchmark_forward",
"(",
"self",
")",
":",
"self",
".",
"_setup",
"(",
")",
"def",
"f",
"(",
")",
":",
"self",
".",
"_forward",
"(",
")",
"self",
".",
"mod_ext",
".",
"synchronize",
"(",
"*",
"*",
"self",
".",
"ext_kwargs",
")",
"f",
"(",
... | Benchmark forward execution. | [
"Benchmark",
"forward",
"execution",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/benchmark/function/function_benchmark.py#L285-L294 | train | 223,748 |
sony/nnabla | python/benchmark/function/function_benchmark.py | FunctionBenchmark.benchmark_backward | def benchmark_backward(self):
"""Benchmark backward execution.
Note:
If backward execution throws any exception,
this benchmark system considers the error is because the function
doesn't support backward operation, then set the benchmark
``None``.
"""
try:
self._benchmark_backward()
except RuntimeError as e:
# Seems like not implemented.
print(e)
self.mod_ext.synchronize(**self.ext_kwargs)
self.backward_stat = None | python | def benchmark_backward(self):
"""Benchmark backward execution.
Note:
If backward execution throws any exception,
this benchmark system considers the error is because the function
doesn't support backward operation, then set the benchmark
``None``.
"""
try:
self._benchmark_backward()
except RuntimeError as e:
# Seems like not implemented.
print(e)
self.mod_ext.synchronize(**self.ext_kwargs)
self.backward_stat = None | [
"def",
"benchmark_backward",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_benchmark_backward",
"(",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"# Seems like not implemented.",
"print",
"(",
"e",
")",
"self",
".",
"mod_ext",
".",
"synchronize",
"(",
... | Benchmark backward execution.
Note:
If backward execution throws any exception,
this benchmark system considers the error is because the function
doesn't support backward operation, then set the benchmark
``None``. | [
"Benchmark",
"backward",
"execution",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/benchmark/function/function_benchmark.py#L308-L324 | train | 223,749 |
sony/nnabla | python/src/nnabla_ext/cpu/__init__.py | context | def context(type_config='float', **kw):
"""CPU Context."""
backends = ['cpu:float']
if type_config == 'half':
backends = ['cpu:half', 'cpu:float']
elif type_config == 'float':
pass
else:
raise ValueError("Unknown data type config is given %s" % type_config)
return nn.Context(backends, array_classes()[0], '') | python | def context(type_config='float', **kw):
"""CPU Context."""
backends = ['cpu:float']
if type_config == 'half':
backends = ['cpu:half', 'cpu:float']
elif type_config == 'float':
pass
else:
raise ValueError("Unknown data type config is given %s" % type_config)
return nn.Context(backends, array_classes()[0], '') | [
"def",
"context",
"(",
"type_config",
"=",
"'float'",
",",
"*",
"*",
"kw",
")",
":",
"backends",
"=",
"[",
"'cpu:float'",
"]",
"if",
"type_config",
"==",
"'half'",
":",
"backends",
"=",
"[",
"'cpu:half'",
",",
"'cpu:float'",
"]",
"elif",
"type_config",
"... | CPU Context. | [
"CPU",
"Context",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla_ext/cpu/__init__.py#L31-L40 | train | 223,750 |
sony/nnabla | python/src/nnabla/utils/converter/nnablart/utils.py | revise_buffer_size | def revise_buffer_size(info, settings):
'''
This function is used to revise buffer size, use byte
as its unit, instead of data item.
This is only used for nnb, not for csrc.
When settings contains user customized data type, not pure
FLOAT32, it affects the memory consumption.
'''
size_mapping = {
'FLOAT32': 4,
'FIXED16': 2,
'FIXED8': 1
}
var_dict = settings['variables']
buffer_index = 0
info._variable_sizes = []
info._variable_buffer_index = collections.OrderedDict()
info._variable_buffer_size = collections.OrderedDict()
info._buffer_ids = {}
for n, v in enumerate(info._network.variable):
byte_per_item = size_mapping.get(var_dict.get(
v.name, 'FLOAT32').split('_')[0], 4)
size = nnabla.utils.converter.calc_shape_size(
v.shape, info._batch_size) * byte_per_item
info._variable_sizes.append(size)
if v.type == 'Buffer':
info._variable_buffer_index[buffer_index] = [n]
for vid in info._variable_buffer_index[buffer_index]:
info._buffer_ids[vid] = buffer_index
info._variable_buffer_size[buffer_index] = size
buffer_index += 1 | python | def revise_buffer_size(info, settings):
'''
This function is used to revise buffer size, use byte
as its unit, instead of data item.
This is only used for nnb, not for csrc.
When settings contains user customized data type, not pure
FLOAT32, it affects the memory consumption.
'''
size_mapping = {
'FLOAT32': 4,
'FIXED16': 2,
'FIXED8': 1
}
var_dict = settings['variables']
buffer_index = 0
info._variable_sizes = []
info._variable_buffer_index = collections.OrderedDict()
info._variable_buffer_size = collections.OrderedDict()
info._buffer_ids = {}
for n, v in enumerate(info._network.variable):
byte_per_item = size_mapping.get(var_dict.get(
v.name, 'FLOAT32').split('_')[0], 4)
size = nnabla.utils.converter.calc_shape_size(
v.shape, info._batch_size) * byte_per_item
info._variable_sizes.append(size)
if v.type == 'Buffer':
info._variable_buffer_index[buffer_index] = [n]
for vid in info._variable_buffer_index[buffer_index]:
info._buffer_ids[vid] = buffer_index
info._variable_buffer_size[buffer_index] = size
buffer_index += 1 | [
"def",
"revise_buffer_size",
"(",
"info",
",",
"settings",
")",
":",
"size_mapping",
"=",
"{",
"'FLOAT32'",
":",
"4",
",",
"'FIXED16'",
":",
"2",
",",
"'FIXED8'",
":",
"1",
"}",
"var_dict",
"=",
"settings",
"[",
"'variables'",
"]",
"buffer_index",
"=",
"... | This function is used to revise buffer size, use byte
as its unit, instead of data item.
This is only used for nnb, not for csrc.
When settings contains user customized data type, not pure
FLOAT32, it affects the memory consumption. | [
"This",
"function",
"is",
"used",
"to",
"revise",
"buffer",
"size",
"use",
"byte",
"as",
"its",
"unit",
"instead",
"of",
"data",
"item",
".",
"This",
"is",
"only",
"used",
"for",
"nnb",
"not",
"for",
"csrc",
".",
"When",
"settings",
"contains",
"user",
... | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/converter/nnablart/utils.py#L111-L143 | train | 223,751 |
sony/nnabla | python/src/nnabla/models/imagenet/base.py | ImageNetBase.category_names | def category_names(self):
'''
Returns category names of 1000 ImageNet classes.
'''
if hasattr(self, '_category_names'):
return self._category_names
with open(os.path.join(os.path.dirname(__file__), 'category_names.txt'), 'r') as fd:
self._category_names = fd.read().splitlines()
return self._category_names | python | def category_names(self):
'''
Returns category names of 1000 ImageNet classes.
'''
if hasattr(self, '_category_names'):
return self._category_names
with open(os.path.join(os.path.dirname(__file__), 'category_names.txt'), 'r') as fd:
self._category_names = fd.read().splitlines()
return self._category_names | [
"def",
"category_names",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_category_names'",
")",
":",
"return",
"self",
".",
"_category_names",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"("... | Returns category names of 1000 ImageNet classes. | [
"Returns",
"category",
"names",
"of",
"1000",
"ImageNet",
"classes",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/models/imagenet/base.py#L29-L37 | train | 223,752 |
sony/nnabla | python/src/nnabla/utils/profiler.py | GraphProfilerCsvWriter.write | def write(self):
"""
Write result to the file.
The output file is specified by ``file``.
"""
writer = csv.writer(self.file)
for f, b in zip(self.gb.result["forward"], self.gb.result["backward"]):
f = f._asdict()
b = b._asdict()
if not self.check_same(f, b):
raise AssertionError()
args_info = ", ".join(["{}: {}".format(k, v)
for k, v in f["args_info"]])
out = [f["parameter_scope"], f["function_name"], f["inputs_shape"], args_info,
f["mean_time"], b["mean_time"], f["n_run"], b["n_run"]]
writer.writerow(out)
writer.writerow([])
writer.writerow(["forward all", self.gb.result["forward_all"]])
writer.writerow(
["forward_all_n_run", self.gb.result["n_run_forward_all"]])
writer.writerow([])
writer.writerow(["backward all", self.gb.result["backward_all"]])
writer.writerow(
["backward_all_n_run", self.gb.result["n_run_backward_all"]])
if set(self.gb.result.keys()) >= {"training", "n_run_training"}:
writer.writerow([])
writer.writerow(
["training(forward + backward + update)", self.gb.result["training"]])
writer.writerow(
["training_n_run", self.gb.result["n_run_training"]]) | python | def write(self):
"""
Write result to the file.
The output file is specified by ``file``.
"""
writer = csv.writer(self.file)
for f, b in zip(self.gb.result["forward"], self.gb.result["backward"]):
f = f._asdict()
b = b._asdict()
if not self.check_same(f, b):
raise AssertionError()
args_info = ", ".join(["{}: {}".format(k, v)
for k, v in f["args_info"]])
out = [f["parameter_scope"], f["function_name"], f["inputs_shape"], args_info,
f["mean_time"], b["mean_time"], f["n_run"], b["n_run"]]
writer.writerow(out)
writer.writerow([])
writer.writerow(["forward all", self.gb.result["forward_all"]])
writer.writerow(
["forward_all_n_run", self.gb.result["n_run_forward_all"]])
writer.writerow([])
writer.writerow(["backward all", self.gb.result["backward_all"]])
writer.writerow(
["backward_all_n_run", self.gb.result["n_run_backward_all"]])
if set(self.gb.result.keys()) >= {"training", "n_run_training"}:
writer.writerow([])
writer.writerow(
["training(forward + backward + update)", self.gb.result["training"]])
writer.writerow(
["training_n_run", self.gb.result["n_run_training"]]) | [
"def",
"write",
"(",
"self",
")",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"self",
".",
"file",
")",
"for",
"f",
",",
"b",
"in",
"zip",
"(",
"self",
".",
"gb",
".",
"result",
"[",
"\"forward\"",
"]",
",",
"self",
".",
"gb",
".",
"result",... | Write result to the file.
The output file is specified by ``file``. | [
"Write",
"result",
"to",
"the",
"file",
".",
"The",
"output",
"file",
"is",
"specified",
"by",
"file",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/profiler.py#L103-L139 | train | 223,753 |
sony/nnabla | python/src/nnabla/monitor.py | plot_series | def plot_series(filename, plot_kwargs=None):
'''Plot series data from MonitorSeries output text file.
Args:
filename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.
plot_kwags (dict, optional):
Keyward arguments passed to :function:`matplotlib.pyplot.plot`.
Note:
matplotlib package is required.
'''
import matplotlib.pyplot as plt
if plot_kwargs is None:
plot_kwargs = {}
data = np.genfromtxt(filename, dtype='i8,f4', names=['k', 'v'])
index = data['k']
values = data['v']
plt.plot(index, values, **plot_kwargs) | python | def plot_series(filename, plot_kwargs=None):
'''Plot series data from MonitorSeries output text file.
Args:
filename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.
plot_kwags (dict, optional):
Keyward arguments passed to :function:`matplotlib.pyplot.plot`.
Note:
matplotlib package is required.
'''
import matplotlib.pyplot as plt
if plot_kwargs is None:
plot_kwargs = {}
data = np.genfromtxt(filename, dtype='i8,f4', names=['k', 'v'])
index = data['k']
values = data['v']
plt.plot(index, values, **plot_kwargs) | [
"def",
"plot_series",
"(",
"filename",
",",
"plot_kwargs",
"=",
"None",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"if",
"plot_kwargs",
"is",
"None",
":",
"plot_kwargs",
"=",
"{",
"}",
"data",
"=",
"np",
".",
"genfromtxt",
"(",
"filena... | Plot series data from MonitorSeries output text file.
Args:
filename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.
plot_kwags (dict, optional):
Keyward arguments passed to :function:`matplotlib.pyplot.plot`.
Note:
matplotlib package is required. | [
"Plot",
"series",
"data",
"from",
"MonitorSeries",
"output",
"text",
"file",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/monitor.py#L378-L398 | train | 223,754 |
sony/nnabla | python/src/nnabla/monitor.py | plot_time_elapsed | def plot_time_elapsed(filename, elapsed=False, unit='s', plot_kwargs=None):
'''Plot series data from MonitorTimeElapsed output text file.
Args:
filename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.
elapsed (bool): If ``True``, it plots the total elapsed time.
unit (str):
Time unit chosen from ``'s'``, ``'m'``, ``'h'``, or ``'d'``.
plot_kwags (dict, optional):
Keyward arguments passed to :function:`matplotlib.pyplot.plot`.
Note:
matplotlib package is required.
'''
import matplotlib.pyplot as plt
if plot_kwargs is None:
plot_kwargs = {}
data_column = 3 if elapsed else 1
data = np.genfromtxt(filename, dtype='i8,f4',
usecols=(0, data_column), names=['k', 'v'])
index = data['k']
values = data['v']
if unit == 's':
pass
elif unit == 'm':
values /= 60
elif unit == 'h':
values /= 3600
elif unit == 'd':
values /= 3600 * 24
else:
raise ValueError('The argument `unit` must be chosen from {s|m|h|d}.')
plt.plot(index, values, **plot_kwargs) | python | def plot_time_elapsed(filename, elapsed=False, unit='s', plot_kwargs=None):
'''Plot series data from MonitorTimeElapsed output text file.
Args:
filename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.
elapsed (bool): If ``True``, it plots the total elapsed time.
unit (str):
Time unit chosen from ``'s'``, ``'m'``, ``'h'``, or ``'d'``.
plot_kwags (dict, optional):
Keyward arguments passed to :function:`matplotlib.pyplot.plot`.
Note:
matplotlib package is required.
'''
import matplotlib.pyplot as plt
if plot_kwargs is None:
plot_kwargs = {}
data_column = 3 if elapsed else 1
data = np.genfromtxt(filename, dtype='i8,f4',
usecols=(0, data_column), names=['k', 'v'])
index = data['k']
values = data['v']
if unit == 's':
pass
elif unit == 'm':
values /= 60
elif unit == 'h':
values /= 3600
elif unit == 'd':
values /= 3600 * 24
else:
raise ValueError('The argument `unit` must be chosen from {s|m|h|d}.')
plt.plot(index, values, **plot_kwargs) | [
"def",
"plot_time_elapsed",
"(",
"filename",
",",
"elapsed",
"=",
"False",
",",
"unit",
"=",
"'s'",
",",
"plot_kwargs",
"=",
"None",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"if",
"plot_kwargs",
"is",
"None",
":",
"plot_kwargs",
"=",
... | Plot series data from MonitorTimeElapsed output text file.
Args:
filename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.
elapsed (bool): If ``True``, it plots the total elapsed time.
unit (str):
Time unit chosen from ``'s'``, ``'m'``, ``'h'``, or ``'d'``.
plot_kwags (dict, optional):
Keyward arguments passed to :function:`matplotlib.pyplot.plot`.
Note:
matplotlib package is required. | [
"Plot",
"series",
"data",
"from",
"MonitorTimeElapsed",
"output",
"text",
"file",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/monitor.py#L401-L436 | train | 223,755 |
sony/nnabla | python/src/nnabla/monitor.py | MonitorSeries.add | def add(self, index, value):
"""Add a value to the series.
Args:
index (int): Index.
value (float): Value.
"""
self.buf.append(value)
if (index - self.flush_at) < self.interval:
return
value = np.mean(self.buf)
if self.verbose:
logger.info("iter={} {{{}}}={}".format(index, self.name, value))
if self.fd is not None:
print("{} {:g}".format(index, value), file=self.fd)
self.flush_at = index
self.buf = [] | python | def add(self, index, value):
"""Add a value to the series.
Args:
index (int): Index.
value (float): Value.
"""
self.buf.append(value)
if (index - self.flush_at) < self.interval:
return
value = np.mean(self.buf)
if self.verbose:
logger.info("iter={} {{{}}}={}".format(index, self.name, value))
if self.fd is not None:
print("{} {:g}".format(index, value), file=self.fd)
self.flush_at = index
self.buf = [] | [
"def",
"add",
"(",
"self",
",",
"index",
",",
"value",
")",
":",
"self",
".",
"buf",
".",
"append",
"(",
"value",
")",
"if",
"(",
"index",
"-",
"self",
".",
"flush_at",
")",
"<",
"self",
".",
"interval",
":",
"return",
"value",
"=",
"np",
".",
... | Add a value to the series.
Args:
index (int): Index.
value (float): Value. | [
"Add",
"a",
"value",
"to",
"the",
"series",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/monitor.py#L83-L100 | train | 223,756 |
sony/nnabla | python/src/nnabla/monitor.py | MonitorTimeElapsed.add | def add(self, index):
"""Calculate time elapsed from the point previously called
this method or this object is created to this is called.
Args:
index (int): Index to be displayed, and be used to take intervals.
"""
if (index - self.flush_at) < self.interval:
return
now = time.time()
elapsed = now - self.lap
elapsed_total = now - self.start
it = index - self.flush_at
self.lap = now
if self.verbose:
logger.info("iter={} {{{}}}={}[sec/{}iter] {}[sec]".format(
index, self.name, elapsed, it, elapsed_total))
if self.fd is not None:
print("{} {} {} {}".format(index, elapsed,
it, elapsed_total), file=self.fd)
self.flush_at = index | python | def add(self, index):
"""Calculate time elapsed from the point previously called
this method or this object is created to this is called.
Args:
index (int): Index to be displayed, and be used to take intervals.
"""
if (index - self.flush_at) < self.interval:
return
now = time.time()
elapsed = now - self.lap
elapsed_total = now - self.start
it = index - self.flush_at
self.lap = now
if self.verbose:
logger.info("iter={} {{{}}}={}[sec/{}iter] {}[sec]".format(
index, self.name, elapsed, it, elapsed_total))
if self.fd is not None:
print("{} {} {} {}".format(index, elapsed,
it, elapsed_total), file=self.fd)
self.flush_at = index | [
"def",
"add",
"(",
"self",
",",
"index",
")",
":",
"if",
"(",
"index",
"-",
"self",
".",
"flush_at",
")",
"<",
"self",
".",
"interval",
":",
"return",
"now",
"=",
"time",
".",
"time",
"(",
")",
"elapsed",
"=",
"now",
"-",
"self",
".",
"lap",
"e... | Calculate time elapsed from the point previously called
this method or this object is created to this is called.
Args:
index (int): Index to be displayed, and be used to take intervals. | [
"Calculate",
"time",
"elapsed",
"from",
"the",
"point",
"previously",
"called",
"this",
"method",
"or",
"this",
"object",
"is",
"created",
"to",
"this",
"is",
"called",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/monitor.py#L145-L166 | train | 223,757 |
sony/nnabla | python/src/nnabla/monitor.py | MonitorImage.add | def add(self, index, var):
"""Add a minibatch of images to the monitor.
Args:
index (int): Index.
var (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`):
A minibatch of images with ``(N, ..., C, H, W)`` format.
If C == 2, blue channel is appended with ones. If C > 3,
the array will be sliced to remove C > 3 sub-array.
"""
import nnabla as nn
from nnabla.utils.image_utils import imsave
if index != 0 and (index + 1) % self.interval != 0:
return
if isinstance(var, nn.Variable):
data = var.d.copy()
elif isinstance(var, nn.NdArray):
data = var.data.copy()
else:
assert isinstance(var, np.ndarray)
data = var.copy()
assert data.ndim > 2
channels = data.shape[-3]
data = data.reshape(-1, *data.shape[-3:])
data = data[:min(data.shape[0], self.num_images)]
data = self.normalize_method(data)
if channels > 3:
data = data[:, :3]
elif channels == 2:
data = np.concatenate(
[data, np.ones((data.shape[0], 1) + data.shape[-2:])], axis=1)
path_tmpl = os.path.join(self.save_dir, '{:06d}-{}.png')
for j in range(min(self.num_images, data.shape[0])):
img = data[j].transpose(1, 2, 0)
if img.shape[-1] == 1:
img = img[..., 0]
path = path_tmpl.format(index, '{:03d}'.format(j))
imsave(path, img)
if self.verbose:
logger.info("iter={} {{{}}} are written to {}.".format(
index, self.name, path_tmpl.format(index, '*'))) | python | def add(self, index, var):
"""Add a minibatch of images to the monitor.
Args:
index (int): Index.
var (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`):
A minibatch of images with ``(N, ..., C, H, W)`` format.
If C == 2, blue channel is appended with ones. If C > 3,
the array will be sliced to remove C > 3 sub-array.
"""
import nnabla as nn
from nnabla.utils.image_utils import imsave
if index != 0 and (index + 1) % self.interval != 0:
return
if isinstance(var, nn.Variable):
data = var.d.copy()
elif isinstance(var, nn.NdArray):
data = var.data.copy()
else:
assert isinstance(var, np.ndarray)
data = var.copy()
assert data.ndim > 2
channels = data.shape[-3]
data = data.reshape(-1, *data.shape[-3:])
data = data[:min(data.shape[0], self.num_images)]
data = self.normalize_method(data)
if channels > 3:
data = data[:, :3]
elif channels == 2:
data = np.concatenate(
[data, np.ones((data.shape[0], 1) + data.shape[-2:])], axis=1)
path_tmpl = os.path.join(self.save_dir, '{:06d}-{}.png')
for j in range(min(self.num_images, data.shape[0])):
img = data[j].transpose(1, 2, 0)
if img.shape[-1] == 1:
img = img[..., 0]
path = path_tmpl.format(index, '{:03d}'.format(j))
imsave(path, img)
if self.verbose:
logger.info("iter={} {{{}}} are written to {}.".format(
index, self.name, path_tmpl.format(index, '*'))) | [
"def",
"add",
"(",
"self",
",",
"index",
",",
"var",
")",
":",
"import",
"nnabla",
"as",
"nn",
"from",
"nnabla",
".",
"utils",
".",
"image_utils",
"import",
"imsave",
"if",
"index",
"!=",
"0",
"and",
"(",
"index",
"+",
"1",
")",
"%",
"self",
".",
... | Add a minibatch of images to the monitor.
Args:
index (int): Index.
var (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`):
A minibatch of images with ``(N, ..., C, H, W)`` format.
If C == 2, blue channel is appended with ones. If C > 3,
the array will be sliced to remove C > 3 sub-array. | [
"Add",
"a",
"minibatch",
"of",
"images",
"to",
"the",
"monitor",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/monitor.py#L222-L263 | train | 223,758 |
sony/nnabla | python/src/nnabla/utils/data_iterator.py | data_iterator_simple | def data_iterator_simple(load_func,
num_examples,
batch_size,
shuffle=False,
rng=None,
with_memory_cache=True,
with_file_cache=True,
cache_dir=None,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
"""A generator that ``yield`` s minibatch data as a tuple, as defined in ``load_func`` .
It can unlimitedly yield minibatches at your request, queried from the provided data.
Args:
load_func (function): Takes a single argument `i`, an index of an
example in your dataset to be loaded, and returns a tuple of data.
Every call by any index `i` must return a tuple of arrays with
the same shape.
num_examples (int): Number of examples in your dataset. Random sequence
of indexes is generated according to this number.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option a is good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator.
Here is an example of `load_func` which returns an image and a label of a
classification dataset.
.. code-block:: python
import numpy as np
from nnabla.utils.image_utils import imread
image_paths = load_image_paths()
labels = load_labels()
def my_load_func(i):
'''
Returns:
image: c x h x w array
label: 0-shape array
'''
img = imread(image_paths[i]).astype('float32')
return np.rollaxis(img, 2), np.array(labels[i])
"""
return data_iterator(SimpleDataSource(load_func,
num_examples,
shuffle=shuffle,
rng=rng),
batch_size=batch_size,
with_memory_cache=with_memory_cache,
with_file_cache=with_file_cache,
cache_dir=cache_dir,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks) | python | def data_iterator_simple(load_func,
num_examples,
batch_size,
shuffle=False,
rng=None,
with_memory_cache=True,
with_file_cache=True,
cache_dir=None,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
"""A generator that ``yield`` s minibatch data as a tuple, as defined in ``load_func`` .
It can unlimitedly yield minibatches at your request, queried from the provided data.
Args:
load_func (function): Takes a single argument `i`, an index of an
example in your dataset to be loaded, and returns a tuple of data.
Every call by any index `i` must return a tuple of arrays with
the same shape.
num_examples (int): Number of examples in your dataset. Random sequence
of indexes is generated according to this number.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option a is good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator.
Here is an example of `load_func` which returns an image and a label of a
classification dataset.
.. code-block:: python
import numpy as np
from nnabla.utils.image_utils import imread
image_paths = load_image_paths()
labels = load_labels()
def my_load_func(i):
'''
Returns:
image: c x h x w array
label: 0-shape array
'''
img = imread(image_paths[i]).astype('float32')
return np.rollaxis(img, 2), np.array(labels[i])
"""
return data_iterator(SimpleDataSource(load_func,
num_examples,
shuffle=shuffle,
rng=rng),
batch_size=batch_size,
with_memory_cache=with_memory_cache,
with_file_cache=with_file_cache,
cache_dir=cache_dir,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks) | [
"def",
"data_iterator_simple",
"(",
"load_func",
",",
"num_examples",
",",
"batch_size",
",",
"shuffle",
"=",
"False",
",",
"rng",
"=",
"None",
",",
"with_memory_cache",
"=",
"True",
",",
"with_file_cache",
"=",
"True",
",",
"cache_dir",
"=",
"None",
",",
"e... | A generator that ``yield`` s minibatch data as a tuple, as defined in ``load_func`` .
It can unlimitedly yield minibatches at your request, queried from the provided data.
Args:
load_func (function): Takes a single argument `i`, an index of an
example in your dataset to be loaded, and returns a tuple of data.
Every call by any index `i` must return a tuple of arrays with
the same shape.
num_examples (int): Number of examples in your dataset. Random sequence
of indexes is generated according to this number.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option a is good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator.
Here is an example of `load_func` which returns an image and a label of a
classification dataset.
.. code-block:: python
import numpy as np
from nnabla.utils.image_utils import imread
image_paths = load_image_paths()
labels = load_labels()
def my_load_func(i):
'''
Returns:
image: c x h x w array
label: 0-shape array
'''
img = imread(image_paths[i]).astype('float32')
return np.rollaxis(img, 2), np.array(labels[i]) | [
"A",
"generator",
"that",
"yield",
"s",
"minibatch",
"data",
"as",
"a",
"tuple",
"as",
"defined",
"in",
"load_func",
".",
"It",
"can",
"unlimitedly",
"yield",
"minibatches",
"at",
"your",
"request",
"queried",
"from",
"the",
"provided",
"data",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/data_iterator.py#L426-L511 | train | 223,759 |
sony/nnabla | python/src/nnabla/utils/data_iterator.py | data_iterator_csv_dataset | def data_iterator_csv_dataset(uri,
batch_size,
shuffle=False,
rng=None,
normalize=True,
with_memory_cache=True,
with_file_cache=True,
cache_dir=None,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator_csv_dataset
Get data directly from a dataset provided as a CSV file.
You can read files located on the local file system, http(s) servers or Amazon AWS S3 storage.
For example,
.. code-block:: python
batch = data_iterator_csv_dataset('CSV_FILE.csv', batch_size, shuffle=True)
Args:
uri (str): Location of dataset CSV file.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
normalize (bool): If True, each sample in the data gets normalized by a factor of 255.
Default is True.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option a is good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator
'''
ds = CsvDataSource(uri,
shuffle=shuffle,
rng=rng,
normalize=normalize)
return data_iterator(ds,
batch_size=batch_size,
with_memory_cache=with_memory_cache,
with_file_cache=with_file_cache,
cache_dir=cache_dir,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks) | python | def data_iterator_csv_dataset(uri,
batch_size,
shuffle=False,
rng=None,
normalize=True,
with_memory_cache=True,
with_file_cache=True,
cache_dir=None,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator_csv_dataset
Get data directly from a dataset provided as a CSV file.
You can read files located on the local file system, http(s) servers or Amazon AWS S3 storage.
For example,
.. code-block:: python
batch = data_iterator_csv_dataset('CSV_FILE.csv', batch_size, shuffle=True)
Args:
uri (str): Location of dataset CSV file.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
normalize (bool): If True, each sample in the data gets normalized by a factor of 255.
Default is True.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option a is good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator
'''
ds = CsvDataSource(uri,
shuffle=shuffle,
rng=rng,
normalize=normalize)
return data_iterator(ds,
batch_size=batch_size,
with_memory_cache=with_memory_cache,
with_file_cache=with_file_cache,
cache_dir=cache_dir,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks) | [
"def",
"data_iterator_csv_dataset",
"(",
"uri",
",",
"batch_size",
",",
"shuffle",
"=",
"False",
",",
"rng",
"=",
"None",
",",
"normalize",
"=",
"True",
",",
"with_memory_cache",
"=",
"True",
",",
"with_file_cache",
"=",
"True",
",",
"cache_dir",
"=",
"None"... | data_iterator_csv_dataset
Get data directly from a dataset provided as a CSV file.
You can read files located on the local file system, http(s) servers or Amazon AWS S3 storage.
For example,
.. code-block:: python
batch = data_iterator_csv_dataset('CSV_FILE.csv', batch_size, shuffle=True)
Args:
uri (str): Location of dataset CSV file.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
normalize (bool): If True, each sample in the data gets normalized by a factor of 255.
Default is True.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option a is good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator | [
"data_iterator_csv_dataset",
"Get",
"data",
"directly",
"from",
"a",
"dataset",
"provided",
"as",
"a",
"CSV",
"file",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/data_iterator.py#L514-L585 | train | 223,760 |
sony/nnabla | python/src/nnabla/utils/data_iterator.py | data_iterator_cache | def data_iterator_cache(uri,
batch_size,
shuffle=False,
rng=None,
normalize=True,
with_memory_cache=True,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator_cache
Get data from the cache directory.
Cache files are read from the local file system.
For example,
.. code-block:: python
batch = data_iterator_cache('CACHE_DIR', batch_size, shuffle=True)
Args:
uri (str): Location of directory with cache files.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
normalize (bool): If True, each sample in the data gets normalized by a factor of 255.
Default is True.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator
'''
ds = CacheDataSource(uri,
shuffle=shuffle,
rng=rng,
normalize=normalize)
return data_iterator(ds,
batch_size=batch_size,
with_memory_cache=with_memory_cache,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks) | python | def data_iterator_cache(uri,
batch_size,
shuffle=False,
rng=None,
normalize=True,
with_memory_cache=True,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator_cache
Get data from the cache directory.
Cache files are read from the local file system.
For example,
.. code-block:: python
batch = data_iterator_cache('CACHE_DIR', batch_size, shuffle=True)
Args:
uri (str): Location of directory with cache files.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
normalize (bool): If True, each sample in the data gets normalized by a factor of 255.
Default is True.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator
'''
ds = CacheDataSource(uri,
shuffle=shuffle,
rng=rng,
normalize=normalize)
return data_iterator(ds,
batch_size=batch_size,
with_memory_cache=with_memory_cache,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks) | [
"def",
"data_iterator_cache",
"(",
"uri",
",",
"batch_size",
",",
"shuffle",
"=",
"False",
",",
"rng",
"=",
"None",
",",
"normalize",
"=",
"True",
",",
"with_memory_cache",
"=",
"True",
",",
"epoch_begin_callbacks",
"=",
"[",
"]",
",",
"epoch_end_callbacks",
... | data_iterator_cache
Get data from the cache directory.
Cache files are read from the local file system.
For example,
.. code-block:: python
batch = data_iterator_cache('CACHE_DIR', batch_size, shuffle=True)
Args:
uri (str): Location of directory with cache files.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
normalize (bool): If True, each sample in the data gets normalized by a factor of 255.
Default is True.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator | [
"data_iterator_cache",
"Get",
"data",
"from",
"the",
"cache",
"directory",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/data_iterator.py#L588-L643 | train | 223,761 |
sony/nnabla | python/src/nnabla/utils/data_iterator.py | data_iterator_concat_datasets | def data_iterator_concat_datasets(data_source_list,
batch_size,
shuffle=False,
rng=None,
with_memory_cache=True,
with_file_cache=False,
cache_dir=None,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator_concat_datasets
Get data from multiple datasets.
For example,
.. code-block:: python
batch = data_iterator_concat_datasets([DataSource0, DataSource1, ...], batch_size)
Args:
data_source_list (list of DataSource): list of datasets.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option a is good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator
'''
ds = ConcatDataSource(data_source_list,
shuffle=shuffle,
rng=rng)
return data_iterator(ds,
batch_size=batch_size,
with_memory_cache=with_memory_cache,
with_file_cache=with_file_cache,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks) | python | def data_iterator_concat_datasets(data_source_list,
batch_size,
shuffle=False,
rng=None,
with_memory_cache=True,
with_file_cache=False,
cache_dir=None,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator_concat_datasets
Get data from multiple datasets.
For example,
.. code-block:: python
batch = data_iterator_concat_datasets([DataSource0, DataSource1, ...], batch_size)
Args:
data_source_list (list of DataSource): list of datasets.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option a is good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator
'''
ds = ConcatDataSource(data_source_list,
shuffle=shuffle,
rng=rng)
return data_iterator(ds,
batch_size=batch_size,
with_memory_cache=with_memory_cache,
with_file_cache=with_file_cache,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks) | [
"def",
"data_iterator_concat_datasets",
"(",
"data_source_list",
",",
"batch_size",
",",
"shuffle",
"=",
"False",
",",
"rng",
"=",
"None",
",",
"with_memory_cache",
"=",
"True",
",",
"with_file_cache",
"=",
"False",
",",
"cache_dir",
"=",
"None",
",",
"epoch_beg... | data_iterator_concat_datasets
Get data from multiple datasets.
For example,
.. code-block:: python
batch = data_iterator_concat_datasets([DataSource0, DataSource1, ...], batch_size)
Args:
data_source_list (list of DataSource): list of datasets.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option a is good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator | [
"data_iterator_concat_datasets",
"Get",
"data",
"from",
"multiple",
"datasets",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/data_iterator.py#L646-L709 | train | 223,762 |
sony/nnabla | python/src/nnabla/utils/data_iterator.py | DataIterator.slice | def slice(self, rng, num_of_slices=None, slice_pos=None,
slice_start=None, slice_end=None,
cache_dir=None):
'''
Slices the data iterator so that newly generated data iterator has access to limited portion of the original data.
Args:
rng (numpy.random.RandomState): Random generator for Initializer.
num_of_slices(int): Total number of slices to be made. Muts be used together with `slice_pos`.
slice_pos(int): Position of the slice to be assigned to the new data iterator. Must be used together with `num_of_slices`.
slice_start(int): Starting position of the range to be sliced into new data iterator. Must be used together with `slice_end`.
slice_end(int) : End position of the range to be sliced into new data iterator. Must be used together with `slice_start`.
cache_dir(str) : Directory to save cache files
Example:
.. code-block:: python
from nnabla.utils.data_iterator import data_iterator_simple
import numpy as np
def load_func1(index):
d = np.ones((2, 2)) * index
return d
di = data_iterator_simple(load_func1, 1000, batch_size=3)
di_s1 = di.slice(None, num_of_slices=10, slice_pos=0)
di_s2 = di.slice(None, num_of_slices=10, slice_pos=1)
di_s3 = di.slice(None, slice_start=100, slice_end=200)
di_s4 = di.slice(None, slice_start=300, slice_end=400)
'''
if num_of_slices is not None and slice_pos is not None and slice_start is None and slice_end is None:
size = self._size // num_of_slices
amount = self._size % num_of_slices
slice_start = slice_pos * size
if slice_pos < amount:
slice_start += slice_pos
else:
slice_start += amount
slice_end = slice_start + size
if slice_end > self._size:
slice_start -= (slice_end - self._size)
slice_end = self._size
elif num_of_slices is None and slice_pos is None and slice_start is not None and slice_end is not None:
pass
else:
logger.critical(
'You must specify position(num_of_slice and slice_pos) or range(slice_start and slice_end).')
return None
if cache_dir is None:
ds = self._data_source
while '_data_source' in dir(ds):
if '_cache_dir' in dir(ds):
cache_dir = ds._cache_dir
ds = ds._data_source
if cache_dir is None:
return DataIterator(
DataSourceWithMemoryCache(
SlicedDataSource(
self._data_source,
self._data_source.shuffle,
slice_start=slice_start,
slice_end=slice_end),
shuffle=self._shuffle,
rng=rng),
self._batch_size)
else:
return DataIterator(
DataSourceWithMemoryCache(
DataSourceWithFileCache(
SlicedDataSource(
self._data_source,
self._data_source.shuffle,
slice_start=slice_start,
slice_end=slice_end),
cache_dir=cache_dir,
cache_file_name_prefix='cache_sliced_{:08d}_{:08d}'.format(
slice_start,
slice_end),
shuffle=self._shuffle,
rng=rng),
shuffle=self._shuffle,
rng=rng),
self._batch_size) | python | def slice(self, rng, num_of_slices=None, slice_pos=None,
slice_start=None, slice_end=None,
cache_dir=None):
'''
Slices the data iterator so that newly generated data iterator has access to limited portion of the original data.
Args:
rng (numpy.random.RandomState): Random generator for Initializer.
num_of_slices(int): Total number of slices to be made. Muts be used together with `slice_pos`.
slice_pos(int): Position of the slice to be assigned to the new data iterator. Must be used together with `num_of_slices`.
slice_start(int): Starting position of the range to be sliced into new data iterator. Must be used together with `slice_end`.
slice_end(int) : End position of the range to be sliced into new data iterator. Must be used together with `slice_start`.
cache_dir(str) : Directory to save cache files
Example:
.. code-block:: python
from nnabla.utils.data_iterator import data_iterator_simple
import numpy as np
def load_func1(index):
d = np.ones((2, 2)) * index
return d
di = data_iterator_simple(load_func1, 1000, batch_size=3)
di_s1 = di.slice(None, num_of_slices=10, slice_pos=0)
di_s2 = di.slice(None, num_of_slices=10, slice_pos=1)
di_s3 = di.slice(None, slice_start=100, slice_end=200)
di_s4 = di.slice(None, slice_start=300, slice_end=400)
'''
if num_of_slices is not None and slice_pos is not None and slice_start is None and slice_end is None:
size = self._size // num_of_slices
amount = self._size % num_of_slices
slice_start = slice_pos * size
if slice_pos < amount:
slice_start += slice_pos
else:
slice_start += amount
slice_end = slice_start + size
if slice_end > self._size:
slice_start -= (slice_end - self._size)
slice_end = self._size
elif num_of_slices is None and slice_pos is None and slice_start is not None and slice_end is not None:
pass
else:
logger.critical(
'You must specify position(num_of_slice and slice_pos) or range(slice_start and slice_end).')
return None
if cache_dir is None:
ds = self._data_source
while '_data_source' in dir(ds):
if '_cache_dir' in dir(ds):
cache_dir = ds._cache_dir
ds = ds._data_source
if cache_dir is None:
return DataIterator(
DataSourceWithMemoryCache(
SlicedDataSource(
self._data_source,
self._data_source.shuffle,
slice_start=slice_start,
slice_end=slice_end),
shuffle=self._shuffle,
rng=rng),
self._batch_size)
else:
return DataIterator(
DataSourceWithMemoryCache(
DataSourceWithFileCache(
SlicedDataSource(
self._data_source,
self._data_source.shuffle,
slice_start=slice_start,
slice_end=slice_end),
cache_dir=cache_dir,
cache_file_name_prefix='cache_sliced_{:08d}_{:08d}'.format(
slice_start,
slice_end),
shuffle=self._shuffle,
rng=rng),
shuffle=self._shuffle,
rng=rng),
self._batch_size) | [
"def",
"slice",
"(",
"self",
",",
"rng",
",",
"num_of_slices",
"=",
"None",
",",
"slice_pos",
"=",
"None",
",",
"slice_start",
"=",
"None",
",",
"slice_end",
"=",
"None",
",",
"cache_dir",
"=",
"None",
")",
":",
"if",
"num_of_slices",
"is",
"not",
"Non... | Slices the data iterator so that newly generated data iterator has access to limited portion of the original data.
Args:
rng (numpy.random.RandomState): Random generator for Initializer.
num_of_slices(int): Total number of slices to be made. Muts be used together with `slice_pos`.
slice_pos(int): Position of the slice to be assigned to the new data iterator. Must be used together with `num_of_slices`.
slice_start(int): Starting position of the range to be sliced into new data iterator. Must be used together with `slice_end`.
slice_end(int) : End position of the range to be sliced into new data iterator. Must be used together with `slice_start`.
cache_dir(str) : Directory to save cache files
Example:
.. code-block:: python
from nnabla.utils.data_iterator import data_iterator_simple
import numpy as np
def load_func1(index):
d = np.ones((2, 2)) * index
return d
di = data_iterator_simple(load_func1, 1000, batch_size=3)
di_s1 = di.slice(None, num_of_slices=10, slice_pos=0)
di_s2 = di.slice(None, num_of_slices=10, slice_pos=1)
di_s3 = di.slice(None, slice_start=100, slice_end=200)
di_s4 = di.slice(None, slice_start=300, slice_end=400) | [
"Slices",
"the",
"data",
"iterator",
"so",
"that",
"newly",
"generated",
"data",
"iterator",
"has",
"access",
"to",
"limited",
"portion",
"of",
"the",
"original",
"data",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/data_iterator.py#L230-L320 | train | 223,763 |
sony/nnabla | python/src/nnabla/auto_forward.py | auto_forward | def auto_forward(auto=True):
"""
Context for dynamic graph execution mode.
Args:
auto (bool): Whether forward computation is executed during a
computation graph construction.
Returns: bool
"""
global __auto_forward_state
prev = __auto_forward_state
__auto_forward_state = auto
yield
__auto_forward_state = prev | python | def auto_forward(auto=True):
"""
Context for dynamic graph execution mode.
Args:
auto (bool): Whether forward computation is executed during a
computation graph construction.
Returns: bool
"""
global __auto_forward_state
prev = __auto_forward_state
__auto_forward_state = auto
yield
__auto_forward_state = prev | [
"def",
"auto_forward",
"(",
"auto",
"=",
"True",
")",
":",
"global",
"__auto_forward_state",
"prev",
"=",
"__auto_forward_state",
"__auto_forward_state",
"=",
"auto",
"yield",
"__auto_forward_state",
"=",
"prev"
] | Context for dynamic graph execution mode.
Args:
auto (bool): Whether forward computation is executed during a
computation graph construction.
Returns: bool | [
"Context",
"for",
"dynamic",
"graph",
"execution",
"mode",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/auto_forward.py#L23-L38 | train | 223,764 |
sony/nnabla | python/src/nnabla/utils/function_profile.py | FunctionProfile.print_stats | def print_stats(self, reset=True):
'''Manually print profiling result.
Args:
reset (bool): If False is specified, the profiling statistics so
far is maintained. If ``True`` (default),
:obj:`~reset_stats`
is called to reset the profiling statistics.
'''
if not self.ncalls:
return
stats = self.stats
code = self.fn.__code__
print('--- Function Profiling ---')
print('File "{}", line {}, function {}'.format(
code.co_filename,
code.co_firstlineno,
self.fn.__name__))
stats.sort_stats(*self.sort_keys)
stats.print_stats(*self.print_restrictions)
print('--------------------------')
if reset:
self.reset_stats() | python | def print_stats(self, reset=True):
'''Manually print profiling result.
Args:
reset (bool): If False is specified, the profiling statistics so
far is maintained. If ``True`` (default),
:obj:`~reset_stats`
is called to reset the profiling statistics.
'''
if not self.ncalls:
return
stats = self.stats
code = self.fn.__code__
print('--- Function Profiling ---')
print('File "{}", line {}, function {}'.format(
code.co_filename,
code.co_firstlineno,
self.fn.__name__))
stats.sort_stats(*self.sort_keys)
stats.print_stats(*self.print_restrictions)
print('--------------------------')
if reset:
self.reset_stats() | [
"def",
"print_stats",
"(",
"self",
",",
"reset",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"ncalls",
":",
"return",
"stats",
"=",
"self",
".",
"stats",
"code",
"=",
"self",
".",
"fn",
".",
"__code__",
"print",
"(",
"'--- Function Profiling ---'",
... | Manually print profiling result.
Args:
reset (bool): If False is specified, the profiling statistics so
far is maintained. If ``True`` (default),
:obj:`~reset_stats`
is called to reset the profiling statistics. | [
"Manually",
"print",
"profiling",
"result",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/function_profile.py#L87-L111 | train | 223,765 |
sony/nnabla | python/src/nnabla/models/utils.py | get_model_home | def get_model_home():
'''
Returns a root folder path for downloading models.
'''
d = os.path.join(get_data_home(), 'nnp_models')
if not os.path.isdir(d):
os.makedirs(d)
return d | python | def get_model_home():
'''
Returns a root folder path for downloading models.
'''
d = os.path.join(get_data_home(), 'nnp_models')
if not os.path.isdir(d):
os.makedirs(d)
return d | [
"def",
"get_model_home",
"(",
")",
":",
"d",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_data_home",
"(",
")",
",",
"'nnp_models'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"d",
")",
":",
"os",
".",
"makedirs",
"(",
"d",
")",
... | Returns a root folder path for downloading models. | [
"Returns",
"a",
"root",
"folder",
"path",
"for",
"downloading",
"models",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/models/utils.py#L23-L30 | train | 223,766 |
sony/nnabla | python/src/nnabla/models/utils.py | get_model_url_base | def get_model_url_base():
'''
Returns a root folder for models.
'''
url_base = get_model_url_base_from_env()
if url_base is not None:
logger.info('NNBLA_MODELS_URL_BASE is set as {}.'.format(url_base))
else:
url_base = 'https://nnabla.org/pretrained-models/nnp_models/'
return url_base | python | def get_model_url_base():
'''
Returns a root folder for models.
'''
url_base = get_model_url_base_from_env()
if url_base is not None:
logger.info('NNBLA_MODELS_URL_BASE is set as {}.'.format(url_base))
else:
url_base = 'https://nnabla.org/pretrained-models/nnp_models/'
return url_base | [
"def",
"get_model_url_base",
"(",
")",
":",
"url_base",
"=",
"get_model_url_base_from_env",
"(",
")",
"if",
"url_base",
"is",
"not",
"None",
":",
"logger",
".",
"info",
"(",
"'NNBLA_MODELS_URL_BASE is set as {}.'",
".",
"format",
"(",
"url_base",
")",
")",
"else... | Returns a root folder for models. | [
"Returns",
"a",
"root",
"folder",
"for",
"models",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/models/utils.py#L41-L50 | train | 223,767 |
sony/nnabla | python/src/nnabla/utils/data_source_loader.py | load_image_imread | def load_image_imread(file, shape=None, max_range=1.0):
'''
Load image from file like object.
:param file: Image contents
:type file: file like object.
:param shape: shape of output array
e.g. (3, 128, 192) : n_color, height, width.
:type shape: tuple of int
:param float max_range: the value of return array ranges from 0 to `max_range`.
:return: numpy array
'''
img255 = imread(
file) # return value is from zero to 255 (even if the image has 16-bitdepth.)
if len(img255.shape) == 2: # gray image
height, width = img255.shape
if shape is None:
out_height, out_width, out_n_color = height, width, 1
else:
out_n_color, out_height, out_width = shape
assert(out_n_color == 1)
if out_height != height or out_width != width:
# imresize returns 0 to 255 image.
img255 = imresize(img255, (out_height, out_width))
img255 = img255.reshape((out_n_color, out_height, out_width))
elif len(img255.shape) == 3: # RGB image
height, width, n_color = img255.shape
if shape is None:
out_height, out_width, out_n_color = height, width, n_color
else:
out_n_color, out_height, out_width = shape
assert(out_n_color == n_color)
if out_height != height or out_width != width or out_n_color != n_color:
# imresize returns 0 to 255 image.
img255 = imresize(img255, (out_height, out_width, out_n_color))
img255 = img255.transpose(2, 0, 1)
if max_range < 0 or max_range == 255.0:
return img255
else:
return img255 * (max_range / 255.0) | python | def load_image_imread(file, shape=None, max_range=1.0):
'''
Load image from file like object.
:param file: Image contents
:type file: file like object.
:param shape: shape of output array
e.g. (3, 128, 192) : n_color, height, width.
:type shape: tuple of int
:param float max_range: the value of return array ranges from 0 to `max_range`.
:return: numpy array
'''
img255 = imread(
file) # return value is from zero to 255 (even if the image has 16-bitdepth.)
if len(img255.shape) == 2: # gray image
height, width = img255.shape
if shape is None:
out_height, out_width, out_n_color = height, width, 1
else:
out_n_color, out_height, out_width = shape
assert(out_n_color == 1)
if out_height != height or out_width != width:
# imresize returns 0 to 255 image.
img255 = imresize(img255, (out_height, out_width))
img255 = img255.reshape((out_n_color, out_height, out_width))
elif len(img255.shape) == 3: # RGB image
height, width, n_color = img255.shape
if shape is None:
out_height, out_width, out_n_color = height, width, n_color
else:
out_n_color, out_height, out_width = shape
assert(out_n_color == n_color)
if out_height != height or out_width != width or out_n_color != n_color:
# imresize returns 0 to 255 image.
img255 = imresize(img255, (out_height, out_width, out_n_color))
img255 = img255.transpose(2, 0, 1)
if max_range < 0 or max_range == 255.0:
return img255
else:
return img255 * (max_range / 255.0) | [
"def",
"load_image_imread",
"(",
"file",
",",
"shape",
"=",
"None",
",",
"max_range",
"=",
"1.0",
")",
":",
"img255",
"=",
"imread",
"(",
"file",
")",
"# return value is from zero to 255 (even if the image has 16-bitdepth.)",
"if",
"len",
"(",
"img255",
".",
"shap... | Load image from file like object.
:param file: Image contents
:type file: file like object.
:param shape: shape of output array
e.g. (3, 128, 192) : n_color, height, width.
:type shape: tuple of int
:param float max_range: the value of return array ranges from 0 to `max_range`.
:return: numpy array | [
"Load",
"image",
"from",
"file",
"like",
"object",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/data_source_loader.py#L195-L238 | train | 223,768 |
sony/nnabla | python/src/nnabla/utils/data_source_loader.py | load_csv | def load_csv(file, shape=None, normalize=False):
"""
Load CSV file.
:param file: CSV file.
:type file: file like object
:param shape : data array is reshape to this shape.
:type shape: tuple of int
:return: numpy array
"""
value_list = []
if six.PY2:
for row in csv.reader(file):
value_list.append(list(map(float, row)))
elif six.PY34:
for row in csv.reader([l.decode('utf-8') for l in file.readlines()]):
value_list.append(list(map(float, row)))
if shape is None:
return numpy.array(value_list)
else:
return numpy.array(value_list).reshape(shape) | python | def load_csv(file, shape=None, normalize=False):
"""
Load CSV file.
:param file: CSV file.
:type file: file like object
:param shape : data array is reshape to this shape.
:type shape: tuple of int
:return: numpy array
"""
value_list = []
if six.PY2:
for row in csv.reader(file):
value_list.append(list(map(float, row)))
elif six.PY34:
for row in csv.reader([l.decode('utf-8') for l in file.readlines()]):
value_list.append(list(map(float, row)))
if shape is None:
return numpy.array(value_list)
else:
return numpy.array(value_list).reshape(shape) | [
"def",
"load_csv",
"(",
"file",
",",
"shape",
"=",
"None",
",",
"normalize",
"=",
"False",
")",
":",
"value_list",
"=",
"[",
"]",
"if",
"six",
".",
"PY2",
":",
"for",
"row",
"in",
"csv",
".",
"reader",
"(",
"file",
")",
":",
"value_list",
".",
"a... | Load CSV file.
:param file: CSV file.
:type file: file like object
:param shape : data array is reshape to this shape.
:type shape: tuple of int
:return: numpy array | [
"Load",
"CSV",
"file",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/data_source_loader.py#L346-L367 | train | 223,769 |
sony/nnabla | python/src/nnabla/experimental/viewers.py | SimpleGraph.save | def save(self, vleaf, fpath, cleanup=False, format=None):
"""Save the graph to a given file path.
Args:
vleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the reuslt.
fpath (`str`): The file path used to save.
cleanup (`bool`): Clean up the source file after rendering. Default is False.
format (str):
Force overwrite ``format`` (``'pdf', 'png', ...)``) configuration.
"""
graph = self.create_graphviz_digraph(vleaf, format=format)
graph.render(fpath, cleanup=cleanup) | python | def save(self, vleaf, fpath, cleanup=False, format=None):
"""Save the graph to a given file path.
Args:
vleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the reuslt.
fpath (`str`): The file path used to save.
cleanup (`bool`): Clean up the source file after rendering. Default is False.
format (str):
Force overwrite ``format`` (``'pdf', 'png', ...)``) configuration.
"""
graph = self.create_graphviz_digraph(vleaf, format=format)
graph.render(fpath, cleanup=cleanup) | [
"def",
"save",
"(",
"self",
",",
"vleaf",
",",
"fpath",
",",
"cleanup",
"=",
"False",
",",
"format",
"=",
"None",
")",
":",
"graph",
"=",
"self",
".",
"create_graphviz_digraph",
"(",
"vleaf",
",",
"format",
"=",
"format",
")",
"graph",
".",
"render",
... | Save the graph to a given file path.
Args:
vleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the reuslt.
fpath (`str`): The file path used to save.
cleanup (`bool`): Clean up the source file after rendering. Default is False.
format (str):
Force overwrite ``format`` (``'pdf', 'png', ...)``) configuration. | [
"Save",
"the",
"graph",
"to",
"a",
"given",
"file",
"path",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/experimental/viewers.py#L180-L192 | train | 223,770 |
sony/nnabla | python/src/nnabla/experimental/viewers.py | SimpleGraph.view | def view(self, vleaf, fpath=None, cleanup=True, format=None):
"""View the graph.
Args:
vleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the reuslt.
fpath (`str`): The file path used to save.
cleanup (`bool`): Clean up the source file after rendering. Default is True.
format (str):
Force overwrite ``format`` (``'pdf', 'png', ...)``) configuration.
"""
graph = self.create_graphviz_digraph(vleaf, format=format)
graph.view(fpath, cleanup=cleanup) | python | def view(self, vleaf, fpath=None, cleanup=True, format=None):
"""View the graph.
Args:
vleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the reuslt.
fpath (`str`): The file path used to save.
cleanup (`bool`): Clean up the source file after rendering. Default is True.
format (str):
Force overwrite ``format`` (``'pdf', 'png', ...)``) configuration.
"""
graph = self.create_graphviz_digraph(vleaf, format=format)
graph.view(fpath, cleanup=cleanup) | [
"def",
"view",
"(",
"self",
",",
"vleaf",
",",
"fpath",
"=",
"None",
",",
"cleanup",
"=",
"True",
",",
"format",
"=",
"None",
")",
":",
"graph",
"=",
"self",
".",
"create_graphviz_digraph",
"(",
"vleaf",
",",
"format",
"=",
"format",
")",
"graph",
".... | View the graph.
Args:
vleaf (`nnabla.Variable`): End variable. All variables and functions which can be traversed from this variable are shown in the reuslt.
fpath (`str`): The file path used to save.
cleanup (`bool`): Clean up the source file after rendering. Default is True.
format (str):
Force overwrite ``format`` (``'pdf', 'png', ...)``) configuration. | [
"View",
"the",
"graph",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/experimental/viewers.py#L194-L206 | train | 223,771 |
sony/nnabla | python/src/nnabla/experimental/parametric_function_class/module.py | Module.get_modules | def get_modules(self, memo=None, prefix=""):
"""Get modules.
This function is internally used as the helper method for other methods.
Args:
memo (set, optional): Module set in order to memorize to visit.
prefix (str, optional): Prefix to a specific parameter name.
Yields:
`Module`: The module class.
"""
if memo is None:
memo = set()
if self not in memo:
memo.add(self)
yield prefix, self
for k, v in self.__dict__.items():
if not isinstance(v, Module):
continue
name, module = k, v
submodule_prefix = "{}/{}".format(prefix,
name) if prefix != "" else name
for m in module.get_modules(memo, submodule_prefix):
yield m | python | def get_modules(self, memo=None, prefix=""):
"""Get modules.
This function is internally used as the helper method for other methods.
Args:
memo (set, optional): Module set in order to memorize to visit.
prefix (str, optional): Prefix to a specific parameter name.
Yields:
`Module`: The module class.
"""
if memo is None:
memo = set()
if self not in memo:
memo.add(self)
yield prefix, self
for k, v in self.__dict__.items():
if not isinstance(v, Module):
continue
name, module = k, v
submodule_prefix = "{}/{}".format(prefix,
name) if prefix != "" else name
for m in module.get_modules(memo, submodule_prefix):
yield m | [
"def",
"get_modules",
"(",
"self",
",",
"memo",
"=",
"None",
",",
"prefix",
"=",
"\"\"",
")",
":",
"if",
"memo",
"is",
"None",
":",
"memo",
"=",
"set",
"(",
")",
"if",
"self",
"not",
"in",
"memo",
":",
"memo",
".",
"add",
"(",
"self",
")",
"yie... | Get modules.
This function is internally used as the helper method for other methods.
Args:
memo (set, optional): Module set in order to memorize to visit.
prefix (str, optional): Prefix to a specific parameter name.
Yields:
`Module`: The module class. | [
"Get",
"modules",
"."
] | aaf3d33b7cbb38f2a03aa754178ba8f7c8481320 | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/experimental/parametric_function_class/module.py#L58-L83 | train | 223,772 |
jazzband/django-push-notifications | push_notifications/fields.py | HexIntegerField.get_prep_value | def get_prep_value(self, value):
""" Return the integer value to be stored from the hex string """
if value is None or value == "":
return None
if isinstance(value, six.string_types):
value = _hex_string_to_unsigned_integer(value)
if _using_signed_storage():
value = _unsigned_to_signed_integer(value)
return value | python | def get_prep_value(self, value):
""" Return the integer value to be stored from the hex string """
if value is None or value == "":
return None
if isinstance(value, six.string_types):
value = _hex_string_to_unsigned_integer(value)
if _using_signed_storage():
value = _unsigned_to_signed_integer(value)
return value | [
"def",
"get_prep_value",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
"or",
"value",
"==",
"\"\"",
":",
"return",
"None",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"value",
"=",
"_hex_string_to_unsi... | Return the integer value to be stored from the hex string | [
"Return",
"the",
"integer",
"value",
"to",
"be",
"stored",
"from",
"the",
"hex",
"string"
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/fields.py#L91-L99 | train | 223,773 |
jazzband/django-push-notifications | push_notifications/fields.py | HexIntegerField.from_db_value | def from_db_value(self, value, expression, connection, context):
""" Return an unsigned int representation from all db backends """
if value is None:
return value
if _using_signed_storage():
value = _signed_to_unsigned_integer(value)
return value | python | def from_db_value(self, value, expression, connection, context):
""" Return an unsigned int representation from all db backends """
if value is None:
return value
if _using_signed_storage():
value = _signed_to_unsigned_integer(value)
return value | [
"def",
"from_db_value",
"(",
"self",
",",
"value",
",",
"expression",
",",
"connection",
",",
"context",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"value",
"if",
"_using_signed_storage",
"(",
")",
":",
"value",
"=",
"_signed_to_unsigned_integer",
... | Return an unsigned int representation from all db backends | [
"Return",
"an",
"unsigned",
"int",
"representation",
"from",
"all",
"db",
"backends"
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/fields.py#L101-L107 | train | 223,774 |
jazzband/django-push-notifications | push_notifications/fields.py | HexIntegerField.to_python | def to_python(self, value):
""" Return a str representation of the hexadecimal """
if isinstance(value, six.string_types):
return value
if value is None:
return value
return _unsigned_integer_to_hex_string(value) | python | def to_python(self, value):
""" Return a str representation of the hexadecimal """
if isinstance(value, six.string_types):
return value
if value is None:
return value
return _unsigned_integer_to_hex_string(value) | [
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"return",
"value",
"if",
"value",
"is",
"None",
":",
"return",
"value",
"return",
"_unsigned_integer_to_hex_string",
"(",
... | Return a str representation of the hexadecimal | [
"Return",
"a",
"str",
"representation",
"of",
"the",
"hexadecimal"
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/fields.py#L109-L115 | train | 223,775 |
jazzband/django-push-notifications | push_notifications/apns.py | apns_send_bulk_message | def apns_send_bulk_message(
registration_ids, alert, application_id=None, certfile=None, **kwargs
):
"""
Sends an APNS notification to one or more registration_ids.
The registration_ids argument needs to be a list.
Note that if set alert should always be a string. If it is not set,
it won"t be included in the notification. You will need to pass None
to this for silent notifications.
"""
results = _apns_send(
registration_ids, alert, batch=True, application_id=application_id,
certfile=certfile, **kwargs
)
inactive_tokens = [token for token, result in results.items() if result == "Unregistered"]
models.APNSDevice.objects.filter(registration_id__in=inactive_tokens).update(active=False)
return results | python | def apns_send_bulk_message(
registration_ids, alert, application_id=None, certfile=None, **kwargs
):
"""
Sends an APNS notification to one or more registration_ids.
The registration_ids argument needs to be a list.
Note that if set alert should always be a string. If it is not set,
it won"t be included in the notification. You will need to pass None
to this for silent notifications.
"""
results = _apns_send(
registration_ids, alert, batch=True, application_id=application_id,
certfile=certfile, **kwargs
)
inactive_tokens = [token for token, result in results.items() if result == "Unregistered"]
models.APNSDevice.objects.filter(registration_id__in=inactive_tokens).update(active=False)
return results | [
"def",
"apns_send_bulk_message",
"(",
"registration_ids",
",",
"alert",
",",
"application_id",
"=",
"None",
",",
"certfile",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"results",
"=",
"_apns_send",
"(",
"registration_ids",
",",
"alert",
",",
"batch",
"=... | Sends an APNS notification to one or more registration_ids.
The registration_ids argument needs to be a list.
Note that if set alert should always be a string. If it is not set,
it won"t be included in the notification. You will need to pass None
to this for silent notifications. | [
"Sends",
"an",
"APNS",
"notification",
"to",
"one",
"or",
"more",
"registration_ids",
".",
"The",
"registration_ids",
"argument",
"needs",
"to",
"be",
"a",
"list",
"."
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/apns.py#L123-L141 | train | 223,776 |
jazzband/django-push-notifications | push_notifications/gcm.py | _cm_send_request | def _cm_send_request(
registration_ids, data, cloud_type="GCM", application_id=None,
use_fcm_notifications=True, **kwargs
):
"""
Sends a FCM or GCM notification to one or more registration_ids as json data.
The registration_ids needs to be a list.
"""
payload = {"registration_ids": registration_ids} if registration_ids else {}
data = data.copy()
# If using FCM, optionnally autodiscovers notification related keys
# https://firebase.google.com/docs/cloud-messaging/concept-options#notifications_and_data_messages
if cloud_type == "FCM" and use_fcm_notifications:
notification_payload = {}
if "message" in data:
notification_payload["body"] = data.pop("message", None)
for key in FCM_NOTIFICATIONS_PAYLOAD_KEYS:
value_from_extra = data.pop(key, None)
if value_from_extra:
notification_payload[key] = value_from_extra
value_from_kwargs = kwargs.pop(key, None)
if value_from_kwargs:
notification_payload[key] = value_from_kwargs
if notification_payload:
payload["notification"] = notification_payload
if data:
payload["data"] = data
# Attach any additional non falsy keyword args (targets, options)
# See ref : https://firebase.google.com/docs/cloud-messaging/http-server-ref#table1
payload.update({
k: v for k, v in kwargs.items() if v and (k in FCM_TARGETS_KEYS or k in FCM_OPTIONS_KEYS)
})
# Sort the keys for deterministic output (useful for tests)
json_payload = json.dumps(payload, separators=(",", ":"), sort_keys=True).encode("utf-8")
# Sends requests and handles the response
if cloud_type == "GCM":
response = json.loads(_gcm_send(
json_payload, "application/json", application_id=application_id
))
elif cloud_type == "FCM":
response = json.loads(_fcm_send(
json_payload, "application/json", application_id=application_id
))
else:
raise ImproperlyConfigured("cloud_type must be FCM or GCM not %s" % str(cloud_type))
return _cm_handle_response(registration_ids, response, cloud_type, application_id) | python | def _cm_send_request(
registration_ids, data, cloud_type="GCM", application_id=None,
use_fcm_notifications=True, **kwargs
):
"""
Sends a FCM or GCM notification to one or more registration_ids as json data.
The registration_ids needs to be a list.
"""
payload = {"registration_ids": registration_ids} if registration_ids else {}
data = data.copy()
# If using FCM, optionnally autodiscovers notification related keys
# https://firebase.google.com/docs/cloud-messaging/concept-options#notifications_and_data_messages
if cloud_type == "FCM" and use_fcm_notifications:
notification_payload = {}
if "message" in data:
notification_payload["body"] = data.pop("message", None)
for key in FCM_NOTIFICATIONS_PAYLOAD_KEYS:
value_from_extra = data.pop(key, None)
if value_from_extra:
notification_payload[key] = value_from_extra
value_from_kwargs = kwargs.pop(key, None)
if value_from_kwargs:
notification_payload[key] = value_from_kwargs
if notification_payload:
payload["notification"] = notification_payload
if data:
payload["data"] = data
# Attach any additional non falsy keyword args (targets, options)
# See ref : https://firebase.google.com/docs/cloud-messaging/http-server-ref#table1
payload.update({
k: v for k, v in kwargs.items() if v and (k in FCM_TARGETS_KEYS or k in FCM_OPTIONS_KEYS)
})
# Sort the keys for deterministic output (useful for tests)
json_payload = json.dumps(payload, separators=(",", ":"), sort_keys=True).encode("utf-8")
# Sends requests and handles the response
if cloud_type == "GCM":
response = json.loads(_gcm_send(
json_payload, "application/json", application_id=application_id
))
elif cloud_type == "FCM":
response = json.loads(_fcm_send(
json_payload, "application/json", application_id=application_id
))
else:
raise ImproperlyConfigured("cloud_type must be FCM or GCM not %s" % str(cloud_type))
return _cm_handle_response(registration_ids, response, cloud_type, application_id) | [
"def",
"_cm_send_request",
"(",
"registration_ids",
",",
"data",
",",
"cloud_type",
"=",
"\"GCM\"",
",",
"application_id",
"=",
"None",
",",
"use_fcm_notifications",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"payload",
"=",
"{",
"\"registration_ids\"",
"... | Sends a FCM or GCM notification to one or more registration_ids as json data.
The registration_ids needs to be a list. | [
"Sends",
"a",
"FCM",
"or",
"GCM",
"notification",
"to",
"one",
"or",
"more",
"registration_ids",
"as",
"json",
"data",
".",
"The",
"registration_ids",
"needs",
"to",
"be",
"a",
"list",
"."
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/gcm.py#L111-L164 | train | 223,777 |
jazzband/django-push-notifications | push_notifications/gcm.py | _cm_handle_canonical_id | def _cm_handle_canonical_id(canonical_id, current_id, cloud_type):
"""
Handle situation when FCM server response contains canonical ID
"""
devices = GCMDevice.objects.filter(cloud_message_type=cloud_type)
if devices.filter(registration_id=canonical_id, active=True).exists():
devices.filter(registration_id=current_id).update(active=False)
else:
devices.filter(registration_id=current_id).update(registration_id=canonical_id) | python | def _cm_handle_canonical_id(canonical_id, current_id, cloud_type):
"""
Handle situation when FCM server response contains canonical ID
"""
devices = GCMDevice.objects.filter(cloud_message_type=cloud_type)
if devices.filter(registration_id=canonical_id, active=True).exists():
devices.filter(registration_id=current_id).update(active=False)
else:
devices.filter(registration_id=current_id).update(registration_id=canonical_id) | [
"def",
"_cm_handle_canonical_id",
"(",
"canonical_id",
",",
"current_id",
",",
"cloud_type",
")",
":",
"devices",
"=",
"GCMDevice",
".",
"objects",
".",
"filter",
"(",
"cloud_message_type",
"=",
"cloud_type",
")",
"if",
"devices",
".",
"filter",
"(",
"registrati... | Handle situation when FCM server response contains canonical ID | [
"Handle",
"situation",
"when",
"FCM",
"server",
"response",
"contains",
"canonical",
"ID"
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/gcm.py#L167-L175 | train | 223,778 |
jazzband/django-push-notifications | push_notifications/conf/app.py | AppConfig._validate_applications | def _validate_applications(self, apps):
"""Validate the application collection"""
for application_id, application_config in apps.items():
self._validate_config(application_id, application_config)
application_config["APPLICATION_ID"] = application_id | python | def _validate_applications(self, apps):
"""Validate the application collection"""
for application_id, application_config in apps.items():
self._validate_config(application_id, application_config)
application_config["APPLICATION_ID"] = application_id | [
"def",
"_validate_applications",
"(",
"self",
",",
"apps",
")",
":",
"for",
"application_id",
",",
"application_config",
"in",
"apps",
".",
"items",
"(",
")",
":",
"self",
".",
"_validate_config",
"(",
"application_id",
",",
"application_config",
")",
"applicati... | Validate the application collection | [
"Validate",
"the",
"application",
"collection"
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/conf/app.py#L78-L83 | train | 223,779 |
jazzband/django-push-notifications | push_notifications/conf/app.py | AppConfig._validate_apns_certificate | def _validate_apns_certificate(self, certfile):
"""Validate the APNS certificate at startup."""
try:
with open(certfile, "r") as f:
content = f.read()
check_apns_certificate(content)
except Exception as e:
raise ImproperlyConfigured(
"The APNS certificate file at %r is not readable: %s" % (certfile, e)
) | python | def _validate_apns_certificate(self, certfile):
"""Validate the APNS certificate at startup."""
try:
with open(certfile, "r") as f:
content = f.read()
check_apns_certificate(content)
except Exception as e:
raise ImproperlyConfigured(
"The APNS certificate file at %r is not readable: %s" % (certfile, e)
) | [
"def",
"_validate_apns_certificate",
"(",
"self",
",",
"certfile",
")",
":",
"try",
":",
"with",
"open",
"(",
"certfile",
",",
"\"r\"",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"check_apns_certificate",
"(",
"content",
")",
"excep... | Validate the APNS certificate at startup. | [
"Validate",
"the",
"APNS",
"certificate",
"at",
"startup",
"."
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/conf/app.py#L136-L146 | train | 223,780 |
jazzband/django-push-notifications | push_notifications/conf/app.py | AppConfig._validate_allowed_settings | def _validate_allowed_settings(self, application_id, application_config, allowed_settings):
"""Confirm only allowed settings are present."""
for setting_key in application_config.keys():
if setting_key not in allowed_settings:
raise ImproperlyConfigured(
"Platform {}, app {} does not support the setting: {}.".format(
application_config["PLATFORM"], application_id, setting_key
)
) | python | def _validate_allowed_settings(self, application_id, application_config, allowed_settings):
"""Confirm only allowed settings are present."""
for setting_key in application_config.keys():
if setting_key not in allowed_settings:
raise ImproperlyConfigured(
"Platform {}, app {} does not support the setting: {}.".format(
application_config["PLATFORM"], application_id, setting_key
)
) | [
"def",
"_validate_allowed_settings",
"(",
"self",
",",
"application_id",
",",
"application_config",
",",
"allowed_settings",
")",
":",
"for",
"setting_key",
"in",
"application_config",
".",
"keys",
"(",
")",
":",
"if",
"setting_key",
"not",
"in",
"allowed_settings",... | Confirm only allowed settings are present. | [
"Confirm",
"only",
"allowed",
"settings",
"are",
"present",
"."
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/conf/app.py#L203-L212 | train | 223,781 |
jazzband/django-push-notifications | push_notifications/conf/app.py | AppConfig._validate_required_settings | def _validate_required_settings(
self, application_id, application_config, required_settings
):
"""All required keys must be present"""
for setting_key in required_settings:
if setting_key not in application_config.keys():
raise ImproperlyConfigured(
MISSING_SETTING.format(
application_id=application_id, setting=setting_key
)
) | python | def _validate_required_settings(
self, application_id, application_config, required_settings
):
"""All required keys must be present"""
for setting_key in required_settings:
if setting_key not in application_config.keys():
raise ImproperlyConfigured(
MISSING_SETTING.format(
application_id=application_id, setting=setting_key
)
) | [
"def",
"_validate_required_settings",
"(",
"self",
",",
"application_id",
",",
"application_config",
",",
"required_settings",
")",
":",
"for",
"setting_key",
"in",
"required_settings",
":",
"if",
"setting_key",
"not",
"in",
"application_config",
".",
"keys",
"(",
"... | All required keys must be present | [
"All",
"required",
"keys",
"must",
"be",
"present"
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/conf/app.py#L214-L225 | train | 223,782 |
jazzband/django-push-notifications | push_notifications/conf/app.py | AppConfig._get_application_settings | def _get_application_settings(self, application_id, platform, settings_key):
"""
Walks through PUSH_NOTIFICATIONS_SETTINGS to find the correct setting value
or raises ImproperlyConfigured.
"""
if not application_id:
conf_cls = "push_notifications.conf.AppConfig"
raise ImproperlyConfigured(
"{} requires the application_id be specified at all times.".format(conf_cls)
)
# verify that the application config exists
app_config = self._settings.get("APPLICATIONS").get(application_id, None)
if app_config is None:
raise ImproperlyConfigured(
"No application configured with application_id: {}.".format(application_id)
)
# fetch a setting for the incorrect type of platform
if app_config.get("PLATFORM") != platform:
raise ImproperlyConfigured(
SETTING_MISMATCH.format(
application_id=application_id,
platform=app_config.get("PLATFORM"),
setting=settings_key
)
)
# finally, try to fetch the setting
if settings_key not in app_config:
raise ImproperlyConfigured(
MISSING_SETTING.format(
application_id=application_id, setting=settings_key
)
)
return app_config.get(settings_key) | python | def _get_application_settings(self, application_id, platform, settings_key):
"""
Walks through PUSH_NOTIFICATIONS_SETTINGS to find the correct setting value
or raises ImproperlyConfigured.
"""
if not application_id:
conf_cls = "push_notifications.conf.AppConfig"
raise ImproperlyConfigured(
"{} requires the application_id be specified at all times.".format(conf_cls)
)
# verify that the application config exists
app_config = self._settings.get("APPLICATIONS").get(application_id, None)
if app_config is None:
raise ImproperlyConfigured(
"No application configured with application_id: {}.".format(application_id)
)
# fetch a setting for the incorrect type of platform
if app_config.get("PLATFORM") != platform:
raise ImproperlyConfigured(
SETTING_MISMATCH.format(
application_id=application_id,
platform=app_config.get("PLATFORM"),
setting=settings_key
)
)
# finally, try to fetch the setting
if settings_key not in app_config:
raise ImproperlyConfigured(
MISSING_SETTING.format(
application_id=application_id, setting=settings_key
)
)
return app_config.get(settings_key) | [
"def",
"_get_application_settings",
"(",
"self",
",",
"application_id",
",",
"platform",
",",
"settings_key",
")",
":",
"if",
"not",
"application_id",
":",
"conf_cls",
"=",
"\"push_notifications.conf.AppConfig\"",
"raise",
"ImproperlyConfigured",
"(",
"\"{} requires the a... | Walks through PUSH_NOTIFICATIONS_SETTINGS to find the correct setting value
or raises ImproperlyConfigured. | [
"Walks",
"through",
"PUSH_NOTIFICATIONS_SETTINGS",
"to",
"find",
"the",
"correct",
"setting",
"value",
"or",
"raises",
"ImproperlyConfigured",
"."
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/conf/app.py#L227-L264 | train | 223,783 |
jazzband/django-push-notifications | push_notifications/wns.py | _wns_authenticate | def _wns_authenticate(scope="notify.windows.com", application_id=None):
"""
Requests an Access token for WNS communication.
:return: dict: {'access_token': <str>, 'expires_in': <int>, 'token_type': 'bearer'}
"""
client_id = get_manager().get_wns_package_security_id(application_id)
client_secret = get_manager().get_wns_secret_key(application_id)
if not client_id:
raise ImproperlyConfigured(
'You need to set PUSH_NOTIFICATIONS_SETTINGS["WNS_PACKAGE_SECURITY_ID"] to use WNS.'
)
if not client_secret:
raise ImproperlyConfigured(
'You need to set PUSH_NOTIFICATIONS_SETTINGS["WNS_SECRET_KEY"] to use WNS.'
)
headers = {
"Content-Type": "application/x-www-form-urlencoded",
}
params = {
"grant_type": "client_credentials",
"client_id": client_id,
"client_secret": client_secret,
"scope": scope,
}
data = urlencode(params).encode("utf-8")
request = Request(SETTINGS["WNS_ACCESS_URL"], data=data, headers=headers)
try:
response = urlopen(request)
except HTTPError as err:
if err.code == 400:
# One of your settings is probably jacked up.
# https://msdn.microsoft.com/en-us/library/windows/apps/xaml/hh868245
raise WNSAuthenticationError("Authentication failed, check your WNS settings.")
raise err
oauth_data = response.read().decode("utf-8")
try:
oauth_data = json.loads(oauth_data)
except Exception:
# Upstream WNS issue
raise WNSAuthenticationError("Received invalid JSON data from WNS.")
access_token = oauth_data.get("access_token")
if not access_token:
# Upstream WNS issue
raise WNSAuthenticationError("Access token missing from WNS response.")
return access_token | python | def _wns_authenticate(scope="notify.windows.com", application_id=None):
"""
Requests an Access token for WNS communication.
:return: dict: {'access_token': <str>, 'expires_in': <int>, 'token_type': 'bearer'}
"""
client_id = get_manager().get_wns_package_security_id(application_id)
client_secret = get_manager().get_wns_secret_key(application_id)
if not client_id:
raise ImproperlyConfigured(
'You need to set PUSH_NOTIFICATIONS_SETTINGS["WNS_PACKAGE_SECURITY_ID"] to use WNS.'
)
if not client_secret:
raise ImproperlyConfigured(
'You need to set PUSH_NOTIFICATIONS_SETTINGS["WNS_SECRET_KEY"] to use WNS.'
)
headers = {
"Content-Type": "application/x-www-form-urlencoded",
}
params = {
"grant_type": "client_credentials",
"client_id": client_id,
"client_secret": client_secret,
"scope": scope,
}
data = urlencode(params).encode("utf-8")
request = Request(SETTINGS["WNS_ACCESS_URL"], data=data, headers=headers)
try:
response = urlopen(request)
except HTTPError as err:
if err.code == 400:
# One of your settings is probably jacked up.
# https://msdn.microsoft.com/en-us/library/windows/apps/xaml/hh868245
raise WNSAuthenticationError("Authentication failed, check your WNS settings.")
raise err
oauth_data = response.read().decode("utf-8")
try:
oauth_data = json.loads(oauth_data)
except Exception:
# Upstream WNS issue
raise WNSAuthenticationError("Received invalid JSON data from WNS.")
access_token = oauth_data.get("access_token")
if not access_token:
# Upstream WNS issue
raise WNSAuthenticationError("Access token missing from WNS response.")
return access_token | [
"def",
"_wns_authenticate",
"(",
"scope",
"=",
"\"notify.windows.com\"",
",",
"application_id",
"=",
"None",
")",
":",
"client_id",
"=",
"get_manager",
"(",
")",
".",
"get_wns_package_security_id",
"(",
"application_id",
")",
"client_secret",
"=",
"get_manager",
"("... | Requests an Access token for WNS communication.
:return: dict: {'access_token': <str>, 'expires_in': <int>, 'token_type': 'bearer'} | [
"Requests",
"an",
"Access",
"token",
"for",
"WNS",
"communication",
"."
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/wns.py#L31-L82 | train | 223,784 |
jazzband/django-push-notifications | push_notifications/wns.py | _wns_send | def _wns_send(uri, data, wns_type="wns/toast", application_id=None):
"""
Sends a notification data and authentication to WNS.
:param uri: str: The device's unique notification URI
:param data: dict: The notification data to be sent.
:return:
"""
access_token = _wns_authenticate(application_id=application_id)
content_type = "text/xml"
if wns_type == "wns/raw":
content_type = "application/octet-stream"
headers = {
# content_type is "text/xml" (toast/badge/tile) | "application/octet-stream" (raw)
"Content-Type": content_type,
"Authorization": "Bearer %s" % (access_token),
"X-WNS-Type": wns_type, # wns/toast | wns/badge | wns/tile | wns/raw
}
if type(data) is str:
data = data.encode("utf-8")
request = Request(uri, data, headers)
# A lot of things can happen, let them know which one.
try:
response = urlopen(request)
except HTTPError as err:
if err.code == 400:
msg = "One or more headers were specified incorrectly or conflict with another header."
elif err.code == 401:
msg = "The cloud service did not present a valid authentication ticket."
elif err.code == 403:
msg = "The cloud service is not authorized to send a notification to this URI."
elif err.code == 404:
msg = "The channel URI is not valid or is not recognized by WNS."
elif err.code == 405:
msg = "Invalid method. Only POST or DELETE is allowed."
elif err.code == 406:
msg = "The cloud service exceeded its throttle limit"
elif err.code == 410:
msg = "The channel expired."
elif err.code == 413:
msg = "The notification payload exceeds the 500 byte limit."
elif err.code == 500:
msg = "An internal failure caused notification delivery to fail."
elif err.code == 503:
msg = "The server is currently unavailable."
else:
raise err
raise WNSNotificationResponseError("HTTP %i: %s" % (err.code, msg))
return response.read().decode("utf-8") | python | def _wns_send(uri, data, wns_type="wns/toast", application_id=None):
"""
Sends a notification data and authentication to WNS.
:param uri: str: The device's unique notification URI
:param data: dict: The notification data to be sent.
:return:
"""
access_token = _wns_authenticate(application_id=application_id)
content_type = "text/xml"
if wns_type == "wns/raw":
content_type = "application/octet-stream"
headers = {
# content_type is "text/xml" (toast/badge/tile) | "application/octet-stream" (raw)
"Content-Type": content_type,
"Authorization": "Bearer %s" % (access_token),
"X-WNS-Type": wns_type, # wns/toast | wns/badge | wns/tile | wns/raw
}
if type(data) is str:
data = data.encode("utf-8")
request = Request(uri, data, headers)
# A lot of things can happen, let them know which one.
try:
response = urlopen(request)
except HTTPError as err:
if err.code == 400:
msg = "One or more headers were specified incorrectly or conflict with another header."
elif err.code == 401:
msg = "The cloud service did not present a valid authentication ticket."
elif err.code == 403:
msg = "The cloud service is not authorized to send a notification to this URI."
elif err.code == 404:
msg = "The channel URI is not valid or is not recognized by WNS."
elif err.code == 405:
msg = "Invalid method. Only POST or DELETE is allowed."
elif err.code == 406:
msg = "The cloud service exceeded its throttle limit"
elif err.code == 410:
msg = "The channel expired."
elif err.code == 413:
msg = "The notification payload exceeds the 500 byte limit."
elif err.code == 500:
msg = "An internal failure caused notification delivery to fail."
elif err.code == 503:
msg = "The server is currently unavailable."
else:
raise err
raise WNSNotificationResponseError("HTTP %i: %s" % (err.code, msg))
return response.read().decode("utf-8") | [
"def",
"_wns_send",
"(",
"uri",
",",
"data",
",",
"wns_type",
"=",
"\"wns/toast\"",
",",
"application_id",
"=",
"None",
")",
":",
"access_token",
"=",
"_wns_authenticate",
"(",
"application_id",
"=",
"application_id",
")",
"content_type",
"=",
"\"text/xml\"",
"i... | Sends a notification data and authentication to WNS.
:param uri: str: The device's unique notification URI
:param data: dict: The notification data to be sent.
:return: | [
"Sends",
"a",
"notification",
"data",
"and",
"authentication",
"to",
"WNS",
"."
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/wns.py#L85-L139 | train | 223,785 |
jazzband/django-push-notifications | push_notifications/wns.py | _wns_prepare_toast | def _wns_prepare_toast(data, **kwargs):
"""
Creates the xml tree for a `toast` notification
:param data: dict: The notification data to be converted to an xml tree.
{
"text": ["Title text", "Message Text", "Another message!"],
"image": ["src1", "src2"],
}
:return: str
"""
root = ET.Element("toast")
visual = ET.SubElement(root, "visual")
binding = ET.SubElement(visual, "binding")
binding.attrib["template"] = kwargs.pop("template", "ToastText01")
if "text" in data:
for count, item in enumerate(data["text"], start=1):
elem = ET.SubElement(binding, "text")
elem.text = item
elem.attrib["id"] = str(count)
if "image" in data:
for count, item in enumerate(data["image"], start=1):
elem = ET.SubElement(binding, "img")
elem.attrib["src"] = item
elem.attrib["id"] = str(count)
return ET.tostring(root) | python | def _wns_prepare_toast(data, **kwargs):
"""
Creates the xml tree for a `toast` notification
:param data: dict: The notification data to be converted to an xml tree.
{
"text": ["Title text", "Message Text", "Another message!"],
"image": ["src1", "src2"],
}
:return: str
"""
root = ET.Element("toast")
visual = ET.SubElement(root, "visual")
binding = ET.SubElement(visual, "binding")
binding.attrib["template"] = kwargs.pop("template", "ToastText01")
if "text" in data:
for count, item in enumerate(data["text"], start=1):
elem = ET.SubElement(binding, "text")
elem.text = item
elem.attrib["id"] = str(count)
if "image" in data:
for count, item in enumerate(data["image"], start=1):
elem = ET.SubElement(binding, "img")
elem.attrib["src"] = item
elem.attrib["id"] = str(count)
return ET.tostring(root) | [
"def",
"_wns_prepare_toast",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"root",
"=",
"ET",
".",
"Element",
"(",
"\"toast\"",
")",
"visual",
"=",
"ET",
".",
"SubElement",
"(",
"root",
",",
"\"visual\"",
")",
"binding",
"=",
"ET",
".",
"SubElement",... | Creates the xml tree for a `toast` notification
:param data: dict: The notification data to be converted to an xml tree.
{
"text": ["Title text", "Message Text", "Another message!"],
"image": ["src1", "src2"],
}
:return: str | [
"Creates",
"the",
"xml",
"tree",
"for",
"a",
"toast",
"notification"
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/wns.py#L142-L169 | train | 223,786 |
jazzband/django-push-notifications | push_notifications/wns.py | wns_send_bulk_message | def wns_send_bulk_message(
uri_list, message=None, xml_data=None, raw_data=None, application_id=None, **kwargs
):
"""
WNS doesn't support bulk notification, so we loop through each uri.
:param uri_list: list: A list of uris the notification will be sent to.
:param message: str: The notification data to be sent.
:param xml_data: dict: A dictionary containing data to be converted to an xml tree.
:param raw_data: str: Data to be sent via a `raw` notification.
"""
res = []
if uri_list:
for uri in uri_list:
r = wns_send_message(
uri=uri, message=message, xml_data=xml_data,
raw_data=raw_data, application_id=application_id, **kwargs
)
res.append(r)
return res | python | def wns_send_bulk_message(
uri_list, message=None, xml_data=None, raw_data=None, application_id=None, **kwargs
):
"""
WNS doesn't support bulk notification, so we loop through each uri.
:param uri_list: list: A list of uris the notification will be sent to.
:param message: str: The notification data to be sent.
:param xml_data: dict: A dictionary containing data to be converted to an xml tree.
:param raw_data: str: Data to be sent via a `raw` notification.
"""
res = []
if uri_list:
for uri in uri_list:
r = wns_send_message(
uri=uri, message=message, xml_data=xml_data,
raw_data=raw_data, application_id=application_id, **kwargs
)
res.append(r)
return res | [
"def",
"wns_send_bulk_message",
"(",
"uri_list",
",",
"message",
"=",
"None",
",",
"xml_data",
"=",
"None",
",",
"raw_data",
"=",
"None",
",",
"application_id",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"res",
"=",
"[",
"]",
"if",
"uri_list",
":"... | WNS doesn't support bulk notification, so we loop through each uri.
:param uri_list: list: A list of uris the notification will be sent to.
:param message: str: The notification data to be sent.
:param xml_data: dict: A dictionary containing data to be converted to an xml tree.
:param raw_data: str: Data to be sent via a `raw` notification. | [
"WNS",
"doesn",
"t",
"support",
"bulk",
"notification",
"so",
"we",
"loop",
"through",
"each",
"uri",
"."
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/wns.py#L237-L256 | train | 223,787 |
jazzband/django-push-notifications | push_notifications/wns.py | _add_sub_elements_from_dict | def _add_sub_elements_from_dict(parent, sub_dict):
"""
Add SubElements to the parent element.
:param parent: ElementTree.Element: The parent element for the newly created SubElement.
:param sub_dict: dict: Used to create a new SubElement. See `dict_to_xml_schema`
method docstring for more information. e.g.:
{"example": {
"attrs": {
"key1": "value1",
...
},
...
}}
"""
for key, value in sub_dict.items():
if isinstance(value, list):
for repeated_element in value:
sub_element = ET.SubElement(parent, key)
_add_element_attrs(sub_element, repeated_element.get("attrs", {}))
children = repeated_element.get("children", None)
if isinstance(children, dict):
_add_sub_elements_from_dict(sub_element, children)
elif isinstance(children, str):
sub_element.text = children
else:
sub_element = ET.SubElement(parent, key)
_add_element_attrs(sub_element, value.get("attrs", {}))
children = value.get("children", None)
if isinstance(children, dict):
_add_sub_elements_from_dict(sub_element, children)
elif isinstance(children, str):
sub_element.text = children | python | def _add_sub_elements_from_dict(parent, sub_dict):
"""
Add SubElements to the parent element.
:param parent: ElementTree.Element: The parent element for the newly created SubElement.
:param sub_dict: dict: Used to create a new SubElement. See `dict_to_xml_schema`
method docstring for more information. e.g.:
{"example": {
"attrs": {
"key1": "value1",
...
},
...
}}
"""
for key, value in sub_dict.items():
if isinstance(value, list):
for repeated_element in value:
sub_element = ET.SubElement(parent, key)
_add_element_attrs(sub_element, repeated_element.get("attrs", {}))
children = repeated_element.get("children", None)
if isinstance(children, dict):
_add_sub_elements_from_dict(sub_element, children)
elif isinstance(children, str):
sub_element.text = children
else:
sub_element = ET.SubElement(parent, key)
_add_element_attrs(sub_element, value.get("attrs", {}))
children = value.get("children", None)
if isinstance(children, dict):
_add_sub_elements_from_dict(sub_element, children)
elif isinstance(children, str):
sub_element.text = children | [
"def",
"_add_sub_elements_from_dict",
"(",
"parent",
",",
"sub_dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"sub_dict",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"for",
"repeated_element",
"in",
"value",
... | Add SubElements to the parent element.
:param parent: ElementTree.Element: The parent element for the newly created SubElement.
:param sub_dict: dict: Used to create a new SubElement. See `dict_to_xml_schema`
method docstring for more information. e.g.:
{"example": {
"attrs": {
"key1": "value1",
...
},
...
}} | [
"Add",
"SubElements",
"to",
"the",
"parent",
"element",
"."
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/wns.py#L325-L357 | train | 223,788 |
jazzband/django-push-notifications | push_notifications/wns.py | _add_element_attrs | def _add_element_attrs(elem, attrs):
"""
Add attributes to the given element.
:param elem: ElementTree.Element: The element the attributes are being added to.
:param attrs: dict: A dictionary of attributes. e.g.:
{"attribute1": "value", "attribute2": "another"}
:return: ElementTree.Element
"""
for attr, value in attrs.items():
elem.attrib[attr] = value
return elem | python | def _add_element_attrs(elem, attrs):
"""
Add attributes to the given element.
:param elem: ElementTree.Element: The element the attributes are being added to.
:param attrs: dict: A dictionary of attributes. e.g.:
{"attribute1": "value", "attribute2": "another"}
:return: ElementTree.Element
"""
for attr, value in attrs.items():
elem.attrib[attr] = value
return elem | [
"def",
"_add_element_attrs",
"(",
"elem",
",",
"attrs",
")",
":",
"for",
"attr",
",",
"value",
"in",
"attrs",
".",
"items",
"(",
")",
":",
"elem",
".",
"attrib",
"[",
"attr",
"]",
"=",
"value",
"return",
"elem"
] | Add attributes to the given element.
:param elem: ElementTree.Element: The element the attributes are being added to.
:param attrs: dict: A dictionary of attributes. e.g.:
{"attribute1": "value", "attribute2": "another"}
:return: ElementTree.Element | [
"Add",
"attributes",
"to",
"the",
"given",
"element",
"."
] | c4a0d710711fa27bfb6533c0bf3468cb67a62679 | https://github.com/jazzband/django-push-notifications/blob/c4a0d710711fa27bfb6533c0bf3468cb67a62679/push_notifications/wns.py#L360-L371 | train | 223,789 |
skydive-project/skydive | contrib/python/api/skydive/websocket/client.py | WSClient.login | def login(self, host_spec="", username="", password=""):
""" Authenticate with infrastructure via the Skydive analyzer
This method will also set the authentication cookie to be used in
the future requests
:param host_spec: Host IP and port (e.g. 192.168.10.1:8082)
:type host_spec: string
:param username: Username to use for login
:type username: string
:param password: Password to use for login
:type password: string
:return: True on successful authentication, False otherwise
"""
warnings.warn(
"shouldn't use this function anymore ! use connect which handles"
"handles authentication directly.",
DeprecationWarning
)
scheme = "http"
if not host_spec:
u = urlparse(self.endpoint)
host_spec = u.netloc
if u.scheme == "wss":
scheme = "https"
if self.username:
username = self.username
if self.password:
password = self.password
auth = Authenticate(host_spec, scheme=scheme,
username=username, password=password)
try:
auth.login()
cookie = 'authtok={}'.format(auth.authtok)
if self.cookies:
self.cookies.append(cookie)
else:
self.cookies = [cookie, ]
return True
except Exception:
return False | python | def login(self, host_spec="", username="", password=""):
""" Authenticate with infrastructure via the Skydive analyzer
This method will also set the authentication cookie to be used in
the future requests
:param host_spec: Host IP and port (e.g. 192.168.10.1:8082)
:type host_spec: string
:param username: Username to use for login
:type username: string
:param password: Password to use for login
:type password: string
:return: True on successful authentication, False otherwise
"""
warnings.warn(
"shouldn't use this function anymore ! use connect which handles"
"handles authentication directly.",
DeprecationWarning
)
scheme = "http"
if not host_spec:
u = urlparse(self.endpoint)
host_spec = u.netloc
if u.scheme == "wss":
scheme = "https"
if self.username:
username = self.username
if self.password:
password = self.password
auth = Authenticate(host_spec, scheme=scheme,
username=username, password=password)
try:
auth.login()
cookie = 'authtok={}'.format(auth.authtok)
if self.cookies:
self.cookies.append(cookie)
else:
self.cookies = [cookie, ]
return True
except Exception:
return False | [
"def",
"login",
"(",
"self",
",",
"host_spec",
"=",
"\"\"",
",",
"username",
"=",
"\"\"",
",",
"password",
"=",
"\"\"",
")",
":",
"warnings",
".",
"warn",
"(",
"\"shouldn't use this function anymore ! use connect which handles\"",
"\"handles authentication directly.\"",... | Authenticate with infrastructure via the Skydive analyzer
This method will also set the authentication cookie to be used in
the future requests
:param host_spec: Host IP and port (e.g. 192.168.10.1:8082)
:type host_spec: string
:param username: Username to use for login
:type username: string
:param password: Password to use for login
:type password: string
:return: True on successful authentication, False otherwise | [
"Authenticate",
"with",
"infrastructure",
"via",
"the",
"Skydive",
"analyzer"
] | 9a68cc2213bb2f756fbf27a13f060805f2a47025 | https://github.com/skydive-project/skydive/blob/9a68cc2213bb2f756fbf27a13f060805f2a47025/contrib/python/api/skydive/websocket/client.py#L228-L270 | train | 223,790 |
kivy/buildozer | buildozer/targets/android.py | TargetAndroid._sdkmanager | def _sdkmanager(self, *args, **kwargs):
"""Call the sdkmanager in our Android SDK with the given arguments."""
# Use the android-sdk dir as cwd by default
kwargs['cwd'] = kwargs.get('cwd', self.android_sdk_dir)
command = self.sdkmanager_path + ' ' + ' '.join(args)
return_child = kwargs.pop('return_child', False)
if return_child:
return self.buildozer.cmd_expect(command, **kwargs)
else:
kwargs['get_stdout'] = kwargs.get('get_stdout', True)
return self.buildozer.cmd(command, **kwargs) | python | def _sdkmanager(self, *args, **kwargs):
"""Call the sdkmanager in our Android SDK with the given arguments."""
# Use the android-sdk dir as cwd by default
kwargs['cwd'] = kwargs.get('cwd', self.android_sdk_dir)
command = self.sdkmanager_path + ' ' + ' '.join(args)
return_child = kwargs.pop('return_child', False)
if return_child:
return self.buildozer.cmd_expect(command, **kwargs)
else:
kwargs['get_stdout'] = kwargs.get('get_stdout', True)
return self.buildozer.cmd(command, **kwargs) | [
"def",
"_sdkmanager",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Use the android-sdk dir as cwd by default",
"kwargs",
"[",
"'cwd'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'cwd'",
",",
"self",
".",
"android_sdk_dir",
")",
"command",
... | Call the sdkmanager in our Android SDK with the given arguments. | [
"Call",
"the",
"sdkmanager",
"in",
"our",
"Android",
"SDK",
"with",
"the",
"given",
"arguments",
"."
] | 586152c6ce2b6cde4d5a081d9711f9cb037a901c | https://github.com/kivy/buildozer/blob/586152c6ce2b6cde4d5a081d9711f9cb037a901c/buildozer/targets/android.py#L98-L108 | train | 223,791 |
kivy/buildozer | buildozer/targets/android.py | TargetAndroid._android_get_installed_platform_tools_version | def _android_get_installed_platform_tools_version(self):
"""
Crudely parse out the installed platform-tools version
"""
platform_tools_dir = os.path.join(
self.android_sdk_dir,
'platform-tools')
if not os.path.exists(platform_tools_dir):
return None
data_file = os.path.join(platform_tools_dir, 'source.properties')
if not os.path.exists(data_file):
return None
with open(data_file, 'r') as fileh:
lines = fileh.readlines()
for line in lines:
if line.startswith('Pkg.Revision='):
break
else:
self.buildozer.error('Read {} but found no Pkg.Revision'.format(data_file))
# Don't actually exit, in case the build env is
# okay. Something else will fault if it's important.
return None
revision = line.split('=')[1].strip()
return revision | python | def _android_get_installed_platform_tools_version(self):
"""
Crudely parse out the installed platform-tools version
"""
platform_tools_dir = os.path.join(
self.android_sdk_dir,
'platform-tools')
if not os.path.exists(platform_tools_dir):
return None
data_file = os.path.join(platform_tools_dir, 'source.properties')
if not os.path.exists(data_file):
return None
with open(data_file, 'r') as fileh:
lines = fileh.readlines()
for line in lines:
if line.startswith('Pkg.Revision='):
break
else:
self.buildozer.error('Read {} but found no Pkg.Revision'.format(data_file))
# Don't actually exit, in case the build env is
# okay. Something else will fault if it's important.
return None
revision = line.split('=')[1].strip()
return revision | [
"def",
"_android_get_installed_platform_tools_version",
"(",
"self",
")",
":",
"platform_tools_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"android_sdk_dir",
",",
"'platform-tools'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"pl... | Crudely parse out the installed platform-tools version | [
"Crudely",
"parse",
"out",
"the",
"installed",
"platform",
"-",
"tools",
"version"
] | 586152c6ce2b6cde4d5a081d9711f9cb037a901c | https://github.com/kivy/buildozer/blob/586152c6ce2b6cde4d5a081d9711f9cb037a901c/buildozer/targets/android.py#L424-L454 | train | 223,792 |
kivy/buildozer | buildozer/targets/android.py | TargetAndroid._android_update_sdk | def _android_update_sdk(self, *sdkmanager_commands):
"""Update the tools and package-tools if possible"""
auto_accept_license = self.buildozer.config.getbooldefault(
'app', 'android.accept_sdk_license', False)
if auto_accept_license:
# `SIGPIPE` is not being reported somehow, but `EPIPE` is.
# This leads to a stderr "Broken pipe" message which is harmless,
# but doesn't look good on terminal, hence redirecting to /dev/null
yes_command = 'yes 2>/dev/null'
command = '{} | {} --licenses'.format(
yes_command, self.sdkmanager_path)
self.buildozer.cmd(command, cwd=self.android_sdk_dir)
self._sdkmanager(*sdkmanager_commands) | python | def _android_update_sdk(self, *sdkmanager_commands):
"""Update the tools and package-tools if possible"""
auto_accept_license = self.buildozer.config.getbooldefault(
'app', 'android.accept_sdk_license', False)
if auto_accept_license:
# `SIGPIPE` is not being reported somehow, but `EPIPE` is.
# This leads to a stderr "Broken pipe" message which is harmless,
# but doesn't look good on terminal, hence redirecting to /dev/null
yes_command = 'yes 2>/dev/null'
command = '{} | {} --licenses'.format(
yes_command, self.sdkmanager_path)
self.buildozer.cmd(command, cwd=self.android_sdk_dir)
self._sdkmanager(*sdkmanager_commands) | [
"def",
"_android_update_sdk",
"(",
"self",
",",
"*",
"sdkmanager_commands",
")",
":",
"auto_accept_license",
"=",
"self",
".",
"buildozer",
".",
"config",
".",
"getbooldefault",
"(",
"'app'",
",",
"'android.accept_sdk_license'",
",",
"False",
")",
"if",
"auto_acce... | Update the tools and package-tools if possible | [
"Update",
"the",
"tools",
"and",
"package",
"-",
"tools",
"if",
"possible"
] | 586152c6ce2b6cde4d5a081d9711f9cb037a901c | https://github.com/kivy/buildozer/blob/586152c6ce2b6cde4d5a081d9711f9cb037a901c/buildozer/targets/android.py#L457-L470 | train | 223,793 |
kivy/buildozer | buildozer/targets/android.py | TargetAndroid.cmd_logcat | def cmd_logcat(self, *args):
'''Show the log from the device
'''
self.check_requirements()
serial = self.serials[0:]
if not serial:
return
filters = self.buildozer.config.getrawdefault(
"app", "android.logcat_filters", "", section_sep=":", split_char=" ")
filters = " ".join(filters)
self.buildozer.environ['ANDROID_SERIAL'] = serial[0]
self.buildozer.cmd('{adb} logcat {filters}'.format(adb=self.adb_cmd,
filters=filters),
cwd=self.buildozer.global_platform_dir,
show_output=True)
self.buildozer.environ.pop('ANDROID_SERIAL', None) | python | def cmd_logcat(self, *args):
'''Show the log from the device
'''
self.check_requirements()
serial = self.serials[0:]
if not serial:
return
filters = self.buildozer.config.getrawdefault(
"app", "android.logcat_filters", "", section_sep=":", split_char=" ")
filters = " ".join(filters)
self.buildozer.environ['ANDROID_SERIAL'] = serial[0]
self.buildozer.cmd('{adb} logcat {filters}'.format(adb=self.adb_cmd,
filters=filters),
cwd=self.buildozer.global_platform_dir,
show_output=True)
self.buildozer.environ.pop('ANDROID_SERIAL', None) | [
"def",
"cmd_logcat",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"check_requirements",
"(",
")",
"serial",
"=",
"self",
".",
"serials",
"[",
"0",
":",
"]",
"if",
"not",
"serial",
":",
"return",
"filters",
"=",
"self",
".",
"buildozer",
".",... | Show the log from the device | [
"Show",
"the",
"log",
"from",
"the",
"device"
] | 586152c6ce2b6cde4d5a081d9711f9cb037a901c | https://github.com/kivy/buildozer/blob/586152c6ce2b6cde4d5a081d9711f9cb037a901c/buildozer/targets/android.py#L1203-L1218 | train | 223,794 |
kivy/buildozer | buildozer/target.py | Target.path_or_git_url | def path_or_git_url(self, repo, owner='kivy', branch='master',
url_format='https://github.com/{owner}/{repo}.git',
platform=None,
squash_hyphen=True):
"""Get source location for a git checkout
This method will check the `buildozer.spec` for the keys:
{repo}_dir
{repo}_url
{repo}_branch
and use them to determine the source location for a git checkout.
If a `platform` is specified, {platform}.{repo} will be used
as the base for the buildozer key
`{repo}_dir` specifies a custom checkout location
(relative to `buildozer.root_dir`). If present, `path` will be
set to this value and `url`, `branch` will be set to None,
None. Otherwise, `{repo}_url` and `{repo}_branch` will be
examined.
If no keys are present, the kwargs will be used to create
a sensible default URL and branch.
:Parameters:
`repo`: str (required)
name of repository to fetch. Used both for buildozer
keys ({platform}.{repo}_dir|_url|_branch) and in building
default git URL
`branch`: str (default 'master')
Specific branch to retrieve if none specified in
buildozer.spec.
`owner`: str
owner of repo.
`platform`: str or None
platform prefix to use when retrieving `buildozer.spec`
keys. If specified, key names will be {platform}.{repo}
instead of just {repo}
`squash_hyphen`: boolean
if True, change '-' to '_' when looking for
keys in buildozer.spec. This lets us keep backwards
compatibility with old buildozer.spec files
`url_format`: format string
Used to construct default git URL.
can use {repo} {owner} and {branch} if needed.
:Returns:
A Tuple (path, url, branch) where
`path`
Path to a custom git checkout. If specified,
both `url` and `branch` will be None
`url`
URL of git repository from where code should be
checked-out
`branch`
branch name (or tag) that should be used for the
check-out.
"""
if squash_hyphen:
key = repo.replace('-', '_')
else:
key = repo
if platform:
key = "{}.{}".format(platform, key)
config = self.buildozer.config
path = config.getdefault('app', '{}_dir'.format(key), None)
if path is not None:
path = join(self.buildozer.root_dir, path)
url = None
branch = None
else:
branch = config.getdefault('app', '{}_branch'.format(key), branch)
default_url = url_format.format(owner=owner, repo=repo, branch=branch)
url = config.getdefault('app', '{}_url'.format(key), default_url)
if branch != 'master':
url = "--branch {} {}".format(branch, url)
return path, url, branch | python | def path_or_git_url(self, repo, owner='kivy', branch='master',
url_format='https://github.com/{owner}/{repo}.git',
platform=None,
squash_hyphen=True):
"""Get source location for a git checkout
This method will check the `buildozer.spec` for the keys:
{repo}_dir
{repo}_url
{repo}_branch
and use them to determine the source location for a git checkout.
If a `platform` is specified, {platform}.{repo} will be used
as the base for the buildozer key
`{repo}_dir` specifies a custom checkout location
(relative to `buildozer.root_dir`). If present, `path` will be
set to this value and `url`, `branch` will be set to None,
None. Otherwise, `{repo}_url` and `{repo}_branch` will be
examined.
If no keys are present, the kwargs will be used to create
a sensible default URL and branch.
:Parameters:
`repo`: str (required)
name of repository to fetch. Used both for buildozer
keys ({platform}.{repo}_dir|_url|_branch) and in building
default git URL
`branch`: str (default 'master')
Specific branch to retrieve if none specified in
buildozer.spec.
`owner`: str
owner of repo.
`platform`: str or None
platform prefix to use when retrieving `buildozer.spec`
keys. If specified, key names will be {platform}.{repo}
instead of just {repo}
`squash_hyphen`: boolean
if True, change '-' to '_' when looking for
keys in buildozer.spec. This lets us keep backwards
compatibility with old buildozer.spec files
`url_format`: format string
Used to construct default git URL.
can use {repo} {owner} and {branch} if needed.
:Returns:
A Tuple (path, url, branch) where
`path`
Path to a custom git checkout. If specified,
both `url` and `branch` will be None
`url`
URL of git repository from where code should be
checked-out
`branch`
branch name (or tag) that should be used for the
check-out.
"""
if squash_hyphen:
key = repo.replace('-', '_')
else:
key = repo
if platform:
key = "{}.{}".format(platform, key)
config = self.buildozer.config
path = config.getdefault('app', '{}_dir'.format(key), None)
if path is not None:
path = join(self.buildozer.root_dir, path)
url = None
branch = None
else:
branch = config.getdefault('app', '{}_branch'.format(key), branch)
default_url = url_format.format(owner=owner, repo=repo, branch=branch)
url = config.getdefault('app', '{}_url'.format(key), default_url)
if branch != 'master':
url = "--branch {} {}".format(branch, url)
return path, url, branch | [
"def",
"path_or_git_url",
"(",
"self",
",",
"repo",
",",
"owner",
"=",
"'kivy'",
",",
"branch",
"=",
"'master'",
",",
"url_format",
"=",
"'https://github.com/{owner}/{repo}.git'",
",",
"platform",
"=",
"None",
",",
"squash_hyphen",
"=",
"True",
")",
":",
"if",... | Get source location for a git checkout
This method will check the `buildozer.spec` for the keys:
{repo}_dir
{repo}_url
{repo}_branch
and use them to determine the source location for a git checkout.
If a `platform` is specified, {platform}.{repo} will be used
as the base for the buildozer key
`{repo}_dir` specifies a custom checkout location
(relative to `buildozer.root_dir`). If present, `path` will be
set to this value and `url`, `branch` will be set to None,
None. Otherwise, `{repo}_url` and `{repo}_branch` will be
examined.
If no keys are present, the kwargs will be used to create
a sensible default URL and branch.
:Parameters:
`repo`: str (required)
name of repository to fetch. Used both for buildozer
keys ({platform}.{repo}_dir|_url|_branch) and in building
default git URL
`branch`: str (default 'master')
Specific branch to retrieve if none specified in
buildozer.spec.
`owner`: str
owner of repo.
`platform`: str or None
platform prefix to use when retrieving `buildozer.spec`
keys. If specified, key names will be {platform}.{repo}
instead of just {repo}
`squash_hyphen`: boolean
if True, change '-' to '_' when looking for
keys in buildozer.spec. This lets us keep backwards
compatibility with old buildozer.spec files
`url_format`: format string
Used to construct default git URL.
can use {repo} {owner} and {branch} if needed.
:Returns:
A Tuple (path, url, branch) where
`path`
Path to a custom git checkout. If specified,
both `url` and `branch` will be None
`url`
URL of git repository from where code should be
checked-out
`branch`
branch name (or tag) that should be used for the
check-out. | [
"Get",
"source",
"location",
"for",
"a",
"git",
"checkout"
] | 586152c6ce2b6cde4d5a081d9711f9cb037a901c | https://github.com/kivy/buildozer/blob/586152c6ce2b6cde4d5a081d9711f9cb037a901c/buildozer/target.py#L151-L230 | train | 223,795 |
kivy/buildozer | buildozer/target.py | Target.install_or_update_repo | def install_or_update_repo(self, repo, **kwargs):
"""Install or update a git repository into the platform directory.
This will clone the contents of a git repository to
`buildozer.platform_dir`. The location of this repo can be
speficied via URL and branch name, or via a custom (local)
directory name.
:Parameters:
**kwargs:
Any valid arguments for :meth:`path_or_git_url`
:Returns:
fully qualified path to updated git repo
"""
cmd = self.buildozer.cmd
install_dir = join(self.buildozer.platform_dir, repo)
custom_dir, clone_url, clone_branch = self.path_or_git_url(repo, **kwargs)
if not self.buildozer.file_exists(install_dir):
if custom_dir:
cmd('mkdir -p "{}"'.format(install_dir))
cmd('cp -a "{}"/* "{}"/'.format(custom_dir, install_dir))
else:
cmd('git clone {}'.format(clone_url),
cwd=self.buildozer.platform_dir)
elif self.platform_update:
if custom_dir:
cmd('cp -a "{}"/* "{}"/'.format(custom_dir, install_dir))
else:
cmd('git clean -dxf', cwd=install_dir)
cmd('git pull origin {}'.format(clone_branch), cwd=install_dir)
return install_dir | python | def install_or_update_repo(self, repo, **kwargs):
"""Install or update a git repository into the platform directory.
This will clone the contents of a git repository to
`buildozer.platform_dir`. The location of this repo can be
speficied via URL and branch name, or via a custom (local)
directory name.
:Parameters:
**kwargs:
Any valid arguments for :meth:`path_or_git_url`
:Returns:
fully qualified path to updated git repo
"""
cmd = self.buildozer.cmd
install_dir = join(self.buildozer.platform_dir, repo)
custom_dir, clone_url, clone_branch = self.path_or_git_url(repo, **kwargs)
if not self.buildozer.file_exists(install_dir):
if custom_dir:
cmd('mkdir -p "{}"'.format(install_dir))
cmd('cp -a "{}"/* "{}"/'.format(custom_dir, install_dir))
else:
cmd('git clone {}'.format(clone_url),
cwd=self.buildozer.platform_dir)
elif self.platform_update:
if custom_dir:
cmd('cp -a "{}"/* "{}"/'.format(custom_dir, install_dir))
else:
cmd('git clean -dxf', cwd=install_dir)
cmd('git pull origin {}'.format(clone_branch), cwd=install_dir)
return install_dir | [
"def",
"install_or_update_repo",
"(",
"self",
",",
"repo",
",",
"*",
"*",
"kwargs",
")",
":",
"cmd",
"=",
"self",
".",
"buildozer",
".",
"cmd",
"install_dir",
"=",
"join",
"(",
"self",
".",
"buildozer",
".",
"platform_dir",
",",
"repo",
")",
"custom_dir"... | Install or update a git repository into the platform directory.
This will clone the contents of a git repository to
`buildozer.platform_dir`. The location of this repo can be
speficied via URL and branch name, or via a custom (local)
directory name.
:Parameters:
**kwargs:
Any valid arguments for :meth:`path_or_git_url`
:Returns:
fully qualified path to updated git repo | [
"Install",
"or",
"update",
"a",
"git",
"repository",
"into",
"the",
"platform",
"directory",
"."
] | 586152c6ce2b6cde4d5a081d9711f9cb037a901c | https://github.com/kivy/buildozer/blob/586152c6ce2b6cde4d5a081d9711f9cb037a901c/buildozer/target.py#L232-L263 | train | 223,796 |
kivy/buildozer | buildozer/__init__.py | set_config_token_from_env | def set_config_token_from_env(section, token, config):
'''Given a config section and token, checks for an appropriate
environment variable. If the variable exists, sets the config entry to
its value.
The environment variable checked is of the form SECTION_TOKEN, all
upper case, with any dots replaced by underscores.
Returns True if the environment variable exists and was used, or
False otherwise.
'''
env_var_name = ''.join([section.upper(), '_',
token.upper().replace('.', '_')])
env_var = os.environ.get(env_var_name)
if env_var is None:
return False
config.set(section, token, env_var)
return True | python | def set_config_token_from_env(section, token, config):
'''Given a config section and token, checks for an appropriate
environment variable. If the variable exists, sets the config entry to
its value.
The environment variable checked is of the form SECTION_TOKEN, all
upper case, with any dots replaced by underscores.
Returns True if the environment variable exists and was used, or
False otherwise.
'''
env_var_name = ''.join([section.upper(), '_',
token.upper().replace('.', '_')])
env_var = os.environ.get(env_var_name)
if env_var is None:
return False
config.set(section, token, env_var)
return True | [
"def",
"set_config_token_from_env",
"(",
"section",
",",
"token",
",",
"config",
")",
":",
"env_var_name",
"=",
"''",
".",
"join",
"(",
"[",
"section",
".",
"upper",
"(",
")",
",",
"'_'",
",",
"token",
".",
"upper",
"(",
")",
".",
"replace",
"(",
"'.... | Given a config section and token, checks for an appropriate
environment variable. If the variable exists, sets the config entry to
its value.
The environment variable checked is of the form SECTION_TOKEN, all
upper case, with any dots replaced by underscores.
Returns True if the environment variable exists and was used, or
False otherwise. | [
"Given",
"a",
"config",
"section",
"and",
"token",
"checks",
"for",
"an",
"appropriate",
"environment",
"variable",
".",
"If",
"the",
"variable",
"exists",
"sets",
"the",
"config",
"entry",
"to",
"its",
"value",
"."
] | 586152c6ce2b6cde4d5a081d9711f9cb037a901c | https://github.com/kivy/buildozer/blob/586152c6ce2b6cde4d5a081d9711f9cb037a901c/buildozer/__init__.py#L1252-L1270 | train | 223,797 |
kivy/buildozer | buildozer/__init__.py | Buildozer.prepare_for_build | def prepare_for_build(self):
'''Prepare the build.
'''
assert(self.target is not None)
if hasattr(self.target, '_build_prepared'):
return
self.info('Preparing build')
self.info('Check requirements for {0}'.format(self.targetname))
self.target.check_requirements()
self.info('Install platform')
self.target.install_platform()
self.info('Check application requirements')
self.check_application_requirements()
self.info('Check garden requirements')
self.check_garden_requirements()
self.info('Compile platform')
self.target.compile_platform()
# flag to prevent multiple build
self.target._build_prepared = True | python | def prepare_for_build(self):
'''Prepare the build.
'''
assert(self.target is not None)
if hasattr(self.target, '_build_prepared'):
return
self.info('Preparing build')
self.info('Check requirements for {0}'.format(self.targetname))
self.target.check_requirements()
self.info('Install platform')
self.target.install_platform()
self.info('Check application requirements')
self.check_application_requirements()
self.info('Check garden requirements')
self.check_garden_requirements()
self.info('Compile platform')
self.target.compile_platform()
# flag to prevent multiple build
self.target._build_prepared = True | [
"def",
"prepare_for_build",
"(",
"self",
")",
":",
"assert",
"(",
"self",
".",
"target",
"is",
"not",
"None",
")",
"if",
"hasattr",
"(",
"self",
".",
"target",
",",
"'_build_prepared'",
")",
":",
"return",
"self",
".",
"info",
"(",
"'Preparing build'",
"... | Prepare the build. | [
"Prepare",
"the",
"build",
"."
] | 586152c6ce2b6cde4d5a081d9711f9cb037a901c | https://github.com/kivy/buildozer/blob/586152c6ce2b6cde4d5a081d9711f9cb037a901c/buildozer/__init__.py#L173-L198 | train | 223,798 |
kivy/buildozer | buildozer/__init__.py | Buildozer.build | def build(self):
'''Do the build.
The target can set build_mode to 'release' or 'debug' before calling
this method.
(:meth:`prepare_for_build` must have been call before.)
'''
assert(self.target is not None)
assert(hasattr(self.target, '_build_prepared'))
if hasattr(self.target, '_build_done'):
return
# increment the build number
self.build_id = int(self.state.get('cache.build_id', '0')) + 1
self.state['cache.build_id'] = str(self.build_id)
self.info('Build the application #{}'.format(self.build_id))
self.build_application()
self.info('Package the application')
self.target.build_package()
# flag to prevent multiple build
self.target._build_done = True | python | def build(self):
'''Do the build.
The target can set build_mode to 'release' or 'debug' before calling
this method.
(:meth:`prepare_for_build` must have been call before.)
'''
assert(self.target is not None)
assert(hasattr(self.target, '_build_prepared'))
if hasattr(self.target, '_build_done'):
return
# increment the build number
self.build_id = int(self.state.get('cache.build_id', '0')) + 1
self.state['cache.build_id'] = str(self.build_id)
self.info('Build the application #{}'.format(self.build_id))
self.build_application()
self.info('Package the application')
self.target.build_package()
# flag to prevent multiple build
self.target._build_done = True | [
"def",
"build",
"(",
"self",
")",
":",
"assert",
"(",
"self",
".",
"target",
"is",
"not",
"None",
")",
"assert",
"(",
"hasattr",
"(",
"self",
".",
"target",
",",
"'_build_prepared'",
")",
")",
"if",
"hasattr",
"(",
"self",
".",
"target",
",",
"'_buil... | Do the build.
The target can set build_mode to 'release' or 'debug' before calling
this method.
(:meth:`prepare_for_build` must have been call before.) | [
"Do",
"the",
"build",
"."
] | 586152c6ce2b6cde4d5a081d9711f9cb037a901c | https://github.com/kivy/buildozer/blob/586152c6ce2b6cde4d5a081d9711f9cb037a901c/buildozer/__init__.py#L200-L225 | train | 223,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.