from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import logging
import re
from caffe2.python import core as caffe2_core
from caffe2.python.compatibility import container_abcs
from caffe2.proto import caffe2_legacy_pb2
from enum import Enum
from onnx import (defs, checker, helper, numpy_helper, mapping,
ModelProto, GraphProto, NodeProto, AttributeProto, TensorProto, OperatorSetIdProto)
from onnx.helper import make_tensor, make_tensor_value_info, make_attribute, make_model
import numpy as np
from caffe2.python.onnx.helper import c2_native_run_net
from caffe2.python.onnx.error import Unsupported
import caffe2.python._import_c_extension as C
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Caffe2Frontend(object):
# This number controls the semantics of the operators we target. Whenever
# ONNX makes a BC breaking change to semantics of operators, having this set
    # to an accurate number will prevent our models from exporting. However,
# we should strive to keep this up-to-date as much as possible.
target_opset_version = 9
_renamed_operators = {
'SpatialBN': 'BatchNormalization',
'Conv1D': 'Conv',
'Conv2D': 'Conv',
'Conv3D': 'Conv',
'ConvTranspose1D': 'ConvTranspose',
'ConvTranspose2D': 'ConvTranspose',
'ConvTranspose3D': 'ConvTranspose',
'MaxPool1D': 'MaxPool',
'MaxPool2D': 'MaxPool',
'MaxPool3D': 'MaxPool',
'AveragePool1D': 'AveragePool',
'AveragePool2D': 'AveragePool',
'AveragePool3D': 'AveragePool',
}
# caffe2 arguments that are completely removed in onnx
_blacklist_caffe2_args = {
'order': {b'NCHW'},
'cudnn_exhaustive_search': {0, 1},
'exhaustive_search': {0, 1},
'use_cudnn': {0, 1},
}
_global_renamed_args = {
'kernels': 'kernel_shape',
}
_per_op_renamed_args = {
'Squeeze': {'dims': 'axes'},
'Transpose': {'axes': 'perm'},
}
_special_operators = {}
# Dummy name generator
_dummy_name = C.DummyName()
@classmethod
def dummy_name(cls):
return cls._dummy_name.new_dummy_name()
@classmethod
def _common_caffe2_arg_to_onnx_attr(cls, op_def, arg):
# name
op_type = op_def.type
name = cls._global_renamed_args.get(arg.name, arg.name)
if op_type in cls._per_op_renamed_args:
# Per-op attribute renames override the global attribute renames
name = cls._per_op_renamed_args[op_type].get(arg.name, name)
# value
if arg.HasField('f'):
value = arg.f
elif arg.HasField('i'):
value = arg.i
elif arg.HasField('s'):
value = arg.s
elif arg.floats:
value = arg.floats
elif arg.ints:
value = arg.ints
elif arg.strings:
value = arg.strings
else:
raise ValueError('Could not find data field in arg: {}'.format(arg))
        if name in cls._blacklist_caffe2_args:
            assert value in cls._blacklist_caffe2_args[name]
return None
return helper.make_attribute(name, value)
@classmethod
def caffe2_arg_to_onnx_attr(cls, op_def, arg):
return cls._common_caffe2_arg_to_onnx_attr(op_def, arg)
@classmethod
def _common_caffe2_op_to_onnx_node(cls, op_def, shapes):
node_def = NodeProto()
node_def.name = op_def.name
node_def.op_type = cls._renamed_operators.get(op_def.type, op_def.type)
node_def.input.extend(op_def.input)
node_def.output.extend(op_def.output)
attrs = filter(None, [cls.caffe2_arg_to_onnx_attr(op_def, arg)
for arg in op_def.arg])
node_def.attribute.extend(attrs)
return node_def
@classmethod
def caffe2_op_to_onnx_node(cls, op_def, shapes):
if C.support_onnx_export(op_def.type):
node_strs, tensor_strs = C.export_to_onnx(cls._dummy_name, op_def.SerializeToString(), shapes)
nodes = []
for s in node_strs:
node = NodeProto()
node.ParseFromString(s)
nodes.append(node)
const_tensors = []
for s in tensor_strs:
tensor = TensorProto()
tensor.ParseFromString(s)
const_tensors.append(tensor)
return nodes, const_tensors
elif op_def.type in cls._special_operators:
translator = getattr(cls, cls._special_operators[op_def.type])
else:
translator = cls._common_caffe2_op_to_onnx_node
nodes = translator(op_def, shapes)
const_tensors = []
if isinstance(nodes, tuple):
nodes, const_tensors = nodes
if not isinstance(nodes, container_abcs.Iterable):
nodes = [nodes]
return nodes, const_tensors
@staticmethod
def _all_names_in_net(net):
if net is None:
return set()
names = set()
names.update(net.external_input)
names.update(net.external_output)
for op in net.op:
names.update(op.input)
names.update(op.output)
return names
@staticmethod
def _extract_value_info(tensor):
return make_tensor_value_info(
name=tensor.name,
elem_type=tensor.data_type,
shape=tensor.dims)
@classmethod
def caffe2_net_to_onnx_graph(cls,
predict_net,
init_net=None,
value_info=None):
if value_info is None:
value_info = {}
if not isinstance(value_info, dict):
raise ValueError('Please pass value_info as a '
'name -> (type, shape) dictionary')
cls._filter_fake_init(init_net, value_info)
cls._ssa_rewrite(predict_net, init_net, value_info)
if init_net:
initializer = cls.caffe2_init_net_to_initializer(init_net)
value_info.update({init.name: (init.data_type, init.dims)
for init in initializer})
else:
initializer = []
# Check if value_info contains the types/shapes of all the blobs, in
# which case we don't need to infer them by running the net.
run_native_net = False
for op in predict_net.op:
for name in itertools.chain(op.input, op.output):
if name not in value_info:
run_native_net = True
break
        # Check whether we have type/shape info for all inputs
        missing = (set(predict_net.external_input) -
                   set(value_info.keys()))
if missing:
raise RuntimeError('Could not find value info of inputs: {}'.format(
', '.join(missing)))
ws = None
outputs = None
if run_native_net:
inputs = {}
for name in predict_net.external_input:
elem_type, shape = value_info[name]
inputs[name] = np.random.randn(*shape).astype(
mapping.TENSOR_TYPE_TO_NP_TYPE[elem_type])
ws, outputs = c2_native_run_net(
init_net,
predict_net,
inputs)
for name in predict_net.external_output:
output = outputs[name]
elem_type = mapping.NP_TYPE_TO_TENSOR_TYPE[output.dtype]
shape = output.shape
value_info[name] = (elem_type, shape)
graph_def = GraphProto()
graph_def.name = predict_net.name
graph_def.initializer.extend(initializer)
# This is a mapping from Caffe2 names to ONNX names
graph_def.input.extend(
make_tensor_value_info(
name=name,
elem_type=value_info[name][0],
shape=value_info[name][1])
for name in predict_net.external_input)
cls._dummy_name.reset(cls._all_names_in_net(predict_net) | cls._all_names_in_net(init_net))
for op in predict_net.op:
shapes = {}
for name in itertools.chain(op.input, op.output):
if ws:
blob = ws.FetchBlob(name)
if hasattr(blob, 'shape'):
shapes[name] = blob.shape
else:
shapes[name] = value_info[name][1]
nodes, const_tensors = cls.caffe2_op_to_onnx_node(op, shapes=shapes)
graph_def.node.extend(nodes)
graph_def.initializer.extend(const_tensors)
graph_def.input.extend([cls._extract_value_info(tensor) for tensor in const_tensors])
all_output = set(sum((list(node.output) for node in graph_def.node),
[init.name for init in graph_def.initializer]))
redundant_output = set(vi.name for vi in graph_def.output) - all_output
if redundant_output:
logger.warning(
            'There are graph outputs not produced by any node or initializer: {}'
'! Will drop them.'.format(', '.join(redundant_output)))
graph_def.output.extend(
make_tensor_value_info(
name=name,
elem_type=value_info[name][0],
shape=value_info[name][1])
for name in predict_net.external_output
if name in all_output)
return graph_def
@classmethod
def caffe2_init_net_to_initializer(cls, init_net):
ws, _ = c2_native_run_net(init_net=None, predict_net=init_net, inputs=[])
output_names = []
for op in init_net.op:
output_names.extend(op.output)
initializer = [numpy_helper.from_array(ws.FetchBlob(name), name=name)
for name in sorted(set(output_names))]
return initializer
@classmethod
def _filter_fake_init(cls, init_net, value_info):
if init_net:
fake_inits = [op for op in init_net.op
if len(op.output) == 1 and op.output[0] in value_info and
re.match('GivenTensor.*Fill|ConstantFill', op.type)]
for fake_init in fake_inits:
init_net.op.remove(fake_init)
del fake_inits[:]
del fake_inits
@classmethod
def ssa_rewrite(cls, net, init_net, value_info):
return cls._ssa_rewrite(net, init_net, value_info)
@classmethod
def _ssa_rewrite(cls, net, init_net, value_info):
def ssa_name(name, version, version_cnt=None):
if version == 0:
return name
if version_cnt and len(version_cnt.get(name, {})) <= 1:
return name
return '{}_{}'.format(name, version)
if init_net:
for op in init_net.op:
assert re.match('GivenTensor.*Fill', op.type), "type is {}, \n{}".format(op.type, op)
assert len(op.output) == 1
ssa, blob_versions = caffe2_core.get_ssa(net)
version_cnt = {}
versioned_blobs = []
for versioned_input, versioned_output in ssa:
versioned_blobs += versioned_input
versioned_blobs += versioned_output
for (name, version) in versioned_blobs:
if name not in version_cnt:
version_cnt[name] = {version}
else:
version_cnt[name].add(version)
assert len(net.op) == len(ssa)
for op, (versioned_inputs, versioned_outputs) in zip(net.op, ssa):
op.input[:] = [ssa_name(name, version, version_cnt)
for name, version in versioned_inputs]
op.output[:] = [ssa_name(name, version, version_cnt)
for name, version in versioned_outputs]
net.external_output[:] = [ssa_name(name, blob_versions[name], version_cnt)
for name in net.external_output]
@classmethod
def caffe2_net_to_onnx_model(cls, *args, **kwargs):
opset_id = OperatorSetIdProto()
opset_id.domain = '' # ONNX default domain
opset_id.version = cls.target_opset_version
model = make_model(cls.caffe2_net_to_onnx_graph(*args, **kwargs),
opset_imports=[opset_id], # current supported opset version
producer_name='onnx-caffe2', # producer name
)
checker.check_model(model)
return model
caffe2_net_to_onnx_graph = Caffe2Frontend.caffe2_net_to_onnx_graph
caffe2_net_to_onnx_model = Caffe2Frontend.caffe2_net_to_onnx_model
caffe2_init_net_to_initializer = Caffe2Frontend.caffe2_init_net_to_initializer
ssa_rewrite = Caffe2Frontend.ssa_rewrite
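# Example usage (a minimal sketch, not part of the original API surface).
# Assumes 'predict_net.pb' and 'init_net.pb' are serialized caffe2 NetDef
# protos on disk and that the network's input blob is named 'data'; all of
# these names are placeholders.
if __name__ == '__main__':
    from caffe2.proto import caffe2_pb2

    predict_net = caffe2_pb2.NetDef()
    with open('predict_net.pb', 'rb') as f:
        predict_net.ParseFromString(f.read())
    init_net = caffe2_pb2.NetDef()
    with open('init_net.pb', 'rb') as f:
        init_net.ParseFromString(f.read())

    # value_info maps each external input to its (ONNX element type, shape).
    value_info = {'data': (TensorProto.FLOAT, (1, 3, 224, 224))}
    onnx_model = caffe2_net_to_onnx_model(predict_net, init_net, value_info)
    with open('model.onnx', 'wb') as f:
        f.write(onnx_model.SerializeToString())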
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
class Parser(object):
# List of tuples (regex_str, lambda(regex_match, formatter))
    # If a lambda returns True, it will be called repeatedly with replacement;
    # otherwise it will only be called on text that hasn't been parsed yet.
regexes = [
# Code blocks of various formats
('````(.+?)````',
lambda m, f: f.addCode(m.group(1))
),
('```(.+?)```',
lambda m, f: f.addCode(m.group(1))
),
(r'((( {2})+)(\S.*)(\n\s*\n|\n))+',
lambda m, f: f.addCode(m.group(0))
),
(r'([^\.])\n',
lambda m, f: f.addRaw('{c} '.format(c=m.group(1))) or True
),
('`(.+?)`',
lambda m, f: f.addCode(m.group(1), True)
),
# Make links clickable
('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
lambda m, f: f.addLink(m.group(0), m.group(0))
),
(r'\*\*(.+?)\*\*',
lambda m, f: f.addEmphasis(m.group(1), 2)
),
(r'\*(.+?)\*',
lambda m, f: f.addEmphasis(m.group(1), 1)
),
]
def __init__(self, text, formatter):
self.text = text
self.lines = []
self.formatter = formatter
def parseText(self):
UNPARSED = 0
PARSED = 1
parsed_block = [(UNPARSED, self.text)]
for regex, func in self.regexes:
index = 0
while index < len(parsed_block):
label, text = parsed_block[index]
# Already been parsed
if (label == PARSED):
index += 1
continue
match = re.search(regex, text)
if match:
parsed_block.pop(index)
start = match.start(0)
end = match.end(0)
f = self.formatter.clone()
merge = func(match, f)
if merge:
merged = text[:start] + f.dump() + text[end:]
parsed_block.insert(index, (UNPARSED, merged))
else:
if text[:start]:
parsed_block.insert(index,
(UNPARSED, text[:start]))
index += 1
parsed_block.insert(index, (PARSED, f.dump()))
index += 1
if text[end:]:
parsed_block.insert(index,
(UNPARSED, text[end:]))
else:
index += 1
self.lines += [i for _, i in parsed_block]
self.text = ' '.join(self.lines)
def parse(self):
self.parseText()
        return self.text
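# Example usage (a minimal sketch; PlainFormatter is a hypothetical formatter
# written only for illustration -- the real formatters elsewhere in this
# package are assumed to expose the same clone/add*/dump interface).
if __name__ == '__main__':
    class PlainFormatter(object):
        def __init__(self):
            self.parts = []

        def clone(self):
            return PlainFormatter()

        def addCode(self, text, inline=False):
            self.parts.append('<code>{}</code>'.format(text))

        def addRaw(self, text):
            self.parts.append(text)

        def addLink(self, text, url):
            self.parts.append('<a href="{}">{}</a>'.format(url, text))

        def addEmphasis(self, text, level):
            marker = '*' * level
            self.parts.append(marker + text + marker)

        def dump(self):
            return ''.join(self.parts)

    parser = Parser('See the **docs** at https://caffe2.ai and `conv_op`.',
                    PlainFormatter())
    print(parser.parse())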
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.modeling.net_modifier import NetModifier
import numpy as np
class GetEntryFromBlobs(NetModifier):
"""
This class modifies the net passed in by adding ops to get a certain entry
from certain blobs.
Args:
blobs: list of blobs to get entry from
logging_frequency: frequency for printing entry values to logs
        i1, i2: the row and column indices of the entry to get (currently, we
            assume the blobs to be 2-dimensional). When i2 = -1, print all
            entries in blob[i1]
"""
def __init__(self, blobs, logging_frequency, i1=0, i2=0):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._i1 = i1
self._i2 = i2
self._field_name_suffix = '_{0}_{1}'.format(i1, i2) if i2 >= 0 \
else '_{0}_all'.format(i1)
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
i1, i2 = [self._i1, self._i2]
if i1 < 0:
raise ValueError('index is out of range')
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
blob_i1 = net.Slice([blob], starts=[i1, 0], ends=[i1 + 1, -1])
if self._i2 == -1:
blob_i1_i2 = net.Copy([blob_i1],
[net.NextScopedBlob(prefix=blob + '_{0}_all'.format(i1))])
else:
blob_i1_i2 = net.Slice([blob_i1],
net.NextScopedBlob(prefix=blob + '_{0}_{1}'.format(i1, i2)),
starts=[0, i2], ends=[-1, i2 + 1])
if self._logging_frequency >= 1:
net.Print(blob_i1_i2, [], every_n=self._logging_frequency)
if modify_output_record:
output_field_name = str(blob) + self._field_name_suffix
                output_scalar = schema.Scalar((np.float64), blob_i1_i2)
if net.output_record() is None:
net.set_output_record(
schema.Struct((output_field_name, output_scalar))
)
else:
net.AppendOutputRecordField(output_field_name, output_scalar)
def field_name_suffix(self):
        return self._field_name_suffix
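# Example usage (a minimal sketch; the synthetic net below exists only to
# exercise the modifier -- 'fc_w' is a placeholder blob name, and real nets
# would come from model_helper/brew):
if __name__ == '__main__':
    from caffe2.python import workspace

    net = core.Net('get_entry_example')
    net.GaussianFill([], ['fc_w'], shape=[4, 3], mean=0.0, std=1.0)
    # Print entry (0, 0) of the 2-D blob 'fc_w' on every run.
    modifier = GetEntryFromBlobs(['fc_w'], logging_frequency=1, i1=0, i2=0)
    modifier.modify_net(net)
    workspace.RunNetOnce(net)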
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.modeling.net_modifier import NetModifier
import numpy as np
class ComputeHistogramForBlobs(NetModifier):
"""
This class modifies the net passed in by adding ops to compute histogram for
certain blobs.
Args:
blobs: list of blobs to compute histogram for
        logging_frequency: frequency for printing histogram values to logs
lower_bound: left boundary of histogram values
upper_bound: right boundary of histogram values
num_buckets: number of buckets to use in [lower_bound, upper_bound)
accumulate: boolean to output accumulate or per-batch histogram
"""
def __init__(self, blobs, logging_frequency, num_buckets=30,
lower_bound=0.0, upper_bound=1.0, accumulate=False):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._accumulate = accumulate
if self._accumulate:
self._field_name_suffix = '_acc_normalized_hist'
else:
self._field_name_suffix = '_curr_normalized_hist'
self._num_buckets = int(num_buckets)
assert self._num_buckets > 0, (
"num_buckets need to be greater than 0, got {}".format(num_buckets))
self._lower_bound = float(lower_bound)
self._upper_bound = float(upper_bound)
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
blob_float = net.Cast(blob, net.NextScopedBlob(prefix=blob +
'_float'), to=core.DataType.FLOAT)
curr_hist, acc_hist = net.AccumulateHistogram(
[blob_float],
[net.NextScopedBlob(prefix=blob + '_curr_hist'),
net.NextScopedBlob(prefix=blob + '_acc_hist')],
num_buckets=self._num_buckets,
lower_bound=self._lower_bound,
upper_bound=self._upper_bound)
if self._accumulate:
hist = net.Cast(
acc_hist,
net.NextScopedBlob(prefix=blob + '_cast_hist'),
to=core.DataType.FLOAT)
else:
hist = net.Cast(
curr_hist,
net.NextScopedBlob(prefix=blob + '_cast_hist'),
to=core.DataType.FLOAT)
normalized_hist = net.NormalizeL1(
hist,
net.NextScopedBlob(prefix=blob + self._field_name_suffix)
)
if self._logging_frequency >= 1:
net.Print(normalized_hist, [], every_n=self._logging_frequency)
if modify_output_record:
output_field_name = str(blob) + self._field_name_suffix
output_scalar = schema.Scalar((np.float32, (self._num_buckets + 2,)),
normalized_hist)
if net.output_record() is None:
net.set_output_record(
schema.Struct((output_field_name, output_scalar))
)
else:
net.AppendOutputRecordField(
output_field_name,
output_scalar)
def field_name_suffix(self):
        return self._field_name_suffix
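# Example usage (a minimal sketch; the synthetic net below exists only to
# exercise the modifier -- 'activations' is a placeholder blob name):
if __name__ == '__main__':
    from caffe2.python import workspace

    net = core.Net('histogram_example')
    net.UniformFill([], ['activations'], shape=[16, 8], min=0.0, max=1.0)
    # Bucket the values of 'activations' into 10 buckets over [0, 1) and
    # print the normalized per-batch histogram on every run.
    modifier = ComputeHistogramForBlobs(['activations'], logging_frequency=1,
                                        num_buckets=10)
    modifier.modify_net(net)
    workspace.RunNetOnce(net)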
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.modeling.net_modifier import NetModifier
import numpy as np
class ComputeStatisticsForBlobs(NetModifier):
"""
This class modifies the net passed in by adding ops to compute statistics
for certain blobs. For each blob in the list, its min, max, mean and standard
deviation will be computed.
Args:
        blobs: list of blobs to compute statistics for
        logging_frequency: frequency for printing statistics to logs
"""
def __init__(self, blobs, logging_frequency):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._field_name_suffix = '_summary'
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
cast_blob = net.Cast(blob, to=core.DataType.FLOAT)
stats_name = net.NextScopedBlob(prefix=blob + self._field_name_suffix)
stats = net.Summarize(cast_blob, stats_name, to_file=0)
net.Print(stats, [], every_n=self._logging_frequency)
if modify_output_record:
output_field_name = str(blob) + self._field_name_suffix
                output_scalar = schema.Scalar((np.float64, (1,)), stats)
if net.output_record() is None:
net.set_output_record(
schema.Struct((output_field_name, output_scalar))
)
else:
net.AppendOutputRecordField(
output_field_name,
output_scalar)
def field_name_suffix(self):
        return self._field_name_suffix
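# Example usage (a minimal sketch; the synthetic net below exists only to
# exercise the modifier -- 'weights' is a placeholder blob name):
if __name__ == '__main__':
    from caffe2.python import workspace

    net = core.Net('statistics_example')
    net.GaussianFill([], ['weights'], shape=[8, 4], mean=0.0, std=1.0)
    # Print min/max/mean/std of 'weights' on every run.
    modifier = ComputeStatisticsForBlobs(['weights'], logging_frequency=1)
    modifier.modify_net(net)
    workspace.RunNetOnce(net)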
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.proto import caffe2_pb2
from caffe2.python.optimizer import get_param_device
from caffe2.python.modeling.net_modifier import NetModifier
import logging
logger = logging.getLogger(__name__)
class GradientClipping(NetModifier):
L1_NORM = 'l1_norm'
L2_NORM = 'l2_norm'
BY_NORM = 'by_norm'
BY_VALUE = 'by_value'
GRAD_CLIP_METHODS = [BY_NORM, BY_VALUE]
CLIP_GRADIENT_NORM_TYPES = [L2_NORM, L1_NORM]
def __init__(self, grad_clip_method, clip_norm_type='l2_norm',
clip_threshold=0.1, use_parameter_norm=False,
compute_norm_ratio=False, clip_max=1, clip_min=-1,
blobs_to_include=None, blobs_to_exclude=None):
"""
Clips gradient to avoid gradient magnitude explosion or vanishing gradient.
Args:
grad_clip_method: ways to clip the gradients
clip_norm_type: type of norm used in the necessary computation
clip_threshold: threshold used to determine whether to clip
use_parameter_norm: a boolean to indicate whether to incorporate
the norm of the parameter
compute_norm_ratio: a boolean to compute the ratio between gradient norm
and parameter norm explicitly for debugging purpose
clip_max: when clipping by_value, any value that is greater than
clip_max will be clipped to clip_max
clip_min: when clipping by_value, any value that is smaller than
clip_min will be clipped to clip_min
            blobs_to_include: names of blobs whose gradients are to be clipped. If it is
                set to None, the gradients of all params in grad_map will be clipped.
            blobs_to_exclude: names of blobs whose gradients are not to be clipped.
"""
        assert grad_clip_method in self.GRAD_CLIP_METHODS, (
            "This method of clipping, {}, has not been implemented.".format(
                grad_clip_method))
if clip_norm_type is not None:
assert clip_norm_type in self.CLIP_GRADIENT_NORM_TYPES, (
"This method of clipping, {}, has not been implemented.".format(
clip_norm_type))
self.grad_clip_method = grad_clip_method
self.clip_norm_type = clip_norm_type
self.clip_threshold = float(clip_threshold)
self.use_parameter_norm = use_parameter_norm
self.compute_norm_ratio = compute_norm_ratio
self.clip_max = float(clip_max)
self.clip_min = float(clip_min)
self.blobs_to_include = blobs_to_include
self.blobs_to_exclude = blobs_to_exclude
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
assert grad_map is not None
CPU = core.DeviceOption(caffe2_pb2.CPU)
final_param_map = {}
if self.blobs_to_include is None:
final_param_map = grad_map
else:
for blob in self.blobs_to_include:
param = core.BlobReference(blob)
if not net.BlobIsDefined(param):
raise Exception('param {0} is not defined in net {1}'.format(
param, net.Name()))
final_param_map[param] = grad_map[param]
if self.blobs_to_exclude is not None:
for blob in self.blobs_to_exclude:
final_param_map.pop(blob, None)
for param, grad in final_param_map.items():
# currently sparse gradients won't be clipped
# further implementation is needed to enable it
if isinstance(grad, core.GradientSlice):
continue
device = get_param_device(
param,
grad_map[str(param)],
param_to_device=blob_to_device,
default_device=CPU,
)
with core.DeviceScope(device):
if self.grad_clip_method == self.BY_NORM:
if self.clip_norm_type == self.L2_NORM:
p = 2
elif self.clip_norm_type == self.L1_NORM:
p = 1
grad_norm = net.LpNorm(
[grad],
net.NextScopedBlob(prefix=str(grad) + '_l{}_norm'.format(p)),
p=p,
)
if p == 2:
grad_norm = net.Pow([grad_norm], exponent=0.5)
op_inputs = [grad, grad_norm]
if self.use_parameter_norm:
param_norm = net.LpNorm(
[param],
net.NextScopedBlob(
prefix=str(param) + '_l{}_norm'.format(p)),
p=p,
)
if p == 2:
param_norm = net.Pow([param_norm], exponent=0.5)
op_inputs.append(param_norm)
if self.compute_norm_ratio:
net.Div(
[grad_norm, param_norm],
[net.NextScopedBlob(
prefix=str(param) + "_norm_ratio")]
)
net.ClipTensorByScaling(
op_inputs,
[grad],
threshold=self.clip_threshold,
)
elif self.grad_clip_method == self.BY_VALUE:
net.Clip(
[grad],
[grad],
max=self.clip_max,
min=self.clip_min,
                    )
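# Example usage (a minimal sketch; the tiny model below is hypothetical and
# exists only to produce a grad_map for the modifier to act on):
if __name__ == '__main__':
    import numpy as np
    from caffe2.python import brew, model_helper, workspace

    model = model_helper.ModelHelper(name='clip_example')
    fc = brew.fc(model, 'data', 'fc', dim_in=4, dim_out=1)
    loss = model.net.SumElements(fc, 'loss')
    grad_map = model.AddGradientOperators([loss])

    # Rescale each dense gradient so that its L2 norm stays below 1.0.
    clipper = GradientClipping(grad_clip_method='by_norm',
                               clip_norm_type='l2_norm',
                               clip_threshold=1.0)
    clipper.modify_net(model.net, grad_map=grad_map)

    workspace.FeedBlob('data', np.random.rand(8, 4).astype(np.float32))
    workspace.RunNetOnce(model.param_init_net)
    workspace.RunNetOnce(model.net)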
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import numpy as np
import time
import os
from caffe2.python import core, workspace, experiment_util, data_parallel_model
from caffe2.python import dyndep, optimizer
from caffe2.python import timeout_guard, model_helper, brew
from caffe2.proto import caffe2_pb2
import caffe2.python.models.resnet as resnet
import caffe2.python.models.shufflenet as shufflenet
from caffe2.python.modeling.initializers import Initializer, PseudoFP16Initializer
import caffe2.python.predictor.predictor_exporter as pred_exp
import caffe2.python.predictor.predictor_py_utils as pred_utils
from caffe2.python.predictor_constants import predictor_constants as predictor_constants
'''
Parallelized multi-GPU distributed trainer for Resne(X)t & Shufflenet.
Can be used to train on imagenet data, for example.
The default parameters can train a standard Resnet-50 (1x64d), and parameters
can be provided to train ResNe(X)t models (e.g., ResNeXt-101 32x4d).
To run the trainer in single-machine multi-gpu mode, set num_shards = 1.
To run the trainer in multi-machine multi-gpu mode with M machines,
run the same program on all machines, specifying num_shards = M, and
shard_id = a unique integer in the set [0, M-1].
For rendezvous (the trainer processes have to know about each other),
you can either use a directory path that is visible to all processes
(e.g. NFS directory), or use a Redis instance. Use the former by
passing the `file_store_path` argument. Use the latter by passing the
`redis_host` and `redis_port` arguments.
'''
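# Example invocations (sketches only; the data paths, host names, and run ids
# below are placeholders, not files or services provided by this package):
#
#   Single machine, 2 GPUs:
#     python resnet50_trainer.py --train_data /path/to/train_lmdb --gpus 0,1
#
#   Two machines rendezvousing through Redis (run once per machine, with
#   --shard_id 0 and --shard_id 1 respectively):
#     python resnet50_trainer.py --train_data /path/to/train_lmdb \
#         --num_shards 2 --shard_id 0 --run_id exp001 \
#         --redis_host redis.example.com --redis_port 6379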
logging.basicConfig()
log = logging.getLogger("Imagenet_trainer")
log.setLevel(logging.DEBUG)
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:file_store_handler_ops')
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:redis_store_handler_ops')
def AddImageInput(
model,
reader,
batch_size,
img_size,
dtype,
is_test,
mean_per_channel=None,
std_per_channel=None,
):
'''
The image input operator loads image and label data from the reader and
applies transformations to the images (random cropping, mirroring, ...).
'''
data, label = brew.image_input(
model,
reader, ["data", "label"],
batch_size=batch_size,
output_type=dtype,
use_gpu_transform=True if core.IsGPUDeviceType(model._device_type) else False,
use_caffe_datum=True,
mean_per_channel=mean_per_channel,
std_per_channel=std_per_channel,
# mean_per_channel takes precedence over mean
mean=128.,
std=128.,
scale=256,
crop=img_size,
mirror=1,
is_test=is_test,
)
data = model.StopGradient(data, data)
def AddNullInput(model, reader, batch_size, img_size, dtype):
'''
The null input function uses a gaussian fill operator to emulate real image
input. A label blob is hardcoded to a single value. This is useful if you
want to test compute throughput or don't have a dataset available.
'''
suffix = "_fp16" if dtype == "float16" else ""
model.param_init_net.GaussianFill(
[],
["data" + suffix],
shape=[batch_size, 3, img_size, img_size],
)
if dtype == "float16":
model.param_init_net.FloatToHalf("data" + suffix, "data")
model.param_init_net.ConstantFill(
[],
["label"],
shape=[batch_size],
value=1,
dtype=core.DataType.INT32,
)
def SaveModel(args, train_model, epoch, use_ideep):
prefix = "[]_{}".format(train_model._device_prefix, train_model._devices[0])
predictor_export_meta = pred_exp.PredictorExportMeta(
predict_net=train_model.net.Proto(),
parameters=data_parallel_model.GetCheckpointParams(train_model),
inputs=[prefix + "/data"],
outputs=[prefix + "/softmax"],
shapes={
prefix + "/softmax": (1, args.num_labels),
prefix + "/data": (args.num_channels, args.image_size, args.image_size)
}
)
# save the train_model for the current epoch
model_path = "%s/%s_%d.mdl" % (
args.file_store_path,
args.save_model_name,
epoch,
)
# set db_type to be "minidb" instead of "log_file_db", which breaks
# the serialization in save_to_db. Need to switch back to log_file_db
# after migration
pred_exp.save_to_db(
db_type="minidb",
db_destination=model_path,
predictor_export_meta=predictor_export_meta,
use_ideep=use_ideep
)
def LoadModel(path, model, use_ideep):
'''
Load pretrained model from file
'''
log.info("Loading path: {}".format(path))
meta_net_def = pred_exp.load_from_db(path, 'minidb')
init_net = core.Net(pred_utils.GetNet(
meta_net_def, predictor_constants.GLOBAL_INIT_NET_TYPE))
predict_init_net = core.Net(pred_utils.GetNet(
meta_net_def, predictor_constants.PREDICT_INIT_NET_TYPE))
if use_ideep:
predict_init_net.RunAllOnIDEEP()
else:
predict_init_net.RunAllOnGPU()
if use_ideep:
init_net.RunAllOnIDEEP()
else:
init_net.RunAllOnGPU()
assert workspace.RunNetOnce(predict_init_net)
assert workspace.RunNetOnce(init_net)
# Hack: fix iteration counter which is in CUDA context after load model
itercnt = workspace.FetchBlob("optimizer_iteration")
workspace.FeedBlob(
"optimizer_iteration",
itercnt,
device_option=core.DeviceOption(caffe2_pb2.CPU, 0)
)
def RunEpoch(
args,
epoch,
train_model,
test_model,
total_batch_size,
num_shards,
expname,
explog,
):
'''
Run one epoch of the trainer.
TODO: add checkpointing here.
'''
# TODO: add loading from checkpoint
log.info("Starting epoch {}/{}".format(epoch, args.num_epochs))
epoch_iters = int(args.epoch_size / total_batch_size / num_shards)
test_epoch_iters = int(args.test_epoch_size / total_batch_size / num_shards)
for i in range(epoch_iters):
# This timeout is required (temporarily) since CUDA-NCCL
# operators might deadlock when synchronizing between GPUs.
timeout = args.first_iter_timeout if i == 0 else args.timeout
with timeout_guard.CompleteInTimeOrDie(timeout):
t1 = time.time()
workspace.RunNet(train_model.net.Proto().name)
t2 = time.time()
dt = t2 - t1
fmt = "Finished iteration {}/{} of epoch {} ({:.2f} images/sec)"
log.info(fmt.format(i + 1, epoch_iters, epoch, total_batch_size / dt))
prefix = "{}_{}".format(
train_model._device_prefix,
train_model._devices[0])
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
train_fmt = "Training loss: {}, accuracy: {}"
log.info(train_fmt.format(loss, accuracy))
num_images = epoch * epoch_iters * total_batch_size
prefix = "{}_{}".format(train_model._device_prefix, train_model._devices[0])
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
learning_rate = workspace.FetchBlob(
data_parallel_model.GetLearningRateBlobNames(train_model)[0]
)
test_accuracy = 0
test_accuracy_top5 = 0
if test_model is not None:
        # Run test_epoch_iters iterations of testing
ntests = 0
for _ in range(test_epoch_iters):
workspace.RunNet(test_model.net.Proto().name)
for g in test_model._devices:
                test_accuracy += workspace.FetchBlob(
                    "{}_{}".format(test_model._device_prefix, g) + '/accuracy'
                ).item()
                test_accuracy_top5 += workspace.FetchBlob(
                    "{}_{}".format(test_model._device_prefix, g) + '/accuracy_top5'
                ).item()
ntests += 1
test_accuracy /= ntests
test_accuracy_top5 /= ntests
else:
test_accuracy = (-1)
test_accuracy_top5 = (-1)
explog.log(
input_count=num_images,
batch_count=(i + epoch * epoch_iters),
additional_values={
'accuracy': accuracy,
'loss': loss,
'learning_rate': learning_rate,
'epoch': epoch,
'top1_test_accuracy': test_accuracy,
'top5_test_accuracy': test_accuracy_top5,
}
)
assert loss < 40, "Exploded gradients :("
# TODO: add checkpointing
return epoch + 1
def Train(args):
if args.model == "resnext":
model_name = "resnext" + str(args.num_layers)
elif args.model == "shufflenet":
model_name = "shufflenet"
# Either use specified device list or generate one
if args.gpus is not None:
gpus = [int(x) for x in args.gpus.split(',')]
num_gpus = len(gpus)
else:
gpus = list(range(args.num_gpus))
num_gpus = args.num_gpus
log.info("Running on GPUs: {}".format(gpus))
# Verify valid batch size
total_batch_size = args.batch_size
batch_per_device = total_batch_size // num_gpus
assert \
total_batch_size % num_gpus == 0, \
"Number of GPUs must divide batch size"
# Verify valid image mean/std per channel
if args.image_mean_per_channel:
assert \
len(args.image_mean_per_channel) == args.num_channels, \
"The number of channels of image mean doesn't match input"
if args.image_std_per_channel:
assert \
len(args.image_std_per_channel) == args.num_channels, \
"The number of channels of image std doesn't match input"
# Round down epoch size to closest multiple of batch size across machines
global_batch_size = total_batch_size * args.num_shards
epoch_iters = int(args.epoch_size / global_batch_size)
assert \
epoch_iters > 0, \
"Epoch size must be larger than batch size times shard count"
args.epoch_size = epoch_iters * global_batch_size
log.info("Using epoch size: {}".format(args.epoch_size))
# Create ModelHelper object
if args.use_ideep:
train_arg_scope = {
'use_cudnn': False,
'cudnn_exhaustive_search': False,
'training_mode': 1
}
else:
train_arg_scope = {
'order': 'NCHW',
'use_cudnn': True,
'cudnn_exhaustive_search': True,
'ws_nbytes_limit': (args.cudnn_workspace_limit_mb * 1024 * 1024),
}
train_model = model_helper.ModelHelper(
name=model_name, arg_scope=train_arg_scope
)
num_shards = args.num_shards
shard_id = args.shard_id
# Expect interfaces to be comma separated.
# Use of multiple network interfaces is not yet complete,
# so simply use the first one in the list.
interfaces = args.distributed_interfaces.split(",")
# Rendezvous using MPI when run with mpirun
if os.getenv("OMPI_COMM_WORLD_SIZE") is not None:
num_shards = int(os.getenv("OMPI_COMM_WORLD_SIZE", 1))
shard_id = int(os.getenv("OMPI_COMM_WORLD_RANK", 0))
if num_shards > 1:
rendezvous = dict(
kv_handler=None,
num_shards=num_shards,
shard_id=shard_id,
engine="GLOO",
transport=args.distributed_transport,
interface=interfaces[0],
mpi_rendezvous=True,
exit_nets=None)
elif num_shards > 1:
# Create rendezvous for distributed computation
store_handler = "store_handler"
if args.redis_host is not None:
# Use Redis for rendezvous if Redis host is specified
workspace.RunOperatorOnce(
core.CreateOperator(
"RedisStoreHandlerCreate", [], [store_handler],
host=args.redis_host,
port=args.redis_port,
prefix=args.run_id,
)
)
else:
# Use filesystem for rendezvous otherwise
workspace.RunOperatorOnce(
core.CreateOperator(
"FileStoreHandlerCreate", [], [store_handler],
path=args.file_store_path,
prefix=args.run_id,
)
)
rendezvous = dict(
kv_handler=store_handler,
shard_id=shard_id,
num_shards=num_shards,
engine="GLOO",
transport=args.distributed_transport,
interface=interfaces[0],
exit_nets=None)
else:
rendezvous = None
# Model building functions
def create_resnext_model_ops(model, loss_scale):
initializer = (PseudoFP16Initializer if args.dtype == 'float16'
else Initializer)
with brew.arg_scope([brew.conv, brew.fc],
WeightInitializer=initializer,
BiasInitializer=initializer,
enable_tensor_core=args.enable_tensor_core,
float16_compute=args.float16_compute):
pred = resnet.create_resnext(
model,
"data",
num_input_channels=args.num_channels,
num_labels=args.num_labels,
num_layers=args.num_layers,
num_groups=args.resnext_num_groups,
num_width_per_group=args.resnext_width_per_group,
no_bias=True,
no_loss=True,
)
if args.dtype == 'float16':
pred = model.net.HalfToFloat(pred, pred + '_fp32')
softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
['softmax', 'loss'])
loss = model.Scale(loss, scale=loss_scale)
brew.accuracy(model, [softmax, "label"], "accuracy", top_k=1)
brew.accuracy(model, [softmax, "label"], "accuracy_top5", top_k=5)
return [loss]
def create_shufflenet_model_ops(model, loss_scale):
initializer = (PseudoFP16Initializer if args.dtype == 'float16'
else Initializer)
with brew.arg_scope([brew.conv, brew.fc],
WeightInitializer=initializer,
BiasInitializer=initializer,
enable_tensor_core=args.enable_tensor_core,
float16_compute=args.float16_compute):
pred = shufflenet.create_shufflenet(
model,
"data",
num_input_channels=args.num_channels,
num_labels=args.num_labels,
no_loss=True,
)
if args.dtype == 'float16':
pred = model.net.HalfToFloat(pred, pred + '_fp32')
softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
['softmax', 'loss'])
loss = model.Scale(loss, scale=loss_scale)
brew.accuracy(model, [softmax, "label"], "accuracy", top_k=1)
brew.accuracy(model, [softmax, "label"], "accuracy_top5", top_k=5)
return [loss]
def add_optimizer(model):
stepsz = int(30 * args.epoch_size / total_batch_size / num_shards)
if args.float16_compute:
# TODO: merge with multi-precision optimizer
opt = optimizer.build_fp16_sgd(
model,
args.base_learning_rate,
momentum=0.9,
nesterov=1,
weight_decay=args.weight_decay, # weight decay included
policy="step",
stepsize=stepsz,
gamma=0.1
)
else:
optimizer.add_weight_decay(model, args.weight_decay)
opt = optimizer.build_multi_precision_sgd(
model,
args.base_learning_rate,
momentum=0.9,
nesterov=1,
policy="step",
stepsize=stepsz,
gamma=0.1
)
return opt
# Define add_image_input function.
# Depends on the "train_data" argument.
    # Note that the reader will be shared between all GPUs.
if args.train_data == "null":
def add_image_input(model):
AddNullInput(
model,
None,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
)
else:
reader = train_model.CreateDB(
"reader",
db=args.train_data,
db_type=args.db_type,
num_shards=num_shards,
shard_id=shard_id,
)
def add_image_input(model):
AddImageInput(
model,
reader,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
is_test=False,
mean_per_channel=args.image_mean_per_channel,
std_per_channel=args.image_std_per_channel,
)
def add_post_sync_ops(model):
"""Add ops applied after initial parameter sync."""
for param_info in model.GetOptimizationParamInfo(model.GetParams()):
if param_info.blob_copy is not None:
model.param_init_net.HalfToFloat(
param_info.blob,
param_info.blob_copy[core.DataType.FLOAT]
)
data_parallel_model.Parallelize(
train_model,
input_builder_fun=add_image_input,
forward_pass_builder_fun=create_resnext_model_ops
if args.model == "resnext" else create_shufflenet_model_ops,
optimizer_builder_fun=add_optimizer,
post_sync_builder_fun=add_post_sync_ops,
devices=gpus,
rendezvous=rendezvous,
optimize_gradient_memory=False,
use_nccl=args.use_nccl,
cpu_device=args.use_cpu,
ideep=args.use_ideep,
shared_model=args.use_cpu,
combine_spatial_bn=args.use_cpu,
)
data_parallel_model.OptimizeGradientMemory(train_model, {}, set(), False)
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net)
# Add test model, if specified
test_model = None
if (args.test_data is not None):
log.info("----- Create test net ----")
if args.use_ideep:
test_arg_scope = {
'use_cudnn': False,
'cudnn_exhaustive_search': False,
}
else:
test_arg_scope = {
'order': "NCHW",
'use_cudnn': True,
'cudnn_exhaustive_search': True,
}
test_model = model_helper.ModelHelper(
name=model_name + "_test",
arg_scope=test_arg_scope,
init_params=False,
)
test_reader = test_model.CreateDB(
"test_reader",
db=args.test_data,
db_type=args.db_type,
)
def test_input_fn(model):
AddImageInput(
model,
test_reader,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
is_test=True,
mean_per_channel=args.image_mean_per_channel,
std_per_channel=args.image_std_per_channel,
)
data_parallel_model.Parallelize(
test_model,
input_builder_fun=test_input_fn,
forward_pass_builder_fun=create_resnext_model_ops
if args.model == "resnext" else create_shufflenet_model_ops,
post_sync_builder_fun=add_post_sync_ops,
param_update_builder_fun=None,
devices=gpus,
use_nccl=args.use_nccl,
cpu_device=args.use_cpu,
)
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net)
epoch = 0
# load the pre-trained model and reset epoch
if args.load_model_path is not None:
LoadModel(args.load_model_path, train_model, args.use_ideep)
# Sync the model params
data_parallel_model.FinalizeAfterCheckpoint(train_model)
# reset epoch. load_model_path should end with *_X.mdl,
# where X is the epoch number
last_str = args.load_model_path.split('_')[-1]
if last_str.endswith('.mdl'):
epoch = int(last_str[:-4])
log.info("Reset epoch to {}".format(epoch))
else:
log.warning("The format of load_model_path doesn't match!")
expname = "%s_gpu%d_b%d_L%d_lr%.2f_v2" % (
model_name,
args.num_gpus,
total_batch_size,
args.num_labels,
args.base_learning_rate,
)
explog = experiment_util.ModelTrainerLog(expname, args)
# Run the training one epoch a time
while epoch < args.num_epochs:
epoch = RunEpoch(
args,
epoch,
train_model,
test_model,
total_batch_size,
num_shards,
expname,
explog
)
# Save the model for each epoch
SaveModel(args, train_model, epoch, args.use_ideep)
model_path = "%s/%s_" % (
args.file_store_path,
args.save_model_name
)
# remove the saved model from the previous epoch if it exists
if os.path.isfile(model_path + str(epoch - 1) + ".mdl"):
os.remove(model_path + str(epoch - 1) + ".mdl")
def main():
# TODO: use argv
parser = argparse.ArgumentParser(
description="Caffe2: ImageNet Trainer"
)
parser.add_argument("--train_data", type=str, default=None, required=True,
help="Path to training data (or 'null' to simulate)")
parser.add_argument("--num_layers", type=int, default=50,
help="The number of layers in ResNe(X)t model")
parser.add_argument("--resnext_num_groups", type=int, default=1,
help="The cardinality of resnext")
parser.add_argument("--resnext_width_per_group", type=int, default=64,
help="The cardinality of resnext")
parser.add_argument("--test_data", type=str, default=None,
help="Path to test data")
parser.add_argument("--image_mean_per_channel", type=float, nargs='+',
help="The per channel mean for the images")
parser.add_argument("--image_std_per_channel", type=float, nargs='+',
help="The per channel standard deviation for the images")
parser.add_argument("--test_epoch_size", type=int, default=50000,
help="Number of test images")
parser.add_argument("--db_type", type=str, default="lmdb",
help="Database type (such as lmdb or leveldb)")
parser.add_argument("--gpus", type=str,
help="Comma separated list of GPU devices to use")
parser.add_argument("--num_gpus", type=int, default=1,
help="Number of GPU devices (instead of --gpus)")
parser.add_argument("--num_channels", type=int, default=3,
help="Number of color channels")
parser.add_argument("--image_size", type=int, default=224,
help="Input image size (to crop to)")
parser.add_argument("--num_labels", type=int, default=1000,
help="Number of labels")
parser.add_argument("--batch_size", type=int, default=32,
help="Batch size, total over all GPUs")
parser.add_argument("--epoch_size", type=int, default=1500000,
help="Number of images/epoch, total over all machines")
parser.add_argument("--num_epochs", type=int, default=1000,
help="Num epochs.")
parser.add_argument("--base_learning_rate", type=float, default=0.1,
help="Initial learning rate.")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help="Weight decay (L2 regularization)")
parser.add_argument("--cudnn_workspace_limit_mb", type=int, default=64,
help="CuDNN workspace limit in MBs")
parser.add_argument("--num_shards", type=int, default=1,
help="Number of machines in distributed run")
parser.add_argument("--shard_id", type=int, default=0,
help="Shard id.")
parser.add_argument("--run_id", type=str,
help="Unique run identifier (e.g. uuid)")
parser.add_argument("--redis_host", type=str,
help="Host of Redis server (for rendezvous)")
parser.add_argument("--redis_port", type=int, default=6379,
help="Port of Redis server (for rendezvous)")
parser.add_argument("--file_store_path", type=str, default="/tmp",
help="Path to directory to use for rendezvous")
parser.add_argument("--save_model_name", type=str, default="resnext_model",
help="Save the trained model to a given name")
parser.add_argument("--load_model_path", type=str, default=None,
help="Load previously saved model to continue training")
parser.add_argument("--use_cpu", action="store_true",
help="Use CPU instead of GPU")
parser.add_argument("--use_nccl", action="store_true",
help="Use nccl for inter-GPU collectives")
parser.add_argument("--use_ideep", type=bool, default=False,
help="Use ideep")
parser.add_argument('--dtype', default='float',
choices=['float', 'float16'],
help='Data type used for training')
parser.add_argument('--float16_compute', action='store_true',
help="Use float 16 compute, if available")
parser.add_argument('--enable_tensor_core', action='store_true',
help='Enable Tensor Core math for Conv and FC ops')
parser.add_argument("--distributed_transport", type=str, default="tcp",
help="Transport to use for distributed run [tcp|ibverbs]")
parser.add_argument("--distributed_interfaces", type=str, default="",
help="Network interfaces to use for distributed run")
parser.add_argument("--first_iter_timeout", type=int, default=600,
help="Timeout (secs) of the first iteration "
"(default: %(default)s)")
parser.add_argument("--timeout", type=int, default=60,
help="Timeout (secs) of each (except the first) iteration "
"(default: %(default)s)")
parser.add_argument("--model",
default="resnext", const="resnext", nargs="?",
choices=["shufflenet", "resnext"],
help="List of models which can be run")
args = parser.parse_args()
Train(args)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
    main()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import numpy as np
import time
import os
from caffe2.python import core, workspace, experiment_util, data_parallel_model
from caffe2.python import dyndep, optimizer
from caffe2.python import timeout_guard, model_helper, brew
from caffe2.proto import caffe2_pb2
import caffe2.python.models.resnet as resnet
import caffe2.python.models.shufflenet as shufflenet
from caffe2.python.modeling.initializers import Initializer, PseudoFP16Initializer
import caffe2.python.predictor.predictor_exporter as pred_exp
import caffe2.python.predictor.predictor_py_utils as pred_utils
from caffe2.python.predictor_constants import predictor_constants as predictor_constants
'''
Parallelized multi-GPU distributed trainer for Resne(X)t & Shufflenet.
Can be used to train on imagenet data, for example.
The default parameters can train a standard Resnet-50 (1x64d), and parameters
can be provided to train ResNe(X)t models (e.g., ResNeXt-101 32x4d).
To run the trainer in single-machine multi-gpu mode by setting num_shards = 1.
To run the trainer in multi-machine multi-gpu mode with M machines,
run the same program on all machines, specifying num_shards = M, and
shard_id = a unique integer in the set [0, M-1].
For rendezvous (the trainer processes have to know about each other),
you can either use a directory path that is visible to all processes
(e.g. NFS directory), or use a Redis instance. Use the former by
passing the `file_store_path` argument. Use the latter by passing the
`redis_host` and `redis_port` arguments.
'''
logging.basicConfig()
log = logging.getLogger("Imagenet_trainer")
log.setLevel(logging.DEBUG)
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:file_store_handler_ops')
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:redis_store_handler_ops')
def AddImageInput(
model,
reader,
batch_size,
img_size,
dtype,
is_test,
mean_per_channel=None,
std_per_channel=None,
):
'''
The image input operator loads image and label data from the reader and
applies transformations to the images (random cropping, mirroring, ...).
'''
data, label = brew.image_input(
model,
reader, ["data", "label"],
batch_size=batch_size,
output_type=dtype,
use_gpu_transform=True if core.IsGPUDeviceType(model._device_type) else False,
use_caffe_datum=True,
mean_per_channel=mean_per_channel,
std_per_channel=std_per_channel,
# mean_per_channel takes precedence over mean
mean=128.,
std=128.,
scale=256,
crop=img_size,
mirror=1,
is_test=is_test,
)
data = model.StopGradient(data, data)
def AddNullInput(model, reader, batch_size, img_size, dtype):
'''
The null input function uses a gaussian fill operator to emulate real image
input. A label blob is hardcoded to a single value. This is useful if you
want to test compute throughput or don't have a dataset available.
'''
suffix = "_fp16" if dtype == "float16" else ""
model.param_init_net.GaussianFill(
[],
["data" + suffix],
shape=[batch_size, 3, img_size, img_size],
)
if dtype == "float16":
model.param_init_net.FloatToHalf("data" + suffix, "data")
model.param_init_net.ConstantFill(
[],
["label"],
shape=[batch_size],
value=1,
dtype=core.DataType.INT32,
)
def SaveModel(args, train_model, epoch, use_ideep):
prefix = "[]_{}".format(train_model._device_prefix, train_model._devices[0])
predictor_export_meta = pred_exp.PredictorExportMeta(
predict_net=train_model.net.Proto(),
parameters=data_parallel_model.GetCheckpointParams(train_model),
inputs=[prefix + "/data"],
outputs=[prefix + "/softmax"],
shapes={
prefix + "/softmax": (1, args.num_labels),
prefix + "/data": (args.num_channels, args.image_size, args.image_size)
}
)
# save the train_model for the current epoch
model_path = "%s/%s_%d.mdl" % (
args.file_store_path,
args.save_model_name,
epoch,
)
# set db_type to be "minidb" instead of "log_file_db", which breaks
# the serialization in save_to_db. Need to switch back to log_file_db
# after migration
pred_exp.save_to_db(
db_type="minidb",
db_destination=model_path,
predictor_export_meta=predictor_export_meta,
use_ideep=use_ideep
)
def LoadModel(path, model, use_ideep):
'''
Load pretrained model from file
'''
log.info("Loading path: {}".format(path))
meta_net_def = pred_exp.load_from_db(path, 'minidb')
init_net = core.Net(pred_utils.GetNet(
meta_net_def, predictor_constants.GLOBAL_INIT_NET_TYPE))
predict_init_net = core.Net(pred_utils.GetNet(
meta_net_def, predictor_constants.PREDICT_INIT_NET_TYPE))
if use_ideep:
predict_init_net.RunAllOnIDEEP()
else:
predict_init_net.RunAllOnGPU()
if use_ideep:
init_net.RunAllOnIDEEP()
else:
init_net.RunAllOnGPU()
assert workspace.RunNetOnce(predict_init_net)
assert workspace.RunNetOnce(init_net)
# Hack: fix iteration counter which is in CUDA context after load model
itercnt = workspace.FetchBlob("optimizer_iteration")
workspace.FeedBlob(
"optimizer_iteration",
itercnt,
device_option=core.DeviceOption(caffe2_pb2.CPU, 0)
)
def RunEpoch(
args,
epoch,
train_model,
test_model,
total_batch_size,
num_shards,
expname,
explog,
):
'''
Run one epoch of the trainer.
TODO: add checkpointing here.
'''
# TODO: add loading from checkpoint
log.info("Starting epoch {}/{}".format(epoch, args.num_epochs))
epoch_iters = int(args.epoch_size / total_batch_size / num_shards)
test_epoch_iters = int(args.test_epoch_size / total_batch_size / num_shards)
for i in range(epoch_iters):
# This timeout is required (temporarily) since CUDA-NCCL
# operators might deadlock when synchronizing between GPUs.
timeout = args.first_iter_timeout if i == 0 else args.timeout
with timeout_guard.CompleteInTimeOrDie(timeout):
t1 = time.time()
workspace.RunNet(train_model.net.Proto().name)
t2 = time.time()
dt = t2 - t1
fmt = "Finished iteration {}/{} of epoch {} ({:.2f} images/sec)"
log.info(fmt.format(i + 1, epoch_iters, epoch, total_batch_size / dt))
prefix = "{}_{}".format(
train_model._device_prefix,
train_model._devices[0])
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
train_fmt = "Training loss: {}, accuracy: {}"
log.info(train_fmt.format(loss, accuracy))
num_images = epoch * epoch_iters * total_batch_size
prefix = "{}_{}".format(train_model._device_prefix, train_model._devices[0])
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
learning_rate = workspace.FetchBlob(
data_parallel_model.GetLearningRateBlobNames(train_model)[0]
)
test_accuracy = 0
test_accuracy_top5 = 0
if test_model is not None:
# Run 100 iters of testing
ntests = 0
for _ in range(test_epoch_iters):
workspace.RunNet(test_model.net.Proto().name)
for g in test_model._devices:
test_accuracy += np.asscalar(workspace.FetchBlob(
"{}_{}".format(test_model._device_prefix, g) + '/accuracy'
))
test_accuracy_top5 += np.asscalar(workspace.FetchBlob(
"{}_{}".format(test_model._device_prefix, g) + '/accuracy_top5'
))
ntests += 1
test_accuracy /= ntests
test_accuracy_top5 /= ntests
else:
test_accuracy = (-1)
test_accuracy_top5 = (-1)
explog.log(
input_count=num_images,
batch_count=(i + epoch * epoch_iters),
additional_values={
'accuracy': accuracy,
'loss': loss,
'learning_rate': learning_rate,
'epoch': epoch,
'top1_test_accuracy': test_accuracy,
'top5_test_accuracy': test_accuracy_top5,
}
)
assert loss < 40, "Exploded gradients :("
# TODO: add checkpointing
return epoch + 1
def Train(args):
if args.model == "resnext":
model_name = "resnext" + str(args.num_layers)
elif args.model == "shufflenet":
model_name = "shufflenet"
# Either use specified device list or generate one
if args.gpus is not None:
gpus = [int(x) for x in args.gpus.split(',')]
num_gpus = len(gpus)
else:
gpus = list(range(args.num_gpus))
num_gpus = args.num_gpus
log.info("Running on GPUs: {}".format(gpus))
# Verify valid batch size
total_batch_size = args.batch_size
batch_per_device = total_batch_size // num_gpus
assert \
total_batch_size % num_gpus == 0, \
"Number of GPUs must divide batch size"
# Verify valid image mean/std per channel
if args.image_mean_per_channel:
assert \
len(args.image_mean_per_channel) == args.num_channels, \
"The number of channels of image mean doesn't match input"
if args.image_std_per_channel:
assert \
len(args.image_std_per_channel) == args.num_channels, \
"The number of channels of image std doesn't match input"
# Round down epoch size to closest multiple of batch size across machines
global_batch_size = total_batch_size * args.num_shards
epoch_iters = int(args.epoch_size / global_batch_size)
assert \
epoch_iters > 0, \
"Epoch size must be larger than batch size times shard count"
args.epoch_size = epoch_iters * global_batch_size
log.info("Using epoch size: {}".format(args.epoch_size))
# Create ModelHelper object
if args.use_ideep:
train_arg_scope = {
'use_cudnn': False,
'cudnn_exhaustive_search': False,
'training_mode': 1
}
else:
train_arg_scope = {
'order': 'NCHW',
'use_cudnn': True,
'cudnn_exhaustive_search': True,
'ws_nbytes_limit': (args.cudnn_workspace_limit_mb * 1024 * 1024),
}
train_model = model_helper.ModelHelper(
name=model_name, arg_scope=train_arg_scope
)
num_shards = args.num_shards
shard_id = args.shard_id
# Expect interfaces to be comma separated.
# Use of multiple network interfaces is not yet complete,
# so simply use the first one in the list.
interfaces = args.distributed_interfaces.split(",")
# Rendezvous using MPI when run with mpirun
if os.getenv("OMPI_COMM_WORLD_SIZE") is not None:
num_shards = int(os.getenv("OMPI_COMM_WORLD_SIZE", 1))
shard_id = int(os.getenv("OMPI_COMM_WORLD_RANK", 0))
if num_shards > 1:
rendezvous = dict(
kv_handler=None,
num_shards=num_shards,
shard_id=shard_id,
engine="GLOO",
transport=args.distributed_transport,
interface=interfaces[0],
mpi_rendezvous=True,
exit_nets=None)
elif num_shards > 1:
# Create rendezvous for distributed computation
store_handler = "store_handler"
if args.redis_host is not None:
# Use Redis for rendezvous if Redis host is specified
workspace.RunOperatorOnce(
core.CreateOperator(
"RedisStoreHandlerCreate", [], [store_handler],
host=args.redis_host,
port=args.redis_port,
prefix=args.run_id,
)
)
else:
# Use filesystem for rendezvous otherwise
workspace.RunOperatorOnce(
core.CreateOperator(
"FileStoreHandlerCreate", [], [store_handler],
path=args.file_store_path,
prefix=args.run_id,
)
)
rendezvous = dict(
kv_handler=store_handler,
shard_id=shard_id,
num_shards=num_shards,
engine="GLOO",
transport=args.distributed_transport,
interface=interfaces[0],
exit_nets=None)
else:
rendezvous = None
# Model building functions
def create_resnext_model_ops(model, loss_scale):
initializer = (PseudoFP16Initializer if args.dtype == 'float16'
else Initializer)
with brew.arg_scope([brew.conv, brew.fc],
WeightInitializer=initializer,
BiasInitializer=initializer,
enable_tensor_core=args.enable_tensor_core,
float16_compute=args.float16_compute):
pred = resnet.create_resnext(
model,
"data",
num_input_channels=args.num_channels,
num_labels=args.num_labels,
num_layers=args.num_layers,
num_groups=args.resnext_num_groups,
num_width_per_group=args.resnext_width_per_group,
no_bias=True,
no_loss=True,
)
if args.dtype == 'float16':
pred = model.net.HalfToFloat(pred, pred + '_fp32')
softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
['softmax', 'loss'])
loss = model.Scale(loss, scale=loss_scale)
brew.accuracy(model, [softmax, "label"], "accuracy", top_k=1)
brew.accuracy(model, [softmax, "label"], "accuracy_top5", top_k=5)
return [loss]
def create_shufflenet_model_ops(model, loss_scale):
initializer = (PseudoFP16Initializer if args.dtype == 'float16'
else Initializer)
with brew.arg_scope([brew.conv, brew.fc],
WeightInitializer=initializer,
BiasInitializer=initializer,
enable_tensor_core=args.enable_tensor_core,
float16_compute=args.float16_compute):
pred = shufflenet.create_shufflenet(
model,
"data",
num_input_channels=args.num_channels,
num_labels=args.num_labels,
no_loss=True,
)
if args.dtype == 'float16':
pred = model.net.HalfToFloat(pred, pred + '_fp32')
softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
['softmax', 'loss'])
loss = model.Scale(loss, scale=loss_scale)
brew.accuracy(model, [softmax, "label"], "accuracy", top_k=1)
brew.accuracy(model, [softmax, "label"], "accuracy_top5", top_k=5)
return [loss]
def add_optimizer(model):
stepsz = int(30 * args.epoch_size / total_batch_size / num_shards)
if args.float16_compute:
# TODO: merge with multi-precision optimizer
opt = optimizer.build_fp16_sgd(
model,
args.base_learning_rate,
momentum=0.9,
nesterov=1,
weight_decay=args.weight_decay, # weight decay included
policy="step",
stepsize=stepsz,
gamma=0.1
)
else:
optimizer.add_weight_decay(model, args.weight_decay)
opt = optimizer.build_multi_precision_sgd(
model,
args.base_learning_rate,
momentum=0.9,
nesterov=1,
policy="step",
stepsize=stepsz,
gamma=0.1
)
return opt
# Define add_image_input function.
# Depends on the "train_data" argument.
    # Note that the reader will be shared between all GPUs.
if args.train_data == "null":
def add_image_input(model):
AddNullInput(
model,
None,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
)
else:
reader = train_model.CreateDB(
"reader",
db=args.train_data,
db_type=args.db_type,
num_shards=num_shards,
shard_id=shard_id,
)
def add_image_input(model):
AddImageInput(
model,
reader,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
is_test=False,
mean_per_channel=args.image_mean_per_channel,
std_per_channel=args.image_std_per_channel,
)
def add_post_sync_ops(model):
"""Add ops applied after initial parameter sync."""
for param_info in model.GetOptimizationParamInfo(model.GetParams()):
if param_info.blob_copy is not None:
model.param_init_net.HalfToFloat(
param_info.blob,
param_info.blob_copy[core.DataType.FLOAT]
)
data_parallel_model.Parallelize(
train_model,
input_builder_fun=add_image_input,
forward_pass_builder_fun=create_resnext_model_ops
if args.model == "resnext" else create_shufflenet_model_ops,
optimizer_builder_fun=add_optimizer,
post_sync_builder_fun=add_post_sync_ops,
devices=gpus,
rendezvous=rendezvous,
optimize_gradient_memory=False,
use_nccl=args.use_nccl,
cpu_device=args.use_cpu,
ideep=args.use_ideep,
shared_model=args.use_cpu,
combine_spatial_bn=args.use_cpu,
)
data_parallel_model.OptimizeGradientMemory(train_model, {}, set(), False)
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net)
# Add test model, if specified
test_model = None
if (args.test_data is not None):
log.info("----- Create test net ----")
if args.use_ideep:
test_arg_scope = {
'use_cudnn': False,
'cudnn_exhaustive_search': False,
}
else:
test_arg_scope = {
'order': "NCHW",
'use_cudnn': True,
'cudnn_exhaustive_search': True,
}
test_model = model_helper.ModelHelper(
name=model_name + "_test",
arg_scope=test_arg_scope,
init_params=False,
)
test_reader = test_model.CreateDB(
"test_reader",
db=args.test_data,
db_type=args.db_type,
)
def test_input_fn(model):
AddImageInput(
model,
test_reader,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
is_test=True,
mean_per_channel=args.image_mean_per_channel,
std_per_channel=args.image_std_per_channel,
)
data_parallel_model.Parallelize(
test_model,
input_builder_fun=test_input_fn,
forward_pass_builder_fun=create_resnext_model_ops
if args.model == "resnext" else create_shufflenet_model_ops,
post_sync_builder_fun=add_post_sync_ops,
param_update_builder_fun=None,
devices=gpus,
use_nccl=args.use_nccl,
cpu_device=args.use_cpu,
)
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net)
epoch = 0
# load the pre-trained model and reset epoch
if args.load_model_path is not None:
LoadModel(args.load_model_path, train_model, args.use_ideep)
# Sync the model params
data_parallel_model.FinalizeAfterCheckpoint(train_model)
# reset epoch. load_model_path should end with *_X.mdl,
# where X is the epoch number
last_str = args.load_model_path.split('_')[-1]
if last_str.endswith('.mdl'):
epoch = int(last_str[:-4])
log.info("Reset epoch to {}".format(epoch))
else:
log.warning("The format of load_model_path doesn't match!")
expname = "%s_gpu%d_b%d_L%d_lr%.2f_v2" % (
model_name,
args.num_gpus,
total_batch_size,
args.num_labels,
args.base_learning_rate,
)
explog = experiment_util.ModelTrainerLog(expname, args)
    # Run the training, one epoch at a time
while epoch < args.num_epochs:
epoch = RunEpoch(
args,
epoch,
train_model,
test_model,
total_batch_size,
num_shards,
expname,
explog
)
# Save the model for each epoch
SaveModel(args, train_model, epoch, args.use_ideep)
model_path = "%s/%s_" % (
args.file_store_path,
args.save_model_name
)
# remove the saved model from the previous epoch if it exists
if os.path.isfile(model_path + str(epoch - 1) + ".mdl"):
os.remove(model_path + str(epoch - 1) + ".mdl")
def main():
# TODO: use argv
parser = argparse.ArgumentParser(
description="Caffe2: ImageNet Trainer"
)
parser.add_argument("--train_data", type=str, default=None, required=True,
help="Path to training data (or 'null' to simulate)")
parser.add_argument("--num_layers", type=int, default=50,
help="The number of layers in ResNe(X)t model")
parser.add_argument("--resnext_num_groups", type=int, default=1,
help="The cardinality of resnext")
parser.add_argument("--resnext_width_per_group", type=int, default=64,
help="The cardinality of resnext")
parser.add_argument("--test_data", type=str, default=None,
help="Path to test data")
parser.add_argument("--image_mean_per_channel", type=float, nargs='+',
help="The per channel mean for the images")
parser.add_argument("--image_std_per_channel", type=float, nargs='+',
help="The per channel standard deviation for the images")
parser.add_argument("--test_epoch_size", type=int, default=50000,
help="Number of test images")
parser.add_argument("--db_type", type=str, default="lmdb",
help="Database type (such as lmdb or leveldb)")
parser.add_argument("--gpus", type=str,
help="Comma separated list of GPU devices to use")
parser.add_argument("--num_gpus", type=int, default=1,
help="Number of GPU devices (instead of --gpus)")
parser.add_argument("--num_channels", type=int, default=3,
help="Number of color channels")
parser.add_argument("--image_size", type=int, default=224,
help="Input image size (to crop to)")
parser.add_argument("--num_labels", type=int, default=1000,
help="Number of labels")
parser.add_argument("--batch_size", type=int, default=32,
help="Batch size, total over all GPUs")
parser.add_argument("--epoch_size", type=int, default=1500000,
help="Number of images/epoch, total over all machines")
parser.add_argument("--num_epochs", type=int, default=1000,
help="Num epochs.")
parser.add_argument("--base_learning_rate", type=float, default=0.1,
help="Initial learning rate.")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help="Weight decay (L2 regularization)")
parser.add_argument("--cudnn_workspace_limit_mb", type=int, default=64,
help="CuDNN workspace limit in MBs")
parser.add_argument("--num_shards", type=int, default=1,
help="Number of machines in distributed run")
parser.add_argument("--shard_id", type=int, default=0,
help="Shard id.")
parser.add_argument("--run_id", type=str,
help="Unique run identifier (e.g. uuid)")
parser.add_argument("--redis_host", type=str,
help="Host of Redis server (for rendezvous)")
parser.add_argument("--redis_port", type=int, default=6379,
help="Port of Redis server (for rendezvous)")
parser.add_argument("--file_store_path", type=str, default="/tmp",
help="Path to directory to use for rendezvous")
parser.add_argument("--save_model_name", type=str, default="resnext_model",
help="Save the trained model to a given name")
parser.add_argument("--load_model_path", type=str, default=None,
help="Load previously saved model to continue training")
parser.add_argument("--use_cpu", action="store_true",
help="Use CPU instead of GPU")
parser.add_argument("--use_nccl", action="store_true",
help="Use nccl for inter-GPU collectives")
parser.add_argument("--use_ideep", type=bool, default=False,
help="Use ideep")
parser.add_argument('--dtype', default='float',
choices=['float', 'float16'],
help='Data type used for training')
parser.add_argument('--float16_compute', action='store_true',
help="Use float 16 compute, if available")
parser.add_argument('--enable_tensor_core', action='store_true',
help='Enable Tensor Core math for Conv and FC ops')
parser.add_argument("--distributed_transport", type=str, default="tcp",
help="Transport to use for distributed run [tcp|ibverbs]")
parser.add_argument("--distributed_interfaces", type=str, default="",
help="Network interfaces to use for distributed run")
parser.add_argument("--first_iter_timeout", type=int, default=600,
help="Timeout (secs) of the first iteration "
"(default: %(default)s)")
parser.add_argument("--timeout", type=int, default=60,
help="Timeout (secs) of each (except the first) iteration "
"(default: %(default)s)")
parser.add_argument("--model",
default="resnext", const="resnext", nargs="?",
choices=["shufflenet", "resnext"],
help="List of models which can be run")
args = parser.parse_args()
Train(args)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
    main()
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/examples/imagenet_trainer.py | 0.663451 | 0.234264 | imagenet_trainer.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import numpy as np
import lmdb
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, model_helper
'''
Simple example to create an lmdb database of random image data and labels.
This can be used as a skeleton to write your own data import.
It also runs a dummy model with Caffe2 that reads the data and
validates that the checksum is the same.
'''
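# Example invocation (the output path below is illustrative):
#   python lmdb_create_example.py --output_file /tmp/example_lmdb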
def create_db(output_file):
print(">>> Write database...")
LMDB_MAP_SIZE = 1 << 40 # MODIFY
env = lmdb.open(output_file, map_size=LMDB_MAP_SIZE)
checksum = 0
with env.begin(write=True) as txn:
for j in range(0, 128):
# MODIFY: add your own data reader / creator
label = j % 10
width = 64
height = 32
img_data = np.random.rand(3, width, height)
# ...
# Create TensorProtos
tensor_protos = caffe2_pb2.TensorProtos()
img_tensor = tensor_protos.protos.add()
img_tensor.dims.extend(img_data.shape)
            img_tensor.data_type = 1  # caffe2_pb2.TensorProto.FLOAT
flatten_img = img_data.reshape(np.prod(img_data.shape))
img_tensor.float_data.extend(flatten_img)
label_tensor = tensor_protos.protos.add()
            label_tensor.data_type = 2  # caffe2_pb2.TensorProto.INT32
label_tensor.int32_data.append(label)
txn.put(
'{}'.format(j).encode('ascii'),
tensor_protos.SerializeToString()
)
checksum += np.sum(img_data) * label
if (j % 16 == 0):
print("Inserted {} rows".format(j))
print("Checksum/write: {}".format(int(checksum)))
return checksum
def read_db_with_caffe2(db_file, expected_checksum):
print(">>> Read database...")
model = model_helper.ModelHelper(name="lmdbtest")
batch_size = 32
data, label = model.TensorProtosDBInput(
[], ["data", "label"], batch_size=batch_size,
db=db_file, db_type="lmdb")
checksum = 0
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
for _ in range(0, 4):
workspace.RunNet(model.net.Proto().name)
img_datas = workspace.FetchBlob("data")
labels = workspace.FetchBlob("label")
for j in range(batch_size):
checksum += np.sum(img_datas[j, :]) * labels[j]
print("Checksum/read: {}".format(int(checksum)))
    assert np.abs(expected_checksum - checksum) < 0.1, \
        "Read/write checksums don't match"
def main():
parser = argparse.ArgumentParser(
description="Example LMDB creation"
)
parser.add_argument("--output_file", type=str, default=None,
help="Path to write the database to",
required=True)
args = parser.parse_args()
checksum = create_db(args.output_file)
# For testing reading:
read_db_with_caffe2(args.output_file, checksum)
if __name__ == '__main__':
    main()
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/examples/lmdb_create_example.py | 0.673084 | 0.307358 | lmdb_create_example.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import brew
"""
Utilitiy for creating ShuffleNet
"ShuffleNet V2: Practical Guidelines for EfficientCNN Architecture Design" by Ma et. al. 2018
"""
OUTPUT_CHANNELS = {
'0.5x': [24, 48, 96, 192, 1024],
'1.0x': [24, 116, 232, 464, 1024],
'1.5x': [24, 176, 352, 704, 1024],
'2.0x': [24, 244, 488, 976, 2048],
}
class ShuffleNetV2Builder():
def __init__(
self,
model,
data,
num_input_channels,
num_labels,
num_groups=2,
width='1.0x',
is_test=False,
detection=False,
bn_epsilon=1e-5,
):
self.model = model
self.prev_blob = data
self.num_input_channels = num_input_channels
self.num_labels = num_labels
self.num_groups = num_groups
self.output_channels = OUTPUT_CHANNELS[width]
self.stage_repeats = [3, 7, 3]
self.is_test = is_test
self.detection = detection
self.bn_epsilon = bn_epsilon
def create(self):
in_channels = self.output_channels[0]
self.prev_blob = brew.conv(self.model, self.prev_blob, 'stage1_conv',
self.num_input_channels, in_channels,
weight_init=("MSRAFill", {}),
kernel=3, stride=2)
self.prev_blob = brew.max_pool(self.model, self.prev_blob,
'stage1_pool', kernel=3, stride=2)
        # Add stages 2-4; see Table 5 of the ShuffleNet V2 paper.
for idx, (out_channels, n_repeats) in enumerate(zip(
self.output_channels[1:4], self.stage_repeats
)):
prefix = 'stage{}_stride{}'.format(idx + 2, 2)
self.add_spatial_ds_unit(prefix, in_channels, out_channels)
in_channels = out_channels
for i in range(n_repeats):
prefix = 'stage{}_stride{}_repeat{}'.format(
idx + 2, 1, i + 1
)
self.add_basic_unit(prefix, in_channels)
self.last_conv = brew.conv(self.model, self.prev_blob, 'conv5',
in_channels, self.output_channels[4],
kernel=1)
self.avg_pool = self.model.AveragePool(self.last_conv, 'avg_pool',
kernel=7)
self.last_out = brew.fc(self.model,
self.avg_pool,
'last_out_L{}'.format(self.num_labels),
self.output_channels[4],
self.num_labels)
# spatial down sampling unit with stride=2
def add_spatial_ds_unit(self, prefix, in_channels, out_channels, stride=2):
right = left = self.prev_blob
out_channels = out_channels // 2
# Enlarge the receptive field for detection task
if self.detection:
left = self.add_detection_unit(left, prefix + '_left_detection',
in_channels, in_channels)
        left = self.add_dwconv3x3_bn(left, prefix + '_left_dwconv',
                                     in_channels, stride)
left = self.add_conv1x1_bn(left, prefix + '_left_conv1', in_channels,
out_channels)
if self.detection:
right = self.add_detection_unit(right, prefix + '_right_detection',
in_channels, in_channels)
right = self.add_conv1x1_bn(right, prefix + '_right_conv1',
in_channels, out_channels)
right = self.add_dwconv3x3_bn(right, prefix + '_right_dwconv',
out_channels, stride)
right = self.add_conv1x1_bn(right, prefix + '_right_conv2',
out_channels, out_channels)
self.prev_blob = brew.concat(self.model, [right, left],
prefix + '_concat')
self.prev_blob = self.model.net.ChannelShuffle(
self.prev_blob, prefix + '_ch_shuffle',
group=self.num_groups, kernel=1
)
# basic unit with stride=1
def add_basic_unit(self, prefix, in_channels, stride=1):
in_channels = in_channels // 2
left = prefix + '_left'
right = prefix + '_right'
self.model.net.Split(self.prev_blob, [left, right])
if self.detection:
right = self.add_detection_unit(right, prefix + '_right_detection',
in_channels, in_channels)
right = self.add_conv1x1_bn(right, prefix + '_right_conv1',
in_channels, in_channels)
right = self.add_dwconv3x3_bn(right, prefix + '_right_dwconv',
in_channels, stride)
right = self.add_conv1x1_bn(right, prefix + '_right_conv2',
in_channels, in_channels)
self.prev_blob = brew.concat(self.model, [right, left],
prefix + '_concat')
self.prev_blob = self.model.net.ChannelShuffle(
self.prev_blob, prefix + '_ch_shuffle',
group=self.num_groups, kernel=1
)
# helper functions to create net's units
def add_detection_unit(self, prev_blob, prefix, in_channels, out_channels,
kernel=3, pad=1):
out_blob = brew.conv(self.model, prev_blob, prefix + '_conv',
in_channels, out_channels, kernel=kernel,
weight_init=("MSRAFill", {}),
group=in_channels, pad=pad)
out_blob = brew.spatial_bn(self.model, out_blob, prefix + '_bn',
out_channels, epsilon=self.bn_epsilon,
is_test=self.is_test)
return out_blob
def add_conv1x1_bn(self, prev_blob, blob, in_channels, out_channels):
prev_blob = brew.conv(self.model, prev_blob, blob, in_channels,
out_channels, kernel=1,
weight_init=("MSRAFill", {}))
prev_blob = brew.spatial_bn(self.model, prev_blob, prev_blob + '_bn',
out_channels,
epsilon=self.bn_epsilon,
is_test=self.is_test)
prev_blob = brew.relu(self.model, prev_blob, prev_blob)
return prev_blob
def add_dwconv3x3_bn(self, prev_blob, blob, channels, stride):
prev_blob = brew.conv(self.model, prev_blob, blob, channels,
channels, kernel=3,
weight_init=("MSRAFill", {}),
stride=stride, group=channels, pad=1)
prev_blob = brew.spatial_bn(self.model, prev_blob,
prev_blob + '_bn',
channels,
epsilon=self.bn_epsilon,
is_test=self.is_test)
return prev_blob
def create_shufflenet(
model,
data,
num_input_channels,
num_labels,
label=None,
is_test=False,
no_loss=False,
):
builder = ShuffleNetV2Builder(model, data, num_input_channels,
num_labels,
is_test=is_test)
builder.create()
if no_loss:
return builder.last_out
if (label is not None):
(softmax, loss) = model.SoftmaxWithLoss(
[builder.last_out, label],
["softmax", "loss"],
)
        return (softmax, loss)
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/models/shufflenet.py | 0.899011 | 0.245605 | shufflenet.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.python import brew
import logging
'''
Utility for creating ResNe(X)t
"Deep Residual Learning for Image Recognition" by He, Zhang et. al. 2015
"Aggregated Residual Transformations for Deep Neural Networks" by Xie et. al. 2016
'''
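# A minimal usage sketch (assumes a ModelHelper whose net has an external
# input blob named "data"; blob names and sizes below are illustrative,
# and 32x4d is the standard ResNeXt-50 configuration):
#
#   from caffe2.python import model_helper
#   from caffe2.python.models import resnet
#
#   model = model_helper.ModelHelper(name="resnext_example")
#   softmax, loss = resnet.create_resnext(
#       model, "data", num_input_channels=3, num_labels=1000,
#       num_layers=50, num_groups=32, num_width_per_group=4, label="label")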
class ResNetBuilder():
'''
Helper class for constructing residual blocks.
'''
def __init__(
self,
model,
prev_blob,
no_bias,
is_test,
bn_epsilon=1e-5,
bn_momentum=0.9,
):
self.model = model
self.comp_count = 0
self.comp_idx = 0
self.prev_blob = prev_blob
self.is_test = is_test
self.bn_epsilon = bn_epsilon
self.bn_momentum = bn_momentum
self.no_bias = 1 if no_bias else 0
def add_conv(
self,
in_filters,
out_filters,
kernel,
stride=1,
group=1,
pad=0,
):
self.comp_idx += 1
self.prev_blob = brew.conv(
self.model,
self.prev_blob,
'comp_%d_conv_%d' % (self.comp_count, self.comp_idx),
in_filters,
out_filters,
weight_init=("MSRAFill", {}),
kernel=kernel,
stride=stride,
group=group,
pad=pad,
no_bias=self.no_bias,
)
return self.prev_blob
def add_relu(self):
self.prev_blob = brew.relu(
self.model,
self.prev_blob,
self.prev_blob, # in-place
)
return self.prev_blob
def add_spatial_bn(self, num_filters):
self.prev_blob = brew.spatial_bn(
self.model,
self.prev_blob,
'comp_%d_spatbn_%d' % (self.comp_count, self.comp_idx),
num_filters,
epsilon=self.bn_epsilon,
momentum=self.bn_momentum,
is_test=self.is_test,
)
return self.prev_blob
'''
Add a "bottleneck" component as described in He et. al. Figure 3 (right)
'''
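    # Illustrative shapes: add_bottleneck(input_filters=256, base_filters=64,
    # output_filters=256) builds 1x1 (256->64), 3x3 (64->64) and 1x1 (64->256)
    # convolutions with an identity shortcut; a projection shortcut is added
    # only when output_filters != input_filters.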
def add_bottleneck(
self,
input_filters, # num of feature maps from preceding layer
base_filters, # num of filters internally in the component
output_filters, # num of feature maps to output
stride=1,
group=1,
spatial_batch_norm=True,
):
self.comp_idx = 0
shortcut_blob = self.prev_blob
# 1x1
self.add_conv(
input_filters,
base_filters,
kernel=1,
stride=1,
)
if spatial_batch_norm:
self.add_spatial_bn(base_filters)
self.add_relu()
# 3x3 (note the pad, required for keeping dimensions)
self.add_conv(
base_filters,
base_filters,
kernel=3,
stride=stride,
group=group,
pad=1,
)
if spatial_batch_norm:
self.add_spatial_bn(base_filters)
self.add_relu()
# 1x1
last_conv = self.add_conv(base_filters, output_filters, kernel=1)
if spatial_batch_norm:
last_conv = self.add_spatial_bn(output_filters)
# Summation with input signal (shortcut)
# When the number of feature maps mismatch between the input
# and output (this usually happens when the residual stage
# changes), we need to do a projection for the short cut
if output_filters != input_filters:
shortcut_blob = brew.conv(
self.model,
shortcut_blob,
'shortcut_projection_%d' % self.comp_count,
input_filters,
output_filters,
weight_init=("MSRAFill", {}),
kernel=1,
stride=stride,
no_bias=self.no_bias,
)
if spatial_batch_norm:
shortcut_blob = brew.spatial_bn(
self.model,
shortcut_blob,
'shortcut_projection_%d_spatbn' % self.comp_count,
output_filters,
epsilon=self.bn_epsilon,
momentum=self.bn_momentum,
is_test=self.is_test,
)
self.prev_blob = brew.sum(
self.model, [shortcut_blob, last_conv],
'comp_%d_sum_%d' % (self.comp_count, self.comp_idx)
)
self.comp_idx += 1
self.add_relu()
        # Keep track of the number of high-level components in this ResNetBuilder
self.comp_count += 1
return output_filters
def add_simple_block(
self,
input_filters,
num_filters,
down_sampling=False,
spatial_batch_norm=True
):
self.comp_idx = 0
shortcut_blob = self.prev_blob
# 3x3
self.add_conv(
input_filters,
num_filters,
kernel=3,
stride=(1 if down_sampling is False else 2),
pad=1
)
if spatial_batch_norm:
self.add_spatial_bn(num_filters)
self.add_relu()
last_conv = self.add_conv(num_filters, num_filters, kernel=3, pad=1)
if spatial_batch_norm:
last_conv = self.add_spatial_bn(num_filters)
# Increase of dimensions, need a projection for the shortcut
if (num_filters != input_filters):
shortcut_blob = brew.conv(
self.model,
shortcut_blob,
'shortcut_projection_%d' % self.comp_count,
input_filters,
num_filters,
weight_init=("MSRAFill", {}),
kernel=1,
stride=(1 if down_sampling is False else 2),
no_bias=self.no_bias,
)
            if spatial_batch_norm:
                shortcut_blob = brew.spatial_bn(
                    self.model,
                    shortcut_blob,
                    'shortcut_projection_%d_spatbn' % self.comp_count,
                    num_filters,
                    epsilon=self.bn_epsilon,
                    momentum=self.bn_momentum,
                    is_test=self.is_test,
                )
self.prev_blob = brew.sum(
self.model, [shortcut_blob, last_conv],
'comp_%d_sum_%d' % (self.comp_count, self.comp_idx)
)
self.comp_idx += 1
self.add_relu()
        # Keep track of the number of high-level components in this ResNetBuilder
self.comp_count += 1
def create_resnet_32x32(
model, data, num_input_channels, num_groups, num_labels, is_test=False
):
'''
    Create residual net for smaller images (sec 4.2 of He et al. (2015))
num_groups = 'n' in the paper
'''
# conv1 + maxpool
brew.conv(
model, data, 'conv1', num_input_channels, 16, kernel=3, stride=1
)
brew.spatial_bn(
model, 'conv1', 'conv1_spatbn', 16, epsilon=1e-3, is_test=is_test
)
brew.relu(model, 'conv1_spatbn', 'relu1')
# Number of blocks as described in sec 4.2
filters = [16, 32, 64]
builder = ResNetBuilder(model, 'relu1', no_bias=0, is_test=is_test)
prev_filters = 16
for groupidx in range(0, 3):
for blockidx in range(0, 2 * num_groups):
builder.add_simple_block(
prev_filters if blockidx == 0 else filters[groupidx],
filters[groupidx],
down_sampling=(True if blockidx == 0 and
groupidx > 0 else False))
prev_filters = filters[groupidx]
# Final layers
brew.average_pool(
model, builder.prev_blob, 'final_avg', kernel=8, stride=1
)
brew.fc(model, 'final_avg', 'last_out', 64, num_labels)
softmax = brew.softmax(model, 'last_out', 'softmax')
return softmax
RESNEXT_BLOCK_CONFIG = {
18: (2, 2, 2, 2),
34: (3, 4, 6, 3),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
200: (3, 24, 36, 3),
}
RESNEXT_STRIDES = [1, 2, 2, 2]
logging.basicConfig()
log = logging.getLogger("resnext_builder")
log.setLevel(logging.DEBUG)
# The conv1 and final_avg kernel/stride args provide a basic mechanism for
# adapting resnet50 for different sizes of input images.
def create_resnext(
model,
data,
num_input_channels,
num_labels,
num_layers,
num_groups,
num_width_per_group,
label=None,
is_test=False,
no_loss=False,
no_bias=1,
conv1_kernel=7,
conv1_stride=2,
final_avg_kernel=7,
log=None,
bn_epsilon=1e-5,
bn_momentum=0.9,
):
    if num_layers not in RESNEXT_BLOCK_CONFIG:
        raise ValueError(
            "{}-layer is an invalid resnext config".format(num_layers))
num_blocks = RESNEXT_BLOCK_CONFIG[num_layers]
strides = RESNEXT_STRIDES
num_filters = [64, 256, 512, 1024, 2048]
if num_layers in [18, 34]:
num_filters = [64, 64, 128, 256, 512]
# the number of features before the last FC layer
num_features = num_filters[-1]
# conv1 + maxpool
conv_blob = brew.conv(
model,
data,
'conv1',
num_input_channels,
num_filters[0],
weight_init=("MSRAFill", {}),
kernel=conv1_kernel,
stride=conv1_stride,
pad=3,
no_bias=no_bias
)
bn_blob = brew.spatial_bn(
model,
conv_blob,
'conv1_spatbn_relu',
num_filters[0],
epsilon=bn_epsilon,
momentum=bn_momentum,
is_test=is_test
)
relu_blob = brew.relu(model, bn_blob, bn_blob)
max_pool = brew.max_pool(model, relu_blob, 'pool1', kernel=3, stride=2, pad=1)
# Residual blocks...
    builder = ResNetBuilder(model, max_pool, no_bias=no_bias,
                            is_test=is_test, bn_epsilon=bn_epsilon,
                            bn_momentum=bn_momentum)
inner_dim = num_groups * num_width_per_group
# 4 different kinds of residual blocks
for residual_idx in range(4):
residual_num = num_blocks[residual_idx]
residual_stride = strides[residual_idx]
dim_in = num_filters[residual_idx]
for blk_idx in range(residual_num):
dim_in = builder.add_bottleneck(
dim_in,
inner_dim,
num_filters[residual_idx + 1], # dim out
stride=residual_stride if blk_idx == 0 else 1,
group=num_groups,
)
inner_dim *= 2
# Final layers
final_avg = brew.average_pool(
model,
builder.prev_blob,
'final_avg',
kernel=final_avg_kernel,
stride=1,
global_pooling=True,
)
# Final dimension of the "image" is reduced to 7x7
last_out = brew.fc(
model, final_avg, 'last_out_L{}'.format(num_labels), num_features, num_labels
)
if no_loss:
return last_out
# If we create model for training, use softmax-with-loss
if (label is not None):
(softmax, loss) = model.SoftmaxWithLoss(
[last_out, label],
["softmax", "loss"],
)
return (softmax, loss)
else:
# For inference, we just return softmax
return brew.softmax(model, last_out, "softmax")
# The conv1 and final_avg kernel/stride args provide a basic mechanism for
# adapting resnet50 for different sizes of input images.
def create_resnet50(
model,
data,
num_input_channels,
num_labels,
label=None,
is_test=False,
no_loss=False,
no_bias=0,
conv1_kernel=7,
conv1_stride=2,
final_avg_kernel=7,
):
    # resnet50 is a special case of ResNeXt50-1x64d
return create_resnext(
model,
data,
num_input_channels,
num_labels,
num_layers=50,
num_groups=1,
num_width_per_group=64,
label=label,
is_test=is_test,
no_loss=no_loss,
no_bias=no_bias,
conv1_kernel=conv1_kernel,
conv1_stride=conv1_stride,
final_avg_kernel=final_avg_kernel,
    )
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/models/resnet.py | 0.864396 | 0.308002 | resnet.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import collections
import logging
import math
import numpy as np
import random
import time
import sys
import os
import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import core, workspace, data_parallel_model
import caffe2.python.models.seq2seq.seq2seq_util as seq2seq_util
from caffe2.python.models.seq2seq.seq2seq_model_helper import Seq2SeqModelHelper
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stderr))
Batch = collections.namedtuple('Batch', [
'encoder_inputs',
'encoder_lengths',
'decoder_inputs',
'decoder_lengths',
'targets',
'target_weights',
])
def prepare_batch(batch):
encoder_lengths = [len(entry[0]) for entry in batch]
max_encoder_length = max(encoder_lengths)
decoder_lengths = []
max_decoder_length = max([len(entry[1]) for entry in batch])
batch_encoder_inputs = []
batch_decoder_inputs = []
batch_targets = []
batch_target_weights = []
for source_seq, target_seq in batch:
encoder_pads = (
[seq2seq_util.PAD_ID] * (max_encoder_length - len(source_seq))
)
batch_encoder_inputs.append(
list(reversed(source_seq)) + encoder_pads
)
decoder_pads = (
[seq2seq_util.PAD_ID] * (max_decoder_length - len(target_seq))
)
target_seq_with_go_token = [seq2seq_util.GO_ID] + target_seq
decoder_lengths.append(len(target_seq_with_go_token))
batch_decoder_inputs.append(target_seq_with_go_token + decoder_pads)
target_seq_with_eos = target_seq + [seq2seq_util.EOS_ID]
targets = target_seq_with_eos + decoder_pads
batch_targets.append(targets)
if len(source_seq) + len(target_seq) == 0:
target_weights = [0] * len(targets)
else:
target_weights = [
1 if target != seq2seq_util.PAD_ID else 0
for target in targets
]
batch_target_weights.append(target_weights)
return Batch(
encoder_inputs=np.array(
batch_encoder_inputs,
dtype=np.int32,
).transpose(),
encoder_lengths=np.array(encoder_lengths, dtype=np.int32),
decoder_inputs=np.array(
batch_decoder_inputs,
dtype=np.int32,
).transpose(),
decoder_lengths=np.array(decoder_lengths, dtype=np.int32),
targets=np.array(
batch_targets,
dtype=np.int32,
).transpose(),
target_weights=np.array(
batch_target_weights,
dtype=np.float32,
).transpose(),
)
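# Illustrative example (not executed): assuming PAD_ID=0, GO_ID=1 and
# EOS_ID=2 as defined in seq2seq_util, prepare_batch([([5, 6], [7]),
# ([8], [9, 10])]) reverses and pads the sources into encoder_inputs of
# shape (2, 2), prepends GO to build decoder_inputs of shape (3, 2),
# appends EOS to build targets of shape (3, 2), and zeroes target_weights
# at PAD positions.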
class Seq2SeqModelCaffe2(object):
def _build_model(
self,
init_params,
):
model = Seq2SeqModelHelper(init_params=init_params)
self._build_shared(model)
self._build_embeddings(model)
forward_model = Seq2SeqModelHelper(init_params=init_params)
self._build_shared(forward_model)
self._build_embeddings(forward_model)
if self.num_gpus == 0:
loss_blobs = self.model_build_fun(model)
model.AddGradientOperators(loss_blobs)
self.norm_clipped_grad_update(
model,
scope='norm_clipped_grad_update'
)
self.forward_model_build_fun(forward_model)
else:
assert (self.batch_size % self.num_gpus) == 0
data_parallel_model.Parallelize_GPU(
forward_model,
input_builder_fun=lambda m: None,
forward_pass_builder_fun=self.forward_model_build_fun,
param_update_builder_fun=None,
devices=list(range(self.num_gpus)),
)
def clipped_grad_update_bound(model):
self.norm_clipped_grad_update(
model,
scope='norm_clipped_grad_update',
)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=lambda m: None,
forward_pass_builder_fun=self.model_build_fun,
param_update_builder_fun=clipped_grad_update_bound,
devices=list(range(self.num_gpus)),
)
self.norm_clipped_sparse_grad_update(
model,
scope='norm_clipped_sparse_grad_update',
)
self.model = model
self.forward_net = forward_model.net
def _build_shared(self, model):
optimizer_params = self.model_params['optimizer_params']
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
self.learning_rate = model.AddParam(
name='learning_rate',
init_value=float(optimizer_params['learning_rate']),
trainable=False,
)
self.global_step = model.AddParam(
name='global_step',
init_value=0,
trainable=False,
)
self.start_time = model.AddParam(
name='start_time',
init_value=time.time(),
trainable=False,
)
def _build_embeddings(self, model):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
sqrt3 = math.sqrt(3)
self.encoder_embeddings = model.param_init_net.UniformFill(
[],
'encoder_embeddings',
shape=[
self.source_vocab_size,
self.model_params['encoder_embedding_size'],
],
min=-sqrt3,
max=sqrt3,
)
model.params.append(self.encoder_embeddings)
self.decoder_embeddings = model.param_init_net.UniformFill(
[],
'decoder_embeddings',
shape=[
self.target_vocab_size,
self.model_params['decoder_embedding_size'],
],
min=-sqrt3,
max=sqrt3,
)
model.params.append(self.decoder_embeddings)
def model_build_fun(self, model, forward_only=False, loss_scale=None):
encoder_inputs = model.net.AddExternalInput(
workspace.GetNameScope() + 'encoder_inputs',
)
encoder_lengths = model.net.AddExternalInput(
workspace.GetNameScope() + 'encoder_lengths',
)
decoder_inputs = model.net.AddExternalInput(
workspace.GetNameScope() + 'decoder_inputs',
)
decoder_lengths = model.net.AddExternalInput(
workspace.GetNameScope() + 'decoder_lengths',
)
targets = model.net.AddExternalInput(
workspace.GetNameScope() + 'targets',
)
target_weights = model.net.AddExternalInput(
workspace.GetNameScope() + 'target_weights',
)
attention_type = self.model_params['attention']
assert attention_type in ['none', 'regular', 'dot']
(
encoder_outputs,
weighted_encoder_outputs,
final_encoder_hidden_states,
final_encoder_cell_states,
encoder_units_per_layer,
) = seq2seq_util.build_embedding_encoder(
model=model,
encoder_params=self.encoder_params,
num_decoder_layers=len(self.model_params['decoder_layer_configs']),
inputs=encoder_inputs,
input_lengths=encoder_lengths,
vocab_size=self.source_vocab_size,
embeddings=self.encoder_embeddings,
embedding_size=self.model_params['encoder_embedding_size'],
use_attention=(attention_type != 'none'),
num_gpus=self.num_gpus,
)
(
decoder_outputs,
decoder_output_size,
) = seq2seq_util.build_embedding_decoder(
model,
decoder_layer_configs=self.model_params['decoder_layer_configs'],
inputs=decoder_inputs,
input_lengths=decoder_lengths,
encoder_lengths=encoder_lengths,
encoder_outputs=encoder_outputs,
weighted_encoder_outputs=weighted_encoder_outputs,
final_encoder_hidden_states=final_encoder_hidden_states,
final_encoder_cell_states=final_encoder_cell_states,
encoder_units_per_layer=encoder_units_per_layer,
vocab_size=self.target_vocab_size,
embeddings=self.decoder_embeddings,
embedding_size=self.model_params['decoder_embedding_size'],
attention_type=attention_type,
forward_only=False,
num_gpus=self.num_gpus,
)
output_logits = seq2seq_util.output_projection(
model=model,
decoder_outputs=decoder_outputs,
decoder_output_size=decoder_output_size,
target_vocab_size=self.target_vocab_size,
decoder_softmax_size=self.model_params['decoder_softmax_size'],
)
targets, _ = model.net.Reshape(
[targets],
['targets', 'targets_old_shape'],
shape=[-1],
)
target_weights, _ = model.net.Reshape(
[target_weights],
['target_weights', 'target_weights_old_shape'],
shape=[-1],
)
_, loss_per_word = model.net.SoftmaxWithLoss(
[output_logits, targets, target_weights],
['OutputProbs_INVALID', 'loss_per_word'],
only_loss=True,
)
num_words = model.net.SumElements(
[target_weights],
'num_words',
)
total_loss_scalar = model.net.Mul(
[loss_per_word, num_words],
'total_loss_scalar',
)
total_loss_scalar_weighted = model.net.Scale(
[total_loss_scalar],
'total_loss_scalar_weighted',
scale=1.0 / self.batch_size,
)
return [total_loss_scalar_weighted]
def forward_model_build_fun(self, model, loss_scale=None):
return self.model_build_fun(
model=model,
forward_only=True,
loss_scale=loss_scale
)
def _calc_norm_ratio(self, model, params, scope, ONE):
with core.NameScope(scope):
grad_squared_sums = []
for i, param in enumerate(params):
logger.info(param)
grad = (
model.param_to_grad[param]
if not isinstance(
model.param_to_grad[param],
core.GradientSlice,
) else model.param_to_grad[param].values
)
grad_squared = model.net.Sqr(
[grad],
'grad_{}_squared'.format(i),
)
grad_squared_sum = model.net.SumElements(
grad_squared,
'grad_{}_squared_sum'.format(i),
)
grad_squared_sums.append(grad_squared_sum)
grad_squared_full_sum = model.net.Sum(
grad_squared_sums,
'grad_squared_full_sum',
)
global_norm = model.net.Pow(
grad_squared_full_sum,
'global_norm',
exponent=0.5,
)
clip_norm = model.param_init_net.ConstantFill(
[],
'clip_norm',
shape=[],
value=float(self.model_params['max_gradient_norm']),
)
max_norm = model.net.Max(
[global_norm, clip_norm],
'max_norm',
)
norm_ratio = model.net.Div(
[clip_norm, max_norm],
'norm_ratio',
)
return norm_ratio
def _apply_norm_ratio(
self, norm_ratio, model, params, learning_rate, scope, ONE
):
for param in params:
param_grad = model.param_to_grad[param]
nlr = model.net.Negative(
[learning_rate],
'negative_learning_rate',
)
with core.NameScope(scope):
update_coeff = model.net.Mul(
[nlr, norm_ratio],
'update_coeff',
broadcast=1,
)
if isinstance(param_grad, core.GradientSlice):
param_grad_values = param_grad.values
model.net.ScatterWeightedSum(
[
param,
ONE,
param_grad.indices,
param_grad_values,
update_coeff,
],
param,
)
else:
model.net.WeightedSum(
[
param,
ONE,
param_grad,
update_coeff,
],
param,
)
def norm_clipped_grad_update(self, model, scope):
if self.num_gpus == 0:
learning_rate = self.learning_rate
else:
learning_rate = model.CopyCPUToGPU(self.learning_rate, 'LR')
params = []
for param in model.GetParams(top_scope=True):
if param in model.param_to_grad:
if not isinstance(
model.param_to_grad[param],
core.GradientSlice,
):
params.append(param)
ONE = model.param_init_net.ConstantFill(
[],
'ONE',
shape=[1],
value=1.0,
)
logger.info('Dense trainable variables: ')
norm_ratio = self._calc_norm_ratio(model, params, scope, ONE)
self._apply_norm_ratio(
norm_ratio, model, params, learning_rate, scope, ONE
)
def norm_clipped_sparse_grad_update(self, model, scope):
learning_rate = self.learning_rate
params = []
for param in model.GetParams(top_scope=True):
if param in model.param_to_grad:
if isinstance(
model.param_to_grad[param],
core.GradientSlice,
):
params.append(param)
ONE = model.param_init_net.ConstantFill(
[],
'ONE',
shape=[1],
value=1.0,
)
logger.info('Sparse trainable variables: ')
norm_ratio = self._calc_norm_ratio(model, params, scope, ONE)
self._apply_norm_ratio(
norm_ratio, model, params, learning_rate, scope, ONE
)
def total_loss_scalar(self):
if self.num_gpus == 0:
return workspace.FetchBlob('total_loss_scalar')
else:
total_loss = 0
for i in range(self.num_gpus):
name = 'gpu_{}/total_loss_scalar'.format(i)
gpu_loss = workspace.FetchBlob(name)
total_loss += gpu_loss
return total_loss
def _init_model(self):
workspace.RunNetOnce(self.model.param_init_net)
def create_net(net):
workspace.CreateNet(
net,
input_blobs=[str(i) for i in net.external_inputs],
)
create_net(self.model.net)
create_net(self.forward_net)
def __init__(
self,
model_params,
source_vocab_size,
target_vocab_size,
num_gpus=1,
num_cpus=1,
):
self.model_params = model_params
self.encoder_type = 'rnn'
self.encoder_params = model_params['encoder_type']
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.num_gpus = num_gpus
self.num_cpus = num_cpus
self.batch_size = model_params['batch_size']
workspace.GlobalInit([
'caffe2',
# NOTE: modify log level for debugging purposes
'--caffe2_log_level=0',
# NOTE: modify log level for debugging purposes
'--v=0',
# Fail gracefully if one of the threads fails
'--caffe2_handle_executor_threads_exceptions=1',
'--caffe2_mkl_num_threads=' + str(self.num_cpus),
])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
workspace.ResetWorkspace()
def initialize_from_scratch(self):
logger.info('Initializing Seq2SeqModelCaffe2 from scratch: Start')
self._build_model(init_params=True)
self._init_model()
logger.info('Initializing Seq2SeqModelCaffe2 from scratch: Finish')
def get_current_step(self):
return workspace.FetchBlob(self.global_step)[0]
def inc_current_step(self):
workspace.FeedBlob(
self.global_step,
np.array([self.get_current_step() + 1]),
)
def step(
self,
batch,
forward_only
):
if self.num_gpus < 1:
batch_obj = prepare_batch(batch)
for batch_obj_name, batch_obj_value in zip(
Batch._fields,
batch_obj,
):
workspace.FeedBlob(batch_obj_name, batch_obj_value)
else:
for i in range(self.num_gpus):
gpu_batch = batch[i::self.num_gpus]
batch_obj = prepare_batch(gpu_batch)
for batch_obj_name, batch_obj_value in zip(
Batch._fields,
batch_obj,
):
name = 'gpu_{}/{}'.format(i, batch_obj_name)
if batch_obj_name in ['encoder_inputs', 'decoder_inputs']:
dev = core.DeviceOption(caffe2_pb2.CPU)
else:
dev = core.DeviceOption(workspace.GpuDeviceType, i)
workspace.FeedBlob(name, batch_obj_value, device_option=dev)
if forward_only:
workspace.RunNet(self.forward_net)
else:
workspace.RunNet(self.model.net)
self.inc_current_step()
return self.total_loss_scalar()
def save(self, checkpoint_path_prefix, current_step):
checkpoint_path = '{0}-{1}'.format(
checkpoint_path_prefix,
current_step,
)
assert workspace.RunOperatorOnce(core.CreateOperator(
'Save',
self.model.GetAllParams(),
[],
absolute_path=True,
db=checkpoint_path,
db_type='minidb',
))
checkpoint_config_path = os.path.join(
os.path.dirname(checkpoint_path_prefix),
'checkpoint',
)
with open(checkpoint_config_path, 'w') as checkpoint_config_file:
checkpoint_config_file.write(
'model_checkpoint_path: "' + checkpoint_path + '"\n'
'all_model_checkpoint_paths: "' + checkpoint_path + '"\n'
)
logger.info('Saved checkpoint file to ' + checkpoint_path)
return checkpoint_path
def gen_batches(source_corpus, target_corpus, source_vocab, target_vocab,
batch_size, max_length):
with open(source_corpus) as source, open(target_corpus) as target:
parallel_sentences = []
for source_sentence, target_sentence in zip(source, target):
numerized_source_sentence = seq2seq_util.get_numberized_sentence(
source_sentence,
source_vocab,
)
numerized_target_sentence = seq2seq_util.get_numberized_sentence(
target_sentence,
target_vocab,
)
if (
len(numerized_source_sentence) > 0 and
len(numerized_target_sentence) > 0 and
(
max_length is None or (
len(numerized_source_sentence) <= max_length and
len(numerized_target_sentence) <= max_length
)
)
):
parallel_sentences.append((
numerized_source_sentence,
numerized_target_sentence,
))
parallel_sentences.sort(key=lambda s_t: (len(s_t[0]), len(s_t[1])))
batches, batch = [], []
for sentence_pair in parallel_sentences:
batch.append(sentence_pair)
if len(batch) >= batch_size:
batches.append(batch)
batch = []
if len(batch) > 0:
while len(batch) < batch_size:
batch.append(batch[-1])
assert len(batch) == batch_size
batches.append(batch)
random.shuffle(batches)
return batches
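# Note on gen_batches: pairs are sorted by (source, target) length so each
# minibatch holds similarly-sized sequences and needs less padding; the
# final partial batch is filled by repeating its last pair, and the batch
# order is then shuffled.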
def run_seq2seq_model(args, model_params=None):
source_vocab = seq2seq_util.gen_vocab(
args.source_corpus,
args.unk_threshold,
)
target_vocab = seq2seq_util.gen_vocab(
args.target_corpus,
args.unk_threshold,
)
logger.info('Source vocab size {}'.format(len(source_vocab)))
logger.info('Target vocab size {}'.format(len(target_vocab)))
batches = gen_batches(args.source_corpus, args.target_corpus, source_vocab,
target_vocab, model_params['batch_size'],
args.max_length)
logger.info('Number of training batches {}'.format(len(batches)))
batches_eval = gen_batches(args.source_corpus_eval, args.target_corpus_eval,
source_vocab, target_vocab,
model_params['batch_size'], args.max_length)
logger.info('Number of eval batches {}'.format(len(batches_eval)))
with Seq2SeqModelCaffe2(
model_params=model_params,
source_vocab_size=len(source_vocab),
target_vocab_size=len(target_vocab),
num_gpus=args.num_gpus,
num_cpus=20,
) as model_obj:
model_obj.initialize_from_scratch()
for i in range(args.epochs):
logger.info('Epoch {}'.format(i))
total_loss = 0
for batch in batches:
total_loss += model_obj.step(
batch=batch,
forward_only=False,
)
logger.info('\ttraining loss {}'.format(total_loss))
total_loss = 0
for batch in batches_eval:
total_loss += model_obj.step(
batch=batch,
forward_only=True,
)
logger.info('\teval loss {}'.format(total_loss))
if args.checkpoint is not None:
model_obj.save(args.checkpoint, i)
def main():
random.seed(31415)
parser = argparse.ArgumentParser(
description='Caffe2: Seq2Seq Training'
)
parser.add_argument('--source-corpus', type=str, default=None,
help='Path to source corpus in a text file format. Each '
'line in the file should contain a single sentence',
required=True)
parser.add_argument('--target-corpus', type=str, default=None,
help='Path to target corpus in a text file format',
required=True)
parser.add_argument('--max-length', type=int, default=None,
help='Maximal lengths of train and eval sentences')
    parser.add_argument('--unk-threshold', type=int, default=50,
                        help='Frequency threshold below which a token is '
                             'replaced by the unknown token')
parser.add_argument('--batch-size', type=int, default=32,
help='Training batch size')
parser.add_argument('--epochs', type=int, default=10,
help='Number of iterations over training data')
parser.add_argument('--learning-rate', type=float, default=0.5,
help='Learning rate')
    parser.add_argument('--max-gradient-norm', type=float, default=1.0,
                        help='Max global norm of gradients at the end of each '
                             'backward pass; gradients are clipped to this value.')
parser.add_argument('--num-gpus', type=int, default=0,
help='Number of GPUs for data parallel model')
parser.add_argument('--use-bidirectional-encoder', action='store_true',
help='Set flag to use bidirectional recurrent network '
'for first layer of encoder')
parser.add_argument('--use-attention', action='store_true',
help='Set flag to use seq2seq with attention model')
parser.add_argument('--source-corpus-eval', type=str, default=None,
help='Path to source corpus for evaluation in a text '
'file format', required=True)
parser.add_argument('--target-corpus-eval', type=str, default=None,
help='Path to target corpus for evaluation in a text '
'file format', required=True)
parser.add_argument('--encoder-cell-num-units', type=int, default=512,
help='Number of cell units per encoder layer')
    parser.add_argument('--encoder-num-layers', type=int, default=2,
                        help='Number of encoder layers')
parser.add_argument('--decoder-cell-num-units', type=int, default=512,
help='Number of cell units in the decoder layer')
    parser.add_argument('--decoder-num-layers', type=int, default=2,
                        help='Number of decoder layers')
parser.add_argument('--encoder-embedding-size', type=int, default=256,
help='Size of embedding in the encoder layer')
parser.add_argument('--decoder-embedding-size', type=int, default=512,
help='Size of embedding in the decoder layer')
parser.add_argument('--decoder-softmax-size', type=int, default=None,
help='Size of softmax layer in the decoder')
parser.add_argument('--checkpoint', type=str, default=None,
help='Path to checkpoint')
args = parser.parse_args()
encoder_layer_configs = [
dict(
num_units=args.encoder_cell_num_units,
),
] * args.encoder_num_layers
    if args.use_bidirectional_encoder:
        assert args.encoder_cell_num_units % 2 == 0
        # Use integer division so num_units stays an int under
        # "from __future__ import division".
        encoder_layer_configs[0]['num_units'] //= 2
decoder_layer_configs = [
dict(
num_units=args.decoder_cell_num_units,
),
] * args.decoder_num_layers
run_seq2seq_model(args, model_params=dict(
attention=('regular' if args.use_attention else 'none'),
decoder_layer_configs=decoder_layer_configs,
encoder_type=dict(
encoder_layer_configs=encoder_layer_configs,
use_bidirectional_encoder=args.use_bidirectional_encoder,
),
batch_size=args.batch_size,
optimizer_params=dict(
learning_rate=args.learning_rate,
),
encoder_embedding_size=args.encoder_embedding_size,
decoder_embedding_size=args.decoder_embedding_size,
decoder_softmax_size=args.decoder_softmax_size,
max_gradient_norm=args.max_gradient_norm,
))
if __name__ == '__main__':
    main()
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/models/seq2seq/train.py | 0.517083 | 0.187765 | train.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
import argparse
from future.utils import viewitems
import logging
import numpy as np
from six import with_metaclass
import sys
from caffe2.python import core, rnn_cell, workspace
from caffe2.python.models.seq2seq.beam_search import BeamSearchForwardOnly
from caffe2.python.models.seq2seq.seq2seq_model_helper import Seq2SeqModelHelper
import caffe2.python.models.seq2seq.seq2seq_util as seq2seq_util
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stderr))
def _weighted_sum(model, values, weight, output_name):
values_weights = zip(values, [weight] * len(values))
values_weights_flattened = [x for v_w in values_weights for x in v_w]
return model.net.WeightedSum(
values_weights_flattened,
output_name,
)
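# Illustrative expansion: _weighted_sum(model, [a, b], w, 'out') emits
# WeightedSum([a, w, b, w], 'out'), i.e. out = w * a + w * b.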
class Seq2SeqModelCaffe2EnsembleDecoderBase(with_metaclass(ABCMeta, object)):
@abstractmethod
def get_model_file(self, model):
pass
@abstractmethod
def get_db_type(self):
pass
def build_word_rewards(self, vocab_size, word_reward, unk_reward):
word_rewards = np.full([vocab_size], word_reward, dtype=np.float32)
word_rewards[seq2seq_util.PAD_ID] = 0
word_rewards[seq2seq_util.GO_ID] = 0
word_rewards[seq2seq_util.EOS_ID] = 0
word_rewards[seq2seq_util.UNK_ID] = word_reward + unk_reward
return word_rewards
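    # In effect, word_rewards adds word_reward to the score of every
    # ordinary token at each decoding step; PAD/GO/EOS receive no reward
    # and UNK additionally receives unk_reward.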
def load_models(self):
db_reader = 'reader'
for model, scope_name in zip(
self.models,
self.decoder_scope_names,
):
params_for_current_model = [
param
for param in self.model.GetAllParams()
if str(param).startswith(scope_name)
]
assert workspace.RunOperatorOnce(core.CreateOperator(
'CreateDB',
[], [db_reader],
db=self.get_model_file(model),
db_type=self.get_db_type())
), 'Failed to create db {}'.format(self.get_model_file(model))
assert workspace.RunOperatorOnce(core.CreateOperator(
'Load',
[db_reader],
params_for_current_model,
load_all=1,
add_prefix=scope_name + '/',
strip_prefix='gpu_0/',
))
logger.info('Model {} is loaded from a checkpoint {}'.format(
scope_name, self.get_model_file(model)))
class Seq2SeqModelCaffe2EnsembleDecoder(Seq2SeqModelCaffe2EnsembleDecoderBase):
def get_model_file(self, model):
return model['model_file']
def get_db_type(self):
return 'minidb'
def scope(self, scope_name, blob_name):
return (
scope_name + '/' + blob_name
if scope_name is not None
else blob_name
)
def _build_decoder(
self,
model,
step_model,
model_params,
scope,
previous_tokens,
timestep,
fake_seq_lengths,
):
attention_type = model_params['attention']
assert attention_type in ['none', 'regular']
use_attention = (attention_type != 'none')
with core.NameScope(scope):
encoder_embeddings = seq2seq_util.build_embeddings(
model=model,
vocab_size=self.source_vocab_size,
embedding_size=model_params['encoder_embedding_size'],
name='encoder_embeddings',
freeze_embeddings=False,
)
(
encoder_outputs,
weighted_encoder_outputs,
final_encoder_hidden_states,
final_encoder_cell_states,
encoder_units_per_layer,
) = seq2seq_util.build_embedding_encoder(
model=model,
encoder_params=model_params['encoder_type'],
num_decoder_layers=len(model_params['decoder_layer_configs']),
inputs=self.encoder_inputs,
input_lengths=self.encoder_lengths,
vocab_size=self.source_vocab_size,
embeddings=encoder_embeddings,
embedding_size=model_params['encoder_embedding_size'],
use_attention=use_attention,
num_gpus=0,
forward_only=True,
scope=scope,
)
with core.NameScope(scope):
if use_attention:
# [max_source_length, beam_size, encoder_output_dim]
encoder_outputs = model.net.Tile(
encoder_outputs,
'encoder_outputs_tiled',
tiles=self.beam_size,
axis=1,
)
if weighted_encoder_outputs is not None:
weighted_encoder_outputs = model.net.Tile(
weighted_encoder_outputs,
'weighted_encoder_outputs_tiled',
tiles=self.beam_size,
axis=1,
)
decoder_embeddings = seq2seq_util.build_embeddings(
model=model,
vocab_size=self.target_vocab_size,
embedding_size=model_params['decoder_embedding_size'],
name='decoder_embeddings',
freeze_embeddings=False,
)
embedded_tokens_t_prev = step_model.net.Gather(
[decoder_embeddings, previous_tokens],
'embedded_tokens_t_prev',
)
decoder_cells = []
decoder_units_per_layer = []
for i, layer_config in enumerate(model_params['decoder_layer_configs']):
num_units = layer_config['num_units']
decoder_units_per_layer.append(num_units)
if i == 0:
input_size = model_params['decoder_embedding_size']
else:
input_size = (
model_params['decoder_layer_configs'][i - 1]['num_units']
)
cell = rnn_cell.LSTMCell(
forward_only=True,
input_size=input_size,
hidden_size=num_units,
forget_bias=0.0,
memory_optimization=False,
)
decoder_cells.append(cell)
with core.NameScope(scope):
if final_encoder_hidden_states is not None:
for i in range(len(final_encoder_hidden_states)):
if final_encoder_hidden_states[i] is not None:
final_encoder_hidden_states[i] = model.net.Tile(
final_encoder_hidden_states[i],
'final_encoder_hidden_tiled_{}'.format(i),
tiles=self.beam_size,
axis=1,
)
if final_encoder_cell_states is not None:
for i in range(len(final_encoder_cell_states)):
if final_encoder_cell_states[i] is not None:
final_encoder_cell_states[i] = model.net.Tile(
final_encoder_cell_states[i],
'final_encoder_cell_tiled_{}'.format(i),
tiles=self.beam_size,
axis=1,
)
initial_states = \
seq2seq_util.build_initial_rnn_decoder_states(
model=model,
encoder_units_per_layer=encoder_units_per_layer,
decoder_units_per_layer=decoder_units_per_layer,
final_encoder_hidden_states=final_encoder_hidden_states,
final_encoder_cell_states=final_encoder_cell_states,
use_attention=use_attention,
)
attention_decoder = seq2seq_util.LSTMWithAttentionDecoder(
encoder_outputs=encoder_outputs,
encoder_output_dim=encoder_units_per_layer[-1],
encoder_lengths=None,
vocab_size=self.target_vocab_size,
attention_type=attention_type,
embedding_size=model_params['decoder_embedding_size'],
decoder_num_units=decoder_units_per_layer[-1],
decoder_cells=decoder_cells,
weighted_encoder_outputs=weighted_encoder_outputs,
name=scope,
)
states_prev = step_model.net.AddExternalInputs(*[
'{}/{}_prev'.format(scope, s)
for s in attention_decoder.get_state_names()
])
decoder_outputs, states = attention_decoder.apply(
model=step_model,
input_t=embedded_tokens_t_prev,
seq_lengths=fake_seq_lengths,
states=states_prev,
timestep=timestep,
)
state_configs = [
BeamSearchForwardOnly.StateConfig(
initial_value=initial_state,
state_prev_link=BeamSearchForwardOnly.LinkConfig(
blob=state_prev,
offset=0,
window=1,
),
state_link=BeamSearchForwardOnly.LinkConfig(
blob=state,
offset=1,
window=1,
),
)
for initial_state, state_prev, state in zip(
initial_states,
states_prev,
states,
)
]
with core.NameScope(scope):
decoder_outputs_flattened, _ = step_model.net.Reshape(
[decoder_outputs],
[
'decoder_outputs_flattened',
'decoder_outputs_and_contexts_combination_old_shape',
],
shape=[-1, attention_decoder.get_output_dim()],
)
output_logits = seq2seq_util.output_projection(
model=step_model,
decoder_outputs=decoder_outputs_flattened,
decoder_output_size=attention_decoder.get_output_dim(),
target_vocab_size=self.target_vocab_size,
decoder_softmax_size=model_params['decoder_softmax_size'],
)
# [1, beam_size, target_vocab_size]
output_probs = step_model.net.Softmax(
output_logits,
'output_probs',
)
output_log_probs = step_model.net.Log(
output_probs,
'output_log_probs',
)
if use_attention:
attention_weights = attention_decoder.get_attention_weights()
else:
attention_weights = step_model.net.ConstantFill(
[self.encoder_inputs],
'zero_attention_weights_tmp_1',
value=0.0,
)
attention_weights = step_model.net.Transpose(
attention_weights,
'zero_attention_weights_tmp_2',
)
attention_weights = step_model.net.Tile(
attention_weights,
'zero_attention_weights_tmp',
tiles=self.beam_size,
axis=0,
)
return (
state_configs,
output_log_probs,
attention_weights,
)
def __init__(
self,
translate_params,
):
self.models = translate_params['ensemble_models']
decoding_params = translate_params['decoding_params']
self.beam_size = decoding_params['beam_size']
assert len(self.models) > 0
source_vocab = self.models[0]['source_vocab']
target_vocab = self.models[0]['target_vocab']
for model in self.models:
assert model['source_vocab'] == source_vocab
assert model['target_vocab'] == target_vocab
self.source_vocab_size = len(source_vocab)
self.target_vocab_size = len(target_vocab)
self.decoder_scope_names = [
'model{}'.format(i) for i in range(len(self.models))
]
self.model = Seq2SeqModelHelper(init_params=True)
self.encoder_inputs = self.model.net.AddExternalInput('encoder_inputs')
self.encoder_lengths = self.model.net.AddExternalInput(
'encoder_lengths'
)
self.max_output_seq_len = self.model.net.AddExternalInput(
'max_output_seq_len'
)
fake_seq_lengths = self.model.param_init_net.ConstantFill(
[],
'fake_seq_lengths',
shape=[self.beam_size],
value=100000,
dtype=core.DataType.INT32,
)
beam_decoder = BeamSearchForwardOnly(
beam_size=self.beam_size,
model=self.model,
go_token_id=seq2seq_util.GO_ID,
eos_token_id=seq2seq_util.EOS_ID,
)
step_model = beam_decoder.get_step_model()
state_configs = []
output_log_probs = []
attention_weights = []
for model, scope_name in zip(
self.models,
self.decoder_scope_names,
):
(
state_configs_per_decoder,
output_log_probs_per_decoder,
attention_weights_per_decoder,
) = self._build_decoder(
model=self.model,
step_model=step_model,
model_params=model['model_params'],
scope=scope_name,
previous_tokens=beam_decoder.get_previous_tokens(),
timestep=beam_decoder.get_timestep(),
fake_seq_lengths=fake_seq_lengths,
)
state_configs.extend(state_configs_per_decoder)
output_log_probs.append(output_log_probs_per_decoder)
if attention_weights_per_decoder is not None:
attention_weights.append(attention_weights_per_decoder)
assert len(attention_weights) > 0
num_decoders_with_attention_blob = (
self.model.param_init_net.ConstantFill(
[],
'num_decoders_with_attention_blob',
value=1 / float(len(attention_weights)),
shape=[1],
)
)
# [beam_size, encoder_length, 1]
attention_weights_average = _weighted_sum(
model=step_model,
values=attention_weights,
weight=num_decoders_with_attention_blob,
output_name='attention_weights_average',
)
num_decoders_blob = self.model.param_init_net.ConstantFill(
[],
'num_decoders_blob',
value=1 / float(len(output_log_probs)),
shape=[1],
)
# [beam_size, target_vocab_size]
output_log_probs_average = _weighted_sum(
model=step_model,
values=output_log_probs,
weight=num_decoders_blob,
output_name='output_log_probs_average',
)
word_rewards = self.model.param_init_net.ConstantFill(
[],
'word_rewards',
shape=[self.target_vocab_size],
value=0.0,
dtype=core.DataType.FLOAT,
)
(
self.output_token_beam_list,
self.output_prev_index_beam_list,
self.output_score_beam_list,
self.output_attention_weights_beam_list,
) = beam_decoder.apply(
inputs=self.encoder_inputs,
length=self.max_output_seq_len,
log_probs=output_log_probs_average,
attentions=attention_weights_average,
state_configs=state_configs,
data_dependencies=[],
word_rewards=word_rewards,
)
workspace.RunNetOnce(self.model.param_init_net)
workspace.FeedBlob(
'word_rewards',
self.build_word_rewards(
vocab_size=self.target_vocab_size,
word_reward=translate_params['decoding_params']['word_reward'],
unk_reward=translate_params['decoding_params']['unk_reward'],
)
)
workspace.CreateNet(
self.model.net,
input_blobs=[
str(self.encoder_inputs),
str(self.encoder_lengths),
str(self.max_output_seq_len),
],
)
        logger.info('Params created:')
for param in self.model.params:
logger.info(param)
def decode(self, numberized_input, max_output_seq_len):
workspace.FeedBlob(
self.encoder_inputs,
np.array([
[token_id] for token_id in reversed(numberized_input)
]).astype(dtype=np.int32),
)
workspace.FeedBlob(
self.encoder_lengths,
np.array([len(numberized_input)]).astype(dtype=np.int32),
)
workspace.FeedBlob(
self.max_output_seq_len,
np.array([max_output_seq_len]).astype(dtype=np.int64),
)
workspace.RunNet(self.model.net)
num_steps = max_output_seq_len
score_beam_list = workspace.FetchBlob(self.output_score_beam_list)
token_beam_list = (
workspace.FetchBlob(self.output_token_beam_list)
)
prev_index_beam_list = (
workspace.FetchBlob(self.output_prev_index_beam_list)
)
attention_weights_beam_list = (
workspace.FetchBlob(self.output_attention_weights_beam_list)
)
best_indices = (num_steps, 0)
for i in range(num_steps + 1):
for hyp_index in range(self.beam_size):
if (
(
token_beam_list[i][hyp_index][0] ==
seq2seq_util.EOS_ID or
i == num_steps
) and
(
score_beam_list[i][hyp_index][0] >
score_beam_list[best_indices[0]][best_indices[1]][0]
)
):
best_indices = (i, hyp_index)
i, hyp_index = best_indices
output = []
attention_weights_per_token = []
best_score = -score_beam_list[i][hyp_index][0]
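        # Walk the backpointers from the chosen (step, hypothesis) pair back
        # to the start of the beam to recover the output token sequence.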
while i > 0:
output.append(token_beam_list[i][hyp_index][0])
attention_weights_per_token.append(
attention_weights_beam_list[i][hyp_index]
)
hyp_index = prev_index_beam_list[i][hyp_index][0]
i -= 1
attention_weights_per_token = reversed(attention_weights_per_token)
# encoder_inputs are reversed, see get_batch func
attention_weights_per_token = [
list(reversed(attention_weights))[:len(numberized_input)]
for attention_weights in attention_weights_per_token
]
output = list(reversed(output))
return output, attention_weights_per_token, best_score
def run_seq2seq_beam_decoder(args, model_params, decoding_params):
source_vocab = seq2seq_util.gen_vocab(
args.source_corpus,
args.unk_threshold,
)
logger.info('Source vocab size {}'.format(len(source_vocab)))
target_vocab = seq2seq_util.gen_vocab(
args.target_corpus,
args.unk_threshold,
)
inversed_target_vocab = {v: k for (k, v) in viewitems(target_vocab)}
logger.info('Target vocab size {}'.format(len(target_vocab)))
decoder = Seq2SeqModelCaffe2EnsembleDecoder(
translate_params=dict(
ensemble_models=[dict(
source_vocab=source_vocab,
target_vocab=target_vocab,
model_params=model_params,
model_file=args.checkpoint,
)],
decoding_params=decoding_params,
),
)
decoder.load_models()
for line in sys.stdin:
numerized_source_sentence = seq2seq_util.get_numberized_sentence(
line,
source_vocab,
)
translation, alignment, _ = decoder.decode(
numerized_source_sentence,
2 * len(numerized_source_sentence) + 5,
)
print(' '.join([inversed_target_vocab[tid] for tid in translation]))
def main():
parser = argparse.ArgumentParser(
description='Caffe2: Seq2Seq Translation',
)
parser.add_argument('--source-corpus', type=str, default=None,
help='Path to source corpus in a text file format. Each '
'line in the file should contain a single sentence',
required=True)
parser.add_argument('--target-corpus', type=str, default=None,
help='Path to target corpus in a text file format',
required=True)
parser.add_argument('--unk-threshold', type=int, default=50,
                        help='Frequency threshold at or below which a token '
                        'is replaced by the unknown token')
parser.add_argument('--use-bidirectional-encoder', action='store_true',
help='Set flag to use bidirectional recurrent network '
'in encoder')
parser.add_argument('--use-attention', action='store_true',
help='Set flag to use seq2seq with attention model')
parser.add_argument('--encoder-cell-num-units', type=int, default=512,
help='Number of cell units per encoder layer')
parser.add_argument('--encoder-num-layers', type=int, default=2,
                        help='Number of encoder layers')
parser.add_argument('--decoder-cell-num-units', type=int, default=512,
help='Number of cell units in the decoder layer')
parser.add_argument('--decoder-num-layers', type=int, default=2,
                        help='Number of decoder layers')
parser.add_argument('--encoder-embedding-size', type=int, default=256,
help='Size of embedding in the encoder layer')
parser.add_argument('--decoder-embedding-size', type=int, default=512,
help='Size of embedding in the decoder layer')
parser.add_argument('--decoder-softmax-size', type=int, default=None,
help='Size of softmax layer in the decoder')
parser.add_argument('--beam-size', type=int, default=6,
help='Size of beam for the decoder')
parser.add_argument('--word-reward', type=float, default=0.0,
                        help='Reward added for each generated word.')
parser.add_argument('--unk-reward', type=float, default=0.0,
                        help='Reward added for each generated UNK token. '
                        'Typically should be negative.')
parser.add_argument('--checkpoint', type=str, default=None,
help='Path to checkpoint', required=True)
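    # Example invocation (hypothetical file names):
    #   python translate.py --source-corpus src.txt --target-corpus tgt.txt \
    #       --checkpoint model.ckpt --use-attention --beam-size 6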
args = parser.parse_args()
encoder_layer_configs = [
dict(
num_units=args.encoder_cell_num_units,
),
] * args.encoder_num_layers
if args.use_bidirectional_encoder:
assert args.encoder_cell_num_units % 2 == 0
        # Integer division keeps num_units an int (true division would
        # produce a float, which is invalid as a layer size).
        encoder_layer_configs[0]['num_units'] //= 2
decoder_layer_configs = [
dict(
num_units=args.decoder_cell_num_units,
),
] * args.decoder_num_layers
run_seq2seq_beam_decoder(
args,
model_params=dict(
attention=('regular' if args.use_attention else 'none'),
decoder_layer_configs=decoder_layer_configs,
encoder_type=dict(
encoder_layer_configs=encoder_layer_configs,
use_bidirectional_encoder=args.use_bidirectional_encoder,
),
encoder_embedding_size=args.encoder_embedding_size,
decoder_embedding_size=args.decoder_embedding_size,
decoder_softmax_size=args.decoder_softmax_size,
),
decoding_params=dict(
beam_size=args.beam_size,
word_reward=args.word_reward,
unk_reward=args.unk_reward,
),
)
if __name__ == '__main__':
main() | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/models/seq2seq/translate.py | 0.554229 | 0.206354 | translate.py | pypi |
""" A bunch of util functions to build Seq2Seq models with Caffe2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
from future.utils import viewitems
import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import attention, core, rnn_cell, brew
PAD_ID = 0
PAD = '<PAD>'
GO_ID = 1
GO = '<GO>'
EOS_ID = 2
EOS = '<EOS>'
UNK_ID = 3
UNK = '<UNK>'
def gen_vocab(corpus, unk_threshold):
vocab = collections.defaultdict(lambda: len(vocab))
freqs = collections.defaultdict(lambda: 0)
    # Add the special tokens first so their IDs match PAD_ID, GO_ID, EOS_ID,
    # and UNK_ID defined above
vocab[PAD]
vocab[GO]
vocab[EOS]
vocab[UNK]
with open(corpus) as f:
for sentence in f:
tokens = sentence.strip().split()
for token in tokens:
freqs[token] += 1
for token, freq in viewitems(freqs):
if freq > unk_threshold:
vocab[token]
return vocab
def get_numberized_sentence(sentence, vocab):
numerized_sentence = []
for token in sentence.strip().split():
if token in vocab:
numerized_sentence.append(vocab[token])
else:
numerized_sentence.append(vocab[UNK])
return numerized_sentence
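# Usage sketch (hypothetical corpus path; assumes one sentence per line):
#   vocab = gen_vocab('corpus.txt', unk_threshold=50)
#   ids = get_numberized_sentence('hello world', vocab)
# Tokens whose corpus frequency is <= unk_threshold are never added to the
# vocab, so they map to vocab[UNK] (id 3) at numberize time.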
def rnn_unidirectional_layer(
model,
inputs,
input_lengths,
input_size,
num_units,
dropout_keep_prob,
forward_only,
return_sequence_output,
return_final_state,
scope=None,
):
""" Unidirectional LSTM encoder."""
with core.NameScope(scope):
initial_cell_state = model.param_init_net.ConstantFill(
[],
'initial_cell_state',
shape=[num_units],
value=0.0,
)
initial_hidden_state = model.param_init_net.ConstantFill(
[],
'initial_hidden_state',
shape=[num_units],
value=0.0,
)
cell = rnn_cell.LSTMCell(
input_size=input_size,
hidden_size=num_units,
forget_bias=0.0,
memory_optimization=False,
name=(scope + '/' if scope else '') + 'lstm',
forward_only=forward_only,
)
dropout_ratio = (
None if dropout_keep_prob is None else (1.0 - dropout_keep_prob)
)
if dropout_ratio is not None:
cell = rnn_cell.DropoutCell(
internal_cell=cell,
dropout_ratio=dropout_ratio,
name=(scope + '/' if scope else '') + 'dropout',
forward_only=forward_only,
is_test=False,
)
outputs_with_grads = []
if return_sequence_output:
outputs_with_grads.append(0)
if return_final_state:
outputs_with_grads.extend([1, 3])
outputs, (_, final_hidden_state, _, final_cell_state) = (
cell.apply_over_sequence(
model=model,
inputs=inputs,
seq_lengths=input_lengths,
initial_states=(initial_hidden_state, initial_cell_state),
outputs_with_grads=outputs_with_grads,
)
)
return outputs, final_hidden_state, final_cell_state
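# Shape note (indicative, for the usual time-major layout): `outputs` covers
# the whole sequence, roughly [seq_len, batch, num_units], while the final
# hidden/cell states correspond to the last valid step of each sequence.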
def rnn_bidirectional_layer(
model,
inputs,
input_lengths,
input_size,
num_units,
dropout_keep_prob,
forward_only,
return_sequence_output,
return_final_state,
scope=None,
):
outputs_fw, final_hidden_fw, final_cell_fw = rnn_unidirectional_layer(
model,
inputs,
input_lengths,
input_size,
num_units,
dropout_keep_prob,
forward_only,
return_sequence_output,
return_final_state,
scope=(scope + '/' if scope else '') + 'fw',
)
with core.NameScope(scope):
reversed_inputs = model.net.ReversePackedSegs(
[inputs, input_lengths],
['reversed_inputs'],
)
outputs_bw, final_hidden_bw, final_cell_bw = rnn_unidirectional_layer(
model,
reversed_inputs,
input_lengths,
input_size,
num_units,
dropout_keep_prob,
forward_only,
return_sequence_output,
return_final_state,
scope=(scope + '/' if scope else '') + 'bw',
)
with core.NameScope(scope):
outputs_bw = model.net.ReversePackedSegs(
[outputs_bw, input_lengths],
['outputs_bw'],
)
# Concatenate forward and backward results
if return_sequence_output:
with core.NameScope(scope):
outputs, _ = model.net.Concat(
[outputs_fw, outputs_bw],
['outputs', 'outputs_dim'],
axis=2,
)
else:
outputs = None
if return_final_state:
with core.NameScope(scope):
final_hidden_state, _ = model.net.Concat(
[final_hidden_fw, final_hidden_bw],
['final_hidden_state', 'final_hidden_state_dim'],
axis=2,
)
final_cell_state, _ = model.net.Concat(
[final_cell_fw, final_cell_bw],
['final_cell_state', 'final_cell_state_dim'],
axis=2,
)
else:
final_hidden_state = None
final_cell_state = None
return outputs, final_hidden_state, final_cell_state
def build_embeddings(
model,
vocab_size,
embedding_size,
name,
freeze_embeddings,
):
embeddings = model.param_init_net.GaussianFill(
[],
name,
shape=[vocab_size, embedding_size],
std=0.1,
)
if not freeze_embeddings:
model.params.append(embeddings)
return embeddings
def get_layer_scope(scope, layer_type, i):
prefix = (scope + '/' if scope else '') + layer_type
return '{}/layer{}'.format(prefix, i)
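# Example: get_layer_scope('model0', 'encoder', 1) -> 'model0/encoder/layer1'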
def build_embedding_encoder(
model,
encoder_params,
num_decoder_layers,
inputs,
input_lengths,
vocab_size,
embeddings,
embedding_size,
use_attention,
num_gpus=0,
forward_only=False,
scope=None,
):
with core.NameScope(scope or ''):
if num_gpus == 0:
embedded_encoder_inputs = model.net.Gather(
[embeddings, inputs],
['embedded_encoder_inputs'],
)
else:
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
embedded_encoder_inputs_cpu = model.net.Gather(
[embeddings, inputs],
['embedded_encoder_inputs_cpu'],
)
embedded_encoder_inputs = model.CopyCPUToGPU(
embedded_encoder_inputs_cpu,
'embedded_encoder_inputs',
)
layer_inputs = embedded_encoder_inputs
layer_input_size = embedding_size
encoder_units_per_layer = []
final_encoder_hidden_states = []
final_encoder_cell_states = []
num_encoder_layers = len(encoder_params['encoder_layer_configs'])
use_bidirectional_encoder = encoder_params.get(
'use_bidirectional_encoder',
False,
)
for i, layer_config in enumerate(encoder_params['encoder_layer_configs']):
if use_bidirectional_encoder and i == 0:
layer_func = rnn_bidirectional_layer
output_dims = 2 * layer_config['num_units']
else:
layer_func = rnn_unidirectional_layer
output_dims = layer_config['num_units']
encoder_units_per_layer.append(output_dims)
is_final_layer = (i == num_encoder_layers - 1)
dropout_keep_prob = layer_config.get(
'dropout_keep_prob',
None,
)
return_final_state = i >= (num_encoder_layers - num_decoder_layers)
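        # Request gradients for the final states only of the topmost
        # num_decoder_layers encoder layers; those states seed the decoder's
        # initial states (see build_initial_rnn_decoder_states).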
(
layer_outputs,
final_layer_hidden_state,
final_layer_cell_state,
) = layer_func(
model=model,
inputs=layer_inputs,
input_lengths=input_lengths,
input_size=layer_input_size,
num_units=layer_config['num_units'],
dropout_keep_prob=dropout_keep_prob,
forward_only=forward_only,
return_sequence_output=(not is_final_layer) or use_attention,
return_final_state=return_final_state,
scope=get_layer_scope(scope, 'encoder', i),
)
if not is_final_layer:
layer_inputs = layer_outputs
layer_input_size = output_dims
final_encoder_hidden_states.append(final_layer_hidden_state)
final_encoder_cell_states.append(final_layer_cell_state)
encoder_outputs = layer_outputs
weighted_encoder_outputs = None
return (
encoder_outputs,
weighted_encoder_outputs,
final_encoder_hidden_states,
final_encoder_cell_states,
encoder_units_per_layer,
)
class LSTMWithAttentionDecoder(object):
def scope(self, name):
return self.name + '/' + name if self.name is not None else name
def _get_attention_type(self, attention_type_as_string):
if attention_type_as_string == 'regular':
return attention.AttentionType.Regular
elif attention_type_as_string == 'recurrent':
return attention.AttentionType.Recurrent
else:
            raise ValueError('Unknown attention type: ' + attention_type_as_string)
def __init__(
self,
encoder_outputs,
encoder_output_dim,
encoder_lengths,
vocab_size,
attention_type,
embedding_size,
decoder_num_units,
decoder_cells,
residual_output_layers=None,
name=None,
weighted_encoder_outputs=None,
):
self.name = name
self.num_layers = len(decoder_cells)
if attention_type == 'none':
self.cell = rnn_cell.MultiRNNCell(
decoder_cells,
name=self.scope('decoder'),
residual_output_layers=residual_output_layers,
)
self.use_attention = False
self.decoder_output_dim = decoder_num_units
self.output_indices = self.cell.output_indices
else:
decoder_cell = rnn_cell.MultiRNNCell(
decoder_cells,
name=self.scope('decoder'),
residual_output_layers=residual_output_layers,
)
self.cell = rnn_cell.AttentionCell(
encoder_output_dim=encoder_output_dim,
encoder_outputs=encoder_outputs,
encoder_lengths=encoder_lengths,
decoder_cell=decoder_cell,
decoder_state_dim=decoder_num_units,
name=self.scope('attention_decoder'),
attention_type=self._get_attention_type(attention_type),
weighted_encoder_outputs=weighted_encoder_outputs,
attention_memory_optimization=True,
)
self.use_attention = True
self.decoder_output_dim = decoder_num_units + encoder_output_dim
self.output_indices = decoder_cell.output_indices
self.output_indices.append(2 * self.num_layers)
def get_state_names(self):
return self.cell.get_state_names()
def get_outputs_with_grads(self):
# sequence (all) output locations are at twice their state index
return [2 * i for i in self.output_indices]
def get_output_dim(self):
return self.decoder_output_dim
def get_attention_weights(self):
assert self.use_attention
# [batch_size, encoder_length, 1]
return self.cell.get_attention_weights()
def apply(
self,
model,
input_t,
seq_lengths,
states,
timestep,
):
return self.cell.apply(
model=model,
input_t=input_t,
seq_lengths=seq_lengths,
states=states,
timestep=timestep,
)
def apply_over_sequence(
self,
model,
inputs,
seq_lengths,
initial_states,
):
return self.cell.apply_over_sequence(
model=model,
inputs=inputs,
seq_lengths=seq_lengths,
initial_states=initial_states,
outputs_with_grads=self.get_outputs_with_grads(),
)
def build_initial_rnn_decoder_states(
model,
encoder_units_per_layer,
decoder_units_per_layer,
final_encoder_hidden_states,
final_encoder_cell_states,
use_attention,
):
num_encoder_layers = len(encoder_units_per_layer)
num_decoder_layers = len(decoder_units_per_layer)
if num_encoder_layers > num_decoder_layers:
offset = num_encoder_layers - num_decoder_layers
else:
offset = 0
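    # Example (hypothetical sizes): with 4 encoder layers and 2 decoder
    # layers, offset == 2, so decoder layer i is initialized from encoder
    # layer i + 2 (the topmost encoder layers).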
initial_states = []
for i, decoder_num_units in enumerate(decoder_units_per_layer):
if (
final_encoder_hidden_states and
len(final_encoder_hidden_states) > (i + offset)
):
final_encoder_hidden_state = final_encoder_hidden_states[i + offset]
else:
final_encoder_hidden_state = None
if final_encoder_hidden_state is None:
decoder_initial_hidden_state = model.param_init_net.ConstantFill(
[],
'decoder_initial_hidden_state_{}'.format(i),
shape=[decoder_num_units],
value=0.0,
)
model.params.append(decoder_initial_hidden_state)
elif decoder_num_units != encoder_units_per_layer[i + offset]:
decoder_initial_hidden_state = brew.fc(
model,
final_encoder_hidden_state,
'decoder_initial_hidden_state_{}'.format(i),
encoder_units_per_layer[i + offset],
decoder_num_units,
axis=2,
)
else:
decoder_initial_hidden_state = final_encoder_hidden_state
initial_states.append(decoder_initial_hidden_state)
if (
final_encoder_cell_states and
len(final_encoder_cell_states) > (i + offset)
):
final_encoder_cell_state = final_encoder_cell_states[i + offset]
else:
final_encoder_cell_state = None
if final_encoder_cell_state is None:
decoder_initial_cell_state = model.param_init_net.ConstantFill(
[],
'decoder_initial_cell_state_{}'.format(i),
shape=[decoder_num_units],
value=0.0,
)
model.params.append(decoder_initial_cell_state)
elif decoder_num_units != encoder_units_per_layer[i + offset]:
decoder_initial_cell_state = brew.fc(
model,
final_encoder_cell_state,
'decoder_initial_cell_state_{}'.format(i),
encoder_units_per_layer[i + offset],
decoder_num_units,
axis=2,
)
else:
decoder_initial_cell_state = final_encoder_cell_state
initial_states.append(decoder_initial_cell_state)
if use_attention:
initial_attention_weighted_encoder_context = (
model.param_init_net.ConstantFill(
[],
'initial_attention_weighted_encoder_context',
shape=[encoder_units_per_layer[-1]],
value=0.0,
)
)
model.params.append(initial_attention_weighted_encoder_context)
initial_states.append(initial_attention_weighted_encoder_context)
return initial_states
def build_embedding_decoder(
model,
decoder_layer_configs,
inputs,
input_lengths,
encoder_lengths,
encoder_outputs,
weighted_encoder_outputs,
final_encoder_hidden_states,
final_encoder_cell_states,
encoder_units_per_layer,
vocab_size,
embeddings,
embedding_size,
attention_type,
forward_only,
num_gpus=0,
scope=None,
):
with core.NameScope(scope or ''):
if num_gpus == 0:
embedded_decoder_inputs = model.net.Gather(
[embeddings, inputs],
['embedded_decoder_inputs'],
)
else:
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
embedded_decoder_inputs_cpu = model.net.Gather(
[embeddings, inputs],
['embedded_decoder_inputs_cpu'],
)
embedded_decoder_inputs = model.CopyCPUToGPU(
embedded_decoder_inputs_cpu,
'embedded_decoder_inputs',
)
decoder_cells = []
decoder_units_per_layer = []
for i, layer_config in enumerate(decoder_layer_configs):
num_units = layer_config['num_units']
decoder_units_per_layer.append(num_units)
if i == 0:
input_size = embedding_size
else:
input_size = decoder_cells[-1].get_output_dim()
cell = rnn_cell.LSTMCell(
forward_only=forward_only,
input_size=input_size,
hidden_size=num_units,
forget_bias=0.0,
memory_optimization=False,
)
dropout_keep_prob = layer_config.get('dropout_keep_prob', None)
if dropout_keep_prob is not None:
                dropout_ratio = 1.0 - dropout_keep_prob
cell = rnn_cell.DropoutCell(
internal_cell=cell,
dropout_ratio=dropout_ratio,
forward_only=forward_only,
is_test=False,
name=get_layer_scope(scope, 'decoder_dropout', i),
)
decoder_cells.append(cell)
states = build_initial_rnn_decoder_states(
model=model,
encoder_units_per_layer=encoder_units_per_layer,
decoder_units_per_layer=decoder_units_per_layer,
final_encoder_hidden_states=final_encoder_hidden_states,
final_encoder_cell_states=final_encoder_cell_states,
use_attention=(attention_type != 'none'),
)
attention_decoder = LSTMWithAttentionDecoder(
encoder_outputs=encoder_outputs,
encoder_output_dim=encoder_units_per_layer[-1],
encoder_lengths=encoder_lengths,
vocab_size=vocab_size,
attention_type=attention_type,
embedding_size=embedding_size,
decoder_num_units=decoder_units_per_layer[-1],
decoder_cells=decoder_cells,
weighted_encoder_outputs=weighted_encoder_outputs,
name=scope,
)
decoder_outputs, _ = attention_decoder.apply_over_sequence(
model=model,
inputs=embedded_decoder_inputs,
seq_lengths=input_lengths,
initial_states=states,
)
# we do softmax over the whole sequence
# (max_length in the batch * batch_size) x decoder embedding size
# -1 because we don't know max_length yet
decoder_outputs_flattened, _ = model.net.Reshape(
[decoder_outputs],
[
'decoder_outputs_flattened',
'decoder_outputs_and_contexts_combination_old_shape',
],
shape=[-1, attention_decoder.get_output_dim()],
)
decoder_outputs = decoder_outputs_flattened
decoder_output_dim = attention_decoder.get_output_dim()
return (decoder_outputs, decoder_output_dim)
def output_projection(
model,
decoder_outputs,
decoder_output_size,
target_vocab_size,
decoder_softmax_size,
):
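    # Optional bottleneck: when decoder_softmax_size is given, project the
    # decoder outputs down to that size before the (potentially much larger)
    # vocabulary projection, reducing the softmax parameter count.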
if decoder_softmax_size is not None:
decoder_outputs = brew.fc(
model,
decoder_outputs,
'decoder_outputs_scaled',
dim_in=decoder_output_size,
dim_out=decoder_softmax_size,
)
decoder_output_size = decoder_softmax_size
output_projection_w = model.param_init_net.XavierFill(
[],
'output_projection_w',
shape=[target_vocab_size, decoder_output_size],
)
output_projection_b = model.param_init_net.XavierFill(
[],
'output_projection_b',
shape=[target_vocab_size],
)
model.params.extend([
output_projection_w,
output_projection_b,
])
output_logits = model.net.FC(
[
decoder_outputs,
output_projection_w,
output_projection_b,
],
['output_logits'],
)
return output_logits | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/models/seq2seq/seq2seq_util.py | 0.854308 | 0.346984 | seq2seq_util.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class LastNWindowCollector(ModelLayer):
"""
Collect last-N samples from input record. If you have complex data,
use PackRecords to pack it before using this layer.
This layer is not thread safe.
"""
def __init__(self, model, input_record, num_to_collect,
name='last_n_window_collector', **kwargs):
super(LastNWindowCollector, self).__init__(
model, name, input_record, **kwargs)
assert num_to_collect > 0
self.num_to_collect = num_to_collect
assert isinstance(input_record, schema.Scalar), \
"Got {!r}".format(input_record)
self.last_n = self.create_param(param_name='last_n',
shape=[0],
initializer=('ConstantFill', {}),
optimizer=model.NoOptim)
self.next_blob = self.create_param(
param_name='next',
shape=[],
initializer=('ConstantFill',
{'value': 0, 'dtype': core.DataType.INT32}),
optimizer=model.NoOptim
)
self.mutex = self.create_param(
param_name='mutex',
shape=None,
initializer=('CreateMutex',),
optimizer=model.NoOptim,
)
self.num_visited_blob = self.create_param(
param_name='num_visited',
shape=[],
initializer=('ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64,
}),
optimizer=model.NoOptim,
)
self.output_schema = schema.Struct(
(
'last_n',
schema.from_blob_list(input_record, [self.last_n])
),
('num_visited', schema.Scalar(blob=self.num_visited_blob)),
('mutex', schema.Scalar(blob=self.mutex)),
)
def add_ops(self, net):
net.LastNWindowCollector(
[self.last_n, self.next_blob, self.input_record(), self.mutex,
self.num_visited_blob],
[self.last_n, self.next_blob, self.num_visited_blob],
num_to_collect=self.num_to_collect,
) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/last_n_window_collector.py | 0.795936 | 0.261679 | last_n_window_collector.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
from caffe2.python import context
@context.define_context(allow_default=True)
class TagContext(object):
"""
Scope driven way to provide tags to the layers.
"""
def __init__(self, tags=None):
# Tags is expected to be list to keep order of adding/removing things
self.tags = tags or []
def add_tags(self, tags):
self.tags.extend(tags)
def remove_tags(self, tags):
assert self.tags[-len(tags):] == tags
self.tags = self.tags[:-len(tags)]
class Tags(object):
# TODO(amalevich): Tags might need to live in their own contexts, add this
# split later
EXCLUDE_FROM_TRAIN = 'exclude_from_train'
EXCLUDE_FROM_EVAL = 'exclude_from_eval'
EXCLUDE_FROM_PREDICTION = 'exclude_from_prediction'
EXCLUDE_FROM_ACCUMULATE_PRED = 'exclude_from_accumulate_pred'
PREPROCESSING = 'preprocessing'
HANDLE_AS_SPARSE_LAYER = 'handle_as_sparse_layer'
PREFER_GPU = 'prefer_gpu'
CPU_ONLY = 'cpu_only'
LOCAL = 'local'
# The following three tags are hints to **distributed training framework**.
"""
Indicates a layer contains a sparse shardable parameter. The parameter
    should be sharded, and operators on those parameters should be run on
distributed parameter servers.
"""
SPARSE_SHARDED = 'sparse_sharded'
"""
    Indicates a layer contains sparse parameters among others, and that those
parameters should not be sharded (i.e. should be placed together on a node).
"""
SPARSE_DONT_SHARD = 'sparse_dont_shard'
"""
Used to manually indicate a component for an operator. Parameters for
all operators with the same component should be colocated on the same
parameter server.
"""
COMPONENT = 'component:'
"""
Valid tag prefixes for distributed training framework.
"""
"""
Used to pass on info to the 'extra_info' field in the net
Proto. Typically to provide info for distributed training.
"""
EXTRA_INFO = 'extra_info:'
"""
    An empty tag, used to make conditional statements in with-Tags blocks more concise
"""
EMPTY_TAG = 'empty_tag'
DT_TAGS = (SPARSE_SHARDED, SPARSE_DONT_SHARD, COMPONENT)
# In certain cases we want to have different schema for training and
# prediction, as an example in prediction we might need to have only
# subset of ids present in the original schema. This tag is one of the ways
# to mark operators that will be removed from prediction and should
# override schema for predictors.
PREDICTION_SCHEMA = 'prediction_schema'
# This is to mark layers in the feature transform process.
FEATURE_TRANSFORM = 'feature_transform'
# This is to mark the output layers in the feature transform process
FEATURE_TRANSFORM_SCHEMA = 'feature_transform_schema'
def __init__(self, tags):
if not isinstance(tags, list):
tags = [tags]
self.tags = tags
def __enter__(self):
TagContext.current().add_tags(self.tags)
return self
def __exit__(self, type, value, traceback):
TagContext.current().remove_tags(self.tags)
def __call__(self, func):
@six.wraps(func)
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
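# Usage sketch: Tags works both as a context manager and as a decorator, e.g.
#   with Tags(Tags.EXCLUDE_FROM_PREDICTION):
#       ...  # layers built here pick up the tag via TagContext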
Tags.TRAIN_ONLY = [Tags.EXCLUDE_FROM_PREDICTION, Tags.EXCLUDE_FROM_EVAL,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
Tags.EVAL_ONLY = [Tags.EXCLUDE_FROM_PREDICTION, Tags.EXCLUDE_FROM_TRAIN,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
Tags.PREDICTION_ONLY = [Tags.EXCLUDE_FROM_TRAIN, Tags.EXCLUDE_FROM_EVAL,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED] | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/tags.py | 0.548432 | 0.300357 | tags.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
ModelLayer,
)
from caffe2.python.layers.tags import Tags
logger = logging.getLogger(__name__)
class PositionWeighted(ModelLayer):
def __init__(self, model, input_record, weight_optim=None,
name="position_weights"):
super(PositionWeighted, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.List), "Incorrect input type"
length_metadata = input_record.lengths.metadata
max_length = (length_metadata.categorical_limit if length_metadata is
not None else None)
if max_length is not None:
self.shape = max_length
else:
self.shape = get_categorical_limit(input_record)
logger.warning(
'{}: categorical_limit of lengths is not available, using '
'categorical_limit of the keys: {}'.format(
str(input_record.lengths()), self.shape))
self.pos_w = self.create_param(param_name='pos_w',
shape=[self.shape, ],
initializer=('ConstantFill', {'value': 1.0}),
optimizer=weight_optim)
self.output_schema = schema.Struct(
('position_weights',
schema.Scalar((np.float32, self.shape),
self.get_next_blob_reference("pos_w_gather")))
)
self.tags.update({Tags.HANDLE_AS_SPARSE_LAYER})
def get_memory_usage(self):
return self.shape
def add_ops(self, net):
inc_seq = net.LengthsRangeFill(
[self.input_record.lengths()],
self.input_record.lengths() + '_pos_w_seq'
)
net.Gather(
[self.pos_w, inc_seq],
self.output_schema.position_weights.field_blobs()) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/position_weighted.py | 0.829216 | 0.162979 | position_weighted.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema, scope, workspace
from caffe2.python.layers.layers import (
ModelLayer,
)
import caffe2.proto.caffe2_pb2 as caffe2_pb2
import numpy as np
import six
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Functional(ModelLayer):
def __init__(self, model, input_record, output_names_or_num, function,
name='functional', output_dtypes=None, tags=None, **kwargs):
# allow coercion
input_record = schema.as_record(input_record)
super(Functional, self).__init__(model, name, input_record, tags=tags, **kwargs)
self._function = function
self._kwargs = kwargs
return_struct = (
isinstance(output_names_or_num, list) or
(isinstance(output_names_or_num, six.integer_types) and
output_names_or_num != 1)
)
with scope.NameScope(self.name, reset=True):
if isinstance(output_names_or_num, int):
struct_output_schema = schema.NewRecord(
model.net, schema.RawTuple(output_names_or_num))
elif isinstance(output_names_or_num, schema.Field):
self.output_schema = output_names_or_num.clone(keep_blobs=True)
return
else:
if not isinstance(output_names_or_num, list):
output_names_or_num = [output_names_or_num]
out_tuple = [(out, np.void) for out in output_names_or_num]
struct_output_schema = schema.NewRecord(
model.net, schema.Struct(*out_tuple))
num_outputs = len(struct_output_schema.field_blobs())
        # A functional layer returns a Struct if there is more than one output
        # or the output is a list; otherwise it returns a Scalar.
if return_struct:
self.output_schema = struct_output_schema
else:
self.output_schema = struct_output_schema[0]
# If output_dtypes is provided, use it for output schema. Otherwise
# the shape and type will be inferred.
if output_dtypes is not None:
if not isinstance(output_dtypes, list):
output_dtypes = [output_dtypes] * num_outputs
assert len(output_dtypes) == num_outputs
for dtype, scalar in zip(output_dtypes,
self.output_schema.all_scalars()):
scalar.set_type(dtype)
return
# Fake execution of the function to infer shapes and types automatically
had_issues = False
try:
type_net = core.Net('_temp_type_and_shape_inference_net')
schema.InitEmptyRecord(type_net, input_record, enforce_types=True)
function(type_net, self.input_record, self.output_schema, **kwargs)
(shapes, types) = workspace.InferShapesAndTypes([type_net], {})
for i in range(num_outputs):
scalar_schema = (self.output_schema[i] if return_struct
else self.output_schema)
blob = scalar_schema()
if blob not in types or blob not in shapes:
had_issues = True
continue
if shapes[blob] == []:
# Scalar type
shape = tuple()
elif shapes[blob][0] == 0:
shape = tuple(shapes[blob][1:])
else:
logger.warning("unexpected shape: {}".format(shapes[blob]))
# If batch dimension is not first - give up on shape
# inference for that blob
had_issues = True
continue
# TODO(amalevich): Move it to some shared library
dtype = None
if types[blob] == caffe2_pb2.TensorProto.DOUBLE:
dtype = (np.float64, shape)
elif types[blob] == caffe2_pb2.TensorProto.FLOAT:
dtype = (np.float32, shape)
elif types[blob] == caffe2_pb2.TensorProto.INT32:
dtype = (np.int32, shape)
elif types[blob] == caffe2_pb2.TensorProto.INT64:
dtype = (np.int64, shape)
elif types[blob] == caffe2_pb2.TensorProto.FLOAT16:
dtype = (np.float16, shape)
if dtype is not None:
scalar_schema.set_type(dtype)
except TypeError as ex:
had_issues = True
logger.warning(str(ex))
if had_issues:
logger.warning(
"Type inference had problems for layer: {}".format(self.name))
def add_ops(self, net):
self._function(
net, self.input_record, self.output_schema, **(self._kwargs)) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/functional.py | 0.712432 | 0.227491 | functional.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
import numpy as np
class Conv(ModelLayer):
"""
Convolutional layer
Input:
- input_record: at least has the shape info of C (num_channels)
- output_dim: number of convolutional filters
- kernel_h, kernel_w: kernel size for h and w
- stride_h, stride_w: stride for h and w
    - pad_b, pad_l, pad_r, pad_t: padding sizes; if stride == 1, a
    'None' value triggers auto padding
- order: either 'NHWC' or 'NCHW'
"""
def __init__(self, model, input_record, output_dim, kernel_h, kernel_w,
stride_h, stride_w, pad_b=None, pad_l=None, pad_r=None,
pad_t=None, order='NHWC', kernel_init=None, bias_init=None,
kernel_optim=None, bias_optim=None,
name='conv', **kwargs):
super(Conv, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
# input num_channels (C) is needed
input_dims = input_record.field_type().shape
assert (kernel_h > 0 and isinstance(kernel_h, int)), (
"kernel_h should be positive integer")
assert (kernel_w > 0 and isinstance(kernel_w, int)), (
"kernel_w should be positive integer")
self.kernel_h = kernel_h
self.kernel_w = kernel_w
assert (stride_h > 0 and isinstance(stride_h, int)), (
"stride_h should be positive integer")
assert (stride_w > 0 and isinstance(stride_w, int)), (
"stride_w should be positive integer")
self.stride_h = stride_h
self.stride_w = stride_w
        # output_dim calculation (http://cs231n.github.io/convolutional-networks/)
        # output_dim_w = (input_dim_w - kernel_w + pad_r + pad_l) / stride_w + 1
        # so auto padding (output_dim_w == input_dim_w) requires
        # pad_r + pad_l = (input_dim_w - 1) * stride_w - input_dim_w + kernel_w,
        # split evenly between the two sides; similarly for pad_t and pad_b
        # with kernel_h. Here we only do auto padding for the stride == 1 case,
        # where the total reduces to kernel - 1.
if stride_h == 1:
pad_t = int((kernel_h - 1) / 2) if pad_t is None else pad_t
pad_b = int((kernel_h - 1) / 2) if pad_b is None else pad_b
else:
pad_t = 0 if pad_t is None else pad_t
pad_b = 0 if pad_b is None else pad_b
if stride_w == 1:
pad_r = int((kernel_w - 1) / 2) if pad_r is None else pad_r
pad_l = int((kernel_w - 1) / 2) if pad_l is None else pad_l
else:
pad_r = 0 if pad_r is None else pad_r
pad_l = 0 if pad_l is None else pad_l
assert (pad_t >= 0 and isinstance(pad_t, int)), "pad_t should be int >= 0"
assert (pad_b >= 0 and isinstance(pad_b, int)), "pad_b should be int >= 0"
assert (pad_r >= 0 and isinstance(pad_r, int)), "pad_r should be int >= 0"
assert (pad_l >= 0 and isinstance(pad_l, int)), "pad_l should be int >= 0"
self.pad_t = pad_t
self.pad_b = pad_b
self.pad_r = pad_r
self.pad_l = pad_l
assert order in ['NHWC', 'NCHW'], "order should either 'NHWC' or 'NCHW'"
self.order = order
if order == 'NHWC':
input_c = input_dims[-1]
kernel_shape = [output_dim, kernel_h, kernel_w, input_c]
elif order == 'NCHW':
input_c = input_dims[0]
kernel_shape = [output_dim, input_c, kernel_h, kernel_w]
assert input_c > 0, (
"Number of input channels in conv parameters should be positive")
kernel_init = kernel_init if kernel_init else (
'XavierFill', {}
)
bias_init = bias_init if bias_init else (
'ConstantFill', {'value': 0.0}
)
self.kernel = self.create_param(
param_name='conv_kernel',
shape=kernel_shape,
initializer=kernel_init,
optimizer=kernel_optim,
)
self.bias = self.create_param(
param_name='conv_bias',
shape=[output_dim],
initializer=bias_init,
optimizer=bias_optim,
)
# the output_schema only has the num of output channels
# output_h and output_w would be inferred internally
self.output_schema = schema.Scalar(
(np.float32, (output_dim,)),
self.get_next_blob_reference('output')
)
def add_ops(self, net):
net.Conv(
self.input_record.field_blobs() + [self.kernel, self.bias],
self.output_schema.field_blobs(),
kernel_h=self.kernel_h,
kernel_w=self.kernel_w,
stride_h=self.stride_h,
stride_w=self.stride_w,
pad_t=self.pad_t,
pad_l=self.pad_l,
pad_b=self.pad_b,
pad_r=self.pad_r,
order=self.order
) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/conv.py | 0.899368 | 0.47098 | conv.py | pypi |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from collections import namedtuple
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, schema, scope, utils, workspace
from caffe2.python.layers.tags import TagContext
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Some types to simplify descriptions of things traveling between ops
IdList = schema.List(np.int64)
IdScoreList = schema.Map(np.int64, np.float32)
IdListWithEvicted = schema.ListWithEvicted(np.int64)
IdScoreListWithEvicted = schema.MapWithEvicted(np.int64, np.float32)
def almost_equal_schemas(
record,
original_schema,
check_field_names=True,
check_field_types=True,
check_field_metas=False,
):
if original_schema == IdList:
return schema.equal_schemas(
record,
IdList,
check_field_names=check_field_names,
check_field_types=check_field_types,
check_field_metas=check_field_metas,
) or schema.equal_schemas(
record,
IdListWithEvicted,
check_field_names=check_field_names,
check_field_types=check_field_types,
check_field_metas=check_field_metas,
)
elif original_schema == IdScoreList:
return schema.equal_schemas(
record,
IdScoreList,
check_field_names=check_field_names,
check_field_types=check_field_types,
check_field_metas=check_field_metas,
) or schema.equal_schemas(
record,
IdScoreListWithEvicted,
check_field_names=check_field_names,
check_field_types=check_field_types,
check_field_metas=check_field_metas,
)
else:
return schema.equal_schemas(record, original_schema)
def get_key(record):
if almost_equal_schemas(record, IdList):
key = "values"
elif almost_equal_schemas(
record, IdScoreList, check_field_types=False
):
key = "values:keys"
else:
raise NotImplementedError("Not implemented for {}".format(record))
assert record[key].metadata is not None, "Blob {} doesn't have metadata".format(
str(record[key]())
)
return record[key]
def get_categorical_limit(record):
key = get_key(record)
return key.metadata.categorical_limit
def get_avg_length(record):
return record["lengths"].metadata.expected_value
def set_request_only(field):
for f in field.all_scalars():
categorical_limit, expected_value = None, None
if not f.metadata:
feature_specs = schema.FeatureSpec(feature_is_request_only=True)
elif not f.metadata.feature_specs:
categorical_limit = f.metadata.categorical_limit
expected_value = f.metadata.expected_value
feature_specs = schema.FeatureSpec(feature_is_request_only=True)
else:
categorical_limit = f.metadata.categorical_limit
expected_value = f.metadata.expected_value
feature_specs = schema.FeatureSpec(
feature_type=f.metadata.feature_specs.feature_type,
feature_names=f.metadata.feature_specs.feature_names,
feature_ids=f.metadata.feature_specs.feature_ids,
feature_is_request_only=True,
desired_hash_size=f.metadata.feature_specs.desired_hash_size,
)
# make sure not to set categorical_limit for a non-integer field
if not np.issubdtype(f.field_type(), np.integer):
assert (
categorical_limit is None
), "categorical_limit shouldn't be set for no-integer field"
f.set_metadata(
schema.Metadata(
categorical_limit=categorical_limit,
expected_value=expected_value,
feature_specs=feature_specs,
)
)
class InstantiationContext(object):
"""
    List of contexts in which a layer can be instantiated
"""
    # Layers supporting this context will accumulate predictions, labels,
    # and weights. The accumulated data can later be used to compute
    # calibration or for other purposes.
ACCUMULATE_PRED = "accumulate_pred"
EVAL = "eval"
PREDICTION = "prediction"
TRAINING = "training"
_LAYER_REGISTRY = {}
def register_layer(name, layer):
assert name not in _LAYER_REGISTRY, "{0} already exists".format(name)
_LAYER_REGISTRY[name] = layer
def layer_exists(name):
return name in _LAYER_REGISTRY
def get_layer_class(name):
return _LAYER_REGISTRY[name]
def create_layer(layer_name, *args, **kwargs):
return _LAYER_REGISTRY[layer_name](*args, **kwargs)
LayerPsParam = namedtuple("LayerPsParam", ["sparse_key", "average_length"])
class LayerParameter(object):
def __init__(
self,
parameter=None,
optimizer=None,
initializer=None,
ps_param=None,
regularizer=None,
):
assert isinstance(
parameter, core.BlobReference
), "expect {0} to be a blob reference".format(str(parameter))
        # The shape attribute must be set before the initializer, since
        # the shape is updated whenever the initializer is (re)set.
self._shape = None
self.parameter = parameter
self.optimizer = optimizer
self.initializer = initializer
self.ps_param = ps_param
self.regularizer = regularizer
@property
def initializer(self):
return self._initializer
@initializer.setter
def initializer(self, op):
assert op is None or core.IsOperator(
getattr(op, "type", None)
), "initializer expects an operator, got type: {}".format(type(op))
self._initializer = op
if op is not None:
self.shape = self._infer_shape_from_initializer()
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, shape):
assert self.shape is None or self.shape == shape, (
"inconsistent shape for layer parameter:"
" {}, expect: {}, but got {}".format(self, self.shape, shape)
)
self._shape = shape
def _infer_shape_from_initializer(self):
for arg in self.initializer.arg:
if arg.name == "shape":
return list(arg.ints)
with workspace.WorkspaceGuard("model_init_by_loading_params"):
try:
net = core.Net("shape_checker")
net._net.op.extend([self.initializer])
shape_blob = net.NextScopedBlob(self.parameter + "_shape")
net.Shape([self.parameter], shape_blob)
workspace.RunNetOnce(net)
shape = workspace.FetchBlob(shape_blob).tolist()
# ResetWorkspace to save memory
workspace.ResetWorkspace()
return shape
except RuntimeError as exp:
logger.warning(
"Cannot infer the shape of blob {} from operator {}: {}".format(
self.parameter, self.initializer.type, exp
)
)
workspace.ResetWorkspace()
return None
def __str__(self):
return str(self.parameter)
def is_request_only_scalar(scalar):
if len(scalar.field_metadata()) == 0:
return False
for metadata in scalar.field_metadata():
if not (
metadata
and metadata.feature_specs
and getattr(metadata.feature_specs, "feature_is_request_only", False)
):
return False
return True
# Contains features accessed in a model layer of a given type
# type: A string representing the kind of feature, consistent with FeatureSpec
# ids: A set of feature IDs that are accessed in the model layer
AccessedFeatures = namedtuple("AccessedFeatures", ["type", "ids"])
class ModelLayer(object):
def __init__(
self,
model,
prefix,
input_record,
predict_input_record_fields=None,
tags=None,
**kwargs
):
"""
        Base class for model layers. A layer is an abstraction that lets you
        describe a model in terms of meta-operators, where each meta-operator
        can have different implementations for training, evaluation, and
        prediction that are instantiated later. For example, SampledSoftmax
        can do sampling-related work under supervision during training and
        simply apply softmax when used for prediction/evaluation.
        All layer inputs/outputs are represented as records (instances of a
        schema bound to blobs) and are accessible through input_record and
        output_schema. If a layer needs only a subset of its inputs, or
        provides only a subset of its outputs, during inference, it should
        provide predict_input_record and predict_output_schema correspondingly
        (those records are expected to be subsets of
        input_record/output_schema).
        Each layer has a set of Tags associated with it that depends on the
        current context and arguments; those tags can be used at instantiation
        time.
"""
self.name = model.next_layer_name(prefix)
self.model = model
self.kwargs = kwargs
self._input_record = input_record
if predict_input_record_fields:
if not isinstance(predict_input_record_fields, list):
predict_input_record_fields = [predict_input_record_fields]
self._predict_input_record = self._input_record[predict_input_record_fields]
else:
self._predict_input_record = None
self.request_only = True
if len(input_record.all_scalars()) == 0:
self.request_only = False
for scalar in input_record.all_scalars():
if not is_request_only_scalar(scalar):
self.request_only = False
break
self.precomputation_request_only = False
self.precomputation_object_only = False
self._output_schema = None
self._predict_output_schema = None
self.eval_output_schema = None
self.tags = set(tags or [])
self.tags.update(TagContext.current().tags)
self.params = []
self._export_output_for_metrics = False
self._export_params_for_metrics = False
def get_type(self):
return self.__class__.__name__
def _check_output_schema(self):
assert self._output_schema is not None, "Schema is not initialized"
assert self._predict_output_schema is None or schema.is_schema_subset(
self._predict_output_schema, self._output_schema
), "predict_output_schema is not a subset of the output_schema"
@property
def predict_input_record(self):
return self._predict_input_record or self._input_record
@property
def input_record(self):
return self._input_record
@property
def predict_output_schema(self):
self._check_output_schema()
return self._predict_output_schema or self._output_schema
@predict_output_schema.setter
def predict_output_schema(self, output_schema):
assert self._predict_output_schema is None
self._predict_output_schema = output_schema
@property
def output_schema(self):
if self.request_only:
set_request_only(self._output_schema)
self._check_output_schema()
return self._output_schema
@output_schema.setter
def output_schema(self, output_schema):
assert self._output_schema is None
self._output_schema = output_schema
def get_parameters(self):
return self.params
def get_fp16_compatible_parameters(self):
"""Return a subset of parameters which can be converted to fp16"""
return []
def get_memory_usage(self):
return 0
def get_accessed_features(self):
"""
Return a map from field to list of AccessedFeatures, the map should
contain all features accessed in the model layer
"""
return {}
def add_init_params(self, init_net):
"""
        Adds this layer's initialization operators to the passed net.
"""
for param in self.params:
# TODO(amalevich): Either return back to lambdas, that add
# all params (looks a bit safer and breaking less
# abstractions) or extend Net interface to this type of
# operations better
# TODO(xlwang) init_net._net.op has type google.protobuf.\
# internal.containers.RepeatedCompositeFieldContainer, but
# the version of protobuf in fbcode does not support append
# so extend is used
init_op = param.initializer
current_device_scope = scope.CurrentDeviceScope()
if not init_op:
continue
if not init_op.HasField("device_option") and current_device_scope:
init_op = caffe2_pb2.OperatorDef()
init_op.CopyFrom(param.initializer)
init_op.device_option.CopyFrom(current_device_scope)
# do not add duplicated init ops
if any(
utils.OpAlmostEqual(op, init_op, "debug_info")
for op in init_net._net.op
):
continue
init_net._net.op.extend([init_op])
def create_param(
self, param_name, shape, initializer, optimizer, ps_param=None, regularizer=None
):
with scope.NameScope(self.name, reset=True):
param = self.model.create_param(
param_name=param_name,
shape=shape,
initializer=initializer,
optimizer=optimizer,
ps_param=ps_param,
regularizer=regularizer,
)
# make sure we don't share parameters in the same layer
assert all(param.parameter != p.parameter for p in self.params)
self.params.append(param)
return param.parameter
def get_next_blob_reference(self, name):
with scope.NameScope(self.name, reset=True):
return self.model.net.NextScopedBlob(name)
def add_operators(self, net, init_net=None, context=InstantiationContext.TRAINING):
"""
        Adds this layer's training or initialization operators to the passed
        net. init_net can be None; add_init_params can also be called
        independently.
        """
        # The name scope below should guarantee that all intermediate blobs
        # are associated with the layer that produces them.
with scope.NameScope(self.name):
if context not in {
InstantiationContext.PREDICTION,
InstantiationContext.EVAL,
InstantiationContext.ACCUMULATE_PRED,
}:
assert init_net, "Only prediction and eval context don't need init_net"
if init_net:
self.add_init_params(init_net)
if context == InstantiationContext.TRAINING:
self.add_train_ops(net)
elif context == InstantiationContext.EVAL:
self.add_eval_ops(net)
elif context == InstantiationContext.ACCUMULATE_PRED:
self.add_ops_to_accumulate_pred(net)
else:
self.add_ops(net)
if (
context in {InstantiationContext.TRAINING, InstantiationContext.EVAL}
and self._export_params_for_metrics
):
self.add_param_copy_operators(net)
def add_ops(self, net):
# Predict layer implementation.
raise NotImplementedError
def add_eval_ops(self, net):
# Default eval layer implementation is completely matching
# predict layer implementation.
self.add_ops(net)
def add_train_ops(self, net):
# Default train layer implementation is completely matching
# eval layer implementation.
self.add_eval_ops(net)
def add_ops_to_accumulate_pred(self, net):
# This adds operators to accumulate predictions/labels/weights. The
# accumulated data can later be used to compute calibration or for other
# purpose. Default layer implementation is completely matching eval
# layer implementation.
self.add_eval_ops(net)
def add_param_copy_operators(self, net):
for param in self.params:
param_copy_ref = self.model.metrics_schema[str(param.parameter)]
net.Copy([param.parameter], param_copy_ref.field_blobs())
def export_output_for_metrics(self):
self._export_output_for_metrics = True
# Export output of the layer directly
export_name = self.name + "/output"
self.model.add_metric_field(export_name, self.output_schema)
def export_params_for_metrics(self):
self._export_params_for_metrics = True
# Export copies of parameters
for param in self.params:
param_copy_ref = self.get_next_blob_reference(
str(param).split("/")[-1] + "_copy"
)
self.model.add_metric_field(str(param.parameter), param_copy_ref) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/layers.py | 0.840619 | 0.258545 | layers.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
import logging
logger = logging.getLogger(__name__)
'''
Homotopy weighting between two terms x, y, computing:
alpha * x + beta * y
where alpha is a scalar parameter decreasing over the range [min, max]
(default [0, 1]), and alpha + beta = max + min, which means that beta is
increasing over the same range.
Homotopy methods first solve an "easy" problem (one to which the solution is
well known) that is gradually transformed into the target problem.
'''
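# Example: with min_weight=0 and max_weight=1, the output starts near
# 1.0 * x + 0.0 * y and drifts toward 0.0 * x + 1.0 * y as the iteration
# counter grows, following the 'inv' learning-rate schedule parametrized by
# half_life and quad_life.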
class HomotopyWeight(ModelLayer):
def __init__(
self,
model,
input_record,
name='homotopy_weight',
min_weight=0.,
max_weight=1.,
half_life=1e6,
quad_life=3e6,
atomic_iter=None,
**kwargs
):
super(HomotopyWeight,
self).__init__(model, name, input_record, **kwargs)
self.output_schema = schema.Scalar(
np.float32, self.get_next_blob_reference('homotopy_weight')
)
data = self.input_record.field_blobs()
assert len(data) == 2
self.x = data[0]
self.y = data[1]
# TODO: currently model building does not have access to iter counter or
# learning rate; it's added at optimization time;
self.use_external_iter = (atomic_iter is not None)
self.atomic_iter = (
atomic_iter if self.use_external_iter else self.create_atomic_iter()
)
# to map lr to [min, max]; alpha = scale * lr + offset
assert max_weight > min_weight
self.scale = float(max_weight - min_weight)
self.offset = self.model.add_global_constant(
'%s_offset_1dfloat' % self.name, float(min_weight)
)
self.gamma, self.power = self.solve_inv_lr_params(half_life, quad_life)
def solve_inv_lr_params(self, half_life, quad_life):
# ensure that the gamma, power is solvable
assert half_life > 0
# convex monotonically decreasing
assert quad_life > 2 * half_life
t = float(quad_life) / float(half_life)
x = t * (1.0 + np.sqrt(2.0)) / 2.0 - np.sqrt(2.0)
gamma = (x - 1.0) / float(half_life)
power = np.log(2.0) / np.log(x)
logger.info(
'homotopy_weighting: found lr param: gamma=%g, power=%g' %
(gamma, power)
)
return gamma, power
def create_atomic_iter(self):
self.mutex = self.create_param(
param_name=('%s_mutex' % self.name),
shape=None,
initializer=('CreateMutex', ),
optimizer=self.model.NoOptim,
)
self.atomic_iter = self.create_param(
param_name=('%s_atomic_iter' % self.name),
shape=[1],
initializer=(
'ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64
}
),
optimizer=self.model.NoOptim,
)
return self.atomic_iter
def update_weight(self, net):
alpha = net.NextScopedBlob('alpha')
beta = net.NextScopedBlob('beta')
lr = net.NextScopedBlob('lr')
comp_lr = net.NextScopedBlob('complementary_lr')
scaled_lr = net.NextScopedBlob('scaled_lr')
scaled_comp_lr = net.NextScopedBlob('scaled_complementary_lr')
if not self.use_external_iter:
net.AtomicIter([self.mutex, self.atomic_iter], [self.atomic_iter])
net.LearningRate(
[self.atomic_iter],
[lr],
policy='inv',
gamma=self.gamma,
power=self.power,
base_lr=1.0,
)
net.Sub([self.model.global_constants['ONE'], lr], [comp_lr])
net.Scale([lr], [scaled_lr], scale=self.scale)
net.Scale([comp_lr], [scaled_comp_lr], scale=self.scale)
net.Add([scaled_lr, self.offset], [alpha])
net.Add([scaled_comp_lr, self.offset], [beta])
return alpha, beta
def add_ops(self, net):
alpha, beta = self.update_weight(net)
# alpha x + beta y
net.WeightedSum([self.x, alpha, self.y, beta], self.output_schema()) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/homotopy_weight.py | 0.763836 | 0.413625 | homotopy_weight.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class RandomFourierFeatures(ModelLayer):
"""
Implementation of random fourier feature map for feature processing.
Applies sqrt(2 / output_dims) * cos(wx+b), where:
output_dims is the output feature dimensions, and
wx + b applies FC using randomized, fixed weight and bias parameters
For more information, see the original paper:
https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf
Inputs:
output_dims -- output feature dimensions
sigma -- bandwidth for the Gaussian kernel estimator
w_init -- initialization options for weight parameter
b_init -- initialization options for bias parameter
"""
def __init__(
self,
model,
input_record,
output_dims,
sigma, # bandwidth
w_init=None,
b_init=None,
name='random_fourier_features',
**kwargs):
super(RandomFourierFeatures, self).__init__(model, name, input_record,
**kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
input_dims = input_record.field_type().shape[0]
assert input_dims >= 1, "Expected input dimensions >= 1, got %s" \
% input_dims
self.output_dims = output_dims
assert self.output_dims >= 1, "Expected output dimensions >= 1, got %s" \
% self.output_dims
self.output_schema = schema.Scalar(
(np.float32, (self.output_dims, )),
self.get_next_blob_reference('output')
)
assert sigma > 0.0, "Expected bandwidth > 0, got %s" % sigma
# Initialize train_init_net parameters
w_init = w_init if w_init else (
'GaussianFill', {'mean': 0.0, 'std': 1.0 / sigma}
)
b_init = b_init if b_init else (
'UniformFill', {'min': 0.0, 'max': 2 * np.pi}
)
self.w = self.create_param(param_name='w',
shape=[self.output_dims, input_dims],
initializer=w_init,
optimizer=model.NoOptim)
self.b = self.create_param(param_name='b',
shape=[self.output_dims],
initializer=b_init,
optimizer=model.NoOptim)
def add_ops(self, net):
# Random features: wx + b
cosine_arg = net.FC(self.input_record.field_blobs() + [self.w, self.b],
net.NextScopedBlob("cosine_arg"))
# Apply cosine to new vectors
new_feature_vec = net.Cos([cosine_arg],
net.NextScopedBlob('new_feature_vec'))
# Multiply each element in vector by sqrt(2/D)
scale = np.sqrt(2.0 / self.output_dims)
net.Scale([new_feature_vec],
self.output_schema.field_blobs(),
scale=scale) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/random_fourier_features.py | 0.920012 | 0.411643 | random_fourier_features.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from caffe2.python import schema
from caffe2.python.layers.layers import (
InstantiationContext,
ModelLayer,
)
logger = logging.getLogger(__name__)
class SelectRecordByContext(ModelLayer):
"""
    Allows the model to follow different paths for each instantiation context
    and join again at a later point. The implementation uses `Alias` because
    schema sometimes clones fields internally, so we need a static blob name
    for the output.
"""
def __init__(
self,
model,
input_record,
name='select_record_by_context',
check_field_metas=True,
use_copy=False,
default_output_record_field=None,
**kwargs
):
super(SelectRecordByContext, self).__init__(model, name, input_record,
**kwargs)
assert isinstance(input_record, schema.Struct)
assert len(input_record) > 1
self.use_copy = use_copy
self.default_output_record = (
input_record[default_output_record_field]
if (default_output_record_field is not None) else None
)
ref_record = input_record[0]
for record in input_record:
assert schema.equal_schemas(record, ref_record,
check_field_metas=check_field_metas)
self.output_schema = schema.NewRecord(model.net, ref_record)
def _set_output_blobs(self, net, context):
record = self.input_record.get(context, self.default_output_record)
assert record is not None, (
"{} context is not in input record without providing default"
" output".format(context)
)
for in_blob, out_blob in zip(
record.field_blobs(), self.output_schema.field_blobs()
):
if self.use_copy:
net.Copy(in_blob, out_blob)
else:
net.Alias(in_blob, out_blob)
def add_ops(self, net):
self._set_output_blobs(net, InstantiationContext.PREDICTION)
def add_eval_ops(self, net):
self._set_output_blobs(net, InstantiationContext.EVAL)
def add_train_ops(self, net):
self._set_output_blobs(net, InstantiationContext.TRAINING)
def add_ops_to_accumulate_pred(self, net):
self._set_output_blobs(net, InstantiationContext.ACCUMULATE_PRED) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/select_record_by_context.py | 0.794026 | 0.192407 | select_record_by_context.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class UniformSampling(ModelLayer):
"""
    Uniformly samples `num_samples - len(input_record)` unique elements from
    the range [0, num_elements). `samples` is the concatenation of
    input_record and the newly drawn elements. input_record is expected to
    contain unique values.
"""
def __init__(
self,
model,
input_record,
num_samples,
num_elements,
name='uniform_sampling',
**kwargs
):
super(UniformSampling, self).__init__(
model, name, input_record, **kwargs
)
assert num_elements > num_samples > 0
assert isinstance(input_record, schema.Scalar)
self.num_elements = num_elements
num_examples_init = ('GivenTensorInt64Fill',
{'values': [num_samples]})
self.num_samples = self.create_param(param_name='num_examples',
shape=(1,),
initializer=num_examples_init,
optimizer=model.NoOptim)
sampling_blob_init = ('ConstantFill',
{'value': float(num_samples) / num_elements,
'dtype': core.DataType.FLOAT})
self.sampling_prob = self.create_param(param_name='prob',
shape=(num_samples,),
initializer=sampling_blob_init,
optimizer=model.NoOptim)
self.output_schema = schema.Struct(
(
'samples', schema.Scalar(
np.int32, self.get_next_blob_reference("samples")
)
),
('sampling_prob', schema.Scalar(np.float32, self.sampling_prob)),
)
def add_ops(self, net):
net.StopGradient(self.sampling_prob, self.sampling_prob)
shape = net.Shape([self.input_record()], net.NextScopedBlob("shape"))
shape = net.Sub([self.num_samples, shape], shape)
samples = net.UniqueUniformFill(
[shape, self.input_record()],
net.NextScopedBlob("samples_before_concat"),
min=0,
max=self.num_elements - 1,
input_as_shape=True
)
net.Concat(
[self.input_record(), samples],
[self.output_schema.samples(), net.NextScopedBlob("split_info")],
axis=0
)
net.StopGradient(
self.output_schema.samples(), self.output_schema.samples()
) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/uniform_sampling.py | 0.905327 | 0.416915 | uniform_sampling.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class LayerNormalization(ModelLayer):
def __init__(
self,
model,
input_record,
name='layer_normalization',
scale_optim=None,
bias_optim=None,
epsilon=1e-4,
axis=1,
use_layer_norm_op=True,
scale_init_value=1.0,
**kwargs
):
super(LayerNormalization, self).__init__(
model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), (
"Incorrect input type: {}".format(input_record))
self.input_shape = input_record.field_type().shape
self.axis = axis
assert len(self.input_shape) >= 1, (
"This layer supports only >= 2D tensors")
input_dims = self.input_shape[0]
self.output_schema = schema.Scalar(
(np.float32, self.input_shape),
self.get_next_blob_reference('output')
)
self.scale = self.create_param(param_name='scale',
shape=[input_dims],
initializer=('ConstantFill', {'value': scale_init_value}),
optimizer=scale_optim)
self.bias = self.create_param(param_name='bias',
shape=[input_dims],
initializer=('ConstantFill', {'value': 0.0}),
optimizer=bias_optim)
self.use_layer_norm_op = use_layer_norm_op
if self.use_layer_norm_op:
self.epsilon = epsilon
else:
assert len(self.input_shape) == 1, (
"When using alternative implementation, "
"input data can only be 2D"
)
self.epsilon = model.maybe_add_global_constant(
"%s_epsilon" % self.name, float(epsilon)
)
def add_ops_with_layer_norm_op(self, net):
input_blob = self.input_record.field_blobs()
ln_output = self.output_schema.field_blobs()
output_blobs = [net.NextScopedBlob('ln_output'), net.NextScopedBlob('ln_mean'),
net.NextScopedBlob('ln_stdev')]
normalized, mean, stdev = net.LayerNorm(input_blob,
output_blobs,
axis=self.axis,
epsilon=self.epsilon)
scaled = net.Mul(
[normalized, self.scale],
[net.NextScopedBlob('ln_scaled')],
broadcast=1,
axis=self.axis,
)
net.Add(
[scaled, self.bias],
ln_output,
broadcast=1,
axis=self.axis,
)
def add_ops_without_layer_norm_op(self, net):
        # This path differs from the fused LayerNorm op in two ways:
        # 1. it uses multiple primitive ops to reproduce LayerNorm's function
        # 2. it does not use legacy broadcast
ln_output = net.NextScopedBlob("ln_output")
ln_mean = net.NextScopedBlob("ln_mean")
ln_stdev = net.NextScopedBlob("ln_stdev")
ln_mean_arr = net.NextScopedBlob("ln_mean_arr")
net.ReduceBackMean(self.input_record.field_blobs(), [ln_mean_arr])
net.ExpandDims([ln_mean_arr], [ln_mean], dims=[1])
ln_centered = net.NextScopedBlob("ln_centered")
net.Sub(self.input_record.field_blobs() + [ln_mean], [ln_centered])
ln_sqr = net.NextScopedBlob("ln_sqr")
net.Sqr([ln_centered], [ln_sqr])
ln_sqr_mean = net.NextScopedBlob("ln_sqr_mean")
net.ReduceBackMean([ln_sqr], [ln_sqr_mean])
ln_var = net.NextScopedBlob("ln_var")
net.Add([ln_sqr_mean, self.epsilon], ln_var)
ln_std_arr = net.NextScopedBlob("ln_std_arr")
net.Pow([ln_var], [ln_std_arr], exponent=0.5)
net.ExpandDims([ln_std_arr], [ln_stdev], dims=[1])
net.Div([ln_centered, ln_stdev], [ln_output])
ln_scaled = net.NextScopedBlob("ln_scaled")
net.Mul([ln_output, self.scale], [ln_scaled])
net.Add([ln_scaled, self.bias], self.output_schema.field_blobs())
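    # The ops above compute, per input row x (a worked form of the same math):
    #   mean = reduce_mean(x)
    #   var  = reduce_mean((x - mean)^2)
    #   out  = scale * (x - mean) / sqrt(var + epsilon) + bias
    # Unlike the fused path, epsilon enters through a global-constant blob
    # added to the variance rather than through an op argument.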
def add_ops(self, net):
if self.use_layer_norm_op:
self.add_ops_with_layer_norm_op(net)
else:
self.add_ops_without_layer_norm_op(net) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/layer_normalization.py | 0.826397 | 0.310289 | layer_normalization.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema, core
from caffe2.python.layers.layers import (
ModelLayer,
IdList,
IdScoreList,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class SparseFeatureHash(ModelLayer):
def __init__(self, model, input_record, seed=0, modulo=None,
use_hashing=True, use_divide_mod=False, divisor=None, name='sparse_feature_hash', **kwargs):
super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)
assert use_hashing + use_divide_mod < 2, "use_hashing and use_divide_mod cannot be set true at the same time."
if use_divide_mod:
assert divisor >= 1, 'Unexpected divisor: {}'.format(divisor)
self.divisor = self.create_param(param_name='divisor',
shape=[1],
initializer=('GivenTensorInt64Fill', {'values': np.array([divisor])}),
optimizer=model.NoOptim)
self.seed = seed
self.use_hashing = use_hashing
self.use_divide_mod = use_divide_mod
if schema.equal_schemas(input_record, IdList):
self.modulo = modulo or self.extract_hash_size(input_record.items.metadata)
metadata = schema.Metadata(
categorical_limit=self.modulo,
feature_specs=input_record.items.metadata.feature_specs if input_record.items.metadata else None,
expected_value=input_record.items.metadata.expected_value if input_record.items.metadata else None
)
with core.NameScope(name):
self.output_schema = schema.NewRecord(model.net, IdList)
self.output_schema.items.set_metadata(metadata)
elif schema.equal_schemas(input_record, IdScoreList):
self.modulo = modulo or self.extract_hash_size(input_record.keys.metadata)
metadata = schema.Metadata(
categorical_limit=self.modulo,
feature_specs=input_record.keys.metadata.feature_specs,
expected_value=input_record.keys.metadata.expected_value
)
with core.NameScope(name):
self.output_schema = schema.NewRecord(model.net, IdScoreList)
self.output_schema.keys.set_metadata(metadata)
else:
assert False, "Input type must be one of (IdList, IdScoreList)"
assert self.modulo >= 1, 'Unexpected modulo: {}'.format(self.modulo)
if input_record.lengths.metadata:
self.output_schema.lengths.set_metadata(input_record.lengths.metadata)
# operators in this layer do not have CUDA implementation yet.
# In addition, since the sparse feature keys that we are hashing are
# typically on CPU originally, it makes sense to have this layer on CPU.
self.tags.update([Tags.CPU_ONLY])
def extract_hash_size(self, metadata):
if metadata.feature_specs and metadata.feature_specs.desired_hash_size:
return metadata.feature_specs.desired_hash_size
elif metadata.categorical_limit is not None:
return metadata.categorical_limit
else:
assert False, "desired_hash_size or categorical_limit must be set"
def add_ops(self, net):
net.Copy(
self.input_record.lengths(),
self.output_schema.lengths()
)
if schema.equal_schemas(self.output_schema, IdList):
input_blob = self.input_record.items()
output_blob = self.output_schema.items()
elif schema.equal_schemas(self.output_schema, IdScoreList):
input_blob = self.input_record.keys()
output_blob = self.output_schema.keys()
net.Copy(
self.input_record.values(),
self.output_schema.values()
)
else:
raise NotImplementedError()
if self.use_hashing:
net.IndexHash(
input_blob, output_blob, seed=self.seed, modulo=self.modulo
)
else:
if self.use_divide_mod:
quotient = net.Div([input_blob, self.divisor], [net.NextScopedBlob('quotient')])
net.Mod(
quotient, output_blob, divisor=self.modulo, sign_follow_divisor=True
)
else:
net.Mod(
input_blob, output_blob, divisor=self.modulo, sign_follow_divisor=True
) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/sparse_feature_hash.py | 0.851135 | 0.220468 | sparse_feature_hash.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
class BlobWeightedSum(ModelLayer):
"""
    This layer implements a weighted element-wise sum of its input blobs,
    with one scalar weight per blob.
"""
def __init__(
self,
model,
input_record,
init_weights=None,
weight_optim=None,
name='blob_weighted_sum',
**kwargs
):
super(BlobWeightedSum, self).__init__(model, name, input_record, **kwargs)
self.blobs = self.input_record.field_blobs()
self.num_weights = len(self.blobs)
assert self.num_weights > 1, (
"BlobWeightedSum expects more than one input blobs"
)
assert len(input_record.field_types()[0].shape) > 0, (
"BlobWeightedSum expects limited dimensions of the input tensor"
)
assert all(
input_record.field_types()[0].shape == input_record.field_types()[i].shape
for i in range(1, self.num_weights)
), "Shape of input blobs should be the same shape {}".format(
input_record.field_types()[0].shape
)
if init_weights:
assert self.num_weights == len(init_weights), (
"the size of init_weights should be the same as input blobs, "
"expects {}, got {}".format(self.num_weights, len(init_weights))
)
else:
init_weights = [1.0] * self.num_weights
self.weights = [
self.create_param(
param_name="w_{}".format(idx),
shape=[1],
initializer=('ConstantFill', {'value': float(init_weights[idx])}),
optimizer=weight_optim
) for idx in range(self.num_weights)
]
self.output_schema = schema.Scalar(
input_record.field_types()[0],
self.get_next_blob_reference('blob_weighted_sum_out')
)
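    # WeightedSum expects its inputs interleaved as [x0, w0, x1, w1, ...];
    # the zip/flatten in add_ops below produces exactly that layout, e.g.
    # for three inputs: [blob0, w_0, blob1, w_1, blob2, w_2].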
def add_ops(self, net):
net.WeightedSum(
[x for pair in zip(self.blobs, self.weights) for x in pair],
self.output_schema(),
grad_on_w=True,
) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/blob_weighted_sum.py | 0.902324 | 0.309604 | blob_weighted_sum.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
import math
import numpy as np
class FCWithoutBias(SamplingTrainableMixin, ModelLayer):
def __init__(
self,
model,
input_record,
output_dims,
weight_init=None,
weight_optim=None,
name='fc_without_bias',
uniform_weight_init_scale_numerator=1.0,
**kwargs
):
super(FCWithoutBias, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_types()[0].shape) > 0, (
"FCWithoutBias expects limited dimensions of the input tensor"
)
input_dims = input_record.field_types()[0].shape[0]
assert input_dims > 0, (
"FCWithoutBias expects input dimensions > 0, got {}".format(input_dims)
)
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
self.get_next_blob_reference('output')
)
scale = math.sqrt(uniform_weight_init_scale_numerator / input_dims)
weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale,
'max': scale}
)
self.w = self.create_param(param_name='w',
shape=[output_dims, input_dims],
initializer=weight_init,
optimizer=weight_optim)
def _add_ops(self, net, params):
net.MatMul(
self.input_record.field_blobs() + params,
self.output_schema.field_blobs(), trans_b=1, **self.kwargs
)
@property
def param_blobs(self):
return [self.w] | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/fc_without_bias.py | 0.846673 | 0.264299 | fc_without_bias.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class ArcCosineFeatureMap(ModelLayer):
"""
A general version of the arc-cosine kernel feature map (s = 1 restores
the original arc-cosine kernel feature map).
Applies H(x) * x^s, where H is the Heaviside step function and x is the
input after applying FC (such that x = w * x_orig + b).
For more information, see the original paper:
http://cseweb.ucsd.edu/~saul/papers/nips09_kernel.pdf
    Inputs:
output_dims -- dimensions of the output vector
s -- degree to raise transformed features
scale -- amount to scale the standard deviation
weight_init -- initialization distribution for weight parameter
    bias_init -- initialization distribution for bias parameter
weight_optim -- optimizer for weight params; None for random features
bias_optim -- optimizer for bias param; None for random features
set_weight_as_global_constant -- if True, initialized random parameters
will be constant across all distributed
instances of the layer
initialize_output_schema -- if True, initialize output schema as Scalar
from Arc Cosine; else output schema is None
"""
def __init__(
self,
model,
input_record,
output_dims,
s=1,
scale=1.0,
weight_init=None,
bias_init=None,
weight_optim=None,
bias_optim=None,
set_weight_as_global_constant=False,
initialize_output_schema=True,
name='arc_cosine_feature_map',
**kwargs):
super(ArcCosineFeatureMap, self).__init__(model, name, input_record,
**kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
self.params = []
self.model = model
self.set_weight_as_global_constant = set_weight_as_global_constant
self.input_dims = input_record.field_type().shape[0]
assert self.input_dims >= 1, "Expected input dimensions >= 1, got %s" \
% self.input_dims
if initialize_output_schema:
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
model.net.NextScopedBlob(name + '_output')
)
self.output_dims = output_dims
assert self.output_dims >= 1, "Expected output dimensions >= 1, got %s" \
% self.output_dims
self.s = s
assert (self.s >= 0), "Expected s >= 0, got %s" % self.s
assert isinstance(self.s, int), "Expected s to be type int, got type %s" \
% type(self.s)
assert (scale > 0.0), "Expected scale > 0, got %s" % scale
self.stddev = scale * np.sqrt(1.0 / self.input_dims)
# Initialize train_init_net parameters
# Random Parameters
if set_weight_as_global_constant:
w_init = np.random.normal(scale=self.stddev,
size=(self.output_dims, self.input_dims))
b_init = np.random.uniform(low=-0.5 * self.stddev,
high=0.5 * self.stddev,
size=self.output_dims)
self.random_w = self.model.add_global_constant(
name=self.name + "_fixed_rand_W",
array=w_init
)
self.random_b = self.model.add_global_constant(
name=self.name + "_fixed_rand_b",
array=b_init
)
else:
(self.random_w, self.random_b) = self._initialize_params(
'random_w',
'random_b',
w_init=weight_init,
b_init=bias_init,
w_optim=weight_optim,
b_optim=bias_optim
)
def _initialize_params(self, w_name, b_name, w_init=None, b_init=None,
w_optim=None, b_optim=None):
"""
Initializes the Layer Parameters for weight and bias terms for features
        Inputs:
        w_name -- name of the blob to contain w values
        b_name -- name of the blob to contain b values
w_init -- initialization distribution for weight parameter
b_init -- initialization distribution for bias parameter
w_optim -- optimizer to use for w; if None, then will use no optimizer
        b_optim -- optimizer to use for b; if None, then will use no optimizer
"""
w_init = w_init if w_init else (
'GaussianFill', {'mean': 0.0, 'std': self.stddev}
)
w_optim = w_optim if w_optim else self.model.NoOptim
b_init = b_init if b_init else (
'UniformFill', {'min': -0.5 * self.stddev, 'max': 0.5 * self.stddev}
)
b_optim = b_optim if b_optim else self.model.NoOptim
w_param = self.create_param(param_name=w_name,
shape=(self.output_dims, self.input_dims),
initializer=w_init,
optimizer=w_optim)
b_param = self.create_param(param_name=b_name,
shape=[self.output_dims],
initializer=b_init,
optimizer=b_optim)
return [w_param, b_param]
def _heaviside_with_power(self, net, input_features, output_blob, s):
"""
Applies Heaviside step function and Relu / exponentiation to features
depending on the value of s.
Inputs:
net -- net with operators
            input_features -- features to process
output_blob -- output blob reference
s -- degree to raise the transformed features
"""
if s == 0:
softsign_features = net.Softsign([input_features],
net.NextScopedBlob('softsign'))
return net.Relu(softsign_features, output_blob)
elif s == 1:
return net.Relu([input_features],
output_blob)
else:
relu_features = net.Relu([input_features],
net.NextScopedBlob('relu_rand'))
pow_features = net.Pow([input_features],
net.NextScopedBlob('pow_rand'),
exponent=float(s - 1))
return net.Mul([relu_features, pow_features],
output_blob)
def add_ops(self, net):
input_blob = self.input_record.field_blobs()
# Random features: wx + b
random_features = net.FC(input_blob + [self.random_w, self.random_b],
net.NextScopedBlob('random_features'))
# Process random features
self._heaviside_with_power(net,
random_features,
self.output_schema.field_blobs(),
self.s) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/arc_cosine_feature_map.py | 0.890244 | 0.351701 | arc_cosine_feature_map.py | pypi |
from __future__ import absolute_import, division, print_function, unicode_literals
import math
import numpy as np
from caffe2.python import core, schema
from caffe2.python.helpers.arg_scope import get_current_scope
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
def get_fc_predictor_version(fc_version):
assert fc_version in ["fp32"], (
"Only support fp32 for the fully connected layer "
"in the predictor net, the provided FC precision is {}".format(fc_version)
)
return fc_version
class FCWithBootstrap(SamplingTrainableMixin, ModelLayer):
def __init__(
self,
model,
input_record,
output_dims,
num_bootstrap,
weight_init=None,
bias_init=None,
weight_optim=None,
bias_optim=None,
name="fc_with_bootstrap",
weight_reg=None,
bias_reg=None,
clip_param=None,
axis=1,
**kwargs
):
super(FCWithBootstrap, self).__init__(model, name, input_record, **kwargs)
assert isinstance(
input_record, schema.Scalar
), "Incorrect input type {}".format(input_record)
assert (
len(input_record.field_types()[0].shape) > 0
), "FC expects limited dimensions of the input tensor"
        assert axis >= 1, "axis {} should be >= 1.".format(axis)
self.axis = axis
input_dims = np.prod(input_record.field_types()[0].shape[axis - 1 :])
assert input_dims > 0, "FC expects input dimensions > 0, got {}".format(
input_dims
)
self.clip_args = None
# attributes for bootstrapping below
self.num_bootstrap = num_bootstrap
# input dim shape
self.input_dims = input_dims
# bootstrapped fully-connected layers to be used in eval time
self.bootstrapped_FCs = []
# scalar containing batch_size blob so that we don't need to recompute
self.batch_size = None
# we want this to be the last FC, so the output_dim should be 1, set to None
self.output_dim_vec = None
# lower bound when creating random indices
self.lower_bound = None
# upper bound when creating random indices
self.upper_bound = None
if clip_param is not None:
assert len(clip_param) == 2, (
"clip_param must be a tuple / list "
"of length 2 and in the form of (clip_min, clip max)"
)
clip_min, clip_max = clip_param
assert (
clip_min is not None or clip_max is not None
), "clip_min, and clip_max in clip_param cannot both be None"
assert (
clip_min is None or clip_max is None
) or clip_min < clip_max, (
"clip_param = [clip_min, clip_max] must have clip_min < clip_max"
)
self.clip_args = {}
if clip_min is not None:
self.clip_args["min"] = clip_min
if clip_max is not None:
self.clip_args["max"] = clip_max
scale = math.sqrt(1.0 / input_dims)
weight_init = (
weight_init
if weight_init
else ("UniformFill", {"min": -scale, "max": scale})
)
bias_init = (
bias_init if bias_init else ("UniformFill", {"min": -scale, "max": scale})
)
"""
bootstrapped FCs:
Ex: [
bootstrapped_weights_blob_1, bootstrapped_bias_blob_1,
...,
...,
bootstrapped_weights_blob_b, bootstrapped_bias_blob_b
]
output_schema:
Note: indices will always be on even indices.
Ex: Struct(
indices_0_blob,
preds_0_blob,
...
...
indices_b_blob,
preds_b_blob
)
"""
output_schema = schema.Struct()
for i in range(num_bootstrap):
output_schema += schema.Struct(
(
"bootstrap_iteration_{}/indices".format(i),
self.get_next_blob_reference(
"bootstrap_iteration_{}/indices".format(i)
),
),
(
"bootstrap_iteration_{}/preds".format(i),
self.get_next_blob_reference(
"bootstrap_iteration_{}/preds".format(i)
),
),
)
self.bootstrapped_FCs.extend(
[
self.create_param(
param_name="bootstrap_iteration_{}/w".format(i),
shape=[output_dims, input_dims],
initializer=weight_init,
optimizer=weight_optim,
regularizer=weight_reg,
),
self.create_param(
param_name="bootstrap_iteration_{}/b".format(i),
shape=[output_dims],
initializer=bias_init,
optimizer=bias_optim,
regularizer=bias_reg,
),
]
)
self.output_schema = output_schema
if axis == 1:
output_shape = (output_dims,)
else:
output_shape = list(input_record.field_types()[0].shape)[0 : axis - 1]
output_shape = tuple(output_shape + [output_dims])
def _generate_bootstrapped_indices(self, net, copied_cur_layer, iteration):
"""
Args:
net: the caffe2 net to insert operator
copied_cur_layer: blob of the bootstrapped features (make sure this
blob has a stop_gradient on)
            iteration: the bootstrap iteration to generate for. Used to correctly
populate the output_schema
Return:
A blob containing the generated indices of shape: (batch_size,)
"""
with core.NameScope("bootstrap_iteration_{}".format(iteration)):
if iteration == 0:
# capture batch_size once for efficiency
input_shape = net.Shape(copied_cur_layer, "input_shape")
batch_size_index = net.Const(np.array([0]), "batch_size_index")
batch_size = net.Gather([input_shape, batch_size_index], "batch_size")
self.batch_size = batch_size
lower_bound = net.Const(np.array([0]), "lower_bound", dtype=np.int32)
offset = net.Const(np.array([1]), "offset", dtype=np.int32)
int_batch_size = net.Cast(
[self.batch_size], "int_batch_size", to=core.DataType.INT32
)
upper_bound = net.Sub([int_batch_size, offset], "upper_bound")
self.lower_bound = lower_bound
self.upper_bound = upper_bound
indices = net.UniformIntFill(
[self.batch_size, self.lower_bound, self.upper_bound],
self.output_schema[iteration * 2].field_blobs()[0],
input_as_shape=1,
)
return indices
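    # Sketch of the generated blob (hypothetical batch_size=4): indices is a
    # (4,)-shaped int tensor drawn uniformly from [0, 3], e.g. [2, 0, 2, 3].
    # This samples rows of the batch with replacement, which is what gives
    # each bootstrap iteration its own resampled view of the features.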
def _bootstrap_ops(self, net, copied_cur_layer, indices, iteration):
"""
This method contains all the bootstrapping logic used to bootstrap
the features. Only used by the train_net.
Args:
net: the caffe2 net to insert bootstrapping operators
copied_cur_layer: the blob representing the current features.
Note, this layer should have a stop_gradient on it.
Returns:
bootstrapped_features: blob of bootstrapped version of cur_layer
with same dimensions
"""
# draw features based upon the bootstrapped indices
bootstrapped_features = net.Gather(
[copied_cur_layer, indices],
net.NextScopedBlob("bootstrapped_features_{}".format(iteration)),
)
bootstrapped_features = schema.Scalar(
(np.float32, self.input_dims), bootstrapped_features
)
return bootstrapped_features
def _insert_fc_ops(self, net, features, params, outputs, version):
"""
Args:
net: the caffe2 net to insert operator
features: Scalar containing blob of the bootstrapped features or
actual cur_layer features
params: weight and bias for FC
outputs: the output blobs
version: support fp32 for now.
"""
if version == "fp32":
pred_blob = net.FC(
features.field_blobs() + params, outputs, axis=self.axis, **self.kwargs
)
return pred_blob
else:
raise Exception("unsupported FC type version {}".format(version))
def _add_ops(self, net, features, iteration, params, version):
"""
Args:
params: the weight and bias, passed by either add_ops or
add_train_ops function
features: feature blobs to predict on. Can be the actual cur_layer
or the bootstrapped_feature blobs.
version: currently fp32 support only
"""
if self.clip_args is not None:
clipped_params = [net.NextScopedBlob("clipped_%s" % str(p)) for p in params]
for p, cp in zip(params, clipped_params):
net.Clip([p], [cp], **self.clip_args)
params = clipped_params
if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
self._insert_fc_ops(
net=net,
features=features,
params=params,
outputs=[self.output_schema.field_blobs()[(iteration * 2) + 1]],
version=version,
)
def add_ops(self, net):
"""
Both the predict net and the eval net will call this function.
For bootstrapping approach, the goal is to pass the cur_layer feature
inputs through all the bootstrapped FCs that are stored under
self.bootstrapped_FCs. Return the preds in the same output_schema
with dummy indices (because they are not needed).
"""
version_info = get_current_scope().get(
get_fc_predictor_version.__name__, {"fc_version": "fp32"}
)
predictor_fc_fp_version = version_info["fc_version"]
for i in range(self.num_bootstrap):
# these are dummy indices, not to be used anywhere
indices = self._generate_bootstrapped_indices(
net=net,
copied_cur_layer=self.input_record.field_blobs()[0],
iteration=i,
)
params = self.bootstrapped_FCs[i * 2 : (i * 2) + 2]
self._add_ops(
net=net,
features=self.input_record,
params=params,
iteration=i,
version=predictor_fc_fp_version,
)
def add_train_ops(self, net):
# use the train_param_blobs to be consistent with the SamplingTrain unittest
# obtain features
for i in range(self.num_bootstrap):
indices = self._generate_bootstrapped_indices(
net=net,
copied_cur_layer=self.input_record.field_blobs()[0],
iteration=i,
)
bootstrapped_features = self._bootstrap_ops(
net=net,
copied_cur_layer=self.input_record.field_blobs()[0],
indices=indices,
iteration=i,
)
self._add_ops(
net,
features=bootstrapped_features,
iteration=i,
params=self.train_param_blobs[i * 2 : (i * 2) + 2],
version="fp32",
)
def get_fp16_compatible_parameters(self):
if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
return [
blob for idx, blob in enumerate(self.bootstrapped_FCs) if idx % 2 == 0
]
else:
raise Exception(
"Currently only supports functionality for output_dim_vec == 1"
)
@property
def param_blobs(self):
if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
return self.bootstrapped_FCs
else:
raise Exception("FCWithBootstrap layer only supports output_dim_vec==1") | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/fc_with_bootstrap.py | 0.918718 | 0.365428 | fc_with_bootstrap.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
class PairwiseSimilarity(ModelLayer):
def __init__(self, model, input_record, output_dim, pairwise_similarity_func='dot',
name='pairwise_similarity', **kwargs):
super(PairwiseSimilarity, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Struct), (
"Incorrect input type. Expected Struct, but received: {0}".
format(input_record))
assert (
('all_embeddings' in input_record) ^
('x_embeddings' in input_record and 'y_embeddings' in input_record)
), (
"either (all_embeddings) xor (x_embeddings and y_embeddings) " +
"should be given."
)
self.pairwise_similarity_func = pairwise_similarity_func
if 'all_embeddings' in input_record:
x_embeddings = input_record['all_embeddings']
y_embeddings = input_record['all_embeddings']
else:
x_embeddings = input_record['x_embeddings']
y_embeddings = input_record['y_embeddings']
assert isinstance(x_embeddings, schema.Scalar), (
"Incorrect input type for x. Expected Scalar, " +
"but received: {0}".format(x_embeddings))
assert isinstance(y_embeddings, schema.Scalar), (
"Incorrect input type for y. Expected Scalar, " +
"but received: {0}".format(y_embeddings)
)
if 'indices_to_gather' in input_record:
indices_to_gather = input_record['indices_to_gather']
assert isinstance(indices_to_gather, schema.Scalar), (
"Incorrect type of indices_to_gather. "
"Expected Scalar, but received: {0}".format(indices_to_gather)
)
self.indices_to_gather = indices_to_gather
else:
self.indices_to_gather = None
self.x_embeddings = x_embeddings
self.y_embeddings = y_embeddings
dtype = x_embeddings.field_types()[0].base
self.output_schema = schema.Scalar(
(dtype, (output_dim,)),
self.get_next_blob_reference('output')
)
def add_ops(self, net):
if self.pairwise_similarity_func == "cosine_similarity":
x_embeddings_norm = net.Normalize(self.x_embeddings(), axis=1)
y_embeddings_norm = net.Normalize(self.y_embeddings(), axis=1)
Y = net.BatchMatMul(
[x_embeddings_norm, y_embeddings_norm],
[self.get_next_blob_reference(x_embeddings_norm + '_matmul')],
trans_b=1,
)
elif self.pairwise_similarity_func == "dot":
Y = net.BatchMatMul(
[self.x_embeddings(), self.y_embeddings()],
[self.get_next_blob_reference(self.x_embeddings() + '_matmul')],
trans_b=1,
)
else:
raise NotImplementedError(
"pairwise_similarity_func={} is not valid".format(
self.pairwise_similarity_func
)
)
if self.indices_to_gather:
flattened = net.Flatten(
Y, Y + '_flatten',
)
net.BatchGather(
[flattened, self.indices_to_gather()],
self.output_schema(),
)
else:
net.Flatten(Y, self.output_schema()) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/pairwise_similarity.py | 0.796767 | 0.352773 | pairwise_similarity.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class MapToRange(ModelLayer):
"""
This layer aims to build a mapping from raw keys to indices within [0, max_index).
The mapping is continuously built during training. The mapping will be frozen during
evaluation and prediction. Unseen keys will be assigned to index 0.
"""
def __init__(
self, model,
input_record,
max_index,
name='map_to_range',
**kwargs
):
super(MapToRange, self).__init__(model, name, input_record, **kwargs)
assert max_index > 0
assert isinstance(input_record, schema.Scalar)
self.max_index = max_index
self.handler = self.create_param(
param_name='handler',
shape=None,
initializer=('LongIndexCreate', {'max_elements': self.max_index}),
optimizer=model.NoOptim
)
self.output_schema = schema.Struct(
('indices', schema.Scalar(
np.int64, self.get_next_blob_reference("indices")
)),
('handler', schema.Scalar(
np.void, self.handler
)),
)
def add_train_ops(self, net):
if self.input_record.field_type().base != np.int64:
keys = net.Cast(
self.input_record(),
net.NextScopedBlob("indices_before_mapping"),
to=core.DataType.INT64
)
else:
keys = self.input_record()
# Load keys into indices
indices = net.IndexGet([self.handler, keys],
self.output_schema.indices())
net.StopGradient(indices, indices)
def add_eval_ops(self, net):
net.IndexFreeze(self.handler, self.handler)
self.add_train_ops(net)
def add_ops(self, net):
self.add_eval_ops(net) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/build_index.py | 0.844168 | 0.276143 | build_index.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from future.utils import viewitems
import numpy as np
from collections import defaultdict
import logging
logger = logging.getLogger(__name__)
def get_concatenated_feature_to_index(blobs_to_concat):
concat_feature_to_index = defaultdict(list)
start_pos = 0
for scalar in blobs_to_concat:
num_dims = scalar.dtype.shape[0]
if hasattr(scalar, 'metadata') \
and hasattr(scalar.metadata, 'feature_specs') \
and hasattr(scalar.metadata.feature_specs, 'feature_to_index') \
and isinstance(scalar.metadata.feature_specs.feature_to_index, dict): # noqa B950
for k, v in scalar.metadata.feature_specs.feature_to_index.items():
concat_feature_to_index[k].extend([start_pos + vi for vi in v])
start_pos += num_dims
return dict(concat_feature_to_index) if concat_feature_to_index.keys() else None
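# Merging sketch (hypothetical metadata): if blob A (4 dims) maps feature
# 'f1' to [0, 1] and blob B (2 dims) maps 'f1' to [0], the concatenated
# record maps 'f1' to [0, 1, 4] -- B's index is shifted by A's width.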
class Concat(ModelLayer):
"""
Construct Concat layer
Assume that first dimension is batch,
Example:
embedding_dim = 64
input_record = self.new_record(schema.Struct(
('input1', schema.Scalar((np.float32, (embedding_dim, )))),
('input2', schema.Scalar((np.float32, (embedding_dim, )))),
('input3', schema.Scalar((np.float32, (embedding_dim, )))),
))
output = self.model.Concat(input_record)
self.assertEqual(
schema.Scalar((np.float32, ((len(input_record.fields) * embedding_dim, )))),
output
)
# Note that in Concat layer we assume first dimension is batch.
# so input is B * embedding_dim
# add_axis=1 make it B * 1 * embedding_dim
# Concat on axis=1 make it B * N * embedding_dim
output = self.model.Concat(input_record, axis=1, add_axis=1)
self.assertEqual(
schema.Scalar((np.float32, ((len(input_record.fields), embedding_dim)))),
output
)
"""
def __init__(self, model, input_record, axis=1, add_axis=0,
name='concat', **kwargs):
super(Concat, self).__init__(model, name, input_record, **kwargs)
self.axis = axis
self.add_axis = add_axis
assert not (axis == 0 and add_axis == 1), \
"It's not allowed to add axis=0"
assert isinstance(input_record, schema.Struct),\
"Incorrect input type. Expected Struct, but received: {0}".\
format(input_record)
shapes = []
for field_name, field_type in viewitems(input_record.fields):
assert isinstance(field_type, schema.Scalar),\
"Incorrect input type for {}. Expected Scalar, but got: {}".\
format(field_name, field_type)
# Assume that first dimension is batch, so actual axis in shape is
# axis - 1
shape = list(field_type.field_type().shape)
if add_axis:
shape.insert(axis - 1, 1)
            assert len(shape) >= axis,\
                "Concat expects the input tensor to have at least {} dimensions (batch excluded)".format(axis)
shapes.append(shape)
logger.info('Concat Layer input shapes: ' + str(shapes))
if axis == 0:
self.output_schema = schema.from_blob_list(
input_record[0],
[self.get_next_blob_reference('output')]
)
return
concat_dim = 0
for shape in shapes:
concat_dim += shape[axis - 1]
shape[axis - 1] = 0
assert shape == shapes[0],\
"Shapes {0} and {1} are not compatible for Concat".\
format(shape, shapes[0])
output_dims = shapes[0]
output_dims[axis - 1] = concat_dim
logger.info('Concat Layer output_dims: ' + str(output_dims))
self.output_schema = schema.Scalar(
(np.float32, output_dims),
self.get_next_blob_reference('output'))
record_to_concat = input_record.fields.values()
concated_feature_to_index = get_concatenated_feature_to_index(
record_to_concat
)
if concated_feature_to_index:
metadata = schema.Metadata(
feature_specs=schema.FeatureSpec(
feature_to_index=concated_feature_to_index
)
)
self.output_schema.set_metadata(metadata)
def add_ops(self, net):
net.Concat(
self.input_record.field_blobs(),
[
self.output_schema.field_blobs()[0],
self.output_schema.field_blobs()[0] + "_concat_dims"
],
axis=self.axis,
add_axis=self.add_axis,
) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/concat.py | 0.876278 | 0.301928 | concat.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class BatchSoftmaxLoss(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_softmax_loss',
label_smoothing_matrix=None,
label_prob=False,
scale=1.0,
average_by_batch_size=False,
**kwargs
):
super(BatchSoftmaxLoss, self).__init__(
model, name, input_record, **kwargs)
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar()),
('prediction', schema.Scalar()),
),
input_record
)
self.label_prob = label_prob
self.scale = scale
self.average_by_batch_size = average_by_batch_size
        # label smoothing matrix: a K x K matrix where K is the label
        # cardinality; the (i, j) element is the probability mass assigned
        # to label j when the ground-truth label is i
self.label_smoothing_matrix = label_smoothing_matrix
if self.label_smoothing_matrix is not None:
self.initialize_label_smoothing_constants()
self.output_schema = schema.Struct(
(
'softmax', schema.Scalar(
input_record.prediction.field_type(),
self.get_next_blob_reference('softmax')
)
),
(
'loss', schema.Scalar(
np.float32, self.get_next_blob_reference('loss')
)
),
)
def initialize_label_smoothing_constants(self):
assert self.label_smoothing_matrix is not None
self.label_smoothing_matrix = np.array(
self.label_smoothing_matrix).astype(np.float32)
assert len(self.label_smoothing_matrix.shape) == 2
label_dim = self.label_smoothing_matrix.shape[0]
assert label_dim == self.label_smoothing_matrix.shape[1]
self.label_smoothing_matrix = self.model.add_global_constant(
'%s_label_smoothing_matrix' % self.name,
array=self.label_smoothing_matrix,
dtype=np.dtype(np.float32),
)
self.label_dim = self.model.add_global_constant(
'%s_label_dim' % self.name,
array=label_dim,
dtype=np.dtype(np.int64),
)
        # By default the label is given as a class index, NOT as a target
        # distribution; with label smoothing, the label must be expressed
        # as probabilities.
self.label_prob = True
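    # Example smoothing matrix (hypothetical, K=3): keep each label with
    # probability 0.9 and spread 0.05 onto each other class:
    #   [[0.90, 0.05, 0.05],
    #    [0.05, 0.90, 0.05],
    #    [0.05, 0.05, 0.90]]
    # OneHot(label) matmul this matrix then yields the smoothed target
    # distribution used by compute_smoothed_label below.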
def compute_smoothed_label(self, net):
assert self.label_smoothing_matrix is not None
label = self.input_record.label()
original_label_type = self.input_record.label.field_type()
if original_label_type.base != np.int64:
int64_label = net.NextScopedBlob('int64_label')
net.Cast([label], [int64_label], to=core.DataType.INT64)
else:
int64_label = label
one_hot_label = net.NextScopedBlob('one_hot_label')
smoothed_label = net.NextScopedBlob('smoothed_label')
net.OneHot([int64_label, self.label_dim], [one_hot_label])
net.MatMul([one_hot_label, self.label_smoothing_matrix], smoothed_label)
return smoothed_label
def add_ops(self, net):
label = self.input_record.label.field_blobs()
if self.label_smoothing_matrix is not None:
label = [self.compute_smoothed_label(net)]
elif not self.label_prob:
if self.input_record.label.field_types()[0].base != np.int32:
label = [
net.Cast(label,
net.NextScopedBlob('int32_label'),
to=core.DataType.INT32)
]
softmax_input = self.input_record.prediction.field_blobs() + label
if 'weight' in self.input_record:
weight_blob = self.input_record.weight()
if self.input_record.weight.field_type().base != np.float32:
weight_blob = net.Cast(
weight_blob,
weight_blob + '_float32',
to=core.DataType.FLOAT
)
softmax_input += [weight_blob]
net.SoftmaxWithLoss(
softmax_input,
self.output_schema.field_blobs(),
label_prob=self.label_prob,
scale=self.scale,
average_by_batch_size=self.average_by_batch_size,
) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/batch_softmax_loss.py | 0.92222 | 0.275557 | batch_softmax_loss.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
class Split(ModelLayer):
def __init__(self, model, input_record, num_splits=1, axis=1,
name='split', split=None, **kwargs):
super(Split, self).__init__(model, name, input_record, **kwargs)
self.axis = axis
# Assume that first dimension is batch, so actual axis in shape is
# axis - 1
axis -= 1
assert axis >= 0
assert isinstance(input_record, schema.Scalar),\
"Incorrect input type. Expected Scalar, but received: {0}".\
format(input_record)
input_shape = input_record.field_type().shape
assert len(input_shape) >= axis
if split is None:
assert input_shape[axis] % num_splits == 0
else:
num_splits = len(split)
assert input_shape[axis] == sum(split)
if split is None:
output_shape = list(input_shape)
output_shape[axis] = int(output_shape[axis] / num_splits)
else:
output_shape = []
for i in range(num_splits):
output_shape_i = list(input_shape)
output_shape_i[axis] = split[i]
output_shape.append(output_shape_i)
data_type = input_record.field_type().base
if split is None:
output_scalars = [
schema.Scalar(
(data_type, output_shape),
self.get_next_blob_reference('output_{}'.format(i)),
)
for i in range(num_splits)
]
else:
output_scalars = [
schema.Scalar(
(data_type, output_shape[i]),
self.get_next_blob_reference('output_{}'.format(i)),
)
for i in range(num_splits)
]
self.output_schema = schema.Tuple(*output_scalars)
self.split = split
def add_ops(self, net):
net.Split(
self.input_record.field_blobs(),
self.output_schema.field_blobs(),
split=self.split,
axis=self.axis,
) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/split.py | 0.661158 | 0.287727 | split.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema, core
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class MarginRankLoss(ModelLayer):
def __init__(self, model, input_record, name='margin_rank_loss',
margin=0.1, average_loss=False, **kwargs):
super(MarginRankLoss, self).__init__(model, name, input_record, **kwargs)
assert margin >= 0, ('For hinge loss, margin should be no less than 0')
self._margin = margin
self._average_loss = average_loss
assert schema.is_schema_subset(
schema.Struct(
('pos_prediction', schema.Scalar()),
('neg_prediction', schema.List(np.float32)),
),
input_record
)
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
np.float32,
self.get_next_blob_reference('output'))
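    # MarginRankingCriterion with label y=1 computes, per (pos, neg) pair:
    #   loss = max(0, -(pos_score - neg_score) + margin)
    # i.e. the loss is zero once pos_score exceeds neg_score by at least
    # margin (a worked form of the hinge used in add_ops below).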
def add_ops(self, net):
neg_score = self.input_record.neg_prediction['values']()
pos_score = net.LengthsTile(
[
self.input_record.pos_prediction(),
self.input_record.neg_prediction['lengths']()
],
net.NextScopedBlob('pos_score_repeated')
)
const_1 = net.ConstantFill(
neg_score,
net.NextScopedBlob('const_1'),
value=1,
dtype=core.DataType.INT32
)
rank_loss = net.MarginRankingCriterion(
[pos_score, neg_score, const_1],
net.NextScopedBlob('rank_loss'),
margin=self._margin,
)
if self._average_loss:
net.AveragedLoss(rank_loss, self.output_schema.field_blobs())
else:
net.ReduceFrontSum(rank_loss, self.output_schema.field_blobs()) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/margin_rank_loss.py | 0.857171 | 0.232986 | margin_rank_loss.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.helpers.arg_scope import get_current_scope
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
import math
import numpy as np
def get_fc_predictor_version(fc_version):
assert fc_version in ["fp32", "fp16"], (
"Only support fp32 and fp16 for the fully connected layer "
"in the predictor net, the provided FC precision is {}".format(fc_version)
)
return fc_version
class FC(SamplingTrainableMixin, ModelLayer):
def __init__(self, model, input_record, output_dims, weight_init=None,
bias_init=None, weight_optim=None, bias_optim=None, name='fc',
weight_reg=None, bias_reg=None, clip_param=None,
max_fc_size=None, axis=1, transposed=False,
uniform_weight_init_scale_numerator=1.0,
**kwargs):
super(FC, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), (
"Incorrect input type {}".format(input_record))
assert len(input_record.field_types()[0].shape) > 0, (
"FC expects limited dimensions of the input tensor")
        assert axis >= 1, "axis {} should be >= 1.".format(axis)
self.axis = axis
input_dims = np.prod(input_record.field_types()[0].shape[axis - 1:])
assert input_dims > 0, (
"FC expects input dimensions > 0, got {}".format(input_dims))
self.clip_args = None
if (clip_param is not None):
assert len(clip_param) == 2, (
'clip_param must be a tuple / list '
                'of length 2 and in the form of (clip_min, clip_max)'
)
clip_min, clip_max = clip_param
assert clip_min is not None or clip_max is not None, (
'clip_min, and clip_max in clip_param cannot both be None'
)
assert (
(clip_min is None or clip_max is None) or clip_min < clip_max
), (
'clip_param = [clip_min, clip_max] must have clip_min < clip_max'
)
self.clip_args = {}
if clip_min is not None:
self.clip_args['min'] = clip_min
if clip_max is not None:
self.clip_args['max'] = clip_max
scale = math.sqrt(uniform_weight_init_scale_numerator / input_dims)
weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale, 'max': scale})
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.output_dim_vec = FC.calculate_fc_output_dims(
max_fc_size, input_dims, output_dims)
self.transposed = transposed
if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
weight_shape = [input_dims, output_dims] if transposed else [output_dims, input_dims]
self.w = self.create_param(param_name='w',
shape=weight_shape,
initializer=weight_init,
optimizer=weight_optim,
regularizer=weight_reg)
self.b = self.create_param(param_name='b',
shape=[output_dims, ],
initializer=bias_init,
optimizer=bias_optim,
regularizer=bias_reg)
else:
self.w_vec = []
self.b_vec = []
for idx, output_dim in enumerate(self.output_dim_vec):
weight_shape = [input_dims, output_dim] if transposed else [output_dim, input_dims]
self.w_vec.append(self.create_param(param_name='w_sub_{}'.format(idx),
shape=weight_shape,
initializer=weight_init,
optimizer=weight_optim,
regularizer=weight_reg))
                self.b_vec.append(self.create_param(param_name='b_sub_{}'.format(idx),
                                                    shape=[output_dim, ],
                                                    initializer=bias_init,
                                                    optimizer=bias_optim,
                                                    regularizer=bias_reg))
if axis == 1:
output_shape = (output_dims, )
else:
output_shape = list(input_record.field_types()[0].shape)[0: axis - 1]
output_shape = tuple(output_shape + [output_dims])
self.output_schema = schema.Scalar(
(np.float32, output_shape),
self.get_next_blob_reference('output')
)
@staticmethod
def calculate_fc_output_dims(max_fc_size, input_dim, output_dim):
if not max_fc_size or max_fc_size < 0:
return None
assert max_fc_size >= input_dim, "Currently we split along the output " \
"dimension. So we need max_fc_size >= input_dim. But, max_fc_size: " \
"{}, input_dim: {}".format(max_fc_size, input_dim)
output_dim_allowed = int(np.floor(max_fc_size / input_dim))
num_fc = int(np.floor((output_dim - 1) / output_dim_allowed) + 1)
output_dim_vec = [output_dim_allowed] * (num_fc - 1)
output_dim_vec.append(output_dim - sum(output_dim_vec))
return output_dim_vec
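    # Worked example (hypothetical sizes): max_fc_size=1000, input_dim=100,
    # output_dim=25 gives output_dim_allowed = 10 and num_fc = 3, so the FC
    # is split along the output dimension into chunks of [10, 10, 5].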
def _insert_fc_ops(self, net, params, outputs, version):
"""
Args:
net: the caffe2 net to insert operator
params: weight and bias for FC
outputs: the output blobs
version: support fp32 and fp16 for now.
"""
if version == "fp32":
if self.transposed:
return net.FCTransposed(
self.input_record.field_blobs() + params,
outputs,
axis=self.axis,
**self.kwargs
)
else:
return net.FC(
self.input_record.field_blobs() + params,
outputs,
axis=self.axis,
**self.kwargs
)
elif version == "fp16":
return net.FbFCPacked(
self.input_record.field_blobs() + params,
outputs,
axis=self.axis,
**self.kwargs
)
else:
raise Exception("unsupported FC type version {}".format(version))
def _add_ops(self, net, params, version):
"""
Args:
params : the weight and bias,
passed by either add_ops or add_train_ops function
version : fp16 or fp32, might support in8 in the future.
"""
if self.clip_args is not None:
clipped_params = [net.NextScopedBlob(
'clipped_%s' % str(p)) for p in params]
for p, cp in zip(params, clipped_params):
net.Clip([p], [cp], **self.clip_args)
params = clipped_params
if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
self._insert_fc_ops(net, params, self.output_schema.field_blobs(), version)
else:
w_vec = params[:int(len(params) / 2)]
b_vec = params[int(len(params) / 2):]
assert len(w_vec) == len(b_vec)
output_blob_vec = []
for i in range(len(self.output_dim_vec)):
output_blob = net.NextScopedBlob(
'output_sub_{}'.format(i))
insert_ret = self._insert_fc_ops(
net, [w_vec[i], b_vec[i]], [output_blob], version
)
output_blob_vec.append(insert_ret)
net.Concat(output_blob_vec,
self.output_schema.field_blobs() +
[self.output_schema.field_blobs()[0] + "_concat_dims"])
def add_ops(self, net):
"""Both the predict net and the eval net will call this function
"""
version_info = get_current_scope().get(
get_fc_predictor_version.__name__, {'fc_version': 'fp32'}
)
predictor_fc_fp_version = version_info['fc_version']
self._add_ops(net, self.param_blobs, predictor_fc_fp_version)
def add_train_ops(self, net):
# use the train_param_blobs to be consistent with the SamplingTrain unittest
self._add_ops(net, self.train_param_blobs, "fp32")
def get_fp16_compatible_parameters(self):
if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
return [self.w]
else:
return self.w_vec
@property
def param_blobs(self):
if self.output_dim_vec is None or len(self.output_dim_vec) == 1:
return [self.w, self.b]
else:
return self.w_vec + self.b_vec | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/fc.py | 0.867134 | 0.379551 | fc.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer, get_layer_class
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
class SamplingTrain(ModelLayer):
def __init__(
self,
model,
input_record,
prediction_layer,
output_dims,
subtract_log_odd=True,
name='sampling_train',
**kwargs
):
super(SamplingTrain, self).__init__(
model, name, input_record, **kwargs
)
layer_class = get_layer_class(prediction_layer)
assert issubclass(layer_class, SamplingTrainableMixin)
assert 'indices' in input_record
assert isinstance(input_record.indices, schema.Scalar),\
"input_record.indices is expected to be a schema.Scalar"
assert 'input' in input_record
self.subtract_log_odd = subtract_log_odd
if self.subtract_log_odd:
assert 'sampling_prob' in input_record
self._prediction_layer = layer_class(
model,
input_record.input,
output_dims=output_dims,
**kwargs
)
self._prediction_layer.train_param_blobs = [
model.net.NextBlob(str(blob) + '_sampled')
for blob in self._prediction_layer.param_blobs
]
self.params = self._prediction_layer.params
self.output_schema = self._prediction_layer.output_schema
def add_ops(self, net):
self._prediction_layer.add_ops(net)
def add_train_ops(self, net):
for full_blob, sampled_blob in zip(
self._prediction_layer.param_blobs,
self._prediction_layer.train_param_blobs
):
net.Gather([full_blob, self.input_record.indices()], sampled_blob)
self._prediction_layer.add_train_ops(net)
if not self.subtract_log_odd:
return
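        # Sampled-softmax correction: subtracting log(sampling_prob) from the
        # logits lets the loss computed on the sampled subset approximate the
        # full-softmax loss.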
log_q = net.Log(self.input_record.sampling_prob(),
net.NextScopedBlob("log_q"))
net.Sub([self.output_schema(), log_q], self.output_schema(),
broadcast=1, use_grad_hack=1) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/sampling_train.py | 0.882238 | 0.230129 | sampling_train.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class BatchNormalization(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_normalization',
scale_optim=None,
bias_optim=None,
momentum=0.9,
order='NCHW',
scale_init_value=1.0,
**kwargs
):
super(BatchNormalization, self).__init__(
model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
self.input_shape = input_record.field_type().shape
if len(self.input_shape) == 3:
if order == "NCHW":
input_dims = self.input_shape[0]
elif order == "NHWC":
input_dims = self.input_shape[2]
else:
raise ValueError("Please specify a correct order")
else:
assert len(self.input_shape) == 1, (
"This layer supports only 4D or 2D tensors")
input_dims = self.input_shape[0]
self.output_schema = schema.Scalar(
(np.float32, self.input_shape),
self.get_next_blob_reference('output')
)
self.momentum = momentum
self.order = order
self.scale = self.create_param(param_name='scale',
shape=[input_dims],
initializer=('ConstantFill', {'value': scale_init_value}),
optimizer=scale_optim)
self.bias = self.create_param(param_name='bias',
shape=[input_dims],
initializer=('ConstantFill', {'value': 0.0}),
optimizer=bias_optim)
self.rm = self.create_param(param_name='running_mean',
shape=[input_dims],
initializer=('ConstantFill', {'value': 0.0}),
optimizer=model.NoOptim)
self.riv = self.create_param(param_name='running_inv_var',
shape=[input_dims],
initializer=('ConstantFill', {'value': 1.0}),
optimizer=model.NoOptim)
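    # For 2D inputs of shape (batch, features), _add_ops below temporarily
    # expands the data to 4D (batch, features, 1, 1) so SpatialBN can treat
    # each feature as a channel, then squeezes back -- e.g. a [32, 128]
    # input is normalized over 128 "channels".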
def _add_ops(self, net, is_test, out_blob=None):
original_input_blob = self.input_record.field_blobs()
        input_blob = net.NextScopedBlob('expand_input')
        if len(self.input_shape) == 1:
            input_blob = net.ExpandDims(original_input_blob,
                                        input_blob,
                                        dims=[2, 3])
        else:
            input_blob = original_input_blob[0]
if out_blob is None:
bn_output = self.output_schema.field_blobs()
else:
bn_output = out_blob
if is_test:
output_blobs = bn_output
else:
output_blobs = bn_output + [self.rm, self.riv,
net.NextScopedBlob('bn_saved_mean'),
net.NextScopedBlob('bn_saved_iv')]
net.SpatialBN([input_blob, self.scale,
self.bias, self.rm, self.riv],
output_blobs,
momentum=self.momentum,
is_test=is_test,
order=self.order)
if len(self.input_shape) == 1:
net.Squeeze(bn_output,
bn_output,
dims=[2, 3])
def add_train_ops(self, net):
self._add_ops(net, is_test=False)
def add_eval_ops(self, net):
self._add_ops(net, is_test=True)
def add_ops(self, net):
self.add_eval_ops(net) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/batch_normalization.py | 0.742702 | 0.302829 | batch_normalization.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
get_categorical_limit,
ModelLayer,
)
from caffe2.python.layers.tags import Tags
logger = logging.getLogger(__name__)
class BucketWeighted(ModelLayer):
def __init__(self, model, input_record, max_score=0, bucket_boundaries=None,
hash_buckets=True, weight_optim=None, name="bucket_weighted"):
super(BucketWeighted, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.List), "Incorrect input type"
self.bucket_boundaries = bucket_boundaries
self.hash_buckets = hash_buckets
if bucket_boundaries is not None:
self.shape = len(bucket_boundaries) + 1
elif max_score > 0:
self.shape = max_score
else:
self.shape = get_categorical_limit(input_record)
self.bucket_w = self.create_param(param_name='bucket_w',
shape=[self.shape, ],
initializer=('ConstantFill', {'value': 1.0}),
optimizer=weight_optim)
self.output_schema = schema.Struct(
('bucket_weights',
schema.Scalar((np.float32, self.shape),
self.get_next_blob_reference("bucket_w_gather")))
)
self.tags.update({Tags.HANDLE_AS_SPARSE_LAYER})
def get_memory_usage(self):
return self.shape
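    # Worked example (illustrative): with bucket_boundaries = [5.0, 10.0]
    # there are len(boundaries) + 1 = 3 buckets, so shape = 3; a raw value
    # of 7.2 lands in bucket 1, and add_ops gathers bucket_w[1] as its
    # weight.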
def add_ops(self, net):
if self.bucket_boundaries is not None:
buckets_int = net.Bucketize(
self.input_record.values(),
"buckets_int",
boundaries=self.bucket_boundaries
)
else:
buckets = self.input_record.values()
buckets_int = net.Cast(
buckets,
"buckets_int",
to=core.DataType.INT32
)
if self.hash_buckets:
buckets_int = net.IndexHash(
buckets_int, "hashed_buckets_int", seed=0, modulo=self.shape
)
net.Gather(
[self.bucket_w, buckets_int],
self.output_schema.bucket_weights.field_blobs()) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/bucket_weighted.py | 0.783658 | 0.167049 | bucket_weighted.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchLRLoss(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_lr_loss',
average_loss=True,
jsd_weight=0.0,
pos_label_target=1.0,
neg_label_target=0.0,
homotopy_weighting=False,
log_D_trick=False,
unjoined_lr_loss=False,
uncertainty_penalty=1.0,
focal_gamma=0.0,
stop_grad_in_focal_factor=False,
task_gamma=1.0,
task_gamma_lb=0.1,
**kwargs
):
super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)
self.average_loss = average_loss
assert (schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar()),
('logit', schema.Scalar())
),
input_record
))
self.jsd_fuse = False
assert jsd_weight >= 0 and jsd_weight <= 1
if jsd_weight > 0 or homotopy_weighting:
assert 'prediction' in input_record
self.init_weight(jsd_weight, homotopy_weighting)
self.jsd_fuse = True
self.homotopy_weighting = homotopy_weighting
assert pos_label_target <= 1 and pos_label_target >= 0
assert neg_label_target <= 1 and neg_label_target >= 0
assert pos_label_target >= neg_label_target
self.pos_label_target = pos_label_target
self.neg_label_target = neg_label_target
assert not (log_D_trick and unjoined_lr_loss)
self.log_D_trick = log_D_trick
self.unjoined_lr_loss = unjoined_lr_loss
assert uncertainty_penalty >= 0
self.uncertainty_penalty = uncertainty_penalty
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
np.float32,
self.get_next_blob_reference('output')
)
self.focal_gamma = focal_gamma
self.stop_grad_in_focal_factor = stop_grad_in_focal_factor
self.apply_exp_decay = False
if task_gamma < 1.0:
self.apply_exp_decay = True
self.task_gamma_cur = self.create_param(
param_name=('%s_task_gamma_cur' % self.name),
shape=[1],
initializer=(
'ConstantFill', {
'value': 1.0,
'dtype': core.DataType.FLOAT
}
),
optimizer=self.model.NoOptim,
)
self.task_gamma = self.create_param(
param_name=('%s_task_gamma' % self.name),
shape=[1],
initializer=(
'ConstantFill', {
'value': task_gamma,
'dtype': core.DataType.FLOAT
}
),
optimizer=self.model.NoOptim,
)
self.task_gamma_lb = self.create_param(
param_name=('%s_task_gamma_lb' % self.name),
shape=[1],
initializer=(
'ConstantFill', {
'value': task_gamma_lb,
'dtype': core.DataType.FLOAT
}
),
optimizer=self.model.NoOptim,
)
def init_weight(self, jsd_weight, homotopy_weighting):
if homotopy_weighting:
self.mutex = self.create_param(
param_name=('%s_mutex' % self.name),
shape=None,
initializer=('CreateMutex', ),
optimizer=self.model.NoOptim,
)
self.counter = self.create_param(
param_name=('%s_counter' % self.name),
shape=[1],
initializer=(
'ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64
}
),
optimizer=self.model.NoOptim,
)
self.xent_weight = self.create_param(
param_name=('%s_xent_weight' % self.name),
shape=[1],
initializer=(
'ConstantFill', {
'value': 1.,
'dtype': core.DataType.FLOAT
}
),
optimizer=self.model.NoOptim,
)
self.jsd_weight = self.create_param(
param_name=('%s_jsd_weight' % self.name),
shape=[1],
initializer=(
'ConstantFill', {
'value': 0.,
'dtype': core.DataType.FLOAT
}
),
optimizer=self.model.NoOptim,
)
else:
self.jsd_weight = self.model.add_global_constant(
'%s_jsd_weight' % self.name, jsd_weight
)
self.xent_weight = self.model.add_global_constant(
'%s_xent_weight' % self.name, 1. - jsd_weight
)
def update_weight(self, net):
net.AtomicIter([self.mutex, self.counter], [self.counter])
        # iter = 0:   lr = 1
        # iter = 1e6: lr = 0.5 ** 0.1 ~= 0.93
        # iter = 1e9: lr = 1e-3 ** 0.1 ~= 0.50
net.LearningRate([self.counter], [self.xent_weight], base_lr=1.0,
policy='inv', gamma=1e-6, power=0.1,)
net.Sub(
[self.model.global_constants['ONE'], self.xent_weight],
[self.jsd_weight]
)
return self.xent_weight, self.jsd_weight
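    # Worked example of the 'inv' learning-rate policy used above
    # (illustrative): lr(iter) = base_lr * (1 + gamma * iter) ** (-power),
    # so with base_lr=1.0, gamma=1e-6 and power=0.1 the xent weight decays
    # from 1.0 to ~0.93 after 1e6 iterations and ~0.50 after 1e9, while
    # the jsd weight picks up the remainder via the Sub above.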
def add_ops(self, net):
# numerically stable log-softmax with crossentropy
label = self.input_record.label()
# mandatory cast to float32
# self.input_record.label.field_type().base is np.float32 but
# label type is actually int
label = net.Cast(
label,
net.NextScopedBlob('label_float32'),
to=core.DataType.FLOAT)
label = net.ExpandDims(label, net.NextScopedBlob('expanded_label'),
dims=[1])
if self.pos_label_target != 1.0 or self.neg_label_target != 0.0:
label = net.StumpFunc(
label,
net.NextScopedBlob('smoothed_label'),
threshold=0.5,
low_value=self.neg_label_target,
high_value=self.pos_label_target,
)
xent = net.SigmoidCrossEntropyWithLogits(
[self.input_record.logit(), label],
net.NextScopedBlob('cross_entropy'),
log_D_trick=self.log_D_trick,
unjoined_lr_loss=self.unjoined_lr_loss
)
if self.focal_gamma != 0:
label = net.StopGradient(
[label],
[net.NextScopedBlob('label_stop_gradient')],
)
prediction = self.input_record.prediction()
# focal loss = (y(1-p) + p(1-y))^gamma * original LR loss
# y(1-p) + p(1-y) = y + p - 2 * yp
y_plus_p = net.Add(
[prediction, label],
net.NextScopedBlob("y_plus_p"),
)
yp = net.Mul([prediction, label], net.NextScopedBlob("yp"))
two_yp = net.Scale(yp, net.NextScopedBlob("two_yp"), scale=2.0)
y_plus_p_sub_two_yp = net.Sub(
[y_plus_p, two_yp], net.NextScopedBlob("y_plus_p_sub_two_yp")
)
focal_factor = net.Pow(
y_plus_p_sub_two_yp,
net.NextScopedBlob("y_plus_p_sub_two_yp_power"),
exponent=float(self.focal_gamma),
)
            if self.stop_grad_in_focal_factor:
focal_factor = net.StopGradient(
[focal_factor],
[net.NextScopedBlob("focal_factor_stop_gradient")],
)
xent = net.Mul(
[xent, focal_factor], net.NextScopedBlob("focallossxent")
)
if self.apply_exp_decay:
net.Mul(
[self.task_gamma_cur, self.task_gamma],
self.task_gamma_cur
)
task_gamma_multiplier = net.Max(
[self.task_gamma_cur, self.task_gamma_lb],
net.NextScopedBlob("task_gamma_cur_multiplier")
)
xent = net.Mul(
[xent, task_gamma_multiplier], net.NextScopedBlob("expdecayxent")
)
# fuse with JSD
if self.jsd_fuse:
jsd = net.BernoulliJSD(
[self.input_record.prediction(), label],
net.NextScopedBlob('jsd'),
)
if self.homotopy_weighting:
self.update_weight(net)
loss = net.WeightedSum(
[xent, self.xent_weight, jsd, self.jsd_weight],
net.NextScopedBlob('loss'),
)
else:
loss = xent
if 'log_variance' in self.input_record.fields:
# mean (0.5 * exp(-s) * loss + 0.5 * penalty * s)
log_variance_blob = self.input_record.log_variance()
log_variance_blob = net.ExpandDims(
log_variance_blob, net.NextScopedBlob('expanded_log_variance'),
dims=[1]
)
neg_log_variance_blob = net.Negative(
[log_variance_blob],
net.NextScopedBlob('neg_log_variance')
)
# enforce less than 88 to avoid OverflowError
neg_log_variance_blob = net.Clip(
[neg_log_variance_blob],
net.NextScopedBlob('clipped_neg_log_variance'),
max=88.0
)
exp_neg_log_variance_blob = net.Exp(
[neg_log_variance_blob],
net.NextScopedBlob('exp_neg_log_variance')
)
exp_neg_log_variance_loss_blob = net.Mul(
[exp_neg_log_variance_blob, loss],
net.NextScopedBlob('exp_neg_log_variance_loss')
)
penalized_uncertainty = net.Scale(
                log_variance_blob, net.NextScopedBlob("penalized_uncertainty"),
scale=float(self.uncertainty_penalty)
)
loss_2x = net.Add(
[exp_neg_log_variance_loss_blob, penalized_uncertainty],
net.NextScopedBlob('loss')
)
loss = net.Scale(loss_2x, net.NextScopedBlob("loss"), scale=0.5)
if 'weight' in self.input_record.fields:
weight_blob = self.input_record.weight()
if self.input_record.weight.field_type().base != np.float32:
weight_blob = net.Cast(
weight_blob,
weight_blob + '_float32',
to=core.DataType.FLOAT
)
weight_blob = net.StopGradient(
[weight_blob],
[net.NextScopedBlob('weight_stop_gradient')],
)
loss = net.Mul(
[loss, weight_blob],
net.NextScopedBlob('weighted_cross_entropy'),
)
if self.average_loss:
net.AveragedLoss(loss, self.output_schema.field_blobs())
else:
net.ReduceFrontSum(loss, self.output_schema.field_blobs()) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/batch_lr_loss.py | 0.867247 | 0.268216 | batch_lr_loss.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.arc_cosine_feature_map import ArcCosineFeatureMap
import numpy as np
class SemiRandomFeatures(ArcCosineFeatureMap):
"""
Implementation of the semi-random kernel feature map.
Applies H(x_rand) * x_rand^s * x_learned, where
H is the Heaviside step function,
x_rand is the input after applying FC with randomized parameters,
and x_learned is the input after applying FC with learnable parameters.
    If using a multilayer model with semi-random layers, the input and output
    records should each have a 'full' and a 'random' Scalar. The random Scalar
    is passed as input to process the random features.
For more information, see the original paper:
https://arxiv.org/pdf/1702.08882.pdf
Inputs :
output_dims -- dimensions of the output vector
        s -- if s == 0, will obtain linear semi-random features;
             if s == 1, will obtain squared semi-random features;
             if s >= 2, will obtain higher order semi-random features
scale_random -- amount to scale the standard deviation
(for random parameter initialization when weight_init or
bias_init hasn't been specified)
scale_learned -- amount to scale the standard deviation
(for learned parameter initialization when weight_init or
bias_init hasn't been specified)
weight_init_random -- initialization distribution for random weight parameter
(if None, will use Gaussian distribution)
        bias_init_random -- initialization distribution for random bias parameter
(if None, will use Uniform distribution)
weight_init_learned -- initialization distribution for learned weight parameter
(if None, will use Gaussian distribution)
        bias_init_learned -- initialization distribution for learned bias parameter
(if None, will use Uniform distribution)
weight_optim -- optimizer for weight params for learned features
bias_optim -- optimizer for bias param for learned features
set_weight_as_global_constant -- if True, initialized random parameters
will be constant across all distributed
instances of the layer
"""
def __init__(
self,
model,
input_record,
output_dims,
s=1,
scale_random=1.0,
scale_learned=1.0,
weight_init_random=None,
bias_init_random=None,
weight_init_learned=None,
bias_init_learned=None,
weight_optim=None,
bias_optim=None,
set_weight_as_global_constant=False,
name='semi_random_features',
**kwargs):
        if isinstance(input_record, schema.Struct):
            assert schema.is_schema_subset(
                schema.Struct(
                    ('full', schema.Scalar()),
                    ('random', schema.Scalar()),
                ),
                input_record
            ), "Struct input must contain 'full' and 'random' Scalars"
            self.input_record_full = input_record.full
            self.input_record_random = input_record.random
        elif isinstance(input_record, schema.Scalar):
            self.input_record_full = input_record
            self.input_record_random = input_record
        else:
            raise ValueError(
                "input_record must be a schema.Struct or a schema.Scalar")
super(SemiRandomFeatures, self).__init__(
model,
self.input_record_full,
output_dims,
s=s,
scale=scale_random, # To initialize the random parameters
weight_init=weight_init_random,
bias_init=bias_init_random,
weight_optim=None,
bias_optim=None,
set_weight_as_global_constant=set_weight_as_global_constant,
initialize_output_schema=False,
name=name,
**kwargs)
self.output_schema = schema.Struct(
('full', schema.Scalar(
(np.float32, output_dims),
model.net.NextScopedBlob(name + '_full_output')
),),
('random', schema.Scalar(
(np.float32, output_dims),
model.net.NextScopedBlob(name + '_random_output')
),),
)
# To initialize the learnable parameters
assert (scale_learned > 0.0), \
"Expected scale (learned) > 0, got %s" % scale_learned
self.stddev = scale_learned * np.sqrt(1.0 / self.input_dims)
# Learned Parameters
(self.learned_w, self.learned_b) = self._initialize_params(
'learned_w',
'learned_b',
w_init=weight_init_learned,
b_init=bias_init_learned,
w_optim=weight_optim,
b_optim=bias_optim
)
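    # Worked example of the feature map (illustrative, scalar case with
    # s = 1): x_rand = -0.3, x_learned = 2.0 gives H(-0.3) * (-0.3) * 2.0
    # = 0, while x_rand = 0.5 gives 1 * 0.5 * 2.0 = 1.0 -- the random
    # branch gates and scales the learned branch.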
def add_ops(self, net):
# Learned features: wx + b
learned_features = net.FC(self.input_record_full.field_blobs() +
[self.learned_w, self.learned_b],
net.NextScopedBlob('learned_features'))
# Random features: wx + b
random_features = net.FC(self.input_record_random.field_blobs() +
[self.random_w, self.random_b],
net.NextScopedBlob('random_features'))
processed_random_features = self._heaviside_with_power(
net,
random_features,
self.output_schema.random.field_blobs(),
self.s
)
net.Mul([processed_random_features, learned_features],
self.output_schema.full.field_blobs()) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/semi_random_features.py | 0.87105 | 0.471406 | semi_random_features.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchHuberLoss(ModelLayer):
def __init__(self, model, input_record, name='batch_huber_loss', delta=1.0, **kwargs):
super(BatchHuberLoss, self).__init__(model, name, input_record, **kwargs)
assert delta > 0
self._delta = delta
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar()),
('prediction', schema.Scalar())
),
input_record
)
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
np.float32,
self.get_next_blob_reference('output'))
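    # Worked example of the decomposition in add_ops (illustrative, with
    # delta = 1): abs_error = 2.0 gives min_error = 1.0, so quadratic =
    # 0.5 * 1.0 ** 2 = 0.5, linear = 1.0 * (2.0 - 1.0) = 1.0 and huber =
    # 1.5; abs_error = 0.5 stays in the quadratic regime with huber =
    # 0.5 * 0.5 ** 2 = 0.125, matching the classic piecewise definition.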
def add_ops(self, net):
prediction = net.Squeeze(
self.input_record.prediction(),
net.NextScopedBlob('squeezed_prediction'),
dims=[1]
)
label = self.input_record.label.field_blobs()
if self.input_record.label.field_type().base != (
self.input_record.prediction.field_type().base):
label = net.Cast(
label,
net.NextScopedBlob('cast_label'),
to=schema.data_type_for_dtype(
self.input_record.prediction.field_type()
)
)
const_delta = net.ConstantFill(
label,
net.NextScopedBlob("delta"),
value=self._delta,
dtype=core.DataType.FLOAT,
)
label = net.StopGradient(
label,
net.NextScopedBlob('stopped_label')
)
const_delta = net.StopGradient(
const_delta,
net.NextScopedBlob('stopped_delta')
)
# abs_error = np.abs(true - pred)
abs_error = net.L1Distance(
[label, prediction], net.NextScopedBlob("abs_error")
)
# quadratic = 0.5*min(abs_error, delta)^2, linear = delta*max(abs_error-delta, 0)
min_error = net.Min(
[abs_error, const_delta], net.NextScopedBlob("min_error_delta")
)
quadratic_term = net.Scale(
net.Sqr(min_error), scale=float(0.5)
)
linear_term = net.Mul(
[
net.Sub([abs_error, min_error]),
const_delta,
],
net.NextScopedBlob("huber_linear_term")
)
# huber = 0.5 * min(abs_error, delta)^2 + delta * max(abs_error-delta, 0)
huber_dist = net.Add(
[quadratic_term, linear_term], net.NextScopedBlob("huber_dist")
)
if 'weight' in self.input_record.fields:
weight_blob = self.input_record.weight()
if self.input_record.weight.field_type().base != np.float32:
weight_blob = net.Cast(
weight_blob,
weight_blob + '_float32',
to=core.DataType.FLOAT
)
weight_blob = net.StopGradient(
[weight_blob],
[net.NextScopedBlob('weight_stop_gradient')],
)
huber_dist = net.Mul(
[huber_dist, weight_blob],
net.NextScopedBlob("weighted_huber_distance"),
)
net.AveragedLoss(huber_dist, self.output_schema.field_blobs()) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/batch_huber_loss.py | 0.893516 | 0.305209 | batch_huber_loss.py | pypi |
# @package label_smooth
# Module caffe2.python.layers.label_smooth
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class LabelSmooth(ModelLayer):
def __init__(
self, model, label, smooth_matrix, name='label_smooth', **kwargs
):
super(LabelSmooth, self).__init__(model, name, label, **kwargs)
self.label = label
# shape as a list
smooth_matrix = np.array(smooth_matrix).astype(np.float32).flatten()
self.set_dim(smooth_matrix)
self.set_smooth_matrix(smooth_matrix)
self.output_schema = schema.Scalar(
(np.float32, (self.dim, )),
self.get_next_blob_reference('smoothed_label')
)
def set_dim(self, smooth_matrix):
num_elements = smooth_matrix.size
self.binary_prob_label = (num_elements == 2)
if self.binary_prob_label:
self.dim = 1
else:
            dim = int(np.round(np.sqrt(num_elements)))
            assert dim * dim == num_elements, "smooth_matrix must be square"
            self.dim = dim
def set_smooth_matrix(self, smooth_matrix):
if not self.binary_prob_label:
self.smooth_matrix = self.model.add_global_constant(
'%s_label_smooth_matrix' % self.name,
array=smooth_matrix.reshape((self.dim, self.dim)),
dtype=np.dtype(np.float32),
)
self.len = self.model.add_global_constant(
'%s_label_dim' % self.name,
array=self.dim,
dtype=np.dtype(np.int64),
)
else:
self.smooth_matrix = smooth_matrix
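    # Worked example (illustrative): for 2 classes with
    # smooth_matrix = [[0.9, 0.1], [0.2, 0.8]], a categorical label 1 is
    # one-hot encoded as [0, 1], and MatMul maps it to the smoothed
    # target [0.2, 0.8].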
def add_ops_for_binary_prob_label(self, net):
if self.label.field_type().base != np.float32:
float32_label = net.NextScopedBlob('float32_label')
net.Cast([self.label()], [float32_label], to=core.DataType.FLOAT)
else:
float32_label = self.label()
net.StumpFunc(
float32_label,
self.output_schema(),
threshold=0.5,
low_value=self.smooth_matrix[0],
high_value=self.smooth_matrix[1],
)
def add_ops_for_categorical_label(self, net):
if self.label.field_type().base != np.int64:
int64_label = net.NextScopedBlob('int64_label')
net.Cast([self.label()], [int64_label], to=core.DataType.INT64)
else:
int64_label = self.label()
one_hot_label = net.NextScopedBlob('one_hot_label')
net.OneHot([int64_label, self.len], [one_hot_label])
net.MatMul([one_hot_label, self.smooth_matrix], self.output_schema())
def add_ops(self, net):
if self.binary_prob_label:
self.add_ops_for_binary_prob_label(net)
else:
self.add_ops_for_categorical_label(net) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/label_smooth.py | 0.817502 | 0.284452 | label_smooth.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class ReservoirSampling(ModelLayer):
"""
Collect samples from input record w/ reservoir sampling. If you have complex
data, use PackRecords to pack it before using this layer.
This layer is not thread safe.
"""
def __init__(self, model, input_record, num_to_collect,
name='reservoir_sampling', **kwargs):
super(ReservoirSampling, self).__init__(
model, name, input_record, **kwargs)
assert num_to_collect > 0
self.num_to_collect = num_to_collect
self.reservoir = self.create_param(
param_name='reservoir',
shape=[0],
initializer=('ConstantFill',),
optimizer=model.NoOptim,
)
self.num_visited_blob = self.create_param(
param_name='num_visited',
shape=[],
initializer=('ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64,
}),
optimizer=model.NoOptim,
)
self.mutex = self.create_param(
param_name='mutex',
shape=None,
initializer=('CreateMutex',),
optimizer=model.NoOptim,
)
self.extra_input_blobs = []
self.extra_output_blobs = []
if 'object_id' in input_record:
object_to_pos = self.create_param(
param_name='object_to_pos',
shape=None,
initializer=('CreateMap', {
'key_dtype': core.DataType.INT64,
'valued_dtype': core.DataType.INT32,
}),
optimizer=model.NoOptim,
)
pos_to_object = self.create_param(
param_name='pos_to_object',
shape=[0],
initializer=('ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64,
}),
optimizer=model.NoOptim,
)
self.extra_input_blobs.append(input_record.object_id())
self.extra_input_blobs.extend([object_to_pos, pos_to_object])
self.extra_output_blobs.extend([object_to_pos, pos_to_object])
self.output_schema = schema.Struct(
(
'reservoir',
schema.from_blob_list(input_record.data, [self.reservoir])
),
('num_visited', schema.Scalar(blob=self.num_visited_blob)),
('mutex', schema.Scalar(blob=self.mutex)),
)
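    # The ReservoirSampling op implements the classic streaming algorithm:
    # item i (0-based) fills the reservoir while there is room, then
    # replaces a uniformly random slot with probability
    # num_to_collect / (i + 1). A plain-Python sketch (illustrative):
    #
    #     import random
    #     reservoir = []
    #     for i, x in enumerate(stream):
    #         if len(reservoir) < num_to_collect:
    #             reservoir.append(x)
    #         else:
    #             j = random.randint(0, i)
    #             if j < num_to_collect:
    #                 reservoir[j] = x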
def add_ops(self, net):
net.ReservoirSampling(
[self.reservoir, self.num_visited_blob, self.input_record.data(),
self.mutex] + self.extra_input_blobs,
[self.reservoir, self.num_visited_blob] + self.extra_output_blobs,
num_to_collect=self.num_to_collect,
) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/reservoir_sampling.py | 0.775265 | 0.26563 | reservoir_sampling.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchMSELoss(ModelLayer):
def __init__(self, model, input_record, name='batch_mse_loss', **kwargs):
super(BatchMSELoss, self).__init__(model, name, input_record, **kwargs)
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar()),
('prediction', schema.Scalar())
),
input_record
)
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
np.float32,
self.get_next_blob_reference('output'))
def add_ops(self, net):
prediction = self.input_record.prediction()
label = self.input_record.label.field_blobs()
if self.input_record.label.field_type().base != (
self.input_record.prediction.field_type().base):
label = net.Cast(
label,
net.NextScopedBlob('cast_label'),
to=schema.data_type_for_dtype(
self.input_record.prediction.field_type()
)
)
label = net.ExpandDims(label, 1, dims=[1])
label = net.StopGradient(
label,
net.NextScopedBlob('stopped_label')
)
l2dist = net.SquaredL2Distance(
[label, prediction],
net.NextScopedBlob('l2')
)
if 'weight' in self.input_record.fields:
weight_blob = self.input_record.weight()
if self.input_record.weight.field_type().base != np.float32:
weight_blob = net.Cast(
weight_blob,
weight_blob + '_float32',
to=core.DataType.FLOAT
)
weight_blob = net.StopGradient(
[weight_blob],
[net.NextScopedBlob('weight_stop_gradient')],
)
l2dist = net.Mul(
[l2dist, weight_blob],
net.NextScopedBlob('weighted_l2_distance'),
)
net.AveragedLoss(l2dist, self.output_schema.field_blobs()) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/layers/batch_mse_loss.py | 0.874319 | 0.200793 | batch_mse_loss.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python.onnx.helper import c2_native_run_net, c2_native_run_op
from caffe2.python import core, workspace
import caffe2.python.onnx.frontend as c2_front
import caffe2.python._import_c_extension as C
import numpy as np
def _dim_values_to_list(dim_values):
return [x.dim_value for x in dim_values]
def _get_output_shapes(output_value_infos):
names = [x.name for x in output_value_infos]
shapes = [_dim_values_to_list(x.type.tensor_type.shape.dim) for x in output_value_infos]
return dict(zip(names, shapes))
def check_gpu_():
try:
C.get_cuda_version()
except Exception as _:
raise Exception("TensorRT related functions require CUDA support")
def convert_onnx_model_to_trt_op(onnx_model,
max_batch_size=64,
max_workspace_size=2*1024*1024,
verbosity=1,
debug_builder=False):
"""
Convert the whole ONNX model to a TensorRT C2 op
"""
check_gpu_()
trt_str = C.onnx_to_trt_op(onnx_model.SerializeToString(),
_get_output_shapes(onnx_model.graph.output),
max_batch_size,
max_workspace_size,
verbosity,
debug_builder)
op = caffe2_pb2.OperatorDef()
op.ParseFromString(trt_str)
return op
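# Minimal usage sketch (not part of the original API; the model path is
# hypothetical and CUDA/TensorRT support is assumed to be available).
def _example_convert_to_trt(onnx_file):
    import onnx
    model = onnx.load(onnx_file)
    trt_op = convert_onnx_model_to_trt_op(model, max_batch_size=32)
    pred_net = caffe2_pb2.NetDef()
    pred_net.op.extend([trt_op])
    return pred_net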
# Assume the workspace is already filled with init weights
def _infer_shapes(pred_net, inputs):
    # Feed the provided inputs so that the net can actually run
    for k, v in inputs.items():
        workspace.FeedBlob(k, v)
    workspace.RunNetOnce(pred_net)
hints = {}
for op in pred_net.op:
for o in op.output:
if o not in hints:
blob = workspace.FetchBlob(o)
if hasattr(blob, 'shape'):
hints[o] = blob.shape
for i in op.input:
if i not in hints:
blob = workspace.FetchBlob(i)
if hasattr(blob, 'shape'):
hints[i] = blob.shape
return hints
def transform_caffe2_net(
pred_net,
input_shapes,
populate_shapes = False,
max_batch_size=64,
max_workspace_size=2*1024*1024,
verbosity=1,
debug_builder=False,
build_serializable_op=True):
"""
Transform the caffe2_net by collapsing TRT-runnable nodes into trt c2 ops
"""
check_gpu_()
    # Hacky way to infer shapes, as not all of our operators have a shape
    # inference function. Normally this is not needed.
shape_hints = {}
if populate_shapes:
input_data = {}
for k,v in input_shapes.items():
input_data[k] = np.random.randn(*v).astype(np.float32)
shape_hints = _infer_shapes(pred_net, input_data)
for k,v in input_shapes.items():
shape_hints[k] = v
pred_net_str = C.transform_trt(pred_net.SerializeToString(),
shape_hints,
max_batch_size,
max_workspace_size,
verbosity,
debug_builder,
build_serializable_op)
pred_net_cut = caffe2_pb2.NetDef()
pred_net_cut.ParseFromString(pred_net_str)
return pred_net_cut | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/trt/transform.py | 0.69285 | 0.243969 | transform.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, scope
def create_predict_net(predictor_export_meta):
"""
Return the input prediction net.
"""
# Construct a new net to clear the existing settings.
net = core.Net(predictor_export_meta.predict_net.name or "predict")
net.Proto().op.extend(predictor_export_meta.predict_net.op)
net.Proto().partition_info.extend(predictor_export_meta.predict_net.partition_info)
net.Proto().external_input.extend(
predictor_export_meta.inputs + predictor_export_meta.parameters)
net.Proto().external_output.extend(predictor_export_meta.outputs)
net.Proto().arg.extend(predictor_export_meta.predict_net.arg)
if predictor_export_meta.net_type is not None:
net.Proto().type = predictor_export_meta.net_type
if predictor_export_meta.num_workers is not None:
net.Proto().num_workers = predictor_export_meta.num_workers
return net.Proto()
def create_predict_init_net(ws, predictor_export_meta):
"""
Return an initialization net that zero-fill all the input and
output blobs, using the shapes from the provided workspace. This is
necessary as there is no shape inference functionality in Caffe2.
"""
net = core.Net("predict-init")
def zero_fill(blob):
shape = predictor_export_meta.shapes.get(blob)
if shape is None:
if blob not in ws.blobs:
raise Exception(
"{} not in workspace but needed for shape: {}".format(
blob, ws.blobs))
shape = ws.blobs[blob].fetch().shape
# Explicitly null-out the scope so users (e.g. PredictorGPU)
# can control (at a Net-global level) the DeviceOption of
# these filling operators.
with scope.EmptyDeviceScope():
net.ConstantFill([], blob, shape=shape, value=0.0)
external_blobs = predictor_export_meta.inputs + \
predictor_export_meta.outputs
for blob in external_blobs:
zero_fill(blob)
net.Proto().external_input.extend(external_blobs)
if predictor_export_meta.extra_init_net:
net.AppendNet(predictor_export_meta.extra_init_net)
# Add the model_id in the predict_net to the init_net
AddModelIdArg(predictor_export_meta, net.Proto())
return net.Proto()
def get_comp_name(string, name):
if name:
return string + '_' + name
return string
def _ProtoMapGet(field, key):
'''
Given the key, get the value of the repeated field.
Helper function used by protobuf since it doesn't have map construct
'''
for v in field:
if (v.key == key):
return v.value
return None
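# Illustrative self-check (hypothetical data, not part of the original
# API): protobuf "map" fields in MetaNetDef are repeated (key, value)
# pairs, so _ProtoMapGet is a linear scan over them.
def _example_proto_map_get():
    class _KV(object):
        def __init__(self, key, value):
            self.key, self.value = key, value
    field = [_KV('plan_a', 1), _KV('plan_b', 2)]
    assert _ProtoMapGet(field, 'plan_b') == 2
    assert _ProtoMapGet(field, 'missing') is None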
def GetPlan(meta_net_def, key):
return _ProtoMapGet(meta_net_def.plans, key)
def GetPlanOriginal(meta_net_def, key):
return _ProtoMapGet(meta_net_def.plans, key)
def GetBlobs(meta_net_def, key):
blobs = _ProtoMapGet(meta_net_def.blobs, key)
if blobs is None:
return []
return blobs
def GetBlobsByTypePrefix(meta_net_def, blob_type_prefix):
blob_map = {}
for b in meta_net_def.blobs:
if b.key.startswith(blob_type_prefix):
for blob in b.value:
if blob not in blob_map:
blob_map[blob] = len(blob_map)
return sorted(blob_map, key=lambda blob: blob_map[blob])
def GetNet(meta_net_def, key):
return _ProtoMapGet(meta_net_def.nets, key)
def GetNetOriginal(meta_net_def, key):
return _ProtoMapGet(meta_net_def.nets, key)
def GetApplicationSpecificInfo(meta_net_def, key):
return _ProtoMapGet(meta_net_def.applicationSpecificInfo, key)
def AddBlobs(meta_net_def, blob_name, blob_def):
blobs = _ProtoMapGet(meta_net_def.blobs, blob_name)
if blobs is None:
blobs = meta_net_def.blobs.add()
blobs.key = blob_name
blobs = blobs.value
for blob in blob_def:
blobs.append(blob)
def ReplaceBlobs(meta_net_def, blob_name, blob_def):
blobs = _ProtoMapGet(meta_net_def.blobs, blob_name)
assert blobs is not None, "The blob_name:{} does not exist".format(blob_name)
del blobs[:]
for blob in blob_def:
blobs.append(blob)
def AddPlan(meta_net_def, plan_name, plan_def):
meta_net_def.plans.add(key=plan_name, value=plan_def)
def AddNet(meta_net_def, net_name, net_def):
meta_net_def.nets.add(key=net_name, value=net_def)
def SetBlobsOrder(meta_net_def, blobs_order):
for blob in blobs_order:
meta_net_def.blobsOrder.append(blob)
def SetPreLoadBlobs(meta_net_def, pre_load_blobs):
for blob in pre_load_blobs:
meta_net_def.preLoadBlobs.append(blob)
def GetArgumentByName(net_def, arg_name):
for arg in net_def.arg:
if arg.name == arg_name:
return arg
return None
def AddModelIdArg(meta_net_def, net_def):
"""Takes the model_id from the predict_net of meta_net_def (if it is
populated) and adds it to the net_def passed in. This is intended to be
called on init_nets, as their model_id is not populated by default, but
should be the same as that of the predict_net
"""
# Get model_id from the predict_net, assuming it's an integer
model_id = GetArgumentByName(meta_net_def.predict_net, "model_id")
if model_id is None:
return
model_id = model_id.i
# If there's another model_id on the net, replace it with the new one
old_id = GetArgumentByName(net_def, "model_id")
if old_id is not None:
old_id.i = model_id
return
# Add as an integer argument, this is also assumed above
arg = net_def.arg.add()
arg.name = "model_id"
arg.i = model_id | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/predictor/predictor_py_utils.py | 0.81637 | 0.153327 | predictor_py_utils.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.proto import metanet_pb2
from caffe2.python import workspace, core, scope
from caffe2.python.predictor_constants import predictor_constants
import caffe2.python.predictor.serde as serde
import caffe2.python.predictor.predictor_py_utils as utils
from builtins import bytes
import collections
def get_predictor_exporter_helper(submodelNetName):
""" constracting stub for the PredictorExportMeta
Only used to construct names to subfields,
such as calling to predict_net_name
Args:
submodelNetName - name of the model
"""
stub_net = core.Net(submodelNetName)
pred_meta = PredictorExportMeta(predict_net=stub_net,
parameters=[],
inputs=[],
outputs=[],
shapes=None,
name=submodelNetName,
extra_init_net=None)
return pred_meta
class PredictorExportMeta(collections.namedtuple(
'PredictorExportMeta',
'predict_net, parameters, inputs, outputs, shapes, name, \
extra_init_net, global_init_net, net_type, num_workers, trainer_prefix')):
"""
    Metadata to be used for serializing a net.
parameters, inputs, outputs could be either BlobReference or blob's names
predict_net can be either core.Net, NetDef, PlanDef or object
Override the named tuple to provide optional name parameter.
name will be used to identify multiple prediction nets.
net_type is the type field in caffe2 NetDef - can be 'simple', 'dag', etc.
num_workers specifies for net type 'dag' how many threads should run ops
trainer_prefix specifies the type of trainer.
extra_init_net gets appended to pred_init_net, useful for thread local init
global_init_net gets appended to global_init_net, useful for global init
on a shared across threads parameter workspace
(in a case of multi-threaded inference)
"""
def __new__(
cls,
predict_net,
parameters,
inputs,
outputs,
shapes=None,
name="",
extra_init_net=None,
global_init_net=None,
net_type=None,
num_workers=None,
trainer_prefix=None,
):
inputs = [str(i) for i in inputs]
outputs = [str(o) for o in outputs]
assert len(set(inputs)) == len(inputs), (
"All inputs to the predictor should be unique")
parameters = [str(p) for p in parameters]
assert set(parameters).isdisjoint(inputs), (
"Parameters and inputs are required to be disjoint. "
"Intersection: {}".format(set(parameters).intersection(inputs)))
assert set(parameters).isdisjoint(outputs), (
"Parameters and outputs are required to be disjoint. "
"Intersection: {}".format(set(parameters).intersection(outputs)))
shapes = shapes or {}
if isinstance(predict_net, (core.Net, core.Plan)):
predict_net = predict_net.Proto()
assert isinstance(predict_net, (caffe2_pb2.NetDef, caffe2_pb2.PlanDef))
return super(PredictorExportMeta, cls).__new__(
cls, predict_net, parameters, inputs, outputs, shapes, name,
extra_init_net, global_init_net, net_type, num_workers, trainer_prefix)
def inputs_name(self):
return utils.get_comp_name(predictor_constants.INPUTS_BLOB_TYPE,
self.name)
def outputs_name(self):
return utils.get_comp_name(predictor_constants.OUTPUTS_BLOB_TYPE,
self.name)
def parameters_name(self):
return utils.get_comp_name(predictor_constants.PARAMETERS_BLOB_TYPE,
self.name)
def global_init_name(self):
return utils.get_comp_name(predictor_constants.GLOBAL_INIT_NET_TYPE,
self.name)
def predict_init_name(self):
return utils.get_comp_name(predictor_constants.PREDICT_INIT_NET_TYPE,
self.name)
def predict_net_name(self):
return utils.get_comp_name(predictor_constants.PREDICT_NET_TYPE,
self.name)
def train_init_plan_name(self):
plan_name = utils.get_comp_name(predictor_constants.TRAIN_INIT_PLAN_TYPE,
self.name)
return self.trainer_prefix + '_' + plan_name \
if self.trainer_prefix else plan_name
def train_plan_name(self):
plan_name = utils.get_comp_name(predictor_constants.TRAIN_PLAN_TYPE,
self.name)
return self.trainer_prefix + '_' + plan_name \
if self.trainer_prefix else plan_name
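# Minimal construction sketch (illustrative; the blob names are
# hypothetical): bundle a trained predict net with its parameters so it
# can be passed to save_to_db below.
def _example_export_meta(predict_net):
    return PredictorExportMeta(
        predict_net=predict_net,
        parameters=["fc_w", "fc_b"],
        inputs=["data"],
        outputs=["softmax"],
        name="example_model",
    )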
def prepare_prediction_net(filename, db_type, device_option=None):
'''
Helper function which loads all required blobs from the db
and returns prediction net ready to be used
'''
metanet_def = load_from_db(filename, db_type, device_option)
global_init_net = utils.GetNet(
metanet_def, predictor_constants.GLOBAL_INIT_NET_TYPE)
workspace.RunNetOnce(global_init_net)
predict_init_net = utils.GetNet(
metanet_def, predictor_constants.PREDICT_INIT_NET_TYPE)
workspace.RunNetOnce(predict_init_net)
predict_net = core.Net(
utils.GetNet(metanet_def, predictor_constants.PREDICT_NET_TYPE))
workspace.CreateNet(predict_net)
return predict_net
def _global_init_net(predictor_export_meta, db_type):
net = core.Net("global-init")
# manifold_db does not need DBReader
if db_type != "manifold_db":
net.Load(
[predictor_constants.PREDICTOR_DBREADER],
predictor_export_meta.parameters)
net.Proto().external_input.extend([predictor_constants.PREDICTOR_DBREADER])
net.Proto().external_output.extend(predictor_export_meta.parameters)
if predictor_export_meta.global_init_net:
net.AppendNet(predictor_export_meta.global_init_net)
# Add the model_id in the predict_net to the global_init_net
utils.AddModelIdArg(predictor_export_meta, net.Proto())
return net.Proto()
def get_meta_net_def(predictor_export_meta, ws=None, db_type=None):
    """
    Build a MetaNetDef bundling the predict, predict-init and global-init
    nets together with the parameter, input and output blob lists.
    """
ws = ws or workspace.C.Workspace.current
meta_net_def = metanet_pb2.MetaNetDef()
# Predict net is the core network that we use.
utils.AddNet(meta_net_def, predictor_export_meta.predict_init_name(),
utils.create_predict_init_net(ws, predictor_export_meta))
utils.AddNet(meta_net_def, predictor_export_meta.global_init_name(),
_global_init_net(predictor_export_meta, db_type))
utils.AddNet(meta_net_def, predictor_export_meta.predict_net_name(),
utils.create_predict_net(predictor_export_meta))
utils.AddBlobs(meta_net_def, predictor_export_meta.parameters_name(),
predictor_export_meta.parameters)
utils.AddBlobs(meta_net_def, predictor_export_meta.inputs_name(),
predictor_export_meta.inputs)
utils.AddBlobs(meta_net_def, predictor_export_meta.outputs_name(),
predictor_export_meta.outputs)
return meta_net_def
def set_model_info(meta_net_def, project_str, model_class_str, version):
assert isinstance(meta_net_def, metanet_pb2.MetaNetDef)
meta_net_def.modelInfo.project = project_str
meta_net_def.modelInfo.modelClass = model_class_str
meta_net_def.modelInfo.version = version
def save_to_db(db_type, db_destination, predictor_export_meta, use_ideep=False,
*args, **kwargs):
meta_net_def = get_meta_net_def(predictor_export_meta, db_type=db_type)
device_type = caffe2_pb2.IDEEP if use_ideep else caffe2_pb2.CPU
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
workspace.FeedBlob(
predictor_constants.META_NET_DEF,
serde.serialize_protobuf_struct(meta_net_def)
)
blobs_to_save = [predictor_constants.META_NET_DEF] + \
predictor_export_meta.parameters
op = core.CreateOperator(
"Save",
blobs_to_save, [],
device_option = core.DeviceOption(device_type),
absolute_path=True,
db=db_destination, db_type=db_type,
**kwargs
)
workspace.RunOperatorOnce(op)
def load_from_db(filename, db_type, device_option=None, *args, **kwargs):
# global_init_net in meta_net_def will load parameters from
# predictor_constants.PREDICTOR_DBREADER
create_db = core.CreateOperator(
'CreateDB', [],
[core.BlobReference(predictor_constants.PREDICTOR_DBREADER)],
db=filename, db_type=db_type)
assert workspace.RunOperatorOnce(create_db), (
'Failed to create db {}'.format(filename))
# predictor_constants.META_NET_DEF is always stored before the parameters
load_meta_net_def = core.CreateOperator(
'Load',
[core.BlobReference(predictor_constants.PREDICTOR_DBREADER)],
[core.BlobReference(predictor_constants.META_NET_DEF)])
assert workspace.RunOperatorOnce(load_meta_net_def)
blob = workspace.FetchBlob(predictor_constants.META_NET_DEF)
meta_net_def = serde.deserialize_protobuf_struct(
blob if isinstance(blob, bytes)
else str(blob).encode('utf-8'),
metanet_pb2.MetaNetDef)
if device_option is None:
device_option = scope.CurrentDeviceScope()
if device_option is not None:
# Set the device options of all loaded blobs
for kv in meta_net_def.nets:
net = kv.value
for op in net.op:
op.device_option.CopyFrom(device_option)
return meta_net_def | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/python/predictor/predictor_exporter.py | 0.869618 | 0.241635 | predictor_exporter.py | pypi |
## @package net_construct_bench
# Module caffe2.experiments.python.net_construct_bench
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import time
from caffe2.python import workspace, data_parallel_model
from caffe2.python import cnn
import caffe2.python.models.resnet as resnet
'''
Simple benchmark that creates a data-parallel resnet-50 model
and measures the time.
'''
logging.basicConfig()
log = logging.getLogger("net_construct_bench")
log.setLevel(logging.DEBUG)
def AddMomentumParameterUpdate(train_model, LR):
'''
Add the momentum-SGD update.
'''
params = train_model.GetParams()
assert(len(params) > 0)
ONE = train_model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
NEGONE = train_model.param_init_net.ConstantFill(
[], 'NEGONE', shape=[1], value=-1.0,
)
for param in params:
param_grad = train_model.param_to_grad[param]
param_momentum = train_model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
# Update param_grad and param_momentum in place
train_model.net.MomentumSGD(
[param_grad, param_momentum, LR],
[param_grad, param_momentum],
momentum=0.9,
nesterov=1
)
# Update parameters by applying the moment-adjusted gradient
train_model.WeightedSum(
[param, ONE, param_grad, NEGONE],
param
)
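def _example_momentum_arithmetic():
    """Scalar sketch of one common Nesterov-momentum formulation (for
    intuition only; see the MomentumSGD op for its exact in-place update):
    m' = mu * m + lr * g, applied step = (1 + mu) * m' - mu * m."""
    mu, lr, g, m = 0.9, 0.1, 0.5, 0.2
    m_new = mu * m + lr * g             # 0.23
    step = (1.0 + mu) * m_new - mu * m  # 0.257
    assert abs(m_new - 0.23) < 1e-12 and abs(step - 0.257) < 1e-12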
def Create(args):
gpus = list(range(args.num_gpus))
log.info("Running on gpus: {}".format(gpus))
    # Create a CNNModelHelper object
train_model = cnn.CNNModelHelper(
order="NCHW",
name="resnet50",
use_cudnn=True,
cudnn_exhaustive_search=False
)
# Model building functions
def create_resnet50_model_ops(model, loss_scale):
[softmax, loss] = resnet.create_resnet50(
model,
"data",
num_input_channels=3,
num_labels=1000,
label="label",
)
model.Accuracy([softmax, "label"], "accuracy")
return [loss]
# SGD
def add_parameter_update_ops(model):
model.AddWeightDecay(1e-4)
ITER = model.Iter("ITER")
stepsz = int(30)
LR = model.net.LearningRate(
[ITER],
"LR",
base_lr=0.1,
policy="step",
stepsize=stepsz,
gamma=0.1,
)
AddMomentumParameterUpdate(model, LR)
def add_image_input(model):
pass
start_time = time.time()
# Create parallelized model
data_parallel_model.Parallelize_GPU(
train_model,
input_builder_fun=add_image_input,
forward_pass_builder_fun=create_resnet50_model_ops,
param_update_builder_fun=add_parameter_update_ops,
devices=gpus,
)
ct = time.time() - start_time
train_model.net._CheckLookupTables()
log.info("Model create for {} gpus took: {} secs".format(len(gpus), ct))
def main():
# TODO: use argv
parser = argparse.ArgumentParser(
description="Caffe2: Benchmark for net construction"
)
parser.add_argument("--num_gpus", type=int, default=1,
help="Number of GPUs.")
args = parser.parse_args()
Create(args)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
import cProfile
cProfile.run('main()', sort="cumulative") | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/experiments/python/net_construct_bench.py | 0.746416 | 0.250847 | net_construct_bench.py | pypi |
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import logging
from collections import defaultdict
import numpy as np
from caffe2.python import core, utils
from caffe2.python.fb import hardcode_scale_zp
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
from itertools import tee
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def blob_uses(net, blob):
u = []
for i, op in enumerate(net.op):
if blob in op.input or blob in op.control_input:
u.append(i)
return u
def fuse_first_bn(net, params, removed_tensors, begin_op_index):
net = copy.deepcopy(net)
params = copy.deepcopy(params)
for i, conv in enumerate(net.op[begin_op_index:], begin_op_index):
if conv.type not in ["Conv", "ConvTranspose"]:
continue
uses = blob_uses(net, conv.output[0])
if len(uses) == 0:
continue
j = uses[0]
bn = net.op[j]
if bn.type != "SpatialBN" or (len(uses) > 1 and conv.output[0] != bn.output[0]):
if bn.type == "SpatialBN":
logger.debug("Can't fuse if more than one user {}".format(uses))
# Can't fuse if more than one user unless SpatialBN is inplace
# An example of inplace SpatialBN where we want to allow multiple uses:
# x = Conv(...)
# ... // no interferring use or def of x (will be checked below)
# x = SpatialBN(x, ...)
# ...
# z = Foo(..., x, ...)
# ...
# w = Boo(..., x, ...)
# Here, we still want to fuse Conv and SpatialBN
continue
        # There shouldn't be any def of conv.output[0], or any use or def of bn.output[0], between conv and bn
if any(
blob in net.op[k].input or blob in net.op[k].output
for blob in [conv.output[0], bn.output[0]]
for k in range(i + 1, j)
):
logger.debug(
"Can't fuse because of the following interferring uses or defs:"
)
for k in range(i, j + 1):
logger.debug(net.op[k])
continue
# else, can fuse
fused_conv = copy.deepcopy(conv)
fused_conv.output[0] = bn.output[0]
conv_weight = params[conv.input[1]]
if len(conv.input) > 2:
conv_bias = params[conv.input[2]]
else:
conv_bias = np.zeros(len(params[bn.input[2]])).astype(np.float32)
bn_scale = params[bn.input[1]]
bn_bias = params[bn.input[2]]
bn_running_mean = params[bn.input[3]]
bn_running_var = params[bn.input[4]]
# First, BN computation can be phrased as follows:
# (X - running_mean) * (1.0 / sqrt(running_var + eps)) *
# bn_scale + bias
# Thus, we can rewrite bn_scale as:
# X * bn_scale * 1.0 / (sqrt(running_var + eps)) + (bias -
# running_mean * (1.0 / sqrt(running_var + eps)) * bn_scale)
# Thus, can just have the affine transform
# X * A + B
# where
# A = bn_scale * 1.0 / (sqrt(running_var + eps))
# B = (bias - running_mean * (1.0 / sqrt(running_var + eps))
# * bn_scale)
eps = 1.0e-5
for arg in bn.arg:
if arg.name == "epsilon":
eps = arg.f
A = bn_scale * 1.0 / (np.sqrt(bn_running_var + eps))
B = bn_bias - bn_running_mean * A
# This identity should hold if we have correctly fused
# np.testing.assert_array_equal(
# params[conv.output[0]] * A + B,
# params[bn.output[0]])
# Now, we have that the computation made is the following:
# ((X `conv` W) + b) * A + B
# Then, we can simply fuse this as follows:
# (X `conv` (W * A)) + b * A + B
# which is simply
# (X `conv` Q) + C
# where
# Q = W * A
# C = b * A + B
# For ConvTranspose, from the view of convolutions as a
        # Toeplitz multiplication, we have W_ = W^T, so the weights
# are laid out as (R, S, K, K) (vs (S, R, K, K) for a Conv),
# so the weights broadcast slightly differently. Remember, our
# BN scale 'B' is of size (S,)
A_ = (
A.reshape((-1,) + tuple([1] * (conv_weight.ndim - 1)))
if conv.type == "Conv"
else A.reshape((1, -1) + tuple([1] * (conv_weight.ndim - 2)))
)
C = conv_bias * A + B
Q = conv_weight * A_
assert params[conv.input[1]].shape == Q.shape
if len(conv.input) > 2:
assert params[conv.input[2]].shape == C.shape
else:
assert bn_bias.shape == C.shape
params[conv.input[1]] = Q
if len(conv.input) > 2:
params[conv.input[2]] = C
else:
params[bn.input[2]] = C
fused_conv.input.append(bn.input[2])
new_ops = net.op[:i] + [fused_conv] + net.op[i + 1 : j] + net.op[j + 1 :]
del net.op[:]
removed_tensors.append(bn.input[1])
if len(conv.input) > 2:
removed_tensors.append(bn.input[2])
removed_tensors.append(bn.input[3])
removed_tensors.append(bn.input[4])
del params[bn.input[1]]
if len(conv.input) > 2:
del params[bn.input[2]]
del params[bn.input[3]]
del params[bn.input[4]]
net.op.extend(new_ops)
return net, params, removed_tensors, i + 1
return net, params, removed_tensors, None
def fuse_bn(net, params, ignore_failure):
# Run until we hit a fixed point
removed_tensors = []
begin_op_index = 0
while True:
(next_net, next_params, removed_tensors, begin_op_index) = fuse_first_bn(
net, params, removed_tensors, begin_op_index
)
if begin_op_index is None:
if any(op.type == "SpatialBN" for op in next_net.op) and not ignore_failure:
raise Exception(
"Model contains SpatialBN op after fusion: %s", next_net
)
return (next_net, next_params, removed_tensors)
net, params, removed_tensors = (next_net, next_params, removed_tensors)
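# Numpy self-check (illustrative; not part of the original API) of the
# affine algebra used by fuse_first_bn: with A = scale / sqrt(var + eps)
# and B = bias - mean * A, folding (W * A, b * A + B) into the conv
# reproduces Conv followed by SpatialBN.
def _example_bn_folding_identity():
    eps = 1.0e-5
    scale = np.array([0.5, 2.0], dtype=np.float32)
    bias = np.array([0.1, -0.2], dtype=np.float32)
    mean = np.array([1.0, -1.0], dtype=np.float32)
    var = np.array([4.0, 0.25], dtype=np.float32)
    y = np.array([3.0, 5.0], dtype=np.float32)  # stand-in for conv output
    A = scale / np.sqrt(var + eps)
    B = bias - mean * A
    bn_out = (y - mean) / np.sqrt(var + eps) * scale + bias
    np.testing.assert_allclose(y * A + B, bn_out, rtol=1e-5)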
def fuse_first_scale(net, params, removed_tensors):
net = copy.deepcopy(net)
params = copy.deepcopy(params)
for ((i, current), (j, next_)) in pairwise(enumerate(net.op)):
if next_.input[0] != current.output[0]:
continue
if (
current.type != "SpatialBN"
or next_.type != "Mul"
or len(net.op) <= j + 1
or net.op[j + 1].type != "Add"
):
continue
# else, can fuse
bn = current
mul = next_
add = net.op[j + 1]
fused_bn = copy.deepcopy(bn)
fused_bn.output[0] = add.output[0]
bn_scale = params[bn.input[1]]
mul_scale = params[mul.input[1]]
bn_bias = params[bn.input[2]]
add_bias = params[add.input[1]]
params[bn.input[1]] = bn_scale * mul_scale
params[bn.input[2]] = mul_scale * bn_bias + add_bias
new_ops = net.op[:i] + [fused_bn] + net.op[j + 2 :]
del net.op[:]
removed_tensors.append(mul.input[1])
removed_tensors.append(add.input[1])
del params[mul.input[1]]
del params[add.input[1]]
net.op.extend(new_ops)
break
return net, params, removed_tensors
def fuse_scale(net, params, ignore_failure):
# Run until we hit a fixed point
removed_tensors = []
while True:
(next_net, next_params, removed_tensors) = fuse_first_scale(
net, params, removed_tensors
)
if len(next_net.op) == len(net.op):
return (next_net, next_params, removed_tensors)
net, params, removed_tensors = (next_net, next_params, removed_tensors)
def fuse_first_relu(net, begin_op_index, ignore_op_with_output=None):
net = copy.deepcopy(net)
for i, conv in enumerate(net.op[begin_op_index:], begin_op_index):
if conv.type not in ["Conv", "ConvTranspose", "Sum", "SpatialBN"]:
continue
uses = blob_uses(net, conv.output[0])
if (
len(uses) == 0
or ignore_op_with_output
and conv.output[0] in ignore_op_with_output
):
continue
j = uses[0]
relu = net.op[j]
if relu.type != "Relu" or len(uses) > 1 and conv.output[0] != relu.output[0]:
# Can't fuse if more than one user unless Relu is inplace
if relu.type == "Relu":
logger.debug("Can't fuse if more than one user {}".format(uses))
continue
        # There shouldn't be any def of conv.output[0], or any use or def of relu.output[0], between conv and relu
if any(
blob in net.op[k].input or blob in net.op[k].output
for blob in [conv.output[0], relu.output[0]]
for k in range(i + 1, j)
):
logger.debug(
"Can't fuse because of the following interferring uses or defs:"
)
for k in range(i, j + 1):
logger.debug(net.op[k])
continue
# else, can fuse
fused_conv = copy.deepcopy(conv)
fused_conv.type = conv.type + "Relu"
fused_conv.output[0] = relu.output[0]
new_ops = net.op[:i] + [fused_conv] + net.op[i + 1 : j] + net.op[j + 1 :]
del net.op[:]
net.op.extend(new_ops)
return net, i + 1
return net, None
def fuse_relu(net, ignore_failure, ignore_op_with_output=None):
# Run until we hit a fixed point
begin_op_index = 0
while True:
next_net, begin_op_index = fuse_first_relu(
net, begin_op_index, ignore_op_with_output
)
if begin_op_index is None:
if any(op.type == "Relu" for op in next_net.op) and not ignore_failure:
raise Exception("Model contains Relu op after fusion: %s", next_net)
return next_net
net = next_net
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
if op.output[0] == blob:
return i
raise ValueError("Failed to find last producer of blob, %s", blob)
def swap_first_concat_relu(net, ignore_op_with_output=None):
net = copy.deepcopy(net)
for ((i, current), (j, next_)) in pairwise(enumerate(net.op)):
if next_.input[0] != current.output[0]:
continue
if current.type != "Concat" or next_.type != "Relu":
continue
if ignore_op_with_output and current.output[0] in ignore_op_with_output:
continue
# else, can swap
concat = copy.deepcopy(current)
relu = copy.deepcopy(next_)
pre_ops = copy.deepcopy(net.op[:i])
post_ops = copy.deepcopy(net.op[j + 1 :])
# Delete the Relu after Concat
concat.output[0] = relu.output[0]
# Insert Relu after each op that produces inputs to Concat
for blob in concat.input:
k = last_producer(pre_ops, blob)
producer = pre_ops[k]
assert producer.output[0] == blob
producer.output[0] = blob + "_pre_relu"
new_relu = copy.deepcopy(relu)
new_relu.input[0] = producer.output[0]
new_relu.output[0] = blob
pre_ops = pre_ops[: k + 1] + [new_relu] + pre_ops[k + 1 :]
new_ops = pre_ops + [concat] + post_ops
del net.op[:]
net.op.extend(new_ops)
break
return net
def swap_concat_relu(net, ignore_op_with_output=None):
# Run until we hit a fixed point
while True:
next_net = swap_first_concat_relu(net, ignore_op_with_output)
if len(next_net.op) == len(net.op):
return next_net
net = next_net
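# Numpy check (illustrative) of the identity that makes swap_concat_relu
# safe: Relu is elementwise, so Relu(Concat(xs)) == Concat(Relu(x)).
def _example_concat_relu_commute():
    xs = [np.array([-1.0, 2.0]), np.array([3.0, -4.0])]
    lhs = np.maximum(np.concatenate(xs), 0)
    rhs = np.concatenate([np.maximum(x, 0) for x in xs])
    np.testing.assert_array_equal(lhs, rhs)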
def add_version_to_conv_bias(net, init_net):
"""
    In architectures such as FPN (https://arxiv.org/abs/1612.03144), a few
    Conv ops share the same weight and bias and are run at different scales of
the input. Since 'bias_scale = input_scale * weight_scale', sharing the
same bias blob among multiple Conv ops means that we need different bias
scale for each of the ops. To achieve this, we just duplicate those bias
blobs that are used by multiple Conv ops before performing int8 rewrite.
"""
bias_count = defaultdict(int)
for op in net._net.op:
if "Conv" in op.type and len(op.input) >= 3:
bias_count[op.input[2]] += 1
bias_fill_op = {}
for op in init_net._net.op:
if bias_count[op.output[0]] > 1:
bias_fill_op[op.output[0]] = op
bias_version = defaultdict(int)
for op in net._net.op:
if "Conv" in op.type and len(op.input) >= 3:
bias = op.input[2]
if bias_count[bias] <= 1:
continue
version = bias_version[bias]
bias_version[bias] += 1
if version == 0:
continue
new_bias = bias + "_v" + str(version)
fill_op = copy.deepcopy(bias_fill_op[bias])
fill_op.output[0] = new_bias
init_net._net.op.extend([fill_op])
op.input[2] = new_bias
net._net.external_input.append(new_bias)
def add_quantization_param_args_(op, q_param):
op.arg.extend(
[
utils.MakeArgument("Y_scale", q_param.scale),
utils.MakeArgument("Y_zero_point", q_param.zero_point),
]
)
def choose_quantization_params(tensor_min, tensor_max, preserve_sparsity=False):
if tensor_min < 0 and tensor_max > 0 and preserve_sparsity:
symmetric_qmin = -(255 // 2 + 1)
symmetric_qmax = 255 // 2
max_scale = max(
abs(tensor_min / symmetric_qmin), abs(tensor_max / symmetric_qmax)
)
tensor_min = max_scale * symmetric_qmin
tensor_max = max_scale * symmetric_qmax
q_param = hardcode_scale_zp.choose_quantization_params(tensor_min, tensor_max)
if tensor_min < 0 and tensor_max > 0 and preserve_sparsity:
q_param = hardcode_scale_zp.QuantizationParam(q_param.scale, 128)
return q_param
def add_quantization_param_args(op, tensor, preserve_sparsity=False):
tensor_min = 0 if tensor.size == 0 else tensor.min()
tensor_max = 0 if tensor.size == 0 else tensor.max()
q_param = choose_quantization_params(tensor_min, tensor_max, preserve_sparsity)
add_quantization_param_args_(op, q_param)
return q_param
def create_int8_given_tensor_fill(tensor, out_blob_name, preserve_sparsity=False):
"""
Create Int8GivenTensorFill op that quantizes the given tensor and outputs
an Int8Tensor with out_blob_name.
"""
op = core.CreateOperator("Int8GivenTensorFill", [], out_blob_name)
q_param = add_quantization_param_args(op, tensor, preserve_sparsity)
quantized_tensor = (
np.around(tensor / q_param.scale).astype(np.int32) + q_param.zero_point
)
quantized_tensor = np.maximum(0, np.minimum(quantized_tensor, 255))
op.arg.extend(
[
utils.MakeArgument("values", quantized_tensor.astype(np.uint8).tobytes()),
utils.MakeArgument("shape", quantized_tensor.shape),
]
)
return op, q_param
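# A hedged usage sketch of the round-shift-clamp arithmetic above; the scale
# and zero_point values are made up rather than produced by
# choose_quantization_params.
def _example_uint8_quantize():
    tensor = np.array([-1.0, 0.0, 0.5, 2.0])
    scale, zero_point = 0.01, 100
    q = np.around(tensor / scale).astype(np.int32) + zero_point
    q = np.maximum(0, np.minimum(q, 255)).astype(np.uint8)
    return q  # array([  0, 100, 150, 255], dtype=uint8)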
def create_int8_bias_tensor_fill(tensor, out_blob_name, x_q_param, w_q_param):
"""
Similar to create_int8_given_tensor_fill, but for bias blobs to be stored
as int32.
"""
scale = x_q_param.scale * w_q_param.scale
    quantized_tensor = np.around(tensor / scale).astype(np.int32)
    # np.reshape returns a new array rather than modifying in place, so the
    # result must be assigned (bias blobs are 1-D, so the shape is unchanged).
    quantized_tensor = quantized_tensor.reshape(-1)
op = core.CreateOperator("Int8GivenIntTensorFill", [], out_blob_name)
op.arg.extend(
[
utils.MakeArgument("values", quantized_tensor),
utils.MakeArgument("shape", quantized_tensor.shape),
]
)
q_param = hardcode_scale_zp.QuantizationParam(scale, 0)
add_quantization_param_args_(op, q_param)
    return op
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/quantization/server/utils.py | 0.684159 | 0.294386 | utils.py | pypi |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from textwrap import dedent
from subprocess import call
def parse_lines(lines):
# States
EMPTY = 0
OP = 1
MACRO = 2
parse_state = EMPTY
# Preprocess the macros
curr_macro = ""
macros = {}
index = 0
while index < len(lines):
line = lines[index]
if line.lower().startswith("macro"):
assert parse_state == EMPTY
macro_line = line.split(" ")
# Support macros that look like attributes
# e.g. macro - CONV_LIKE
curr_macro = " ".join(macro_line[1:])
assert curr_macro not in macros, 'Macro "{}" defined twice.'.format(
curr_macro
)
macros[curr_macro] = []
parse_state = MACRO
lines = lines[:index] + lines[index + 1 :]
continue
elif line.lower().startswith("endmacro"):
assert parse_state == MACRO
parse_state = EMPTY
lines = lines[:index] + lines[index + 1 :]
continue
elif parse_state == MACRO:
macros[curr_macro].append(line)
lines = lines[:index] + lines[index + 1 :]
continue
index += 1
index = 0
while index < len(lines):
line = lines[index]
if line in macros:
lines = lines[:index] + macros[line] + lines[index + 1 :]
index += len(macros[line]) - 1
index += 1
# Now parse the file
curr_op = ""
# dict of the form
# opName : { attributes: [], ... }
ops = {}
# To preserve parsing order for dependencies (for things like init_from)
op_list = []
for line in lines:
if not len(line):
continue
if line[0] == "-":
            assert parse_state == OP
attr = [_.strip() for _ in line[1:].split(":")]
assert attr[0][0].isupper()
if len(attr) == 2: # attribute : type
ops[curr_op]["attributes"].append((attr[0], attr[1]))
            elif len(attr) == 3:  # attribute : type : default
ops[curr_op]["attributes"].append((attr[0], attr[1], attr[2]))
else:
op = [l.strip() for l in line.split(":")]
assert len(op[0].split(" ")) == 1
parse_state = OP
curr_op = op[0]
assert curr_op not in ops
ops[curr_op] = {}
op_list.append(curr_op)
if len(op) > 1:
ops[curr_op]["init_from"] = [op[1]]
ops[curr_op]["attributes"] = []
return ops, op_list
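# A hedged sketch of the ops.def dialect accepted by parse_lines, inferred
# from the parser above rather than from any written spec; the op and
# attribute names below are invented.
_EXAMPLE_OP_DEF = [
    "macro - CONV_LIKE",
    "- KernelShape : vector<int>",
    "endmacro",
    "Conv : ConvRelu",
    "- CONV_LIKE",
    "- Group : int : 1",
]
# parse_lines(_EXAMPLE_OP_DEF) yields roughly:
#   ops == {"Conv": {"init_from": ["ConvRelu"],
#                    "attributes": [("KernelShape", "vector<int>"),
#                                   ("Group", "int", "1")]}}
#   op_list == ["Conv"]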
def gen_class(op, op_def):
attributes = op_def["attributes"]
attribute_args = []
default_init = "NeuralNetOperator(NNKind::{op})".format(op=op)
attribute_init = [default_init]
attribute_declarations = []
attribute_getters = []
attribute_setters = []
for attr in attributes:
lower_name = attr[0][0].lower() + attr[0][1:]
private_name = lower_name + "_"
default_arg = "" if len(attr) < 3 else " = {}".format(attr[2])
name = attr[0]
t = attr[1]
attr_arg = "{type} {lower_name}".format(
type=t, lower_name=lower_name + default_arg
)
attr_init = "{private_name}({lower_name})".format(
private_name=private_name, lower_name=lower_name)
attr_declare = "{type} {private_name};".format(
type=t, private_name=private_name)
attr_get = dedent(
"""
{type} get{name}() const {{
return {private_name};
}}
""".format(
type=t, name=name, private_name=private_name
)
)
attr_set = dedent(
"""
void set{name}({type} {lower_name}) {{
{private_name} = {lower_name};
}}
""".format(
type=t, name=name, private_name=private_name, lower_name=lower_name
)
)
attribute_args.append(attr_arg)
attribute_init.append(attr_init)
attribute_declarations.append(attr_declare)
attribute_getters.append(attr_get)
attribute_setters.append(attr_set)
extra_init = ""
if "init_from" in op_def:
for other_op in op_def["init_from"]:
lower_other_op = other_op[0].lower() + other_op[1:]
other_init = [default_init]
for attr in attributes:
lower_name = attr[0][0].lower() + attr[0][1:]
private_name = lower_name + "_"
other_init.append(
"{private_name}({other_op}.get{name}())".format(
name=attr[0], private_name=private_name, other_op=lower_other_op
)
)
init = dedent(
"""
{op}(const {other_op}& {lower_other_op}) :
{other_init} {{}}
""".format(
op=op,
other_op=other_op,
lower_other_op=lower_other_op,
other_init=",\n ".join(other_init),
)
)
extra_init += init
return dedent(
"""
class {op} : public NeuralNetOperator {{
public:
{op}({attribute_args}) :
{attribute_init} {{}}
{extra_init}
~{op}() {{}}
NOMNIGRAPH_DEFINE_NN_RTTI({op});
{getters}{setters}
private:
{attribute_declarations}
}};
""".format(
op=op,
extra_init=extra_init,
getters="".join(attribute_getters),
setters="".join(attribute_setters),
attribute_args=",\n".join(attribute_args),
attribute_init=",\n".join(attribute_init),
attribute_declarations="\n".join(attribute_declarations),
)
)
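# For orientation, a hedged demo of the generator above on a hypothetical
# one-attribute op; the emitted C++ is unformatted until clang-format runs.
def _example_gen_class():
    print(gen_class("Relu", {"attributes": [("Alpha", "float", "1.0")]}))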
def gen_classes(ops, op_list):
f = ""
for op in op_list:
f += gen_class(op, ops[op])
return f
def gen_enum(op_list):
    return ",\n".join(op_list) + "\n"
def gen_names(op_list):
f = ""
for op in op_list:
f += dedent(
"""
case NNKind::{name}:
return \"{name}\";
""".format(
name=op
)
)
return f
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate op files.")
parser.add_argument("--install_dir", help="installation directory")
parser.add_argument("--source_def", help="ops.def", action="append")
args = parser.parse_args()
install_dir = args.install_dir
sources = args.source_def
lines = []
for source in sources:
with open(source, "rb") as f:
lines_tmp = f.readlines()
lines += [l.strip().decode("utf-8") for l in lines_tmp]
ops, op_list = parse_lines(lines)
with open(install_dir + "/OpClasses.h", "wb") as f:
f.write(gen_classes(ops, op_list).encode("utf-8"))
with open(install_dir + "/OpNames.h", "wb") as f:
f.write(gen_names(op_list).encode("utf-8"))
with open(install_dir + "/OpEnum.h", "wb") as f:
f.write(gen_enum(op_list).encode("utf-8"))
try:
cmd = ["clang-format", "-i", install_dir + "/OpClasses.h"]
call(cmd)
cmd = ["clang-format", "-i", install_dir + "/OpNames.h"]
call(cmd)
cmd = ["clang-format", "-i", install_dir + "/OpEnum.h"]
call(cmd)
except Exception:
        pass
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/caffe2/core/nomnigraph/op_gen.py | 0.441673 | 0.215867 | op_gen.py | pypi |
import torch
import torch.nn.functional as F
from ._lowrank import svd_lowrank, pca_lowrank
from ._overrides import has_torch_function, handle_torch_function
from ._jit_internal import boolean_dispatch, List
from ._jit_internal import _overload as overload
from torch._six import PY2
Tensor = torch.Tensor
from torch import _VF
__all__ = [
'align_tensors',
'broadcast_tensors',
'cartesian_prod',
'cdist',
'chain_matmul',
'einsum',
'lu',
'lu_unpack',
'norm',
'meshgrid',
'pca_lowrank',
'split',
'stft',
'svd_lowrank',
'tensordot',
'unique',
'unique_consecutive',
]
def broadcast_tensors(*tensors):
r"""broadcast_tensors(*tensors) -> List of Tensors
Broadcasts the given tensors according to :ref:`broadcasting-semantics`.
Args:
*tensors: any number of tensors of the same type
.. warning::
More than one element of a broadcasted tensor may refer to a single
memory location. As a result, in-place operations (especially ones that
are vectorized) may result in incorrect behavior. If you need to write
to the tensors, please clone them first.
Example::
>>> x = torch.arange(3).view(1, 3)
>>> y = torch.arange(2).view(2, 1)
>>> a, b = torch.broadcast_tensors(x, y)
>>> a.size()
torch.Size([2, 3])
>>> a
tensor([[0, 1, 2],
[0, 1, 2]])
"""
if not torch.jit.is_scripting():
if any(type(t) is not Tensor for t in tensors) and has_torch_function(tensors):
return handle_torch_function(broadcast_tensors, tensors, *tensors)
return _VF.broadcast_tensors(tensors)
def split(tensor, split_size_or_sections, dim=0):
r"""Splits the tensor into chunks. Each chunk is a view of the original tensor.
If :attr:`split_size_or_sections` is an integer type, then :attr:`tensor` will
be split into equally sized chunks (if possible). Last chunk will be smaller if
the tensor size along the given dimension :attr:`dim` is not divisible by
:attr:`split_size`.
If :attr:`split_size_or_sections` is a list, then :attr:`tensor` will be split
into ``len(split_size_or_sections)`` chunks with sizes in :attr:`dim` according
to :attr:`split_size_or_sections`.
Arguments:
tensor (Tensor): tensor to split.
split_size_or_sections (int) or (list(int)): size of a single chunk or
list of sizes for each chunk
dim (int): dimension along which to split the tensor.
"""
if not torch.jit.is_scripting():
if type(tensor) is not Tensor and has_torch_function((tensor,)):
return handle_torch_function(split, (tensor,), tensor, split_size_or_sections,
dim=dim)
# Overwriting reason:
# This dispatches to two ATen functions depending on the type of
# split_size_or_sections. The branching code is in tensor.py, which we
# call here.
return tensor.split(split_size_or_sections, dim)
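# A quick hedged check of both split modes; the helper below is illustrative
# and not part of the torch API.
def _example_split():
    t = torch.arange(10)
    assert [c.tolist() for c in split(t, 3)] == [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    assert [c.tolist() for c in split(t, [4, 6])] == [[0, 1, 2, 3], [4, 5, 6, 7, 8, 9]]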
# equivalent to itertools.product(indices)
def _indices_product(indices):
# type: (List[int]) -> (List[List[int]])
empty_list = torch.jit.annotate(List[int], [])
result = [empty_list]
for idx in indices:
result_temp = torch.jit.annotate(List[List[int]], [])
for res in result:
for i in range(idx):
result_temp.append(res + [i])
result = result_temp
return result
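# Sketch: _indices_product([2, 3]) enumerates the same index tuples as
# itertools.product(range(2), range(3)), but eagerly and as lists so that
# TorchScript can consume it.
def _example_indices_product():
    assert _indices_product([2, 3]) == [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]]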
def _index_tensor_with_indices_list(tensor, indices):
# type: (Tensor, List[int]) -> Tensor
out = tensor
for index in indices:
out = out[index]
return out
def lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True):
# type: (Tensor, Tensor, bool, bool) -> (Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]])
r"""Unpacks the data and pivots from a LU factorization of a tensor.
    Returns a tuple of tensors as ``(the P permutation matrix, the L tensor, the U tensor)``.
Arguments:
LU_data (Tensor): the packed LU factorization data
LU_pivots (Tensor): the packed LU factorization pivots
unpack_data (bool): flag indicating if the data should be unpacked
unpack_pivots (bool): flag indicating if the pivots should be unpacked
Examples::
>>> A = torch.randn(2, 3, 3)
>>> A_LU, pivots = A.lu()
>>> P, A_L, A_U = torch.lu_unpack(A_LU, pivots)
>>>
>>> # can recover A from factorization
>>> A_ = torch.bmm(P, torch.bmm(A_L, A_U))
>>> # LU factorization of a rectangular matrix:
>>> A = torch.randn(2, 3, 2)
>>> A_LU, pivots = A.lu()
>>> P, A_L, A_U = torch.lu_unpack(A_LU, pivots)
>>> P
tensor([[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]]])
>>> A_L
tensor([[[ 1.0000, 0.0000],
[ 0.4763, 1.0000],
[ 0.3683, 0.1135]],
[[ 1.0000, 0.0000],
[ 0.2957, 1.0000],
[-0.9668, -0.3335]]])
>>> A_U
tensor([[[ 2.1962, 1.0881],
[ 0.0000, -0.8681]],
[[-1.0947, 0.3736],
[ 0.0000, 0.5718]]])
>>> A_ = torch.bmm(P, torch.bmm(A_L, A_U))
>>> torch.norm(A_ - A)
tensor(2.9802e-08)
"""
if not torch.jit.is_scripting():
tens_ops = (LU_data, LU_pivots)
        if any(type(t) is not Tensor for t in tens_ops) and has_torch_function(tens_ops):
return handle_torch_function(
lu_unpack, tens_ops, LU_data, LU_pivots, unpack_data=unpack_data,
unpack_pivots=unpack_pivots)
shape = LU_data.shape
# In generalized LU factorization, the following shape relations hold:
# A.shape[-2:] == (m, n)
# P.shape[-2:] == (m, m)
# L.shape[-2:] == (m, k)
# U.shape[-2:] == (k, n)
# where k = min(m, n)
m, n = shape[-2:]
k = min(m, n)
if unpack_data:
U = LU_data.triu()
if m != k:
U = U.narrow(-2, 0, k)
L = LU_data.tril()
if k != n:
L = L.narrow(-1, 0, k)
L.diagonal(dim1=-2, dim2=-1).fill_(1)
else:
L = U = None
if unpack_pivots:
LU_pivots_zero_idx = LU_pivots - 1
if LU_data.dim() > 2:
P = torch.eye(m, device=LU_data.device, dtype=LU_data.dtype) \
.expand(shape[:-1] + (m,)) \
.clone(memory_format=torch.contiguous_format)
# TODO: rewrite when TorchScript supports product and map as
# product(*map(lambda x: list(range(x)), shape[:-2])) when issue 33781 is fixed
indices = _indices_product(shape[:-2])
for idx in indices:
final_order = [i for i in range(m)] # noqa: C416 TODO: rewrite as list(range(m))
for k, j in enumerate(_index_tensor_with_indices_list(LU_pivots_zero_idx, idx)):
final_order[k], final_order[j] = final_order[j], final_order[k]
# TODO: remove _index_tensor_with_indices_list when TorchScript supports indexing Tensor with list
p_idx = _index_tensor_with_indices_list(P, idx)
p_idx.copy_(p_idx.index_select(1, torch.as_tensor(final_order, device=LU_pivots.device)))
else:
P = torch.eye(m, device=LU_data.device, dtype=LU_data.dtype)
final_order = [i for i in range(m)] # noqa: C416 TODO: rewrite as list(range(m))
for k, j, in enumerate(LU_pivots_zero_idx):
final_order[k], final_order[j] = final_order[j], final_order[k]
P = P.index_select(1, torch.as_tensor(final_order, device=LU_pivots.device))
else:
P = None
return P, L, U
def einsum(equation, *operands):
r"""einsum(equation, *operands) -> Tensor
This function provides a way of computing multilinear expressions (i.e. sums of products) using the
Einstein summation convention.
Args:
equation (string): The equation is given in terms of lower case letters (indices) to be associated
with each dimension of the operands and result. The left hand side lists the operands
dimensions, separated by commas. There should be one index letter per tensor dimension.
The right hand side follows after `->` and gives the indices for the output.
            If the `->` and right hand side are omitted, it is implicitly defined as the alphabetically
            sorted list of all indices appearing exactly once in the left hand side.
            The indices not appearing in the output are summed over after multiplying the operands'
            entries.
If an index appears several times for the same operand, a diagonal is taken.
Ellipses `...` represent a fixed number of dimensions. If the right hand side is inferred,
the ellipsis dimensions are at the beginning of the output.
operands (Tensor): The operands to compute the Einstein sum of.
Examples::
>>> x = torch.randn(5)
>>> y = torch.randn(4)
>>> torch.einsum('i,j->ij', x, y) # outer product
tensor([[-0.0570, -0.0286, -0.0231, 0.0197],
[ 1.2616, 0.6335, 0.5113, -0.4351],
[ 1.4452, 0.7257, 0.5857, -0.4984],
[-0.4647, -0.2333, -0.1883, 0.1603],
[-1.1130, -0.5588, -0.4510, 0.3838]])
>>> A = torch.randn(3,5,4)
>>> l = torch.randn(2,5)
>>> r = torch.randn(2,4)
>>> torch.einsum('bn,anm,bm->ba', l, A, r) # compare torch.nn.functional.bilinear
tensor([[-0.3430, -5.2405, 0.4494],
[ 0.3311, 5.5201, -3.0356]])
>>> As = torch.randn(3,2,5)
>>> Bs = torch.randn(3,5,4)
>>> torch.einsum('bij,bjk->bik', As, Bs) # batch matrix multiplication
tensor([[[-1.0564, -1.5904, 3.2023, 3.1271],
[-1.6706, -0.8097, -0.8025, -2.1183]],
[[ 4.2239, 0.3107, -0.5756, -0.2354],
[-1.4558, -0.3460, 1.5087, -0.8530]],
[[ 2.8153, 1.8787, -4.3839, -1.2112],
[ 0.3728, -2.1131, 0.0921, 0.8305]]])
>>> A = torch.randn(3, 3)
>>> torch.einsum('ii->i', A) # diagonal
tensor([-0.7825, 0.8291, -0.1936])
>>> A = torch.randn(4, 3, 3)
>>> torch.einsum('...ii->...i', A) # batch diagonal
tensor([[-1.0864, 0.7292, 0.0569],
[-0.9725, -1.0270, 0.6493],
[ 0.5832, -1.1716, -1.5084],
[ 0.4041, -1.1690, 0.8570]])
>>> A = torch.randn(2, 3, 4, 5)
>>> torch.einsum('...ij->...ji', A).shape # batch permute
torch.Size([2, 3, 5, 4])
"""
if not torch.jit.is_scripting():
if any(type(t) is not Tensor for t in operands) and has_torch_function(operands):
return handle_torch_function(einsum, operands, *operands)
if len(operands) == 1 and isinstance(operands[0], (list, tuple)):
# the old interface of passing the operands as one list argument
operands = operands[0]
return _VF.einsum(equation, operands)
def meshgrid(*tensors):
r"""Take :math:`N` tensors, each of which can be either scalar or 1-dimensional
vector, and create :math:`N` N-dimensional grids, where the :math:`i` :sup:`th` grid is defined by
expanding the :math:`i` :sup:`th` input over dimensions defined by other inputs.
Args:
tensors (list of Tensor): list of scalars or 1 dimensional tensors. Scalars will be
treated as tensors of size :math:`(1,)` automatically
Returns:
seq (sequence of Tensors): If the input has :math:`k` tensors of size
:math:`(N_1,), (N_2,), \ldots , (N_k,)`, then the output would also have :math:`k` tensors,
where all tensors are of size :math:`(N_1, N_2, \ldots , N_k)`.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> y = torch.tensor([4, 5, 6])
>>> grid_x, grid_y = torch.meshgrid(x, y)
>>> grid_x
tensor([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> grid_y
tensor([[4, 5, 6],
[4, 5, 6],
[4, 5, 6]])
"""
if not torch.jit.is_scripting():
if any(type(t) is not Tensor for t in tensors) and has_torch_function(tensors):
return handle_torch_function(meshgrid, tensors, *tensors)
if len(tensors) == 1 and isinstance(tensors[0], (list, tuple)):
# the old interface of passing the operands as one list argument
tensors = tensors[0]
return _VF.meshgrid(tensors)
def stft(input, n_fft, hop_length=None, win_length=None, window=None,
center=True, pad_mode='reflect', normalized=False, onesided=True):
# type: (Tensor, int, Optional[int], Optional[int], Optional[Tensor], bool, str, bool, bool) -> Tensor
r"""Short-time Fourier transform (STFT).
Ignoring the optional batch dimension, this method computes the following
expression:
.. math::
X[m, \omega] = \sum_{k = 0}^{\text{win\_length-1}}%
\text{window}[k]\ \text{input}[m \times \text{hop\_length} + k]\ %
\exp\left(- j \frac{2 \pi \cdot \omega k}{\text{win\_length}}\right),
    where :math:`m` is the index of the sliding window, and :math:`\omega` is
    the frequency such that :math:`0 \leq \omega < \text{n\_fft}`.
* :attr:`input` must be either a 1-D time sequence or a 2-D batch of time
sequences.
* If :attr:`hop_length` is ``None`` (default), it is treated as equal to
``floor(n_fft / 4)``.
* If :attr:`win_length` is ``None`` (default), it is treated as equal to
:attr:`n_fft`.
* :attr:`window` can be a 1-D tensor of size :attr:`win_length`, e.g., from
:meth:`torch.hann_window`. If :attr:`window` is ``None`` (default), it is
treated as if having :math:`1` everywhere in the window. If
:math:`\text{win\_length} < \text{n\_fft}`, :attr:`window` will be padded on
both sides to length :attr:`n_fft` before being applied.
* If :attr:`center` is ``True`` (default), :attr:`input` will be padded on
both sides so that the :math:`t`-th frame is centered at time
:math:`t \times \text{hop\_length}`. Otherwise, the :math:`t`-th frame
begins at time :math:`t \times \text{hop\_length}`.
* :attr:`pad_mode` determines the padding method used on :attr:`input` when
:attr:`center` is ``True``. See :meth:`torch.nn.functional.pad` for
all available options. Default is ``"reflect"``.
* If :attr:`onesided` is ``True`` (default), only values for :math:`\omega`
in :math:`\left[0, 1, 2, \dots, \left\lfloor \frac{\text{n\_fft}}{2} \right\rfloor + 1\right]`
are returned because the real-to-complex Fourier transform satisfies the
conjugate symmetry, i.e., :math:`X[m, \omega] = X[m, \text{n\_fft} - \omega]^*`.
* If :attr:`normalized` is ``True`` (default is ``False``), the function
returns the normalized STFT results, i.e., multiplied by :math:`(\text{frame\_length})^{-0.5}`.
Returns the real and the imaginary parts together as one tensor of size
:math:`(* \times N \times T \times 2)`, where :math:`*` is the optional
batch size of :attr:`input`, :math:`N` is the number of frequencies where
STFT is applied, :math:`T` is the total number of frames used, and each pair
in the last dimension represents a complex number as the real part and the
imaginary part.
    .. warning::
        This function changed signature at version 0.4.1. Calling with the
        previous signature may cause an error or return an incorrect result.
Arguments:
input (Tensor): the input tensor
n_fft (int): size of Fourier transform
hop_length (int, optional): the distance between neighboring sliding window
frames. Default: ``None`` (treated as equal to ``floor(n_fft / 4)``)
win_length (int, optional): the size of window frame and STFT filter.
Default: ``None`` (treated as equal to :attr:`n_fft`)
window (Tensor, optional): the optional window function.
Default: ``None`` (treated as window of all :math:`1` s)
center (bool, optional): whether to pad :attr:`input` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
Default: ``True``
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. Default: ``"reflect"``
normalized (bool, optional): controls whether to return the normalized STFT results
Default: ``False``
onesided (bool, optional): controls whether to return half of results to
avoid redundancy Default: ``True``
Returns:
Tensor: A tensor containing the STFT result with shape described above
"""
if not torch.jit.is_scripting():
if type(input) is not Tensor and has_torch_function((input,)):
return handle_torch_function(
stft, (input,), input, n_fft, hop_length=hop_length, win_length=win_length,
window=window, center=center, pad_mode=pad_mode, normalized=normalized,
onesided=onesided)
# TODO: after having proper ways to map Python strings to ATen Enum, move
# this and F.pad to ATen.
if center:
signal_dim = input.dim()
extended_shape = [1] * (3 - signal_dim) + list(input.size())
pad = int(n_fft // 2)
input = F.pad(input.view(extended_shape), (pad, pad), pad_mode)
input = input.view(input.shape[-signal_dim:])
return _VF.stft(input, n_fft, hop_length, win_length, window, normalized, onesided)
del torch.unique_dim
def unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
r"""Returns the unique elements of the input tensor.
.. note:: This function is different from :func:`torch.unique_consecutive` in the sense that
this function also eliminates non-consecutive duplicate values.
    .. note:: Currently in the CUDA implementation and the CPU implementation when dim is specified,
        `torch.unique` always sorts the tensor at the beginning regardless of the :attr:`sorted` argument.
        Sorting could be slow, so if your input tensor is already sorted, it is recommended to use
        :func:`torch.unique_consecutive` which avoids the sorting.
Arguments:
input (Tensor): the input tensor
sorted (bool): Whether to sort the unique elements in ascending order
before returning as output.
return_inverse (bool): Whether to also return the indices for where
elements in the original input ended up in the returned unique list.
return_counts (bool): Whether to also return the counts for each unique
element.
dim (int): the dimension to apply unique. If ``None``, the unique of the
flattened input is returned. default: ``None``
Returns:
(Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing
- **output** (*Tensor*): the output list of unique scalar elements.
- **inverse_indices** (*Tensor*): (optional) if
:attr:`return_inverse` is True, there will be an additional
returned tensor (same shape as input) representing the indices
for where elements in the original input map to in the output;
otherwise, this function will only return a single tensor.
- **counts** (*Tensor*): (optional) if
:attr:`return_counts` is True, there will be an additional
returned tensor (same shape as output or output.size(dim),
if dim was specified) representing the number of occurrences
for each unique value or tensor.
Example::
>>> output = torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long))
>>> output
tensor([ 2, 3, 1])
>>> output, inverse_indices = torch.unique(
torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_inverse=True)
>>> output
tensor([ 1, 2, 3])
>>> inverse_indices
tensor([ 0, 2, 1, 2])
>>> output, inverse_indices = torch.unique(
torch.tensor([[1, 3], [2, 3]], dtype=torch.long), sorted=True, return_inverse=True)
>>> output
tensor([ 1, 2, 3])
>>> inverse_indices
tensor([[ 0, 2],
[ 1, 2]])
"""
if not torch.jit.is_scripting():
if type(input) is not Tensor and has_torch_function((input,)):
return handle_torch_function(
unique, (input,), input, sorted=sorted, return_inverse=return_inverse,
return_counts=return_counts, dim=dim)
if dim is not None:
output, inverse_indices, counts = _VF.unique_dim(
input,
dim,
sorted=sorted,
return_inverse=return_inverse,
return_counts=return_counts,
)
else:
output, inverse_indices, counts = torch._unique2(
input,
sorted=sorted,
return_inverse=return_inverse,
return_counts=return_counts,
)
if return_inverse and return_counts:
return output, inverse_indices, counts
elif return_inverse:
return output, inverse_indices
elif return_counts:
return output, counts
else:
return output
def unique_consecutive(input, return_inverse=False, return_counts=False, dim=None):
r"""Eliminates all but the first element from every consecutive group of equivalent elements.
    .. note:: This function is different from :func:`torch.unique` in the sense that this function
        only eliminates consecutive duplicate values. Its semantics are similar to `std::unique`
        in C++.
Arguments:
input (Tensor): the input tensor
return_inverse (bool): Whether to also return the indices for where
elements in the original input ended up in the returned unique list.
return_counts (bool): Whether to also return the counts for each unique
element.
dim (int): the dimension to apply unique. If ``None``, the unique of the
flattened input is returned. default: ``None``
Returns:
(Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing
- **output** (*Tensor*): the output list of unique scalar elements.
- **inverse_indices** (*Tensor*): (optional) if
:attr:`return_inverse` is True, there will be an additional
returned tensor (same shape as input) representing the indices
for where elements in the original input map to in the output;
otherwise, this function will only return a single tensor.
- **counts** (*Tensor*): (optional) if
:attr:`return_counts` is True, there will be an additional
returned tensor (same shape as output or output.size(dim),
if dim was specified) representing the number of occurrences
for each unique value or tensor.
Example::
>>> x = torch.tensor([1, 1, 2, 2, 3, 1, 1, 2])
>>> output = torch.unique_consecutive(x)
>>> output
tensor([1, 2, 3, 1, 2])
>>> output, inverse_indices = torch.unique_consecutive(x, return_inverse=True)
>>> output
tensor([1, 2, 3, 1, 2])
>>> inverse_indices
tensor([0, 0, 1, 1, 2, 3, 3, 4])
>>> output, counts = torch.unique_consecutive(x, return_counts=True)
>>> output
tensor([1, 2, 3, 1, 2])
>>> counts
tensor([2, 2, 1, 2, 1])
"""
if not torch.jit.is_scripting():
if type(input) is not Tensor and has_torch_function((input,)):
return handle_torch_function(
unique_consecutive, (input,), input, return_inverse=return_inverse,
return_counts=return_counts, dim=dim)
output, inverse_indices, counts = _VF.unique_consecutive(
input, return_inverse=return_inverse, return_counts=return_counts, dim=dim)
if return_inverse and return_counts:
return output, inverse_indices, counts
if return_inverse:
return output, inverse_indices
if return_counts:
return output, counts
return output
def tensordot(a, b, dims=2):
r"""Returns a contraction of a and b over multiple dimensions.
:attr:`tensordot` implements a generalized matrix product.
Args:
a (Tensor): Left tensor to contract
b (Tensor): Right tensor to contract
dims (int or tuple of two lists of integers): number of dimensions to
contract or explicit lists of dimensions for :attr:`a` and
:attr:`b` respectively
When called with a non-negative integer argument :attr:`dims` = :math:`d`, and
the number of dimensions of :attr:`a` and :attr:`b` is :math:`m` and :math:`n`,
respectively, :func:`~torch.tensordot` computes
.. math::
r_{i_0,...,i_{m-d}, i_d,...,i_n}
= \sum_{k_0,...,k_{d-1}} a_{i_0,...,i_{m-d},k_0,...,k_{d-1}} \times b_{k_0,...,k_{d-1}, i_d,...,i_n}.
When called with :attr:`dims` of the list form, the given dimensions will be contracted
in place of the last :math:`d` of :attr:`a` and the first :math:`d` of :math:`b`. The sizes
in these dimensions must match, but :func:`~torch.tensordot` will deal with broadcasted
dimensions.
Examples::
>>> a = torch.arange(60.).reshape(3, 4, 5)
>>> b = torch.arange(24.).reshape(4, 3, 2)
>>> torch.tensordot(a, b, dims=([1, 0], [0, 1]))
tensor([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> a = torch.randn(3, 4, 5, device='cuda')
>>> b = torch.randn(4, 5, 6, device='cuda')
        >>> torch.tensordot(a, b, dims=2).cpu()
tensor([[ 8.3504, -2.5436, 6.2922, 2.7556, -1.0732, 3.2741],
[ 3.3161, 0.0704, 5.0187, -0.4079, -4.3126, 4.8744],
[ 0.8223, 3.9445, 3.2168, -0.2400, 3.4117, 1.7780]])
"""
if not torch.jit.is_scripting():
if (type(a) is not Tensor or type(b) is not Tensor) and has_torch_function((a, b)):
return handle_torch_function(tensordot, (a, b), a, b, dims=dims)
if isinstance(dims, (list, tuple)) or \
(isinstance(dims, torch.Tensor) and dims.numel() > 1):
dims_a, dims_b = dims
else:
if isinstance(dims, torch.Tensor):
dims = dims.item()
if dims < 0:
raise RuntimeError("tensordot expects dims >= 0, but got dims={}".format(dims))
dims_a = list(range(-dims, 0))
dims_b = list(range(dims))
return _VF.tensordot(a, b, dims_a, dims_b)
def cartesian_prod(*tensors):
"""Do cartesian product of the given sequence of tensors. The behavior is similar to
python's `itertools.product`.
Arguments:
*tensors: any number of 1 dimensional tensors.
Returns:
Tensor: A tensor equivalent to converting all the input tensors into lists,
do `itertools.product` on these lists, and finally convert the resulting list
into tensor.
Example::
>>> a = [1, 2, 3]
>>> b = [4, 5]
>>> list(itertools.product(a, b))
[(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)]
>>> tensor_a = torch.tensor(a)
>>> tensor_b = torch.tensor(b)
>>> torch.cartesian_prod(tensor_a, tensor_b)
tensor([[1, 4],
[1, 5],
[2, 4],
[2, 5],
[3, 4],
[3, 5]])
"""
if not torch.jit.is_scripting():
if any(type(t) is not Tensor for t in tensors) and has_torch_function(tensors):
return handle_torch_function(cartesian_prod, tensors, *tensors)
return _VF.cartesian_prod(tensors)
def cdist(x1, x2, p=2., compute_mode='use_mm_for_euclid_dist_if_necessary'):
# type: (Tensor, Tensor, float, str) -> (Tensor)
r"""Computes batched the p-norm distance between each pair of the two collections of row vectors.
Args:
x1 (Tensor): input tensor of shape :math:`B \times P \times M`.
x2 (Tensor): input tensor of shape :math:`B \times R \times M`.
p: p value for the p-norm distance to calculate between each vector pair
:math:`\in [0, \infty]`.
compute_mode:
'use_mm_for_euclid_dist_if_necessary' - will use matrix multiplication approach to calculate
euclidean distance (p = 2) if P > 25 or R > 25
'use_mm_for_euclid_dist' - will always use matrix multiplication approach to calculate
euclidean distance (p = 2)
'donot_use_mm_for_euclid_dist' - will never use matrix multiplication approach to calculate
euclidean distance (p = 2)
Default: use_mm_for_euclid_dist_if_necessary.
If x1 has shape :math:`B \times P \times M` and x2 has shape :math:`B \times R \times M` then the
output will have shape :math:`B \times P \times R`.
This function is equivalent to `scipy.spatial.distance.cdist(input,'minkowski', p=p)`
if :math:`p \in (0, \infty)`. When :math:`p = 0` it is equivalent to
`scipy.spatial.distance.cdist(input, 'hamming') * M`. When :math:`p = \infty`, the closest
scipy function is `scipy.spatial.distance.cdist(xn, lambda x, y: np.abs(x - y).max())`.
Example:
>>> a = torch.tensor([[0.9041, 0.0196], [-0.3108, -2.4423], [-0.4821, 1.059]])
>>> a
tensor([[ 0.9041, 0.0196],
[-0.3108, -2.4423],
[-0.4821, 1.0590]])
>>> b = torch.tensor([[-2.1763, -0.4713], [-0.6986, 1.3702]])
>>> b
tensor([[-2.1763, -0.4713],
[-0.6986, 1.3702]])
>>> torch.cdist(a, b, p=2)
tensor([[3.1193, 2.0959],
[2.7138, 3.8322],
[2.2830, 0.3791]])
"""
if not torch.jit.is_scripting():
if (type(x1) is not Tensor or type(x2) is not Tensor) and has_torch_function((x1, x2)):
return handle_torch_function(
cdist, (x1, x2), x1, x2, p=p, compute_mode=compute_mode)
if compute_mode == 'use_mm_for_euclid_dist_if_necessary':
return _VF.cdist(x1, x2, p, None)
elif compute_mode == 'use_mm_for_euclid_dist':
return _VF.cdist(x1, x2, p, 1)
elif compute_mode == 'donot_use_mm_for_euclid_dist':
return _VF.cdist(x1, x2, p, 2)
else:
raise ValueError("{} is not a valid value for compute_mode".format(compute_mode))
# TODO: type dim as BroadcastingList when https://github.com/pytorch/pytorch/issues/33782 is fixed
@overload # noqa: 749
def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: 749
# type: (Tensor, str, Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor
pass
@overload # noqa: 749
def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: 749
# type: (Tensor, Optional[number], Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor
pass
@overload # noqa: 749
def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: 749
# type: (Tensor, Optional[number], Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor
pass
@overload # noqa: 749
def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: 749
# type: (Tensor, str, Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor
pass
def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: 749
r"""Returns the matrix norm or vector norm of a given tensor.
Args:
input (Tensor): the input tensor
p (int, float, inf, -inf, 'fro', 'nuc', optional): the order of norm. Default: ``'fro'``
The following norms can be calculated:
===== ============================ ==========================
ord matrix norm vector norm
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
Other as vec norm when dim is None sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
        dim (int, 2-tuple of ints, 2-list of ints, optional): If it is an int,
            the vector norm will be calculated; if it is a 2-tuple of ints, the matrix
            norm will be calculated. If the value is None, the matrix norm will be
            calculated when the input tensor has exactly two dimensions, and the
            vector norm will be calculated when the input tensor has exactly one
            dimension. If the input tensor has more than two dimensions, the vector
            norm will be applied to the last dimension.
keepdim (bool, optional): whether the output tensors have :attr:`dim`
retained or not. Ignored if :attr:`dim` = ``None`` and
:attr:`out` = ``None``. Default: ``False``
out (Tensor, optional): the output tensor. Ignored if
:attr:`dim` = ``None`` and :attr:`out` = ``None``.
dtype (:class:`torch.dtype`, optional): the desired data type of
            returned tensor. If specified, the input tensor is cast to
            :attr:`dtype` while performing the operation. Default: None.
Example::
>>> import torch
>>> a = torch.arange(9, dtype= torch.float) - 4
>>> b = a.reshape((3, 3))
>>> torch.norm(a)
tensor(7.7460)
>>> torch.norm(b)
tensor(7.7460)
>>> torch.norm(a, float('inf'))
tensor(4.)
>>> torch.norm(b, float('inf'))
tensor(4.)
>>> c = torch.tensor([[ 1, 2, 3],[-1, 1, 4]] , dtype= torch.float)
>>> torch.norm(c, dim=0)
tensor([1.4142, 2.2361, 5.0000])
>>> torch.norm(c, dim=1)
tensor([3.7417, 4.2426])
>>> torch.norm(c, p=1, dim=1)
tensor([6., 6.])
>>> d = torch.arange(8, dtype= torch.float).reshape(2,2,2)
>>> torch.norm(d, dim=(1,2))
tensor([ 3.7417, 11.2250])
>>> torch.norm(d[0, :, :]), torch.norm(d[1, :, :])
(tensor(3.7417), tensor(11.2250))
"""
if not torch.jit.is_scripting():
if type(input) is not Tensor and has_torch_function((input,)):
return handle_torch_function(
norm, (input,), input, p=p, dim=dim, keepdim=keepdim, out=out, dtype=dtype)
# py2 considers isinstance(unicodestr, str) == False
if PY2 and isinstance(p, unicode):
p = str(p)
ndim = input.dim()
# catch default case
    if dim is None and out is None and dtype is None and p is not None:
        if isinstance(p, str):
            if p == "fro":
                return _VF.frobenius_norm(input)
            # a string p other than "fro" (i.e. "nuc") falls through to the
            # general handling below
        else:
            return _VF.norm(input, p)
# TODO: when https://github.com/pytorch/pytorch/issues/33782 is fixed
    # remove the overloads where dim is an int and replace with BroadcastingList1
# and remove next four lines, replace _dim with dim
if dim is not None:
if isinstance(dim, int):
_dim = [dim]
else:
_dim = dim
else:
_dim = None
if isinstance(p, str):
if p == "fro":
if dtype is not None:
raise ValueError("dtype argument is not supported in frobenius norm")
if _dim is None:
_dim = [i for i in range(ndim)] # noqa: C416 TODO: rewrite as list(range(m))
if out is None:
return _VF.frobenius_norm(input, _dim, keepdim=keepdim)
else:
return _VF.frobenius_norm(input, _dim, keepdim=keepdim, out=out)
elif p == "nuc":
if dtype is not None:
raise ValueError("dtype argument is not supported in nuclear norm")
if _dim is None:
if out is None:
return _VF.nuclear_norm(input, keepdim=keepdim)
else:
return _VF.nuclear_norm(input, keepdim=keepdim, out=out)
else:
if out is None:
return _VF.nuclear_norm(input, _dim, keepdim=keepdim)
else:
return _VF.nuclear_norm(input, _dim, keepdim=keepdim, out=out)
raise RuntimeError("only valid string values are 'fro' and 'nuc', found {}".format(p))
else:
if _dim is None:
_dim = [i for i in range(ndim)] # noqa: C416 TODO: rewrite as list(range(m))
if out is None:
if dtype is None:
return _VF.norm(input, p, _dim, keepdim=keepdim)
else:
return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype)
else:
if dtype is None:
return _VF.norm(input, p, _dim, keepdim=keepdim, out=out)
else:
return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype, out=out)
def chain_matmul(*matrices):
r"""Returns the matrix product of the :math:`N` 2-D tensors. This product is efficiently computed
using the matrix chain order algorithm which selects the order in which incurs the lowest cost in terms
of arithmetic operations (`[CLRS]`_). Note that since this is a function to compute the product, :math:`N`
needs to be greater than or equal to 2; if equal to 2 then a trivial matrix-matrix product is returned.
If :math:`N` is 1, then this is a no-op - the original matrix is returned as is.
Args:
matrices (Tensors...): a sequence of 2 or more 2-D tensors whose product is to be determined.
Returns:
Tensor: if the :math:`i^{th}` tensor was of dimensions :math:`p_{i} \times p_{i + 1}`, then the product
would be of dimensions :math:`p_{1} \times p_{N + 1}`.
Example::
>>> a = torch.randn(3, 4)
>>> b = torch.randn(4, 5)
>>> c = torch.randn(5, 6)
>>> d = torch.randn(6, 7)
>>> torch.chain_matmul(a, b, c, d)
tensor([[ -2.3375, -3.9790, -4.1119, -6.6577, 9.5609, -11.5095, -3.2614],
[ 21.4038, 3.3378, -8.4982, -5.2457, -10.2561, -2.4684, 2.7163],
[ -0.9647, -5.8917, -2.3213, -5.2284, 12.8615, -12.2816, -2.5095]])
.. _`[CLRS]`: https://mitpress.mit.edu/books/introduction-algorithms-third-edition
"""
if not torch.jit.is_scripting():
if any(type(t) is not Tensor for t in matrices) and has_torch_function(matrices):
return handle_torch_function(chain_matmul, matrices, *matrices)
return _VF.chain_matmul(matrices)
def _lu_impl(A, pivot=True, get_infos=False, out=None):
# type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor]
r"""Computes the LU factorization of a matrix or batches of matrices
:attr:`A`. Returns a tuple containing the LU factorization and
pivots of :attr:`A`. Pivoting is done if :attr:`pivot` is set to
``True``.
.. note::
The pivots returned by the function are 1-indexed. If :attr:`pivot` is ``False``,
then the returned pivots is a tensor filled with zeros of the appropriate size.
.. note::
LU factorization with :attr:`pivot` = ``False`` is not available for CPU, and attempting
to do so will throw an error. However, LU factorization with :attr:`pivot` = ``False`` is
available for CUDA.
.. note::
This function does not check if the factorization was successful or not if
:attr:`get_infos` is ``True`` since the status of the factorization is present in the
third element of the return tuple.
.. note::
In the case of batches of square matrices with size less or
equal to 32 on a CUDA device, the LU factorization is repeated
for singular matrices due to the bug in the MAGMA library (see
magma issue 13).
Arguments:
A (Tensor): the tensor to factor of size :math:`(*, m, n)`
pivot (bool, optional): controls whether pivoting is done. Default: ``True``
get_infos (bool, optional): if set to ``True``, returns an info IntTensor.
Default: ``False``
out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``,
then the elements in the tuple are Tensor, IntTensor,
and IntTensor. If :attr:`get_infos` is ``False``, then the
elements in the tuple are Tensor, IntTensor. Default: ``None``
Returns:
(Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing
- **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)`
- **pivots** (*IntTensor*): the pivots of size :math:`(*, m)`
- **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of
size :math:`(*)` where non-zero values indicate whether factorization for the matrix or
each minibatch has succeeded or failed
Example::
>>> A = torch.randn(2, 3, 3)
>>> A_LU, pivots = torch.lu(A)
>>> A_LU
tensor([[[ 1.3506, 2.5558, -0.0816],
[ 0.1684, 1.1551, 0.1940],
[ 0.1193, 0.6189, -0.5497]],
[[ 0.4526, 1.2526, -0.3285],
[-0.7988, 0.7175, -0.9701],
[ 0.2634, -0.9255, -0.3459]]])
>>> pivots
tensor([[ 3, 3, 3],
[ 3, 3, 3]], dtype=torch.int32)
>>> A_LU, pivots, info = torch.lu(A, get_infos=True)
>>> if info.nonzero().size(0) == 0:
... print('LU factorization succeeded for all samples!')
LU factorization succeeded for all samples!
"""
# If get_infos is True, then we don't need to check for errors and vice versa
return torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos))
def _check_list_size(out_len, get_infos, out):
# type: (int, bool, List[Tensor]) -> None
get_infos_int = 1 if get_infos else 0
    if out_len - get_infos_int != 2:
        raise TypeError("expected tuple of {} elements but got {}"
                        .format(2 + get_infos_int, out_len))
if not isinstance(out, (tuple, list)):
raise TypeError("argument 'out' must be tuple of Tensors, not {}"
.format(type(out).__name__))
def _lu_with_infos(A, pivot=True, get_infos=False, out=None):
# type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor, Tensor]]) -> Tuple[Tensor, Tensor, Tensor]
if not torch.jit.is_scripting():
if type(A) is not Tensor and has_torch_function((A,)):
return handle_torch_function(
lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out)
result = _lu_impl(A, pivot, get_infos, out)
if out is not None:
_check_list_size(len(out), get_infos, out)
for i in range(len(out)):
out[i].resize_as_(result[i]).copy_(result[i])
return out
else:
return result # A_LU, pivots, infos
def _lu_no_infos(A, pivot=True, get_infos=False, out=None):
# type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tensor]
    # need to check for torch_function here so that we exit early if any
    # input overrides lu via __torch_function__
if not torch.jit.is_scripting():
if type(A) is not Tensor and has_torch_function((A,)):
return handle_torch_function(
lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out)
result = _lu_impl(A, pivot, get_infos, out)
if out is not None:
_check_list_size(len(out), get_infos, out)
for i in range(len(out)):
out[i].resize_as_(result[i]).copy_(result[i])
return out
else:
return result[0], result[1] # A_LU, pivots
# The return type of lu depends on `get_infos`, so in order to resolve the output type
# of lu in TorchScript we need to statically know the value of `get_infos`
lu = boolean_dispatch(
arg_name='get_infos',
arg_index=2,
default=False,
if_true=_lu_with_infos,
if_false=_lu_no_infos,
module_name=__name__,
func_name='lu')
lu.__doc__ = _lu_impl.__doc__
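# Hedged illustration of the dispatch above: the arity of the returned tuple
# is selected by the value of get_infos.
def _example_lu_dispatch():
    A = torch.randn(2, 3, 3)
    A_LU, pivots = lu(A)  # routed to _lu_no_infos
    A_LU, pivots, infos = lu(A, get_infos=True)  # routed to _lu_with_infos
    return infos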
def align_tensors(*tensors):
    raise RuntimeError('`align_tensors` not yet implemented.')
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/functional.py | 0.896922 | 0.530905 | functional.py | pypi |
import contextlib
import warnings
from torch._C import default_generator
def set_rng_state(new_state):
r"""Sets the random number generator state.
Args:
new_state (torch.ByteTensor): The desired state
"""
default_generator.set_state(new_state)
def get_rng_state():
r"""Returns the random number generator state as a `torch.ByteTensor`."""
return default_generator.get_state()
def manual_seed(seed):
r"""Sets the seed for generating random numbers. Returns a
`torch.Generator` object.
Args:
seed (int): The desired seed.
"""
seed = int(seed)
import torch.cuda
if not torch.cuda._is_in_bad_fork():
torch.cuda.manual_seed_all(seed)
return default_generator.manual_seed(seed)
def seed():
r"""Sets the seed for generating random numbers to a non-deterministic
random number. Returns a 64 bit number used to seed the RNG.
"""
seed = default_generator.seed()
import torch.cuda
if not torch.cuda._is_in_bad_fork():
torch.cuda.manual_seed_all(seed)
return seed
def initial_seed():
r"""Returns the initial seed for generating random numbers as a
Python `long`.
"""
return default_generator.initial_seed()
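# A hedged reproducibility sketch for manual_seed; the helper is illustrative
# only and not part of this module's API.
def _example_manual_seed():
    import torch
    torch.manual_seed(0)
    a = torch.rand(3)
    torch.manual_seed(0)
    b = torch.rand(3)
    assert torch.equal(a, b)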
_fork_rng_warned_already = False
@contextlib.contextmanager
def fork_rng(devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices"):
"""
Forks the RNG, so that when you return, the RNG is reset
to the state that it was previously in.
Arguments:
devices (iterable of CUDA IDs): CUDA devices for which to fork
the RNG. CPU RNG state is always forked. By default, :meth:`fork_rng` operates
on all devices, but will emit a warning if your machine has a lot
of devices, since this function will run very slowly in that case.
If you explicitly specify devices, this warning will be suppressed
enabled (bool): if ``False``, the RNG is not forked. This is a convenience
argument for easily disabling the context manager without having
to delete it and unindent your Python code under it.
"""
import torch.cuda
global _fork_rng_warned_already
# Internal arguments:
# _caller: the function which called fork_rng, which the user used
# _devices_kw: the devices keyword of _caller
if not enabled:
yield
return
if devices is None:
num_devices = torch.cuda.device_count()
if num_devices > 1 and not _fork_rng_warned_already:
warnings.warn(
("CUDA reports that you have {num_devices} available devices, and you "
"have used {caller} without explicitly specifying which devices are being used. "
"For safety, we initialize *every* CUDA device by default, which "
"can be quite slow if you have a lot of GPUs. If you know that you are only "
"making use of a few CUDA devices, set the environment variable CUDA_VISIBLE_DEVICES "
"or the '{devices_kw}' keyword argument of {caller} with the set of devices "
"you are actually using. For example, if you are using CPU only, "
"set CUDA_VISIBLE_DEVICES= or devices=[]; if you are using "
"GPU 0 only, set CUDA_VISIBLE_DEVICES=0 or devices=[0]. To initialize "
"all devices and suppress this warning, set the '{devices_kw}' keyword argument "
"to `range(torch.cuda.device_count())`."
).format(num_devices=num_devices, caller=_caller, devices_kw=_devices_kw))
_fork_rng_warned_already = True
devices = list(range(num_devices))
else:
# Protect against user passing us a generator; we need to traverse this
# multiple times but a generator will be exhausted upon first traversal
devices = list(devices)
cpu_rng_state = torch.get_rng_state()
gpu_rng_states = []
for device in devices:
gpu_rng_states.append(torch.cuda.get_rng_state(device))
try:
yield
finally:
torch.set_rng_state(cpu_rng_state)
for device, gpu_rng_state in zip(devices, gpu_rng_states):
            torch.cuda.set_rng_state(gpu_rng_state, device)
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/random.py | 0.898882 | 0.473231 | random.py | pypi |
import torch
class SobolEngine(object):
r"""
The :class:`torch.quasirandom.SobolEngine` is an engine for generating
(scrambled) Sobol sequences. Sobol sequences are an example of low
discrepancy quasi-random sequences.
This implementation of an engine for Sobol sequences is capable of
sampling sequences up to a maximum dimension of 1111. It uses direction
numbers to generate these sequences, and these numbers have been adapted
from `here <http://web.maths.unsw.edu.au/~fkuo/sobol/joe-kuo-old.1111>`_.
References:
- Art B. Owen. Scrambling Sobol and Niederreiter-Xing points.
Journal of Complexity, 14(4):466-489, December 1998.
- I. M. Sobol. The distribution of points in a cube and the accurate
evaluation of integrals.
Zh. Vychisl. Mat. i Mat. Phys., 7:784-802, 1967.
Args:
dimension (Int): The dimensionality of the sequence to be drawn
scramble (bool, optional): Setting this to ``True`` will produce
scrambled Sobol sequences. Scrambling is
capable of producing better Sobol
sequences. Default: ``False``.
seed (Int, optional): This is the seed for the scrambling. The seed
of the random number generator is set to this,
if specified. Otherwise, it uses a random seed.
Default: ``None``
Examples::
>>> soboleng = torch.quasirandom.SobolEngine(dimension=5)
>>> soboleng.draw(3)
tensor([[0.5000, 0.5000, 0.5000, 0.5000, 0.5000],
[0.7500, 0.2500, 0.7500, 0.2500, 0.7500],
[0.2500, 0.7500, 0.2500, 0.7500, 0.2500]])
"""
MAXBIT = 30
MAXDIM = 1111
def __init__(self, dimension, scramble=False, seed=None):
if dimension > self.MAXDIM or dimension < 1:
raise ValueError("Supported range of dimensionality "
"for SobolEngine is [1, {}]".format(self.MAXDIM))
self.seed = seed
self.scramble = scramble
self.dimension = dimension
cpu = torch.device("cpu")
self.sobolstate = torch.zeros(dimension, self.MAXBIT, device=cpu, dtype=torch.long)
torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)
if self.scramble:
g = torch.Generator()
if self.seed is not None:
g.manual_seed(self.seed)
else:
g.seed()
shift_ints = torch.randint(2, (self.dimension, self.MAXBIT), device=cpu, generator=g)
self.shift = torch.mv(shift_ints, torch.pow(2, torch.arange(0, self.MAXBIT, device=cpu)))
ltm_dims = (self.dimension, self.MAXBIT, self.MAXBIT)
ltm = torch.randint(2, ltm_dims, device=cpu, generator=g).tril()
torch._sobol_engine_scramble_(self.sobolstate, ltm, self.dimension)
else:
self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)
self.quasi = self.shift.clone(memory_format=torch.contiguous_format)
self.num_generated = 0
def draw(self, n=1, out=None, dtype=torch.float32):
r"""
Function to draw a sequence of :attr:`n` points from a Sobol sequence.
Note that the samples are dependent on the previous samples. The size
of the result is :math:`(n, dimension)`.
Args:
n (Int, optional): The length of sequence of points to draw.
Default: 1
out (Tensor, optional): The output tensor
dtype (:class:`torch.dtype`, optional): the desired data type of the
returned tensor.
Default: ``torch.float32``
"""
result, self.quasi = torch._sobol_engine_draw(self.quasi, n, self.sobolstate,
self.dimension, self.num_generated, dtype=dtype)
self.num_generated += n
if out is not None:
out.resize_as_(result).copy_(result)
return out
return result
def reset(self):
r"""
Function to reset the ``SobolEngine`` to base state.
"""
self.quasi.copy_(self.shift)
self.num_generated = 0
return self
def fast_forward(self, n):
r"""
Function to fast-forward the state of the ``SobolEngine`` by
:attr:`n` steps. This is equivalent to drawing :attr:`n` samples
without using the samples.
Args:
n (Int): The number of steps to fast-forward by.
"""
torch._sobol_engine_ff_(self.quasi, n, self.sobolstate, self.dimension, self.num_generated)
self.num_generated += n
return self
def __repr__(self):
fmt_string = ['dimension={}'.format(self.dimension)]
if self.scramble:
fmt_string += ['scramble=True']
if self.seed is not None:
fmt_string += ['seed={}'.format(self.seed)]
        return self.__class__.__name__ + '(' + ', '.join(fmt_string) + ')'
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/quasirandom.py | 0.957764 | 0.733679 | quasirandom.py | pypi |
import torch
def is_sparse(A):
"""Check if tensor A is a sparse tensor"""
if isinstance(A, torch.Tensor):
return A.layout == torch.sparse_coo
raise TypeError("expected Tensor but got %s" % (type(A).__name__))
def get_floating_dtype(A):
"""Return the floating point dtype of tensor A.
Integer types map to float32.
"""
dtype = A.dtype
if dtype in (torch.float16, torch.float32, torch.float64):
return dtype
return torch.float32
def matmul(A, B):
# type: (Optional[Tensor], Tensor) -> Tensor
"""Multiply two matrices.
If A is None, return B. A can be sparse or dense. B is always
dense.
"""
if A is None:
return B
if is_sparse(A):
return torch.sparse.mm(A, B)
return torch.matmul(A, B)
def conjugate(A):
"""Return conjugate of tensor A.
.. note:: If A's dtype is not complex, A is returned.
"""
if A.is_complex():
return A.conj()
return A
def transpose(A):
"""Return transpose of a matrix or batches of matrices.
"""
ndim = len(A.shape)
return A.transpose(ndim - 1, ndim - 2)
def transjugate(A):
"""Return transpose conjugate of a matrix or batches of matrices.
"""
return conjugate(transpose(A))
def bform(X, A, Y):
# type: (Tensor, Optional[Tensor], Tensor) -> Tensor
"""Return bilinear form of matrices: :math:`X^T A Y`.
"""
return matmul(transpose(X), matmul(A, Y))
def qform(A, S):
# type: (Optional[Tensor], Tensor) -> Tensor
"""Return quadratic form :math:`S^T A S`.
"""
return bform(S, A, S)
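# A tiny hedged check of the helpers above: with A = 2*I, the quadratic form
# S^T A S reduces to twice the Gram matrix of S.
def _example_qform():
    S = torch.eye(3)
    A = 2 * torch.eye(3)
    assert torch.equal(qform(A, S), 2 * torch.eye(3))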
def basis(A):
"""Return orthogonal basis of A columns.
"""
if A.is_cuda:
# torch.orgqr is not available in CUDA
Q, _ = torch.qr(A, some=True)
else:
Q = torch.orgqr(*torch.geqrf(A))
return Q
def symeig(A, largest=False, eigenvectors=True):
# type: (Tensor, Optional[bool], Optional[bool]) -> Tuple[Tensor, Tensor]
"""Return eigenpairs of A with specified ordering.
"""
if largest is None:
largest = False
if eigenvectors is None:
eigenvectors = True
E, Z = torch.symeig(A, eigenvectors, True)
# assuming that E is ordered
if largest:
E = torch.flip(E, dims=(-1,))
Z = torch.flip(Z, dims=(-1,))
    return E, Z
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/_linalg_utils.py | 0.899993 | 0.769579 | _linalg_utils.py | pypi |
import io
import warnings
import torch
from ._utils import _type, _cuda
class _StorageBase(object):
is_cuda = False
is_sparse = False
def __str__(self):
content = ' ' + '\n '.join(str(self[i]) for i in range(len(self)))
return content + '\n[{} of size {}]'.format(torch.typename(self), len(self))
def __repr__(self):
return str(self)
def __iter__(self):
return iter(map(lambda i: self[i], range(self.size())))
def __copy__(self):
return self.clone()
def __deepcopy__(self, memo):
memo = memo.setdefault('torch', {})
if self._cdata in memo:
return memo[self._cdata]
new_storage = self.clone()
memo[self._cdata] = new_storage
return new_storage
def __reduce__(self):
warnings.warn("pickle support for Storage will be removed in 1.5. Use `torch.save` instead", FutureWarning)
b = io.BytesIO()
torch.save(self, b)
return (_load_from_bytes, (b.getvalue(),))
def __sizeof__(self):
return super(_StorageBase, self).__sizeof__() + self.element_size() * self.size()
def clone(self):
"""Returns a copy of this storage"""
device = self.get_device() if self.is_cuda else -1
with torch.cuda.device(device):
return type(self)(self.size()).copy_(self)
def tolist(self):
"""Returns a list containing the elements of this storage"""
return list(self)
def cpu(self):
"""Returns a CPU copy of this storage if it's not already on the CPU"""
return self.type(getattr(torch, self.__class__.__name__))
def double(self):
"""Casts this storage to double type"""
return self.type(type(self).__module__ + '.DoubleStorage')
def float(self):
"""Casts this storage to float type"""
return self.type(type(self).__module__ + '.FloatStorage')
def half(self):
"""Casts this storage to half type"""
return self.type(type(self).__module__ + '.HalfStorage')
def long(self):
"""Casts this storage to long type"""
return self.type(type(self).__module__ + '.LongStorage')
def int(self):
"""Casts this storage to int type"""
return self.type(type(self).__module__ + '.IntStorage')
def short(self):
"""Casts this storage to short type"""
return self.type(type(self).__module__ + '.ShortStorage')
def char(self):
"""Casts this storage to char type"""
return self.type(type(self).__module__ + '.CharStorage')
def byte(self):
"""Casts this storage to byte type"""
return self.type(type(self).__module__ + '.ByteStorage')
def bool(self):
"""Casts this storage to bool type"""
return self.type(type(self).__module__ + '.BoolStorage')
def bfloat16(self):
"""Casts this storage to bfloat16 type"""
return self.type(type(self).__module__ + '.BFloat16Storage')
def pin_memory(self):
"""Copies the storage to pinned memory, if it's not already pinned."""
if self.is_cuda:
raise TypeError("cannot pin '{0}' only CPU memory can be pinned"
.format(self.type()))
import torch.cuda
allocator = torch.cuda._host_allocator()
return type(self)(self.size(), allocator=allocator).copy_(self)
def share_memory_(self):
"""Moves the storage to shared memory.
This is a no-op for storages already in shared memory and for CUDA
storages, which do not need to be moved for sharing across processes.
Storages in shared memory cannot be resized.
Returns: self
"""
from torch.multiprocessing import get_sharing_strategy
if self.is_cuda:
pass # CUDA doesn't use POSIX shared memory
elif get_sharing_strategy() == 'file_system':
self._share_filename_()
else:
self._share_fd_()
return self
@classmethod
def _new_shared(cls, size):
"""Creates a new storage in shared memory with the same data type"""
from torch.multiprocessing import get_sharing_strategy
if cls.is_cuda:
return cls(size)
elif get_sharing_strategy() == 'file_system':
return cls._new_using_filename(size)
else:
return cls._new_using_fd(size)
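# Editorial sketch (not upstream code): a hedged usage example. Concrete
# classes such as torch.FloatStorage mix in _StorageBase, so the helpers
# above are reachable from any tensor's storage:
#
#     >>> import torch
#     >>> s = torch.tensor([1.0, 2.0, 3.0]).storage()    # a FloatStorage
#     >>> len(s), s.tolist()
#     (3, [1.0, 2.0, 3.0])
#     >>> type(s.double()).__name__      # casts route through _StorageBase.type
#     'DoubleStorage'
#     >>> s2 = s.clone(); s2[0] = 9.0    # clone() is an independent copy
#     >>> s.tolist()
#     [1.0, 2.0, 3.0]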
def _load_from_bytes(b):
return torch.load(io.BytesIO(b))
_StorageBase.type = _type
_StorageBase.cuda = _cuda
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/storage.py | 0.713631 | 0.224119 | storage.py | pypi |
__all__ = ['svd_lowrank', 'pca_lowrank']
import torch
from . import _linalg_utils as _utils
from ._overrides import has_torch_function, handle_torch_function
def get_approximate_basis(A, # type: Tensor
q, # type: int
niter=2, # type: Optional[int]
M=None # type: Optional[Tensor]
):
# type: (...) -> Tensor
"""Return tensor :math:`Q` with :math:`q` orthonormal columns such
that :math:`Q Q^H A` approximates :math:`A`. If :math:`M` is
specified, then :math:`Q` is such that :math:`Q Q^H (A - M)`
approximates :math:`A - M`.
.. note:: The implementation is based on the Algorithm 4.4 from
Halko et al, 2009.
.. note:: For an adequate approximation of a k-rank matrix
:math:`A`, where k is not known in advance but could be
estimated, the number of :math:`Q` columns, q, can be
chosen according to the following criteria: in general,
:math:`k <= q <= min(2*k, m, n)`. For large low-rank
matrices, take :math:`q = k + 5..10`. If k is
relatively small compared to :math:`min(m, n)`, choosing
:math:`q = k + 0..2` may be sufficient.
.. note:: To obtain repeatable results, reset the seed for the
pseudorandom number generator
Args:
A (Tensor): the input tensor of size :math:`(*, m, n)`
q (int): the dimension of subspace spanned by :math:`Q`
columns.
niter (int, optional): the number of subspace iterations to
conduct; ``niter`` must be a
nonnegative integer. In most cases, the
default value 2 is more than enough.
M (Tensor, optional): the input tensor's mean of size
:math:`(*, 1, n)`.
References:
- Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
structure with randomness: probabilistic algorithms for
constructing approximate matrix decompositions,
arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
`arXiv <http://arxiv.org/abs/0909.4061>`_).
"""
niter = 2 if niter is None else niter
m, n = A.shape[-2:]
dtype = _utils.get_floating_dtype(A)
matmul = _utils.matmul
R = torch.randn(n, q, dtype=dtype, device=A.device)
A_H = _utils.transjugate(A)
if M is None:
(Q, _) = matmul(A, R).qr()
for i in range(niter):
(Q, _) = matmul(A_H, Q).qr()
(Q, _) = matmul(A, Q).qr()
else:
M_H = _utils.transjugate(M)
(Q, _) = (matmul(A, R) - matmul(M, R)).qr()
for i in range(niter):
(Q, _) = (matmul(A_H, Q) - matmul(M_H, Q)).qr()
(Q, _) = (matmul(A, Q) - matmul(M, Q)).qr()
return Q
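# Editorial sketch (not upstream code): a hedged check that Q Q^T A
# reconstructs a synthetic low-rank matrix, following the q >= k + 5 guidance
# from the note above:
#
#     >>> import torch
#     >>> from torch._lowrank import get_approximate_basis
#     >>> _ = torch.manual_seed(0)
#     >>> B = torch.randn(100, 5)
#     >>> A = B @ B.t()                               # rank-5, 100 x 100
#     >>> Q = get_approximate_basis(A, q=10, niter=2)
#     >>> err = (Q @ (Q.t() @ A) - A).norm() / A.norm()
#     >>> bool(err < 1e-4)
#     True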
def svd_lowrank(A, q=6, niter=2, M=None):
# type: (Tensor, Optional[int], Optional[int], Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor]
"""Return the singular value decomposition ``(U, S, V)`` of a matrix,
batches of matrices, or a sparse matrix :math:`A` such that
:math:`A \approx U diag(S) V^T`. In case :math:`M` is given, then
SVD is computed for the matrix :math:`A - M`.
.. note:: The implementation is based on the Algorithm 5.1 from
Halko et al, 2009.
.. note:: To obtain repeatable results, reset the seed for the
pseudorandom number generator
.. note:: The input is assumed to be a low-rank matrix.
.. note:: In general, use the full-rank SVD implementation
``torch.svd`` for dense matrices, due to its roughly
10-fold higher performance. The low-rank SVD
is useful for huge sparse matrices that
``torch.svd`` cannot handle.
Args:
A (Tensor): the input tensor of size :math:`(*, m, n)`
q (int, optional): a slightly overestimated rank of A.
niter (int, optional): the number of subspace iterations to
conduct; niter must be a nonnegative
integer, and defaults to 2
M (Tensor, optional): the input tensor's mean of size
:math:`(*, 1, n)`.
References:
- Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
structure with randomness: probabilistic algorithms for
constructing approximate matrix decompositions,
arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
`arXiv <http://arxiv.org/abs/0909.4061>`_).
"""
if not torch.jit.is_scripting():
tensor_ops = (A, M)
if (not set(map(type, tensor_ops)).issubset((torch.Tensor, type(None))) and has_torch_function(tensor_ops)):
return handle_torch_function(svd_lowrank, tensor_ops, A, q=q, niter=niter, M=M)
return _svd_lowrank(A, q=q, niter=niter, M=M)
def _svd_lowrank(A, q=6, niter=2, M=None):
# type: (Tensor, Optional[int], Optional[int], Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor]
q = 6 if q is None else q
m, n = A.shape[-2:]
matmul = _utils.matmul
if M is None:
M_t = None
else:
M_t = _utils.transpose(M)
A_t = _utils.transpose(A)
# Algorithm 5.1 in Halko et al 2009, slightly modified to reduce
# the number of conjugate and transpose operations
if m < n:
# computing the SVD approximation of a transpose in order to
# keep B shape minimal
Q = get_approximate_basis(A_t, q, niter=niter, M=M_t)
Q_c = _utils.conjugate(Q)
if M is None:
B_t = matmul(A, Q_c)
else:
B_t = matmul(A, Q_c) - matmul(M, Q_c)
U, S, V = torch.svd(B_t)
V = Q.matmul(V)
else:
Q = get_approximate_basis(A, q, niter=niter, M=M)
Q_c = _utils.conjugate(Q)
if M is None:
B = matmul(A_t, Q_c)
else:
B = matmul(A_t, Q_c) - matmul(M_t, Q_c)
U, S, V = torch.svd(_utils.transpose(B))
U = Q.matmul(U)
return U, S, V
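# Editorial sketch (not upstream code): a hedged end-to-end check of
# svd_lowrank on a synthetic low-rank matrix, assuming it is exposed as
# torch.svd_lowrank in this release:
#
#     >>> import torch
#     >>> _ = torch.manual_seed(0)
#     >>> A = torch.randn(60, 3) @ torch.randn(3, 40)   # rank-3, 60 x 40
#     >>> U, S, V = torch.svd_lowrank(A, q=6)
#     >>> A_hat = U @ torch.diag(S) @ V.t()
#     >>> bool((A_hat - A).norm() / A.norm() < 1e-4)
#     True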
def pca_lowrank(A, q=None, center=True, niter=2):
# type: (Tensor, Optional[int], bool, int) -> Tuple[Tensor, Tensor, Tensor]
r"""Performs linear Principal Component Analysis (PCA) on a low-rank
matrix, batches of such matrices, or sparse matrix.
This function returns a namedtuple ``(U, S, V)`` which is the
nearly optimal approximation of a singular value decomposition of
a centered matrix :math:`A` such that :math:`A = U diag(S) V^T`.
.. note:: The relation of ``(U, S, V)`` to PCA is as follows:
- :math:`A` is a data matrix with ``m`` samples and
``n`` features
- the :math:`V` columns represent the principal directions
- :math:`S ** 2 / (m - 1)` contains the eigenvalues of
:math:`A^T A / (m - 1)` which is the covariance of
``A`` when ``center=True`` is provided.
- ``matmul(A, V[:, :k])`` projects data to the first k
principal components
.. note:: Unlike the standard SVD, the sizes of the returned
matrices depend on the specified rank ``q``
as follows:
- :math:`U` is m x q matrix
- :math:`S` is q-vector
- :math:`V` is n x q matrix
.. note:: To obtain repeatable results, reset the seed for the
pseudorandom number generator
Args:
A (Tensor): the input tensor of size :math:`(*, m, n)`
q (int, optional): a slightly overestimated rank of
:math:`A`. By default, ``q = min(6, m,
n)``.
center (bool, optional): if True, center the input tensor,
otherwise, assume that the input is
centered.
niter (int, optional): the number of subspace iterations to
conduct; niter must be a nonnegative
integer, and defaults to 2.
References:
- Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
structure with randomness: probabilistic algorithms for
constructing approximate matrix decompositions,
arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
`arXiv <http://arxiv.org/abs/0909.4061>`_).
"""
if not torch.jit.is_scripting():
if type(A) is not torch.Tensor and has_torch_function((A,)):
return handle_torch_function(pca_lowrank, (A,), A, q=q, center=center, niter=niter)
(m, n) = A.shape[-2:]
if q is None:
q = min(6, m, n)
elif not (q >= 0 and q <= min(m, n)):
raise ValueError('q(={}) must be a non-negative integer'
' and not greater than min(m, n)={}'
.format(q, min(m, n)))
if not (niter >= 0):
raise ValueError('niter(={}) must be a non-negative integer'
.format(niter))
dtype = _utils.get_floating_dtype(A)
if not center:
return _svd_lowrank(A, q, niter=niter, M=None)
if _utils.is_sparse(A):
if len(A.shape) != 2:
raise ValueError('pca_lowrank input is expected to be 2-dimensional tensor')
c = torch.sparse.sum(A, dim=(-2,)) / m
# reshape c
column_indices = c.indices()[0]
indices = torch.zeros(2, len(column_indices),
dtype=column_indices.dtype,
device=column_indices.device)
indices[0] = column_indices
C_t = torch.sparse_coo_tensor(
indices, c.values(), (n, 1), dtype=dtype, device=A.device)
ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)
M = _utils.transpose(torch.sparse.mm(C_t, ones_m1_t))
return _svd_lowrank(A, q, niter=niter, M=M)
else:
c = A.sum(dim=(-2,)) / m
C = c.reshape(A.shape[:-2] + (1, n))
ones_m1 = torch.ones(A.shape[:-1] + (1, ), dtype=dtype, device=A.device)
M = ones_m1.matmul(C)
return _svd_lowrank(A - M, q, niter=niter, M=None)
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/_lowrank.py | 0.903369 | 0.639032 | _lowrank.py | pypi |
from torch._six import PY2
from collections import OrderedDict
"""
This file contains helper functions that implement experimental functionality
for named tensors in python. All of these are experimental, unstable, and
subject to change or deletion.
"""
def check_serializing_named_tensor(tensor):
if tensor.has_names():
raise RuntimeError(
"NYI: Named tensors don't support serialization. Please drop "
"names via `tensor = tensor.rename(None)` before serialization.")
def build_dim_map(tensor):
"""Returns a map of { dim: dim_name } where dim is a name if the dim is named
and the dim index otherwise."""
return OrderedDict([(idx if name is None else name, name)
for idx, name in enumerate(tensor.names)])
def unzip_namedshape(namedshape):
if isinstance(namedshape, OrderedDict):
namedshape = namedshape.items()
if not hasattr(namedshape, '__iter__') and not isinstance(namedshape, tuple):
raise RuntimeError(
'Expected namedshape to be OrderedDict or iterable of tuples, got: {}'
.format(type(namedshape)))
if len(namedshape) == 0:
raise RuntimeError('Expected namedshape to be non-empty.')
return zip(*namedshape)
def namer_api_name(inplace):
if inplace:
return 'rename_'
else:
return 'rename'
def is_ellipsis(item):
if PY2:
return item == '...'
else:
return item == Ellipsis or item == '...'
def single_ellipsis_index(names, fn_name):
ellipsis_indices = [i for i, name in enumerate(names) if is_ellipsis(name)]
if len(ellipsis_indices) >= 2:
raise RuntimeError('{}: More than one Ellipsis (\'...\') found in names ('
'{}). This function supports up to one Ellipsis.'
.format(fn_name, names))
if len(ellipsis_indices) == 1:
return ellipsis_indices[0]
return None
def expand_single_ellipsis(numel_pre_glob, numel_post_glob, names):
return names[numel_pre_glob:len(names) - numel_post_glob]
def replace_ellipsis_by_position(ellipsis_idx, names, tensor_names):
globbed_names = expand_single_ellipsis(ellipsis_idx, len(names) - ellipsis_idx - 1, tensor_names)
return names[:ellipsis_idx] + globbed_names + names[ellipsis_idx + 1:]
def resolve_ellipsis(names, tensor_names, fn_name):
"""
Expands ... inside `names` to be equal to a list of names from `tensor_names`.
"""
ellipsis_idx = single_ellipsis_index(names, fn_name)
if ellipsis_idx is None:
return names
return replace_ellipsis_by_position(ellipsis_idx, names, tensor_names)
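# Editorial sketch (not upstream code): resolve_ellipsis operates on plain
# tuples, so its behaviour can be illustrated without named tensors:
#
#     >>> from torch._namedtensor_internals import resolve_ellipsis
#     >>> resolve_ellipsis(('...', 'height', 'width'), ('N', 'C', 'H', 'W'), 'rename')
#     ('N', 'C', 'height', 'width')
#     >>> resolve_ellipsis(('batch', '...'), ('N', 'C', 'H', 'W'), 'rename')
#     ('batch', 'C', 'H', 'W')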
def update_names_with_list(tensor, names, inplace):
# Special case for tensor.rename(None)
if len(names) == 1 and names[0] is None:
return tensor._update_names(None, inplace)
return tensor._update_names(
resolve_ellipsis(names, tensor.names, namer_api_name(inplace)), inplace)
def update_names_with_mapping(tensor, rename_map, inplace):
dim_map = build_dim_map(tensor)
for old_dim in rename_map.keys():
new_dim = rename_map[old_dim]
if old_dim in dim_map.keys():
dim_map[old_dim] = new_dim
else:
raise RuntimeError(('{api_name}: Tried to rename dim \'{old_dim}\' to dim '
'{new_dim} in Tensor[{dims}] but dim \'{old_dim}\' does not exist')
.format(old_dim=old_dim, new_dim=new_dim, dims=tensor.names,
api_name=namer_api_name(inplace)))
return tensor._update_names(tuple(dim_map.values()), inplace)
def update_names(tensor, names, rename_map, inplace):
"""There are two usages:
tensor.rename(*names) returns a view on tensor with named dims `names`.
`names` must be of length `tensor.dim()`; otherwise, if '...' is in `names`,
then it is expanded greedily to be equal to the corresponding names from
`tensor.names`.
For example,
```
>>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
>>> x.rename('...', 'height', 'width').names
('N', 'C', 'height', 'width')
>>> x.rename('batch', '...', 'width').names
('batch', 'C', 'H', 'width')
```
tensor.rename(**rename_map) returns a view on tensor that has rename dims
as specified in the mapping `rename_map`.
For example,
```
>>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
>>> x.rename(W='width', H='height').names
('N', 'C', 'height', 'width')
```
Finally, tensor.rename has an in-place version called tensor.rename_.
"""
has_names = len(names) > 0
has_rename_pairs = bool(rename_map)
if has_names and has_rename_pairs:
raise RuntimeError('{api_name}: This function takes either positional '
'args or keyword args, but not both. Use tensor.{api_name}(*names) '
'to name dims and tensor.{api_name}(**rename_map) to rename '
'dims.'.format(api_name=namer_api_name(inplace)))
# Special case for tensor.rename(*[]), which is valid for a 0 dim tensor.
if not has_names and not has_rename_pairs:
return update_names_with_list(tensor, names, inplace)
if has_names:
return update_names_with_list(tensor, names, inplace)
return update_names_with_mapping(tensor, rename_map, inplace)
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/_namedtensor_internals.py | 0.945109 | 0.769817 | _namedtensor_internals.py | pypi |
import torch
from .modules.utils import _single, _pair, _triple
def _grad_input_padding(grad_output, input_size, stride, padding, kernel_size):
input_size = list(input_size)
k = grad_output.dim() - 2
if len(input_size) == k + 2:
input_size = input_size[-k:]
if len(input_size) != k:
raise ValueError("input_size must have {} elements (got {})"
.format(k + 2, len(input_size)))
def dim_size(d):
return ((grad_output.size(d + 2) - 1) * stride[d] - 2 * padding[d] +
kernel_size[d])
min_sizes = [dim_size(d) for d in range(k)]
max_sizes = [min_sizes[d] + stride[d] - 1 for d in range(k)]
for size, min_size, max_size in zip(input_size, min_sizes, max_sizes):
if size < min_size or size > max_size:
raise ValueError(
("requested an input grad size of {}, but valid sizes range "
"from {} to {} (for a grad_output of {})").format(
input_size, min_sizes, max_sizes,
grad_output.size()[2:]))
return tuple(input_size[d] - min_sizes[d] for d in range(k))
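# Editorial note (not upstream code): a worked instance of the arithmetic
# above. With stride=2, padding=1, kernel_size=4 and a grad_output width of 5,
# the smallest consistent input width is (5 - 1)*2 - 2*1 + 4 = 10 and the
# largest is 10 + stride - 1 = 11; for a requested input width of 11 the
# helper returns an output padding of 11 - 10 = 1.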
def conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv1d with respect to the input of the convolution.
This is the same as the 1D transposed convolution operator under the hood, but requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weight tensor (out_channels x in_channels/groups x kW)
grad_output : output gradient tensor (minibatch x out_channels x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3, requires_grad=True)
>>> weight = torch.randn(1,1,1, requires_grad=True)
>>> output = F.conv1d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv1d_input(input.shape, weight, grad_output)
"""
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
kernel_size = [weight.shape[2]]
if input_size is None:
raise ValueError("grad.conv1d_input requires specifying an input_size")
grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
padding, kernel_size)
return torch.conv_transpose1d(
grad_output, weight, None, stride, padding, grad_input_padding, groups,
dilation)
def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv1d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3, requires_grad=True)
>>> weight = torch.randn(1,1,1, requires_grad=True)
>>> output = F.conv1d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv1d_weight(input, weight.shape, grad_output)
"""
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1)
grad_output = grad_output.contiguous().view(
grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2])
input = input.contiguous().view(1, input.shape[0] * input.shape[1],
input.shape[2])
grad_weight = torch.conv1d(input, grad_output, None, dilation, padding,
stride, in_channels * min_batch)
grad_weight = grad_weight.contiguous().view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2])
return grad_weight.sum(dim=0).view(
in_channels // groups, out_channels, grad_weight.shape[2]).transpose(
0, 1).narrow(2, 0, weight_size[2])
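# Editorial sketch (not upstream code): a hedged consistency check -- for a
# plain convolution (unit stride/dilation, no padding, one group) both helpers
# above agree with autograd. F.grad is used exactly as in the docstrings above:
#
#     >>> import torch
#     >>> import torch.nn.functional as F
#     >>> x = torch.randn(2, 3, 8, requires_grad=True)
#     >>> w = torch.randn(4, 3, 3, requires_grad=True)
#     >>> y = F.conv1d(x, w)
#     >>> gy = torch.randn(y.shape)
#     >>> gx, gw = torch.autograd.grad(y, (x, w), gy)
#     >>> torch.allclose(gx, F.grad.conv1d_input(x.shape, w, gy), atol=1e-5)
#     True
#     >>> torch.allclose(gw, F.grad.conv1d_weight(x, w.shape, gy), atol=1e-4)
#     True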
def conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv2d with respect to the input of the convolution.
This is the same as the 2D transposed convolution operator under the hood, but requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weight tensor (out_channels x in_channels/groups x kH x kW)
grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3,3, requires_grad=True)
>>> weight = torch.randn(1,1,1,2, requires_grad=True)
>>> output = F.conv2d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv2d_input(input.shape, weight, grad_output)
"""
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
kernel_size = (weight.shape[2], weight.shape[3])
if input_size is None:
raise ValueError("grad.conv2d_input requires specifying an input_size")
grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
padding, kernel_size)
return torch.conv_transpose2d(
grad_output, weight, None, stride, padding, grad_input_padding, groups,
dilation)
def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv2d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iH x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3,3, requires_grad=True)
>>> weight = torch.randn(1,1,1,2, requires_grad=True)
>>> output = F.conv2d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv2d_weight(input, weight.shape, grad_output)
"""
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = grad_output.contiguous().repeat(1, in_channels // groups, 1,
1)
grad_output = grad_output.contiguous().view(
grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],
grad_output.shape[3])
input = input.contiguous().view(1, input.shape[0] * input.shape[1],
input.shape[2], input.shape[3])
grad_weight = torch.conv2d(input, grad_output, None, dilation, padding,
stride, in_channels * min_batch)
grad_weight = grad_weight.contiguous().view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],
grad_weight.shape[3])
return grad_weight.sum(dim=0).view(
in_channels // groups, out_channels,
grad_weight.shape[2], grad_weight.shape[3]).transpose(0, 1).narrow(
2, 0, weight_size[2]).narrow(3, 0, weight_size[3])
def conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv3d with respect to the input of the convolution.
This is the same as the 3D transposed convolution operator under the hood, but requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)
grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
>>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
>>> output = F.conv3d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv3d_input(input.shape, weight, grad_output)
"""
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
kernel_size = (weight.shape[2], weight.shape[3], weight.shape[4])
if input_size is None:
raise ValueError("grad.conv3d_input requires specifying an input_size")
grad_input_padding = _grad_input_padding(grad_output, input_size, stride,
padding, kernel_size)
return torch.conv_transpose3d(
grad_output, weight, None, stride, padding, grad_input_padding, groups,
dilation)
def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv3d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
>>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
>>> output = F.conv3d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv3d_weight(input, weight.shape, grad_output)
"""
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = grad_output.repeat(1, in_channels // groups, 1, 1, 1)
grad_output = grad_output.contiguous().view(
grad_output.shape[0] * grad_output.shape[1], 1, grad_output.shape[2],
grad_output.shape[3], grad_output.shape[4])
input = input.contiguous().view(1, input.shape[0] * input.shape[1],
input.shape[2], input.shape[3],
input.shape[4])
grad_weight = torch.conv3d(input, grad_output, None, dilation, padding,
stride, in_channels * min_batch)
grad_weight = grad_weight.contiguous().view(
min_batch, grad_weight.shape[1] // min_batch, grad_weight.shape[2],
grad_weight.shape[3], grad_weight.shape[4])
return grad_weight.sum(dim=0).view(
in_channels // groups, out_channels, grad_weight.shape[2],
grad_weight.shape[3], grad_weight.shape[4]).transpose(0, 1).narrow(
2, 0, weight_size[2]).narrow(3, 0, weight_size[3]).narrow(
4, 0, weight_size[4])
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/grad.py | 0.915214 | 0.573917 | grad.py | pypi |
from torch import nn
class OrderedDictWrapper(object):
"""
A wrapper around a C++ OrderedDict that dynamically evaluates the
OrderedDict getter on a bound C++ module, such that new changes on the C++
side are picked up. Otherwise accessing e.g. ``cpp_module._parameters`` just
once would get a frozen copy of the parameters at the time of access.
``torch.nn.Module`` accesses ``_parameters`` et al. via ``self.__dict__`` so
using properties does not work.
"""
def __init__(self, cpp_module, attr):
self.cpp_module = cpp_module
self.attr = attr
@property
def cpp_dict(self):
return getattr(self.cpp_module, self.attr)
# Magic methods cannot be assigned dynamically and bypass ``getattr``, so we
# must manually override them.
def items(self):
return self.cpp_dict.items()
def keys(self):
return self.cpp_dict.keys()
def values(self):
return self.cpp_dict.values()
def __iter__(self):
return self.cpp_dict.__iter__()
def __len__(self):
return self.cpp_dict.__len__()
def __contains__(self, key):
return self.cpp_dict.__contains__(key)
def __getitem__(self, key):
return self.cpp_dict.__getitem__(key)
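# Editorial sketch (not upstream code): the "dynamic lookup" behaviour can be
# illustrated with a stand-in for the C++ module (types.SimpleNamespace below
# is purely a demonstration object, not part of the real API):
#
#     >>> import types
#     >>> from collections import OrderedDict
#     >>> from torch.nn.cpp import OrderedDictWrapper
#     >>> fake_cpp = types.SimpleNamespace(_parameters=OrderedDict(a=1))
#     >>> wrapper = OrderedDictWrapper(fake_cpp, "_parameters")
#     >>> list(wrapper.keys())
#     ['a']
#     >>> fake_cpp._parameters = OrderedDict(a=1, b=2)   # mutate the "C++" side
#     >>> list(wrapper.keys())                           # change is picked up
#     ['a', 'b']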
class ModuleWrapper(nn.Module):
"""
A subclass of ``torch.nn.Module`` that wraps a C++ frontend module and
delegates all access.
"""
def __init__(self, cpp_module):
# Assign before the super class constructor so ``self.training`` can be
# assigned to in the super class constructor.
self.cpp_module = cpp_module
super(ModuleWrapper, self).__init__()
self._parameters = OrderedDictWrapper(cpp_module, "_parameters")
self._buffers = OrderedDictWrapper(cpp_module, "_buffers")
self._modules = OrderedDictWrapper(cpp_module, "_modules")
for attr in dir(cpp_module):
# Skip magic methods and the three attributes above.
if not attr.startswith("_"):
setattr(self, attr, getattr(self.cpp_module, attr))
def _apply(self, fn):
for param in self.parameters():
# Tensors stored in modules are graph leaves, and we don't
# want to create copy nodes, so we have to unpack the data.
param.data = fn(param.data)
if param._grad is not None:
param._grad.data = fn(param._grad.data)
for buf in self.buffers():
buf.data = fn(buf.data)
return self
@property
def training(self):
return self.cpp_module.training
@training.setter
def training(self, mode):
self.cpp_module.train(mode)
def __repr__(self):
return self.cpp_module.__repr__()
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/cpp.py | 0.92836 | 0.452959 | cpp.py | pypi |
from collections import namedtuple
import torch
from . import Sequential, ModuleList, Linear
from .module import Module
from ..functional import log_softmax
_ASMoutput = namedtuple('ASMoutput', ['output', 'loss'])
class AdaptiveLogSoftmaxWithLoss(Module):
r"""Efficient softmax approximation as described in
`Efficient softmax approximation for GPUs`_ by Edouard Grave, Armand Joulin,
Moustapha Cissé, David Grangier, and Hervé Jégou.
Adaptive softmax is an approximate strategy for training models with large
output spaces. It is most effective when the label distribution is highly
imbalanced, for example in natural language modelling, where the word
frequency distribution approximately follows the `Zipf's law`_.
Adaptive softmax partitions the labels into several clusters, according to
their frequency. These clusters may each contain a different number of
targets.
Additionally, clusters containing less frequent labels assign lower
dimensional embeddings to those labels, which speeds up the computation.
For each minibatch, only clusters for which at least one target is
present are evaluated.
The idea is that the clusters which are accessed frequently
(like the first one, containing the most frequent labels) should also be cheap
to compute -- that is, contain a small number of assigned labels.
We highly recommend taking a look at the original paper for more details.
* :attr:`cutoffs` should be an ordered Sequence of integers sorted
in the increasing order.
It controls number of clusters and the partitioning of targets into
clusters. For example setting ``cutoffs = [10, 100, 1000]``
means that the first `10` targets will be assigned
to the 'head' of the adaptive softmax, targets `11, 12, ..., 100` will be
assigned to the first cluster, and targets `101, 102, ..., 1000` will be
assigned to the second cluster, while targets
`1001, 1002, ..., n_classes - 1` will be assigned
to the last, third cluster.
* :attr:`div_value` is used to compute the size of each additional cluster,
which is given as
:math:`\left\lfloor\frac{\texttt{in\_features}}{\texttt{div\_value}^{idx}}\right\rfloor`,
where :math:`idx` is the cluster index (with clusters
for less frequent words having larger indices,
and indices starting from :math:`1`).
* :attr:`head_bias` if set to True, adds a bias term to the 'head' of the
adaptive softmax. See paper for details. Set to False in the official
implementation.
.. warning::
Labels passed as inputs to this module should be sorted according to
their frequency. This means that the most frequent label should be
represented by the index `0`, and the least frequent
label should be represented by the index `n_classes - 1`.
.. note::
This module returns a ``NamedTuple`` with ``output``
and ``loss`` fields. See further documentation for details.
.. note::
To compute log-probabilities for all classes, the ``log_prob``
method can be used.
Args:
in_features (int): Number of features in the input tensor
n_classes (int): Number of classes in the dataset
cutoffs (Sequence): Cutoffs used to assign targets to their buckets
div_value (float, optional): value used as an exponent to compute sizes
of the clusters. Default: 4.0
head_bias (bool, optional): If ``True``, adds a bias term to the 'head' of the
adaptive softmax. Default: ``False``
Returns:
``NamedTuple`` with ``output`` and ``loss`` fields:
* **output** is a Tensor of size ``N`` containing computed target
log probabilities for each example
* **loss** is a Scalar representing the computed negative
log likelihood loss
Shape:
- input: :math:`(N, \texttt{in\_features})`
- target: :math:`(N)` where each value satisfies :math:`0 <= \texttt{target[i]} < \texttt{n\_classes}`
- output1: :math:`(N)`
- output2: ``Scalar``
.. _Efficient softmax approximation for GPUs:
https://arxiv.org/abs/1609.04309
.. _Zipf's law:
https://en.wikipedia.org/wiki/Zipf%27s_law
"""
def __init__(self, in_features, n_classes, cutoffs, div_value=4., head_bias=False):
super(AdaptiveLogSoftmaxWithLoss, self).__init__()
cutoffs = list(cutoffs)
if (cutoffs != sorted(cutoffs)) \
or (min(cutoffs) <= 0) \
or (max(cutoffs) > (n_classes - 1)) \
or (len(set(cutoffs)) != len(cutoffs)) \
or any([int(c) != c for c in cutoffs]):
raise ValueError("cutoffs should be a sequence of unique, positive "
"integers sorted in an increasing order, where "
"each value is between 1 and n_classes-1")
self.in_features = in_features
self.n_classes = n_classes
self.cutoffs = cutoffs + [n_classes]
self.div_value = div_value
self.head_bias = head_bias
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
self.head = Linear(self.in_features, self.head_size, bias=self.head_bias)
self.tail = ModuleList()
for i in range(self.n_clusters):
hsz = int(self.in_features // (self.div_value ** (i + 1)))
osz = self.cutoffs[i + 1] - self.cutoffs[i]
projection = Sequential(
Linear(self.in_features, hsz, bias=False),
Linear(hsz, osz, bias=False)
)
self.tail.append(projection)
def reset_parameters(self):
self.head.reset_parameters()
for i2h, h2o in self.tail:
i2h.reset_parameters()
h2o.reset_parameters()
def forward(self, input, target):
if input.size(0) != target.size(0):
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
used_rows = 0
batch_size = target.size(0)
output = input.new_zeros(batch_size)
gather_inds = target.new_empty(batch_size)
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
low_idx = cutoff_values[i]
high_idx = cutoff_values[i + 1]
target_mask = (target >= low_idx) & (target < high_idx)
row_indices = target_mask.nonzero().squeeze()
if row_indices.numel() == 0:
continue
if i == 0:
gather_inds.index_copy_(0, row_indices, target[target_mask])
else:
relative_target = target[target_mask] - low_idx
input_subset = input.index_select(0, row_indices)
cluster_output = self.tail[i - 1](input_subset)
cluster_index = self.shortlist_size + i - 1
gather_inds.index_fill_(0, row_indices, cluster_index)
cluster_logprob = log_softmax(cluster_output, dim=1)
local_logprob = cluster_logprob.gather(1, relative_target.unsqueeze(1))
output.index_copy_(0, row_indices, local_logprob.squeeze(1))
used_rows += row_indices.numel()
if used_rows != batch_size:
raise RuntimeError("Target values should be in [0, {}], "
"but values in range [{}, {}] "
"were found. ".format(self.n_classes - 1,
target.min().item(),
target.max().item()))
head_output = self.head(input)
head_logprob = log_softmax(head_output, dim=1)
output += head_logprob.gather(1, gather_inds.unsqueeze(1)).squeeze()
loss = (-output).mean()
return _ASMoutput(output, loss)
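# Editorial sketch (not upstream code): a hedged usage example of the forward
# pass; shapes follow the "Shape" section of the class docstring:
#
#     >>> import torch
#     >>> import torch.nn as nn
#     >>> asm = nn.AdaptiveLogSoftmaxWithLoss(in_features=16, n_classes=1000,
#     ...                                     cutoffs=[10, 100])
#     >>> x = torch.randn(32, 16)
#     >>> target = torch.randint(0, 1000, (32,))
#     >>> out, loss = asm(x, target)
#     >>> out.shape, loss.dim()
#     (torch.Size([32]), 0)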
def _get_full_log_prob(self, input, head_output):
""" Given input tensor, and output of `self.head`,
compute the log of the full distribution """
out = input.new_empty((head_output.size(0), self.n_classes))
head_logprob = log_softmax(head_output, dim=1)
out[:, :self.shortlist_size] = head_logprob[:, :self.shortlist_size]
for i, (start_idx, stop_idx) in enumerate(zip(self.cutoffs, self.cutoffs[1:])):
cluster_output = self.tail[i](input)
cluster_logprob = log_softmax(cluster_output, dim=1)
output_logprob = cluster_logprob + head_logprob[:, self.shortlist_size + i].unsqueeze(1)
out[:, start_idx:stop_idx] = output_logprob
return out
def log_prob(self, input):
r""" Computes log probabilities for all :math:`\texttt{n\_classes}`
Args:
input (Tensor): a minibatch of examples
Returns:
log-probabilities for each class :math:`c`
in range :math:`0 <= c < \texttt{n\_classes}`, where :math:`\texttt{n\_classes}` is a
parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
Shape:
- Input: :math:`(N, \texttt{in\_features})`
- Output: :math:`(N, \texttt{n\_classes})`
"""
head_output = self.head(input)
return self._get_full_log_prob(input, head_output)
def predict(self, input):
r""" This is equivalent to `self.log_pob(input).argmax(dim=1)`,
but is more efficient in some cases.
Args:
input (Tensor): a minibatch of examples
Returns:
output (Tensor): a class with the highest probability for each example
Shape:
- Input: :math:`(N, \texttt{in\_features})`
- Output: :math:`(N)`
"""
head_output = self.head(input)
output = torch.argmax(head_output, dim=1)
not_in_shortlist = (output >= self.shortlist_size)
all_in_shortlist = not (not_in_shortlist.any())
if all_in_shortlist:
return output
elif not_in_shortlist.all():
log_prob = self._get_full_log_prob(input, head_output)
return torch.argmax(log_prob, dim=1)
else:
log_prob = self._get_full_log_prob(input[not_in_shortlist],
head_output[not_in_shortlist])
output[not_in_shortlist] = torch.argmax(log_prob, dim=1)
return output
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/adaptive.py | 0.935465 | 0.652864 | adaptive.py | pypi |
from .module import Module
from .utils import _pair, _quadruple, _ntuple
from .. import functional as F
# TODO: grad_output size asserts in THNN
class _ConstantPadNd(Module):
__constants__ = ['padding', 'value']
def __init__(self, value):
super(_ConstantPadNd, self).__init__()
self.value = value
def forward(self, input):
return F.pad(input, self.padding, 'constant', self.value)
def extra_repr(self):
return 'padding={}, value={}'.format(self.padding, self.value)
class ConstantPad1d(_ConstantPadNd):
r"""Pads the input tensor boundaries with a constant value.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in both boundaries. If a 2-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
Shape:
- Input: :math:`(N, C, W_{in})`
- Output: :math:`(N, C, W_{out})` where
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ConstantPad1d(2, 3.5)
>>> input = torch.randn(1, 2, 4)
>>> input
tensor([[[-1.0491, -0.7152, -0.0749, 0.8530],
[-1.3287, 1.8966, 0.1466, -0.2771]]])
>>> m(input)
tensor([[[ 3.5000, 3.5000, -1.0491, -0.7152, -0.0749, 0.8530, 3.5000,
3.5000],
[ 3.5000, 3.5000, -1.3287, 1.8966, 0.1466, -0.2771, 3.5000,
3.5000]]])
>>> m = nn.ConstantPad1d(2, 3.5)
>>> input = torch.randn(1, 2, 3)
>>> input
tensor([[[ 1.6616, 1.4523, -1.1255],
[-3.6372, 0.1182, -1.8652]]])
>>> m(input)
tensor([[[ 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000, 3.5000],
[ 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000, 3.5000]]])
>>> # using different paddings for different sides
>>> m = nn.ConstantPad1d((3, 1), 3.5)
>>> m(input)
tensor([[[ 3.5000, 3.5000, 3.5000, 1.6616, 1.4523, -1.1255, 3.5000],
[ 3.5000, 3.5000, 3.5000, -3.6372, 0.1182, -1.8652, 3.5000]]])
"""
def __init__(self, padding, value):
super(ConstantPad1d, self).__init__(value)
self.padding = _pair(padding)
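# Editorial sketch (not upstream code): each padding module here is a thin
# wrapper over F.pad, so the two spellings below produce identical results:
#
#     >>> import torch
#     >>> import torch.nn as nn
#     >>> import torch.nn.functional as F
#     >>> x = torch.randn(1, 2, 4)
#     >>> m = nn.ConstantPad1d((3, 1), 0.5)
#     >>> torch.equal(m(x), F.pad(x, (3, 1), 'constant', 0.5))
#     True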
class ConstantPad2d(_ConstantPadNd):
r"""Pads the input tensor boundaries with a constant value.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
:math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` where
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ConstantPad2d(2, 3.5)
>>> input = torch.randn(1, 2, 2)
>>> input
tensor([[[ 1.6585, 0.4320],
[-0.8701, -0.4649]]])
>>> m(input)
tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 1.6585, 0.4320, 3.5000, 3.5000],
[ 3.5000, 3.5000, -0.8701, -0.4649, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])
>>> # using different paddings for different sides
>>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
>>> m(input)
tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 1.6585, 0.4320],
[ 3.5000, 3.5000, 3.5000, -0.8701, -0.4649],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])
"""
__constants__ = ['padding', 'value']
def __init__(self, padding, value):
super(ConstantPad2d, self).__init__(value)
self.padding = _quadruple(padding)
class ConstantPad3d(_ConstantPadNd):
r"""Pads the input tensor boundaries with a constant value.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 6-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
:math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
:math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where
:math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ConstantPad3d(3, 3.5)
>>> input = torch.randn(16, 3, 10, 20, 30)
>>> output = m(input)
>>> # using different paddings for different sides
>>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5)
>>> output = m(input)
"""
def __init__(self, padding, value):
super(ConstantPad3d, self).__init__(value)
self.padding = _ntuple(6)(padding)
class _ReflectionPadNd(Module):
__constants__ = ['padding']
def forward(self, input):
return F.pad(input, self.padding, 'reflect')
def extra_repr(self):
return '{}'.format(self.padding)
class ReflectionPad1d(_ReflectionPadNd):
r"""Pads the input tensor using the reflection of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 2-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
Shape:
- Input: :math:`(N, C, W_{in})`
- Output: :math:`(N, C, W_{out})` where
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ReflectionPad1d(2)
>>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
>>> input
tensor([[[0., 1., 2., 3.],
[4., 5., 6., 7.]]])
>>> m(input)
tensor([[[2., 1., 0., 1., 2., 3., 2., 1.],
[6., 5., 4., 5., 6., 7., 6., 5.]]])
>>> # using different paddings for different sides
>>> m = nn.ReflectionPad1d((3, 1))
>>> m(input)
tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
[7., 6., 5., 4., 5., 6., 7., 6.]]])
"""
def __init__(self, padding):
super(ReflectionPad1d, self).__init__()
self.padding = _pair(padding)
class ReflectionPad2d(_ReflectionPadNd):
r"""Pads the input tensor using the reflection of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
:math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` where
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ReflectionPad2d(2)
>>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
>>> input
tensor([[[[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]]]])
>>> m(input)
tensor([[[[8., 7., 6., 7., 8., 7., 6.],
[5., 4., 3., 4., 5., 4., 3.],
[2., 1., 0., 1., 2., 1., 0.],
[5., 4., 3., 4., 5., 4., 3.],
[8., 7., 6., 7., 8., 7., 6.],
[5., 4., 3., 4., 5., 4., 3.],
[2., 1., 0., 1., 2., 1., 0.]]]])
>>> # using different paddings for different sides
>>> m = nn.ReflectionPad2d((1, 1, 2, 0))
>>> m(input)
tensor([[[[7., 6., 7., 8., 7.],
[4., 3., 4., 5., 4.],
[1., 0., 1., 2., 1.],
[4., 3., 4., 5., 4.],
[7., 6., 7., 8., 7.]]]])
"""
def __init__(self, padding):
super(ReflectionPad2d, self).__init__()
self.padding = _quadruple(padding)
class _ReplicationPadNd(Module):
__constants__ = ['padding']
def forward(self, input):
return F.pad(input, self.padding, 'replicate')
def extra_repr(self):
return '{}'.format(self.padding)
class ReplicationPad1d(_ReplicationPadNd):
r"""Pads the input tensor using replication of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 2-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
Shape:
- Input: :math:`(N, C, W_{in})`
- Output: :math:`(N, C, W_{out})` where
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ReplicationPad1d(2)
>>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
>>> input
tensor([[[0., 1., 2., 3.],
[4., 5., 6., 7.]]])
>>> m(input)
tensor([[[0., 0., 0., 1., 2., 3., 3., 3.],
[4., 4., 4., 5., 6., 7., 7., 7.]]])
>>> # using different paddings for different sides
>>> m = nn.ReplicationPad1d((3, 1))
>>> m(input)
tensor([[[0., 0., 0., 0., 1., 2., 3., 3.],
[4., 4., 4., 4., 5., 6., 7., 7.]]])
"""
def __init__(self, padding):
super(ReplicationPad1d, self).__init__()
self.padding = _pair(padding)
class ReplicationPad2d(_ReplicationPadNd):
r"""Pads the input tensor using replication of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
:math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` where
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ReplicationPad2d(2)
>>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
>>> input
tensor([[[[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]]]])
>>> m(input)
tensor([[[[0., 0., 0., 1., 2., 2., 2.],
[0., 0., 0., 1., 2., 2., 2.],
[0., 0., 0., 1., 2., 2., 2.],
[3., 3., 3., 4., 5., 5., 5.],
[6., 6., 6., 7., 8., 8., 8.],
[6., 6., 6., 7., 8., 8., 8.],
[6., 6., 6., 7., 8., 8., 8.]]]])
>>> # using different paddings for different sides
>>> m = nn.ReplicationPad2d((1, 1, 2, 0))
>>> m(input)
tensor([[[[0., 0., 1., 2., 2.],
[0., 0., 1., 2., 2.],
[0., 0., 1., 2., 2.],
[3., 3., 4., 5., 5.],
[6., 6., 7., 8., 8.]]]])
"""
def __init__(self, padding):
super(ReplicationPad2d, self).__init__()
self.padding = _quadruple(padding)
class ReplicationPad3d(_ReplicationPadNd):
r"""Pads the input tensor using replication of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 6-`tuple`, uses
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
:math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
:math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where
:math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ReplicationPad3d(3)
>>> input = torch.randn(16, 3, 8, 320, 480)
>>> output = m(input)
>>> # using different paddings for different sides
>>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
>>> output = m(input)
"""
def __init__(self, padding):
super(ReplicationPad3d, self).__init__()
self.padding = _ntuple(6)(padding)
class ZeroPad2d(ConstantPad2d):
r"""Pads the input tensor boundaries with zero.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If it is an `int`, uses the same
padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
:math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` where
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ZeroPad2d(2)
>>> input = torch.randn(1, 1, 3, 3)
>>> input
tensor([[[[-0.1678, -0.4418, 1.9466],
[ 0.9604, -0.4219, -0.5241],
[-0.9162, -0.5436, -0.6446]]]])
>>> m(input)
tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.1678, -0.4418, 1.9466, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.9604, -0.4219, -0.5241, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.9162, -0.5436, -0.6446, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
>>> # using different paddings for different sides
>>> m = nn.ZeroPad2d((1, 1, 2, 0))
>>> m(input)
tensor([[[[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, -0.1678, -0.4418, 1.9466, 0.0000],
[ 0.0000, 0.9604, -0.4219, -0.5241, 0.0000],
[ 0.0000, -0.9162, -0.5436, -0.6446, 0.0000]]]])
"""
def __init__(self, padding):
super(ZeroPad2d, self).__init__(padding, 0.)
| /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/padding.py | 0.770637 | 0.509093 | padding.py | pypi |
import torch
import copy
from .. import functional as F
from .module import Module
from .activation import MultiheadAttention
from .container import ModuleList
from ..init import xavier_uniform_
from .dropout import Dropout
from .linear import Linear
from .normalization import LayerNorm
class Transformer(Module):
r"""A transformer model. User is able to modify the attributes as needed. The architecture
is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information
Processing Systems, pages 6000-6010. Users can build the BERT(https://arxiv.org/abs/1810.04805)
model with corresponding parameters.
Args:
d_model: the number of expected features in the encoder/decoder inputs (default=512).
nhead: the number of heads in the multiheadattention models (default=8).
num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of encoder/decoder intermediate layer, relu or gelu (default=relu).
custom_encoder: custom encoder (default=None).
custom_decoder: custom decoder (default=None).
Examples::
>>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12)
>>> src = torch.rand((10, 32, 512))
>>> tgt = torch.rand((20, 32, 512))
>>> out = transformer_model(src, tgt)
Note: A full example applying the nn.Transformer module to a word language model is available at
https://github.com/pytorch/examples/tree/master/word_language_model
"""
def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
activation="relu", custom_encoder=None, custom_decoder=None):
super(Transformer, self).__init__()
if custom_encoder is not None:
self.encoder = custom_encoder
else:
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation)
encoder_norm = LayerNorm(d_model)
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
if custom_decoder is not None:
self.decoder = custom_decoder
else:
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, activation)
decoder_norm = LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def forward(self, src, tgt, src_mask=None, tgt_mask=None,
memory_mask=None, src_key_padding_mask=None,
tgt_key_padding_mask=None, memory_key_padding_mask=None):
# type: (Tensor, Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor]) -> Tensor # noqa
r"""Take in and process masked source/target sequences.
Args:
src: the sequence to the encoder (required).
tgt: the sequence to the decoder (required).
src_mask: the additive mask for the src sequence (optional).
tgt_mask: the additive mask for the tgt sequence (optional).
memory_mask: the additive mask for the encoder output (optional).
src_key_padding_mask: the ByteTensor mask for src keys per batch (optional).
tgt_key_padding_mask: the ByteTensor mask for tgt keys per batch (optional).
memory_key_padding_mask: the ByteTensor mask for memory keys per batch (optional).
Shape:
- src: :math:`(S, N, E)`.
- tgt: :math:`(T, N, E)`.
- src_mask: :math:`(S, S)`.
- tgt_mask: :math:`(T, T)`.
- memory_mask: :math:`(T, S)`.
- src_key_padding_mask: :math:`(N, S)`.
- tgt_key_padding_mask: :math:`(N, T)`.
- memory_key_padding_mask: :math:`(N, S)`.
Note: [src/tgt/memory]_mask should be filled with
float('-inf') for the masked positions and float(0.0) otherwise. These masks
ensure that predictions for position i depend only on the unmasked positions
j and are applied identically for each sequence in a batch.
[src/tgt/memory]_key_padding_mask should be a ByteTensor where True values are positions
that should be masked with float('-inf'), while False values will be left unchanged.
This mask ensures that no information will be taken from position i if
it is masked, and has a separate mask for each sequence in a batch.
- output: :math:`(T, N, E)`.
Note: Due to the multi-head attention architecture in the transformer model,
the output sequence length of a transformer is the same as the input sequence
(i.e. target) length of the decoder.
where S is the source sequence length, T is the target sequence length, N is the
batch size, and E is the feature number.
Examples:
>>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
"""
if src.size(1) != tgt.size(1):
raise RuntimeError("the batch number of src and tgt must be equal")
if src.size(2) != self.d_model or tgt.size(2) != self.d_model:
raise RuntimeError("the feature number of src and tgt must be equal to d_model")
memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask)
output = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
return output
def generate_square_subsequent_mask(self, sz):
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
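# Editorial sketch (not upstream code): what the subsequent mask looks like
# for a short sequence -- row i may only attend to positions j <= i:
#
#     >>> import torch.nn as nn
#     >>> nn.Transformer().generate_square_subsequent_mask(3)
#     tensor([[0., -inf, -inf],
#             [0., 0., -inf],
#             [0., 0., 0.]])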
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
class TransformerEncoder(Module):
r"""TransformerEncoder is a stack of N encoder layers
Args:
encoder_layer: an instance of the TransformerEncoderLayer() class (required).
num_layers: the number of sub-encoder-layers in the encoder (required).
norm: the layer normalization component (optional).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
>>> src = torch.rand(10, 32, 512)
>>> out = transformer_encoder(src)
"""
__constants__ = ['norm']
def __init__(self, encoder_layer, num_layers, norm=None):
super(TransformerEncoder, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src, mask=None, src_key_padding_mask=None):
# type: (Tensor, Optional[Tensor], Optional[Tensor]) -> Tensor
r"""Pass the input through the encoder layers in turn.
Args:
src: the sequence to the encoder (required).
mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
output = src
for mod in self.layers:
output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(Module):
r"""TransformerDecoder is a stack of N decoder layers
Args:
decoder_layer: an instance of the TransformerDecoderLayer() class (required).
num_layers: the number of sub-decoder-layers in the decoder (required).
norm: the layer normalization component (optional).
Examples::
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
>>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
>>> memory = torch.rand(10, 32, 512)
>>> tgt = torch.rand(20, 32, 512)
>>> out = transformer_decoder(tgt, memory)
"""
__constants__ = ['norm']
def __init__(self, decoder_layer, num_layers, norm=None):
super(TransformerDecoder, self).__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, tgt, memory, tgt_mask=None,
memory_mask=None, tgt_key_padding_mask=None,
memory_key_padding_mask=None):
# type: (Tensor, Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor]) -> Tensor
r"""Pass the inputs (and mask) through the decoder layer in turn.
Args:
tgt: the sequence to the decoder (required).
memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
output = tgt
for mod in self.layers:
output = mod(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerEncoderLayer(Module):
r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify this
layer or implement it in a different way to suit their application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu"):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.activation = _get_activation_fn(activation)
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerEncoderLayer, self).__setstate__(state)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
# type: (Tensor, Optional[Tensor], Optional[Tensor]) -> Tensor
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
src2 = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
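# Position-wise feed-forward sub-block: linear -> activation -> dropout -> linear,
# again followed by a residual add and layer norm.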
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
class TransformerDecoderLayer(Module):
r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
This standard decoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify this
layer or implement it in a different way to suit their application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
Examples::
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
>>> memory = torch.rand(10, 32, 512)
>>> tgt = torch.rand(20, 32, 512)
>>> out = decoder_layer(tgt, memory)
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu"):
super(TransformerDecoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.norm3 = LayerNorm(d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.dropout3 = Dropout(dropout)
self.activation = _get_activation_fn(activation)
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super(TransformerDecoderLayer, self).__setstate__(state)
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
tgt_key_padding_mask=None, memory_key_padding_mask=None):
# type: (Tensor, Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor]) -> Tensor
r"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt: the sequence to the decoder layer (required).
memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
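# Encoder-decoder (cross) attention: queries come from tgt, keys/values from memory.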
tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
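# Position-wise feed-forward sub-block with residual add and layer norm.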
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def _get_clones(module, N):
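# Deep copies give each layer its own parameters; clones share no weights.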
return ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
raise RuntimeError("activation should be relu/gelu, not {}".format(activation)) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/transformer.py | 0.928963 | 0.504211 | transformer.py | pypi |
import warnings
import torch
from . import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from .module import Module
from .. import functional as F
class Threshold(Module):
r"""Thresholds each element of the input Tensor.
Threshold is defined as:
.. math::
y =
\begin{cases}
x, &\text{ if } x > \text{threshold} \\
\text{value}, &\text{ otherwise }
\end{cases}
Args:
threshold: The value to threshold at
value: The value to replace with
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = nn.Threshold(0.1, 20)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['threshold', 'value', 'inplace']
def __init__(self, threshold, value, inplace=False):
super(Threshold, self).__init__()
self.threshold = threshold
self.value = value
self.inplace = inplace
# TODO: check in THNN (if inplace == True, then assert value <= threshold)
def forward(self, input):
return F.threshold(input, self.threshold, self.value, self.inplace)
def extra_repr(self):
inplace_str = ', inplace=True' if self.inplace else ''
return 'threshold={}, value={}{}'.format(
self.threshold, self.value, inplace_str
)
class ReLU(Module):
r"""Applies the rectified linear unit function element-wise:
:math:`\text{ReLU}(x) = (x)^+ = \max(0, x)`
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/ReLU.png
Examples::
>>> m = nn.ReLU()
>>> input = torch.randn(2)
>>> output = m(input)
An implementation of CReLU - https://arxiv.org/abs/1603.05201
>>> m = nn.ReLU()
>>> input = torch.randn(2).unsqueeze(0)
>>> output = torch.cat((m(input),m(-input)))
"""
__constants__ = ['inplace']
def __init__(self, inplace=False):
super(ReLU, self).__init__()
self.inplace = inplace
def forward(self, input):
return F.relu(input, inplace=self.inplace)
def extra_repr(self):
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class RReLU(Module):
r"""Applies the randomized leaky rectified liner unit function, element-wise,
as described in the paper:
`Empirical Evaluation of Rectified Activations in Convolutional Network`_.
The function is defined as:
.. math::
\text{RReLU}(x) =
\begin{cases}
x & \text{if } x \geq 0 \\
ax & \text{ otherwise }
\end{cases}
where :math:`a` is randomly sampled from uniform distribution
:math:`\mathcal{U}(\text{lower}, \text{upper})`.
See: https://arxiv.org/pdf/1505.00853.pdf
Args:
lower: lower bound of the uniform distribution. Default: :math:`\frac{1}{8}`
upper: upper bound of the uniform distribution. Default: :math:`\frac{1}{3}`
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = nn.RReLU(0.1, 0.3)
>>> input = torch.randn(2)
>>> output = m(input)
.. _`Empirical Evaluation of Rectified Activations in Convolutional Network`:
https://arxiv.org/abs/1505.00853
"""
__constants__ = ['lower', 'upper', 'inplace']
def __init__(self, lower=1. / 8, upper=1. / 3, inplace=False):
super(RReLU, self).__init__()
self.lower = lower
self.upper = upper
self.inplace = inplace
def forward(self, input):
return F.rrelu(input, self.lower, self.upper, self.training, self.inplace)
def extra_repr(self):
inplace_str = ', inplace=True' if self.inplace else ''
return 'lower={}, upper={}{}'.format(self.lower, self.upper, inplace_str)
class Hardtanh(Module):
r"""Applies the HardTanh function element-wise
HardTanh is defined as:
.. math::
\text{HardTanh}(x) = \begin{cases}
1 & \text{ if } x > 1 \\
-1 & \text{ if } x < -1 \\
x & \text{ otherwise } \\
\end{cases}
The range of the linear region :math:`[-1, 1]` can be adjusted using
:attr:`min_val` and :attr:`max_val`.
Args:
min_val: minimum value of the linear region range. Default: -1
max_val: maximum value of the linear region range. Default: 1
inplace: can optionally do the operation in-place. Default: ``False``
Keyword arguments :attr:`min_value` and :attr:`max_value`
have been deprecated in favor of :attr:`min_val` and :attr:`max_val`.
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Hardtanh.png
Examples::
>>> m = nn.Hardtanh(-2, 2)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['min_val', 'max_val', 'inplace']
def __init__(self, min_val=-1., max_val=1., inplace=False, min_value=None, max_value=None):
super(Hardtanh, self).__init__()
if min_value is not None:
warnings.warn("keyword argument min_value is deprecated and rename to min_val")
min_val = min_value
if max_value is not None:
warnings.warn("keyword argument max_value is deprecated and rename to max_val")
max_val = max_value
self.min_val = min_val
self.max_val = max_val
self.inplace = inplace
assert self.max_val > self.min_val
def forward(self, input):
return F.hardtanh(input, self.min_val, self.max_val, self.inplace)
def extra_repr(self):
inplace_str = ', inplace=True' if self.inplace else ''
return 'min_val={}, max_val={}{}'.format(
self.min_val, self.max_val, inplace_str
)
class ReLU6(Hardtanh):
r"""Applies the element-wise function:
.. math::
\text{ReLU6}(x) = \min(\max(0,x), 6)
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/ReLU6.png
Examples::
>>> m = nn.ReLU6()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, inplace=False):
super(ReLU6, self).__init__(0., 6., inplace)
def extra_repr(self):
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class Sigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Sigmoid.png
Examples::
>>> m = nn.Sigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input):
return torch.sigmoid(input)
class Hardsigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{Hardsigmoid}(x) = \frac{\text{ReLU6}(x + 3)}{6}
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = nn.Hardsigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input):
return F.hardsigmoid(input)
class Tanh(Module):
r"""Applies the element-wise function:
.. math::
\text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)} {\exp(x) + \exp(-x)}
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Tanh.png
Examples::
>>> m = nn.Tanh()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input):
return torch.tanh(input)
class ELU(Module):
r"""Applies the element-wise function:
.. math::
\text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))
Args:
alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/ELU.png
Examples::
>>> m = nn.ELU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['alpha', 'inplace']
def __init__(self, alpha=1., inplace=False):
super(ELU, self).__init__()
self.alpha = alpha
self.inplace = inplace
def forward(self, input):
return F.elu(input, self.alpha, self.inplace)
def extra_repr(self):
inplace_str = ', inplace=True' if self.inplace else ''
return 'alpha={}{}'.format(self.alpha, inplace_str)
class CELU(Module):
r"""Applies the element-wise function:
.. math::
\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))
More details can be found in the paper `Continuously Differentiable Exponential Linear Units`_ .
Args:
alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/CELU.png
Examples::
>>> m = nn.CELU()
>>> input = torch.randn(2)
>>> output = m(input)
.. _`Continuously Differentiable Exponential Linear Units`:
https://arxiv.org/abs/1704.07483
"""
__constants__ = ['alpha', 'inplace']
def __init__(self, alpha=1., inplace=False):
super(CELU, self).__init__()
self.alpha = alpha
self.inplace = inplace
def forward(self, input):
return F.celu(input, self.alpha, self.inplace)
def extra_repr(self):
inplace_str = ', inplace=True' if self.inplace else ''
return 'alpha={}{}'.format(self.alpha, inplace_str)
class SELU(Module):
r"""Applied element-wise, as:
.. math::
\text{SELU}(x) = \text{scale} * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))
with :math:`\alpha = 1.6732632423543772848170429916717` and
:math:`\text{scale} = 1.0507009873554804934193349852946`.
More details can be found in the paper `Self-Normalizing Neural Networks`_ .
Args:
inplace (bool, optional): can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/SELU.png
Examples::
>>> m = nn.SELU()
>>> input = torch.randn(2)
>>> output = m(input)
.. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
"""
__constants__ = ['inplace']
def __init__(self, inplace=False):
super(SELU, self).__init__()
self.inplace = inplace
def forward(self, input):
return F.selu(input, self.inplace)
def extra_repr(self):
inplace_str = 'inplace=True' if self.inplace else ''
return inplace_str
class GLU(Module):
r"""Applies the gated linear unit function
:math:`\text{GLU}(a, b) = a \otimes \sigma(b)` where :math:`a` is the first half
of the input matrices and :math:`b` is the second half.
Args:
dim (int): the dimension on which to split the input. Default: -1
Shape:
- Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional
dimensions
- Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`
Examples::
>>> m = nn.GLU()
>>> input = torch.randn(4, 2)
>>> output = m(input)
"""
__constants__ = ['dim']
def __init__(self, dim=-1):
super(GLU, self).__init__()
self.dim = dim
def forward(self, input):
return F.glu(input, self.dim)
def extra_repr(self):
return 'dim={}'.format(self.dim)
class GELU(Module):
r"""Applies the Gaussian Error Linear Units function:
.. math::
\text{GELU}(x) = x * \Phi(x)
where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/GELU.png
Examples::
>>> m = nn.GELU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input):
return F.gelu(input)
class Hardshrink(Module):
r"""Applies the hard shrinkage function element-wise:
.. math::
\text{HardShrink}(x) =
\begin{cases}
x, & \text{ if } x > \lambda \\
x, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
Args:
lambd: the :math:`\lambda` value for the Hardshrink formulation. Default: 0.5
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Hardshrink.png
Examples::
>>> m = nn.Hardshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['lambd']
def __init__(self, lambd=0.5):
super(Hardshrink, self).__init__()
self.lambd = lambd
def forward(self, input):
return F.hardshrink(input, self.lambd)
def extra_repr(self):
return '{}'.format(self.lambd)
class LeakyReLU(Module):
r"""Applies the element-wise function:
.. math::
\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)
or
.. math::
\text{LeakyRELU}(x) =
\begin{cases}
x, & \text{ if } x \geq 0 \\
\text{negative\_slope} \times x, & \text{ otherwise }
\end{cases}
Args:
negative_slope: Controls the angle of the negative slope. Default: 1e-2
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/LeakyReLU.png
Examples::
>>> m = nn.LeakyReLU(0.1)
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['inplace', 'negative_slope']
def __init__(self, negative_slope=1e-2, inplace=False):
super(LeakyReLU, self).__init__()
self.negative_slope = negative_slope
self.inplace = inplace
def forward(self, input):
return F.leaky_relu(input, self.negative_slope, self.inplace)
def extra_repr(self):
inplace_str = ', inplace=True' if self.inplace else ''
return 'negative_slope={}{}'.format(self.negative_slope, inplace_str)
class LogSigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right)
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/LogSigmoid.png
Examples::
>>> m = nn.LogSigmoid()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input):
return F.logsigmoid(input)
class Softplus(Module):
r"""Applies the element-wise function:
.. math::
\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))
SoftPlus is a smooth approximation to the ReLU function and can be used
to constrain the output of a machine to always be positive.
For numerical stability the implementation reverts to the linear function
when :math:`input \times \beta > threshold`.
Args:
beta: the :math:`\beta` value for the Softplus formulation. Default: 1
threshold: values above this revert to a linear function. Default: 20
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Softplus.png
Examples::
>>> m = nn.Softplus()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['beta', 'threshold']
def __init__(self, beta=1, threshold=20):
super(Softplus, self).__init__()
self.beta = beta
self.threshold = threshold
def forward(self, input):
return F.softplus(input, self.beta, self.threshold)
def extra_repr(self):
return 'beta={}, threshold={}'.format(self.beta, self.threshold)
class Softshrink(Module):
r"""Applies the soft shrinkage function elementwise:
.. math::
\text{SoftShrinkage}(x) =
\begin{cases}
x - \lambda, & \text{ if } x > \lambda \\
x + \lambda, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
Args:
lambd: the :math:`\lambda` (must be no less than zero) value for the Softshrink formulation. Default: 0.5
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Softshrink.png
Examples::
>>> m = nn.Softshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['lambd']
def __init__(self, lambd=0.5):
super(Softshrink, self).__init__()
self.lambd = lambd
def forward(self, input):
return F.softshrink(input, self.lambd)
def extra_repr(self):
return str(self.lambd)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1, \dots, head_h) W^O
\quad \text{where} \quad head_i = \text{Attention}(Q W_i^Q, K W_i^K, V W_i^V)
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note: if kdim and vdim are None, they will be set to embed_dim such that
query, key, and value have the same number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__annotations__ = {
'bias_k': torch._jit_internal.Optional[torch.Tensor],
'bias_v': torch._jit_internal.Optional[torch.Tensor],
}
__constants__ = ['q_proj_weight', 'k_proj_weight', 'v_proj_weight', 'in_proj_weight']
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
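# When q, k and v all share embed_dim, their projections are packed into a
# single (3 * embed_dim, embed_dim) weight and split inside the functional call.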
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is a binary mask. When the value is True,
the corresponding position in the attention weights will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. This is an additive mask
(i.e. the values will be added to the attention weights). A 2D mask is broadcast across
the batch, while a 3D mask allows a different mask to be specified for each entry in the batch.
Shape:
- Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length.
- Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not self._qkv_same_embed_dim:
return F.multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
else:
return F.multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask)
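# Minimal usage sketch (illustrative only; shapes follow the docstring above):
def _multihead_attention_sketch():
    mha = MultiheadAttention(embed_dim=16, num_heads=4)
    query = torch.rand(5, 2, 16)   # (L, N, E)
    key = torch.rand(7, 2, 16)     # (S, N, E)
    value = torch.rand(7, 2, 16)   # (S, N, E)
    attn_output, attn_weights = mha(query, key, value)
    return attn_output.shape, attn_weights.shape  # (5, 2, 16), (2, 5, 7)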
class PReLU(Module):
r"""Applies the element-wise function:
.. math::
\text{PReLU}(x) = \max(0,x) + a * \min(0,x)
or
.. math::
\text{PReLU}(x) =
\begin{cases}
x, & \text{ if } x \geq 0 \\
ax, & \text{ otherwise }
\end{cases}
Here :math:`a` is a learnable parameter. When called without arguments, `nn.PReLU()` uses a single
parameter :math:`a` across all input channels. If called with `nn.PReLU(nChannels)`,
a separate :math:`a` is used for each input channel.
.. note::
weight decay should not be used when learning :math:`a` for good performance.
.. note::
Channel dim is the 2nd dim of the input. When the input has fewer than 2 dims,
there is no channel dim and the number of channels = 1.
Args:
num_parameters (int): number of :math:`a` to learn.
Although it takes an int as input, only two values are legitimate:
1, or the number of channels of the input. Default: 1
init (float): the initial value of :math:`a`. Default: 0.25
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Attributes:
weight (Tensor): the learnable weights of shape (:attr:`num_parameters`).
.. image:: scripts/activation_images/PReLU.png
Examples::
>>> m = nn.PReLU()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ['num_parameters']
def __init__(self, num_parameters=1, init=0.25):
self.num_parameters = num_parameters
super(PReLU, self).__init__()
self.weight = Parameter(torch.Tensor(num_parameters).fill_(init))
def forward(self, input):
return F.prelu(input, self.weight)
def extra_repr(self):
return 'num_parameters={}'.format(self.num_parameters)
class Softsign(Module):
r"""Applies the element-wise function:
.. math::
\text{SoftSign}(x) = \frac{x}{ 1 + |x|}
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Softsign.png
Examples::
>>> m = nn.Softsign()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input):
return F.softsign(input)
class Tanhshrink(Module):
r"""Applies the element-wise function:
.. math::
\text{Tanhshrink}(x) = x - \tanh(x)
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/Tanhshrink.png
Examples::
>>> m = nn.Tanhshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input):
return F.tanhshrink(input)
class Softmin(Module):
r"""Applies the Softmin function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range `[0, 1]` and sum to 1.
Softmin is defined as:
.. math::
\text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)}
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Arguments:
dim (int): A dimension along which Softmin will be computed (so every slice
along dim will sum to 1).
Returns:
a Tensor of the same dimension and shape as the input, with
values in the range [0, 1]
Examples::
>>> m = nn.Softmin()
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ['dim']
def __init__(self, dim=None):
super(Softmin, self).__init__()
self.dim = dim
def forward(self, input):
return F.softmin(input, self.dim, _stacklevel=5)
class Softmax(Module):
r"""Applies the Softmax function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Arguments:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
.. note::
This module doesn't work directly with NLLLoss,
which expects log-probabilities as input rather than probabilities.
Use `LogSoftmax` instead (it's faster and has better numerical properties).
Examples::
>>> m = nn.Softmax(dim=1)
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ['dim']
def __init__(self, dim=None):
super(Softmax, self).__init__()
self.dim = dim
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input):
return F.softmax(input, self.dim, _stacklevel=5)
def extra_repr(self):
return 'dim={dim}'.format(dim=self.dim)
class Softmax2d(Module):
r"""Applies SoftMax over features to each spatial location.
When given an image of ``Channels x Height x Width``, it will
apply `Softmax` to each location :math:`(Channels, h_i, w_j)`
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Examples::
>>> m = nn.Softmax2d()
>>> # you softmax over the 2nd dimension
>>> input = torch.randn(2, 3, 12, 13)
>>> output = m(input)
"""
def forward(self, input):
assert input.dim() == 4, 'Softmax2d requires a 4D tensor as input'
return F.softmax(input, 1, _stacklevel=5)
class LogSoftmax(Module):
r"""Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional
input Tensor. The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Arguments:
dim (int): A dimension along which LogSoftmax will be computed.
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [-inf, 0)
Examples::
>>> m = nn.LogSoftmax()
>>> input = torch.randn(2, 3)
>>> output = m(input)
"""
__constants__ = ['dim']
def __init__(self, dim=None):
super(LogSoftmax, self).__init__()
self.dim = dim
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, 'dim'):
self.dim = None
def forward(self, input):
return F.log_softmax(input, self.dim, _stacklevel=5) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/activation.py | 0.918689 | 0.657676 | activation.py | pypi |
from .module import Module
from .. import functional as F
class _DropoutNd(Module):
__constants__ = ['p', 'inplace']
def __init__(self, p=0.5, inplace=False):
super(_DropoutNd, self).__init__()
if p < 0 or p > 1:
raise ValueError("dropout probability has to be between 0 and 1, "
"but got {}".format(p))
self.p = p
self.inplace = inplace
def extra_repr(self):
return 'p={}, inplace={}'.format(self.p, self.inplace)
class Dropout(_DropoutNd):
r"""During training, randomly zeroes some of the elements of the input
tensor with probability :attr:`p` using samples from a Bernoulli
distribution. Each channel will be zeroed out independently on every forward
call.
This has proven to be an effective technique for regularization and
preventing the co-adaptation of neurons as described in the paper
`Improving neural networks by preventing co-adaptation of feature
detectors`_ .
Furthermore, the outputs are scaled by a factor of :math:`\frac{1}{1-p}` during
training. This means that during evaluation the module simply computes an
identity function.
Args:
p: probability of an element to be zeroed. Default: 0.5
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`. Input can be of any shape
- Output: :math:`(*)`. Output is of the same shape as input
Examples::
>>> m = nn.Dropout(p=0.2)
>>> input = torch.randn(20, 16)
>>> output = m(input)
.. _Improving neural networks by preventing co-adaptation of feature
detectors: https://arxiv.org/abs/1207.0580
"""
def forward(self, input):
return F.dropout(input, self.p, self.training, self.inplace)
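# Illustrative sketch (not part of the original file): kept activations are
# scaled by 1/(1-p) during training, and dropout is the identity in eval mode.
def _dropout_scaling_sketch():
    import torch  # assumed available; this module itself does not import torch
    m = Dropout(p=0.5)
    m.train()
    x = torch.ones(8)
    y = m(x)       # surviving entries equal 1 / (1 - 0.5) == 2.0
    m.eval()
    assert torch.equal(m(x), x)  # identity at evaluation time
    return y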
class Dropout2d(_DropoutNd):
r"""Randomly zero out entire channels (a channel is a 2D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 2D tensor :math:`\text{input}[i, j]`).
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv2d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout2d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zero-ed.
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples::
>>> m = nn.Dropout2d(p=0.2)
>>> input = torch.randn(20, 16, 32, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
http://arxiv.org/abs/1411.4280
"""
def forward(self, input):
return F.dropout2d(input, self.p, self.training, self.inplace)
class Dropout3d(_DropoutNd):
r"""Randomly zero out entire channels (a channel is a 3D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 3D tensor :math:`\text{input}[i, j]`).
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv3d` modules.
As described in the paper
`Efficient Object Localization Using Convolutional Networks`_ ,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then i.i.d. dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout3d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zeroed.
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples::
>>> m = nn.Dropout3d(p=0.2)
>>> input = torch.randn(20, 16, 4, 32, 32)
>>> output = m(input)
.. _Efficient Object Localization Using Convolutional Networks:
http://arxiv.org/abs/1411.4280
"""
def forward(self, input):
return F.dropout3d(input, self.p, self.training, self.inplace)
class AlphaDropout(_DropoutNd):
r"""Applies Alpha Dropout over the input.
Alpha Dropout is a type of Dropout that maintains the self-normalizing
property.
For an input with zero mean and unit standard deviation, the output of
Alpha Dropout maintains the original mean and standard deviation of the
input.
Alpha Dropout goes hand-in-hand with the SELU activation function, which ensures
that the outputs have zero mean and unit standard deviation.
During training, it randomly masks some of the elements of the input
tensor with probability *p* using samples from a Bernoulli distribution.
The elements to be masked are randomized on every forward call, and scaled
and shifted to maintain zero mean and unit standard deviation.
During evaluation the module simply computes an identity function.
More details can be found in the paper `Self-Normalizing Neural Networks`_ .
Args:
p (float): probability of an element to be dropped. Default: 0.5
inplace (bool, optional): If set to ``True``, will do this operation
in-place
Shape:
- Input: :math:`(*)`. Input can be of any shape
- Output: :math:`(*)`. Output is of the same shape as input
Examples::
>>> m = nn.AlphaDropout(p=0.2)
>>> input = torch.randn(20, 16)
>>> output = m(input)
.. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
"""
def forward(self, input):
return F.alpha_dropout(input, self.p, self.training)
class FeatureAlphaDropout(_DropoutNd):
def forward(self, input):
return F.feature_alpha_dropout(input, self.p, self.training) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/dropout.py | 0.927396 | 0.683618 | dropout.py | pypi |
import torch
import numbers
from torch.nn.parameter import Parameter
from .module import Module
from ._functions import CrossMapLRN2d as _cross_map_lrn2d
from .. import functional as F
from .. import init
class LocalResponseNorm(Module):
r"""Applies local response normalization over an input signal composed
of several input planes, where channels occupy the second dimension.
Applies normalization across channels.
.. math::
b_{c} = a_{c}\left(k + \frac{\alpha}{n}
\sum_{c'=\max(0, c-n/2)}^{\min(N-1,c+n/2)}a_{c'}^2\right)^{-\beta}
Args:
size: amount of neighbouring channels used for normalization
alpha: multiplicative factor. Default: 0.0001
beta: exponent. Default: 0.75
k: additive factor. Default: 1
Shape:
- Input: :math:`(N, C, *)`
- Output: :math:`(N, C, *)` (same shape as input)
Examples::
>>> lrn = nn.LocalResponseNorm(2)
>>> signal_2d = torch.randn(32, 5, 24, 24)
>>> signal_4d = torch.randn(16, 5, 7, 7, 7, 7)
>>> output_2d = lrn(signal_2d)
>>> output_4d = lrn(signal_4d)
"""
__constants__ = ['size', 'alpha', 'beta', 'k']
def __init__(self, size, alpha=1e-4, beta=0.75, k=1.):
super(LocalResponseNorm, self).__init__()
self.size = size
self.alpha = alpha
self.beta = beta
self.k = k
def forward(self, input):
return F.local_response_norm(input, self.size, self.alpha, self.beta,
self.k)
def extra_repr(self):
return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
class CrossMapLRN2d(Module):
def __init__(self, size, alpha=1e-4, beta=0.75, k=1):
super(CrossMapLRN2d, self).__init__()
self.size = size
self.alpha = alpha
self.beta = beta
self.k = k
def forward(self, input):
return _cross_map_lrn2d.apply(input, self.size, self.alpha, self.beta,
self.k)
def extra_repr(self):
return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)
class LayerNorm(Module):
r"""Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated separately over the last
certain number of dimensions, which have to be of the shape specified by
:attr:`normalized_shape`.
:math:`\gamma` and :math:`\beta` are learnable affine transform parameters of
:attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.
.. note::
Unlike Batch Normalization and Instance Normalization, which apply
scalar scale and bias for each entire channel/plane with the
:attr:`affine` option, Layer Normalization applies per-element scale and
bias with :attr:`elementwise_affine`.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
normalized_shape (int or list or torch.Size): input shape from an expected input
of size
.. math::
[* \times \text{normalized\_shape}[0] \times \text{normalized\_shape}[1]
\times \ldots \times \text{normalized\_shape}[-1]]
If a single integer is used, it is treated as a singleton list, and this module will
normalize over the last dimension which is expected to be of that specific size.
eps: a value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: a boolean value that when set to ``True``, this module
has learnable per-element affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, *)`
- Output: :math:`(N, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 5, 10, 10)
>>> # With Learnable Parameters
>>> m = nn.LayerNorm(input.size()[1:])
>>> # Without Learnable Parameters
>>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False)
>>> # Normalize over last two dimensions
>>> m = nn.LayerNorm([10, 10])
>>> # Normalize over last dimension of size 10
>>> m = nn.LayerNorm(10)
>>> # Activating the module
>>> output = m(input)
.. _`Layer Normalization`: https://arxiv.org/abs/1607.06450
"""
__constants__ = ['normalized_shape', 'eps', 'elementwise_affine']
def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
super(LayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = tuple(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = Parameter(torch.Tensor(*normalized_shape))
self.bias = Parameter(torch.Tensor(*normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
return F.layer_norm(
input, self.normalized_shape, self.weight, self.bias, self.eps)
def extra_repr(self):
return '{normalized_shape}, eps={eps}, ' \
'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
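# Minimal sketch (illustrative only): normalizing over the last dimension.
def _layer_norm_sketch():
    x = torch.randn(4, 10)
    m = LayerNorm(10)
    y = m(x)
    # Each row now has mean ~ 0 and (biased) variance ~ 1, up to eps.
    return y.mean(dim=-1), y.var(dim=-1, unbiased=False)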
class GroupNorm(Module):
r"""Applies Group Normalization over a mini-batch of inputs as described in
the paper `Group Normalization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The input channels are separated into :attr:`num_groups` groups, each containing
``num_channels / num_groups`` channels. The mean and standard-deviation are calculated
separately over each group. :math:`\gamma` and :math:`\beta` are learnable
per-channel affine transform parameter vectors of size :attr:`num_channels` if
:attr:`affine` is ``True``.
This layer uses statistics computed from input data in both training and
evaluation modes.
Args:
num_groups (int): number of groups to separate the channels into
num_channels (int): number of channels expected in input
eps: a value added to the denominator for numerical stability. Default: 1e-5
affine: a boolean value that when set to ``True``, this module
has learnable per-channel affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
Shape:
- Input: :math:`(N, C, *)` where :math:`C=\text{num\_channels}`
- Output: :math:`(N, C, *)` (same shape as input)
Examples::
>>> input = torch.randn(20, 6, 10, 10)
>>> # Separate 6 channels into 3 groups
>>> m = nn.GroupNorm(3, 6)
>>> # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
>>> m = nn.GroupNorm(6, 6)
>>> # Put all 6 channels into a single group (equivalent with LayerNorm)
>>> m = nn.GroupNorm(1, 6)
>>> # Activating the module
>>> output = m(input)
.. _`Group Normalization`: https://arxiv.org/abs/1803.08494
"""
__constants__ = ['num_groups', 'num_channels', 'eps', 'affine']
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
super(GroupNorm, self).__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(torch.Tensor(num_channels))
self.bias = Parameter(torch.Tensor(num_channels))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
return F.group_norm(
input, self.num_groups, self.weight, self.bias, self.eps)
def extra_repr(self):
return '{num_groups}, {num_channels}, eps={eps}, ' \
'affine={affine}'.format(**self.__dict__)
# TODO: ContrastiveNorm2d
# TODO: DivisiveNorm2d
# TODO: SubtractiveNorm2d | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/normalization.py | 0.953134 | 0.768625 | normalization.py | pypi |
from .module import Module
from .. import functional as F
class PairwiseDistance(Module):
r"""
Computes the batchwise pairwise distance between vectors :math:`v_1`, :math:`v_2` using the p-norm:
.. math ::
\Vert x \Vert _p = \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}.
Args:
p (real): the norm degree. Default: 2
eps (float, optional): Small value to avoid division by zero.
Default: 1e-6
keepdim (bool, optional): Determines whether or not to keep the vector dimension.
Default: False
Shape:
- Input1: :math:`(N, D)` where `D = vector dimension`
- Input2: :math:`(N, D)`, same shape as the Input1
- Output: :math:`(N)`. If :attr:`keepdim` is ``True``, then :math:`(N, 1)`.
Examples::
>>> pdist = nn.PairwiseDistance(p=2)
>>> input1 = torch.randn(100, 128)
>>> input2 = torch.randn(100, 128)
>>> output = pdist(input1, input2)
"""
__constants__ = ['norm', 'eps', 'keepdim']
def __init__(self, p=2., eps=1e-6, keepdim=False):
super(PairwiseDistance, self).__init__()
self.norm = p
self.eps = eps
self.keepdim = keepdim
def forward(self, x1, x2):
return F.pairwise_distance(x1, x2, self.norm, self.eps, self.keepdim)
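# Illustrative check (not part of the original file): the p-norm distance of a
# batch of vectors to itself is almost zero; eps keeps it slightly above 0.
def _pairwise_distance_sketch():
    import torch  # assumed available; this module itself does not import torch
    pdist = PairwiseDistance(p=2)
    a = torch.randn(3, 8)
    return pdist(a, a)  # ~ tensor([0., 0., 0.])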
class CosineSimilarity(Module):
r"""Returns cosine similarity between :math:`x_1` and :math:`x_2`, computed along dim.
.. math ::
\text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}.
Args:
dim (int, optional): Dimension where cosine similarity is computed. Default: 1
eps (float, optional): Small value to avoid division by zero.
Default: 1e-8
Shape:
- Input1: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`
- Input2: :math:`(\ast_1, D, \ast_2)`, same shape as the Input1
- Output: :math:`(\ast_1, \ast_2)`
Examples::
>>> input1 = torch.randn(100, 128)
>>> input2 = torch.randn(100, 128)
>>> cos = nn.CosineSimilarity(dim=1, eps=1e-6)
>>> output = cos(input1, input2)
"""
__constants__ = ['dim', 'eps']
def __init__(self, dim=1, eps=1e-8):
super(CosineSimilarity, self).__init__()
self.dim = dim
self.eps = eps
def forward(self, x1, x2):
return F.cosine_similarity(x1, x2, self.dim, self.eps) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/distance.py | 0.96277 | 0.757279 | distance.py | pypi |
from .module import Module
from .. import functional as F
class Upsample(Module):
r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data.
The input data is assumed to be of the form
`minibatch x channels x [optional depth] x [optional height] x width`.
Hence, for spatial inputs, we expect a 4D Tensor and for volumetric inputs, we expect a 5D Tensor.
The algorithms available for upsampling are nearest neighbor and linear,
bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensor,
respectively.
One can either give a :attr:`scale_factor` or the target output :attr:`size` to
calculate the output size. (You cannot give both, as it is ambiguous)
Args:
size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional):
output spatial sizes
scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional):
multiplier for spatial size. Has to match input size if it is a tuple.
mode (str, optional): the upsampling algorithm: one of ``'nearest'``,
``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``.
Default: ``'nearest'``
align_corners (bool, optional): if ``True``, the corner pixels of the input
and output tensors are aligned, and thus preserving the values at
those pixels. This only has effect when :attr:`mode` is
``'linear'``, ``'bilinear'``, or ``'trilinear'``. Default: ``False``
Shape:
- Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, W_{out})`, :math:`(N, C, H_{out}, W_{out})`
or :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = \left\lfloor D_{in} \times \text{scale\_factor} \right\rfloor
.. math::
H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
.. math::
W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
.. warning::
With ``align_corners = True``, the linearly interpolating modes
(`linear`, `bilinear`, `bicubic`, and `trilinear`) don't proportionally
align the output and input pixels, and thus the output values can depend
on the input size. This was the default behavior for these modes up to
version 0.3.1. Since then, the default behavior is
``align_corners = False``. See below for concrete examples on how this
affects the outputs.
.. note::
If you want downsampling/general resizing, you should use :func:`~nn.functional.interpolate`.
Examples::
>>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
>>> input
tensor([[[[ 1., 2.],
[ 3., 4.]]]])
>>> m = nn.Upsample(scale_factor=2, mode='nearest')
>>> m(input)
tensor([[[[ 1., 1., 2., 2.],
[ 1., 1., 2., 2.],
[ 3., 3., 4., 4.],
[ 3., 3., 4., 4.]]]])
>>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False
>>> m(input)
tensor([[[[ 1.0000, 1.2500, 1.7500, 2.0000],
[ 1.5000, 1.7500, 2.2500, 2.5000],
[ 2.5000, 2.7500, 3.2500, 3.5000],
[ 3.0000, 3.2500, 3.7500, 4.0000]]]])
>>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
>>> m(input)
tensor([[[[ 1.0000, 1.3333, 1.6667, 2.0000],
[ 1.6667, 2.0000, 2.3333, 2.6667],
[ 2.3333, 2.6667, 3.0000, 3.3333],
[ 3.0000, 3.3333, 3.6667, 4.0000]]]])
>>> # Try scaling the same data in a larger tensor
>>>
>>> input_3x3 = torch.zeros(3, 3).view(1, 1, 3, 3)
>>> input_3x3[:, :, :2, :2].copy_(input)
tensor([[[[ 1., 2.],
[ 3., 4.]]]])
>>> input_3x3
tensor([[[[ 1., 2., 0.],
[ 3., 4., 0.],
[ 0., 0., 0.]]]])
>>> m = nn.Upsample(scale_factor=2, mode='bilinear') # align_corners=False
>>> # Notice that values in top left corner are the same with the small input (except at boundary)
>>> m(input_3x3)
tensor([[[[ 1.0000, 1.2500, 1.7500, 1.5000, 0.5000, 0.0000],
[ 1.5000, 1.7500, 2.2500, 1.8750, 0.6250, 0.0000],
[ 2.5000, 2.7500, 3.2500, 2.6250, 0.8750, 0.0000],
[ 2.2500, 2.4375, 2.8125, 2.2500, 0.7500, 0.0000],
[ 0.7500, 0.8125, 0.9375, 0.7500, 0.2500, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
>>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
>>> # Notice that values in top left corner are now changed
>>> m(input_3x3)
tensor([[[[ 1.0000, 1.4000, 1.8000, 1.6000, 0.8000, 0.0000],
[ 1.8000, 2.2000, 2.6000, 2.2400, 1.1200, 0.0000],
[ 2.6000, 3.0000, 3.4000, 2.8800, 1.4400, 0.0000],
[ 2.4000, 2.7200, 3.0400, 2.5600, 1.2800, 0.0000],
[ 1.2000, 1.3600, 1.5200, 1.2800, 0.6400, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
"""
__constants__ = ['size', 'scale_factor', 'mode', 'align_corners', 'name']
def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=None):
super(Upsample, self).__init__()
self.name = type(self).__name__
self.size = size
if isinstance(scale_factor, tuple):
self.scale_factor = tuple(float(factor) for factor in scale_factor)
else:
self.scale_factor = float(scale_factor) if scale_factor else None
self.mode = mode
self.align_corners = align_corners
def forward(self, input):
return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners)
def extra_repr(self):
if self.scale_factor is not None:
info = 'scale_factor=' + str(self.scale_factor)
else:
info = 'size=' + str(self.size)
info += ', mode=' + self.mode
return info
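# A minimal sketch (an addition, not part of the original source) of the note
# above: for downsampling or general resizing, the recommended path is the
# functional API. Assumes only the public torch / torch.nn.functional interface.
#
#   >>> import torch
#   >>> import torch.nn.functional as F
#   >>> x = torch.randn(1, 1, 4, 4)
#   >>> F.interpolate(x, scale_factor=0.5, mode='nearest').shape
#   torch.Size([1, 1, 2, 2])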
class UpsamplingNearest2d(Upsample):
r"""Applies a 2D nearest neighbor upsampling to an input signal composed of several input
channels.
To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
as its constructor argument.
When :attr:`size` is given, it is the output size of the image `(h, w)`.
Args:
size (int or Tuple[int, int], optional): output spatial sizes
scale_factor (float or Tuple[float, float], optional): multiplier for
spatial size.
.. warning::
This class is deprecated in favor of :func:`~nn.functional.interpolate`.
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` where
.. math::
H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
.. math::
W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
Examples::
>>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
>>> input
tensor([[[[ 1., 2.],
[ 3., 4.]]]])
>>> m = nn.UpsamplingNearest2d(scale_factor=2)
>>> m(input)
tensor([[[[ 1., 1., 2., 2.],
[ 1., 1., 2., 2.],
[ 3., 3., 4., 4.],
[ 3., 3., 4., 4.]]]])
"""
def __init__(self, size=None, scale_factor=None):
super(UpsamplingNearest2d, self).__init__(size, scale_factor, mode='nearest')
class UpsamplingBilinear2d(Upsample):
r"""Applies a 2D bilinear upsampling to an input signal composed of several input
channels.
To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
as its constructor argument.
When :attr:`size` is given, it is the output size of the image `(h, w)`.
Args:
size (int or Tuple[int, int], optional): output spatial sizes
scale_factor (float or Tuple[float, float], optional): multiplier for
spatial size.
.. warning::
This class is deprecated in favor of :func:`~nn.functional.interpolate`. It is
equivalent to ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``.
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` where
.. math::
H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
.. math::
W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
Examples::
>>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
>>> input
tensor([[[[ 1., 2.],
[ 3., 4.]]]])
>>> m = nn.UpsamplingBilinear2d(scale_factor=2)
>>> m(input)
tensor([[[[ 1.0000, 1.3333, 1.6667, 2.0000],
[ 1.6667, 2.0000, 2.3333, 2.6667],
[ 2.3333, 2.6667, 3.0000, 3.3333],
[ 3.0000, 3.3333, 3.6667, 4.0000]]]])
"""
def __init__(self, size=None, scale_factor=None):
super(UpsamplingBilinear2d, self).__init__(size, scale_factor, mode='bilinear', align_corners=True)

# --- end of file: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/upsampling.py ---
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .module import Module
from .utils import _single, _pair, _triple
from .. import functional as F
class _MaxPoolNd(Module):
__constants__ = ['kernel_size', 'stride', 'padding', 'dilation',
'return_indices', 'ceil_mode']
def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False):
super(_MaxPoolNd, self).__init__()
self.kernel_size = kernel_size
self.stride = stride or kernel_size
self.padding = padding
self.dilation = dilation
self.return_indices = return_indices
self.ceil_mode = ceil_mode
def extra_repr(self):
return 'kernel_size={kernel_size}, stride={stride}, padding={padding}' \
', dilation={dilation}, ceil_mode={ceil_mode}'.format(**self.__dict__)
class MaxPool1d(_MaxPoolNd):
r"""Applies a 1D max pooling over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, L)`
and output :math:`(N, C, L_{out})` can be precisely described as:
.. math::
out(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel\_size} - 1}
input(N_i, C_j, stride \times k + m)
If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Args:
kernel_size: the size of the window to take a max over
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on both sides
dilation: a parameter that controls the stride of elements in the window
return_indices: if ``True``, will return the max indices along with the outputs.
Useful for :class:`torch.nn.MaxUnpool1d` later
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, L_{in})`
- Output: :math:`(N, C, L_{out})`, where
.. math::
L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation}
\times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
Examples::
>>> # pool of size=3, stride=2
>>> m = nn.MaxPool1d(3, stride=2)
>>> input = torch.randn(20, 16, 50)
>>> output = m(input)
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
def forward(self, input):
return F.max_pool1d(input, self.kernel_size, self.stride,
self.padding, self.dilation, self.ceil_mode,
self.return_indices)
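# Illustrative check (an addition, not original source) of the L_out formula in
# the docstring above, using only the public API:
#
#   >>> import torch
#   >>> import torch.nn as nn
#   >>> m = nn.MaxPool1d(3, stride=2)
#   >>> m(torch.randn(20, 16, 50)).shape   # floor((50 - 1*(3-1) - 1)/2 + 1) = 24
#   torch.Size([20, 16, 24])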
class MaxPool2d(_MaxPoolNd):
r"""Applies a 2D max pooling over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
can be precisely described as:
.. math::
\begin{aligned}
out(N_i, C_j, h, w) ={} & \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
& \text{input}(N_i, C_j, \text{stride[0]} \times h + m,
\text{stride[1]} \times w + n)
\end{aligned}
If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
Args:
kernel_size: the size of the window to take a max over
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on both sides
dilation: a parameter that controls the stride of elements in the window
return_indices: if ``True``, will return the max indices along with the outputs.
Useful for :class:`torch.nn.MaxUnpool2d` later
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})`, where
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding[0]} - \text{dilation[0]}
\times (\text{kernel\_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding[1]} - \text{dilation[1]}
\times (\text{kernel\_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor
Examples::
>>> # pool of square window of size=3, stride=2
>>> m = nn.MaxPool2d(3, stride=2)
>>> # pool of non-square window
>>> m = nn.MaxPool2d((3, 2), stride=(2, 1))
>>> input = torch.randn(20, 16, 50, 32)
>>> output = m(input)
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
def forward(self, input):
return F.max_pool2d(input, self.kernel_size, self.stride,
self.padding, self.dilation, self.ceil_mode,
self.return_indices)
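# Illustrative check (an addition) of the per-dimension H_out/W_out formulas
# above for a non-square window:
#
#   >>> import torch
#   >>> import torch.nn as nn
#   >>> m = nn.MaxPool2d((3, 2), stride=(2, 1))
#   >>> m(torch.randn(20, 16, 50, 32)).shape   # H: floor((50-3)/2+1)=24, W: floor((32-2)/1+1)=31
#   torch.Size([20, 16, 24, 31])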
class MaxPool3d(_MaxPoolNd):
r"""Applies a 3D max pooling over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
can be precisely described as:
.. math::
\begin{aligned}
\text{out}(N_i, C_j, d, h, w) ={} & \max_{k=0, \ldots, kD-1} \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
& \text{input}(N_i, C_j, \text{stride[0]} \times d + k,
\text{stride[1]} \times h + m, \text{stride[2]} \times w + n)
\end{aligned}
If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the depth, height and width dimension
- a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
the second `int` for the height dimension and the third `int` for the width dimension
Args:
kernel_size: the size of the window to take a max over
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on all three sides
dilation: a parameter that controls the stride of elements in the window
return_indices: if ``True``, will return the max indices along with the outputs.
Useful for :class:`torch.nn.MaxUnpool3d` later
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times
(\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times
(\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2] \times
(\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor
Examples::
>>> # pool of square window of size=3, stride=2
>>> m = nn.MaxPool3d(3, stride=2)
>>> # pool of non-square window
>>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2))
>>> input = torch.randn(20, 16, 50, 44, 31)
>>> output = m(input)
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
""" # noqa: E501
def forward(self, input):
return F.max_pool3d(input, self.kernel_size, self.stride,
self.padding, self.dilation, self.ceil_mode,
self.return_indices)
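# Sketch (an addition) of the ceil_mode flag shared by all the MaxPoolNd
# classes: with ceil_mode=True the output-shape formula uses ceil instead of
# floor, which can add one extra output element per dimension.
#
#   >>> import torch
#   >>> import torch.nn as nn
#   >>> x = torch.randn(1, 1, 10, 10, 10)
#   >>> nn.MaxPool3d(3, stride=2)(x).shape                  # floor(4.5) per dim
#   torch.Size([1, 1, 4, 4, 4])
#   >>> nn.MaxPool3d(3, stride=2, ceil_mode=True)(x).shape  # ceil(4.5) per dim
#   torch.Size([1, 1, 5, 5, 5])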
class _MaxUnpoolNd(Module):
def extra_repr(self):
return 'kernel_size={}, stride={}, padding={}'.format(
self.kernel_size, self.stride, self.padding
)
class MaxUnpool1d(_MaxUnpoolNd):
r"""Computes a partial inverse of :class:`MaxPool1d`.
:class:`MaxPool1d` is not fully invertible, since the non-maximal values are lost.
:class:`MaxUnpool1d` takes in as input the output of :class:`MaxPool1d`
including the indices of the maximal values and computes a partial inverse
in which all non-maximal values are set to zero.
.. note:: :class:`MaxPool1d` can map several input sizes to the same output
sizes. Hence, the inversion process can get ambiguous.
To accommodate this, you can provide the needed output size
as an additional argument :attr:`output_size` in the forward call.
See the Inputs and Example below.
Args:
kernel_size (int or tuple): Size of the max pooling window.
stride (int or tuple): Stride of the max pooling window.
It is set to :attr:`kernel_size` by default.
padding (int or tuple): Padding that was added to the input
Inputs:
- `input`: the input Tensor to invert
- `indices`: the indices given out by :class:`~torch.nn.MaxPool1d`
- `output_size` (optional): the targeted output size
Shape:
- Input: :math:`(N, C, L_{in})`
- Output: :math:`(N, C, L_{out})`, where
.. math::
L_{out} = (L_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0]
or as given by :attr:`output_size` in the call operator
Example::
>>> pool = nn.MaxPool1d(2, stride=2, return_indices=True)
>>> unpool = nn.MaxUnpool1d(2, stride=2)
>>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8]]])
>>> output, indices = pool(input)
>>> unpool(output, indices)
tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]])
>>> # Example showcasing the use of output_size
>>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8, 9]]])
>>> output, indices = pool(input)
>>> unpool(output, indices, output_size=input.size())
tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8., 0.]]])
>>> unpool(output, indices)
tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]])
"""
def __init__(self, kernel_size, stride=None, padding=0):
super(MaxUnpool1d, self).__init__()
self.kernel_size = _single(kernel_size)
self.stride = _single(stride or kernel_size)
self.padding = _single(padding)
def forward(self, input, indices, output_size=None):
# type: (Tensor, Tensor, Optional[List[int]]) -> Tensor
return F.max_unpool1d(input, indices, self.kernel_size, self.stride,
self.padding, output_size)
class MaxUnpool2d(_MaxUnpoolNd):
r"""Computes a partial inverse of :class:`MaxPool2d`.
:class:`MaxPool2d` is not fully invertible, since the non-maximal values are lost.
:class:`MaxUnpool2d` takes in as input the output of :class:`MaxPool2d`
including the indices of the maximal values and computes a partial inverse
in which all non-maximal values are set to zero.
.. note:: :class:`MaxPool2d` can map several input sizes to the same output
sizes. Hence, the inversion process can get ambiguous.
To accommodate this, you can provide the needed output size
as an additional argument :attr:`output_size` in the forward call.
See the Inputs and Example below.
Args:
kernel_size (int or tuple): Size of the max pooling window.
stride (int or tuple): Stride of the max pooling window.
It is set to :attr:`kernel_size` by default.
padding (int or tuple): Padding that was added to the input
Inputs:
- `input`: the input Tensor to invert
- `indices`: the indices given out by :class:`~torch.nn.MaxPool2d`
- `output_size` (optional): the targeted output size
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})`, where
.. math::
H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}
.. math::
W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}
or as given by :attr:`output_size` in the call operator
Example::
>>> pool = nn.MaxPool2d(2, stride=2, return_indices=True)
>>> unpool = nn.MaxUnpool2d(2, stride=2)
>>> input = torch.tensor([[[[ 1., 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12],
[13, 14, 15, 16]]]])
>>> output, indices = pool(input)
>>> unpool(output, indices)
tensor([[[[ 0., 0., 0., 0.],
[ 0., 6., 0., 8.],
[ 0., 0., 0., 0.],
[ 0., 14., 0., 16.]]]])
>>> # specify a different output size than input size
>>> unpool(output, indices, output_size=torch.Size([1, 1, 5, 5]))
tensor([[[[ 0., 0., 0., 0., 0.],
[ 6., 0., 8., 0., 0.],
[ 0., 0., 0., 14., 0.],
[ 16., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]]]])
"""
def __init__(self, kernel_size, stride=None, padding=0):
super(MaxUnpool2d, self).__init__()
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride or kernel_size)
self.padding = _pair(padding)
def forward(self, input, indices, output_size=None):
# type: (Tensor, Tensor, Optional[List[int]]) -> Tensor
return F.max_unpool2d(input, indices, self.kernel_size, self.stride,
self.padding, output_size)
class MaxUnpool3d(_MaxUnpoolNd):
r"""Computes a partial inverse of :class:`MaxPool3d`.
:class:`MaxPool3d` is not fully invertible, since the non-maximal values are lost.
:class:`MaxUnpool3d` takes in as input the output of :class:`MaxPool3d`
including the indices of the maximal values and computes a partial inverse
in which all non-maximal values are set to zero.
.. note:: :class:`MaxPool3d` can map several input sizes to the same output
sizes. Hence, the inversion process can get ambiguous.
To accommodate this, you can provide the needed output size
as an additional argument :attr:`output_size` in the forward call.
See the Inputs section below.
Args:
kernel_size (int or tuple): Size of the max pooling window.
stride (int or tuple): Stride of the max pooling window.
It is set to :attr:`kernel_size` by default.
padding (int or tuple): Padding that was added to the input
Inputs:
- `input`: the input Tensor to invert
- `indices`: the indices given out by :class:`~torch.nn.MaxPool3d`
- `output_size` (optional): the targeted output size
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = (D_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}
.. math::
H_{out} = (H_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}
.. math::
W_{out} = (W_{in} - 1) \times \text{stride[2]} - 2 \times \text{padding[2]} + \text{kernel\_size[2]}
or as given by :attr:`output_size` in the call operator
Example::
>>> # pool of square window of size=3, stride=2
>>> pool = nn.MaxPool3d(3, stride=2, return_indices=True)
>>> unpool = nn.MaxUnpool3d(3, stride=2)
>>> output, indices = pool(torch.randn(20, 16, 51, 33, 15))
>>> unpooled_output = unpool(output, indices)
>>> unpooled_output.size()
torch.Size([20, 16, 51, 33, 15])
"""
def __init__(self, kernel_size, stride=None, padding=0):
super(MaxUnpool3d, self).__init__()
self.kernel_size = _triple(kernel_size)
self.stride = _triple(stride or kernel_size)
self.padding = _triple(padding)
def forward(self, input, indices, output_size=None):
# type: (Tensor, Tensor, Optional[List[int]]) -> Tensor
return F.max_unpool3d(input, indices, self.kernel_size, self.stride,
self.padding, output_size)
class _AvgPoolNd(Module):
__constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad']
def extra_repr(self):
return 'kernel_size={}, stride={}, padding={}'.format(
self.kernel_size, self.stride, self.padding
)
class AvgPool1d(_AvgPoolNd):
r"""Applies a 1D average pooling over an input signal composed of several
input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, L)`,
output :math:`(N, C, L_{out})` and :attr:`kernel_size` :math:`k`
can be precisely described as:
.. math::
\text{out}(N_i, C_j, l) = \frac{1}{k} \sum_{m=0}^{k-1}
\text{input}(N_i, C_j, \text{stride} \times l + m)
If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
for :attr:`padding` number of points.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can each be
an ``int`` or a one-element tuple.
Args:
kernel_size: the size of the window
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on both sides
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
count_include_pad: when True, will include the zero-padding in the averaging calculation
Shape:
- Input: :math:`(N, C, L_{in})`
- Output: :math:`(N, C, L_{out})`, where
.. math::
L_{out} = \left\lfloor \frac{L_{in} +
2 \times \text{padding} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
Examples::
>>> # pool with window of size=3, stride=2
>>> m = nn.AvgPool1d(3, stride=2)
>>> m(torch.tensor([[[1.,2,3,4,5,6,7]]]))
tensor([[[ 2., 4., 6.]]])
"""
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True):
super(AvgPool1d, self).__init__()
self.kernel_size = _single(kernel_size)
self.stride = _single(stride if stride is not None else kernel_size)
self.padding = _single(padding)
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad
def forward(self, input):
return F.avg_pool1d(
input, self.kernel_size, self.stride, self.padding, self.ceil_mode,
self.count_include_pad)
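# Sketch (an addition) showing what count_include_pad changes: whether the
# implicit zero padding contributes to the averaging divisor.
#
#   >>> import torch
#   >>> import torch.nn as nn
#   >>> x = torch.ones(1, 1, 3)
#   >>> nn.AvgPool1d(3, padding=1, count_include_pad=True)(x)   # (0+1+1)/3
#   tensor([[[0.6667]]])
#   >>> nn.AvgPool1d(3, padding=1, count_include_pad=False)(x)  # (1+1)/2
#   tensor([[[1.]]])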
class AvgPool2d(_AvgPoolNd):
r"""Applies a 2D average pooling over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
can be precisely described as:
.. math::
out(N_i, C_j, h, w) = \frac{1}{kH \times kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
input(N_i, C_j, \text{stride}[0] \times h + m, \text{stride}[1] \times w + n)
If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
for :attr:`padding` number of points.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
Args:
kernel_size: the size of the window
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on both sides
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
count_include_pad: when True, will include the zero-padding in the averaging calculation
divisor_override: if specified, it will be used as the divisor, otherwise :attr:`kernel_size` will be used
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})`, where
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] -
\text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] -
\text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
Examples::
>>> # pool of square window of size=3, stride=2
>>> m = nn.AvgPool2d(3, stride=2)
>>> # pool of non-square window
>>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
>>> input = torch.randn(20, 16, 50, 32)
>>> output = m(input)
"""
__constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
super(AvgPool2d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride or kernel_size
self.padding = padding
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad
self.divisor_override = divisor_override
def forward(self, input):
return F.avg_pool2d(input, self.kernel_size, self.stride,
self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
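# Sketch (an addition) of divisor_override documented above: overriding the
# divisor with 1 turns average pooling into sum pooling over each window.
#
#   >>> import torch
#   >>> import torch.nn as nn
#   >>> x = torch.ones(1, 1, 2, 2)
#   >>> nn.AvgPool2d(2)(x)                      # 4 / (kernel area 4)
#   tensor([[[[1.]]]])
#   >>> nn.AvgPool2d(2, divisor_override=1)(x)  # 4 / 1
#   tensor([[[[4.]]]])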
class AvgPool3d(_AvgPoolNd):
r"""Applies a 3D average pooling over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
can be precisely described as:
.. math::
\begin{aligned}
\text{out}(N_i, C_j, d, h, w) ={} & \sum_{k=0}^{kD-1} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} \\
& \frac{\text{input}(N_i, C_j, \text{stride}[0] \times d + k,
\text{stride}[1] \times h + m, \text{stride}[2] \times w + n)}
{kD \times kH \times kW}
\end{aligned}
If :attr:`padding` is non-zero, then the input is implicitly zero-padded on all three sides
for :attr:`padding` number of points.
The parameters :attr:`kernel_size`, :attr:`stride` can either be:
- a single ``int`` -- in which case the same value is used for the depth, height and width dimension
- a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
the second `int` for the height dimension and the third `int` for the width dimension
Args:
kernel_size: the size of the window
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on all three sides
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
count_include_pad: when True, will include the zero-padding in the averaging calculation
divisor_override: if specified, it will be used as the divisor, otherwise :attr:`kernel_size` will be used
Shape:
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
.. math::
D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] -
\text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] -
\text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] -
\text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor
Examples::
>>> # pool of square window of size=3, stride=2
>>> m = nn.AvgPool3d(3, stride=2)
>>> # pool of non-square window
>>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2))
>>> input = torch.randn(20, 16, 50, 44, 31)
>>> output = m(input)
"""
__constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
super(AvgPool3d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride or kernel_size
self.padding = padding
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad
self.divisor_override = divisor_override
def forward(self, input):
return F.avg_pool3d(input, self.kernel_size, self.stride,
self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
def __setstate__(self, d):
super(AvgPool3d, self).__setstate__(d)
self.__dict__.setdefault('padding', 0)
self.__dict__.setdefault('ceil_mode', False)
self.__dict__.setdefault('count_include_pad', True)
class FractionalMaxPool2d(Module):
r"""Applies a 2D fractional max pooling over an input signal composed of several input planes.
Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham.
The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic
step size determined by the target output size.
The number of output features is equal to the number of input planes.
Args:
kernel_size: the size of the window to take a max over.
Can be a single number k (for a square kernel of k x k) or a tuple `(kh, kw)`
output_size: the target output size of the image of the form `oH x oW`.
Can be a tuple `(oH, oW)` or a single number oH for a square image `oH x oH`
output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
This has to be a number or tuple in the range (0, 1)
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to :meth:`nn.MaxUnpool2d`. Default: ``False``
Examples:
>>> # pool of square window of size=3, and target output size 13x12
>>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12))
>>> # pool of square window and target output size being half of input image size
>>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
>>> input = torch.randn(20, 16, 50, 32)
>>> output = m(input)
.. _Fractional MaxPooling:
http://arxiv.org/abs/1412.6071
"""
__constants__ = ['kernel_size', 'return_indices', 'output_size',
'output_ratio']
def __init__(self, kernel_size, output_size=None, output_ratio=None,
return_indices=False, _random_samples=None):
super(FractionalMaxPool2d, self).__init__()
self.kernel_size = _pair(kernel_size)
self.return_indices = return_indices
self.register_buffer('_random_samples', _random_samples)
self.output_size = _pair(output_size) if output_size is not None else None
self.output_ratio = _pair(output_ratio) if output_ratio is not None else None
if output_size is None and output_ratio is None:
raise ValueError("FractionalMaxPool2d requires specifying either "
"an output size, or a pooling ratio")
if output_size is not None and output_ratio is not None:
raise ValueError("only one of output_size and output_ratio may be specified")
if self.output_ratio is not None:
if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1):
raise ValueError("output_ratio must be between 0 and 1 (got {})"
.format(output_ratio))
def forward(self, input):
return F.fractional_max_pool2d(
input, self.kernel_size, self.output_size, self.output_ratio,
self.return_indices,
_random_samples=self._random_samples)
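# Sketch (an addition): with output_ratio, the target size is the floor of the
# input size times the ratio, independent of the randomly drawn pooling regions.
#
#   >>> import torch
#   >>> import torch.nn as nn
#   >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
#   >>> m(torch.randn(1, 16, 32, 30)).shape
#   torch.Size([1, 16, 16, 15])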
class FractionalMaxPool3d(Module):
r"""Applies a 3D fractional max pooling over an input signal composed of several input planes.
Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham.
The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic
step size determined by the target output size.
The number of output features is equal to the number of input planes.
Args:
kernel_size: the size of the window to take a max over.
Can be a single number k (for a cubic kernel of k x k x k) or a tuple `(kt, kh, kw)`
output_size: the target output size of the form `oT x oH x oW`.
Can be a tuple `(oT, oH, oW)` or a single number oH for a cubic output `oH x oH x oH`
output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
This has to be a number or tuple in the range (0, 1)
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to :meth:`nn.MaxUnpool3d`. Default: ``False``
Examples:
>>> # pool of cubic window of size=3, and target output size 13x12x11
>>> m = nn.FractionalMaxPool3d(3, output_size=(13, 12, 11))
>>> # pool of cubic window and target output size being half of input size
>>> m = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5))
>>> input = torch.randn(20, 16, 50, 32, 16)
>>> output = m(input)
.. _Fractional MaxPooling:
http://arxiv.org/abs/1412.6071
"""
__constants__ = ['kernel_size', 'return_indices', 'output_size',
'output_ratio']
def __init__(self, kernel_size, output_size=None, output_ratio=None,
return_indices=False, _random_samples=None):
super(FractionalMaxPool3d, self).__init__()
self.kernel_size = _triple(kernel_size)
self.return_indices = return_indices
self.register_buffer('_random_samples', _random_samples)
self.output_size = _triple(output_size) if output_size is not None else None
self.output_ratio = _triple(output_ratio) if output_ratio is not None else None
if output_size is None and output_ratio is None:
raise ValueError("FractionalMaxPool3d requires specifying either "
"an output size, or a pooling ratio")
if output_size is not None and output_ratio is not None:
raise ValueError("only one of output_size and output_ratio may be specified")
if self.output_ratio is not None:
if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1 and 0 < self.output_ratio[2] < 1):
raise ValueError("output_ratio must be between 0 and 1 (got {})"
.format(output_ratio))
def forward(self, input):
return F.fractional_max_pool3d(
input, self.kernel_size, self.output_size, self.output_ratio,
self.return_indices,
_random_samples=self._random_samples)
class _LPPoolNd(Module):
__constants__ = ['norm_type', 'kernel_size', 'stride', 'ceil_mode']
def __init__(self, norm_type, kernel_size, stride=None, ceil_mode=False):
super(_LPPoolNd, self).__init__()
self.norm_type = norm_type
self.kernel_size = kernel_size
self.stride = stride
self.ceil_mode = ceil_mode
def extra_repr(self):
return 'norm_type={norm_type}, kernel_size={kernel_size}, stride={stride}, ' \
'ceil_mode={ceil_mode}'.format(**self.__dict__)
class LPPool1d(_LPPoolNd):
r"""Applies a 1D power-average pooling over an input signal composed of several input
planes.
On each window, the function computed is:
.. math::
f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
- At p = :math:`\infty`, one gets Max Pooling
- At p = 1, one gets Sum Pooling (which is proportional to Average Pooling)
.. note:: If the sum to the power of `p` is zero, the gradient of this function is
not defined. This implementation will set the gradient to zero in this case.
Args:
kernel_size: a single int, the size of the window
stride: a single int, the stride of the window. Default value is :attr:`kernel_size`
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, L_{in})`
- Output: :math:`(N, C, L_{out})`, where
.. math::
L_{out} = \left\lfloor\frac{L_{in} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
Examples::
>>> # power-2 pool of window of length 3, with stride 2.
>>> m = nn.LPPool1d(2, 3, stride=2)
>>> input = torch.randn(20, 16, 50)
>>> output = m(input)
"""
def forward(self, input):
return F.lp_pool1d(input, float(self.norm_type), self.kernel_size,
self.stride, self.ceil_mode)
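# Sketch (an addition) of the p = 1 case called out above: power-average
# pooling with norm_type=1 reduces to sum pooling for non-negative inputs.
#
#   >>> import torch
#   >>> import torch.nn as nn
#   >>> x = torch.tensor([[[1., 2., 3., 4.]]])
#   >>> nn.LPPool1d(1, 2)(x)   # windows [1,2] and [3,4]; stride defaults to kernel_size
#   tensor([[[3., 7.]]])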
class LPPool2d(_LPPoolNd):
r"""Applies a 2D power-average pooling over an input signal composed of several input
planes.
On each window, the function computed is:
.. math::
f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
- At p = :math:`\infty`, one gets Max Pooling
- At p = 1, one gets Sum Pooling (which is proportional to average pooling)
The parameters :attr:`kernel_size`, :attr:`stride` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
.. note:: If the sum to the power of `p` is zero, the gradient of this function is
not defined. This implementation will set the gradient to zero in this case.
Args:
kernel_size: the size of the window
stride: the stride of the window. Default value is :attr:`kernel_size`
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})`, where
.. math::
H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
Examples::
>>> # power-2 pool of square window of size=3, stride=2
>>> m = nn.LPPool2d(2, 3, stride=2)
>>> # pool of non-square window of power 1.2
>>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1))
>>> input = torch.randn(20, 16, 50, 32)
>>> output = m(input)
"""
def forward(self, input):
return F.lp_pool2d(input, float(self.norm_type), self.kernel_size,
self.stride, self.ceil_mode)
class _AdaptiveMaxPoolNd(Module):
__constants__ = ['output_size', 'return_indices']
def __init__(self, output_size, return_indices=False):
super(_AdaptiveMaxPoolNd, self).__init__()
self.output_size = output_size
self.return_indices = return_indices
def extra_repr(self):
return 'output_size={}'.format(self.output_size)
# FIXME (by @ssnl): Improve adaptive pooling docs: specify what the input and
# output shapes are, and how the operation computes output.
class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd):
r"""Applies a 1D adaptive max pooling over an input signal composed of several input planes.
The output size is H, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size H
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to nn.MaxUnpool1d. Default: ``False``
Examples:
>>> # target output size of 5
>>> m = nn.AdaptiveMaxPool1d(5)
>>> input = torch.randn(1, 64, 8)
>>> output = m(input)
"""
def forward(self, input):
return F.adaptive_max_pool1d(input, self.output_size, self.return_indices)
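# Sketch (an addition) of the "for any input size" claim above: the same module
# produces the same output length for different input lengths.
#
#   >>> import torch
#   >>> import torch.nn as nn
#   >>> m = nn.AdaptiveMaxPool1d(5)
#   >>> m(torch.randn(1, 64, 8)).shape
#   torch.Size([1, 64, 5])
#   >>> m(torch.randn(1, 64, 17)).shape
#   torch.Size([1, 64, 5])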
class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
r"""Applies a 2D adaptive max pooling over an input signal composed of several input planes.
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H.
H and W can each be an ``int``, or ``None``, which means the size will
be the same as that of the input.
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to nn.MaxUnpool2d. Default: ``False``
Examples:
>>> # target output size of 5x7
>>> m = nn.AdaptiveMaxPool2d((5,7))
>>> input = torch.randn(1, 64, 8, 9)
>>> output = m(input)
>>> # target output size of 7x7 (square)
>>> m = nn.AdaptiveMaxPool2d(7)
>>> input = torch.randn(1, 64, 10, 9)
>>> output = m(input)
>>> # target output size of 10x7
>>> m = nn.AdaptiveMaxPool2d((None, 7))
>>> input = torch.randn(1, 64, 10, 9)
>>> output = m(input)
"""
def forward(self, input):
return F.adaptive_max_pool2d(input, self.output_size, self.return_indices)
class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd):
r"""Applies a 3D adaptive max pooling over an input signal composed of several input planes.
The output is of size D x H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form D x H x W.
Can be a tuple (D, H, W) or a single D for a cube D x D x D.
D, H and W can each be an ``int``, or ``None``, which means the size will
be the same as that of the input.
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to nn.MaxUnpool3d. Default: ``False``
Examples:
>>> # target output size of 5x7x9
>>> m = nn.AdaptiveMaxPool3d((5,7,9))
>>> input = torch.randn(1, 64, 8, 9, 10)
>>> output = m(input)
>>> # target output size of 7x7x7 (cube)
>>> m = nn.AdaptiveMaxPool3d(7)
>>> input = torch.randn(1, 64, 10, 9, 8)
>>> output = m(input)
>>> # target output size of 7x9x8
>>> m = nn.AdaptiveMaxPool3d((7, None, None))
>>> input = torch.randn(1, 64, 10, 9, 8)
>>> output = m(input)
"""
def forward(self, input):
return F.adaptive_max_pool3d(input, self.output_size, self.return_indices)
class _AdaptiveAvgPoolNd(Module):
__constants__ = ['output_size']
def __init__(self, output_size):
super(_AdaptiveAvgPoolNd, self).__init__()
self.output_size = output_size
def extra_repr(self):
return 'output_size={}'.format(self.output_size)
class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd):
r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes.
The output size is H, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size H
Examples:
>>> # target output size of 5
>>> m = nn.AdaptiveAvgPool1d(5)
>>> input = torch.randn(1, 64, 8)
>>> output = m(input)
"""
def forward(self, input):
return F.adaptive_avg_pool1d(input, self.output_size)
class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes.
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H.
H and W can each be an ``int``, or ``None``, which means the size will
be the same as that of the input.
Examples:
>>> # target output size of 5x7
>>> m = nn.AdaptiveAvgPool2d((5,7))
>>> input = torch.randn(1, 64, 8, 9)
>>> output = m(input)
>>> # target output size of 7x7 (square)
>>> m = nn.AdaptiveAvgPool2d(7)
>>> input = torch.randn(1, 64, 10, 9)
>>> output = m(input)
>>> # target output size of 10x7
>>> m = nn.AdaptiveAvgPool2d((None, 7))
>>> input = torch.randn(1, 64, 10, 9)
>>> output = m(input)
"""
def forward(self, input):
return F.adaptive_avg_pool2d(input, self.output_size)
class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd):
r"""Applies a 3D adaptive average pooling over an input signal composed of several input planes.
The output is of size D x H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the form D x H x W.
Can be a tuple (D, H, W) or a single number D for a cube D x D x D.
D, H and W can each be an ``int``, or ``None``, which means the size will
be the same as that of the input.
Examples:
>>> # target output size of 5x7x9
>>> m = nn.AdaptiveAvgPool3d((5,7,9))
>>> input = torch.randn(1, 64, 8, 9, 10)
>>> output = m(input)
>>> # target output size of 7x7x7 (cube)
>>> m = nn.AdaptiveAvgPool3d(7)
>>> input = torch.randn(1, 64, 10, 9, 8)
>>> output = m(input)
>>> # target output size of 7x9x8
>>> m = nn.AdaptiveAvgPool3d((7, None, None))
>>> input = torch.randn(1, 64, 10, 9, 8)
>>> output = m(input)
"""
def forward(self, input):
return F.adaptive_avg_pool3d(input, self.output_size)

# --- end of file: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/pooling.py ---
import torch
from torch.nn.parameter import Parameter
from .module import Module
from .. import functional as F
from .. import init
class Embedding(Module):
r"""A simple lookup table that stores embeddings of a fixed dictionary and size.
This module is often used to store word embeddings and retrieve them using indices.
The input to the module is a list of indices, and the output is the corresponding
word embeddings.
Args:
num_embeddings (int): size of the dictionary of embeddings
embedding_dim (int): the size of each embedding vector
padding_idx (int, optional): If given, pads the output with the embedding vector at :attr:`padding_idx`
(initialized to zeros) whenever it encounters the index.
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
scale_grad_by_freq (boolean, optional): If given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor.
See Notes for more details regarding sparse gradients.
Attributes:
weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)
initialized from :math:`\mathcal{N}(0, 1)`
Shape:
- Input: :math:`(*)`, LongTensor of arbitrary shape containing the indices to extract
- Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}`
.. note::
Keep in mind that only a limited number of optimizers support
sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`),
:class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`)
.. note::
With :attr:`padding_idx` set, the embedding vector at
:attr:`padding_idx` is initialized to all zeros. However, note that this
vector can be modified afterwards, e.g., using a customized
initialization method, and thus changing the vector used to pad the
output. The gradient for this vector from :class:`~torch.nn.Embedding`
is always zero.
Examples::
>>> # an Embedding module containing 10 tensors of size 3
>>> embedding = nn.Embedding(10, 3)
>>> # a batch of 2 samples of 4 indices each
>>> input = torch.LongTensor([[1,2,4,5],[4,3,2,9]])
>>> embedding(input)
tensor([[[-0.0251, -1.6902, 0.7172],
[-0.6431, 0.0748, 0.6969],
[ 1.4970, 1.3448, -0.9685],
[-0.3677, -2.7265, -0.1685]],
[[ 1.4970, 1.3448, -0.9685],
[ 0.4362, -0.4004, 0.9400],
[-0.6431, 0.0748, 0.6969],
[ 0.9124, -2.3616, 1.1151]]])
>>> # example with padding_idx
>>> embedding = nn.Embedding(10, 3, padding_idx=0)
>>> input = torch.LongTensor([[0,2,0,5]])
>>> embedding(input)
tensor([[[ 0.0000, 0.0000, 0.0000],
[ 0.1535, -2.0309, 0.9315],
[ 0.0000, 0.0000, 0.0000],
[-0.1655, 0.9897, 0.0635]]])
"""
__constants__ = ['num_embeddings', 'embedding_dim', 'padding_idx', 'max_norm',
'norm_type', 'scale_grad_by_freq', 'sparse']
def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
max_norm=None, norm_type=2., scale_grad_by_freq=False,
sparse=False, _weight=None):
super(Embedding, self).__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
elif padding_idx < 0:
assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if _weight is None:
self.weight = Parameter(torch.Tensor(num_embeddings, embedding_dim))
self.reset_parameters()
else:
assert list(_weight.shape) == [num_embeddings, embedding_dim], \
'Shape of weight does not match num_embeddings and embedding_dim'
self.weight = Parameter(_weight)
self.sparse = sparse
def reset_parameters(self):
init.normal_(self.weight)
if self.padding_idx is not None:
with torch.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input):
return F.embedding(
input, self.weight, self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse)
def extra_repr(self):
s = '{num_embeddings}, {embedding_dim}'
if self.padding_idx is not None:
s += ', padding_idx={padding_idx}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
if self.sparse is not False:
s += ', sparse=True'
return s.format(**self.__dict__)
@classmethod
def from_pretrained(cls, embeddings, freeze=True, padding_idx=None,
max_norm=None, norm_type=2., scale_grad_by_freq=False,
sparse=False):
r"""Creates Embedding instance from given 2-dimensional FloatTensor.
Args:
embeddings (Tensor): FloatTensor containing weights for the Embedding.
First dimension is being passed to Embedding as ``num_embeddings``, second as ``embedding_dim``.
freeze (boolean, optional): If ``True``, the tensor does not get updated in the learning process.
Equivalent to ``embedding.weight.requires_grad = False``. Default: ``True``
padding_idx (int, optional): See module initialization documentation.
max_norm (float, optional): See module initialization documentation.
norm_type (float, optional): See module initialization documentation. Default ``2``.
scale_grad_by_freq (boolean, optional): See module initialization documentation. Default ``False``.
sparse (bool, optional): See module initialization documentation.
Examples::
>>> # FloatTensor containing pretrained weights
>>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
>>> embedding = nn.Embedding.from_pretrained(weight)
>>> # Get embeddings for index 1
>>> input = torch.LongTensor([1])
>>> embedding(input)
tensor([[ 4.0000, 5.1000, 6.3000]])
"""
assert embeddings.dim() == 2, \
'Embeddings parameter is expected to be 2-dimensional'
rows, cols = embeddings.shape
embedding = cls(
num_embeddings=rows,
embedding_dim=cols,
_weight=embeddings,
padding_idx=padding_idx,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
sparse=sparse)
embedding.weight.requires_grad = not freeze
return embedding
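# Sketch (an addition) of the max_norm behavior documented above: rows looked
# up during forward are renormalized so their norm never exceeds max_norm.
#
#   >>> import torch
#   >>> import torch.nn as nn
#   >>> emb = nn.Embedding(10, 3, max_norm=1.0)
#   >>> out = emb(torch.LongTensor([0, 5, 9]))
#   >>> bool((out.norm(dim=-1) <= 1.0 + 1e-6).all())
#   True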
class EmbeddingBag(Module):
r"""Computes sums or means of 'bags' of embeddings, without instantiating the
intermediate embeddings.
For bags of constant length and no :attr:`per_sample_weights`, this class
* with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=0)``,
* with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=0)``,
* with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=0)``.
However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these
operations.
EmbeddingBag also supports per-sample weights as an argument to the forward
pass. This scales the output of the Embedding before performing a weighted
reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the
only supported ``mode`` is ``"sum"``, which computes a weighted sum according to
:attr:`per_sample_weights`.
Args:
num_embeddings (int): size of the dictionary of embeddings
embedding_dim (int): the size of each embedding vector
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
Note: this option is not supported when ``mode="max"``.
mode (string, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights`
into consideration. ``"mean"`` computes the average of the values
in the bag, ``"max"`` computes the max value over each bag.
Default: ``"mean"``
sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See
Notes for more details regarding sparse gradients. Note: this option is not
supported when ``mode="max"``.
include_last_offset (bool, optional): if ``True``, :attr:`offsets` has one additional element, where the last element
is equivalent to the size of `indices`. This matches the CSR format. Note:
this option is currently only supported when ``mode="sum"``.
Attributes:
weight (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)`
initialized from :math:`\mathcal{N}(0, 1)`.
Inputs: :attr:`input` (LongTensor), :attr:`offsets` (LongTensor, optional), and
:attr:`per_sample_weights` (Tensor, optional)
- If :attr:`input` is 2D of shape `(B, N)`,
it will be treated as ``B`` bags (sequences) each of fixed length ``N``, and
this will return ``B`` values aggregated in a way depending on the :attr:`mode`.
:attr:`offsets` is ignored and required to be ``None`` in this case.
- If :attr:`input` is 1D of shape `(N)`,
it will be treated as a concatenation of multiple bags (sequences).
:attr:`offsets` is required to be a 1D tensor containing the
starting index positions of each bag in :attr:`input`. Therefore,
for :attr:`offsets` of shape `(B)`, :attr:`input` will be viewed as
having ``B`` bags. Empty bags (i.e., having 0-length) will have
returned vectors filled by zeros.
per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights`
must have exactly the same shape as input and is treated as having the same
:attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.
Output shape: `(B, embedding_dim)`
Examples::
>>> # an Embedding module containing 10 tensors of size 3
>>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum')
>>> # a batch of 2 samples of 4 indices each
>>> input = torch.LongTensor([1,2,4,5,4,3,2,9])
>>> offsets = torch.LongTensor([0,4])
>>> embedding_sum(input, offsets)
tensor([[-0.8861, -5.4350, -0.0523],
[ 1.1306, -2.5798, -1.0044]])
"""
__constants__ = ['num_embeddings', 'embedding_dim', 'max_norm', 'norm_type',
'scale_grad_by_freq', 'mode', 'sparse', 'include_last_offset']
def __init__(self, num_embeddings, embedding_dim,
max_norm=None, norm_type=2., scale_grad_by_freq=False,
mode='mean', sparse=False, _weight=None, include_last_offset=False):
super(EmbeddingBag, self).__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if _weight is None:
self.weight = Parameter(torch.Tensor(num_embeddings, embedding_dim))
self.reset_parameters()
else:
assert list(_weight.shape) == [num_embeddings, embedding_dim], \
'Shape of weight does not match num_embeddings and embedding_dim'
self.weight = Parameter(_weight)
self.mode = mode
self.sparse = sparse
self.include_last_offset = include_last_offset
def reset_parameters(self):
init.normal_(self.weight)
def forward(self, input, offsets=None, per_sample_weights=None):
# type: (Tensor, Optional[Tensor], Optional[Tensor]) -> Tensor
return F.embedding_bag(input, self.weight, offsets,
self.max_norm, self.norm_type,
self.scale_grad_by_freq, self.mode, self.sparse,
per_sample_weights, self.include_last_offset)
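# Sketch (an addition) verifying the mode='sum' equivalence stated in the class
# docstring: EmbeddingBag equals Embedding followed by a per-bag sum.
#
#   >>> import torch
#   >>> import torch.nn as nn
#   >>> weight = torch.randn(10, 3)
#   >>> bag = nn.EmbeddingBag.from_pretrained(weight, mode='sum')
#   >>> emb = nn.Embedding.from_pretrained(weight)
#   >>> input = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
#   >>> torch.allclose(bag(input), emb(input).sum(dim=1))
#   True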
def extra_repr(self):
s = '{num_embeddings}, {embedding_dim}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
s += ', mode={mode}'
return s.format(**self.__dict__)
@classmethod
def from_pretrained(cls, embeddings, freeze=True, max_norm=None,
norm_type=2., scale_grad_by_freq=False,
mode='mean', sparse=False, include_last_offset=False):
r"""Creates EmbeddingBag instance from given 2-dimensional FloatTensor.
Args:
embeddings (Tensor): FloatTensor containing weights for the EmbeddingBag.
First dimension is being passed to EmbeddingBag as 'num_embeddings', second as 'embedding_dim'.
freeze (boolean, optional): If ``True``, the tensor does not get updated in the learning process.
Equivalent to ``embeddingbag.weight.requires_grad = False``. Default: ``True``
max_norm (float, optional): See module initialization documentation. Default: ``None``
norm_type (float, optional): See module initialization documentation. Default ``2``.
scale_grad_by_freq (boolean, optional): See module initialization documentation. Default ``False``.
mode (string, optional): See module initialization documentation. Default: ``"mean"``
sparse (bool, optional): See module initialization documentation. Default: ``False``.
include_last_offset (bool, optional): See module initialization documentation. Default: ``False``.
Examples::
>>> # FloatTensor containing pretrained weights
>>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
>>> embeddingbag = nn.EmbeddingBag.from_pretrained(weight)
>>> # Get embeddings for index 1
>>> input = torch.LongTensor([[1, 0]])
>>> embeddingbag(input)
tensor([[ 2.5000, 3.7000, 4.6500]])
"""
assert embeddings.dim() == 2, \
'Embeddings parameter is expected to be 2-dimensional'
rows, cols = embeddings.shape
embeddingbag = cls(
num_embeddings=rows,
embedding_dim=cols,
_weight=embeddings,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
mode=mode,
sparse=sparse,
include_last_offset=include_last_offset)
embeddingbag.weight.requires_grad = not freeze
return embeddingbag

# --- end of file: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/sparse.py ---
import torch
from torch.autograd.function import Function
class SyncBatchNorm(Function):
@staticmethod
def forward(self, input, weight, bias, running_mean, running_var, eps, momentum, process_group, world_size):
input = input.contiguous()
size = input.numel() // input.size(1)
if size == 1:
raise ValueError('Expected more than 1 value per channel when training, got input size {}'.format(size))
count = torch.Tensor([size]).to(input.device)
# calculate mean/invstd for input.
mean, invstd = torch.batch_norm_stats(input, eps)
count_all = torch.empty(world_size, 1, dtype=count.dtype, device=count.device)
mean_all = torch.empty(world_size, mean.size(0), dtype=mean.dtype, device=mean.device)
invstd_all = torch.empty(world_size, invstd.size(0), dtype=invstd.dtype, device=invstd.device)
count_l = list(count_all.unbind(0))
mean_l = list(mean_all.unbind(0))
invstd_l = list(invstd_all.unbind(0))
# using all_gather instead of all_reduce so we can calculate count/mean/var in one go
count_all_reduce = torch.distributed.all_gather(count_l, count, process_group, async_op=True)
mean_all_reduce = torch.distributed.all_gather(mean_l, mean, process_group, async_op=True)
invstd_all_reduce = torch.distributed.all_gather(invstd_l, invstd, process_group, async_op=True)
# wait on the async communication to finish
count_all_reduce.wait()
mean_all_reduce.wait()
invstd_all_reduce.wait()
# calculate global mean & invstd
mean, invstd = torch.batch_norm_gather_stats_with_counts(
input,
mean_all,
invstd_all,
running_mean,
running_var,
momentum,
eps,
count_all.view(-1).long().tolist()
)
self.save_for_backward(input, weight, mean, invstd, count_all)
self.process_group = process_group
# apply element-wise normalization
out = torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
return out
@staticmethod
def backward(self, grad_output):
grad_output = grad_output.contiguous()
saved_input, weight, mean, invstd, count_tensor = self.saved_tensors
grad_input = grad_weight = grad_bias = None
process_group = self.process_group
# calculate local stats as well as grad_weight / grad_bias
sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
grad_output,
saved_input,
mean,
invstd,
weight,
self.needs_input_grad[0],
self.needs_input_grad[1],
self.needs_input_grad[2]
)
if self.needs_input_grad[0]:
# synchronizing stats used to calculate input gradient.
# TODO: move div_ into batch_norm_backward_elemt kernel
sum_dy_all_reduce = torch.distributed.all_reduce(
sum_dy, torch.distributed.ReduceOp.SUM, process_group, async_op=True)
sum_dy_xmu_all_reduce = torch.distributed.all_reduce(
sum_dy_xmu, torch.distributed.ReduceOp.SUM, process_group, async_op=True)
# wait on the async communication to finish
sum_dy_all_reduce.wait()
sum_dy_xmu_all_reduce.wait()
divisor = count_tensor.sum()
mean_dy = sum_dy / divisor
mean_dy_xmu = sum_dy_xmu / divisor
# backward pass for gradient calculation
grad_input = torch.batch_norm_backward_elemt(
grad_output,
saved_input,
mean,
invstd,
weight,
mean_dy,
mean_dy_xmu
)
# synchronizing of grad_weight / grad_bias is not needed as distributed
# training would handle all reduce.
if weight is None or not self.needs_input_grad[1]:
grad_weight = None
if weight is None or not self.needs_input_grad[2]:
grad_bias = None
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
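# A minimal usage sketch (illustrative helper, not part of the API above):
# the SyncBatchNorm autograd Function is normally reached through the public
# torch.nn.SyncBatchNorm module. The conversion below only swaps module
# types and runs on CPU; actually executing the converted model assumes a
# CUDA setup with torch.distributed initialized elsewhere.
def _sync_batchnorm_usage_sketch():
    import torch.nn as nn
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    # Swap every BatchNorm*d layer for SyncBatchNorm; during forward the
    # converted layers dispatch to the Function defined above.
    model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    return model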
class CrossMapLRN2d(Function):
@staticmethod
def forward(ctx, input, size, alpha=1e-4, beta=0.75, k=1):
ctx.size = size
ctx.alpha = alpha
ctx.beta = beta
ctx.k = k
ctx.scale = None
assert input.dim() == 4
ctx.scale = ctx.scale or input.new()
output = input.new()
batch_size = input.size(0)
channels = input.size(1)
input_height = input.size(2)
input_width = input.size(3)
output.resize_as_(input)
ctx.scale.resize_as_(input)
# use output storage as temporary buffer
input_square = output
torch.pow(input, 2, out=input_square)
pre_pad = int((ctx.size - 1) / 2 + 1)
pre_pad_crop = channels if pre_pad > channels else pre_pad
scale_first = ctx.scale.select(1, 0)
scale_first.zero_()
# compute first feature map normalization
for c in range(pre_pad_crop):
scale_first.add_(input_square.select(1, c))
# reuse computations for next feature maps normalization
# by adding the next feature map and removing the previous
for c in range(1, channels):
scale_previous = ctx.scale.select(1, c - 1)
scale_current = ctx.scale.select(1, c)
scale_current.copy_(scale_previous)
if c < channels - pre_pad + 1:
square_next = input_square.select(1, c + pre_pad - 1)
scale_current.add_(square_next, alpha=1)
if c > pre_pad:
square_previous = input_square.select(1, c - pre_pad)
scale_current.add_(square_previous, alpha=-1)
ctx.scale.mul_(ctx.alpha / ctx.size).add_(ctx.k)
torch.pow(ctx.scale, -ctx.beta, out=output)
output.mul_(input)
ctx.save_for_backward(input, output)
return output
@staticmethod
def backward(ctx, grad_output):
input, output = ctx.saved_tensors
grad_input = grad_output.new()
batch_size = input.size(0)
channels = input.size(1)
input_height = input.size(2)
input_width = input.size(3)
        padded_ratio = input.new(channels + ctx.size - 1, input_height,
                                 input_width)
        accum_ratio = input.new(input_height, input_width)
        cache_ratio_value = 2 * ctx.alpha * ctx.beta / ctx.size
        inverse_pre_pad = int(ctx.size - (ctx.size - 1) / 2)
        grad_input.resize_as_(input)
        torch.pow(ctx.scale, -ctx.beta, out=grad_input).mul_(grad_output)
        padded_ratio.zero_()
        padded_ratio_center = padded_ratio.narrow(0, inverse_pre_pad,
                                                  channels)
        for n in range(batch_size):
            torch.mul(grad_output[n], output[n], out=padded_ratio_center)
            padded_ratio_center.div_(ctx.scale[n])
            torch.sum(
                padded_ratio.narrow(0, 0, ctx.size - 1), 0, keepdim=False, out=accum_ratio)
            for c in range(channels):
                accum_ratio.add_(padded_ratio[c + ctx.size - 1])
                grad_input[n][c].addcmul_(input[n][c], accum_ratio, value=-cache_ratio_value)
                accum_ratio.add_(padded_ratio[c], alpha=-1)
        return grad_input, None, None, None, None

# source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/_functions.py
import warnings
from collections import OrderedDict
from torch._six import container_abcs
from itertools import islice
import operator
import torch
from .module import Module
from torch._jit_internal import _copy_to_script_wrapper
class Container(Module):
def __init__(self, **kwargs):
super(Container, self).__init__()
# DeprecationWarning is ignored by default <sigh>
        warnings.warn("nn.Container is deprecated. All of its functionality "
                      "is now implemented in nn.Module. Subclass that instead.")
for key, value in kwargs.items():
self.add_module(key, value)
class Sequential(Module):
r"""A sequential container.
Modules will be added to it in the order they are passed in the constructor.
Alternatively, an ordered dict of modules can also be passed in.
To make it easier to understand, here is a small example::
# Example of using Sequential
model = nn.Sequential(
nn.Conv2d(1,20,5),
nn.ReLU(),
nn.Conv2d(20,64,5),
nn.ReLU()
)
# Example of using Sequential with OrderedDict
model = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(1,20,5)),
('relu1', nn.ReLU()),
('conv2', nn.Conv2d(20,64,5)),
('relu2', nn.ReLU())
]))
"""
def __init__(self, *args):
super(Sequential, self).__init__()
if len(args) == 1 and isinstance(args[0], OrderedDict):
for key, module in args[0].items():
self.add_module(key, module)
else:
for idx, module in enumerate(args):
self.add_module(str(idx), module)
def _get_item_by_idx(self, iterator, idx):
"""Get the idx-th item of the iterator"""
size = len(self)
idx = operator.index(idx)
if not -size <= idx < size:
raise IndexError('index {} is out of range'.format(idx))
idx %= size
return next(islice(iterator, idx, None))
@_copy_to_script_wrapper
def __getitem__(self, idx):
if isinstance(idx, slice):
return self.__class__(OrderedDict(list(self._modules.items())[idx]))
else:
return self._get_item_by_idx(self._modules.values(), idx)
def __setitem__(self, idx, module):
key = self._get_item_by_idx(self._modules.keys(), idx)
return setattr(self, key, module)
def __delitem__(self, idx):
if isinstance(idx, slice):
for key in list(self._modules.keys())[idx]:
delattr(self, key)
else:
key = self._get_item_by_idx(self._modules.keys(), idx)
delattr(self, key)
@_copy_to_script_wrapper
def __len__(self):
return len(self._modules)
@_copy_to_script_wrapper
def __dir__(self):
keys = super(Sequential, self).__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
@_copy_to_script_wrapper
def __iter__(self):
return iter(self._modules.values())
def forward(self, input):
for module in self:
input = module(input)
return input
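# A minimal sketch of the slice support in __getitem__ above (illustrative
# helper): slicing returns a new Sequential built from the selected
# (name, module) pairs, while an int index returns the module itself.
def _sequential_slice_sketch():
    import torch.nn as nn
    model = nn.Sequential(nn.Conv2d(1, 20, 5), nn.ReLU(),
                          nn.Conv2d(20, 64, 5), nn.ReLU())
    head = model[:2]  # a new Sequential holding the first conv and ReLU
    assert isinstance(head, nn.Sequential) and len(head) == 2
    return head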
class ModuleList(Module):
r"""Holds submodules in a list.
:class:`~torch.nn.ModuleList` can be indexed like a regular Python list, but
modules it contains are properly registered, and will be visible by all
:class:`~torch.nn.Module` methods.
Arguments:
modules (iterable, optional): an iterable of modules to add
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
def forward(self, x):
# ModuleList can act as an iterable, or be indexed using ints
for i, l in enumerate(self.linears):
x = self.linears[i // 2](x) + l(x)
return x
"""
def __init__(self, modules=None):
super(ModuleList, self).__init__()
if modules is not None:
self += modules
def _get_abs_string_index(self, idx):
"""Get the absolute index for the list of modules"""
idx = operator.index(idx)
if not (-len(self) <= idx < len(self)):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx += len(self)
return str(idx)
@_copy_to_script_wrapper
def __getitem__(self, idx):
if isinstance(idx, slice):
return self.__class__(list(self._modules.values())[idx])
else:
return self._modules[self._get_abs_string_index(idx)]
def __setitem__(self, idx, module):
idx = self._get_abs_string_index(idx)
return setattr(self, str(idx), module)
def __delitem__(self, idx):
if isinstance(idx, slice):
for k in range(len(self._modules))[idx]:
delattr(self, str(k))
else:
delattr(self, self._get_abs_string_index(idx))
# To preserve numbering, self._modules is being reconstructed with modules after deletion
str_indices = [str(i) for i in range(len(self._modules))]
self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
@_copy_to_script_wrapper
def __len__(self):
return len(self._modules)
@_copy_to_script_wrapper
def __iter__(self):
return iter(self._modules.values())
def __iadd__(self, modules):
return self.extend(modules)
@_copy_to_script_wrapper
def __dir__(self):
keys = super(ModuleList, self).__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def insert(self, index, module):
r"""Insert a given module before a given index in the list.
Arguments:
index (int): index to insert.
module (nn.Module): module to insert
"""
for i in range(len(self._modules), index, -1):
self._modules[str(i)] = self._modules[str(i - 1)]
self._modules[str(index)] = module
def append(self, module):
r"""Appends a given module to the end of the list.
Arguments:
module (nn.Module): module to append
"""
self.add_module(str(len(self)), module)
return self
def extend(self, modules):
r"""Appends modules from a Python iterable to the end of the list.
Arguments:
modules (iterable): iterable of modules to append
"""
if not isinstance(modules, container_abcs.Iterable):
raise TypeError("ModuleList.extend should be called with an "
"iterable, but got " + type(modules).__name__)
offset = len(self)
for i, module in enumerate(modules):
self.add_module(str(offset + i), module)
return self
def forward(self):
raise NotImplementedError()
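# A minimal sketch of the renumbering done in __delitem__ above
# (illustrative helper): after a deletion the remaining modules are re-keyed
# so indices stay contiguous ('0', '1', ...).
def _modulelist_delete_sketch():
    import torch.nn as nn
    lst = nn.ModuleList([nn.Linear(2, 2) for _ in range(3)])
    del lst[1]
    assert list(lst._modules.keys()) == ['0', '1']
    return lst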
class ModuleDict(Module):
r"""Holds submodules in a dictionary.
:class:`~torch.nn.ModuleDict` can be indexed like a regular Python dictionary,
but modules it contains are properly registered, and will be visible by all
:class:`~torch.nn.Module` methods.
:class:`~torch.nn.ModuleDict` is an **ordered** dictionary that respects
* the order of insertion, and
* in :meth:`~torch.nn.ModuleDict.update`, the order of the merged ``OrderedDict``
or another :class:`~torch.nn.ModuleDict` (the argument to :meth:`~torch.nn.ModuleDict.update`).
Note that :meth:`~torch.nn.ModuleDict.update` with other unordered mapping
types (e.g., Python's plain ``dict``) does not preserve the order of the
merged mapping.
Arguments:
modules (iterable, optional): a mapping (dictionary) of (string: module)
or an iterable of key-value pairs of type (string, module)
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.choices = nn.ModuleDict({
'conv': nn.Conv2d(10, 10, 3),
'pool': nn.MaxPool2d(3)
})
self.activations = nn.ModuleDict([
['lrelu', nn.LeakyReLU()],
['prelu', nn.PReLU()]
])
def forward(self, x, choice, act):
x = self.choices[choice](x)
x = self.activations[act](x)
return x
"""
def __init__(self, modules=None):
super(ModuleDict, self).__init__()
if modules is not None:
self.update(modules)
@_copy_to_script_wrapper
def __getitem__(self, key):
return self._modules[key]
def __setitem__(self, key, module):
self.add_module(key, module)
def __delitem__(self, key):
del self._modules[key]
@_copy_to_script_wrapper
def __len__(self):
return len(self._modules)
@_copy_to_script_wrapper
def __iter__(self):
return iter(self._modules)
@_copy_to_script_wrapper
def __contains__(self, key):
return key in self._modules
def clear(self):
"""Remove all items from the ModuleDict.
"""
self._modules.clear()
def pop(self, key):
r"""Remove key from the ModuleDict and return its module.
Arguments:
key (string): key to pop from the ModuleDict
"""
v = self[key]
del self[key]
return v
@_copy_to_script_wrapper
def keys(self):
r"""Return an iterable of the ModuleDict keys.
"""
return self._modules.keys()
@_copy_to_script_wrapper
def items(self):
r"""Return an iterable of the ModuleDict key/value pairs.
"""
return self._modules.items()
@_copy_to_script_wrapper
def values(self):
r"""Return an iterable of the ModuleDict values.
"""
return self._modules.values()
def update(self, modules):
r"""Update the :class:`~torch.nn.ModuleDict` with the key-value pairs from a
mapping or an iterable, overwriting existing keys.
.. note::
If :attr:`modules` is an ``OrderedDict``, a :class:`~torch.nn.ModuleDict`, or
an iterable of key-value pairs, the order of new elements in it is preserved.
Arguments:
modules (iterable): a mapping (dictionary) from string to :class:`~torch.nn.Module`,
or an iterable of key-value pairs of type (string, :class:`~torch.nn.Module`)
"""
if not isinstance(modules, container_abcs.Iterable):
raise TypeError("ModuleDict.update should be called with an "
"iterable of key/value pairs, but got " +
type(modules).__name__)
if isinstance(modules, container_abcs.Mapping):
if isinstance(modules, (OrderedDict, ModuleDict)):
for key, module in modules.items():
self[key] = module
else:
for key, module in sorted(modules.items()):
self[key] = module
else:
for j, m in enumerate(modules):
if not isinstance(m, container_abcs.Iterable):
raise TypeError("ModuleDict update sequence element "
"#" + str(j) + " should be Iterable; is" +
type(m).__name__)
if not len(m) == 2:
raise ValueError("ModuleDict update sequence element "
"#" + str(j) + " has length " + str(len(m)) +
"; 2 is required")
self[m[0]] = m[1]
def forward(self):
raise NotImplementedError()
class ParameterList(Module):
r"""Holds parameters in a list.
:class:`~torch.nn.ParameterList` can be indexed like a regular Python
list, but parameters it contains are properly registered, and will be
visible by all :class:`~torch.nn.Module` methods.
Arguments:
parameters (iterable, optional): an iterable of :class:`~torch.nn.Parameter` to add
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.params = nn.ParameterList([nn.Parameter(torch.randn(10, 10)) for i in range(10)])
def forward(self, x):
# ParameterList can act as an iterable, or be indexed using ints
for i, p in enumerate(self.params):
x = self.params[i // 2].mm(x) + p.mm(x)
return x
"""
def __init__(self, parameters=None):
super(ParameterList, self).__init__()
if parameters is not None:
self += parameters
def _get_abs_string_index(self, idx):
"""Get the absolute index for the list of modules"""
idx = operator.index(idx)
if not (-len(self) <= idx < len(self)):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx += len(self)
return str(idx)
def __getitem__(self, idx):
if isinstance(idx, slice):
return self.__class__(list(self._parameters.values())[idx])
else:
idx = self._get_abs_string_index(idx)
return self._parameters[str(idx)]
def __setitem__(self, idx, param):
idx = self._get_abs_string_index(idx)
return self.register_parameter(str(idx), param)
def __len__(self):
return len(self._parameters)
def __iter__(self):
return iter(self._parameters.values())
def __iadd__(self, parameters):
return self.extend(parameters)
def __dir__(self):
keys = super(ParameterList, self).__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def append(self, parameter):
"""Appends a given parameter at the end of the list.
Arguments:
parameter (nn.Parameter): parameter to append
"""
self.register_parameter(str(len(self)), parameter)
return self
def extend(self, parameters):
"""Appends parameters from a Python iterable to the end of the list.
Arguments:
parameters (iterable): iterable of parameters to append
"""
if not isinstance(parameters, container_abcs.Iterable):
raise TypeError("ParameterList.extend should be called with an "
"iterable, but got " + type(parameters).__name__)
offset = len(self)
for i, param in enumerate(parameters):
self.register_parameter(str(offset + i), param)
return self
def extra_repr(self):
child_lines = []
for k, p in self._parameters.items():
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
def __call__(self, input):
raise RuntimeError('ParameterList should not be called.')
class ParameterDict(Module):
r"""Holds parameters in a dictionary.
ParameterDict can be indexed like a regular Python dictionary, but parameters it
contains are properly registered, and will be visible by all Module methods.
:class:`~torch.nn.ParameterDict` is an **ordered** dictionary that respects
* the order of insertion, and
* in :meth:`~torch.nn.ParameterDict.update`, the order of the merged ``OrderedDict``
or another :class:`~torch.nn.ParameterDict` (the argument to
:meth:`~torch.nn.ParameterDict.update`).
Note that :meth:`~torch.nn.ParameterDict.update` with other unordered mapping
types (e.g., Python's plain ``dict``) does not preserve the order of the
merged mapping.
Arguments:
parameters (iterable, optional): a mapping (dictionary) of
(string : :class:`~torch.nn.Parameter`) or an iterable of key-value pairs
of type (string, :class:`~torch.nn.Parameter`)
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.params = nn.ParameterDict({
'left': nn.Parameter(torch.randn(5, 10)),
'right': nn.Parameter(torch.randn(5, 10))
})
def forward(self, x, choice):
x = self.params[choice].mm(x)
return x
"""
def __init__(self, parameters=None):
super(ParameterDict, self).__init__()
if parameters is not None:
self.update(parameters)
def __getitem__(self, key):
return self._parameters[key]
def __setitem__(self, key, parameter):
self.register_parameter(key, parameter)
def __delitem__(self, key):
del self._parameters[key]
def __len__(self):
return len(self._parameters)
def __iter__(self):
return iter(self._parameters.keys())
def __contains__(self, key):
return key in self._parameters
def clear(self):
"""Remove all items from the ParameterDict.
"""
self._parameters.clear()
def pop(self, key):
r"""Remove key from the ParameterDict and return its parameter.
Arguments:
key (string): key to pop from the ParameterDict
"""
v = self[key]
del self[key]
return v
def keys(self):
r"""Return an iterable of the ParameterDict keys.
"""
return self._parameters.keys()
def items(self):
r"""Return an iterable of the ParameterDict key/value pairs.
"""
return self._parameters.items()
def values(self):
r"""Return an iterable of the ParameterDict values.
"""
return self._parameters.values()
def update(self, parameters):
r"""Update the :class:`~torch.nn.ParameterDict` with the key-value pairs from a
mapping or an iterable, overwriting existing keys.
.. note::
If :attr:`parameters` is an ``OrderedDict``, a :class:`~torch.nn.ParameterDict`, or
an iterable of key-value pairs, the order of new elements in it is preserved.
Arguments:
parameters (iterable): a mapping (dictionary) from string to
:class:`~torch.nn.Parameter`, or an iterable of
key-value pairs of type (string, :class:`~torch.nn.Parameter`)
"""
if not isinstance(parameters, container_abcs.Iterable):
raise TypeError("ParametersDict.update should be called with an "
"iterable of key/value pairs, but got " +
type(parameters).__name__)
if isinstance(parameters, container_abcs.Mapping):
if isinstance(parameters, (OrderedDict, ParameterDict)):
for key, parameter in parameters.items():
self[key] = parameter
else:
for key, parameter in sorted(parameters.items()):
self[key] = parameter
else:
for j, p in enumerate(parameters):
if not isinstance(p, container_abcs.Iterable):
raise TypeError("ParameterDict update sequence element "
"#" + str(j) + " should be Iterable; is" +
type(p).__name__)
if not len(p) == 2:
raise ValueError("ParameterDict update sequence element "
"#" + str(j) + " has length " + str(len(p)) +
"; 2 is required")
self[p[0]] = p[1]
def extra_repr(self):
child_lines = []
for k, p in self._parameters.items():
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + k + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
def __call__(self, input):
        raise RuntimeError('ParameterDict should not be called.')

# source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/container.py
from .module import Module
from .linear import Identity, Linear, Bilinear
from .conv import Conv1d, Conv2d, Conv3d, \
ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
Hardsigmoid
from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, \
SmoothL1Loss, SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, PoissonNLLLoss
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
from .padding import ReflectionPad1d, ReflectionPad2d, ReplicationPad1d, ReplicationPad2d, \
ReplicationPad3d, ZeroPad2d, ConstantPad1d, ConstantPad2d, ConstantPad3d
from .sparse import Embedding, EmbeddingBag
from .rnn import RNNBase, RNN, LSTM, GRU, \
RNNCellBase, RNNCell, LSTMCell, GRUCell
from .pixelshuffle import PixelShuffle
from .upsampling import UpsamplingNearest2d, UpsamplingBilinear2d, Upsample
from .distance import PairwiseDistance, CosineSimilarity
from .fold import Fold, Unfold
from .adaptive import AdaptiveLogSoftmaxWithLoss
from .transformer import TransformerEncoder, TransformerDecoder, \
TransformerEncoderLayer, TransformerDecoderLayer, Transformer
from .flatten import Flatten
__all__ = [
'Module', 'Identity', 'Linear', 'Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d',
'ConvTranspose2d', 'ConvTranspose3d', 'Threshold', 'ReLU', 'Hardtanh', 'ReLU6',
'Sigmoid', 'Tanh', 'Softmax', 'Softmax2d', 'LogSoftmax', 'ELU', 'SELU', 'CELU', 'GLU', 'GELU', 'Hardshrink',
'LeakyReLU', 'LogSigmoid', 'Softplus', 'Softshrink', 'MultiheadAttention', 'PReLU', 'Softsign', 'Softmin',
'Tanhshrink', 'RReLU', 'L1Loss', 'NLLLoss', 'KLDivLoss', 'MSELoss', 'BCELoss', 'BCEWithLogitsLoss',
'NLLLoss2d', 'PoissonNLLLoss', 'CosineEmbeddingLoss', 'CTCLoss', 'HingeEmbeddingLoss', 'MarginRankingLoss',
'MultiLabelMarginLoss', 'MultiLabelSoftMarginLoss', 'MultiMarginLoss', 'SmoothL1Loss',
'SoftMarginLoss', 'CrossEntropyLoss', 'Container', 'Sequential', 'ModuleList', 'ModuleDict',
'ParameterList', 'ParameterDict', 'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'MaxPool1d', 'MaxPool2d',
    'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'FractionalMaxPool2d', 'FractionalMaxPool3d',
'LPPool1d', 'LPPool2d', 'LocalResponseNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d', 'InstanceNorm1d',
'InstanceNorm2d', 'InstanceNorm3d', 'LayerNorm', 'GroupNorm', 'SyncBatchNorm',
'Dropout', 'Dropout2d', 'Dropout3d', 'AlphaDropout', 'FeatureAlphaDropout',
'ReflectionPad1d', 'ReflectionPad2d', 'ReplicationPad2d', 'ReplicationPad1d', 'ReplicationPad3d',
'CrossMapLRN2d', 'Embedding', 'EmbeddingBag', 'RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell',
'LSTMCell', 'GRUCell', 'PixelShuffle', 'Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d',
'PairwiseDistance', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d',
'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d', 'TripletMarginLoss', 'ZeroPad2d', 'ConstantPad1d', 'ConstantPad2d',
'ConstantPad3d', 'Bilinear', 'CosineSimilarity', 'Unfold', 'Fold',
'AdaptiveLogSoftmaxWithLoss', 'TransformerEncoder', 'TransformerDecoder',
'TransformerEncoderLayer', 'TransformerDecoderLayer', 'Transformer',
'Flatten', 'Hardsigmoid',
]

# source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/__init__.py
from .batchnorm import _NormBase
from .. import functional as F
class _InstanceNorm(_NormBase):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=False,
track_running_stats=False):
super(_InstanceNorm, self).__init__(
num_features, eps, momentum, affine, track_running_stats)
def _check_input_dim(self, input):
raise NotImplementedError
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
version = local_metadata.get('version', None)
# at version 1: removed running_mean and running_var when
# track_running_stats=False (default)
if version is None and not self.track_running_stats:
running_stats_keys = []
for name in ('running_mean', 'running_var'):
key = prefix + name
if key in state_dict:
running_stats_keys.append(key)
if len(running_stats_keys) > 0:
error_msgs.append(
'Unexpected running stats buffer(s) {names} for {klass} '
'with track_running_stats=False. If state_dict is a '
'checkpoint saved before 0.4.0, this may be expected '
'because {klass} does not track running stats by default '
'since 0.4.0. Please remove these keys from state_dict. If '
'the running stats are actually needed, instead set '
'track_running_stats=True in {klass} to enable them. See '
'the documentation of {klass} for details.'
.format(names=" and ".join('"{}"'.format(k) for k in running_stats_keys),
klass=self.__class__.__name__))
for key in running_stats_keys:
state_dict.pop(key)
super(_InstanceNorm, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
def forward(self, input):
self._check_input_dim(input)
return F.instance_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
self.training or not self.track_running_stats, self.momentum, self.eps)
class InstanceNorm1d(_InstanceNorm):
r"""Applies Instance Normalization over a 3D input (a mini-batch of 1D
inputs with optional additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.
If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
.. note::
:class:`InstanceNorm1d` and :class:`LayerNorm` are very similar, but
have some subtle differences. :class:`InstanceNorm1d` is applied
on each channel of channeled data like multidimensional time series, but
:class:`LayerNorm` is usually applied on entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
        transform, while :class:`InstanceNorm1d` usually does not apply an
        affine transform.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)`
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters, initialized the same way as done for batch normalization.
Default: ``False``.
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, L)`
- Output: :math:`(N, C, L)` (same shape as input)
Examples::
>>> # Without Learnable Parameters
>>> m = nn.InstanceNorm1d(100)
>>> # With Learnable Parameters
>>> m = nn.InstanceNorm1d(100, affine=True)
>>> input = torch.randn(20, 100, 40)
>>> output = m(input)
.. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
https://arxiv.org/abs/1607.08022
"""
def _check_input_dim(self, input):
if input.dim() == 2:
raise ValueError(
                'InstanceNorm1d returns 0-filled tensor to 2D tensor. '
                'This is because InstanceNorm1d reshapes inputs to '
                '(1, N * C, ...) from (N, C, ...) and this makes '
                'variances 0.'
)
if input.dim() != 3:
raise ValueError('expected 3D input (got {}D input)'
.format(input.dim()))
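# A minimal sketch of the forward logic in _InstanceNorm above (illustrative
# helper): with the default track_running_stats=False, instance norm uses
# per-sample statistics in both train and eval modes, so switching to eval()
# does not change the output.
def _instance_norm_stats_sketch():
    import torch
    m = InstanceNorm1d(4)
    x = torch.randn(2, 4, 8)
    y_train = m(x)
    y_eval = m.eval()(x)
    assert torch.allclose(y_train, y_eval)
    return y_eval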
class InstanceNorm2d(_InstanceNorm):
r"""Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs
with additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.
If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
.. note::
:class:`InstanceNorm2d` and :class:`LayerNorm` are very similar, but
have some subtle differences. :class:`InstanceNorm2d` is applied
on each channel of channeled data like RGB images, but
:class:`LayerNorm` is usually applied on entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
        transform, while :class:`InstanceNorm2d` usually does not apply an
        affine transform.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)`
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters, initialized the same way as done for batch normalization.
Default: ``False``.
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
Examples::
>>> # Without Learnable Parameters
>>> m = nn.InstanceNorm2d(100)
>>> # With Learnable Parameters
>>> m = nn.InstanceNorm2d(100, affine=True)
>>> input = torch.randn(20, 100, 35, 45)
>>> output = m(input)
.. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
https://arxiv.org/abs/1607.08022
"""
def _check_input_dim(self, input):
if input.dim() != 4:
raise ValueError('expected 4D input (got {}D input)'
.format(input.dim()))
class InstanceNorm3d(_InstanceNorm):
r"""Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs
with additional channel dimension) as described in the paper
`Instance Normalization: The Missing Ingredient for Fast Stylization`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension separately
for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size C (where C is the input size) if :attr:`affine` is ``True``.
By default, this layer uses instance statistics computed from input data in
both training and evaluation modes.
If :attr:`track_running_stats` is set to ``True``, during training this
layer keeps running estimates of its computed mean and variance, which are
then used for normalization during evaluation. The running estimates are
kept with a default :attr:`momentum` of 0.1.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
.. note::
:class:`InstanceNorm3d` and :class:`LayerNorm` are very similar, but
have some subtle differences. :class:`InstanceNorm3d` is applied
on each channel of channeled data like 3D models with RGB color, but
:class:`LayerNorm` is usually applied on entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
        transform, while :class:`InstanceNorm3d` usually does not apply an
        affine transform.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, D, H, W)`
eps: a value added to the denominator for numerical stability. Default: 1e-5
momentum: the value used for the running_mean and running_var computation. Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters, initialized the same way as done for batch normalization.
Default: ``False``.
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics and always uses batch
statistics in both training and eval modes. Default: ``False``
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
Examples::
>>> # Without Learnable Parameters
>>> m = nn.InstanceNorm3d(100)
>>> # With Learnable Parameters
>>> m = nn.InstanceNorm3d(100, affine=True)
>>> input = torch.randn(20, 100, 35, 45, 10)
>>> output = m(input)
.. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
https://arxiv.org/abs/1607.08022
"""
def _check_input_dim(self, input):
if input.dim() != 5:
raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))

# source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/instancenorm.py
import math
import torch
from torch.nn.parameter import Parameter
from .. import functional as F
from .. import init
from .module import Module
class Identity(Module):
r"""A placeholder identity operator that is argument-insensitive.
Args:
args: any argument (unused)
kwargs: any keyword argument (unused)
Examples::
>>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 20])
"""
def __init__(self, *args, **kwargs):
super(Identity, self).__init__()
def forward(self, input):
return input
class Linear(Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
additional dimensions and :math:`H_{in} = \text{in\_features}`
- Output: :math:`(N, *, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \text{out\_features}`.
Attributes:
weight: the learnable weights of the module of shape
:math:`(\text{out\_features}, \text{in\_features})`. The values are
initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
:math:`k = \frac{1}{\text{in\_features}}`
bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{1}{\text{in\_features}}`
Examples::
>>> m = nn.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
__constants__ = ['in_features', 'out_features']
def __init__(self, in_features, out_features, bias=True):
super(Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
return F.linear(input, self.weight, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
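# A small numeric check of reset_parameters above (illustrative helper):
# kaiming_uniform_ with a=sqrt(5) gives gain sqrt(2 / (1 + 5)) = 1/sqrt(3),
# so for a 2D weight the sampling bound reduces to 1/sqrt(fan_in) -- the
# same bound used for the bias.
def _linear_init_bound_sketch():
    m = Linear(20, 30)
    bound = 1 / math.sqrt(20)  # fan_in = in_features = 20
    assert m.weight.abs().max().item() <= bound
    assert m.bias.abs().max().item() <= bound
    return m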
class Bilinear(Module):
r"""Applies a bilinear transformation to the incoming data:
:math:`y = x_1 A x_2 + b`
Args:
in1_features: size of each first input sample
in2_features: size of each second input sample
out_features: size of each output sample
bias: If set to False, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\text{in1\_features}` and
:math:`*` means any number of additional dimensions. All but the last dimension
of the inputs should be the same.
- Input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\text{in2\_features}`.
- Output: :math:`(N, *, H_{out})` where :math:`H_{out}=\text{out\_features}`
and all but the last dimension are the same shape as the input.
Attributes:
weight: the learnable weights of the module of shape
:math:`(\text{out\_features}, \text{in1\_features}, \text{in2\_features})`.
The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
:math:`k = \frac{1}{\text{in1\_features}}`
bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
:math:`k = \frac{1}{\text{in1\_features}}`
Examples::
>>> m = nn.Bilinear(20, 30, 40)
>>> input1 = torch.randn(128, 20)
>>> input2 = torch.randn(128, 30)
>>> output = m(input1, input2)
>>> print(output.size())
torch.Size([128, 40])
"""
__constants__ = ['in1_features', 'in2_features', 'out_features']
def __init__(self, in1_features, in2_features, out_features, bias=True):
super(Bilinear, self).__init__()
self.in1_features = in1_features
self.in2_features = in2_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in1_features, in2_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
bound = 1 / math.sqrt(self.weight.size(1))
init.uniform_(self.weight, -bound, bound)
if self.bias is not None:
init.uniform_(self.bias, -bound, bound)
def forward(self, input1, input2):
return F.bilinear(input1, input2, self.weight, self.bias)
def extra_repr(self):
return 'in1_features={}, in2_features={}, out_features={}, bias={}'.format(
self.in1_features, self.in2_features, self.out_features, self.bias is not None
)
# TODO: PartialLinear - maybe in sparse?

# source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/modules/linear.py
import operator
import torch
import warnings
from itertools import chain
from ..modules import Module
from .scatter_gather import scatter_kwargs, gather
from .replicate import replicate
from .parallel_apply import parallel_apply
from torch.cuda._utils import _get_device_index
def _check_balance(device_ids):
imbalance_warn = """
There is an imbalance between your GPUs. You may want to exclude GPU {} which
has less than 75% of the memory or cores of GPU {}. You can do so by setting
the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES
environment variable."""
device_ids = list(map(lambda x: _get_device_index(x, True), device_ids))
dev_props = [torch.cuda.get_device_properties(i) for i in device_ids]
def warn_imbalance(get_prop):
values = [get_prop(props) for props in dev_props]
min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1))
max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1))
if min_val / max_val < 0.75:
warnings.warn(imbalance_warn.format(device_ids[min_pos], device_ids[max_pos]))
return True
return False
if warn_imbalance(lambda props: props.total_memory):
return
if warn_imbalance(lambda props: props.multi_processor_count):
return
class DataParallel(Module):
r"""Implements data parallelism at the module level.
This container parallelizes the application of the given :attr:`module` by
splitting the input across the specified devices by chunking in the batch
dimension (other objects will be copied once per device). In the forward
pass, the module is replicated on each device, and each replica handles a
portion of the input. During the backwards pass, gradients from each replica
are summed into the original module.
The batch size should be larger than the number of GPUs used.
.. warning::
It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`,
instead of this class, to do multi-GPU training, even if there is only a single
node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`.
Arbitrary positional and keyword inputs are allowed to be passed into
    DataParallel but some types are specially handled. Tensors will be
    **scattered** on the dim specified (default 0). Tuple, list and dict types
    will be shallow-copied. The other types will be shared among different threads
and can be corrupted if written to in the model's forward pass.
The parallelized :attr:`module` must have its parameters and buffers on
``device_ids[0]`` before running this :class:`~torch.nn.DataParallel`
module.
.. warning::
In each forward, :attr:`module` is **replicated** on each device, so any
updates to the running module in ``forward`` will be lost. For example,
if :attr:`module` has a counter attribute that is incremented in each
``forward``, it will always stay at the initial value because the update
is done on the replicas which are destroyed after ``forward``. However,
:class:`~torch.nn.DataParallel` guarantees that the replica on
``device[0]`` will have its parameters and buffers sharing storage with
the base parallelized :attr:`module`. So **in-place** updates to the
parameters or buffers on ``device[0]`` will be recorded. E.g.,
:class:`~torch.nn.BatchNorm2d` and :func:`~torch.nn.utils.spectral_norm`
rely on this behavior to update the buffers.
.. warning::
Forward and backward hooks defined on :attr:`module` and its submodules
will be invoked ``len(device_ids)`` times, each with inputs located on
a particular device. Particularly, the hooks are only guaranteed to be
executed in correct order with respect to operations on corresponding
devices. For example, it is not guaranteed that hooks set via
:meth:`~torch.nn.Module.register_forward_pre_hook` be executed before
`all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but
that each such hook be executed before the corresponding
:meth:`~torch.nn.Module.forward` call of that device.
.. warning::
When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in
:func:`forward`, this wrapper will return a vector of length equal to
number of devices used in data parallelism, containing the result from
each device.
.. note::
There is a subtlety in using the
``pack sequence -> recurrent network -> unpack sequence`` pattern in a
:class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.
See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for
details.
Args:
module (Module): module to be parallelized
device_ids (list of int or torch.device): CUDA devices (default: all devices)
output_device (int or torch.device): device location of output (default: device_ids[0])
Attributes:
module (Module): the module to be parallelized
Example::
>>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
>>> output = net(input_var) # input_var can be on any device, including CPU
"""
# TODO: update notes/cuda.rst when this class handles 8+ GPUs well
def __init__(self, module, device_ids=None, output_device=None, dim=0):
super(DataParallel, self).__init__()
if not torch.cuda.is_available():
self.module = module
self.device_ids = []
return
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if output_device is None:
output_device = device_ids[0]
self.dim = dim
self.module = module
self.device_ids = list(map(lambda x: _get_device_index(x, True), device_ids))
self.output_device = _get_device_index(output_device, True)
self.src_device_obj = torch.device("cuda:{}".format(self.device_ids[0]))
_check_balance(self.device_ids)
if len(self.device_ids) == 1:
self.module.cuda(device_ids[0])
def forward(self, *inputs, **kwargs):
if not self.device_ids:
return self.module(*inputs, **kwargs)
for t in chain(self.module.parameters(), self.module.buffers()):
if t.device != self.src_device_obj:
raise RuntimeError("module must have its parameters and buffers "
"on device {} (device_ids[0]) but found one of "
"them on device: {}".format(self.src_device_obj, t.device))
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.module(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return self.gather(outputs, self.output_device)
def replicate(self, module, device_ids):
return replicate(module, device_ids, not torch.is_grad_enabled())
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def parallel_apply(self, replicas, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
r"""Evaluates module(input) in parallel across the GPUs given in device_ids.
This is the functional version of the DataParallel module.
Args:
module (Module): the module to evaluate in parallel
inputs (Tensor): inputs to the module
device_ids (list of int or torch.device): GPU ids on which to replicate module
        output_device (int or torch.device): GPU location of the output. Use -1 to indicate the CPU.
            (default: device_ids[0])
Returns:
a Tensor containing the result of module(input) located on
output_device
"""
if not isinstance(inputs, tuple):
inputs = (inputs,)
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if output_device is None:
output_device = device_ids[0]
device_ids = list(map(lambda x: _get_device_index(x, True), device_ids))
output_device = _get_device_index(output_device, True)
src_device_obj = torch.device("cuda:{}".format(device_ids[0]))
for t in chain(module.parameters(), module.buffers()):
if t.device != src_device_obj:
raise RuntimeError("module must have its parameters and buffers "
"on device {} (device_ids[0]) but found one of "
"them on device: {}".format(src_device_obj, t.device))
inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
if len(device_ids) == 1:
return module(*inputs[0], **module_kwargs[0])
used_device_ids = device_ids[:len(inputs)]
replicas = replicate(module, used_device_ids)
outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim)

# source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/parallel/data_parallel.py
import torch
from ._functions import Scatter, Gather
def scatter(inputs, target_gpus, dim=0):
r"""
Slices tensors into approximately equal chunks and
distributes them across given GPUs. Duplicates
references to objects that are not tensors.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
return Scatter.apply(target_gpus, None, dim, obj)
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
return list(map(list, zip(*map(scatter_map, obj))))
if isinstance(obj, dict) and len(obj) > 0:
return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for _ in target_gpus]
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
# fn is recursive). To avoid this reference cycle, we set the function to
# None, clearing the cell
try:
res = scatter_map(inputs)
finally:
scatter_map = None
return res
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
r"""Scatter with support for kwargs dictionary"""
inputs = scatter(inputs, target_gpus, dim) if inputs else []
kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
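# A minimal sketch of the padding behavior above (illustrative helper;
# assumes at least 2 CUDA devices, since Scatter copies chunks to GPUs):
# when only keyword arguments are given, the inputs side is padded with
# empty tuples so the two sequences line up one pair per device.
def _scatter_kwargs_sketch():
    if torch.cuda.device_count() < 2:
        return None
    kw = {'mask': torch.ones(8, 1)}
    ins, kws = scatter_kwargs((), kw, [0, 1], dim=0)
    assert len(ins) == len(kws) == 2 and ins[0] == ()
    return ins, kws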
def gather(outputs, target_device, dim=0):
r"""
Gathers tensors from different GPUs on a specified device
(-1 means the CPU).
"""
def gather_map(outputs):
out = outputs[0]
if isinstance(out, torch.Tensor):
return Gather.apply(target_device, dim, *outputs)
if out is None:
return None
if isinstance(out, dict):
if not all((len(out) == len(d) for d in outputs)):
raise ValueError('All dicts must have the same number of keys')
return type(out)(((k, gather_map([d[k] for d in outputs]))
for k in out))
return type(out)(map(gather_map, zip(*outputs)))
# Recursive function calls like this create reference cycles.
# Setting the function to None clears the refcycle.
try:
res = gather_map(outputs)
finally:
gather_map = None
    return res

# source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/parallel/scatter_gather.py
import warnings
import torch
import torch.cuda.comm as comm
from torch.autograd import Function
from torch.cuda._utils import _get_device_index
class Broadcast(Function):
@staticmethod
def forward(ctx, target_gpus, *inputs):
if not all(input.is_cuda for input in inputs):
raise TypeError('Broadcast function not implemented for CPU tensors')
target_gpus = list(map(lambda x: _get_device_index(x, True), target_gpus))
ctx.target_gpus = target_gpus
if len(inputs) == 0:
return tuple()
ctx.num_inputs = len(inputs)
ctx.input_device = inputs[0].get_device()
outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus)
non_differentiables = []
for idx, input_requires_grad in enumerate(ctx.needs_input_grad[1:]):
if not input_requires_grad:
for output in outputs:
non_differentiables.append(output[idx])
ctx.mark_non_differentiable(*non_differentiables)
return tuple([t for tensors in outputs for t in tensors])
@staticmethod
def backward(ctx, *grad_outputs):
return (None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs)
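# A minimal sketch (illustrative helper; assumes >= 2 CUDA devices):
# Broadcast copies each input tensor to every target GPU, and gradients are
# summed back onto the source device by ReduceAddCoalesced in backward.
def _broadcast_sketch():
    if torch.cuda.device_count() < 2:
        return None
    t = torch.randn(3, device='cuda:0', requires_grad=True)
    copies = Broadcast.apply([0, 1], t)  # one copy of t per target device
    assert len(copies) == 2 and copies[1].get_device() == 1
    return copies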
class ReduceAddCoalesced(Function):
@staticmethod
def forward(ctx, destination, num_inputs, *grads):
ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)]
grads = [grads[i:i + num_inputs]
for i in range(0, len(grads), num_inputs)]
return comm.reduce_add_coalesced(grads, destination)
@staticmethod
def backward(ctx, *grad_outputs):
return (None, None,) + Broadcast.apply(ctx.target_gpus, *grad_outputs)
class Gather(Function):
@staticmethod
def forward(ctx, target_device, dim, *inputs):
assert all(map(lambda i: i.is_cuda, inputs))
target_device = _get_device_index(target_device, True)
ctx.target_device = target_device
ctx.dim = dim
ctx.input_gpus = tuple(map(lambda i: i.get_device(), inputs))
if all(t.dim() == 0 for t in inputs) and dim == 0:
inputs = tuple(t.view(1) for t in inputs)
warnings.warn('Was asked to gather along dimension 0, but all '
'input tensors were scalars; will instead unsqueeze '
'and return a vector.')
ctx.unsqueezed_scalar = True
else:
ctx.unsqueezed_scalar = False
ctx.input_sizes = tuple(map(lambda i: i.size(ctx.dim), inputs))
return comm.gather(inputs, ctx.dim, ctx.target_device)
@staticmethod
def backward(ctx, grad_output):
scattered_grads = Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output)
if ctx.unsqueezed_scalar:
scattered_grads = tuple(g[0] for g in scattered_grads)
return (None, None) + scattered_grads
class Scatter(Function):
@staticmethod
def forward(ctx, target_gpus, chunk_sizes, dim, input):
target_gpus = list(map(lambda x: _get_device_index(x, True), target_gpus))
ctx.dim = dim
ctx.input_device = input.get_device() if input.is_cuda else -1
streams = None
if ctx.input_device == -1:
# Perform CPU to GPU copies in a background stream
streams = [_get_stream(device) for device in target_gpus]
outputs = comm.scatter(input, target_gpus, chunk_sizes, ctx.dim, streams)
# Synchronize with the copy stream
if streams is not None:
for i, output in enumerate(outputs):
with torch.cuda.device(target_gpus[i]):
main_stream = torch.cuda.current_stream()
main_stream.wait_stream(streams[i])
output.record_stream(main_stream)
return outputs
@staticmethod
def backward(ctx, *grad_output):
return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output)
# background streams used for copying
_streams = None
def _get_stream(device):
"""Gets a background stream for copying between CPU and GPU"""
global _streams
if device == -1:
return None
if _streams is None:
_streams = [None] * torch.cuda.device_count()
if _streams[device] is None:
_streams[device] = torch.cuda.Stream(device)
    return _streams[device]

# source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/parallel/_functions.py
import threading
import torch
from torch.cuda._utils import _get_device_index
from torch._utils import ExceptionWrapper
def get_a_var(obj):
if isinstance(obj, torch.Tensor):
return obj
if isinstance(obj, list) or isinstance(obj, tuple):
for result in map(get_a_var, obj):
if isinstance(result, torch.Tensor):
return result
if isinstance(obj, dict):
for result in map(get_a_var, obj.items()):
if isinstance(result, torch.Tensor):
return result
return None
def parallel_apply(modules, inputs, kwargs_tup=None, devices=None):
r"""Applies each `module` in :attr:`modules` in parallel on arguments
contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
on each of :attr:`devices`.
Args:
        modules (list of Module): modules to be parallelized
        inputs (list of Tensor): inputs to the modules
        devices (list of int or torch.device): CUDA devices
:attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
    :attr:`devices` (if given) should all have the same length. Moreover, each
element of :attr:`inputs` can either be a single object as the only argument
to a module, or a collection of positional arguments.
"""
assert len(modules) == len(inputs)
if kwargs_tup is not None:
assert len(modules) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(modules)
if devices is not None:
assert len(modules) == len(devices)
else:
devices = [None] * len(modules)
devices = list(map(lambda x: _get_device_index(x, True), devices))
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(i, module, input, kwargs, device=None):
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
output = module(*input, **kwargs)
with lock:
results[i] = output
except Exception:
with lock:
results[i] = ExceptionWrapper(
where="in replica {} on device {}".format(i, device))
if len(modules) > 1:
threads = [threading.Thread(target=_worker,
args=(i, module, input, kwargs, device))
for i, (module, input, kwargs, device) in
enumerate(zip(modules, inputs, kwargs_tup, devices))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, ExceptionWrapper):
output.reraise()
outputs.append(output)
    return outputs

# Source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/parallel/parallel_apply.py
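
# Usage sketch for parallel_apply above, assuming two CUDA devices: replicate
# makes one module copy per device, and parallel_apply runs them in threads.
import torch
import torch.nn as nn
from torch.nn.parallel import parallel_apply, replicate

if torch.cuda.device_count() >= 2:
    module = nn.Linear(4, 2).cuda(0)
    replicas = replicate(module, [0, 1])
    inputs = (torch.randn(3, 4, device='cuda:0'),
              torch.randn(3, 4, device='cuda:1'))
    outputs = parallel_apply(replicas, inputs, devices=[0, 1])
    assert [out.device.index for out in outputs] == [0, 1]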
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from torch.nn import Conv2d, Conv3d, ReLU, Linear, BatchNorm2d
class ConvReLU2d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 2d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv2d and type(relu) == ReLU, \
            'Incorrect types for input modules {} {}'.format(
type(conv), type(relu))
super(ConvReLU2d, self).__init__(conv, relu)
class ConvReLU3d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv3d and type(relu) == ReLU, \
            'Incorrect types for input modules {} {}'.format(
type(conv), type(relu))
super(ConvReLU3d, self).__init__(conv, relu)
class LinearReLU(torch.nn.Sequential):
r"""This is a sequential container which calls the Linear and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, linear, relu):
assert type(linear) == Linear and type(relu) == ReLU, \
            'Incorrect types for input modules {} {}'.format(
type(linear), type(relu))
super(LinearReLU, self).__init__(linear, relu)
class ConvBn2d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 2d and Batch Norm 2d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn):
assert type(conv) == Conv2d and type(bn) == BatchNorm2d, \
            'Incorrect types for input modules {} {}'.format(
type(conv), type(bn))
super(ConvBn2d, self).__init__(conv, bn)
class ConvBnReLU2d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 2d, Batch Norm 2d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert type(conv) == Conv2d and type(bn) == BatchNorm2d and \
            type(relu) == ReLU, 'Incorrect types for input modules {} {} {}' \
.format(type(conv), type(bn), type(relu))
        super(ConvBnReLU2d, self).__init__(conv, bn, relu)

# Source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/intrinsic/modules/fused.py
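
# Minimal sketch: the intrinsic containers above are plain nn.Sequential
# wrappers, so they can be built and run directly; fusion into a single kernel
# only happens later in the quantization flow.
import torch
import torch.nn as nn
from torch.nn.intrinsic import ConvReLU2d

fused = ConvReLU2d(nn.Conv2d(3, 8, kernel_size=3, padding=1), nn.ReLU())
out = fused(torch.randn(1, 3, 32, 32))   # identical to conv followed by relu
assert out.shape == (1, 8, 32, 32)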
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.intrinsic
import torch.nn.intrinsic.qat
import torch.nn.quantized as nnq
from torch.nn.utils import fuse_conv_bn_weights
class ConvReLU2d(nnq.Conv2d):
r"""
A ConvReLU2d module is a fused module of Conv2d and ReLU
We adopt the same interface as :class:`torch.nn.quantized.Conv2d`.
Attributes:
Same as torch.nn.quantized.Conv2d
"""
_FLOAT_MODULE = torch.nn.intrinsic.ConvReLU2d
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros'):
super(ConvReLU2d, self).__init__(
in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias,
padding_mode=padding_mode)
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
return torch.ops.quantized.conv2d_relu(
input, self._packed_params, self.stride, self.padding,
self.dilation, self.groups, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedConvReLU2d'
@classmethod
def from_float(cls, mod):
if type(mod) == torch.nn.intrinsic.qat.ConvBnReLU2d:
mod.weight, mod.bias = fuse_conv_bn_weights(
mod.weight, mod.bias, mod.running_mean, mod.running_var,
mod.eps, mod.gamma, mod.beta)
return super(ConvReLU2d, cls).from_float(mod)
class ConvReLU3d(nnq.Conv3d):
r"""
A ConvReLU3d module is a fused module of Conv3d and ReLU
We adopt the same interface as :class:`torch.nn.quantized.Conv3d`.
.. note::
Attributes: Same as torch.nn.quantized.Conv3d
"""
_FLOAT_MODULE = torch.nn.intrinsic.ConvReLU3d
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros'):
super(ConvReLU3d, self).__init__(
in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias,
padding_mode=padding_mode)
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 5:
raise ValueError("Input shape must be `(N, C, D, H, W)`!")
return torch.ops.quantized.conv3d_relu(
input, self._packed_params, self.stride, self.padding,
self.dilation, self.groups, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedConvReLU3d'
@classmethod
def from_float(cls, mod):
# TODO: Add qat support for ConvReLU3d and ConvBnReLU3d
return super(ConvReLU3d, cls).from_float(mod) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/intrinsic/quantized/modules/conv_relu.py | 0.918713 | 0.498169 | conv_relu.py | pypi |
import torch
def convert_conv2d_weight_memory_format(module, memory_format):
r"""Convert ``memory_format`` of ``nn.Conv2d.weight`` to ``memory_format``
The conversion recursively applies to nested ``nn.Module``, including ``module``.
Note that it only changes the memory_format, but not the semantics of each dimensions.
This function is used to facilitate the computation to adopt NHWC kernels, which
provides considerable speed up for fp16 data on CUDA devices with compute capability >= 7.0
.. note::
Calling ``model.to(memory_format=torch.channels_last)`` is more aggressive
than the utility function ``convert_conv2d_weight_memory_format``. Any
layer with 4d weight will be affected by ``model.to``, which does not
necessarily benefit from conversion to specified ``memory_format``.
    One case we are confident about is the NHWC (channels_last) conversion for
    convolution in cuDNN, since it is beneficial to run convolution in NHWC
    even when a permutation has to be applied to the input tensors.
    Hence our strategy here is to convert only the weight of the convolution to
    channels_last. This ensures that:
1. Fast convolution kernels will be used, the benefit of which could
outweigh overhead of permutation (if input is not in the same format)
2. No unnecessary permutations are applied on layers that do not benefit
from memory_format conversion.
The optimal case is that, layers between convolution layers are channels
last compatible. Input tensor would be permuted to channels last when it
encounters the first convolution layer and stay in that memory format.
Hence following convolutions will not need to permute its input tensor.
In case where a channels last incompatible layer is between convolution
layers, we need to permute the input tensor back to contiguous format
for that layer. The input tensor will go through the remaining layers in
contiguous format and be permuted to channels last when it encounters
another convolution layer. There's no point in propagating that
permutation to an earlier layer, as most layers are quite agnostic to
``memory_format``.
    This claim might change when PyTorch supports fusion of permutations, as
    there might then be a better spot to fuse the permutation than
    immediately before a convolution.
Args:
module (nn.Module): ``nn.Conv2d`` & ``nn.ConvTranspose2d`` or container
``nn.Module``
        memory_format: user-specified ``memory_format``,
e.g. ``torch.channels_last`` or ``torch.contiguous_format``
Returns:
The original module with updated ``nn.Conv2d``
    Example::
>>> input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float16, device="cuda")
>>> model = nn.Sequential(
>>> nn.Conv2d(8, 4, 3)).cuda().half()
>>> # This is identical to:
>>> # nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last)
>>> model = nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last)
>>> out = model(input)
"""
# TODO: expand this to `_ConvNd` when channels_last support is extended
# beyond only 4d tensors.
if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.ConvTranspose2d):
weight_data = module.weight.detach().clone().contiguous(memory_format=memory_format)
module.weight.data = weight_data.resize_(weight_data.size(), memory_format=memory_format)
for child in module.children():
convert_conv2d_weight_memory_format(child, memory_format)
    return module

# Source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/utils/memory_format.py
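
# Verification sketch for the conversion above: only the convolution weight's
# memory format changes; parameter values and module structure are untouched.
# The conversion itself runs fine on CPU, though the speedup targets cuDNN.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(8, 4, 3), nn.ReLU())
model = nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last)
assert model[0].weight.is_contiguous(memory_format=torch.channels_last)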
r"""
Weight Normalization from https://arxiv.org/abs/1602.07868
"""
from torch.nn.parameter import Parameter
from torch import _weight_norm, norm_except_dim
class WeightNorm(object):
def __init__(self, name, dim):
if dim is None:
dim = -1
self.name = name
self.dim = dim
def compute_weight(self, module):
g = getattr(module, self.name + '_g')
v = getattr(module, self.name + '_v')
return _weight_norm(v, g, self.dim)
@staticmethod
def apply(module, name, dim):
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, WeightNorm) and hook.name == name:
raise RuntimeError("Cannot register two weight_norm hooks on "
"the same parameter {}".format(name))
if dim is None:
dim = -1
fn = WeightNorm(name, dim)
weight = getattr(module, name)
# remove w from parameter list
del module._parameters[name]
# add g and v as new parameters and express w as g/||v|| * v
module.register_parameter(name + '_g', Parameter(norm_except_dim(weight, 2, dim).data))
module.register_parameter(name + '_v', Parameter(weight.data))
setattr(module, name, fn.compute_weight(module))
# recompute weight before every forward()
module.register_forward_pre_hook(fn)
return fn
def remove(self, module):
weight = self.compute_weight(module)
delattr(module, self.name)
del module._parameters[self.name + '_g']
del module._parameters[self.name + '_v']
module.register_parameter(self.name, Parameter(weight.data))
def __call__(self, module, inputs):
setattr(module, self.name, self.compute_weight(module))
def weight_norm(module, name='weight', dim=0):
r"""Applies weight normalization to a parameter in the given module.
.. math::
\mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|}
Weight normalization is a reparameterization that decouples the magnitude
of a weight tensor from its direction. This replaces the parameter specified
by :attr:`name` (e.g. ``'weight'``) with two parameters: one specifying the magnitude
(e.g. ``'weight_g'``) and one specifying the direction (e.g. ``'weight_v'``).
Weight normalization is implemented via a hook that recomputes the weight
tensor from the magnitude and direction before every :meth:`~Module.forward`
call.
By default, with ``dim=0``, the norm is computed independently per output
channel/plane. To compute a norm over the entire weight tensor, use
``dim=None``.
See https://arxiv.org/abs/1602.07868
Args:
module (Module): containing module
name (str, optional): name of weight parameter
dim (int, optional): dimension over which to compute the norm
Returns:
The original module with the weight norm hook
Example::
>>> m = weight_norm(nn.Linear(20, 40), name='weight')
>>> m
Linear(in_features=20, out_features=40, bias=True)
>>> m.weight_g.size()
torch.Size([40, 1])
>>> m.weight_v.size()
torch.Size([40, 20])
"""
WeightNorm.apply(module, name, dim)
return module
def remove_weight_norm(module, name='weight'):
r"""Removes the weight normalization reparameterization from a module.
Args:
module (Module): containing module
name (str, optional): name of weight parameter
    Example::
>>> m = weight_norm(nn.Linear(20, 40))
>>> remove_weight_norm(m)
"""
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, WeightNorm) and hook.name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError("weight_norm of '{}' not found in {}"
                     .format(name, module))

# Source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/utils/weight_norm.py
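
# Verification sketch: with the default dim=0 the recomputed weight satisfies
# w == g * v / ||v|| row-wise, and remove_weight_norm folds g and v back into
# a single parameter.
import torch
import torch.nn as nn
from torch.nn.utils import remove_weight_norm, weight_norm

m = weight_norm(nn.Linear(20, 40))
w = m.weight_g * m.weight_v / m.weight_v.norm(dim=1, keepdim=True)
assert torch.allclose(m.weight, w)
remove_weight_norm(m)
assert not hasattr(m, 'weight_g') and m.weight.shape == (40, 20)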
import warnings
import torch
from torch._six import inf
def clip_grad_norm_(parameters, max_norm, norm_type=2):
r"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
        norm_type (float or int): type of the p-norm used. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max() for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type) for p in parameters]), norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for p in parameters:
p.grad.detach().mul_(clip_coef)
return total_norm
def clip_grad_norm(parameters, max_norm, norm_type=2):
r"""Clips gradient norm of an iterable of parameters.
.. warning::
This method is now deprecated in favor of
:func:`torch.nn.utils.clip_grad_norm_`.
"""
warnings.warn("torch.nn.utils.clip_grad_norm is now deprecated in favor "
"of torch.nn.utils.clip_grad_norm_.", stacklevel=2)
return clip_grad_norm_(parameters, max_norm, norm_type)
def clip_grad_value_(parameters, clip_value):
r"""Clips gradient of an iterable of parameters at specified value.
Gradients are modified in-place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
clip_value (float or int): maximum allowed value of the gradients.
The gradients are clipped in the range
:math:`\left[\text{-clip\_value}, \text{clip\_value}\right]`
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
clip_value = float(clip_value)
for p in filter(lambda p: p.grad is not None, parameters):
        p.grad.data.clamp_(min=-clip_value, max=clip_value)

# Source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/utils/clip_grad.py
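
# Usage sketch: gradient clipping belongs between backward() and
# optimizer.step(), so the optimizer consumes the clipped gradients. The
# returned value is the pre-clipping norm, which is handy for logging.
import torch
import torch.nn as nn

model = nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss = model(torch.randn(4, 10)).sum()
loss.backward()
total_norm = nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()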
import torch
def parameters_to_vector(parameters):
r"""Convert parameters to one vector
Arguments:
parameters (Iterable[Tensor]): an iterator of Tensors that are the
parameters of a model.
Returns:
The parameters represented by a single vector
"""
# Flag for the device where the parameter is located
param_device = None
vec = []
for param in parameters:
# Ensure the parameters are located in the same device
param_device = _check_param_device(param, param_device)
vec.append(param.view(-1))
return torch.cat(vec)
def vector_to_parameters(vec, parameters):
r"""Convert one vector to the parameters
Arguments:
vec (Tensor): a single vector represents the parameters of a model.
parameters (Iterable[Tensor]): an iterator of Tensors that are the
parameters of a model.
"""
# Ensure vec of type Tensor
if not isinstance(vec, torch.Tensor):
raise TypeError('expected torch.Tensor, but got: {}'
.format(torch.typename(vec)))
# Flag for the device where the parameter is located
param_device = None
# Pointer for slicing the vector for each parameter
pointer = 0
for param in parameters:
# Ensure the parameters are located in the same device
param_device = _check_param_device(param, param_device)
# The length of the parameter
num_param = param.numel()
# Slice the vector, reshape it, and replace the old data of the parameter
param.data = vec[pointer:pointer + num_param].view_as(param).data
# Increment the pointer
pointer += num_param
def _check_param_device(param, old_param_device):
r"""This helper function is to check if the parameters are located
in the same device. Currently, the conversion between model parameters
and single vector form is not supported for multiple allocations,
e.g. parameters in different GPUs, or mixture of CPU/GPU.
Arguments:
param ([Tensor]): a Tensor of a parameter of a model
old_param_device (int): the device where the first parameter of a
model is allocated.
Returns:
old_param_device (int): report device for the first time
"""
# Meet the first parameter
if old_param_device is None:
old_param_device = param.get_device() if param.is_cuda else -1
else:
warn = False
if param.is_cuda: # Check if in same GPU
warn = (param.get_device() != old_param_device)
else: # Check if in CPU
warn = (old_param_device != -1)
if warn:
raise TypeError('Found two parameters on different devices, '
'this is currently not supported.')
    return old_param_device

# Source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/utils/convert_parameters.py
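
# Usage sketch: round-tripping parameters through a flat vector, a pattern
# useful for optimizers and solvers that operate on one parameter vector.
import torch
import torch.nn as nn
from torch.nn.utils import parameters_to_vector, vector_to_parameters

model = nn.Linear(3, 2)
vec = parameters_to_vector(model.parameters())
assert vec.numel() == sum(p.numel() for p in model.parameters())
vector_to_parameters(torch.zeros_like(vec), model.parameters())
assert all((p == 0).all() for p in model.parameters())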
from __future__ import absolute_import, division, print_function, unicode_literals
import torch.nn as nn
from torch.nn.intrinsic import ConvReLU2d
class Conv2d(nn.Conv2d):
r"""
A Conv2d module attached with FakeQuantize modules for both output
activation and weight, used for quantization aware training.
We adopt the same interface as `torch.nn.Conv2d`, please see
https://pytorch.org/docs/stable/nn.html?highlight=conv2d#torch.nn.Conv2d
for documentation.
Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
default.
Attributes:
activation_post_process: fake quant module for output activation
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nn.Conv2d
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=True, padding_mode='zeros', qconfig=None):
super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,
stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias, padding_mode=padding_mode)
assert qconfig, 'qconfig must be provided for QAT module'
self.qconfig = qconfig
self.activation_post_process = qconfig.activation()
self.weight_fake_quant = qconfig.weight()
def forward(self, input):
return self.activation_post_process(
self._conv_forward(input, self.weight_fake_quant(self.weight)))
@classmethod
def from_float(cls, mod, qconfig=None):
r"""Create a qat module from a float module or qparams_dict
Args: `mod` a float module, either produced by torch.quantization utilities
or directly from user
"""
assert type(mod) == cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + \
cls._FLOAT_MODULE.__name__
if not qconfig:
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
assert mod.qconfig, 'Input float module must have a valid qconfig'
if type(mod) == ConvReLU2d:
mod = mod[0]
qconfig = mod.qconfig
qat_conv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,
stride=mod.stride, padding=mod.padding, dilation=mod.dilation,
groups=mod.groups, bias=mod.bias is not None,
padding_mode=mod.padding_mode, qconfig=qconfig)
qat_conv.weight = mod.weight
qat_conv.bias = mod.bias
        return qat_conv

# Source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/qat/modules/conv.py
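
# Usage sketch for from_float above, assuming the default QAT qconfig exported
# by torch.quantization. The fake-quant modules run in fp32, so the converted
# module can keep training on CPU or GPU.
import torch
import torch.nn as nn
import torch.nn.qat as nnqat
from torch.quantization import default_qat_qconfig

float_conv = nn.Conv2d(3, 8, 3)
float_conv.qconfig = default_qat_qconfig
qat_conv = nnqat.Conv2d.from_float(float_conv)
out = qat_conv(torch.randn(1, 3, 8, 8))   # weight and output pass through fake quant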
from __future__ import absolute_import, division, print_function, unicode_literals
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.intrinsic import LinearReLU
class Linear(nn.Linear):
r"""
A linear module attached with FakeQuantize modules for both output
activation and weight, used for quantization aware training.
We adopt the same interface as `torch.nn.Linear`, please see
https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
for documentation.
Similar to `torch.nn.Linear`, with FakeQuantize modules initialized to
default.
Attributes:
activation_post_process: fake quant module for output activation
        weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nn.Linear
def __init__(self, in_features, out_features, bias=True,
qconfig=None):
super(Linear, self).__init__(in_features, out_features, bias)
assert qconfig, 'qconfig must be provided for QAT module'
self.qconfig = qconfig
self.activation_post_process = qconfig.activation()
self.weight_fake_quant = qconfig.weight()
def forward(self, input):
return self.activation_post_process(
F.linear(input, self.weight_fake_quant(self.weight), self.bias))
@classmethod
def from_float(cls, mod, qconfig=None):
r"""Create a qat module from a float module or qparams_dict
Args: `mod` a float module, either produced by torch.quantization utilities
or directly from user
"""
        assert type(mod) == cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + \
cls._FLOAT_MODULE.__name__
if not qconfig:
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
assert mod.qconfig, 'Input float module must have a valid qconfig'
if type(mod) == LinearReLU:
mod = mod[0]
qconfig = mod.qconfig
qat_linear = cls(mod.in_features, mod.out_features, bias=mod.bias is not None, qconfig=qconfig)
qat_linear.weight = mod.weight
qat_linear.bias = mod.bias
        return qat_linear

# Source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/qat/modules/linear.py
r""" Functional interface (quantized)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import warnings
import torch
from torch._jit_internal import List as _List
from torch.nn.modules.utils import _pair, _triple
# Although some of the functions and docstrings are mirrored from the torch.nn,
# we want to have them here for future changes.
def avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
r"""
Applies 2D average-pooling operation in :math:`kH \times kW` regions by step size
:math:`sH \times sW` steps. The number of output features is equal to the number of
input planes.
.. note:: The input quantization parameters propagate to the output.
See :class:`~torch.nn.quantized.AvgPool2d` for details and output shape.
Args:
input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
kernel_size: size of the pooling region. Can be a single number or a
tuple `(kH, kW)`
stride: stride of the pooling operation. Can be a single number or a
tuple `(sH, sW)`. Default: :attr:`kernel_size`
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padH, padW)`. Default: 0
ceil_mode: when True, will use `ceil` instead of `floor` in the formula
to compute the output shape. Default: ``False``
count_include_pad: when True, will include the zero-padding in the
averaging calculation. Default: ``True``
divisor_override: if specified, it will be used as divisor, otherwise
size of the pooling region will be used. Default: None
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.avg_pool2d' must be quantized!")
return torch.nn.functional.avg_pool2d(input, kernel_size, stride, padding,
ceil_mode, count_include_pad,
divisor_override)
def avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
r"""
    Applies 3D average-pooling operation in :math:`kD \times kH \times kW` regions by step size
:math:`sD \times sH \times sW` steps. The number of output features is equal to the number of
input planes.
.. note:: The input quantization parameters propagate to the output.
Args:
        input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iD , iH , iW)`
kernel_size: size of the pooling region. Can be a single number or a
tuple `(kD, kH, kW)`
stride: stride of the pooling operation. Can be a single number or a
tuple `(sD, sH, sW)`. Default: :attr:`kernel_size`
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padD, padH, padW)`. Default: 0
ceil_mode: when True, will use `ceil` instead of `floor` in the formula
to compute the output shape. Default: ``False``
count_include_pad: when True, will include the zero-padding in the
averaging calculation. Default: ``True``
divisor_override: if specified, it will be used as divisor, otherwise
size of the pooling region will be used. Default: None
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.avg_pool3d' must be quantized!")
return torch.nn.functional.avg_pool3d(input, kernel_size, stride, padding,
ceil_mode, count_include_pad,
divisor_override)
def adaptive_avg_pool2d(input, output_size):
# type: (Tensor, BroadcastingList2[int]) -> Tensor
r"""
Applies a 2D adaptive average pooling over a quantized input signal composed
of several quantized input planes.
    .. note:: The input quantization parameters propagate to the output.
See :class:`~torch.nn.quantized.AdaptiveAvgPool2d` for details and output shape.
Args:
output_size: the target output size (single integer or
double-integer tuple)
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.adaptive_avg_pool2d' must be quantized!")
return torch.nn.functional.adaptive_avg_pool2d(input, output_size)
def conv2d(input, weight, bias,
stride=1, padding=0, dilation=1, groups=1,
padding_mode='zeros',
scale=1.0, zero_point=0,
dtype=torch.quint8):
r"""
Applies a 2D convolution over a quantized 2D input composed of several input
planes.
See :class:`~torch.nn.quantized.Conv2d` for details and output shape.
Args:
input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
weight: quantized filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)`
bias: **non-quantized** bias tensor of shape :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sH, sW)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a
single number or a tuple `(padH, padW)`. Default: 0
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dH, dW)`. Default: 1
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
number of groups. Default: 1
padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
scale: quantization scale for the output. Default: 1.0
zero_point: quantization zero_point for the output. Default: 0
dtype: quantization data type to use. Default: ``torch.quint8``
Examples::
>>> from torch.nn.quantized import functional as qF
>>> filters = torch.randn(8, 4, 3, 3, dtype=torch.float)
>>> inputs = torch.randn(1, 4, 5, 5, dtype=torch.float)
>>> bias = torch.randn(8, dtype=torch.float)
>>>
>>> scale, zero_point = 1.0, 0
>>> dtype_inputs = torch.quint8
>>> dtype_filters = torch.qint8
>>>
>>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
>>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
>>> qF.conv2d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
""" # noqa: E501
if padding_mode != 'zeros':
raise NotImplementedError("Only zero-padding is supported!")
if input.dtype != torch.quint8:
raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
if weight.dtype != torch.qint8:
raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
if input.ndim != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
prepacked_weight = torch.ops.quantized.conv2d_prepack(
weight, bias, stride, padding, dilation, groups)
return torch.ops.quantized.conv2d(input,
prepacked_weight,
stride, padding, dilation,
groups, scale, zero_point)
def conv3d(input, weight, bias, stride=1, padding=0, dilation=1, groups=1,
padding_mode='zeros', scale=1.0, zero_point=0, dtype=torch.quint8):
r"""
Applies a 3D convolution over a quantized 3D input composed of several input
planes.
See :class:`~torch.nn.quantized.Conv3d` for details and output shape.
Args:
input: quantized input tensor of shape
:math:`(\text{minibatch} , \text{in\_channels} , iD , iH , iW)`
weight: quantized filters of shape
:math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kD , kH , kW)`
bias: **non-quantized** bias tensor of shape
:math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sD, sH, sW)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a
single number or a tuple `(padD, padH, padW)`. Default: 0
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dD, dH, dW)`. Default: 1
groups: split input into groups, :math:`\text{in\_channels}` should be
divisible by the number of groups. Default: 1
padding_mode: the padding mode to use. Only "zeros" is supported for
quantized convolution at the moment. Default: "zeros"
scale: quantization scale for the output. Default: 1.0
zero_point: quantization zero_point for the output. Default: 0
dtype: quantization data type to use. Default: ``torch.quint8``
Examples::
>>> from torch.nn.quantized import functional as qF
>>> filters = torch.randn(8, 4, 3, 3, 3, dtype=torch.float)
>>> inputs = torch.randn(1, 4, 5, 5, 5, dtype=torch.float)
>>> bias = torch.randn(8, dtype=torch.float)
>>>
>>> scale, zero_point = 1.0, 0
>>> dtype_inputs = torch.quint8
>>> dtype_filters = torch.qint8
>>>
>>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
>>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
>>> qF.conv3d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
""" # noqa: E501
if padding_mode != 'zeros':
raise NotImplementedError("Only zero-padding is supported!")
if input.dtype != torch.quint8:
raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
if weight.dtype != torch.qint8:
raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
if input.ndim != 5:
raise ValueError("Input shape must be `(N, C, D, H, W)`!")
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
prepacked_weight = torch.ops.quantized.conv3d_prepack(
weight, bias, stride, padding, dilation, groups)
return torch.ops.quantized.conv3d(
input, prepacked_weight, stride, padding, dilation, groups, scale,
zero_point)
def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
r"""Down/up samples the input to either the given :attr:`size` or the given
:attr:`scale_factor`
See :func:`torch.nn.functional.interpolate` for implementation details.
The input dimensions are interpreted in the form:
`mini-batch x channels x [optional depth] x [optional height] x width`.
.. note:: The input quantization parameters propagate to the output.
.. note:: Only 2D/3D input is supported for quantized inputs
.. note:: Only the following modes are supported for the quantized inputs:
- `bilinear`
- `nearest`
Args:
input (Tensor): the input tensor
size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
output spatial size.
scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple.
mode (str): algorithm used for upsampling:
``'nearest'`` | ``'bilinear'``
align_corners (bool, optional): Geometrically, we consider the pixels of the
input and output as squares rather than points.
If set to ``True``, the input and output tensors are aligned by the
center points of their corner pixels, preserving the values at the corner pixels.
If set to ``False``, the input and output tensors are aligned by the corner
points of their corner pixels, and the interpolation uses edge value padding
for out-of-boundary values, making this operation *independent* of input size
when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
is ``'bilinear'``.
Default: ``False``
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.interpolate' must be quantized!")
return torch.nn.functional.interpolate(input, size, scale_factor, mode,
align_corners)
def linear(input, weight, bias=None, scale=None, zero_point=None):
# type: (Tensor, Tensor, Optional[Tensor], Optional[float], Optional[int]) -> Tensor
r"""
Applies a linear transformation to the incoming quantized data:
:math:`y = xA^T + b`.
See :class:`~torch.nn.quantized.Linear`
.. note::
Current implementation packs weights on every call, which has penalty on performance.
If you want to avoid the overhead, use :class:`~torch.nn.quantized.Linear`.
Args:
input (Tensor): Quantized input of type `torch.quint8`
weight (Tensor): Quantized weight of type `torch.qint8`
bias (Tensor): None or fp32 bias of type `torch.float`
scale (double): output scale. If None, derived from the input scale
zero_point (long): output zero point. If None, derived from the input zero_point
Shape:
- Input: :math:`(N, *, in\_features)` where `*` means any number of
additional dimensions
- Weight: :math:`(out\_features, in\_features)`
- Bias: :math:`(out\_features)`
- Output: :math:`(N, *, out\_features)`
"""
if scale is None:
scale = input.q_scale()
if zero_point is None:
zero_point = input.q_zero_point()
_packed_params = torch.ops.quantized.linear_prepack(weight, bias)
return torch.ops.quantized.linear(input, _packed_params, scale, zero_point)
def max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False):
r"""Applies a 2D max pooling over a quantized input signal composed of
several quantized input planes.
.. note:: The input quantization parameters are propagated to the output.
See :class:`~torch.nn.quantized.MaxPool2d` for details.
"""
if return_indices:
raise NotImplementedError("return_indices is not yet implemented!")
if stride is None:
stride = torch.jit.annotate(_List[int], [])
return torch.nn.functional.max_pool2d(input, kernel_size, stride, padding,
dilation, ceil_mode, return_indices)
def relu(input, inplace=False):
# type: (Tensor, bool) -> Tensor
r"""relu(input, inplace=False) -> Tensor
Applies the rectified linear unit function element-wise.
See :class:`~torch.nn.quantized.ReLU` for more details.
Args:
input: quantized input
inplace: perform the computation inplace
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.relu' must be quantized!")
if inplace:
return torch.relu_(input)
else:
return torch.relu(input)
def leaky_relu(input, negative_slope=0.01, inplace=False,
scale=None, zero_point=None):
# type: (Tensor, float, bool, float, int) -> Tensor
r"""
    Quantized version of
    leaky_relu(input, negative_slope=0.01, inplace=False, scale, zero_point) -> Tensor
    Applies element-wise,
    :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`
    Args:
        input: Quantized input
        negative_slope: The slope of the negative input
        inplace: Inplace modification of the input tensor
        scale, zero_point: Scale and zero point of the output tensor.
See :class:`~torch.nn.LeakyReLU` for more details.
"""
if scale is not None and zero_point is not None:
assert not inplace, "Cannot rescale with `inplace`"
output = torch.quantize_per_tensor(torch.zeros(input.shape),
scale, int(zero_point), input.dtype)
torch._C._nn.leaky_relu(input, negative_slope, out=output)
return output
if inplace:
result = torch._C._nn.leaky_relu_(input, negative_slope)
else:
result = torch._C._nn.leaky_relu(input, negative_slope)
return result
def hardtanh(input, min_val=-1., max_val=1., inplace=False):
# type: (Tensor, float, float, bool) -> Tensor
r"""
hardtanh(input, min_val=-1., max_val=1., inplace=False) -> Tensor
Applies the quantized HardTanh function element-wise, with scale and
zero-point carried over from the input tensor. See :class:`~torch.nn.Hardtanh`
for more details.
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.hardtanh' must be quantized!")
if inplace:
return torch._C._nn.hardtanh_(input, min_val, max_val)
return torch._C._nn.hardtanh(input, min_val, max_val)
def elu(input, alpha=1., inplace=False, scale=None, zero_point=None):
r"""
Applies the quantized ELU function element-wise:
.. math::
\text{ELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))
Args:
input: quantized input
alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0
inplace: Inplace modification of the input tensor
scale, zero_point: Scale and zero point of the output tensor.
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.elu' must be quantized!")
if (scale is not None) != (zero_point is not None):
raise ValueError("Either both or none of (scale, zero_point) must be specified!")
if scale is not None and zero_point is not None:
assert not inplace, "Cannot rescale with `inplace`"
output = torch.quantize_per_tensor(torch.zeros(input.shape),
scale, int(zero_point), input.dtype)
torch._C._nn.elu(input, alpha, out=output)
return output
elif inplace:
return torch._C._nn.elu_(input, alpha)
else:
return torch._C._nn.elu(input, alpha)
def clamp(input, min_, max_):
# type: (Tensor, float, float) -> Tensor
r"""float(input, min_, max_) -> Tensor
Applies the clamp function element-wise.
See :class:`~torch.nn.quantized.clamp` for more details.
Args:
input: quantized input
min_: minimum value for clamping
max_: maximum value for clamping
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.clamp' must be quantized!")
return torch.clamp(input, min_, max_)
def upsample(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
r"""Upsamples the input to either the given :attr:`size` or the given
:attr:`scale_factor`
.. warning::
This function is deprecated in favor of
:func:`torch.nn.quantized.functional.interpolate`.
This is equivalent with ``nn.quantized.functional.interpolate(...)``.
See :func:`torch.nn.functional.interpolate` for implementation details.
The input dimensions are interpreted in the form:
`mini-batch x channels x [optional depth] x [optional height] x width`.
.. note:: The input quantization parameters propagate to the output.
.. note:: Only 2D input is supported for quantized inputs
.. note:: Only the following modes are supported for the quantized inputs:
- `bilinear`
- `nearest`
Args:
input (Tensor): quantized input tensor
size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
output spatial size.
        scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple.
        mode (str): algorithm used for upsampling:
``'nearest'`` | ``'bilinear'``
align_corners (bool, optional): Geometrically, we consider the pixels of the
input and output as squares rather than points.
If set to ``True``, the input and output tensors are aligned by the
center points of their corner pixels, preserving the values at the corner pixels.
If set to ``False``, the input and output tensors are aligned by the corner
points of their corner pixels, and the interpolation uses edge value padding
for out-of-boundary values, making this operation *independent* of input size
when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
is ``'bilinear'``.
Default: ``False``
.. warning::
With ``align_corners = True``, the linearly interpolating modes
(`bilinear`) don't proportionally align the
output and input pixels, and thus the output values can depend on the
input size. This was the default behavior for these modes up to version
0.3.1. Since then, the default behavior is ``align_corners = False``.
See :class:`~torch.nn.Upsample` for concrete examples on how this
affects the outputs.
"""
warnings.warn("nn.quantized.functional.upsample is deprecated. Use nn.quantized.functional.interpolate instead.")
return interpolate(input, size, scale_factor, mode, align_corners)
def upsample_bilinear(input, size=None, scale_factor=None):
r"""Upsamples the input, using bilinear upsampling.
.. warning::
This function is deprecated in favor of
:func:`torch.nn.quantized.functional.interpolate`.
This is equivalent with
``nn.quantized.functional.interpolate(..., mode='bilinear', align_corners=True)``.
.. note:: The input quantization parameters propagate to the output.
.. note:: Only 2D inputs are supported
Args:
input (Tensor): quantized input
size (int or Tuple[int, int]): output spatial size.
scale_factor (int or Tuple[int, int]): multiplier for spatial size
"""
# DeprecationWarning is ignored by default
warnings.warn("nn.quantized.functional.upsample_bilinear is deprecated. Use nn.quantized.functional.interpolate instead.")
return interpolate(input, size, scale_factor, mode='bilinear', align_corners=True)
def upsample_nearest(input, size=None, scale_factor=None):
r"""Upsamples the input, using nearest neighbours' pixel values.
.. warning::
This function is deprecated in favor of
:func:`torch.nn.quantized.functional.interpolate`.
This is equivalent with ``nn.quantized.functional.interpolate(..., mode='nearest')``.
.. note:: The input quantization parameters propagate to the output.
.. note:: Only 2D inputs are supported
Args:
input (Tensor): quantized input
size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial
size.
scale_factor (int): multiplier for spatial size. Has to be an integer.
"""
# DeprecationWarning is ignored by default
warnings.warn("nn.quantized.functional.upsample_nearest is deprecated. Use nn.quantized.functional.interpolate instead.")
    return interpolate(input, size, scale_factor, mode='nearest')

# Source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/quantized/functional.py
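
# Usage sketch for the functional quantized linear above, handy for one-off
# checks since it packs the weight on every call. Assumes a quantized engine
# (fbgemm or qnnpack) is available in the build.
import torch
from torch.nn.quantized import functional as qF

x = torch.quantize_per_tensor(torch.randn(2, 4), scale=0.1, zero_point=64,
                              dtype=torch.quint8)
w = torch.quantize_per_tensor(torch.randn(3, 4), scale=0.05, zero_point=0,
                              dtype=torch.qint8)
bias = torch.randn(3)                      # bias stays in torch.float
y = qF.linear(x, w, bias, scale=0.2, zero_point=0)
assert y.dtype == torch.quint8 and y.shape == (2, 3)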
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn.quantized.functional
class ReLU(torch.nn.ReLU):
r"""Applies quantized rectified linear unit function element-wise:
:math:`\text{ReLU}(x)= \max(x_0, x)`, where :math:`x_0` is the zero point.
Please see https://pytorch.org/docs/stable/nn.html#torch.nn.ReLU
for more documentation on ReLU.
Args:
inplace: (Currently not supported) can optionally do the operation in-place.
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = nn.quantized.ReLU()
>>> input = torch.randn(2)
>>> input = torch.quantize_per_tensor(input, 1.0, 0, dtype=torch.qint32)
>>> output = m(input)
"""
def __init__(self, inplace=False):
super(ReLU, self).__init__(inplace)
self.inplace = inplace
def forward(self, input):
return torch.nn.quantized.functional.relu(input, inplace=self.inplace)
def _get_name(self):
return 'QuantizedReLU'
@staticmethod
def from_float(mod):
return ReLU(mod.inplace)
class ReLU6(torch.nn.ReLU):
r"""Applies the element-wise function:
:math:`\text{ReLU6}(x) = \min(\max(x_0, x), q(6))`, where :math:`x_0` is the
zero_point, and :math:`q(6)` is the quantized representation of number 6.
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: scripts/activation_images/ReLU6.png
Examples::
>>> m = nn.quantized.ReLU6()
>>> input = torch.randn(2)
>>> input = torch.quantize_per_tensor(input, 1.0, 0, dtype=torch.qint32)
>>> output = m(input)
"""
def __init__(self, inplace=False):
super(ReLU6, self).__init__(inplace)
self.inplace = inplace
def forward(self, input):
return torch.ops.quantized.relu6(input, self.inplace)
def _get_name(self):
return 'QuantizedReLU6'
@staticmethod
def from_float(mod):
        return ReLU6(mod.inplace)

# Source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/quantized/modules/activation.py
import torch
from torch._ops import ops
class FloatFunctional(torch.nn.Module):
r"""State collector class for float operatitons.
The instance of this class can be used instead of the ``torch.`` prefix for
some operations. See example usage below.
.. note::
This class does not provide a ``forward`` hook. Instead, you must use
one of the underlying functions (e.g. ``add``).
Examples::
>>> f_add = FloatFunctional()
>>> a = torch.tensor(3.0)
>>> b = torch.tensor(4.0)
>>> f_add.add(a, b) # Equivalent to ``torch.add(a, b)``
Valid operation names:
- add
- cat
- mul
- add_relu
- add_scalar
- mul_scalar
"""
def __init__(self):
super(FloatFunctional, self).__init__()
self.activation_post_process = torch.nn.Identity()
def forward(self, x):
raise RuntimeError("FloatFunctional is not intended to use the " +
"'forward'. Please use the underlying operation")
r"""Operation equivalent to ``torch.add(Tensor, Tensor)``"""
def add(self, x, y):
# type: (Tensor, Tensor) -> Tensor
r = torch.add(x, y)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.add(Tensor, float)``"""
def add_scalar(self, x, y):
# type: (Tensor, float) -> Tensor
r = torch.add(x, y)
# No observer needed for scalar add
return r
r"""Operation equivalent to ``torch.mul(Tensor, Tensor)``"""
def mul(self, x, y):
# type: (Tensor, Tensor) -> Tensor
r = torch.mul(x, y)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.mul(Tensor, float)``"""
def mul_scalar(self, x, y):
# type: (Tensor, float) -> Tensor
r = torch.mul(x, y)
# No observer needed for scalar multiply
return r
r"""Operation equivalent to ``torch.cat``"""
def cat(self, x, dim=0):
# type: (List[Tensor], int) -> Tensor
r = torch.cat(x, dim=dim)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``relu(torch.add(x,y))``"""
def add_relu(self, x, y):
# type: (Tensor, Tensor) -> Tensor
r = torch.add(x, y)
r = torch.nn.functional.relu(r)
r = self.activation_post_process(r)
return r
class QFunctional(torch.nn.Module):
r"""Wrapper class for quantized operatitons.
The instance of this class can be used instead of the
``torch.ops.quantized`` prefix. See example usage below.
.. note::
This class does not provide a ``forward`` hook. Instead, you must use
one of the underlying functions (e.g. ``add``).
Examples::
>>> q_add = QFunctional()
>>> a = torch.quantize_per_tensor(torch.tensor(3.0), 1.0, 0, torch.qint32)
>>> b = torch.quantize_per_tensor(torch.tensor(4.0), 1.0, 0, torch.qint32)
>>> q_add.add(a, b) # Equivalent to ``torch.ops.quantized.add(a, b, 1.0, 0)``
Valid operation names:
- add
- cat
- mul
- add_relu
- add_scalar
- mul_scalar
"""
def __init__(self):
super(QFunctional, self).__init__()
self.scale = 1.0
self.zero_point = 0
def _save_to_state_dict(self, destination, prefix, keep_vars):
super(QFunctional, self)._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + 'scale'] = torch.tensor(self.scale)
destination[prefix + 'zero_point'] = torch.tensor(self.zero_point)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
self.scale = float(state_dict.pop(prefix + 'scale'))
self.zero_point = int(state_dict.pop(prefix + 'zero_point'))
super(QFunctional, self)._load_from_state_dict(state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
def _get_name(self):
return 'QFunctional'
def extra_repr(self):
return 'scale={}, zero_point={}'.format(
self.scale, self.zero_point
)
def forward(self, x):
raise RuntimeError("Functional is not intended to use the " +
"'forward'. Please use the underlying operation")
r"""Operation equivalent to ``torch.ops.quantized.add``"""
def add(self, x, y):
# type: (Tensor, Tensor) -> Tensor
return ops.quantized.add(x, y, scale=self.scale,
zero_point=self.zero_point)
r"""Operation equivalent to ``torch.ops.quantized.add(Tensor, float)``"""
def add_scalar(self, x, y):
# type: (Tensor, float) -> Tensor
return ops.quantized.add_scalar(x, y)
r"""Operation equivalent to ``torch.ops.quantized.mul(Tensor, Tensor)``"""
def mul(self, x, y):
# type: (Tensor, Tensor) -> Tensor
return ops.quantized.mul(x, y, scale=self.scale,
zero_point=self.zero_point)
r"""Operation equivalent to ``torch.ops.quantized.mul(Tensor, float)``"""
def mul_scalar(self, x, y):
# type: (Tensor, float) -> Tensor
return ops.quantized.mul_scalar(x, y)
r"""Operation equivalent to ``torch.ops.quantized.cat``"""
def cat(self, x, dim=0):
# type: (List[Tensor], int) -> Tensor
return ops.quantized.cat(x, scale=self.scale,
zero_point=self.zero_point, dim=dim)
r"""Operation equivalent to ``torch.ops.quantized.add_relu``"""
def add_relu(self, x, y):
# type: (Tensor, Tensor) -> Tensor
return ops.quantized.add_relu(x, y, scale=self.scale,
zero_point=self.zero_point)
@classmethod
def from_float(cls, mod):
assert type(mod) == FloatFunctional,\
"QFunctional.from_float expects an instance of FloatFunctional"
scale, zero_point = mod.activation_post_process.calculate_qparams()
new_mod = QFunctional()
new_mod.scale = float(scale)
new_mod.zero_point = int(zero_point)
        return new_mod

# Source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/quantized/modules/functional_modules.py
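
# Usage sketch for QFunctional.from_float above: FloatFunctional collects
# observer statistics during calibration, and from_float turns them into the
# output scale/zero_point. Here an observer is attached by hand instead of
# going through torch.quantization.prepare.
import torch
from torch.nn.quantized import FloatFunctional, QFunctional
from torch.quantization import MinMaxObserver

ff = FloatFunctional()
ff.activation_post_process = MinMaxObserver()   # stand-in for prepare()
ff.add(torch.randn(4), torch.randn(4))          # calibration pass
qf = QFunctional.from_float(ff)
print(qf)   # QFunctional(scale=..., zero_point=...)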
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn.quantized.functional
class BatchNorm2d(torch.nn.BatchNorm2d):
r"""Applies Quantized Batch Normalization over a 4D input (a mini-batch of 2D inputs
with additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
    Examples::
        >>> m = nn.quantized.BatchNorm2d(100)
        >>> input = torch.randn(20, 100, 35, 45)
        >>> quantized_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(quantized_input)
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1):
super(BatchNorm2d, self).__init__(num_features)
self.eps = eps
self.scale = 1.0
self.zero_point = 0
def forward(self, input):
return torch.ops.quantized.batch_norm(input, self.weight, self.bias, self.running_mean,
self.running_var, self.eps, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedBatchNorm2d'
@classmethod
def from_float(cls, mod):
assert type(mod) == torch.nn.BatchNorm2d,\
"QuantizedBatchNorm2d expects an instance of BatchNorm2d"
scale, zero_point = mod.activation_post_process.calculate_qparams()
new_mod = BatchNorm2d(mod.num_features, mod.eps)
new_mod.scale = float(scale)
new_mod.zero_point = int(zero_point)
return new_mod
class BatchNorm3d(torch.nn.BatchNorm3d):
r"""Applies Quantized Batch Normalization over a 5D input (a mini-batch of 3D inputs
with additional channel dimension) as described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`_ .
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, D, H, W)` slices, it's common terminology to call this Spatial Batch Normalization.
Args:
num_features: :math:`C` from an expected input of size
:math:`(N, C, D, H, W)`
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
    Examples::
        >>> m = nn.quantized.BatchNorm3d(100)
        >>> input = torch.randn(20, 100, 25, 35, 45)
        >>> quantized_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
        >>> output = m(quantized_input)
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1):
super(BatchNorm3d, self).__init__(num_features)
self.eps = eps
self.scale = 1.0
self.zero_point = 0
def forward(self, input):
return torch.ops.quantized.batch_norm3d(input, self.weight, self.bias, self.running_mean,
self.running_var, self.eps, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedBatchNorm3d'
@classmethod
def from_float(cls, mod):
assert type(mod) == torch.nn.BatchNorm3d,\
"QuantizedBatchNorm3d expects an instance of BatchNorm3d"
scale, zero_point = mod.activation_post_process.calculate_qparams()
new_mod = BatchNorm3d(mod.num_features, mod.eps)
new_mod.scale = float(scale)
new_mod.zero_point = int(zero_point)
        return new_mod

# Source: /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/quantized/modules/batchnorm.py
import torch
from torch.nn.modules.pooling import MaxPool2d
from .activation import ReLU, ReLU6
from .batchnorm import BatchNorm2d, BatchNorm3d
from .conv import Conv2d, Conv3d
from .linear import Linear
from .functional_modules import FloatFunctional, QFunctional
class Quantize(torch.nn.Module):
r"""Quantizes an incoming tensor
Args:
`scale`: scale of the output Quantized Tensor
`zero_point`: zero_point of output Quantized Tensor
`dtype`: data type of output Quantized Tensor
Attributes:
`scale`, `zero_point`, `dtype`
Examples::
>>> t = torch.tensor([[1., -1.], [1., -1.]])
>>> scale, zero_point, dtype = 1.0, 2, torch.qint8
>>> qm = Quantize(scale, zero_point, dtype)
>>> qt = qm(t)
>>> print(qt)
tensor([[ 1., -1.],
[ 1., -1.]], size=(2, 2), dtype=torch.qint8, scale=1.0, zero_point=2)
"""
def __init__(self, scale, zero_point, dtype):
super(Quantize, self).__init__()
self.register_buffer('scale', torch.tensor([scale]))
self.register_buffer('zero_point', torch.tensor([zero_point], dtype=torch.long))
self.dtype = dtype
def forward(self, X):
return torch.quantize_per_tensor(X, float(self.scale),
int(self.zero_point), self.dtype)
@staticmethod
def from_float(mod):
assert hasattr(mod, 'activation_post_process')
scale, zero_point = mod.activation_post_process.calculate_qparams()
return Quantize(scale.float().item(), zero_point.long().item(), mod.activation_post_process.dtype)
def extra_repr(self):
return 'scale={}, zero_point={}, dtype={}'.format(self.scale, self.zero_point, self.dtype)
class DeQuantize(torch.nn.Module):
r"""Dequantizes an incoming tensor
Examples::
>>> input = torch.tensor([[1., -1.], [1., -1.]])
>>> scale, zero_point, dtype = 1.0, 2, torch.qint8
>>> qm = Quantize(scale, zero_point, dtype)
>>> quantized_input = qm(input)
>>> dqm = DeQuantize()
>>> dequantized = dqm(quantized_input)
>>> print(dequantized)
tensor([[ 1., -1.],
[ 1., -1.]], dtype=torch.float32)
"""
def __init__(self):
super(DeQuantize, self).__init__()
def forward(self, Xq):
return Xq.dequantize()
@staticmethod
def from_float(mod):
return DeQuantize()
__all__ = [
'BatchNorm2d',
'BatchNorm3d',
'Conv2d',
'Conv3d',
'DeQuantize',
'Linear',
'MaxPool2d',
'Quantize',
'ReLU',
'ReLU6',
# Wrapper modules
'FloatFunctional',
'QFunctional',
] | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/nn/quantized/modules/__init__.py | 0.961389 | 0.690102 | __init__.py | pypi |
import collections
import contextlib
import warnings
import torch
from . import is_initialized, _get_device_index, _lazy_init
def _host_allocator():
_lazy_init()
return torch._C._cuda_cudaHostAllocator()
@contextlib.contextmanager
def _free_mutex():
torch._C._cuda_lock_mutex()
try:
yield
finally:
torch._C._cuda_unlock_mutex()
def caching_allocator_alloc(size, device=None, stream=None):
r"""Performs a memory allocation using the CUDA memory allocator.
Memory is allocated for a given device and a stream; this
function is intended to be used for interoperability with other
frameworks. Allocated memory is released through
:func:`~torch.cuda.caching_allocator_delete`.
Arguments:
size (int): number of bytes to be allocated.
device (torch.device or int, optional): selected device. If it is
``None`` the default CUDA device is used.
stream (torch.cuda.Stream or int, optional): selected stream. If it is ``None`` then
the default stream for the selected device is used.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
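Example (an illustrative sketch: requires a CUDA device, and the returned
pointer value is opaque and will vary between runs)::
>>> ptr = torch.cuda.caching_allocator_alloc(1024)
>>> # hand ``ptr`` to another framework expecting a raw device pointer ...
>>> torch.cuda.caching_allocator_delete(ptr)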
"""
if device is None:
device = torch.cuda.current_device()
device = _get_device_index(device)
if stream is None:
stream = torch.cuda.current_stream(device)
if isinstance(stream, torch.cuda.streams.Stream):
stream = stream.cuda_stream
if not isinstance(stream, int):
raise TypeError('Invalid type for stream argument, must be '
'`torch.cuda.Stream` or `int` representing a pointer '
'to an existing stream')
with torch.cuda.device(device):
return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream)
def caching_allocator_delete(mem_ptr):
r"""Deletes memory allocated using the CUDA memory allocator.
Memory allocated with :func:`~torch.cuda.caching_allocator_alloc`.
is freed here. The associated device and stream are tracked inside
the allocator.
Arguments:
mem_ptr (int): memory address to be freed by the allocator.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
torch._C._cuda_cudaCachingAllocator_raw_delete(mem_ptr)
def empty_cache():
r"""Releases all unoccupied cached memory currently held by the caching
allocator so that those can be used in other GPU application and visible in
`nvidia-smi`.
.. note::
:func:`~torch.cuda.empty_cache` doesn't increase the amount of GPU
memory available for PyTorch. However, it may help reduce fragmentation
of GPU memory in certain cases. See :ref:`cuda-memory-management` for
more details about GPU memory management.
"""
if is_initialized():
torch._C._cuda_emptyCache()
def memory_stats(device=None):
r"""Returns a dictionary of CUDA memory allocator statistics for a
given device.
The return value of this function is a dictionary of statistics, each of
which is a non-negative integer.
Core statistics:
- ``"allocated.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
number of allocation requests received by the memory allocator.
- ``"allocated_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
amount of allocated memory.
- ``"segment.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
number of reserved segments from ``cudaMalloc()``.
- ``"reserved_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
amount of reserved memory.
- ``"active.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
number of active memory blocks.
- ``"active_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
amount of active memory.
- ``"inactive_split.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
number of inactive, non-releasable memory blocks.
- ``"inactive_split_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
amount of inactive, non-releasable memory.
For these core statistics, values are broken down as follows.
Pool type:
- ``all``: combined statistics across all memory pools.
- ``large_pool``: statistics for the large allocation pool
(as of October 2019, for size >= 1MB allocations).
- ``small_pool``: statistics for the small allocation pool
(as of October 2019, for size < 1MB allocations).
Metric type:
- ``current``: current value of this metric.
- ``peak``: maximum value of this metric.
- ``allocated``: historical total increase in this metric.
- ``freed``: historical total decrease in this metric.
In addition to the core statistics, we also provide some simple event
counters:
- ``"num_alloc_retries"``: number of failed ``cudaMalloc`` calls that
result in a cache flush and retry.
- ``"num_ooms"``: number of out-of-memory errors thrown.
Arguments:
device (torch.device or int, optional): selected device. Returns
statistics for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
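Example (an illustrative sketch: requires a CUDA device; the concrete
numbers depend on your program's allocation history)::
>>> stats = torch.cuda.memory_stats()
>>> stats["allocated_bytes.all.current"]  # bytes currently allocated
>>> stats["num_ooms"]  # out-of-memory errors seen so far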
"""
result = []
def _recurse_add_to_result(prefix, obj):
if isinstance(obj, dict):
if len(prefix) > 0:
prefix += "."
for k, v in obj.items():
_recurse_add_to_result(prefix + k, v)
else:
result.append((prefix, obj))
stats = memory_stats_as_nested_dict(device=device)
_recurse_add_to_result("", stats)
result.sort()
return collections.OrderedDict(result)
def memory_stats_as_nested_dict(device=None):
r"""Returns the result of :func:`~torch.cuda.memory_stats` as a nested dictionary."""
device = _get_device_index(device, optional=True)
return torch._C._cuda_memoryStats(device)
def reset_accumulated_memory_stats(device=None):
r"""Resets the "accumulated" (historical) stats tracked by the CUDA memory allocator.
See :func:`~torch.cuda.memory_stats` for details. Accumulated stats correspond to
the `"allocated"` and `"freed"` keys in each individual stat dict, as well as
`"num_alloc_retries"` and `"num_ooms"`.
Arguments:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
device = _get_device_index(device, optional=True)
return torch._C._cuda_resetAccumulatedMemoryStats(device)
def reset_peak_memory_stats(device=None):
r"""Resets the "peak" stats tracked by the CUDA memory allocator.
See :func:`~torch.cuda.memory_stats` for details. Peak stats correspond to the
`"peak"` key in each individual stat dict.
Arguments:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
device = _get_device_index(device, optional=True)
return torch._C._cuda_resetPeakMemoryStats(device)
def reset_max_memory_allocated(device=None):
r"""Resets the starting point in tracking maximum GPU memory occupied by
tensors for a given device.
See :func:`~torch.cuda.max_memory_allocated` for details.
Arguments:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. warning::
This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets
/all/ peak memory stats.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
warnings.warn(
"torch.cuda.reset_max_memory_allocated now calls torch.cuda.reset_peak_memory_stats, "
"which resets /all/ peak memory stats.",
DeprecationWarning)
return reset_peak_memory_stats(device=device)
def reset_max_memory_cached(device=None):
r"""Resets the starting point in tracking maximum GPU memory managed by the
caching allocator for a given device.
See :func:`~torch.cuda.max_memory_cached` for details.
Arguments:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. warning::
This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets
/all/ peak memory stats.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
warnings.warn(
"torch.cuda.reset_max_memory_cached now calls torch.cuda.reset_peak_memory_stats, "
"which resets /all/ peak memory stats.",
DeprecationWarning)
return reset_peak_memory_stats(device=device)
def memory_allocated(device=None):
r"""Returns the current GPU memory occupied by tensors in bytes for a given
device.
Arguments:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
This is likely less than the amount shown in `nvidia-smi` since some
unused memory can be held by the caching allocator and some context
needs to be created on GPU. See :ref:`cuda-memory-management` for more
details about GPU memory management.
"""
return memory_stats(device=device)["allocated_bytes.all.current"]
def max_memory_allocated(device=None):
r"""Returns the maximum GPU memory occupied by tensors in bytes for a given
device.
By default, this returns the peak allocated memory since the beginning of
this program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to
reset the starting point in tracking this metric. For example, these two
functions can measure the peak allocated memory usage of each iteration in a
training loop.
Arguments:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
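Example (an illustrative sketch of the per-iteration pattern described
above; ``loader`` and ``train_step`` are stand-ins for your own code)::
>>> for batch in loader:
...     torch.cuda.reset_peak_memory_stats()
...     train_step(batch)
...     peak_bytes = torch.cuda.max_memory_allocated()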
"""
return memory_stats(device=device)["allocated_bytes.all.peak"]
def memory_reserved(device=None):
r"""Returns the current GPU memory managed by the caching allocator in bytes
for a given device.
Arguments:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
return memory_stats(device=device)["reserved_bytes.all.current"]
def max_memory_reserved(device=None):
r"""Returns the maximum GPU memory managed by the caching allocator in bytes
for a given device.
By default, this returns the peak cached memory since the beginning of this
program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to reset
the starting point in tracking this metric. For example, these two functions
can measure the peak cached memory amount of each iteration in a training
loop.
Arguments:
device (torch.device or int, optional): selected device. Returns
statistic for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
return memory_stats(device=device)["reserved_bytes.all.peak"]
def memory_cached(device=None):
r"""Deprecated; see :func:`~torch.cuda.memory_reserved`."""
warnings.warn(
"torch.cuda.memory_cached has been renamed to torch.cuda.memory_reserved",
DeprecationWarning)
return memory_reserved(device=device)
def max_memory_cached(device=None):
r"""Deprecated; see :func:`~torch.cuda.max_memory_reserved`."""
warnings.warn(
"torch.cuda.max_memory_cached has been renamed to torch.cuda.max_memory_reserved",
DeprecationWarning)
return max_memory_reserved(device=device)
def memory_snapshot():
r"""Returns a snapshot of the CUDA memory allocator state across all devices.
Interpreting the output of this function requires familiarity with the
memory allocator internals.
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
"""
return torch._C._cuda_memorySnapshot()
def memory_summary(device=None, abbreviated=False):
r"""Returns a human-readable printout of the current memory allocator
statistics for a given device.
This can be useful to display periodically during training, or when
handling out-of-memory exceptions.
Arguments:
device (torch.device or int, optional): selected device. Returns
printout for the current device, given by :func:`~torch.cuda.current_device`,
if :attr:`device` is ``None`` (default).
abbreviated (bool, optional): whether to return an abbreviated summary
(default: False).
.. note::
See :ref:`cuda-memory-management` for more details about GPU memory
management.
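Example (an illustrative sketch; the printout is a multi-line table whose
contents depend on the current allocator state)::
>>> print(torch.cuda.memory_summary(abbreviated=True))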
"""
device = _get_device_index(device, optional=True)
stats = memory_stats(device=device)
def _format_size(sz, pref_sz):
prefixes = ["B ", "KB", "MB", "GB", "TB", "PB"]
prefix = prefixes[0]
for new_prefix in prefixes[1:]:
if pref_sz < 768 * 1024:
break
prefix = new_prefix
sz //= 1024
pref_sz /= 1024
return "{:7d} {}".format(sz, prefix)
def _format_count(cnt, pref_cnt):
prefixes = [" ", "K", "M"]
prefix = prefixes[0]
for new_prefix in prefixes[1:]:
if pref_cnt < 750 * 1000:
break
prefix = new_prefix
cnt //= 1000
pref_cnt /= 1000
return "{:7d} {} ".format(cnt, prefix)
metrics_to_display = [
("allocated_bytes", "Allocated memory", _format_size),
("active_bytes", "Active memory", _format_size),
("reserved_bytes", "GPU reserved memory", _format_size),
("inactive_split_bytes", "Non-releasable memory", _format_size),
("allocation", "Allocations", _format_count),
("active", "Active allocs", _format_count),
("segment", "GPU reserved segments", _format_count),
("inactive_split", "Non-releasable allocs", _format_count),
]
lines = []
lines.append("=" * 75)
lines.append(" {_:16} PyTorch CUDA memory summary, device ID {device:<17d} ")
lines.append("-" * 75)
lines.append(" {_:9} CUDA OOMs: {num_ooms:<12d} | {_:6} cudaMalloc retries: {num_alloc_retries:<8d} ")
lines.append("=" * 75)
lines.append(" Metric | Cur Usage | Peak Usage | Tot Alloc | Tot Freed ")
for metric_key, metric_name, formatter in metrics_to_display:
lines.append("-" * 75)
submetrics = [("all", metric_name)]
if not abbreviated:
submetrics.append(("large_pool", " from large pool"))
submetrics.append(("small_pool", " from small pool"))
current_prefval, peak_prefval, allocated_prefval, freed_prefval = None, None, None, None
for submetric_key, submetric_name in submetrics:
prefix = metric_key + "." + submetric_key + "."
current = stats[prefix + "current"]
peak = stats[prefix + "peak"]
allocated = stats[prefix + "allocated"]
freed = stats[prefix + "freed"]
if current_prefval is None:
current_prefval = current
peak_prefval = peak
allocated_prefval = allocated
freed_prefval = freed
lines.append(" {:<21} | {} | {} | {} | {} ".format(
submetric_name,
formatter(current, current_prefval),
formatter(peak, peak_prefval),
formatter(allocated, allocated_prefval),
formatter(freed, freed_prefval)),
)
lines.append("=" * 75)
fmt_dict = {"_": "", "device": device}
for k, v in stats.items():
fmt_dict[k.replace(".", "-")] = v
return "|" + "|\n|".join(lines).format(**fmt_dict) + "|\n" | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/cuda/memory.py | 0.860662 | 0.254729 | memory.py | pypi |
import ctypes
import torch
class Stream(torch._C._CudaStreamBase):
r"""Wrapper around a CUDA stream.
A CUDA stream is a linear sequence of execution that belongs to a specific
device, independent from other streams. See :ref:`cuda-semantics` for
details.
Arguments:
device(torch.device or int, optional): a device on which to allocate
the stream. If :attr:`device` is ``None`` (default) or a negative
integer, this will use the current device.
priority(int, optional): priority of the stream. Lower numbers
represent higher priorities.
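Example (an illustrative sketch of running work on a side stream; requires
a CUDA device, and ``x`` is assumed to be a CUDA tensor)::
>>> s = torch.cuda.Stream()
>>> with torch.cuda.stream(s):
...     y = x * 2  # queued on stream ``s``
>>> torch.cuda.current_stream().wait_stream(s)  # serialize before reusing ``y``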
"""
def __new__(cls, device=None, priority=0, **kwargs):
with torch.cuda.device(device):
return super(Stream, cls).__new__(cls, priority=priority, **kwargs)
def wait_event(self, event):
r"""Makes all future work submitted to the stream wait for an event.
Arguments:
event (Event): an event to wait for.
.. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see
`CUDA Stream documentation`_ for more info.
This function returns without waiting for :attr:`event`: only future
operations are affected.
.. _CUDA Stream documentation:
http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html
"""
event.wait(self)
def wait_stream(self, stream):
r"""Synchronizes with another stream.
All future work submitted to this stream will wait until all kernels
submitted to a given stream at the time of call complete.
Arguments:
stream (Stream): a stream to synchronize.
.. note:: This function returns without waiting for currently enqueued
kernels in :attr:`stream`: only future operations are affected.
"""
self.wait_event(stream.record_event())
def record_event(self, event=None):
r"""Records an event.
Arguments:
event (Event, optional): event to record. If not given, a new one
will be allocated.
Returns:
Recorded event.
"""
if event is None:
event = Event()
event.record(self)
return event
def query(self):
r"""Checks if all the work submitted has been completed.
Returns:
A boolean indicating if all kernels in this stream are completed."""
return super(Stream, self).query()
def synchronize(self):
r"""Wait for all the kernels in this stream to complete.
.. note:: This is a wrapper around ``cudaStreamSynchronize()``: see
`CUDA Stream documentation`_ for more info.
"""
super(Stream, self).synchronize()
@property
def _as_parameter_(self):
return ctypes.c_void_p(self.cuda_stream)
def __eq__(self, o):
if isinstance(o, Stream):
return super(Stream, self).__eq__(o)
return False
def __hash__(self):
return hash((self.cuda_stream, self.device))
def __repr__(self):
return ('<torch.cuda.Stream device={0} cuda_stream={1:#x}>'
.format(self.device, self.cuda_stream))
class Event(torch._C._CudaEventBase):
r"""Wrapper around a CUDA event.
CUDA events are synchronization markers that can be used to monitor the
device's progress, to accurately measure timing, and to synchronize CUDA
streams.
The underlying CUDA events are lazily initialized when the event is first
recorded or exported to another process. After creation, only streams on the
same device may record the event. However, streams on any device can wait on
the event.
Arguments:
enable_timing (bool, optional): indicates if the event should measure time
(default: ``False``)
blocking (bool, optional): if ``True``, :meth:`wait` will be blocking (default: ``False``)
interprocess (bool): if ``True``, the event can be shared between processes
(default: ``False``)
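Example (an illustrative sketch of GPU timing; requires a CUDA device, and
``work()`` is a stand-in for your own kernel launches)::
>>> start = torch.cuda.Event(enable_timing=True)
>>> end = torch.cuda.Event(enable_timing=True)
>>> start.record()
>>> work()
>>> end.record()
>>> torch.cuda.synchronize()
>>> elapsed_ms = start.elapsed_time(end)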
.. _CUDA Event Documentation:
https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html
"""
def __new__(cls, enable_timing=False, blocking=False, interprocess=False):
return super(Event, cls).__new__(
cls,
enable_timing=enable_timing, blocking=blocking, interprocess=interprocess)
@classmethod
def from_ipc_handle(cls, device, handle):
r"""Reconstruct an event from an IPC handle on the given device."""
return super(Event, cls).from_ipc_handle(device, handle)
def record(self, stream=None):
r"""Records the event in a given stream.
Uses ``torch.cuda.current_stream()`` if no stream is specified. The
stream's device must match the event's device."""
if stream is None:
stream = torch.cuda.current_stream()
super(Event, self).record(stream)
def wait(self, stream=None):
r"""Makes all future work submitted to the given stream wait for this
event.
Uses ``torch.cuda.current_stream()`` if no stream is specified."""
if stream is None:
stream = torch.cuda.current_stream()
super(Event, self).wait(stream)
def query(self):
r"""Checks if all work currently captured by event has completed.
Returns:
A boolean indicating if all work currently captured by event has
completed.
"""
return super(Event, self).query()
def elapsed_time(self, end_event):
r"""Returns the time elapsed in milliseconds after the event was
recorded and before the end_event was recorded.
"""
return super(Event, self).elapsed_time(end_event)
def synchronize(self):
r"""Waits for the event to complete.
Waits until the completion of all work currently captured in this event.
This prevents the CPU thread from proceeding until the event completes.
.. note:: This is a wrapper around ``cudaEventSynchronize()``: see
`CUDA Event documentation`_ for more info.
"""
super(Event, self).synchronize()
def ipc_handle(self):
r"""Returns an IPC handle of this event. If not recorded yet, the event
will use the current device. """
return super(Event, self).ipc_handle()
@property
def _as_parameter_(self):
return ctypes.c_void_p(self.cuda_event)
def __repr__(self):
if self.cuda_event:
return '<torch.cuda.Event {0:#x}>'.format(self._as_parameter_.value)
else:
return '<torch.cuda.Event uninitialized>' | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/cuda/streams.py | 0.922726 | 0.492066 | streams.py | pypi |
import torch
from . import _lazy_init, _lazy_call, device_count, current_device
__all__ = ['get_rng_state', 'get_rng_state_all',
'set_rng_state', 'set_rng_state_all',
'manual_seed', 'manual_seed_all',
'seed', 'seed_all', 'initial_seed']
def get_rng_state(device='cuda'):
r"""Returns the random number generator state of the specified GPU as a ByteTensor.
Args:
device (torch.device or int, optional): The device to return the RNG state of.
Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
.. warning::
This function eagerly initializes CUDA.
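Example (an illustrative save/restore round trip; requires a CUDA device)::
>>> state = torch.cuda.get_rng_state()
>>> _ = torch.rand(3, device='cuda')  # advances the CUDA generator
>>> torch.cuda.set_rng_state(state)  # restores the saved state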
"""
_lazy_init()
if isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device('cuda', device)
idx = device.index
if idx is None:
idx = current_device()
default_generator = torch.cuda.default_generators[idx]
return default_generator.get_state()
def get_rng_state_all():
r"""Returns a tuple of ByteTensor representing the random number states of all devices."""
results = []
for i in range(device_count()):
results.append(get_rng_state(i))
return results
def set_rng_state(new_state, device='cuda'):
r"""Sets the random number generator state of the specified GPU.
Args:
new_state (torch.ByteTensor): The desired state
device (torch.device or int, optional): The device to set the RNG state.
Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
"""
new_state_copy = new_state.clone(memory_format=torch.contiguous_format)
if isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device('cuda', device)
def cb():
idx = device.index
if idx is None:
idx = current_device()
default_generator = torch.cuda.default_generators[idx]
default_generator.set_state(new_state_copy)
_lazy_call(cb)
def set_rng_state_all(new_states):
r"""Sets the random number generator state of all devices.
Args:
new_states (tuple of torch.ByteTensor): The desired state for each device"""
for i, state in enumerate(new_states):
set_rng_state(state, i)
def manual_seed(seed):
r"""Sets the seed for generating random numbers for the current GPU.
It's safe to call this function if CUDA is not available; in that
case, it is silently ignored.
Args:
seed (int): The desired seed.
.. warning::
If you are working with a multi-GPU model, this function is insufficient
to get determinism. To seed all GPUs, use :func:`manual_seed_all`.
"""
seed = int(seed)
def cb():
idx = current_device()
default_generator = torch.cuda.default_generators[idx]
default_generator.manual_seed(seed)
_lazy_call(cb)
def manual_seed_all(seed):
r"""Sets the seed for generating random numbers on all GPUs.
It's safe to call this function if CUDA is not available; in that
case, it is silently ignored.
Args:
seed (int): The desired seed.
"""
seed = int(seed)
def cb():
for i in range(device_count()):
default_generator = torch.cuda.default_generators[i]
default_generator.manual_seed(seed)
_lazy_call(cb)
def seed():
r"""Sets the seed for generating random numbers to a random number for the current GPU.
It's safe to call this function if CUDA is not available; in that
case, it is silently ignored.
.. warning::
If you are working with a multi-GPU model, this function will only initialize
the seed on one GPU. To initialize all GPUs, use :func:`seed_all`.
"""
def cb():
idx = current_device()
default_generator = torch.cuda.default_generators[idx]
default_generator.seed()
_lazy_call(cb)
def seed_all():
r"""Sets the seed for generating random numbers to a random number on all GPUs.
It's safe to call this function if CUDA is not available; in that
case, it is silently ignored.
"""
def cb():
random_seed = 0
seeded = False
for i in range(device_count()):
default_generator = torch.cuda.default_generators[i]
if not seeded:
default_generator.seed()
random_seed = default_generator.initial_seed()
seeded = True
else:
default_generator.manual_seed(random_seed)
_lazy_call(cb)
def initial_seed():
r"""Returns the current random seed of the current GPU.
.. warning::
This function eagerly initializes CUDA.
"""
_lazy_init()
idx = current_device()
default_generator = torch.cuda.default_generators[idx]
return default_generator.initial_seed() | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/cuda/random.py | 0.871953 | 0.445409 | random.py | pypi |
import torch
from . import nccl
from torch._utils import _take_tensors, _flatten_dense_tensors, \
_unflatten_dense_tensors, _reorder_tensors_as
def broadcast(tensor, devices):
"""Broadcasts a tensor to a number of GPUs.
Arguments:
tensor (Tensor): tensor to broadcast.
devices (Iterable): an iterable of devices among which to broadcast.
Note that it should be like (src, dst1, dst2, ...), the first element
of which is the source device to broadcast from.
Returns:
A tuple containing copies of the ``tensor``, placed on devices
corresponding to indices from ``devices``.
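Example (an illustrative sketch; requires at least two CUDA devices)::
>>> t = torch.randn(2, 2, device='cuda:0')
>>> copies = torch.cuda.comm.broadcast(t, [0, 1])
>>> [c.device for c in copies]  # one copy per listed device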
"""
return torch._C._broadcast(tensor, devices)
def broadcast_coalesced(tensors, devices, buffer_size=10485760):
"""Broadcasts a sequence tensors to the specified GPUs.
Small tensors are first coalesced into a buffer to reduce the number
of synchronizations.
Arguments:
tensors (sequence): tensors to broadcast.
devices (Iterable): an iterable of devices among which to broadcast.
Note that it should be like (src, dst1, dst2, ...), the first element
of which is the source device to broadcast from.
buffer_size (int): maximum size of the buffer used for coalescing
Returns:
A tuple containing copies of the ``tensor``, placed on devices
corresponding to indices from ``devices``.
"""
return torch._C._broadcast_coalesced(tensors, devices, buffer_size)
def reduce_add(inputs, destination=None):
"""Sums tensors from multiple GPUs.
All inputs should have matching shapes.
Arguments:
inputs (Iterable[Tensor]): an iterable of tensors to add.
destination (int, optional): a device on which the output will be
placed (default: current device).
Returns:
A tensor containing an elementwise sum of all inputs, placed on the
``destination`` device.
"""
# TODO: try to find an input on another gpu, copy it,
# and accumulate into the copy
if destination is None:
destination = torch.cuda.current_device()
input_size = inputs[0].size()
nccl_root = None
for i, inp in enumerate(inputs):
assert inp.is_cuda, "reduce_add expects all inputs to be on GPUs"
if inp.get_device() == destination:
nccl_root = i
if inp.size() != input_size:
got = 'x'.join(str(x) for x in inp.size())
expected = 'x'.join(str(x) for x in input_size)
raise ValueError("input {} has invalid size: got {}, but expected "
"{}".format(i, got, expected))
if nccl_root is None:
raise RuntimeError("reduce_add expects destination to be on the same GPU with one of the tensors")
result = inp.new(device=destination).resize_as_(inp).zero_()
if nccl.is_available(inputs) and inputs[0].get_device() == destination:
outputs = [result] + [t.new(t.size()) for t in inputs[1:]]
nccl.reduce(inputs, outputs, root=nccl_root)
return result
for inp in inputs:
input_correct_gpu = inp.cuda(result.get_device())
result.add_(input_correct_gpu)
return result
def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760):
"""Sums tensors from multiple GPUs.
Small tensors are first coalesced into a buffer to reduce the number
of synchronizations.
Arguments:
inputs (Iterable[Iterable[Tensor]]): iterable of iterables that
contain tensors from a single device.
destination (int, optional): a device on which the output will be
placed (default: current device).
buffer_size (int): maximum size of the buffer used for coalescing
Returns:
A tuple of tensors containing an elementwise sum of each group of
inputs, placed on the ``destination`` device.
"""
# TODO: When `len(inputs) == 1` and all inputs are on `destination`, just
# return `inputs`.
dense_tensors = [[] for _ in inputs] # shape (num_gpus, num_tensors)
output = []
ref_order = []
# process sparse ones first since they may have different sizes on different gpus
for tensor_at_gpus in zip(*inputs):
if all(t.is_sparse for t in tensor_at_gpus):
result = reduce_add(tensor_at_gpus, destination)
output.append(result)
ref_order.append(tensor_at_gpus[0])
else:
for coll, t in zip(dense_tensors, tensor_at_gpus):
coll.append(t.to_dense() if t.is_sparse else t)
ref_order.append(dense_tensors[0][-1])
itrs = [_take_tensors(tensors, buffer_size) for tensors in dense_tensors]
# now the dense ones, which have consistent sizes
for chunks in zip(*itrs):
flat_tensors = [_flatten_dense_tensors(chunk) for chunk in chunks]
flat_result = reduce_add(flat_tensors, destination)
for t in _unflatten_dense_tensors(flat_result, chunks[0]):
# The unflattened tensors do not share storage, and we don't expose
# base flat tensor anyways, so give them different version counters.
# See NOTE [ Version Counter in comm.*_coalesced ]
output.append(t.data)
return tuple(_reorder_tensors_as(output, ref_order))
def scatter(tensor, devices, chunk_sizes=None, dim=0, streams=None):
"""Scatters tensor across multiple GPUs.
Arguments:
tensor (Tensor): tensor to scatter.
devices (Iterable[int]): iterable of ints, specifying among which
devices the tensor should be scattered.
chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on
each device. It should match ``devices`` in length and sum to
``tensor.size(dim)``. If not specified, the tensor will be divided
into equal chunks.
dim (int, optional): A dimension along which to chunk the tensor.
Returns:
A tuple containing chunks of the ``tensor``, spread across given
``devices``.
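Example (an illustrative sketch; requires at least two CUDA devices)::
>>> t = torch.randn(4, 3)
>>> chunks = torch.cuda.comm.scatter(t, [0, 1])
>>> [c.shape for c in chunks]  # two (2, 3) chunks, one per device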
"""
return tuple(torch._C._scatter(tensor, devices, chunk_sizes, dim, streams))
def gather(tensors, dim=0, destination=None):
"""Gathers tensors from multiple GPUs.
Tensor sizes in all dimensions other than ``dim`` have to match.
Arguments:
tensors (Iterable[Tensor]): iterable of tensors to gather.
dim (int): a dimension along which the tensors will be concatenated.
destination (int, optional): output device (-1 means CPU, default:
current device)
Returns:
A tensor located on ``destination`` device, that is a result of
concatenating ``tensors`` along ``dim``.
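Example (an illustrative sketch; requires at least two CUDA devices)::
>>> a = torch.randn(2, 3, device='cuda:0')
>>> b = torch.randn(4, 3, device='cuda:1')
>>> out = torch.cuda.comm.gather([a, b], dim=0, destination=0)
>>> out.shape  # torch.Size([6, 3])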
"""
return torch._C._gather(tensors, dim, destination) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/cuda/comm.py | 0.888275 | 0.720282 | comm.py | pypi |
import torch
from collections import defaultdict
from torch._six import container_abcs
class _MultiDeviceReplicator(object):
"""
Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
"""
def __init__(self, master_tensor):
assert master_tensor.is_cuda
self.master = master_tensor
self._per_device_tensors = {}
def get(self, device):
retval = self._per_device_tensors.get(device, None)
if retval is None:
retval = self.master.to(device=device, non_blocking=True, copy=True)
self._per_device_tensors[device] = retval
return retval
class GradScaler(object):
"""
An instance ``scaler`` of :class:`GradScaler` helps perform the steps of gradient scaling
conveniently.
* ``scaler.scale(loss)`` multiplies a given loss by ``scaler``'s current scale factor.
* ``scaler.step(optimizer)`` safely unscales gradients and calls ``optimizer.step()``.
* ``scaler.update()`` updates ``scaler``'s scale factor.
Typical use::
# Creates a GradScaler once at the beginning of training.
scaler = GradScaler()
for epoch in epochs:
for input, target in data:
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# Scales the loss, and calls backward() on the scaled loss to create scaled gradients.
scaler.scale(loss).backward()
# scaler.step() first unscales the gradients of the optimizer's assigned params.
# If these gradients do not contain infs or NaNs, optimizer.step() is then called,
# otherwise, optimizer.step() is skipped.
scaler.step(optimizer)
# Updates the scale for next iteration.
scaler.update()
See the :ref:`Gradient Scaling Examples<gradient-scaling-examples>` for usage in more complex cases like
gradient clipping, gradient penalty, and multiple losses/optimizers.
``scaler`` dynamically estimates the scale factor each iteration. To minimize gradient underflow,
a large scale factor should be used. However, ``torch.float16`` values can "overflow" (become inf or NaN) if
the scale factor is too large. Therefore, the optimal scale factor is the largest factor that can be used
without incurring inf or NaN gradient values.
``scaler`` approximates the optimal scale factor over time by checking the gradients for infs and NaNs during every
``scaler.step(optimizer)`` (or optional separate ``scaler.unscale_(optimizer)``, see :meth:`unscale_`).
* If infs/NaNs are found, ``scaler.step(optimizer)`` skips the underlying ``optimizer.step()`` (so the params
themselves remain uncorrupted) and ``update()`` multiplies the scale by ``backoff_factor``.
* If no infs/NaNs are found, ``scaler.step(optimizer)`` runs the underlying ``optimizer.step()`` as usual.
If ``growth_interval`` unskipped iterations occur consecutively, ``update()`` multiplies the scale by
``growth_factor``.
The scale factor often causes infs/NaNs to appear in gradients for the first few iterations as its
value calibrates. ``scaler.step`` will skip the underlying ``optimizer.step()`` for these
iterations. After that, step skipping should occur rarely (once every few hundred or thousand iterations).
Arguments:
init_scale (float, optional, default=2.**16): Initial scale factor.
growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
:meth:`update` if no inf/NaN gradients occur for ``growth_factor`` consecutive iterations.
backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
:meth:`update` if inf/NaN gradients occur in an iteration.
growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
that must occur for the scale to be multiplied by ``growth_factor``.
enabled (bool, optional, default=True): If ``False``, disables gradient scaling. :meth:`step` simply
invokes the underlying ``optimizer.step()``, and other methods become no-ops.
"""
# Python 2 doesn't support enums.
READY = 0
UNSCALED = 1
STEPPED = 2
def __init__(self,
init_scale=2.**16,
growth_factor=2.0,
backoff_factor=0.5,
growth_interval=2000,
enabled=True):
self._enabled = enabled
if enabled:
assert growth_factor > 1.0, "The growth factor must be > 1.0."
assert backoff_factor < 1.0, "The backoff factor must be < 1.0."
self._init_scale = init_scale
# self._scale will be lazily initialized during the first call to scale()
self._scale = None
self._growth_factor = growth_factor
self._backoff_factor = backoff_factor
self._growth_interval = growth_interval
self._init_growth_tracker = 0
# self._growth_tracker will be lazily initialized during the first call to scale()
self._growth_tracker = None
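# Capture the class constant in a local variable so the defaultdict's
# lambda closes over a plain value rather than holding a reference to self.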
READY = self.READY
self._per_optimizer_states = defaultdict(lambda: {"stage": READY, "found_inf_per_device": {}})
def _check_scale_growth_tracker(self, funcname):
fix = "This may indicate your script did not use scaler.scale(loss or outputs) earlier in the iteration."
assert self._scale is not None, "Attempted {} but _scale is None. ".format(funcname) + fix
assert self._growth_tracker is not None, "Attempted {} but _growth_tracker is None. ".format(funcname) + fix
def _lazy_init_scale_growth_tracker(self, dev):
assert self._growth_tracker is None, "_growth_tracker initialized before _scale"
self._scale = torch.full((1,), self._init_scale, dtype=torch.float32, device=dev)
self._growth_tracker = torch.full((1,), self._init_growth_tracker, dtype=torch.int32, device=dev)
def scale(self, outputs):
"""
Multiplies ('scales') a tensor or list of tensors by the scale factor.
Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
unmodified.
Arguments:
outputs (Tensor or iterable of Tensors): Outputs to scale.
"""
if not self._enabled:
return outputs
# Short-circuit for the common case.
if isinstance(outputs, torch.Tensor):
assert outputs.is_cuda
if self._scale is None:
self._lazy_init_scale_growth_tracker(outputs.device)
return outputs * self._scale.to(device=outputs.device, non_blocking=True)
# Invoke the more complex machinery only if we're treating multiple outputs.
stash = [None] # trick to hold a reference that can be overwritten at any level of the recursion below.
def apply_scale(val):
if isinstance(val, torch.Tensor):
assert val.is_cuda
if self._scale is None:
self._lazy_init_scale_growth_tracker(val.device)
if stash[0] is None:
stash[0] = _MultiDeviceReplicator(self._scale)
return val * stash[0].get(val.device)
elif isinstance(val, container_abcs.Iterable):
return type(val)(apply_scale(v) for v in val)
else:
raise ValueError("outputs must be a Tensor or an iterable of Tensors")
return apply_scale(outputs)
def _unscale_grads_(self, optimizer, inv_scale, found_inf, allow_fp16):
per_device_inv_scale = _MultiDeviceReplicator(inv_scale)
per_device_found_inf = _MultiDeviceReplicator(found_inf)
for group in optimizer.param_groups:
for param in group["params"]:
if param.grad is not None:
if (not allow_fp16) and param.grad.dtype == torch.float16:
raise ValueError("Attempting to unscale FP16 gradients.")
else:
torch._amp_non_finite_check_and_unscale_(param.grad,
per_device_found_inf.get(param.grad.device),
per_device_inv_scale.get(param.grad.device))
return per_device_found_inf._per_device_tensors
def unscale_(self, optimizer):
"""
Divides ("unscales") the optimizer's gradient tensors by the scale factor.
:meth:`unscale_` is optional, serving cases where you need to
:ref:`modify or inspect gradients<working-with-unscaled-gradients>`
between the backward pass(es) and :meth:`step`.
If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`.
Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients::
...
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
scaler.step(optimizer)
scaler.update()
Arguments:
optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled.
.. note::
:meth:`unscale_` does not incur a CPU-GPU sync.
.. warning::
:meth:`unscale_` should only be called once per optimizer per :meth:`step` call,
and only after all gradients for that optimizer's assigned parameters have been accumulated.
Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError.
"""
if not self._enabled:
return
self._check_scale_growth_tracker("unscale_")
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state["stage"] == self.UNSCALED:
raise RuntimeError("unscale_() has already been called on this optimizer since the last update().")
elif optimizer_state["stage"] == self.STEPPED:
raise RuntimeError("unscale_() is being called after step().")
# FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
inv_scale = self._scale.double().reciprocal().float()
found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=self._scale.device)
optimizer_state["found_inf_per_device"] = self._unscale_grads_(optimizer, inv_scale, found_inf, False)
optimizer_state["stage"] = self.UNSCALED
def step(self, optimizer, *args, **kwargs):
"""
:meth:`step` carries out the following two operations:
1. Internally invokes ``unscale_(optimizer)`` (unless :meth:`unscale_` was explicitly called for ``optimizer``
earlier in the iteration). As part of the :meth:`unscale_`, gradients are checked for infs/NaNs.
2. If no inf/NaN gradients are found, invokes ``optimizer.step()`` using the unscaled
gradients. Otherwise, ``optimizer.step()`` is skipped to avoid corrupting the params.
``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``.
Returns the return value of ``optimizer.step(*args, **kwargs)``.
Arguments:
optimizer (torch.optim.Optimizer): Optimizer that applies the gradients.
args: Any arguments.
kwargs: Any keyword arguments.
.. warning::
Closure use is not currently supported.
"""
if (not self._enabled):
return optimizer.step(*args, **kwargs)
if "closure" in kwargs:
raise RuntimeError("Closure use is not currently supported if GradScaler is enabled.")
self._check_scale_growth_tracker("step")
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state["stage"] == self.STEPPED:
raise RuntimeError("step() has already been called since the last update().")
retval = None
if (hasattr(optimizer, "_step_supports_amp_scaling") and optimizer._step_supports_amp_scaling):
# This optimizer has customized scale-handling logic, so we can call optimizer.step() directly.
# The contract with custom optimizers is that their step() should accept an additional,
# optional grad_scaler kwarg. We append self to the kwargs so the custom optimizer has full information:
# it can query its own state, invoke unscale_ on itself, etc
retval = optimizer.step(*args, **dict(kwargs, grad_scaler=self))
optimizer_state["stage"] == self.STEPPED
return retval
if optimizer_state["stage"] == self.READY:
self.unscale_(optimizer)
assert len(optimizer_state["found_inf_per_device"]) > 0, "No inf checks were recorded for this optimizer."
if not sum(v.item() for v in optimizer_state["found_inf_per_device"].values()):
retval = optimizer.step(*args, **kwargs)
optimizer_state["stage"] == self.STEPPED
return retval
def update(self, new_scale=None):
"""
Updates the scale factor.
If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
the scale is multiplied by ``growth_factor`` to increase it.
Passing ``new_scale`` sets the scale directly.
Arguments:
new_scale (float or :class:`torch.cuda.FloatTensor`, optional, default=None): New scale factor.
.. warning::
:meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
been invoked for all optimizers used this iteration.
"""
if not self._enabled:
return
self._check_scale_growth_tracker("update")
if new_scale is not None:
# Accept a new user-defined scale.
if isinstance(new_scale, float):
self._scale = torch.full((1,), new_scale, dtype=torch.float32, device=self._scale.device)
else:
reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False."
assert isinstance(new_scale, torch.cuda.FloatTensor), reason
assert new_scale.numel() == 1, reason
assert new_scale.requires_grad is False, reason
self._scale = new_scale
else:
# Consume shared inf/nan data collected from optimizers to update the scale.
# If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
found_infs = [found_inf.to(device=self._scale.device, non_blocking=True)
for state in self._per_optimizer_states.values()
for found_inf in state["found_inf_per_device"].values()]
assert len(found_infs) > 0, "No inf checks were recorded prior to update."
found_inf_combined = found_infs[0]
if len(found_infs) > 1:
for i in range(1, len(found_infs)):
found_inf_combined += found_infs[i]
self._scale = torch._amp_update_scale(self._growth_tracker,
self._scale,
found_inf_combined,
self._growth_factor,
self._backoff_factor,
self._growth_interval)
# To prepare for next iteration, clear the data collected from optimizers this iteration.
self._per_optimizer_states = defaultdict(lambda: {"stage": self.READY, "found_inf_per_device": {}})
def _get_scale_async(self):
return self._scale
def get_scale(self):
"""
Returns a Python float containing the current scale, or 1.0 if scaling is disabled.
.. warning::
:meth:`get_scale` incurs a CPU-GPU sync.
"""
if self._enabled:
return self._init_scale if self._scale is None else self._get_scale_async().item()
else:
return 1.0
def get_growth_factor(self):
r"""
Returns a Python float containing the scale growth factor.
"""
return self._growth_factor
def set_growth_factor(self, new_factor):
r"""
Arguments:
new_factor (float): Value to use as the new scale growth factor.
"""
self._growth_factor = new_factor
def get_backoff_factor(self):
r"""
Returns a Python float containing the scale backoff factor.
"""
return self._backoff_factor
def set_backoff_factor(self, new_factor):
r"""
Arguments:
new_factor (float): Value to use as the new scale backoff factor.
"""
self._backoff_factor = new_factor
def get_growth_interval(self):
r"""
Returns a Python int containing the growth interval.
"""
return self._growth_interval
def set_growth_interval(self, new_interval):
r"""
Arguments:
new_interval (int): Value to use as the new growth interval.
"""
self._growth_interval = new_interval
def _get_growth_tracker(self):
if self._enabled:
return self._init_growth_tracker if self._growth_tracker is None else self._growth_tracker.item()
else:
return 0
def is_enabled(self):
r"""
Returns a bool indicating whether this instance is enabled.
"""
return self._enabled
def state_dict(self):
r"""
Returns the state of the scaler as a :class:`dict`. It contains five entries:
* ``"scale"`` - a Python float containing the current scale
* ``"growth_factor"`` - a Python float containing the current growth factor
* ``"backoff_factor"`` - a Python float containing the current backoff factor
* ``"growth_interval"`` - a Python int containing the current growth interval
* ``"_growth_tracker"`` - a Python int containing the number of recent consecutive unskipped steps.
If this instance is not enabled, returns an empty dict.
.. note::
If you wish to checkpoint the scaler's state after a particular iteration, :meth:`state_dict`
should be called after :meth:`update`.
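Example (an illustrative checkpoint round trip; ``PATH`` is a stand-in for
your own file path, and ``scaler`` an existing GradScaler instance)::
>>> torch.save(scaler.state_dict(), PATH)
>>> new_scaler = GradScaler()
>>> new_scaler.load_state_dict(torch.load(PATH))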
"""
return {"scale": self.get_scale(),
"growth_factor": self._growth_factor,
"backoff_factor": self._backoff_factor,
"growth_interval": self._growth_interval,
"_growth_tracker": self._get_growth_tracker()} if self._enabled else {}
def load_state_dict(self, state_dict):
r"""
Loads the scaler state. If this instance is disabled, :meth:`load_state_dict` is a no-op.
Arguments:
state_dict(dict): scaler state. Should be an object returned from a call to :meth:`state_dict`.
"""
if not self._enabled:
return
if len(state_dict) == 0:
raise RuntimeError("The source state dict is empty, possibly because it was saved "
"from a disabled instance of GradScaler.")
self._init_scale = state_dict["scale"]
if self._scale is not None:
self._scale.fill_(state_dict["scale"])
self._growth_factor = state_dict["growth_factor"]
self._backoff_factor = state_dict["backoff_factor"]
self._growth_interval = state_dict["growth_interval"]
self._init_growth_tracker = state_dict["_growth_tracker"]
if self._growth_tracker is not None:
self._growth_tracker.fill_(state_dict["_growth_tracker"])
def _check_inf_per_device(self, optimizer):
self._check_scale_growth_tracker("_check_inf_per_device")
dummy_inv_scale = torch.full((1,), 1.0, dtype=torch.float32, device=self._scale.device)
found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=self._scale.device)
self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] = \
self._unscale_grads_(optimizer, dummy_inv_scale, found_inf, True)
return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"]
def _found_inf_per_device(self, optimizer):
return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/cuda/amp/grad_scaler.py | 0.931001 | 0.561455 | grad_scaler.py | pypi |
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from torch.nn.modules.utils import _single, _pair, _triple
import torch.onnx
# This import monkey-patches graph manipulation methods on Graph, used for the
# ONNX symbolics
import torch.onnx.utils
import torch.onnx.symbolic_helper as sym_help
from torch.onnx.symbolic_helper import parse_args, _unimplemented
import torch.onnx.symbolic_opset9
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
# This file exports ONNX ops for opset 10
# Opset 10 is supported by ONNX release 1.5.0
# release on 04/24/19
@parse_args('v', 'i', 'i', 'none')
def sort(g, self, dim, decending, out=None):
return sym_help._sort_helper(g, self, dim, decending=decending, out=out)
@parse_args('v', 'v', 'i', 'i', 'i', 'none')
def topk(g, self, k, dim, largest, sorted, out=None):
return sym_help._topk_helper(g, self, k, dim, largest=largest, sorted=sorted, out=out)
def _max_pool(name, tuple_fn, ndims, return_indices):
@parse_args('v', 'is', 'is', 'is', 'is', 'i')
def symbolic_fn(g, input, kernel_size, stride, padding, dilation, ceil_mode):
if not stride:
stride = kernel_size
kwargs = {
'kernel_shape_i': tuple_fn(kernel_size),
'pads_i': tuple_fn(padding) * 2,
'strides_i': tuple_fn(stride),
'ceil_mode_i': ceil_mode,
}
if set(tuple_fn(dilation)) != {1}:
kwargs['dilations_i'] = tuple_fn(dilation)
# An easy but hacky way to obtain flattened index values,
# used to convert the indices to their non-flattened form.
# In ONNX the indices are computed as a flatten 1-D tensor,
# so the values in indices are in [0, N x C x D1 x ... x Dn).
# To convert the indices to the same format used by Pytorch,
# we first execute a maxpool with a kernel and stride of 1 on the same input.
# This will result in a tensor of indices in which each index will have its own value.
# Using this tensor as a reference, we extract the first index of each axis and subtract
# it from each index of this axis in the indices to convert.
# This step will result in a tensor where each dimension has values of indices within
# the dimension it is in.
# For more information :
# https://github.com/pytorch/pytorch/pull/16455#issuecomment-460776407
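# Illustrative sketch (values assumed, not from a real run): if the
# flattened indices for one channel are [[12, 13], [14, 15]] and the
# kernel-1/stride-1 reference pool yields 12 at that channel's first
# spatial position, subtracting 12 gives per-channel indices [[0, 1], [2, 3]].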
if return_indices:
r, indices = g.op("MaxPool", input, outputs=2, **kwargs)
_, flattened_indices = g.op("MaxPool", input, outputs=2,
kernel_shape_i=[1 for _ in range(ndims)],
strides_i=[1 for _ in range(ndims)])
# convert indices to have non-flattened indices values
from torch.onnx.symbolic_opset9 import sub
s = sym_help._slice_helper(g, flattened_indices, axes=[2 + i for i in range(ndims)],
starts=tuple_fn(0), ends=tuple_fn(1))
indices = sub(g, indices, s)
return r, indices
else:
r = g.op("MaxPool", input, outputs=1, **kwargs)
return r
return symbolic_fn
max_pool1d = _max_pool("max_pool1d", _single, 1, return_indices=False)
max_pool2d = _max_pool("max_pool2d", _pair, 2, return_indices=False)
max_pool3d = _max_pool("max_pool3d", _triple, 3, return_indices=False)
max_pool1d_with_indices = _max_pool("max_pool1d_with_indices", _single, 1, return_indices=True)
max_pool2d_with_indices = _max_pool("max_pool2d_with_indices", _pair, 2, return_indices=True)
max_pool3d_with_indices = _max_pool("max_pool3d_with_indices", _triple, 3, return_indices=True)
def _avg_pool(name, tuple_fn):
@parse_args('v', 'is', 'is', 'is', 'i', 'i', 'none')
def symbolic_fn(g, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override=None):
if not stride:
stride = kernel_size
padding = sym_help._avgpool_helper(tuple_fn, padding, kernel_size, stride, divisor_override, name)
if count_include_pad:
input = g.op("Pad", input,
pads_i=((0,) * 2 + padding) * 2,
mode_s='constant',
value_f=0.)
padding = (0,) * len(padding)
output = g.op("AveragePool", input,
kernel_shape_i=tuple_fn(kernel_size),
strides_i=tuple_fn(stride),
pads_i=padding * 2,
ceil_mode_i=ceil_mode)
return output
return symbolic_fn
avg_pool1d = _avg_pool('avg_pool1d', _single)
avg_pool2d = _avg_pool('avg_pool2d', _pair)
avg_pool3d = _avg_pool('avg_pool3d', _triple)
def _interpolate(name, dim, interpolate_mode):
def symbolic_fn(g, input, output_size, *args):
scales, align_corners = sym_help._get_interpolate_attributes(g, interpolate_mode, args)
sym_help._interpolate_warning(interpolate_mode)
align_corners = sym_help._maybe_get_scalar(align_corners)
if align_corners:
return _unimplemented(name, "align_corners == True")
if scales is None:
scales = sym_help._interpolate_size_to_scales(g, input, output_size, dim)
return g.op("Resize", input, scales, mode_s=interpolate_mode)
return symbolic_fn
upsample_nearest1d = _interpolate('upsample_nearest1d', 3, "nearest")
upsample_nearest2d = _interpolate('upsample_nearest2d', 4, "nearest")
upsample_nearest3d = _interpolate('upsample_nearest3d', 5, "nearest")
upsample_linear1d = _interpolate('upsample_linear1d', 3, "linear")
upsample_bilinear2d = _interpolate('upsample_bilinear2d', 4, "linear")
upsample_trilinear3d = _interpolate('upsample_trilinear3d', 5, "linear")
def __interpolate(g, input, size, scale_factor, mode, align_corners, recompute_scale_factor):
scales, mode = sym_help._interpolate_get_scales_and_mode(g, input, size, scale_factor,
mode, align_corners)
return g.op("Resize", input, scales, mode_s=mode)
def _slice(g, input, axes, starts, ends, steps=None, dynamic_slice=False):
if dynamic_slice:
starts = g.op("Unsqueeze", starts, axes_i=[0])
ends = g.op("Unsqueeze", ends, axes_i=[0])
axes = g.op("Unsqueeze", axes, axes_i=[0])
else:
assert len(starts) == len(ends)
assert len(starts) == len(axes)
assert steps is None or len(starts) == len(steps)
if len(starts) == 1 and starts[0] == 0 and ends[0] == 9223372036854775807 \
and (steps is None or (len(steps) == 1 and steps[0] == 1)):
return input
axes = g.op("Constant", value_t=torch.tensor(axes))
starts = g.op("Constant", value_t=torch.tensor(starts))
ends = g.op("Constant", value_t=torch.tensor(ends))
if steps is None:
return g.op("Slice", input, starts, ends, axes)
steps = g.op("Constant", value_t=torch.tensor(steps))
return g.op("Slice", input, starts, ends, axes, steps)
@parse_args('v', 'v', 'v', 'v', 'i')
def slice(g, self, dim, start, end, step):
if (start.node().kind() != 'onnx::Constant' or
end.node().kind() != 'onnx::Constant' or dim.node().kind() != 'onnx::Constant'):
dynamic_slice = True
else:
start = [sym_help._parse_arg(start, 'i')]
end = [sym_help._parse_arg(end, 'i')]
dim = [sym_help._parse_arg(dim, 'i')]
dynamic_slice = False
return sym_help._slice_helper(g, self, axes=dim, starts=start, ends=end, steps=[step], dynamic_slice=dynamic_slice)
@parse_args('v', 'is')
def flip(g, input, dims):
return sym_help._slice_helper(g, input, axes=dims,
starts=[-1] * len(dims),
ends=[-9223372036854775807] * len(dims),
steps=[-1] * len(dims))
def fmod(g, input, other):
return g.op("Mod", input, other, fmod_i=1) | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/onnx/symbolic_opset10.py | 0.765506 | 0.269999 | symbolic_opset10.py | pypi |
import warnings
import importlib
from inspect import getmembers, isfunction
# The symbolic registry "_registry" is a dictionary that maps operators
# (for a specific domain and opset version) to their symbolic functions.
# An operator is defined by its domain, opset version, and opname.
# The keys are tuples (domain, version), where domain is a string and version is an int;
# within each (domain, version) entry, operators are keyed by their name (a string).
# The map's entries are as follows : _registry[(domain, version)][op_name] = op_symbolic
_registry = {}
_symbolic_versions = {}
from torch.onnx.symbolic_helper import _onnx_stable_opsets
for opset_version in _onnx_stable_opsets:
module = importlib.import_module('torch.onnx.symbolic_opset{}'.format(opset_version))
_symbolic_versions[opset_version] = module
def register_version(domain, version):
if not is_registered_version(domain, version):
global _registry
_registry[(domain, version)] = {}
register_ops_in_version(domain, version)
def register_ops_helper(domain, version, iter_version):
version_ops = get_ops_in_version(iter_version)
for op in version_ops:
if op[0] == '_len':
op = ('len', op[1])
if isfunction(op[1]) and not is_registered_op(op[0], domain, version):
register_op(op[0], op[1], domain, version)
def register_ops_in_version(domain, version):
# iterates through the symbolic functions of
# the specified opset version, and the previous
# opset versions for operators supported in
# previous versions.
# Opset 9 is the base version. It is selected as the base version because
# 1. It is the first opset version supported by PyTorch export.
# 2. opset 9 is more robust than previous opset versions. Opset versions like 7/8 have limitations
# that certain basic operators cannot be expressed in ONNX. Instead of basing on these limitations,
# we chose to handle them as special cases separately.
# Backward support for opset versions beyond opset 7 is not in our roadmap.
# For opset versions other than 9, by default they will inherit the symbolic functions defined in
# symbolic_opset9.py.
# To extend support for updated operators in different opset versions on top of opset 9,
# simply add the updated symbolic functions in the respective symbolic_opset{version}.py file.
# Check out topk in symbolic_opset10.py and upsample_nearest2d in symbolic_opset8.py for examples.
iter_version = version
while iter_version != 9:
register_ops_helper(domain, version, iter_version)
if iter_version > 9:
iter_version = iter_version - 1
else:
iter_version = iter_version + 1
register_ops_helper(domain, version, 9)
def get_ops_in_version(version):
return getmembers(_symbolic_versions[version])
def is_registered_version(domain, version):
global _registry
return (domain, version) in _registry
def register_op(opname, op, domain, version):
if domain is None or version is None:
warnings.warn("ONNX export failed. The ONNX domain and/or version to register are None.")
global _registry
if not is_registered_version(domain, version):
_registry[(domain, version)] = {}
_registry[(domain, version)][opname] = op
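# Illustrative sketch of direct registration (all names are hypothetical):
#
#   def my_relu_symbolic(g, input):
#       return g.op("Relu", input)
#
#   register_op('my_relu', my_relu_symbolic, '', 9)
#   assert is_registered_op('my_relu', '', 9)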
def is_registered_op(opname, domain, version):
if domain is None or version is None:
warnings.warn("ONNX export failed. The ONNX domain and/or version are None.")
global _registry
return (domain, version) in _registry and opname in _registry[(domain, version)]
def get_op_supported_version(opname, domain, version):
iter_version = version
while iter_version <= _onnx_stable_opsets[-1]:
ops = [op[0] for op in get_ops_in_version(iter_version)]
if opname in ops:
return iter_version
iter_version += 1
return None
def get_registered_op(opname, domain, version):
if domain is None or version is None:
warnings.warn("ONNX export failed. The ONNX domain and/or version are None.")
global _registry
if not is_registered_op(opname, domain, version):
msg = "Exporting the operator " + opname + " to ONNX opset version " + str(version) + " is not supported. "
supported_version = get_op_supported_version(opname, domain, version)
if supported_version is not None:
msg += "Support for this operator was added in version " + str(supported_version) + ", try exporting with this version."
else:
msg += "Please open a bug to request ONNX export support for the missing operator."
raise RuntimeError(msg)
return _registry[(domain, version)][opname] | /rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/onnx/symbolic_registry.py | 0.402862 | 0.246726 | symbolic_registry.py | pypi |