ZTWHHH committed on
Commit
91b3936
·
verified ·
1 Parent(s): aae0425

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. mgm/lib/python3.10/site-packages/torch/nn/_reduction.py +47 -0
  3. mgm/lib/python3.10/site-packages/torch/nn/cpp.py +91 -0
  4. mgm/lib/python3.10/site-packages/torch/nn/modules/__init__.py +68 -0
  5. mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/_functions.cpython-310.pyc +0 -0
  6. mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/adaptive.cpython-310.pyc +0 -0
  7. mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/dropout.cpython-310.pyc +0 -0
  8. mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/linear.cpython-310.pyc +0 -0
  9. mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/padding.cpython-310.pyc +0 -0
  10. mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pooling.cpython-310.pyc +0 -0
  11. mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/upsampling.cpython-310.pyc +0 -0
  12. mgm/lib/python3.10/site-packages/torch/nn/modules/_functions.py +288 -0
  13. mgm/lib/python3.10/site-packages/torch/nn/modules/batchnorm.py +836 -0
  14. mgm/lib/python3.10/site-packages/torch/nn/modules/channelshuffle.py +54 -0
  15. mgm/lib/python3.10/site-packages/torch/nn/modules/conv.py +1598 -0
  16. mgm/lib/python3.10/site-packages/torch/nn/modules/dropout.py +282 -0
  17. mgm/lib/python3.10/site-packages/torch/nn/modules/flatten.py +141 -0
  18. mgm/lib/python3.10/site-packages/torch/nn/modules/instancenorm.py +428 -0
  19. mgm/lib/python3.10/site-packages/torch/nn/modules/lazy.py +263 -0
  20. mgm/lib/python3.10/site-packages/torch/nn/modules/linear.py +262 -0
  21. mgm/lib/python3.10/site-packages/torch/nn/modules/padding.py +800 -0
  22. mgm/lib/python3.10/site-packages/torch/nn/modules/pixelshuffle.py +107 -0
  23. mgm/lib/python3.10/site-packages/torch/nn/modules/pooling.py +1233 -0
  24. mgm/lib/python3.10/site-packages/torch/nn/modules/sparse.py +454 -0
  25. mgm/lib/python3.10/site-packages/torch/nn/modules/transformer.py +931 -0
  26. mgm/lib/python3.10/site-packages/torch/nn/modules/upsampling.py +263 -0
  27. mgm/lib/python3.10/site-packages/torch/nn/parameter.py +220 -0
  28. mgm/lib/python3.10/site-packages/torch/nn/quantized/__init__.py +40 -0
  29. mgm/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
  30. mgm/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/functional.cpython-310.pyc +0 -0
  31. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/__init__.py +1 -0
  32. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/__pycache__/__init__.cpython-310.pyc +0 -0
  33. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__init__.py +31 -0
  34. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  35. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/conv.cpython-310.pyc +0 -0
  36. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/linear.cpython-310.pyc +0 -0
  37. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  38. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/sparse.cpython-310.pyc +0 -0
  39. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/utils.cpython-310.pyc +0 -0
  40. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/conv.py +19 -0
  41. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/linear.py +12 -0
  42. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/rnn.py +17 -0
  43. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/sparse.py +13 -0
  44. mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/utils.py +15 -0
  45. mgm/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__init__.py +1 -0
  46. mgm/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
  47. mgm/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__init__.py +32 -0
  48. mgm/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  49. mgm/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc +0 -0
  50. mgm/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/linear.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -1086,3 +1086,4 @@ mgm/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs
1086
  mgm/lib/python3.10/site-packages/sympy/physics/continuum_mechanics/__pycache__/beam.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1087
  vila/lib/python3.10/site-packages/opencv_python.libs/libavcodec-402e4b05.so.59.37.100 filter=lfs diff=lfs merge=lfs -text
1088
  mgm/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
1086
  mgm/lib/python3.10/site-packages/sympy/physics/continuum_mechanics/__pycache__/beam.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1087
  vila/lib/python3.10/site-packages/opencv_python.libs/libavcodec-402e4b05.so.59.37.100 filter=lfs diff=lfs merge=lfs -text
1088
  mgm/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1089
+ videollama2/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
mgm/lib/python3.10/site-packages/torch/nn/_reduction.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+ import warnings
3
+
4
+ # NB: Keep this file in sync with enums in aten/src/ATen/core/Reduction.h
5
+
6
+
7
def get_enum(reduction: str) -> int:
    """Translate a ``reduction`` mode string into its ATen enum value.

    Keep in sync with the enums in aten/src/ATen/core/Reduction.h:
    ``'none' -> 0``, ``'mean' -> 1`` (``'elementwise_mean'`` is a deprecated
    alias), ``'sum' -> 2``.

    Raises:
        ValueError: if ``reduction`` is not one of the accepted strings.
    """
    if reduction == 'none':
        ret = 0
    elif reduction == 'mean':
        ret = 1
    elif reduction == 'elementwise_mean':
        # Deprecated alias; behaves exactly like 'mean' but warns.
        warnings.warn("reduction='elementwise_mean' is deprecated, please use reduction='mean' instead.")
        ret = 1
    elif reduction == 'sum':
        ret = 2
    else:
        # Dead store kept deliberately: TorchScript's control-flow analysis
        # needs `ret` assigned on every path even though we raise here.
        ret = -1  # TODO: remove once JIT exceptions support control flow
        raise ValueError(f"{reduction} is not a valid value for reduction")
    return ret
21
+
22
+ # In order to support previous versions, accept boolean size_average and reduce
23
+ # and convert them into the new constants for now
24
+
25
+
26
+ # We use these functions in torch/legacy as well, in which case we'll silence the warning
27
def legacy_get_string(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> str:
    """Map the legacy ``size_average``/``reduce`` flags to a reduction string.

    ``None`` for either flag means the historical default of ``True``.
    When ``emit_warning`` is set, a deprecation warning naming the
    equivalent ``reduction=`` value is emitted.
    """
    warning = "size_average and reduce args will be deprecated, please use reduction='{}' instead."

    # Missing flags fall back to the historical defaults (both True).
    size_average = True if size_average is None else size_average
    reduce = True if reduce is None else reduce

    if not reduce:
        ret = 'none'
    elif size_average:
        ret = 'mean'
    else:
        ret = 'sum'
    if emit_warning:
        warnings.warn(warning.format(ret))
    return ret
44
+
45
+
46
def legacy_get_enum(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> int:
    """Convert legacy ``size_average``/``reduce`` flags straight to the ATen enum."""
    reduction = legacy_get_string(size_average, reduce, emit_warning)
    return get_enum(reduction)
mgm/lib/python3.10/site-packages/torch/nn/cpp.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Functionality for Python <-> C++ frontend inter-op."""
2
+
3
+ from torch import nn
4
+
5
+
6
class OrderedDictWrapper:
    """Live view over an OrderedDict attribute of a bound C++ module.

    The underlying dict is re-fetched from the C++ module on every access so
    that changes made on the C++ side are always visible; caching e.g.
    ``cpp_module._parameters`` once would freeze a snapshot of the parameters
    at the time of access.  ``torch.nn.Module`` reaches ``_parameters`` et al.
    through ``self.__dict__``, so plain properties cannot be used instead.
    """

    def __init__(self, cpp_module, attr):
        self.cpp_module = cpp_module
        self.attr = attr

    @property
    def cpp_dict(self):
        # Re-evaluated on every access so C++-side mutations are picked up.
        return getattr(self.cpp_module, self.attr)

    # Dunder lookups bypass ``getattr`` on the instance, so each one is
    # forwarded explicitly to the freshly fetched dict.

    def items(self):
        return self.cpp_dict.items()

    def keys(self):
        return self.cpp_dict.keys()

    def values(self):
        return self.cpp_dict.values()

    def __iter__(self):
        return iter(self.cpp_dict)

    def __len__(self):
        return len(self.cpp_dict)

    def __contains__(self, key):
        return key in self.cpp_dict

    def __getitem__(self, key):
        return self.cpp_dict[key]
47
+
48
+
49
class ModuleWrapper(nn.Module):
    """
    A subclass of ``torch.nn.Module`` that wraps a C++ frontend module and
    delegates all access.
    """

    def __init__(self, cpp_module):
        # Assign before the super class constructor so ``self.training`` can be
        # assigned to in the super class constructor: our ``training`` property
        # below forwards to ``self.cpp_module``, which must therefore exist first.
        self.cpp_module = cpp_module
        super().__init__()
        # Replace the Python-side dicts with live views over the C++ module's
        # parameter/buffer/submodule dictionaries (see OrderedDictWrapper).
        self._parameters = OrderedDictWrapper(cpp_module, "_parameters")  # type: ignore[assignment]
        self._buffers: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_buffers")  # type: ignore[assignment]
        self._modules: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_modules")  # type: ignore[assignment]
        for attr in dir(cpp_module):
            # Skip magic methods and the three attributes above.
            if not attr.startswith("_"):
                setattr(self, attr, getattr(self.cpp_module, attr))

    def _apply(self, fn, recurse=True):
        # Apply ``fn`` (e.g. a device/dtype transfer) to every parameter,
        # gradient and buffer in place, mirroring nn.Module._apply.
        for param in self.parameters():
            # Tensors stored in modules are graph leaves, and we don't
            # want to create copy nodes, so we have to unpack the data.
            param.data = fn(param.data)
            if param._grad is not None:
                param._grad.data = fn(param._grad.data)

        for buf in self.buffers():
            buf.data = fn(buf.data)

        return self

    # nn.Module defines training as a boolean attribute; expose it as a
    # property that forwards to the C++ module so both sides always agree.
    @property  # type: ignore[override]
    def training(self):
        return self.cpp_module.training

    @training.setter
    def training(self, mode):
        # NOTE(review): delegates to train() rather than setting the flag —
        # presumably so C++-side bookkeeping also runs; confirm against the
        # C++ frontend's train() semantics.
        self.cpp_module.train(mode)

    def __repr__(self):
        return self.cpp_module.__repr__()
mgm/lib/python3.10/site-packages/torch/nn/modules/__init__.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .module import Module
2
+ from .linear import Identity, Linear, Bilinear, LazyLinear
3
+ from .conv import Conv1d, Conv2d, Conv3d, \
4
+ ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
5
+ LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
6
+ from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
7
+ Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
8
+ Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
9
+ Hardsigmoid, Hardswish, SiLU, Mish
10
+ from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
11
+ CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
12
+ MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
13
+ SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
14
+ from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
15
+ from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
16
+ MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
17
+ AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
18
+ from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
19
+ LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
20
+ from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
21
+ LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
22
+ from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
23
+ from .dropout import Dropout, Dropout1d, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
24
+ from .padding import ReflectionPad1d, ReflectionPad2d, ReflectionPad3d, ReplicationPad1d, ReplicationPad2d, \
25
+ ReplicationPad3d, ZeroPad1d, ZeroPad2d, ZeroPad3d, ConstantPad1d, ConstantPad2d, ConstantPad3d, \
26
+ CircularPad1d, CircularPad2d, CircularPad3d
27
+ from .sparse import Embedding, EmbeddingBag
28
+ from .rnn import RNNBase, RNN, LSTM, GRU, \
29
+ RNNCellBase, RNNCell, LSTMCell, GRUCell
30
+ from .pixelshuffle import PixelShuffle, PixelUnshuffle
31
+ from .upsampling import UpsamplingNearest2d, UpsamplingBilinear2d, Upsample
32
+ from .distance import PairwiseDistance, CosineSimilarity
33
+ from .fold import Fold, Unfold
34
+ from .adaptive import AdaptiveLogSoftmaxWithLoss
35
+ from .transformer import TransformerEncoder, TransformerDecoder, \
36
+ TransformerEncoderLayer, TransformerDecoderLayer, Transformer
37
+ from .flatten import Flatten, Unflatten
38
+ from .channelshuffle import ChannelShuffle
39
+
40
+ __all__ = [
41
+ 'Module', 'Identity', 'Linear', 'Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d',
42
+ 'ConvTranspose2d', 'ConvTranspose3d', 'Threshold', 'ReLU', 'Hardtanh', 'ReLU6',
43
+ 'Sigmoid', 'Tanh', 'Softmax', 'Softmax2d', 'LogSoftmax', 'ELU', 'SELU', 'CELU', 'GLU', 'GELU', 'Hardshrink',
44
+ 'LeakyReLU', 'LogSigmoid', 'Softplus', 'Softshrink', 'MultiheadAttention', 'PReLU', 'Softsign', 'Softmin',
45
+ 'Tanhshrink', 'RReLU', 'L1Loss', 'NLLLoss', 'KLDivLoss', 'MSELoss', 'BCELoss', 'BCEWithLogitsLoss',
46
+ 'NLLLoss2d', 'PoissonNLLLoss', 'CosineEmbeddingLoss', 'CTCLoss', 'HingeEmbeddingLoss', 'MarginRankingLoss',
47
+ 'MultiLabelMarginLoss', 'MultiLabelSoftMarginLoss', 'MultiMarginLoss', 'SmoothL1Loss', 'GaussianNLLLoss',
48
+ 'HuberLoss', 'SoftMarginLoss', 'CrossEntropyLoss', 'Container', 'Sequential', 'ModuleList', 'ModuleDict',
49
+ 'ParameterList', 'ParameterDict', 'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'MaxPool1d', 'MaxPool2d',
50
+ 'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'FractionalMaxPool2d', "FractionalMaxPool3d",
51
+ 'LPPool1d', 'LPPool2d', 'LocalResponseNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d', 'InstanceNorm1d',
52
+ 'InstanceNorm2d', 'InstanceNorm3d', 'LayerNorm', 'GroupNorm', 'SyncBatchNorm',
53
+ 'Dropout', 'Dropout1d', 'Dropout2d', 'Dropout3d', 'AlphaDropout', 'FeatureAlphaDropout',
54
+ 'ReflectionPad1d', 'ReflectionPad2d', 'ReflectionPad3d', 'ReplicationPad2d', 'ReplicationPad1d', 'ReplicationPad3d',
55
+ 'CrossMapLRN2d', 'Embedding', 'EmbeddingBag', 'RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell',
56
+ 'LSTMCell', 'GRUCell', 'PixelShuffle', 'PixelUnshuffle', 'Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d',
57
+ 'PairwiseDistance', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d',
58
+ 'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d', 'TripletMarginLoss', 'ZeroPad1d', 'ZeroPad2d', 'ZeroPad3d',
59
+ 'ConstantPad1d', 'ConstantPad2d', 'ConstantPad3d', 'Bilinear', 'CosineSimilarity', 'Unfold', 'Fold',
60
+ 'AdaptiveLogSoftmaxWithLoss', 'TransformerEncoder', 'TransformerDecoder',
61
+ 'TransformerEncoderLayer', 'TransformerDecoderLayer', 'Transformer',
62
+ 'LazyLinear', 'LazyConv1d', 'LazyConv2d', 'LazyConv3d',
63
+ 'LazyConvTranspose1d', 'LazyConvTranspose2d', 'LazyConvTranspose3d',
64
+ 'LazyBatchNorm1d', 'LazyBatchNorm2d', 'LazyBatchNorm3d',
65
+ 'LazyInstanceNorm1d', 'LazyInstanceNorm2d', 'LazyInstanceNorm3d',
66
+ 'Flatten', 'Unflatten', 'Hardsigmoid', 'Hardswish', 'SiLU', 'Mish', 'TripletMarginWithDistanceLoss', 'ChannelShuffle',
67
+ 'CircularPad1d', 'CircularPad2d', 'CircularPad3d'
68
+ ]
mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/_functions.cpython-310.pyc ADDED
Binary file (5.97 kB). View file
 
mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/adaptive.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/dropout.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/padding.cpython-310.pyc ADDED
Binary file (33.4 kB). View file
 
mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pooling.cpython-310.pyc ADDED
Binary file (53.4 kB). View file
 
mgm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/upsampling.cpython-310.pyc ADDED
Binary file (11.8 kB). View file
 
mgm/lib/python3.10/site-packages/torch/nn/modules/_functions.py ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.distributed as dist
3
+
4
+ from torch.autograd.function import Function
5
+
6
class SyncBatchNorm(Function):
    """Autograd function implementing batch norm synchronized across processes.

    ``forward`` computes local per-channel statistics, all-gathers them from
    every rank in ``process_group``, derives the global mean/invstd (updating
    the running stats via ``batch_norm_gather_stats_with_counts``), and applies
    element-wise normalization.  ``backward`` mirrors this with an all-reduce
    of the local gradient sums.  Ranks with empty inputs still participate in
    the collectives so their peers are not blocked.
    """

    @staticmethod
    def forward(self, input, weight, bias, running_mean, running_var, eps, momentum, process_group, world_size):
        # Normalize memory layout: anything that is not channels-last (2d/3d)
        # is made contiguous before calling the batch_norm kernels.
        if not (
            input.is_contiguous(memory_format=torch.channels_last) or
            input.is_contiguous(memory_format=torch.channels_last_3d)
        ):
            input = input.contiguous()
        if weight is not None:
            weight = weight.contiguous()

        # Number of values per channel on this rank.
        size = int(input.numel() // input.size(1))
        if size == 1 and world_size < 2:
            raise ValueError(f'Expected more than 1 value per channel when training, got input size {size}')

        num_channels = input.shape[1]
        if input.numel() > 0:
            # calculate mean/invstd for input.
            mean, invstd = torch.batch_norm_stats(input, eps)

            count = torch.full(
                (1,),
                input.numel() // input.size(1),
                dtype=mean.dtype,
                device=mean.device
            )

            # C, C, 1 -> (2C + 1)
            combined = torch.cat([mean, invstd, count], dim=0)
        else:
            # for empty input, set stats and the count to zero. The stats with
            # zero count will be filtered out later when computing global mean
            # & invstd, but they still needs to participate the all_gather
            # collective communication to unblock other peer processes.
            combined = torch.zeros(
                2 * num_channels + 1,
                dtype=input.dtype,
                device=input.device
            )

        # Use allgather instead of allreduce because count could be different across
        # ranks, simple all reduce op can not give correct results.
        # batch_norm_gather_stats_with_counts calculates global mean & invstd based on
        # all gathered mean, invstd and count.
        # for nccl backend, use the optimized version of all gather.
        # The Gloo backend does not support `all_gather_into_tensor`.
        if process_group._get_backend_name() != "gloo":
            # world_size * (2C + 1)
            combined_size = combined.numel()
            combined_flat = torch.empty(1,
                                        combined_size * world_size,
                                        dtype=combined.dtype,
                                        device=combined.device)
            dist.all_gather_into_tensor(combined_flat, combined, process_group, async_op=False)
            combined = torch.reshape(combined_flat, (world_size, combined_size))
            # world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
            mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
        else:
            # world_size * (2C + 1)
            combined_list = [
                torch.empty_like(combined) for _ in range(world_size)
            ]
            dist.all_gather(combined_list, combined, process_group, async_op=False)
            combined = torch.stack(combined_list, dim=0)
            # world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
            mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)

        if not (torch.cuda.is_available() and torch.cuda.is_current_stream_capturing()):
            # The lines below force a synchronization between CUDA and CPU, because
            # the shape of the result count_all depends on the values in mask tensor.
            # Such synchronizations break CUDA Graph capturing.
            # See https://github.com/pytorch/pytorch/issues/78549
            # FIXME: https://github.com/pytorch/pytorch/issues/78656 describes
            # a better longer-term solution.

            # remove stats from empty inputs
            mask = count_all.squeeze(-1) >= 1
            count_all = count_all[mask]
            mean_all = mean_all[mask]
            invstd_all = invstd_all[mask]

        # calculate global mean & invstd
        counts = count_all.view(-1)
        if running_mean is not None and counts.dtype != running_mean.dtype:
            counts = counts.to(running_mean.dtype)
        mean, invstd = torch.batch_norm_gather_stats_with_counts(
            input,
            mean_all,
            invstd_all,
            running_mean,
            running_var,
            momentum,
            eps,
            counts,
        )

        self.save_for_backward(input, weight, mean, invstd, count_all.to(torch.int32))
        self.process_group = process_group

        # apply element-wise normalization
        if input.numel() > 0:
            return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
        else:
            return torch.empty_like(input)

    @staticmethod
    def backward(self, grad_output):
        # Same layout normalization as in forward.
        if not (
            grad_output.is_contiguous(memory_format=torch.channels_last) or
            grad_output.is_contiguous(memory_format=torch.channels_last_3d)
        ):
            grad_output = grad_output.contiguous()
        saved_input, weight, mean, invstd, count_tensor = self.saved_tensors
        grad_input = grad_weight = grad_bias = None
        process_group = self.process_group

        if saved_input.numel() > 0:
            # calculate local stats as well as grad_weight / grad_bias
            sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
                grad_output,
                saved_input,
                mean,
                invstd,
                weight,
                self.needs_input_grad[0],
                self.needs_input_grad[1],
                self.needs_input_grad[2]
            )

            if self.needs_input_grad[0]:
                # synchronizing stats used to calculate input gradient.
                num_channels = sum_dy.shape[0]
                combined = torch.cat([sum_dy, sum_dy_xmu], dim=0)
                torch.distributed.all_reduce(
                    combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False)
                sum_dy, sum_dy_xmu = torch.split(combined, num_channels)

                # backward pass for gradient calculation
                if weight is not None and weight.dtype != mean.dtype:
                    weight = weight.to(mean.dtype)
                grad_input = torch.batch_norm_backward_elemt(
                    grad_output,
                    saved_input,
                    mean,
                    invstd,
                    weight,
                    sum_dy,
                    sum_dy_xmu,
                    count_tensor
                )
            # synchronizing of grad_weight / grad_bias is not needed as distributed
            # training would handle all reduce.
            if weight is None or not self.needs_input_grad[1]:
                grad_weight = None

            if weight is None or not self.needs_input_grad[2]:
                grad_bias = None
        else:
            # This process got an empty input tensor in the forward pass.
            # Although this process can directly set grad_input as an empty
            # tensor of zeros, it still needs to participate in the collective
            # communication to unblock its peers, as other peer processes might
            # have received non-empty inputs.
            num_channels = saved_input.shape[1]
            if self.needs_input_grad[0]:
                # launch all_reduce to unblock other peer processes
                combined = torch.zeros(
                    2 * num_channels,
                    dtype=saved_input.dtype,
                    device=saved_input.device
                )
                torch.distributed.all_reduce(
                    combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False)

            # Leave grad_input, grad_weight and grad_bias as None, which will be
            # interpreted by the autograd engine as Tensors full of zeros.

        return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
185
+
186
class CrossMapLRN2d(Function):
    """Autograd function for cross-channel local response normalization.

    Operates on 4D input (N, C, H, W): each channel is divided by a power of
    a windowed sum of squares over ``size`` neighbouring channels.  The scale
    tensor is computed with a sliding-window update (add next channel, remove
    the one that fell out of the window) so each channel costs O(1) extra work.
    """

    @staticmethod
    def forward(ctx, input, size, alpha=1e-4, beta=0.75, k=1):
        ctx.size = size
        ctx.alpha = alpha
        ctx.beta = beta
        ctx.k = k
        # ctx.scale is stored as a plain attribute (not save_for_backward)
        # because it is a derived buffer, not an input/output tensor.
        ctx.scale = None

        if input.dim() != 4:
            raise ValueError(f"CrossMapLRN2d: Expected input to be 4D, got {input.dim()}D instead.")

        ctx.scale = ctx.scale or input.new()
        output = input.new()

        batch_size = input.size(0)
        channels = input.size(1)
        input_height = input.size(2)
        input_width = input.size(3)

        output.resize_as_(input)
        ctx.scale.resize_as_(input)

        # use output storage as temporary buffer
        input_square = output
        torch.pow(input, 2, out=input_square)

        # Number of channels in the leading half-window (window is centred).
        pre_pad = int((ctx.size - 1) / 2 + 1)
        pre_pad_crop = channels if pre_pad > channels else pre_pad

        scale_first = ctx.scale.select(1, 0)
        scale_first.zero_()
        # compute first feature map normalization
        for c in range(pre_pad_crop):
            scale_first.add_(input_square.select(1, c))

        # reuse computations for next feature maps normalization
        # by adding the next feature map and removing the previous
        for c in range(1, channels):
            scale_previous = ctx.scale.select(1, c - 1)
            scale_current = ctx.scale.select(1, c)
            scale_current.copy_(scale_previous)
            if c < channels - pre_pad + 1:
                square_next = input_square.select(1, c + pre_pad - 1)
                scale_current.add_(square_next, alpha=1)

            if c > pre_pad:
                square_previous = input_square.select(1, c - pre_pad)
                scale_current.add_(square_previous, alpha=-1)

        # scale = k + alpha/size * (windowed sum of squares)
        ctx.scale.mul_(ctx.alpha / ctx.size).add_(ctx.k)

        # output = input * scale^(-beta); this overwrites the temporary
        # input_square data, which aliases `output` by construction above.
        torch.pow(ctx.scale, -ctx.beta, out=output)
        output.mul_(input)

        ctx.save_for_backward(input, output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, output = ctx.saved_tensors
        grad_input = grad_output.new()

        batch_size = input.size(0)
        channels = input.size(1)
        input_height = input.size(2)
        input_width = input.size(3)

        # Channel-padded work buffer so the sliding window never indexes
        # out of range.  (Name "paddded" is a historical typo kept as-is.)
        paddded_ratio = input.new(channels + ctx.size - 1, input_height,
                                  input_width)
        accum_ratio = input.new(input_height, input_width)

        cache_ratio_value = 2 * ctx.alpha * ctx.beta / ctx.size
        inversePrePad = int(ctx.size - (ctx.size - 1) / 2)

        # grad_input starts as scale^(-beta) * grad_output (the direct term).
        grad_input.resize_as_(input)
        torch.pow(ctx.scale, -ctx.beta, out=grad_input).mul_(grad_output)

        paddded_ratio.zero_()
        padded_ratio_center = paddded_ratio.narrow(0, inversePrePad,
                                                   channels)
        for n in range(batch_size):
            # ratio = grad_output * output / scale, written into the centre
            # of the padded buffer; the window sum is then maintained
            # incrementally per channel.
            torch.mul(grad_output[n], output[n], out=padded_ratio_center)
            padded_ratio_center.div_(ctx.scale[n])
            torch.sum(
                paddded_ratio.narrow(0, 0, ctx.size - 1), 0, keepdim=False, out=accum_ratio)
            for c in range(channels):
                accum_ratio.add_(paddded_ratio[c + ctx.size - 1])
                # subtract the cross-channel coupling term
                grad_input[n][c].addcmul_(input[n][c], accum_ratio, value=-cache_ratio_value)
                accum_ratio.add_(paddded_ratio[c], alpha=-1)

        return grad_input, None, None, None, None
279
+
280
class BackwardHookFunction(torch.autograd.Function):
    """Identity autograd function: returns its inputs unchanged.

    Tensors that do not require grad are explicitly marked
    non-differentiable; gradients pass straight through in backward.
    """

    @staticmethod
    def forward(ctx, *args):
        non_differentiable = tuple(arg for arg in args if not arg.requires_grad)
        ctx.mark_non_differentiable(*non_differentiable)
        return args

    @staticmethod
    def backward(ctx, *args):
        return args
mgm/lib/python3.10/site-packages/torch/nn/modules/batchnorm.py ADDED
@@ -0,0 +1,836 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, Any
2
+
3
+ import torch
4
+ from torch import Tensor
5
+ from torch.nn.parameter import Parameter, UninitializedParameter, UninitializedBuffer
6
+
7
+ from .. import functional as F
8
+ from .. import init
9
+ from ._functions import SyncBatchNorm as sync_batch_norm
10
+ from .lazy import LazyModuleMixin
11
+ from .module import Module
12
+
13
+ __all__ = ['BatchNorm1d', 'LazyBatchNorm1d', 'BatchNorm2d', 'LazyBatchNorm2d', 'BatchNorm3d',
14
+ 'LazyBatchNorm3d', 'SyncBatchNorm']
15
+
16
+
17
+ class _NormBase(Module):
18
+ """Common base of _InstanceNorm and _BatchNorm"""
19
+
20
+ _version = 2
21
+ __constants__ = ["track_running_stats", "momentum", "eps", "num_features", "affine"]
22
+ num_features: int
23
+ eps: float
24
+ momentum: float
25
+ affine: bool
26
+ track_running_stats: bool
27
+ # WARNING: weight and bias purposely not defined here.
28
+ # See https://github.com/pytorch/pytorch/issues/39670
29
+
30
    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
        momentum: float = 0.1,
        affine: bool = True,
        track_running_stats: bool = True,
        device=None,
        dtype=None
    ) -> None:
        """Set up affine parameters and (optionally) running-statistic buffers.

        Args:
            num_features: number of channels ``C`` the statistics cover.
            eps: value added to the denominator for numerical stability.
            momentum: factor used when updating the running statistics.
            affine: if True, create learnable ``weight``/``bias`` parameters.
            track_running_stats: if True, register ``running_mean``,
                ``running_var`` and ``num_batches_tracked`` buffers; if False,
                the same names are registered as ``None``.
            device: forwarded to the created tensors.
            dtype: forwarded to the created tensors.
        """
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            self.weight = Parameter(torch.empty(num_features, **factory_kwargs))
            self.bias = Parameter(torch.empty(num_features, **factory_kwargs))
        else:
            # Register as None so the attribute names still exist for
            # state_dict / repr machinery.
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features, **factory_kwargs))
            self.register_buffer('running_var', torch.ones(num_features, **factory_kwargs))
            self.running_mean: Optional[Tensor]
            self.running_var: Optional[Tensor]
            # num_batches_tracked is integral: keep torch.long and drop any
            # requested float dtype from factory_kwargs.
            self.register_buffer('num_batches_tracked',
                                 torch.tensor(0, dtype=torch.long,
                                              **{k: v for k, v in factory_kwargs.items() if k != 'dtype'}))
            self.num_batches_tracked: Optional[Tensor]
        else:
            self.register_buffer("running_mean", None)
            self.register_buffer("running_var", None)
            self.register_buffer("num_batches_tracked", None)
        self.reset_parameters()
67
+
68
+ def reset_running_stats(self) -> None:
69
+ if self.track_running_stats:
70
+ # running_mean/running_var/num_batches... are registered at runtime depending
71
+ # if self.track_running_stats is on
72
+ self.running_mean.zero_() # type: ignore[union-attr]
73
+ self.running_var.fill_(1) # type: ignore[union-attr]
74
+ self.num_batches_tracked.zero_() # type: ignore[union-attr,operator]
75
+
76
+ def reset_parameters(self) -> None:
77
+ self.reset_running_stats()
78
+ if self.affine:
79
+ init.ones_(self.weight)
80
+ init.zeros_(self.bias)
81
+
82
+ def _check_input_dim(self, input):
83
+ raise NotImplementedError
84
+
85
+ def extra_repr(self):
86
+ return (
87
+ "{num_features}, eps={eps}, momentum={momentum}, affine={affine}, "
88
+ "track_running_stats={track_running_stats}".format(**self.__dict__)
89
+ )
90
+
91
+ def _load_from_state_dict(
92
+ self,
93
+ state_dict,
94
+ prefix,
95
+ local_metadata,
96
+ strict,
97
+ missing_keys,
98
+ unexpected_keys,
99
+ error_msgs,
100
+ ):
101
+ version = local_metadata.get("version", None)
102
+
103
+ if (version is None or version < 2) and self.track_running_stats:
104
+ # at version 2: added num_batches_tracked buffer
105
+ # this should have a default value of 0
106
+ num_batches_tracked_key = prefix + "num_batches_tracked"
107
+ if num_batches_tracked_key not in state_dict:
108
+ state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)
109
+
110
+ super()._load_from_state_dict(
111
+ state_dict,
112
+ prefix,
113
+ local_metadata,
114
+ strict,
115
+ missing_keys,
116
+ unexpected_keys,
117
+ error_msgs,
118
+ )
119
+
120
+
121
+ class _BatchNorm(_NormBase):
122
+ def __init__(
123
+ self,
124
+ num_features: int,
125
+ eps: float = 1e-5,
126
+ momentum: float = 0.1,
127
+ affine: bool = True,
128
+ track_running_stats: bool = True,
129
+ device=None,
130
+ dtype=None
131
+ ) -> None:
132
+ factory_kwargs = {'device': device, 'dtype': dtype}
133
+ super().__init__(
134
+ num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
135
+ )
136
+
137
+ def forward(self, input: Tensor) -> Tensor:
138
+ self._check_input_dim(input)
139
+
140
+ # exponential_average_factor is set to self.momentum
141
+ # (when it is available) only so that it gets updated
142
+ # in ONNX graph when this node is exported to ONNX.
143
+ if self.momentum is None:
144
+ exponential_average_factor = 0.0
145
+ else:
146
+ exponential_average_factor = self.momentum
147
+
148
+ if self.training and self.track_running_stats:
149
+ # TODO: if statement only here to tell the jit to skip emitting this when it is None
150
+ if self.num_batches_tracked is not None: # type: ignore[has-type]
151
+ self.num_batches_tracked.add_(1) # type: ignore[has-type]
152
+ if self.momentum is None: # use cumulative moving average
153
+ exponential_average_factor = 1.0 / float(self.num_batches_tracked)
154
+ else: # use exponential moving average
155
+ exponential_average_factor = self.momentum
156
+
157
+ r"""
158
+ Decide whether the mini-batch stats should be used for normalization rather than the buffers.
159
+ Mini-batch stats are used in training mode, and in eval mode when buffers are None.
160
+ """
161
+ if self.training:
162
+ bn_training = True
163
+ else:
164
+ bn_training = (self.running_mean is None) and (self.running_var is None)
165
+
166
+ r"""
167
+ Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
168
+ passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
169
+ used for normalization (i.e. in eval mode when buffers are not None).
170
+ """
171
+ return F.batch_norm(
172
+ input,
173
+ # If buffers are not to be tracked, ensure that they won't be updated
174
+ self.running_mean
175
+ if not self.training or self.track_running_stats
176
+ else None,
177
+ self.running_var if not self.training or self.track_running_stats else None,
178
+ self.weight,
179
+ self.bias,
180
+ bn_training,
181
+ exponential_average_factor,
182
+ self.eps,
183
+ )
184
+
185
+
186
+ class _LazyNormBase(LazyModuleMixin, _NormBase):
187
+
188
+ weight: UninitializedParameter # type: ignore[assignment]
189
+ bias: UninitializedParameter # type: ignore[assignment]
190
+
191
+ def __init__(self, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,
192
+ device=None, dtype=None) -> None:
193
+ factory_kwargs = {'device': device, 'dtype': dtype}
194
+ super().__init__(
195
+ # affine and track_running_stats are hardcoded to False to
196
+ # avoid creating tensors that will soon be overwritten.
197
+ 0,
198
+ eps,
199
+ momentum,
200
+ False,
201
+ False,
202
+ **factory_kwargs,
203
+ )
204
+ self.affine = affine
205
+ self.track_running_stats = track_running_stats
206
+ if self.affine:
207
+ self.weight = UninitializedParameter(**factory_kwargs)
208
+ self.bias = UninitializedParameter(**factory_kwargs)
209
+ if self.track_running_stats:
210
+ self.running_mean = UninitializedBuffer(**factory_kwargs)
211
+ self.running_var = UninitializedBuffer(**factory_kwargs)
212
+ self.num_batches_tracked = torch.tensor(
213
+ 0, dtype=torch.long, **{k: v for k, v in factory_kwargs.items() if k != 'dtype'})
214
+
215
+ def reset_parameters(self) -> None:
216
+ if not self.has_uninitialized_params() and self.num_features != 0:
217
+ super().reset_parameters()
218
+
219
+ def initialize_parameters(self, input) -> None: # type: ignore[override]
220
+ if self.has_uninitialized_params():
221
+ self.num_features = input.shape[1]
222
+ if self.affine:
223
+ assert isinstance(self.weight, UninitializedParameter)
224
+ assert isinstance(self.bias, UninitializedParameter)
225
+ self.weight.materialize((self.num_features,))
226
+ self.bias.materialize((self.num_features,))
227
+ if self.track_running_stats:
228
+ self.running_mean.materialize((self.num_features,)) # type:ignore[union-attr]
229
+ self.running_var.materialize((self.num_features,)) # type:ignore[union-attr]
230
+ self.reset_parameters()
231
+
232
+
233
+ class BatchNorm1d(_BatchNorm):
234
+ r"""Applies Batch Normalization over a 2D or 3D input as described in the paper
235
+ `Batch Normalization: Accelerating Deep Network Training by Reducing
236
+ Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
237
+
238
+ .. math::
239
+
240
+ y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
241
+
242
+ The mean and standard-deviation are calculated per-dimension over
243
+ the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
244
+ of size `C` (where `C` is the number of features or channels of the input). By default, the
245
+ elements of :math:`\gamma` are set to 1 and the elements of :math:`\beta` are set to 0.
246
+ At train time in the forward pass, the standard-deviation is calculated via the biased estimator,
247
+ equivalent to ``torch.var(input, unbiased=False)``. However, the value stored in the
248
+ moving average of the standard-deviation is calculated via the unbiased estimator, equivalent to
249
+ ``torch.var(input, unbiased=True)``.
250
+
251
+ Also by default, during training this layer keeps running estimates of its
252
+ computed mean and variance, which are then used for normalization during
253
+ evaluation. The running estimates are kept with a default :attr:`momentum`
254
+ of 0.1.
255
+
256
+ If :attr:`track_running_stats` is set to ``False``, this layer then does not
257
+ keep running estimates, and batch statistics are instead used during
258
+ evaluation time as well.
259
+
260
+ .. note::
261
+ This :attr:`momentum` argument is different from one used in optimizer
262
+ classes and the conventional notion of momentum. Mathematically, the
263
+ update rule for running statistics here is
264
+ :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
265
+ where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
266
+ new observed value.
267
+
268
+ Because the Batch Normalization is done over the `C` dimension, computing statistics
269
+ on `(N, L)` slices, it's common terminology to call this Temporal Batch Normalization.
270
+
271
+ Args:
272
+ num_features: number of features or channels :math:`C` of the input
273
+ eps: a value added to the denominator for numerical stability.
274
+ Default: 1e-5
275
+ momentum: the value used for the running_mean and running_var
276
+ computation. Can be set to ``None`` for cumulative moving average
277
+ (i.e. simple average). Default: 0.1
278
+ affine: a boolean value that when set to ``True``, this module has
279
+ learnable affine parameters. Default: ``True``
280
+ track_running_stats: a boolean value that when set to ``True``, this
281
+ module tracks the running mean and variance, and when set to ``False``,
282
+ this module does not track such statistics, and initializes statistics
283
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
284
+ When these buffers are ``None``, this module always uses batch statistics.
285
+ in both training and eval modes. Default: ``True``
286
+
287
+ Shape:
288
+ - Input: :math:`(N, C)` or :math:`(N, C, L)`, where :math:`N` is the batch size,
289
+ :math:`C` is the number of features or channels, and :math:`L` is the sequence length
290
+ - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
291
+
292
+ Examples::
293
+
294
+ >>> # With Learnable Parameters
295
+ >>> m = nn.BatchNorm1d(100)
296
+ >>> # Without Learnable Parameters
297
+ >>> m = nn.BatchNorm1d(100, affine=False)
298
+ >>> input = torch.randn(20, 100)
299
+ >>> output = m(input)
300
+ """
301
+
302
+ def _check_input_dim(self, input):
303
+ if input.dim() != 2 and input.dim() != 3:
304
+ raise ValueError(
305
+ f"expected 2D or 3D input (got {input.dim()}D input)"
306
+ )
307
+
308
+
309
+ class LazyBatchNorm1d(_LazyNormBase, _BatchNorm):
310
+ r"""A :class:`torch.nn.BatchNorm1d` module with lazy initialization of
311
+ the ``num_features`` argument of the :class:`BatchNorm1d` that is inferred
312
+ from the ``input.size(1)``.
313
+ The attributes that will be lazily initialized are `weight`, `bias`,
314
+ `running_mean` and `running_var`.
315
+
316
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
317
+ on lazy modules and their limitations.
318
+
319
+ Args:
320
+ eps: a value added to the denominator for numerical stability.
321
+ Default: 1e-5
322
+ momentum: the value used for the running_mean and running_var
323
+ computation. Can be set to ``None`` for cumulative moving average
324
+ (i.e. simple average). Default: 0.1
325
+ affine: a boolean value that when set to ``True``, this module has
326
+ learnable affine parameters. Default: ``True``
327
+ track_running_stats: a boolean value that when set to ``True``, this
328
+ module tracks the running mean and variance, and when set to ``False``,
329
+ this module does not track such statistics, and initializes statistics
330
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
331
+ When these buffers are ``None``, this module always uses batch statistics.
332
+ in both training and eval modes. Default: ``True``
333
+ """
334
+
335
+ cls_to_become = BatchNorm1d # type: ignore[assignment]
336
+
337
+ def _check_input_dim(self, input):
338
+ if input.dim() != 2 and input.dim() != 3:
339
+ raise ValueError(
340
+ f"expected 2D or 3D input (got {input.dim()}D input)"
341
+ )
342
+
343
+
344
+ class BatchNorm2d(_BatchNorm):
345
+ r"""Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs
346
+ with additional channel dimension) as described in the paper
347
+ `Batch Normalization: Accelerating Deep Network Training by Reducing
348
+ Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
349
+
350
+ .. math::
351
+
352
+ y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
353
+
354
+ The mean and standard-deviation are calculated per-dimension over
355
+ the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
356
+ of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set
357
+ to 1 and the elements of :math:`\beta` are set to 0. At train time in the forward pass, the
358
+ standard-deviation is calculated via the biased estimator, equivalent to
359
+ ``torch.var(input, unbiased=False)``. However, the value stored in the moving average of the
360
+ standard-deviation is calculated via the unbiased estimator, equivalent to
361
+ ``torch.var(input, unbiased=True)``.
362
+
363
+ Also by default, during training this layer keeps running estimates of its
364
+ computed mean and variance, which are then used for normalization during
365
+ evaluation. The running estimates are kept with a default :attr:`momentum`
366
+ of 0.1.
367
+
368
+ If :attr:`track_running_stats` is set to ``False``, this layer then does not
369
+ keep running estimates, and batch statistics are instead used during
370
+ evaluation time as well.
371
+
372
+ .. note::
373
+ This :attr:`momentum` argument is different from one used in optimizer
374
+ classes and the conventional notion of momentum. Mathematically, the
375
+ update rule for running statistics here is
376
+ :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
377
+ where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
378
+ new observed value.
379
+
380
+ Because the Batch Normalization is done over the `C` dimension, computing statistics
381
+ on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization.
382
+
383
+ Args:
384
+ num_features: :math:`C` from an expected input of size
385
+ :math:`(N, C, H, W)`
386
+ eps: a value added to the denominator for numerical stability.
387
+ Default: 1e-5
388
+ momentum: the value used for the running_mean and running_var
389
+ computation. Can be set to ``None`` for cumulative moving average
390
+ (i.e. simple average). Default: 0.1
391
+ affine: a boolean value that when set to ``True``, this module has
392
+ learnable affine parameters. Default: ``True``
393
+ track_running_stats: a boolean value that when set to ``True``, this
394
+ module tracks the running mean and variance, and when set to ``False``,
395
+ this module does not track such statistics, and initializes statistics
396
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
397
+ When these buffers are ``None``, this module always uses batch statistics.
398
+ in both training and eval modes. Default: ``True``
399
+
400
+ Shape:
401
+ - Input: :math:`(N, C, H, W)`
402
+ - Output: :math:`(N, C, H, W)` (same shape as input)
403
+
404
+ Examples::
405
+
406
+ >>> # With Learnable Parameters
407
+ >>> m = nn.BatchNorm2d(100)
408
+ >>> # Without Learnable Parameters
409
+ >>> m = nn.BatchNorm2d(100, affine=False)
410
+ >>> input = torch.randn(20, 100, 35, 45)
411
+ >>> output = m(input)
412
+ """
413
+
414
+ def _check_input_dim(self, input):
415
+ if input.dim() != 4:
416
+ raise ValueError(f"expected 4D input (got {input.dim()}D input)")
417
+
418
+
419
+ class LazyBatchNorm2d(_LazyNormBase, _BatchNorm):
420
+ r"""A :class:`torch.nn.BatchNorm2d` module with lazy initialization of
421
+ the ``num_features`` argument of the :class:`BatchNorm2d` that is inferred
422
+ from the ``input.size(1)``.
423
+ The attributes that will be lazily initialized are `weight`, `bias`,
424
+ `running_mean` and `running_var`.
425
+
426
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
427
+ on lazy modules and their limitations.
428
+
429
+ Args:
430
+ eps: a value added to the denominator for numerical stability.
431
+ Default: 1e-5
432
+ momentum: the value used for the running_mean and running_var
433
+ computation. Can be set to ``None`` for cumulative moving average
434
+ (i.e. simple average). Default: 0.1
435
+ affine: a boolean value that when set to ``True``, this module has
436
+ learnable affine parameters. Default: ``True``
437
+ track_running_stats: a boolean value that when set to ``True``, this
438
+ module tracks the running mean and variance, and when set to ``False``,
439
+ this module does not track such statistics, and initializes statistics
440
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
441
+ When these buffers are ``None``, this module always uses batch statistics.
442
+ in both training and eval modes. Default: ``True``
443
+ """
444
+
445
+ cls_to_become = BatchNorm2d # type: ignore[assignment]
446
+
447
+ def _check_input_dim(self, input):
448
+ if input.dim() != 4:
449
+ raise ValueError(f"expected 4D input (got {input.dim()}D input)")
450
+
451
+
452
+ class BatchNorm3d(_BatchNorm):
453
+ r"""Applies Batch Normalization over a 5D input (a mini-batch of 3D inputs
454
+ with additional channel dimension) as described in the paper
455
+ `Batch Normalization: Accelerating Deep Network Training by Reducing
456
+ Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
457
+
458
+ .. math::
459
+
460
+ y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
461
+
462
+ The mean and standard-deviation are calculated per-dimension over
463
+ the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
464
+ of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set
465
+ to 1 and the elements of :math:`\beta` are set to 0. At train time in the forward pass, the
466
+ standard-deviation is calculated via the biased estimator, equivalent to
467
+ ``torch.var(input, unbiased=False)``. However, the value stored in the moving average of the
468
+ standard-deviation is calculated via the unbiased estimator, equivalent to
469
+ ``torch.var(input, unbiased=True)``.
470
+
471
+ Also by default, during training this layer keeps running estimates of its
472
+ computed mean and variance, which are then used for normalization during
473
+ evaluation. The running estimates are kept with a default :attr:`momentum`
474
+ of 0.1.
475
+
476
+ If :attr:`track_running_stats` is set to ``False``, this layer then does not
477
+ keep running estimates, and batch statistics are instead used during
478
+ evaluation time as well.
479
+
480
+ .. note::
481
+ This :attr:`momentum` argument is different from one used in optimizer
482
+ classes and the conventional notion of momentum. Mathematically, the
483
+ update rule for running statistics here is
484
+ :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
485
+ where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
486
+ new observed value.
487
+
488
+ Because the Batch Normalization is done over the `C` dimension, computing statistics
489
+ on `(N, D, H, W)` slices, it's common terminology to call this Volumetric Batch Normalization
490
+ or Spatio-temporal Batch Normalization.
491
+
492
+ Args:
493
+ num_features: :math:`C` from an expected input of size
494
+ :math:`(N, C, D, H, W)`
495
+ eps: a value added to the denominator for numerical stability.
496
+ Default: 1e-5
497
+ momentum: the value used for the running_mean and running_var
498
+ computation. Can be set to ``None`` for cumulative moving average
499
+ (i.e. simple average). Default: 0.1
500
+ affine: a boolean value that when set to ``True``, this module has
501
+ learnable affine parameters. Default: ``True``
502
+ track_running_stats: a boolean value that when set to ``True``, this
503
+ module tracks the running mean and variance, and when set to ``False``,
504
+ this module does not track such statistics, and initializes statistics
505
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
506
+ When these buffers are ``None``, this module always uses batch statistics.
507
+ in both training and eval modes. Default: ``True``
508
+
509
+ Shape:
510
+ - Input: :math:`(N, C, D, H, W)`
511
+ - Output: :math:`(N, C, D, H, W)` (same shape as input)
512
+
513
+ Examples::
514
+
515
+ >>> # With Learnable Parameters
516
+ >>> m = nn.BatchNorm3d(100)
517
+ >>> # Without Learnable Parameters
518
+ >>> m = nn.BatchNorm3d(100, affine=False)
519
+ >>> input = torch.randn(20, 100, 35, 45, 10)
520
+ >>> output = m(input)
521
+ """
522
+
523
+ def _check_input_dim(self, input):
524
+ if input.dim() != 5:
525
+ raise ValueError(f"expected 5D input (got {input.dim()}D input)")
526
+
527
+
528
+ class LazyBatchNorm3d(_LazyNormBase, _BatchNorm):
529
+ r"""A :class:`torch.nn.BatchNorm3d` module with lazy initialization of
530
+ the ``num_features`` argument of the :class:`BatchNorm3d` that is inferred
531
+ from the ``input.size(1)``.
532
+ The attributes that will be lazily initialized are `weight`, `bias`,
533
+ `running_mean` and `running_var`.
534
+
535
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
536
+ on lazy modules and their limitations.
537
+
538
+ Args:
539
+ eps: a value added to the denominator for numerical stability.
540
+ Default: 1e-5
541
+ momentum: the value used for the running_mean and running_var
542
+ computation. Can be set to ``None`` for cumulative moving average
543
+ (i.e. simple average). Default: 0.1
544
+ affine: a boolean value that when set to ``True``, this module has
545
+ learnable affine parameters. Default: ``True``
546
+ track_running_stats: a boolean value that when set to ``True``, this
547
+ module tracks the running mean and variance, and when set to ``False``,
548
+ this module does not track such statistics, and initializes statistics
549
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
550
+ When these buffers are ``None``, this module always uses batch statistics.
551
+ in both training and eval modes. Default: ``True``
552
+ """
553
+
554
+ cls_to_become = BatchNorm3d # type: ignore[assignment]
555
+
556
+ def _check_input_dim(self, input):
557
+ if input.dim() != 5:
558
+ raise ValueError(f"expected 5D input (got {input.dim()}D input)")
559
+
560
+
561
+ class SyncBatchNorm(_BatchNorm):
562
+ r"""Applies Batch Normalization over a N-Dimensional input (a mini-batch of [N-2]D inputs
563
+ with additional channel dimension) as described in the paper
564
+ `Batch Normalization: Accelerating Deep Network Training by Reducing
565
+ Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
566
+
567
+ .. math::
568
+
569
+ y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
570
+
571
+ The mean and standard-deviation are calculated per-dimension over all
572
+ mini-batches of the same process groups. :math:`\gamma` and :math:`\beta`
573
+ are learnable parameter vectors of size `C` (where `C` is the input size).
574
+ By default, the elements of :math:`\gamma` are sampled from
575
+ :math:`\mathcal{U}(0, 1)` and the elements of :math:`\beta` are set to 0.
576
+ The standard-deviation is calculated via the biased estimator, equivalent to
577
+ `torch.var(input, unbiased=False)`.
578
+
579
+ Also by default, during training this layer keeps running estimates of its
580
+ computed mean and variance, which are then used for normalization during
581
+ evaluation. The running estimates are kept with a default :attr:`momentum`
582
+ of 0.1.
583
+
584
+ If :attr:`track_running_stats` is set to ``False``, this layer then does not
585
+ keep running estimates, and batch statistics are instead used during
586
+ evaluation time as well.
587
+
588
+ .. note::
589
+ This :attr:`momentum` argument is different from one used in optimizer
590
+ classes and the conventional notion of momentum. Mathematically, the
591
+ update rule for running statistics here is
592
+ :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
593
+ where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
594
+ new observed value.
595
+
596
+ Because the Batch Normalization is done for each channel in the ``C`` dimension, computing
597
+ statistics on ``(N, +)`` slices, it's common terminology to call this Volumetric Batch
598
+ Normalization or Spatio-temporal Batch Normalization.
599
+
600
+ Currently :class:`SyncBatchNorm` only supports
601
+ :class:`~torch.nn.DistributedDataParallel` (DDP) with single GPU per process. Use
602
+ :meth:`torch.nn.SyncBatchNorm.convert_sync_batchnorm()` to convert
603
+ :attr:`BatchNorm*D` layer to :class:`SyncBatchNorm` before wrapping
604
+ Network with DDP.
605
+
606
+ Args:
607
+ num_features: :math:`C` from an expected input of size
608
+ :math:`(N, C, +)`
609
+ eps: a value added to the denominator for numerical stability.
610
+ Default: ``1e-5``
611
+ momentum: the value used for the running_mean and running_var
612
+ computation. Can be set to ``None`` for cumulative moving average
613
+ (i.e. simple average). Default: 0.1
614
+ affine: a boolean value that when set to ``True``, this module has
615
+ learnable affine parameters. Default: ``True``
616
+ track_running_stats: a boolean value that when set to ``True``, this
617
+ module tracks the running mean and variance, and when set to ``False``,
618
+ this module does not track such statistics, and initializes statistics
619
+ buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
620
+ When these buffers are ``None``, this module always uses batch statistics.
621
+ in both training and eval modes. Default: ``True``
622
+ process_group: synchronization of stats happen within each process group
623
+ individually. Default behavior is synchronization across the whole
624
+ world
625
+
626
+ Shape:
627
+ - Input: :math:`(N, C, +)`
628
+ - Output: :math:`(N, C, +)` (same shape as input)
629
+
630
+ .. note::
631
+ Synchronization of batchnorm statistics occurs only while training, i.e.
632
+ synchronization is disabled when ``model.eval()`` is set or if
633
+ ``self.training`` is otherwise ``False``.
634
+
635
+ Examples::
636
+
637
+ >>> # xdoctest: +SKIP
638
+ >>> # With Learnable Parameters
639
+ >>> m = nn.SyncBatchNorm(100)
640
+ >>> # creating process group (optional)
641
+ >>> # ranks is a list of int identifying rank ids.
642
+ >>> ranks = list(range(8))
643
+ >>> r1, r2 = ranks[:4], ranks[4:]
644
+ >>> # Note: every rank calls into new_group for every
645
+ >>> # process group created, even if that rank is not
646
+ >>> # part of the group.
647
+ >>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
648
+ >>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
649
+ >>> # Without Learnable Parameters
650
+ >>> m = nn.BatchNorm3d(100, affine=False, process_group=process_group)
651
+ >>> input = torch.randn(20, 100, 35, 45, 10)
652
+ >>> output = m(input)
653
+
654
+ >>> # network is nn.BatchNorm layer
655
+ >>> sync_bn_network = nn.SyncBatchNorm.convert_sync_batchnorm(network, process_group)
656
+ >>> # only single gpu per process is currently supported
657
+ >>> ddp_sync_bn_network = torch.nn.parallel.DistributedDataParallel(
658
+ >>> sync_bn_network,
659
+ >>> device_ids=[args.local_rank],
660
+ >>> output_device=args.local_rank)
661
+ """
662
+
663
+ def __init__(
664
+ self,
665
+ num_features: int,
666
+ eps: float = 1e-5,
667
+ momentum: float = 0.1,
668
+ affine: bool = True,
669
+ track_running_stats: bool = True,
670
+ process_group: Optional[Any] = None,
671
+ device=None,
672
+ dtype=None
673
+ ) -> None:
674
+ factory_kwargs = {'device': device, 'dtype': dtype}
675
+ super().__init__(
676
+ num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
677
+ )
678
+ self.process_group = process_group
679
+
680
+ def _check_input_dim(self, input):
681
+ if input.dim() < 2:
682
+ raise ValueError(
683
+ f"expected at least 2D input (got {input.dim()}D input)"
684
+ )
685
+
686
+ def _check_non_zero_input_channels(self, input):
687
+ if input.size(1) == 0:
688
+ raise ValueError(
689
+ "SyncBatchNorm number of input channels should be non-zero"
690
+ )
691
+
692
+ def forward(self, input: Tensor) -> Tensor:
693
+ self._check_input_dim(input)
694
+ self._check_non_zero_input_channels(input)
695
+
696
+ # exponential_average_factor is set to self.momentum
697
+ # (when it is available) only so that it gets updated
698
+ # in ONNX graph when this node is exported to ONNX.
699
+ if self.momentum is None:
700
+ exponential_average_factor = 0.0
701
+ else:
702
+ exponential_average_factor = self.momentum
703
+
704
+ if self.training and self.track_running_stats:
705
+ assert self.num_batches_tracked is not None
706
+ self.num_batches_tracked.add_(1)
707
+ if self.momentum is None: # use cumulative moving average
708
+ exponential_average_factor = 1.0 / self.num_batches_tracked.item()
709
+ else: # use exponential moving average
710
+ exponential_average_factor = self.momentum
711
+
712
+ r"""
713
+ Decide whether the mini-batch stats should be used for normalization rather than the buffers.
714
+ Mini-batch stats are used in training mode, and in eval mode when buffers are None.
715
+ """
716
+ if self.training:
717
+ bn_training = True
718
+ else:
719
+ bn_training = (self.running_mean is None) and (self.running_var is None)
720
+
721
+ r"""
722
+ Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
723
+ passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
724
+ used for normalization (i.e. in eval mode when buffers are not None).
725
+ """
726
+ # If buffers are not to be tracked, ensure that they won't be updated
727
+ running_mean = (
728
+ self.running_mean if not self.training or self.track_running_stats else None
729
+ )
730
+ running_var = (
731
+ self.running_var if not self.training or self.track_running_stats else None
732
+ )
733
+
734
+ # Don't sync batchnorm stats in inference mode (model.eval()).
735
+ need_sync = (bn_training and self.training and
736
+ torch.distributed.is_available() and torch.distributed.is_initialized())
737
+ if need_sync:
738
+ # currently only GPU/PrivateUse1 input is supported
739
+ if input.device.type not in ["cuda", torch._C._get_privateuse1_backend_name()]:
740
+ raise ValueError("SyncBatchNorm expected input tensor to be on GPU or "
741
+ f"{torch._C._get_privateuse1_backend_name()}")
742
+
743
+ process_group = torch.distributed.group.WORLD
744
+ if self.process_group:
745
+ process_group = self.process_group
746
+ world_size = torch.distributed.get_world_size(process_group)
747
+ need_sync = world_size > 1
748
+
749
+ # fallback to framework BN when synchronization is not necessary
750
+ if not need_sync:
751
+ return F.batch_norm(
752
+ input,
753
+ running_mean,
754
+ running_var,
755
+ self.weight,
756
+ self.bias,
757
+ bn_training,
758
+ exponential_average_factor,
759
+ self.eps,
760
+ )
761
+ else:
762
+ assert bn_training
763
+ return sync_batch_norm.apply(
764
+ input,
765
+ self.weight,
766
+ self.bias,
767
+ running_mean,
768
+ running_var,
769
+ self.eps,
770
+ exponential_average_factor,
771
+ process_group,
772
+ world_size,
773
+ )
774
+
775
+ @classmethod
776
+ def convert_sync_batchnorm(cls, module, process_group=None):
777
+ r"""Helper function to convert all :attr:`BatchNorm*D` layers in the model to
778
+ :class:`torch.nn.SyncBatchNorm` layers.
779
+
780
+ Args:
781
+ module (nn.Module): module containing one or more :attr:`BatchNorm*D` layers
782
+ process_group (optional): process group to scope synchronization,
783
+ default is the whole world
784
+
785
+ Returns:
786
+ The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm`
787
+ layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
788
+ a new :class:`torch.nn.SyncBatchNorm` layer object will be returned
789
+ instead.
790
+
791
+ Example::
792
+
793
+ >>> # Network with nn.BatchNorm layer
794
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
795
+ >>> module = torch.nn.Sequential(
796
+ >>> torch.nn.Linear(20, 100),
797
+ >>> torch.nn.BatchNorm1d(100),
798
+ >>> ).cuda()
799
+ >>> # creating process group (optional)
800
+ >>> # ranks is a list of int identifying rank ids.
801
+ >>> ranks = list(range(8))
802
+ >>> r1, r2 = ranks[:4], ranks[4:]
803
+ >>> # Note: every rank calls into new_group for every
804
+ >>> # process group created, even if that rank is not
805
+ >>> # part of the group.
806
+ >>> # xdoctest: +SKIP("distributed")
807
+ >>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
808
+ >>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
809
+ >>> sync_bn_module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
810
+
811
+ """
812
+ module_output = module
813
+ if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
814
+ module_output = torch.nn.SyncBatchNorm(
815
+ module.num_features,
816
+ module.eps,
817
+ module.momentum,
818
+ module.affine,
819
+ module.track_running_stats,
820
+ process_group,
821
+ )
822
+ if module.affine:
823
+ with torch.no_grad():
824
+ module_output.weight = module.weight
825
+ module_output.bias = module.bias
826
+ module_output.running_mean = module.running_mean
827
+ module_output.running_var = module.running_var
828
+ module_output.num_batches_tracked = module.num_batches_tracked
829
+ if hasattr(module, "qconfig"):
830
+ module_output.qconfig = module.qconfig
831
+ for name, child in module.named_children():
832
+ module_output.add_module(
833
+ name, cls.convert_sync_batchnorm(child, process_group)
834
+ )
835
+ del module
836
+ return module_output
mgm/lib/python3.10/site-packages/torch/nn/modules/channelshuffle.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .module import Module
2
+ from .. import functional as F
3
+
4
+ from torch import Tensor
5
+
6
+ __all__ = ['ChannelShuffle']
7
+
8
class ChannelShuffle(Module):
    r"""Interleave the channels of a tensor of shape :math:`(*, C, H, W)`.

    The :math:`C` channels are viewed as :math:`(\text{groups}, C / \text{groups})`,
    that view is transposed, and the result is flattened back, so channels from
    different groups become adjacent. The tensor's shape is unchanged.

    Args:
        groups (int): number of groups to divide channels in.

    Examples::

        >>> # xdoctest: +IGNORE_WANT("FIXME: incorrect want")
        >>> channel_shuffle = nn.ChannelShuffle(2)
        >>> input = torch.randn(1, 4, 2, 2)
        >>> print(input)
        [[[[1, 2],
           [3, 4]],
          [[5, 6],
           [7, 8]],
          [[9, 10],
           [11, 12]],
          [[13, 14],
           [15, 16]],
         ]]
        >>> output = channel_shuffle(input)
        >>> print(output)
        [[[[1, 2],
           [3, 4]],
          [[9, 10],
           [11, 12]],
          [[5, 6],
           [7, 8]],
          [[13, 14],
           [15, 16]],
         ]]
    """

    __constants__ = ['groups']
    # Number of channel groups used by the shuffle.
    groups: int

    def __init__(self, groups: int) -> None:
        super().__init__()
        self.groups = groups

    def forward(self, input: Tensor) -> Tensor:
        # All of the work is delegated to the functional implementation.
        return F.channel_shuffle(input, self.groups)

    def extra_repr(self) -> str:
        # Shown inside repr(), e.g. "ChannelShuffle(groups=2)".
        return 'groups={}'.format(self.groups)
mgm/lib/python3.10/site-packages/torch/nn/modules/conv.py ADDED
@@ -0,0 +1,1598 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import warnings
3
+
4
+ import torch
5
+ from torch import Tensor
6
+ from torch.nn.parameter import Parameter, UninitializedParameter
7
+ from .. import functional as F
8
+ from .. import init
9
+ from .lazy import LazyModuleMixin
10
+ from .module import Module
11
+ from .utils import _single, _pair, _triple, _reverse_repeat_tuple
12
+ from torch._torch_docs import reproducibility_notes
13
+
14
+ from ..common_types import _size_1_t, _size_2_t, _size_3_t
15
+ from typing import Optional, List, Tuple, Union
16
+
17
# Public API of this module.
__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d',
           'LazyConv1d', 'LazyConv2d', 'LazyConv3d', 'LazyConvTranspose1d', 'LazyConvTranspose2d',
           'LazyConvTranspose3d']

# Shared docstring fragments, interpolated via str.format(**convolution_notes)
# into the __doc__ of the Conv*d / ConvTranspose*d classes below.
convolution_notes = \
    {"groups_note": r"""* :attr:`groups` controls the connections between inputs and outputs.
      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
      :attr:`groups`. For example,

        * At groups=1, all inputs are convolved to all outputs.
        * At groups=2, the operation becomes equivalent to having two conv
          layers side by side, each seeing half the input channels
          and producing half the output channels, and both subsequently
          concatenated.
        * At groups= :attr:`in_channels`, each input channel is convolved with
          its own set of filters (of size
          :math:`\frac{\text{out\_channels}}{\text{in\_channels}}`).""",

     "depthwise_separable_note": r"""When `groups == in_channels` and `out_channels == K * in_channels`,
        where `K` is a positive integer, this operation is also known as a "depthwise convolution".

        In other words, for an input of size :math:`(N, C_{in}, L_{in})`,
        a depthwise convolution with a depthwise multiplier `K` can be performed with the arguments
        :math:`(C_\text{in}=C_\text{in}, C_\text{out}=C_\text{in} \times \text{K}, ..., \text{groups}=C_\text{in})`."""}  # noqa: B950
41
+
42
+
43
+
44
+
45
+
46
class _ConvNd(Module):
    """Shared base class for the N-dimensional convolution modules.

    Validates and stores the common configuration (channels, kernel size,
    stride, padding, dilation, groups, padding mode), allocates the
    ``weight`` and ``bias`` parameters, and initializes them. Subclasses
    implement :meth:`_conv_forward` to call the appropriate ``F.conv*d``.
    """

    __constants__ = ['stride', 'padding', 'dilation', 'groups',
                     'padding_mode', 'output_padding', 'in_channels',
                     'out_channels', 'kernel_size']
    __annotations__ = {'bias': Optional[torch.Tensor]}

    # Abstract hook: subclasses perform the actual convolution here.
    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor:  # type: ignore[empty-body]
        ...

    in_channels: int
    # Padding layout expected by F.pad (last dim first, each repeated twice).
    _reversed_padding_repeated_twice: List[int]
    out_channels: int
    kernel_size: Tuple[int, ...]
    stride: Tuple[int, ...]
    # Either a per-dimension tuple or one of the strings 'same' / 'valid'.
    padding: Union[str, Tuple[int, ...]]
    dilation: Tuple[int, ...]
    # True for the ConvTranspose*d subclasses (weight layout differs).
    transposed: bool
    output_padding: Tuple[int, ...]
    groups: int
    padding_mode: str
    weight: Tensor
    bias: Optional[Tensor]

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Tuple[int, ...],
                 stride: Tuple[int, ...],
                 padding: Tuple[int, ...],
                 dilation: Tuple[int, ...],
                 transposed: bool,
                 output_padding: Tuple[int, ...],
                 groups: int,
                 bias: bool,
                 padding_mode: str,
                 device=None,
                 dtype=None) -> None:
        """Validate configuration, create the weight/bias parameters and reset them.

        All spatial arguments arrive pre-normalized to tuples (or a padding
        string) by the concrete subclass constructors.
        """
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        if groups <= 0:
            raise ValueError('groups must be a positive integer')
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        valid_padding_strings = {'same', 'valid'}
        if isinstance(padding, str):
            if padding not in valid_padding_strings:
                raise ValueError(
                    f"Invalid padding string {padding!r}, should be one of {valid_padding_strings}")
            if padding == 'same' and any(s != 1 for s in stride):
                raise ValueError("padding='same' is not supported for strided convolutions")

        valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
        if padding_mode not in valid_padding_modes:
            raise ValueError(f"padding_mode must be one of {valid_padding_modes}, but got padding_mode='{padding_mode}'")
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        self.padding_mode = padding_mode
        # `_reversed_padding_repeated_twice` is the padding to be passed to
        # `F.pad` if needed (e.g., for non-zero padding types that are
        # implemented as two ops: padding + conv). `F.pad` accepts paddings in
        # reverse order than the dimension.
        if isinstance(self.padding, str):
            self._reversed_padding_repeated_twice = [0, 0] * len(kernel_size)
            if padding == 'same':
                # Spread the total 'same' padding for each dim over its two
                # sides, putting the smaller half on the left when it is odd.
                for d, k, i in zip(dilation, kernel_size,
                                   range(len(kernel_size) - 1, -1, -1)):
                    total_padding = d * (k - 1)
                    left_pad = total_padding // 2
                    self._reversed_padding_repeated_twice[2 * i] = left_pad
                    self._reversed_padding_repeated_twice[2 * i + 1] = (
                        total_padding - left_pad)
        else:
            self._reversed_padding_repeated_twice = _reverse_repeat_tuple(self.padding, 2)

        if transposed:
            # Transposed convs store the weight as (in, out // groups, *k).
            self.weight = Parameter(torch.empty(
                (in_channels, out_channels // groups, *kernel_size), **factory_kwargs))
        else:
            self.weight = Parameter(torch.empty(
                (out_channels, in_channels // groups, *kernel_size), **factory_kwargs))
        if bias:
            self.bias = Parameter(torch.empty(out_channels, **factory_kwargs))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Re-initialize ``weight`` and ``bias`` in place."""
        # Setting a=sqrt(5) in kaiming_uniform is the same as initializing with
        # uniform(-1/sqrt(k), 1/sqrt(k)), where k = weight.size(1) * prod(*kernel_size)
        # For more details see: https://github.com/pytorch/pytorch/issues/15314#issuecomment-477448573
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            if fan_in != 0:
                bound = 1 / math.sqrt(fan_in)
                init.uniform_(self.bias, -bound, bound)

    def extra_repr(self):
        """Build the argument summary shown in repr(); defaults are omitted."""
        s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
             ', stride={stride}')
        if self.padding != (0,) * len(self.padding):
            s += ', padding={padding}'
        if self.dilation != (1,) * len(self.dilation):
            s += ', dilation={dilation}'
        if self.output_padding != (0,) * len(self.output_padding):
            s += ', output_padding={output_padding}'
        if self.groups != 1:
            s += ', groups={groups}'
        if self.bias is None:
            s += ', bias=False'
        if self.padding_mode != 'zeros':
            s += ', padding_mode={padding_mode}'
        return s.format(**self.__dict__)

    def __setstate__(self, state):
        super().__setstate__(state)
        # Checkpoints created before `padding_mode` existed lack the
        # attribute; restore the historical default on unpickling.
        if not hasattr(self, 'padding_mode'):
            self.padding_mode = 'zeros'
175
+
176
+
177
class Conv1d(_ConvNd):
    __doc__ = r"""Applies a 1D convolution over an input signal composed of several input
    planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{\text{in}}, L)` and output :math:`(N, C_{\text{out}}, L_{\text{out}})` can be
    precisely described as:

    .. math::
        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{in} - 1} \text{weight}(C_{\text{out}_j}, k)
        \star \text{input}(N_i, k)

    where :math:`\star` is the valid `cross-correlation`_ operator,
    :math:`N` is a batch size, :math:`C` denotes a number of channels,
    :math:`L` is a length of signal sequence.
    """ + r"""

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    * :attr:`stride` controls the stride for the cross-correlation, a single
      number or a one-element tuple.

    * :attr:`padding` controls the amount of padding applied to the input. It
      can be either a string {{'valid', 'same'}} or a tuple of ints giving the
      amount of implicit padding applied on both sides.

    * :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this `link`_
      has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    Note:
        {depthwise_separable_note}
    Note:
        {cudnn_reproducibility_note}

    Note:
        ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
        the input so the output has the shape as the input. However, this mode
        doesn't support any stride values other than 1.

    Note:
        This module supports complex data types i.e. ``complex32, complex64, complex128``.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to both sides of
            the input. Default: 0
        padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel
            elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``

    """.format(**reproducibility_notes, **convolution_notes) + r"""

    Shape:
        - Input: :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`
        - Output: :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`, where

          .. math::
              L_{out} = \left\lfloor\frac{L_{in} + 2 \times \text{padding} - \text{dilation}
                        \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\text{out\_channels},
            \frac{\text{in\_channels}}{\text{groups}}, \text{kernel\_size})`.
            The values of these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \text{kernel\_size}}`
        bias (Tensor): the learnable bias of the module of shape
            (out_channels). If :attr:`bias` is ``True``, then the values of these weights are
            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \text{kernel\_size}}`

    Examples::

        >>> m = nn.Conv1d(16, 33, 3, stride=2)
        >>> input = torch.randn(20, 16, 50)
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_1_t,
        stride: _size_1_t = 1,
        padding: Union[str, _size_1_t] = 0,
        dilation: _size_1_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',  # TODO: refine this type
        device=None,
        dtype=None
    ) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        # we create new variables below to make mypy happy since kernel_size has
        # type Union[int, Tuple[int]] and kernel_size_ has type Tuple[int]
        kernel_size_ = _single(kernel_size)
        stride_ = _single(stride)
        # A string padding ('same'/'valid') is passed through unchanged.
        padding_ = padding if isinstance(padding, str) else _single(padding)
        dilation_ = _single(dilation)
        super().__init__(
            in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
            False, _single(0), groups, bias, padding_mode, **factory_kwargs)

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
        # Non-'zeros' padding modes are implemented as an explicit F.pad
        # followed by an unpadded conv1d.
        if self.padding_mode != 'zeros':
            return F.conv1d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                            weight, bias, self.stride,
                            _single(0), self.dilation, self.groups)
        return F.conv1d(input, weight, bias, self.stride,
                        self.padding, self.dilation, self.groups)

    def forward(self, input: Tensor) -> Tensor:
        return self._conv_forward(input, self.weight, self.bias)
311
+
312
+
313
class Conv2d(_ConvNd):
    __doc__ = r"""Applies a 2D convolution over an input signal composed of several input
    planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{\text{in}}, H, W)` and output :math:`(N, C_{\text{out}}, H_{\text{out}}, W_{\text{out}})`
    can be precisely described as:

    .. math::
        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)


    where :math:`\star` is the valid 2D `cross-correlation`_ operator,
    :math:`N` is a batch size, :math:`C` denotes a number of channels,
    :math:`H` is a height of input planes in pixels, and :math:`W` is
    width in pixels.
    """ + r"""

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    * :attr:`stride` controls the stride for the cross-correlation, a single
      number or a tuple.

    * :attr:`padding` controls the amount of padding applied to the input. It
      can be either a string {{'valid', 'same'}} or an int / a tuple of ints giving the
      amount of implicit padding applied on both sides.

    * :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this `link`_
      has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:

        - a single ``int`` -- in which case the same value is used for the height and width dimension
        - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
          and the second `int` for the width dimension

    Note:
        {depthwise_separable_note}

    Note:
        {cudnn_reproducibility_note}

    Note:
        ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
        the input so the output has the shape as the input. However, this mode
        doesn't support any stride values other than 1.

    Note:
        This module supports complex data types i.e. ``complex32, complex64, complex128``.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all four sides of
            the input. Default: 0
        padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
    """.format(**reproducibility_notes, **convolution_notes) + r"""

    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`, where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
                        \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
                        \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
            :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
            The values of these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
        bias (Tensor): the learnable bias of the module of shape
            (out_channels). If :attr:`bias` is ``True``,
            then the values of these weights are
            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`

    Examples:

        >>> # With square kernels and equal stride
        >>> m = nn.Conv2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: Union[str, _size_2_t] = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',  # TODO: refine this type
        device=None,
        dtype=None
    ) -> None:
        # Normalize scalar spatial arguments to 2-tuples; a string padding
        # ('same' / 'valid') is forwarded untouched to the base class.
        norm_kernel = _pair(kernel_size)
        norm_stride = _pair(stride)
        norm_dilation = _pair(dilation)
        norm_padding = padding if isinstance(padding, str) else _pair(padding)
        super().__init__(
            in_channels, out_channels, norm_kernel, norm_stride, norm_padding,
            norm_dilation, False, _pair(0), groups, bias, padding_mode,
            device=device, dtype=dtype)

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
        """Run the 2D convolution, emulating non-'zeros' padding modes via F.pad."""
        if self.padding_mode == 'zeros':
            return F.conv2d(input, weight, bias, self.stride,
                            self.padding, self.dilation, self.groups)
        # reflect/replicate/circular padding: pad explicitly, then convolve
        # with zero implicit padding.
        padded = F.pad(input, self._reversed_padding_repeated_twice,
                       mode=self.padding_mode)
        return F.conv2d(padded, weight, bias, self.stride,
                        _pair(0), self.dilation, self.groups)

    def forward(self, input: Tensor) -> Tensor:
        return self._conv_forward(input, self.weight, self.bias)
461
+
462
class Conv3d(_ConvNd):
    __doc__ = r"""Applies a 3D convolution over an input signal composed of several input
    planes.

    In the simplest case, the output value of the layer with input size :math:`(N, C_{in}, D, H, W)`
    and output :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` can be precisely described as:

    .. math::
        out(N_i, C_{out_j}) = bias(C_{out_j}) +
                                \sum_{k = 0}^{C_{in} - 1} weight(C_{out_j}, k) \star input(N_i, k)

    where :math:`\star` is the valid 3D `cross-correlation`_ operator
    """ + r"""

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    * :attr:`stride` controls the stride for the cross-correlation.

    * :attr:`padding` controls the amount of padding applied to the input. It
      can be either a string {{'valid', 'same'}} or a tuple of ints giving the
      amount of implicit padding applied on both sides.

    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:

        - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
        - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
          the second `int` for the height dimension and the third `int` for the width dimension

    Note:
        {depthwise_separable_note}

    Note:
        {cudnn_reproducibility_note}

    Note:
        ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
        the input so the output has the shape as the input. However, this mode
        doesn't support any stride values other than 1.

    Note:
        This module supports complex data types i.e. ``complex32, complex64, complex128``.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all six sides of
            the input. Default: 0
        padding_mode (str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
    """.format(**reproducibility_notes, **convolution_notes) + r"""

    Shape:
        - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` or :math:`(C_{out}, D_{out}, H_{out}, W_{out})`,
          where

          .. math::
              D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
                    \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
                    \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2]
                    \times (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
            :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
            The values of these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
        bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
            then the values of these weights are
            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.Conv3d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
        >>> input = torch.randn(20, 16, 10, 50, 100)
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_3_t,
        stride: _size_3_t = 1,
        padding: Union[str, _size_3_t] = 0,
        dilation: _size_3_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',
        device=None,
        dtype=None
    ) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        # Normalize scalars to 3-tuples; a string padding passes through.
        kernel_size_ = _triple(kernel_size)
        stride_ = _triple(stride)
        padding_ = padding if isinstance(padding, str) else _triple(padding)
        dilation_ = _triple(dilation)
        super().__init__(
            in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
            False, _triple(0), groups, bias, padding_mode, **factory_kwargs)

    def _conv_forward(self, input: Tensor, weight: Tensor, bias: Optional[Tensor]):
        # Non-'zeros' padding modes are implemented as an explicit F.pad
        # followed by an unpadded conv3d.
        if self.padding_mode != "zeros":
            return F.conv3d(
                F.pad(
                    input, self._reversed_padding_repeated_twice, mode=self.padding_mode
                ),
                weight,
                bias,
                self.stride,
                _triple(0),
                self.dilation,
                self.groups,
            )
        return F.conv3d(
            input, weight, bias, self.stride, self.padding, self.dilation, self.groups
        )

    def forward(self, input: Tensor) -> Tensor:
        return self._conv_forward(input, self.weight, self.bias)
611
+
612
+
613
+
614
class _ConvTransposeNd(_ConvNd):
    """Shared base class for the ConvTranspose*d modules.

    Rejects non-'zeros' padding modes (unsupported for transposed
    convolution) and provides :meth:`_output_padding`, which resolves an
    optional user-requested ``output_size`` into per-dimension output
    padding.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 padding, dilation, transposed, output_padding,
                 groups, bias, padding_mode, device=None, dtype=None) -> None:
        if padding_mode != 'zeros':
            raise ValueError(f'Only "zeros" padding mode is supported for {self.__class__.__name__}')

        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__(
            in_channels, out_channels, kernel_size, stride,
            padding, dilation, transposed, output_padding,
            groups, bias, padding_mode, **factory_kwargs)

    # dilation being an optional parameter is for backwards
    # compatibility
    def _output_padding(self, input: Tensor, output_size: Optional[List[int]],
                        stride: List[int], padding: List[int], kernel_size: List[int],
                        num_spatial_dims: int, dilation: Optional[List[int]] = None) -> List[int]:
        """Return the per-dimension output padding for a transposed conv.

        If ``output_size`` is None, the module's stored ``output_padding``
        is used. Otherwise ``output_size`` is validated against the range
        of sizes producible for ``input`` and converted into the extra
        padding needed to hit the requested size exactly; a ValueError is
        raised when the requested size is out of range or has the wrong
        number of elements.
        """
        if output_size is None:
            ret = _single(self.output_padding)  # converting to list if was not already
        else:
            has_batch_dim = input.dim() == num_spatial_dims + 2
            num_non_spatial_dims = 2 if has_batch_dim else 1
            # Accept output_size given either with or without the leading
            # batch/channel dimensions; strip them if present.
            if len(output_size) == num_non_spatial_dims + num_spatial_dims:
                output_size = output_size[num_non_spatial_dims:]
            if len(output_size) != num_spatial_dims:
                raise ValueError(
                    "ConvTranspose{}D: for {}D input, output_size must have {} or {} elements (got {})"
                    .format(num_spatial_dims, input.dim(), num_spatial_dims,
                            num_non_spatial_dims + num_spatial_dims, len(output_size)))

            # For each spatial dim, the achievable output size lies in
            # [min_size, min_size + stride - 1].
            min_sizes = torch.jit.annotate(List[int], [])
            max_sizes = torch.jit.annotate(List[int], [])
            for d in range(num_spatial_dims):
                dim_size = ((input.size(d + num_non_spatial_dims) - 1) * stride[d] -
                            2 * padding[d] +
                            (dilation[d] if dilation is not None else 1) * (kernel_size[d] - 1) + 1)
                min_sizes.append(dim_size)
                max_sizes.append(min_sizes[d] + stride[d] - 1)

            for i in range(len(output_size)):
                size = output_size[i]
                min_size = min_sizes[i]
                max_size = max_sizes[i]
                if size < min_size or size > max_size:
                    raise ValueError(
                        f"requested an output size of {output_size}, but valid sizes range "
                        f"from {min_sizes} to {max_sizes} (for an input of {input.size()[2:]})")

            # Output padding is the shortfall between the requested size and
            # the minimum achievable size in each dimension.
            res = torch.jit.annotate(List[int], [])
            for d in range(num_spatial_dims):
                res.append(output_size[d] - min_sizes[d])

            ret = res
        return ret
669
+
670
+
671
class ConvTranspose1d(_ConvTransposeNd):
    __doc__ = r"""Applies a 1D transposed convolution operator over an input image
    composed of several input planes.

    This module can be seen as the gradient of Conv1d with respect to its input.
    It is also known as a fractionally-strided convolution or
    a deconvolution (although it is not an actual deconvolution operation as it does
    not compute a true inverse of convolution). For more information, see the visualizations
    `here`_ and the `Deconvolutional Networks`_ paper.

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    * :attr:`stride` controls the stride for the cross-correlation.

    * :attr:`padding` controls the amount of implicit zero padding on both
      sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
      below for details.

    * :attr:`output_padding` controls the additional size added to one side
      of the output shape. See note below for details.

    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    Note:
        The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
        amount of zero padding to both sizes of the input. This is set so that
        when a :class:`~torch.nn.Conv1d` and a :class:`~torch.nn.ConvTranspose1d`
        are initialized with same parameters, they are inverses of each other in
        regard to the input and output shapes. However, when ``stride > 1``,
        :class:`~torch.nn.Conv1d` maps multiple input shapes to the same output
        shape. :attr:`output_padding` is provided to resolve this ambiguity by
        effectively increasing the calculated output shape on one side. Note
        that :attr:`output_padding` is only used to find output shape, but does
        not actually add zero-padding to output.

    Note:
        In some circumstances when using the CUDA backend with CuDNN, this operator
        may select a nondeterministic algorithm to increase performance. If this is
        undesirable, you can try to make the operation deterministic (potentially at
        a performance cost) by setting ``torch.backends.cudnn.deterministic =
        True``.
        Please see the notes on :doc:`/notes/randomness` for background.


    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
            will be added to both sides of the input. Default: 0
        output_padding (int or tuple, optional): Additional size added to one side
            of the output shape. Default: 0
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
    """.format(**reproducibility_notes, **convolution_notes) + r"""

    Shape:
        - Input: :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`
        - Output: :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`, where

          .. math::
              L_{out} = (L_{in} - 1) \times \text{stride} - 2 \times \text{padding} + \text{dilation}
                        \times (\text{kernel\_size} - 1) + \text{output\_padding} + 1

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
            :math:`\text{kernel\_size})`.
            The values of these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{out} * \text{kernel\_size}}`
        bias (Tensor): the learnable bias of the module of shape (out_channels).
            If :attr:`bias` is ``True``, then the values of these weights are
            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{out} * \text{kernel\_size}}`

    .. _`here`:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md

    .. _`Deconvolutional Networks`:
        https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_1_t,
        stride: _size_1_t = 1,
        padding: _size_1_t = 0,
        output_padding: _size_1_t = 0,
        groups: int = 1,
        bias: bool = True,
        dilation: _size_1_t = 1,
        padding_mode: str = 'zeros',
        device=None,
        dtype=None
    ) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        # Normalize scalar-or-tuple arguments to 1-element tuples.
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = _single(padding)
        dilation = _single(dilation)
        output_padding = _single(output_padding)
        # transposed=True selects the (in, out // groups, *k) weight layout.
        super().__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias, padding_mode, **factory_kwargs)

    def forward(self, input: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
        if self.padding_mode != 'zeros':
            raise ValueError('Only `zeros` padding mode is supported for ConvTranspose1d')

        assert isinstance(self.padding, tuple)
        # One cannot replace List by Tuple or Sequence in "_output_padding" because
        # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
        num_spatial_dims = 1
        # _output_padding resolves the output-size ambiguity for stride > 1
        # when the caller passes an explicit output_size.
        output_padding = self._output_padding(
            input, output_size, self.stride, self.padding, self.kernel_size,  # type: ignore[arg-type]
            num_spatial_dims, self.dilation)  # type: ignore[arg-type]
        return F.conv_transpose1d(
            input, self.weight, self.bias, self.stride, self.padding,
            output_padding, self.groups, self.dilation)
800
+
801
+
802
class ConvTranspose2d(_ConvTransposeNd):
    __doc__ = r"""Applies a 2D transposed convolution operator over an input image
    composed of several input planes.

    This module can be seen as the gradient of Conv2d with respect to its input.
    It is also known as a fractionally-strided convolution or
    a deconvolution (although it is not an actual deconvolution operation as it does
    not compute a true inverse of convolution). For more information, see the visualizations
    `here`_ and the `Deconvolutional Networks`_ paper.

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    * :attr:`stride` controls the stride for the cross-correlation.

    * :attr:`padding` controls the amount of implicit zero padding on both
      sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
      below for details.

    * :attr:`output_padding` controls the additional size added to one side
      of the output shape. See note below for details.

    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
    can either be:

        - a single ``int`` -- in which case the same value is used for the height and width dimensions
        - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
          and the second `int` for the width dimension

    Note:
        The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
        amount of zero padding to both sizes of the input. This is set so that
        when a :class:`~torch.nn.Conv2d` and a :class:`~torch.nn.ConvTranspose2d`
        are initialized with same parameters, they are inverses of each other in
        regard to the input and output shapes. However, when ``stride > 1``,
        :class:`~torch.nn.Conv2d` maps multiple input shapes to the same output
        shape. :attr:`output_padding` is provided to resolve this ambiguity by
        effectively increasing the calculated output shape on one side. Note
        that :attr:`output_padding` is only used to find output shape, but does
        not actually add zero-padding to output.

    Note:
        {cudnn_reproducibility_note}

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
            will be added to both sides of each dimension in the input. Default: 0
        output_padding (int or tuple, optional): Additional size added to one side
            of each dimension in the output shape. Default: 0
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
    """.format(**reproducibility_notes, **convolution_notes) + r"""

    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`, where

        .. math::
              H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
                        \times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
        .. math::
              W_{out} = (W_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
                        \times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
            :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
            The values of these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
        bias (Tensor): the learnable bias of the module of shape (out_channels)
            If :attr:`bias` is ``True``, then the values of these weights are
            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.ConvTranspose2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> output = m(input)
        >>> # exact output size can be also specified as an argument
        >>> input = torch.randn(1, 16, 12, 12)
        >>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(input)
        >>> h.size()
        torch.Size([1, 16, 6, 6])
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12])

    .. _`here`:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md

    .. _`Deconvolutional Networks`:
        https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        output_padding: _size_2_t = 0,
        groups: int = 1,
        bias: bool = True,
        dilation: _size_2_t = 1,
        padding_mode: str = 'zeros',
        device=None,
        dtype=None
    ) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        # Normalize scalar-or-tuple arguments to 2-element tuples.
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        output_padding = _pair(output_padding)
        # transposed=True selects the (in, out // groups, *k) weight layout.
        super().__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias, padding_mode, **factory_kwargs)

    def forward(self, input: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
        if self.padding_mode != 'zeros':
            raise ValueError('Only `zeros` padding mode is supported for ConvTranspose2d')

        assert isinstance(self.padding, tuple)
        # One cannot replace List by Tuple or Sequence in "_output_padding" because
        # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
        num_spatial_dims = 2
        # _output_padding resolves the output-size ambiguity for stride > 1
        # when the caller passes an explicit output_size.
        output_padding = self._output_padding(
            input, output_size, self.stride, self.padding, self.kernel_size,  # type: ignore[arg-type]
            num_spatial_dims, self.dilation)  # type: ignore[arg-type]

        return F.conv_transpose2d(
            input, self.weight, self.bias, self.stride, self.padding,
            output_padding, self.groups, self.dilation)
955
+
956
+
957
class ConvTranspose3d(_ConvTransposeNd):
    __doc__ = r"""Applies a 3D transposed convolution operator over an input image composed of several input
    planes.
    The transposed convolution operator multiplies each input value element-wise by a learnable kernel,
    and sums over the outputs from all input feature planes.

    This module can be seen as the gradient of Conv3d with respect to its input.
    It is also known as a fractionally-strided convolution or
    a deconvolution (although it is not an actual deconvolution operation as it does
    not compute a true inverse of convolution). For more information, see the visualizations
    `here`_ and the `Deconvolutional Networks`_ paper.

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    * :attr:`stride` controls the stride for the cross-correlation.

    * :attr:`padding` controls the amount of implicit zero padding on both
      sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
      below for details.

    * :attr:`output_padding` controls the additional size added to one side
      of the output shape. See note below for details.

    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
    can either be:

        - a single ``int`` -- in which case the same value is used for the depth, height and width dimensions
        - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
          the second `int` for the height dimension and the third `int` for the width dimension

    Note:
        The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
        amount of zero padding to both sizes of the input. This is set so that
        when a :class:`~torch.nn.Conv3d` and a :class:`~torch.nn.ConvTranspose3d`
        are initialized with same parameters, they are inverses of each other in
        regard to the input and output shapes. However, when ``stride > 1``,
        :class:`~torch.nn.Conv3d` maps multiple input shapes to the same output
        shape. :attr:`output_padding` is provided to resolve this ambiguity by
        effectively increasing the calculated output shape on one side. Note
        that :attr:`output_padding` is only used to find output shape, but does
        not actually add zero-padding to output.

    Note:
        {cudnn_reproducibility_note}

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
            will be added to both sides of each dimension in the input. Default: 0
        output_padding (int or tuple, optional): Additional size added to one side
            of each dimension in the output shape. Default: 0
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
    """.format(**reproducibility_notes, **convolution_notes) + r"""

    Shape:
        - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` or
          :math:`(C_{out}, D_{out}, H_{out}, W_{out})`, where

        .. math::
              D_{out} = (D_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
                        \times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
        .. math::
              H_{out} = (H_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
                        \times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1
        .. math::
              W_{out} = (W_{in} - 1) \times \text{stride}[2] - 2 \times \text{padding}[2] + \text{dilation}[2]
                        \times (\text{kernel\_size}[2] - 1) + \text{output\_padding}[2] + 1


    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
            :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
            The values of these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
        bias (Tensor): the learnable bias of the module of shape (out_channels)
            If :attr:`bias` is ``True``, then the values of these weights are
            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.ConvTranspose3d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(0, 4, 2))
        >>> input = torch.randn(20, 16, 10, 50, 100)
        >>> output = m(input)

    .. _`here`:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md

    .. _`Deconvolutional Networks`:
        https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_3_t,
        stride: _size_3_t = 1,
        padding: _size_3_t = 0,
        output_padding: _size_3_t = 0,
        groups: int = 1,
        bias: bool = True,
        dilation: _size_3_t = 1,
        padding_mode: str = 'zeros',
        device=None,
        dtype=None
    ) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        # Normalize scalar-or-tuple arguments to 3-element tuples.
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)
        dilation = _triple(dilation)
        output_padding = _triple(output_padding)
        # transposed=True selects the (in, out // groups, *k) weight layout.
        super().__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            True, output_padding, groups, bias, padding_mode, **factory_kwargs)

    def forward(self, input: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
        if self.padding_mode != 'zeros':
            raise ValueError('Only `zeros` padding mode is supported for ConvTranspose3d')

        assert isinstance(self.padding, tuple)
        # One cannot replace List by Tuple or Sequence in "_output_padding" because
        # TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
        num_spatial_dims = 3
        # _output_padding resolves the output-size ambiguity for stride > 1
        # when the caller passes an explicit output_size.
        output_padding = self._output_padding(
            input, output_size, self.stride, self.padding, self.kernel_size,  # type: ignore[arg-type]
            num_spatial_dims, self.dilation)  # type: ignore[arg-type]

        return F.conv_transpose3d(
            input, self.weight, self.bias, self.stride, self.padding,
            output_padding, self.groups, self.dilation)
1107
+
1108
+
1109
+ # TODO: Deprecate and remove the following alias `_ConvTransposeMixin`.
1110
+ #
1111
+ # `_ConvTransposeMixin` was a mixin that was removed. It is meant to be used
1112
+ # with `_ConvNd` to construct actual module classes that implements conv
1113
+ # transpose ops:
1114
+ #
1115
+ # class MyConvTranspose(_ConvNd, _ConvTransposeMixin):
1116
+ # ...
1117
+ #
1118
+ # In PyTorch, it has been replaced by `_ConvTransposeNd`, which is a proper
1119
+ # subclass of `_ConvNd`. However, some user code in the wild still (incorrectly)
1120
+ # use the internal class `_ConvTransposeMixin`. Hence, we provide this alias
1121
+ # for BC, because it is cheap and easy for us to do so, even though that
1122
+ # `_ConvTransposeNd` is really not a mixin anymore (but multiple inheritance as
1123
+ # above would still work).
1124
+ class _ConvTransposeMixin(_ConvTransposeNd):
1125
+ def __init__(self, *args, **kwargs):
1126
+ warnings.warn(
1127
+ "_ConvTransposeMixin is a deprecated internal class. "
1128
+ "Please consider using public APIs.")
1129
+ super().__init__(*args, **kwargs)
1130
+
1131
+
1132
+ # TODO: Conv2dLocal
1133
+ # TODO: Conv2dMap
1134
+ # TODO: ConvTranspose2dMap
1135
+
1136
+
1137
class _LazyConvXdMixin(LazyModuleMixin):
    """Mixin for lazy convolution modules.

    Defers materialization of ``weight`` (and ``bias``) until the first input
    is seen; ``in_channels`` is inferred from that input's channel dimension.
    Subclasses must implement :meth:`_get_num_spatial_dims`.
    """

    groups: int
    transposed: bool
    in_channels: int
    out_channels: int
    kernel_size: Tuple[int, ...]
    weight: UninitializedParameter
    bias: UninitializedParameter

    def reset_parameters(self) -> None:
        # Skip the real initialization while the parameters are still
        # uninitialized or in_channels has not been inferred yet.
        # has_uninitialized_params is defined in parent class and it is using a protocol on self
        if not self.has_uninitialized_params() and self.in_channels != 0:  # type: ignore[misc]
            # "type:ignore[..]" is required because mypy thinks that "reset_parameters" is undefined
            # in super class. Turns out that it is defined in _ConvND which is inherited by any class
            # that also inherits _LazyConvXdMixin
            super().reset_parameters()  # type: ignore[misc]

    # Signature of "initialize_parameters" is incompatible with the definition in supertype LazyModuleMixin
    def initialize_parameters(self, input) -> None:  # type: ignore[override]
        """Infer ``in_channels`` from ``input``, then materialize weight/bias."""
        # defined by parent class but using a protocol
        if self.has_uninitialized_params():  # type: ignore[misc]
            self.in_channels = self._get_in_channels(input)
            if self.in_channels % self.groups != 0:
                raise ValueError('in_channels must be divisible by groups')
            assert isinstance(self.weight, UninitializedParameter)
            # Transposed convs store weight as (in, out // groups, *k);
            # regular convs as (out, in // groups, *k).
            if self.transposed:
                self.weight.materialize((
                    self.in_channels, self.out_channels // self.groups, *self.kernel_size))
            else:
                self.weight.materialize((
                    self.out_channels, self.in_channels // self.groups, *self.kernel_size))
            if self.bias is not None:
                assert isinstance(self.bias, UninitializedParameter)
                self.bias.materialize((self.out_channels,))
            # Now that shapes are known, run the normal initialization.
            self.reset_parameters()

    # Function to extract in_channels from first input.
    def _get_in_channels(self, input: Tensor) -> int:
        num_spatial_dims = self._get_num_spatial_dims()
        num_dims_no_batch = num_spatial_dims + 1  # +1 for channels dim
        num_dims_batch = num_dims_no_batch + 1
        if input.dim() not in (num_dims_no_batch, num_dims_batch):
            raise RuntimeError("Expected {}D (unbatched) or {}D (batched) input to {}, but "
                               "got input of size: {}".format(num_dims_no_batch, num_dims_batch,
                                                              self.__class__.__name__, input.shape))
        # Channels are dim 1 for batched input, dim 0 for unbatched.
        return input.shape[1] if input.dim() == num_dims_batch else input.shape[0]

    # Function to return the number of spatial dims expected for inputs to the module.
    # This is expected to be implemented by subclasses.
    def _get_num_spatial_dims(self) -> int:
        raise NotImplementedError()
1188
+
1189
+
1190
+ # LazyConv1d defines weight as a Tensor but derived class defines it as UnitializeParameter
1191
class LazyConv1d(_LazyConvXdMixin, Conv1d):  # type: ignore[misc]
    r"""A :class:`torch.nn.Conv1d` module that lazily infers ``in_channels``.

    The ``in_channels`` argument of the underlying :class:`Conv1d` is deduced
    from ``input.size(1)`` on the first forward call; until then the `weight`
    and `bias` attributes remain uninitialized.

    See :class:`torch.nn.modules.lazy.LazyModuleMixin` for further
    documentation on lazy modules and their limitations.

    Args:
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel
            elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``

    .. seealso:: :class:`torch.nn.Conv1d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
    """

    # Once parameters are materialized the instance's class is swapped to the
    # concrete Conv1d. Redefines a base-class attribute, hence the ignore.
    cls_to_become = Conv1d  # type: ignore[assignment]

    def __init__(
        self,
        out_channels: int,
        kernel_size: _size_1_t,
        stride: _size_1_t = 1,
        padding: _size_1_t = 0,
        dilation: _size_1_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',
        device=None,
        dtype=None
    ) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        # Both channel counts start at 0: in_channels is inferred from the
        # first input, and out_channels is restored right after this call.
        # bias is hardcoded to False so the base class does not allocate a
        # tensor that would immediately be replaced below.
        super().__init__(0, 0, kernel_size, stride, padding, dilation, groups,
                         False, padding_mode, **factory_kwargs)
        self.weight = UninitializedParameter(**factory_kwargs)
        self.out_channels = out_channels
        if bias:
            self.bias = UninitializedParameter(**factory_kwargs)

    def _get_num_spatial_dims(self) -> int:
        return 1
1257
+
1258
+
1259
+ # LazyConv2d defines weight as a Tensor but derived class defines it as UnitializeParameter
1260
class LazyConv2d(_LazyConvXdMixin, Conv2d):  # type: ignore[misc]
    r"""A :class:`torch.nn.Conv2d` module that lazily infers ``in_channels``.

    The ``in_channels`` argument of the underlying :class:`Conv2d` is deduced
    from ``input.size(1)`` on the first forward call; until then the `weight`
    and `bias` attributes remain uninitialized.

    See :class:`torch.nn.modules.lazy.LazyModuleMixin` for further
    documentation on lazy modules and their limitations.

    Args:
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel
            elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``

    .. seealso:: :class:`torch.nn.Conv2d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
    """

    # Once parameters are materialized the instance's class is swapped to the
    # concrete Conv2d. Redefines a base-class attribute, hence the ignore.
    cls_to_become = Conv2d  # type: ignore[assignment]

    def __init__(
        self,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',  # TODO: refine this type
        device=None,
        dtype=None
    ) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        # Both channel counts start at 0: in_channels is inferred from the
        # first input, and out_channels is restored right after this call.
        # bias is hardcoded to False so the base class does not allocate a
        # tensor that would immediately be replaced below.
        super().__init__(0, 0, kernel_size, stride, padding, dilation, groups,
                         False, padding_mode, **factory_kwargs)
        self.weight = UninitializedParameter(**factory_kwargs)
        self.out_channels = out_channels
        if bias:
            self.bias = UninitializedParameter(**factory_kwargs)

    def _get_num_spatial_dims(self) -> int:
        return 2
1326
+
1327
+
1328
+ # LazyConv3d defines weight as a Tensor but derived class defines it as UnitializeParameter
1329
+ class LazyConv3d(_LazyConvXdMixin, Conv3d): # type: ignore[misc]
1330
+ r"""A :class:`torch.nn.Conv3d` module with lazy initialization of
1331
+ the ``in_channels`` argument of the :class:`Conv3d` that is inferred from
1332
+ the ``input.size(1)``.
1333
+ The attributes that will be lazily initialized are `weight` and `bias`.
1334
+
1335
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
1336
+ on lazy modules and their limitations.
1337
+
1338
+ Args:
1339
+ out_channels (int): Number of channels produced by the convolution
1340
+ kernel_size (int or tuple): Size of the convolving kernel
1341
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
1342
+ padding (int or tuple, optional): Zero-padding added to both sides of
1343
+ the input. Default: 0
1344
+ padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
1345
+ ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
1346
+ dilation (int or tuple, optional): Spacing between kernel
1347
+ elements. Default: 1
1348
+ groups (int, optional): Number of blocked connections from input
1349
+ channels to output channels. Default: 1
1350
+ bias (bool, optional): If ``True``, adds a learnable bias to the
1351
+ output. Default: ``True``
1352
+
1353
+ .. seealso:: :class:`torch.nn.Conv3d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
1354
+ """
1355
+
1356
+ # super class define this variable as None. "type: ignore[..] is required
1357
+ # since we are redefining the variable.
1358
+ cls_to_become = Conv3d # type: ignore[assignment]
1359
+
1360
+ def __init__(
1361
+ self,
1362
+ out_channels: int,
1363
+ kernel_size: _size_3_t,
1364
+ stride: _size_3_t = 1,
1365
+ padding: _size_3_t = 0,
1366
+ dilation: _size_3_t = 1,
1367
+ groups: int = 1,
1368
+ bias: bool = True,
1369
+ padding_mode: str = 'zeros',
1370
+ device=None,
1371
+ dtype=None
1372
+ ) -> None:
1373
+ factory_kwargs = {'device': device, 'dtype': dtype}
1374
+ super().__init__(
1375
+ 0,
1376
+ 0,
1377
+ kernel_size,
1378
+ stride,
1379
+ padding,
1380
+ dilation,
1381
+ groups,
1382
+ # bias is hardcoded to False to avoid creating tensor
1383
+ # that will soon be overwritten.
1384
+ False,
1385
+ padding_mode,
1386
+ **factory_kwargs
1387
+ )
1388
+ self.weight = UninitializedParameter(**factory_kwargs)
1389
+ self.out_channels = out_channels
1390
+ if bias:
1391
+ self.bias = UninitializedParameter(**factory_kwargs)
1392
+
1393
+ def _get_num_spatial_dims(self) -> int:
1394
+ return 3
1395
+
1396
+
1397
+ # LazyConvTranspose1d defines weight as a Tensor but derived class defines it as UnitializeParameter
1398
+ class LazyConvTranspose1d(_LazyConvXdMixin, ConvTranspose1d): # type: ignore[misc]
1399
+ r"""A :class:`torch.nn.ConvTranspose1d` module with lazy initialization of
1400
+ the ``in_channels`` argument of the :class:`ConvTranspose1d` that is inferred from
1401
+ the ``input.size(1)``.
1402
+ The attributes that will be lazily initialized are `weight` and `bias`.
1403
+
1404
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
1405
+ on lazy modules and their limitations.
1406
+
1407
+ Args:
1408
+ out_channels (int): Number of channels produced by the convolution
1409
+ kernel_size (int or tuple): Size of the convolving kernel
1410
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
1411
+ padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
1412
+ will be added to both sides of the input. Default: 0
1413
+ output_padding (int or tuple, optional): Additional size added to one side
1414
+ of the output shape. Default: 0
1415
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
1416
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
1417
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
1418
+
1419
+ .. seealso:: :class:`torch.nn.ConvTranspose1d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
1420
+ """
1421
+
1422
+ # super class define this variable as None. "type: ignore[..] is required
1423
+ # since we are redefining the variable.
1424
+ cls_to_become = ConvTranspose1d # type: ignore[assignment]
1425
+
1426
+ def __init__(
1427
+ self,
1428
+ out_channels: int,
1429
+ kernel_size: _size_1_t,
1430
+ stride: _size_1_t = 1,
1431
+ padding: _size_1_t = 0,
1432
+ output_padding: _size_1_t = 0,
1433
+ groups: int = 1,
1434
+ bias: bool = True,
1435
+ dilation: _size_1_t = 1,
1436
+ padding_mode: str = 'zeros',
1437
+ device=None,
1438
+ dtype=None
1439
+ ) -> None:
1440
+ factory_kwargs = {'device': device, 'dtype': dtype}
1441
+ super().__init__(
1442
+ 0,
1443
+ 0,
1444
+ kernel_size,
1445
+ stride,
1446
+ padding,
1447
+ output_padding,
1448
+ groups,
1449
+ # bias is hardcoded to False to avoid creating tensor
1450
+ # that will soon be overwritten.
1451
+ False,
1452
+ dilation,
1453
+ padding_mode,
1454
+ **factory_kwargs
1455
+ )
1456
+ self.weight = UninitializedParameter(**factory_kwargs)
1457
+ self.out_channels = out_channels
1458
+ if bias:
1459
+ self.bias = UninitializedParameter(**factory_kwargs)
1460
+
1461
+ def _get_num_spatial_dims(self) -> int:
1462
+ return 1
1463
+
1464
+
1465
+ # LazyConvTranspose2d defines weight as a Tensor but derived class defines it as UnitializeParameter
1466
+ class LazyConvTranspose2d(_LazyConvXdMixin, ConvTranspose2d): # type: ignore[misc]
1467
+ r"""A :class:`torch.nn.ConvTranspose2d` module with lazy initialization of
1468
+ the ``in_channels`` argument of the :class:`ConvTranspose2d` that is inferred from
1469
+ the ``input.size(1)``.
1470
+ The attributes that will be lazily initialized are `weight` and `bias`.
1471
+
1472
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
1473
+ on lazy modules and their limitations.
1474
+
1475
+ Args:
1476
+ out_channels (int): Number of channels produced by the convolution
1477
+ kernel_size (int or tuple): Size of the convolving kernel
1478
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
1479
+ padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
1480
+ will be added to both sides of each dimension in the input. Default: 0
1481
+ output_padding (int or tuple, optional): Additional size added to one side
1482
+ of each dimension in the output shape. Default: 0
1483
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
1484
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
1485
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
1486
+
1487
+ .. seealso:: :class:`torch.nn.ConvTranspose2d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
1488
+ """
1489
+
1490
+ # super class define this variable as None. "type: ignore[..] is required
1491
+ # since we are redefining the variable.
1492
+ cls_to_become = ConvTranspose2d # type: ignore[assignment]
1493
+
1494
+ def __init__(
1495
+ self,
1496
+ out_channels: int,
1497
+ kernel_size: _size_2_t,
1498
+ stride: _size_2_t = 1,
1499
+ padding: _size_2_t = 0,
1500
+ output_padding: _size_2_t = 0,
1501
+ groups: int = 1,
1502
+ bias: bool = True,
1503
+ dilation: int = 1,
1504
+ padding_mode: str = 'zeros',
1505
+ device=None,
1506
+ dtype=None
1507
+ ) -> None:
1508
+ factory_kwargs = {'device': device, 'dtype': dtype}
1509
+ super().__init__(
1510
+ 0,
1511
+ 0,
1512
+ kernel_size,
1513
+ stride,
1514
+ padding,
1515
+ output_padding,
1516
+ groups,
1517
+ # bias is hardcoded to False to avoid creating tensor
1518
+ # that will soon be overwritten.
1519
+ False,
1520
+ dilation,
1521
+ padding_mode,
1522
+ **factory_kwargs
1523
+ )
1524
+ self.weight = UninitializedParameter(**factory_kwargs)
1525
+ self.out_channels = out_channels
1526
+ if bias:
1527
+ self.bias = UninitializedParameter(**factory_kwargs)
1528
+
1529
+ def _get_num_spatial_dims(self) -> int:
1530
+ return 2
1531
+
1532
+
1533
+ # LazyConvTranspose3d defines weight as a Tensor but derived class defines it as UnitializeParameter
1534
+ class LazyConvTranspose3d(_LazyConvXdMixin, ConvTranspose3d): # type: ignore[misc]
1535
+ r"""A :class:`torch.nn.ConvTranspose3d` module with lazy initialization of
1536
+ the ``in_channels`` argument of the :class:`ConvTranspose3d` that is inferred from
1537
+ the ``input.size(1)``.
1538
+ The attributes that will be lazily initialized are `weight` and `bias`.
1539
+
1540
+ Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
1541
+ on lazy modules and their limitations.
1542
+
1543
+ Args:
1544
+ out_channels (int): Number of channels produced by the convolution
1545
+ kernel_size (int or tuple): Size of the convolving kernel
1546
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
1547
+ padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
1548
+ will be added to both sides of each dimension in the input. Default: 0
1549
+ output_padding (int or tuple, optional): Additional size added to one side
1550
+ of each dimension in the output shape. Default: 0
1551
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
1552
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
1553
+ dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
1554
+
1555
+ .. seealso:: :class:`torch.nn.ConvTranspose3d` and :class:`torch.nn.modules.lazy.LazyModuleMixin`
1556
+ """
1557
+
1558
+ # super class define this variable as None. "type: ignore[..] is required
1559
+ # since we are redefining the variable.
1560
+ cls_to_become = ConvTranspose3d # type: ignore[assignment]
1561
+
1562
+ def __init__(
1563
+ self,
1564
+ out_channels: int,
1565
+ kernel_size: _size_3_t,
1566
+ stride: _size_3_t = 1,
1567
+ padding: _size_3_t = 0,
1568
+ output_padding: _size_3_t = 0,
1569
+ groups: int = 1,
1570
+ bias: bool = True,
1571
+ dilation: _size_3_t = 1,
1572
+ padding_mode: str = 'zeros',
1573
+ device=None,
1574
+ dtype=None
1575
+ ) -> None:
1576
+ factory_kwargs = {'device': device, 'dtype': dtype}
1577
+ super().__init__(
1578
+ 0,
1579
+ 0,
1580
+ kernel_size,
1581
+ stride,
1582
+ padding,
1583
+ output_padding,
1584
+ groups,
1585
+ # bias is hardcoded to False to avoid creating tensor
1586
+ # that will soon be overwritten.
1587
+ False,
1588
+ dilation,
1589
+ padding_mode,
1590
+ **factory_kwargs
1591
+ )
1592
+ self.weight = UninitializedParameter(**factory_kwargs)
1593
+ self.out_channels = out_channels
1594
+ if bias:
1595
+ self.bias = UninitializedParameter(**factory_kwargs)
1596
+
1597
+ def _get_num_spatial_dims(self) -> int:
1598
+ return 3
mgm/lib/python3.10/site-packages/torch/nn/modules/dropout.py ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .module import Module
2
+ from .. import functional as F
3
+
4
+ from torch import Tensor
5
+
6
+ __all__ = ['Dropout', 'Dropout1d', 'Dropout2d', 'Dropout3d', 'AlphaDropout', 'FeatureAlphaDropout']
7
+
8
+ class _DropoutNd(Module):
9
+ __constants__ = ['p', 'inplace']
10
+ p: float
11
+ inplace: bool
12
+
13
+ def __init__(self, p: float = 0.5, inplace: bool = False) -> None:
14
+ super().__init__()
15
+ if p < 0 or p > 1:
16
+ raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
17
+ self.p = p
18
+ self.inplace = inplace
19
+
20
+ def extra_repr(self) -> str:
21
+ return f'p={self.p}, inplace={self.inplace}'
22
+
23
+
24
+ class Dropout(_DropoutNd):
25
+ r"""During training, randomly zeroes some of the elements of the input
26
+ tensor with probability :attr:`p` using samples from a Bernoulli
27
+ distribution. Each channel will be zeroed out independently on every forward
28
+ call.
29
+
30
+ This has proven to be an effective technique for regularization and
31
+ preventing the co-adaptation of neurons as described in the paper
32
+ `Improving neural networks by preventing co-adaptation of feature
33
+ detectors`_ .
34
+
35
+ Furthermore, the outputs are scaled by a factor of :math:`\frac{1}{1-p}` during
36
+ training. This means that during evaluation the module simply computes an
37
+ identity function.
38
+
39
+ Args:
40
+ p: probability of an element to be zeroed. Default: 0.5
41
+ inplace: If set to ``True``, will do this operation in-place. Default: ``False``
42
+
43
+ Shape:
44
+ - Input: :math:`(*)`. Input can be of any shape
45
+ - Output: :math:`(*)`. Output is of the same shape as input
46
+
47
+ Examples::
48
+
49
+ >>> m = nn.Dropout(p=0.2)
50
+ >>> input = torch.randn(20, 16)
51
+ >>> output = m(input)
52
+
53
+ .. _Improving neural networks by preventing co-adaptation of feature
54
+ detectors: https://arxiv.org/abs/1207.0580
55
+ """
56
+
57
+ def forward(self, input: Tensor) -> Tensor:
58
+ return F.dropout(input, self.p, self.training, self.inplace)
59
+
60
+
61
+ class Dropout1d(_DropoutNd):
62
+ r"""Randomly zero out entire channels (a channel is a 1D feature map,
63
+ e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
64
+ batched input is a 1D tensor :math:`\text{input}[i, j]`).
65
+ Each channel will be zeroed out independently on every forward call with
66
+ probability :attr:`p` using samples from a Bernoulli distribution.
67
+
68
+ Usually the input comes from :class:`nn.Conv1d` modules.
69
+
70
+ As described in the paper
71
+ `Efficient Object Localization Using Convolutional Networks`_ ,
72
+ if adjacent pixels within feature maps are strongly correlated
73
+ (as is normally the case in early convolution layers) then i.i.d. dropout
74
+ will not regularize the activations and will otherwise just result
75
+ in an effective learning rate decrease.
76
+
77
+ In this case, :func:`nn.Dropout1d` will help promote independence between
78
+ feature maps and should be used instead.
79
+
80
+ Args:
81
+ p (float, optional): probability of an element to be zero-ed.
82
+ inplace (bool, optional): If set to ``True``, will do this operation
83
+ in-place
84
+
85
+ Shape:
86
+ - Input: :math:`(N, C, L)` or :math:`(C, L)`.
87
+ - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input).
88
+
89
+ Examples::
90
+
91
+ >>> m = nn.Dropout1d(p=0.2)
92
+ >>> input = torch.randn(20, 16, 32)
93
+ >>> output = m(input)
94
+
95
+ .. _Efficient Object Localization Using Convolutional Networks:
96
+ https://arxiv.org/abs/1411.4280
97
+ """
98
+
99
+ def forward(self, input: Tensor) -> Tensor:
100
+ return F.dropout1d(input, self.p, self.training, self.inplace)
101
+
102
+
103
+ class Dropout2d(_DropoutNd):
104
+ r"""Randomly zero out entire channels (a channel is a 2D feature map,
105
+ e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
106
+ batched input is a 2D tensor :math:`\text{input}[i, j]`).
107
+ Each channel will be zeroed out independently on every forward call with
108
+ probability :attr:`p` using samples from a Bernoulli distribution.
109
+
110
+ Usually the input comes from :class:`nn.Conv2d` modules.
111
+
112
+ As described in the paper
113
+ `Efficient Object Localization Using Convolutional Networks`_ ,
114
+ if adjacent pixels within feature maps are strongly correlated
115
+ (as is normally the case in early convolution layers) then i.i.d. dropout
116
+ will not regularize the activations and will otherwise just result
117
+ in an effective learning rate decrease.
118
+
119
+ In this case, :func:`nn.Dropout2d` will help promote independence between
120
+ feature maps and should be used instead.
121
+
122
+ Args:
123
+ p (float, optional): probability of an element to be zero-ed.
124
+ inplace (bool, optional): If set to ``True``, will do this operation
125
+ in-place
126
+
127
+ .. warning ::
128
+ Due to historical reasons, this class will perform 1D channel-wise dropout
129
+ for 3D inputs (as done by :class:`nn.Dropout1d`). Thus, it currently does NOT
130
+ support inputs without a batch dimension of shape :math:`(C, H, W)`. This
131
+ behavior will change in a future release to interpret 3D inputs as no-batch-dim
132
+ inputs. To maintain the old behavior, switch to :class:`nn.Dropout1d`.
133
+
134
+ Shape:
135
+ - Input: :math:`(N, C, H, W)` or :math:`(N, C, L)`.
136
+ - Output: :math:`(N, C, H, W)` or :math:`(N, C, L)` (same shape as input).
137
+
138
+ Examples::
139
+
140
+ >>> m = nn.Dropout2d(p=0.2)
141
+ >>> input = torch.randn(20, 16, 32, 32)
142
+ >>> output = m(input)
143
+
144
+ .. _Efficient Object Localization Using Convolutional Networks:
145
+ https://arxiv.org/abs/1411.4280
146
+ """
147
+
148
+ def forward(self, input: Tensor) -> Tensor:
149
+ return F.dropout2d(input, self.p, self.training, self.inplace)
150
+
151
+
152
+ class Dropout3d(_DropoutNd):
153
+ r"""Randomly zero out entire channels (a channel is a 3D feature map,
154
+ e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
155
+ batched input is a 3D tensor :math:`\text{input}[i, j]`).
156
+ Each channel will be zeroed out independently on every forward call with
157
+ probability :attr:`p` using samples from a Bernoulli distribution.
158
+
159
+ Usually the input comes from :class:`nn.Conv3d` modules.
160
+
161
+ As described in the paper
162
+ `Efficient Object Localization Using Convolutional Networks`_ ,
163
+ if adjacent pixels within feature maps are strongly correlated
164
+ (as is normally the case in early convolution layers) then i.i.d. dropout
165
+ will not regularize the activations and will otherwise just result
166
+ in an effective learning rate decrease.
167
+
168
+ In this case, :func:`nn.Dropout3d` will help promote independence between
169
+ feature maps and should be used instead.
170
+
171
+ Args:
172
+ p (float, optional): probability of an element to be zeroed.
173
+ inplace (bool, optional): If set to ``True``, will do this operation
174
+ in-place
175
+
176
+ Shape:
177
+ - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`.
178
+ - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input).
179
+
180
+ Examples::
181
+
182
+ >>> m = nn.Dropout3d(p=0.2)
183
+ >>> input = torch.randn(20, 16, 4, 32, 32)
184
+ >>> output = m(input)
185
+
186
+ .. _Efficient Object Localization Using Convolutional Networks:
187
+ https://arxiv.org/abs/1411.4280
188
+ """
189
+
190
+ def forward(self, input: Tensor) -> Tensor:
191
+ return F.dropout3d(input, self.p, self.training, self.inplace)
192
+
193
+
194
+ class AlphaDropout(_DropoutNd):
195
+ r"""Applies Alpha Dropout over the input.
196
+
197
+ Alpha Dropout is a type of Dropout that maintains the self-normalizing
198
+ property.
199
+ For an input with zero mean and unit standard deviation, the output of
200
+ Alpha Dropout maintains the original mean and standard deviation of the
201
+ input.
202
+ Alpha Dropout goes hand-in-hand with SELU activation function, which ensures
203
+ that the outputs have zero mean and unit standard deviation.
204
+
205
+ During training, it randomly masks some of the elements of the input
206
+ tensor with probability *p* using samples from a bernoulli distribution.
207
+ The elements to masked are randomized on every forward call, and scaled
208
+ and shifted to maintain zero mean and unit standard deviation.
209
+
210
+ During evaluation the module simply computes an identity function.
211
+
212
+ More details can be found in the paper `Self-Normalizing Neural Networks`_ .
213
+
214
+ Args:
215
+ p (float): probability of an element to be dropped. Default: 0.5
216
+ inplace (bool, optional): If set to ``True``, will do this operation
217
+ in-place
218
+
219
+ Shape:
220
+ - Input: :math:`(*)`. Input can be of any shape
221
+ - Output: :math:`(*)`. Output is of the same shape as input
222
+
223
+ Examples::
224
+
225
+ >>> m = nn.AlphaDropout(p=0.2)
226
+ >>> input = torch.randn(20, 16)
227
+ >>> output = m(input)
228
+
229
+ .. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
230
+ """
231
+
232
+ def forward(self, input: Tensor) -> Tensor:
233
+ return F.alpha_dropout(input, self.p, self.training)
234
+
235
+
236
+ class FeatureAlphaDropout(_DropoutNd):
237
+ r"""Randomly masks out entire channels (a channel is a feature map,
238
+ e.g. the :math:`j`-th channel of the :math:`i`-th sample in the batch input
239
+ is a tensor :math:`\text{input}[i, j]`) of the input tensor). Instead of
240
+ setting activations to zero, as in regular Dropout, the activations are set
241
+ to the negative saturation value of the SELU activation function. More details
242
+ can be found in the paper `Self-Normalizing Neural Networks`_ .
243
+
244
+ Each element will be masked independently for each sample on every forward
245
+ call with probability :attr:`p` using samples from a Bernoulli distribution.
246
+ The elements to be masked are randomized on every forward call, and scaled
247
+ and shifted to maintain zero mean and unit variance.
248
+
249
+ Usually the input comes from :class:`nn.AlphaDropout` modules.
250
+
251
+ As described in the paper
252
+ `Efficient Object Localization Using Convolutional Networks`_ ,
253
+ if adjacent pixels within feature maps are strongly correlated
254
+ (as is normally the case in early convolution layers) then i.i.d. dropout
255
+ will not regularize the activations and will otherwise just result
256
+ in an effective learning rate decrease.
257
+
258
+ In this case, :func:`nn.AlphaDropout` will help promote independence between
259
+ feature maps and should be used instead.
260
+
261
+ Args:
262
+ p (float, optional): probability of an element to be zeroed. Default: 0.5
263
+ inplace (bool, optional): If set to ``True``, will do this operation
264
+ in-place
265
+
266
+ Shape:
267
+ - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`.
268
+ - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input).
269
+
270
+ Examples::
271
+
272
+ >>> m = nn.FeatureAlphaDropout(p=0.2)
273
+ >>> input = torch.randn(20, 16, 4, 32, 32)
274
+ >>> output = m(input)
275
+
276
+ .. _Self-Normalizing Neural Networks: https://arxiv.org/abs/1706.02515
277
+ .. _Efficient Object Localization Using Convolutional Networks:
278
+ https://arxiv.org/abs/1411.4280
279
+ """
280
+
281
+ def forward(self, input: Tensor) -> Tensor:
282
+ return F.feature_alpha_dropout(input, self.p, self.training)
mgm/lib/python3.10/site-packages/torch/nn/modules/flatten.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .module import Module
2
+
3
+ from typing import Tuple, Union
4
+ from torch import Tensor
5
+ from torch.types import _size
6
+
7
+ __all__ = ['Flatten', 'Unflatten']
8
+
9
+ class Flatten(Module):
10
+ r"""
11
+ Flattens a contiguous range of dims into a tensor. For use with :class:`~nn.Sequential`.
12
+ See :meth:`torch.flatten` for details.
13
+
14
+ Shape:
15
+ - Input: :math:`(*, S_{\text{start}},..., S_{i}, ..., S_{\text{end}}, *)`,'
16
+ where :math:`S_{i}` is the size at dimension :math:`i` and :math:`*` means any
17
+ number of dimensions including none.
18
+ - Output: :math:`(*, \prod_{i=\text{start}}^{\text{end}} S_{i}, *)`.
19
+
20
+ Args:
21
+ start_dim: first dim to flatten (default = 1).
22
+ end_dim: last dim to flatten (default = -1).
23
+
24
+ Examples::
25
+ >>> input = torch.randn(32, 1, 5, 5)
26
+ >>> # With default parameters
27
+ >>> m = nn.Flatten()
28
+ >>> output = m(input)
29
+ >>> output.size()
30
+ torch.Size([32, 25])
31
+ >>> # With non-default parameters
32
+ >>> m = nn.Flatten(0, 2)
33
+ >>> output = m(input)
34
+ >>> output.size()
35
+ torch.Size([160, 5])
36
+ """
37
+ __constants__ = ['start_dim', 'end_dim']
38
+ start_dim: int
39
+ end_dim: int
40
+
41
+ def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None:
42
+ super().__init__()
43
+ self.start_dim = start_dim
44
+ self.end_dim = end_dim
45
+
46
+ def forward(self, input: Tensor) -> Tensor:
47
+ return input.flatten(self.start_dim, self.end_dim)
48
+
49
+ def extra_repr(self) -> str:
50
+ return f'start_dim={self.start_dim}, end_dim={self.end_dim}'
51
+
52
+
53
+ class Unflatten(Module):
54
+ r"""
55
+ Unflattens a tensor dim expanding it to a desired shape. For use with :class:`~nn.Sequential`.
56
+
57
+ * :attr:`dim` specifies the dimension of the input tensor to be unflattened, and it can
58
+ be either `int` or `str` when `Tensor` or `NamedTensor` is used, respectively.
59
+
60
+ * :attr:`unflattened_size` is the new shape of the unflattened dimension of the tensor and it can be
61
+ a `tuple` of ints or a `list` of ints or `torch.Size` for `Tensor` input; a `NamedShape`
62
+ (tuple of `(name, size)` tuples) for `NamedTensor` input.
63
+
64
+ Shape:
65
+ - Input: :math:`(*, S_{\text{dim}}, *)`, where :math:`S_{\text{dim}}` is the size at
66
+ dimension :attr:`dim` and :math:`*` means any number of dimensions including none.
67
+ - Output: :math:`(*, U_1, ..., U_n, *)`, where :math:`U` = :attr:`unflattened_size` and
68
+ :math:`\prod_{i=1}^n U_i = S_{\text{dim}}`.
69
+
70
+ Args:
71
+ dim (Union[int, str]): Dimension to be unflattened
72
+ unflattened_size (Union[torch.Size, Tuple, List, NamedShape]): New shape of the unflattened dimension
73
+
74
+ Examples:
75
+ >>> input = torch.randn(2, 50)
76
+ >>> # With tuple of ints
77
+ >>> m = nn.Sequential(
78
+ >>> nn.Linear(50, 50),
79
+ >>> nn.Unflatten(1, (2, 5, 5))
80
+ >>> )
81
+ >>> output = m(input)
82
+ >>> output.size()
83
+ torch.Size([2, 2, 5, 5])
84
+ >>> # With torch.Size
85
+ >>> m = nn.Sequential(
86
+ >>> nn.Linear(50, 50),
87
+ >>> nn.Unflatten(1, torch.Size([2, 5, 5]))
88
+ >>> )
89
+ >>> output = m(input)
90
+ >>> output.size()
91
+ torch.Size([2, 2, 5, 5])
92
+ >>> # With namedshape (tuple of tuples)
93
+ >>> input = torch.randn(2, 50, names=('N', 'features'))
94
+ >>> unflatten = nn.Unflatten('features', (('C', 2), ('H', 5), ('W', 5)))
95
+ >>> output = unflatten(input)
96
+ >>> output.size()
97
+ torch.Size([2, 2, 5, 5])
98
+ """
99
+ NamedShape = Tuple[Tuple[str, int]]
100
+
101
+ __constants__ = ['dim', 'unflattened_size']
102
+ dim: Union[int, str]
103
+ unflattened_size: Union[_size, NamedShape]
104
+
105
+ def __init__(self, dim: Union[int, str], unflattened_size: Union[_size, NamedShape]) -> None:
106
+ super().__init__()
107
+
108
+ if isinstance(dim, int):
109
+ self._require_tuple_int(unflattened_size)
110
+ elif isinstance(dim, str):
111
+ self._require_tuple_tuple(unflattened_size)
112
+ else:
113
+ raise TypeError("invalid argument type for dim parameter")
114
+
115
+ self.dim = dim
116
+ self.unflattened_size = unflattened_size
117
+
118
+ def _require_tuple_tuple(self, input):
119
+ if (isinstance(input, tuple)):
120
+ for idx, elem in enumerate(input):
121
+ if not isinstance(elem, tuple):
122
+ raise TypeError("unflattened_size must be tuple of tuples, " +
123
+ f"but found element of type {type(elem).__name__} at pos {idx}")
124
+ return
125
+ raise TypeError("unflattened_size must be a tuple of tuples, " +
126
+ f"but found type {type(input).__name__}")
127
+
128
+ def _require_tuple_int(self, input):
129
+ if (isinstance(input, (tuple, list))):
130
+ for idx, elem in enumerate(input):
131
+ if not isinstance(elem, int):
132
+ raise TypeError("unflattened_size must be tuple of ints, " +
133
+ f"but found element of type {type(elem).__name__} at pos {idx}")
134
+ return
135
+ raise TypeError(f"unflattened_size must be a tuple of ints, but found type {type(input).__name__}")
136
+
137
+ def forward(self, input: Tensor) -> Tensor:
138
+ return input.unflatten(self.dim, self.unflattened_size)
139
+
140
+ def extra_repr(self) -> str:
141
+ return f'dim={self.dim}, unflattened_size={self.unflattened_size}'
mgm/lib/python3.10/site-packages/torch/nn/modules/instancenorm.py ADDED
@@ -0,0 +1,428 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import warnings
3
+ from torch import Tensor
4
+
5
+ from .batchnorm import _LazyNormBase, _NormBase
6
+ from .. import functional as F
7
+
8
+ __all__ = ['InstanceNorm1d', 'InstanceNorm2d', 'InstanceNorm3d', 'LazyInstanceNorm1d',
9
+ 'LazyInstanceNorm2d', 'LazyInstanceNorm3d']
10
+
11
class _InstanceNorm(_NormBase):
    """Shared implementation behind the ``InstanceNorm{1,2,3}d`` modules.

    Concrete subclasses only supply the expected dimensionality via
    ``_check_input_dim`` and ``_get_no_batch_dim``; all normalization and
    state-dict backward-compatibility logic lives here.
    """

    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
        momentum: float = 0.1,
        affine: bool = False,
        track_running_stats: bool = False,
        device=None,
        dtype=None
    ) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__(
            num_features, eps, momentum, affine, track_running_stats, **factory_kwargs)

    def _check_input_dim(self, input):
        # Implemented by the concrete 1d/2d/3d subclasses.
        raise NotImplementedError

    def _get_no_batch_dim(self):
        # Number of dims of an *unbatched* input; implemented by subclasses.
        raise NotImplementedError

    def _handle_no_batch_input(self, input):
        # Temporarily add a batch dimension so the functional op sees a
        # batched input, then drop it again.
        return self._apply_instance_norm(input.unsqueeze(0)).squeeze(0)

    def _apply_instance_norm(self, input):
        # Instance statistics are used whenever we are training OR running
        # stats are not tracked; running stats are only used at eval time
        # with track_running_stats=True.
        return F.instance_norm(
            input, self.running_mean, self.running_var, self.weight, self.bias,
            self.training or not self.track_running_stats, self.momentum, self.eps)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        version = local_metadata.get('version', None)
        # at version 1: removed running_mean and running_var when
        # track_running_stats=False (default)
        if version is None and not self.track_running_stats:
            running_stats_keys = []
            for name in ('running_mean', 'running_var'):
                key = prefix + name
                if key in state_dict:
                    running_stats_keys.append(key)
            if len(running_stats_keys) > 0:
                error_msgs.append(
                    'Unexpected running stats buffer(s) {names} for {klass} '
                    'with track_running_stats=False. If state_dict is a '
                    'checkpoint saved before 0.4.0, this may be expected '
                    'because {klass} does not track running stats by default '
                    'since 0.4.0. Please remove these keys from state_dict. If '
                    'the running stats are actually needed, instead set '
                    'track_running_stats=True in {klass} to enable them. See '
                    'the documentation of {klass} for details.'
                    .format(names=" and ".join(f'"{k}"' for k in running_stats_keys),
                            klass=self.__class__.__name__))
                # Drop the stale buffers so the super() load below does not
                # also report them as unexpected keys.
                for key in running_stats_keys:
                    state_dict.pop(key)

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)

        # Channel dim index: 0 for an unbatched input, 1 for a batched one.
        feature_dim = input.dim() - self._get_no_batch_dim()
        if input.size(feature_dim) != self.num_features:
            if self.affine:
                # With affine params the channel count is load-bearing
                # (weight/bias have num_features entries) -> hard error.
                raise ValueError(
                    f"expected input's size at dim={feature_dim} to match num_features"
                    f" ({self.num_features}), but got: {input.size(feature_dim)}.")
            else:
                # Without affine params num_features is unused, so only warn.
                warnings.warn(f"input's size at dim={feature_dim} does not match num_features. "
                              "You can silence this warning by not passing in num_features, "
                              "which is not used because affine=False")

        if input.dim() == self._get_no_batch_dim():
            return self._handle_no_batch_input(input)

        return self._apply_instance_norm(input)
88
+
89
+
90
class InstanceNorm1d(_InstanceNorm):
    r"""Applies Instance Normalization over a 2D (unbatched) or 3D (batched) input
    as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization
    <https://arxiv.org/abs/1607.08022>`__.

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size `C` (where `C` is the number of features or channels of the input) if :attr:`affine` is ``True``.
    The standard-deviation is calculated via the biased estimator, equivalent to
    `torch.var(input, unbiased=False)`.

    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.

    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.

    .. note::
        This :attr:`momentum` argument is different from one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    .. note::
        :class:`InstanceNorm1d` and :class:`LayerNorm` are very similar, but
        have some subtle differences. :class:`InstanceNorm1d` is applied
        on each channel of channeled data like multidimensional time series, but
        :class:`LayerNorm` is usually applied on entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies elementwise affine
        transform, while :class:`InstanceNorm1d` usually don't apply affine
        transform.

    Args:
        num_features: number of features or channels :math:`C` of the input
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, L)` or :math:`(C, L)`
        - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)

    Examples::

        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm1d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm1d(100, affine=True)
        >>> input = torch.randn(20, 100, 40)
        >>> output = m(input)
    """

    def _get_no_batch_dim(self):
        # An unbatched input is 2D: (C, L).
        return 2

    def _check_input_dim(self, input):
        # Accept (C, L) or (N, C, L).
        if input.dim() not in (2, 3):
            raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)')
163
+
164
+
165
class LazyInstanceNorm1d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm1d` module with lazy initialization of
    the ``num_features`` argument of the :class:`InstanceNorm1d` that is inferred
    from the ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, L)` or :math:`(C, L)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, L)` or :math:`(C, L)`
        - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)
    """

    # After the first forward pass the instance is converted in place to a
    # regular InstanceNorm1d (see LazyModuleMixin).
    cls_to_become = InstanceNorm1d  # type: ignore[assignment]

    def _get_no_batch_dim(self):
        # An unbatched input is 2D: (C, L).
        return 2

    def _check_input_dim(self, input):
        # Accept (C, L) or (N, C, L).
        if input.dim() not in (2, 3):
            raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)')
201
+
202
+
203
class InstanceNorm2d(_InstanceNorm):
    r"""Applies Instance Normalization over a 4D input (a mini-batch of 2D inputs
    with additional channel dimension) as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization
    <https://arxiv.org/abs/1607.08022>`__.

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
    The standard-deviation is calculated via the biased estimator, equivalent to
    `torch.var(input, unbiased=False)`.

    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.

    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.

    .. note::
        This :attr:`momentum` argument is different from one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    .. note::
        :class:`InstanceNorm2d` and :class:`LayerNorm` are very similar, but
        have some subtle differences. :class:`InstanceNorm2d` is applied
        on each channel of channeled data like RGB images, but
        :class:`LayerNorm` is usually applied on entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies elementwise affine
        transform, while :class:`InstanceNorm2d` usually don't apply affine
        transform.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, H, W)` or :math:`(C, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
        - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)

    Examples::

        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm2d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm2d(100, affine=True)
        >>> input = torch.randn(20, 100, 35, 45)
        >>> output = m(input)
    """

    def _get_no_batch_dim(self):
        # An unbatched input is 3D: (C, H, W).
        return 3

    def _check_input_dim(self, input):
        # Accept (C, H, W) or (N, C, H, W).
        if input.dim() not in (3, 4):
            raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)')
277
+
278
+
279
class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm2d` module with lazy initialization of
    the ``num_features`` argument of the :class:`InstanceNorm2d` that is inferred
    from the ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, H, W)` or :math:`(C, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
        - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
    """

    # After the first forward pass the instance is converted in place to a
    # regular InstanceNorm2d (see LazyModuleMixin).
    cls_to_become = InstanceNorm2d  # type: ignore[assignment]

    def _get_no_batch_dim(self):
        # An unbatched input is 3D: (C, H, W).
        return 3

    def _check_input_dim(self, input):
        # Accept (C, H, W) or (N, C, H, W).
        if input.dim() not in (3, 4):
            raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)')
315
+
316
+
317
class InstanceNorm3d(_InstanceNorm):
    r"""Applies Instance Normalization over a 5D input (a mini-batch of 3D inputs
    with additional channel dimension) as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization
    <https://arxiv.org/abs/1607.08022>`__.

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size C (where C is the input size) if :attr:`affine` is ``True``.
    The standard-deviation is calculated via the biased estimator, equivalent to
    `torch.var(input, unbiased=False)`.

    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.

    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.

    .. note::
        This :attr:`momentum` argument is different from one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    .. note::
        :class:`InstanceNorm3d` and :class:`LayerNorm` are very similar, but
        have some subtle differences. :class:`InstanceNorm3d` is applied
        on each channel of channeled data like 3D models with RGB color, but
        :class:`LayerNorm` is usually applied on entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies elementwise affine
        transform, while :class:`InstanceNorm3d` usually don't apply affine
        transform.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)

    Examples::

        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm3d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm3d(100, affine=True)
        >>> input = torch.randn(20, 100, 35, 45, 10)
        >>> output = m(input)
    """

    def _get_no_batch_dim(self):
        # An unbatched input is 4D: (C, D, H, W).
        return 4

    def _check_input_dim(self, input):
        # Accept (C, D, H, W) or (N, C, D, H, W).
        if input.dim() not in (4, 5):
            raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)')
391
+
392
+
393
class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm3d` module with lazy initialization of
    the ``num_features`` argument of the :class:`InstanceNorm3d` that is inferred
    from the ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)
    """

    # After the first forward pass the instance is converted in place to a
    # regular InstanceNorm3d (see LazyModuleMixin).
    cls_to_become = InstanceNorm3d  # type: ignore[assignment]

    def _get_no_batch_dim(self):
        # An unbatched input is 4D: (C, D, H, W).
        return 4

    def _check_input_dim(self, input):
        # Accept (C, D, H, W) or (N, C, D, H, W).
        if input.dim() not in (4, 5):
            raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)')
mgm/lib/python3.10/site-packages/torch/nn/modules/lazy.py ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import warnings
3
+ from typing import Protocol
4
+
5
+ import torch
6
+ from ..parameter import is_lazy
7
+
8
# Only the mixin is public; _LazyProtocol exists purely for type checking.
__all__ = ['LazyModuleMixin']
9
+
10
class _LazyProtocol(Protocol):
    """This is to avoid errors with mypy checks for
    The attributes in a mixin:
    https://mypy.readthedocs.io/en/latest/more_types.html#mixin-classes

    The stubs declare the ``torch.nn.Module`` attributes/methods that
    ``LazyModuleMixin`` relies on, so mypy can type-check the mixin in
    isolation. None of these bodies are ever executed.
    """
    def _register_load_state_dict_pre_hook(self, hook):
        ...

    def register_forward_pre_hook(self, hook):
        ...

    def _lazy_load_hook(
            self, state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs):
        ...

    def _get_name(self):
        ...

    def _infer_parameters(self, module, input):
        ...

    @property
    def _parameters(self):
        ...

    @property
    def _buffers(self):
        ...

    @property
    def _non_persistent_buffers_set(self):
        ...

    @property
    def _load_hook(self):
        ...

    @property
    def _initialize_hook(self):
        ...
52
+
53
class LazyModuleMixin:
    r"""A mixin for modules that lazily initialize parameters, also known as "lazy modules."

    .. warning:
        Lazy modules are an experimental new feature under active development,
        and their API is likely to change.

    Modules that lazily initialize parameters, or "lazy modules",
    derive the shapes of their parameters from the first input(s)
    to their forward method. Until that first forward they contain
    :class:`torch.nn.UninitializedParameter` s that should not be accessed
    or used, and afterward they contain regular :class:`torch.nn.Parameter` s.
    Lazy modules are convenient since they don't require computing some
    module arguments, like the :attr:`in_features` argument of a
    typical :class:`torch.nn.Linear`.

    After construction, networks with lazy modules should first
    be converted to the desired dtype and placed on the expected device.
    This is because lazy modules only perform shape inference so the usual dtype
    and device placement behavior applies.
    The lazy modules should then perform "dry runs" to initialize all the components in the module.
    These "dry runs" send inputs of the correct size, dtype, and device through
    the network and to each one of its lazy modules. After this the network can be used as usual.

    >>> # xdoctest: +SKIP
    >>> class LazyMLP(torch.nn.Module):
    ...    def __init__(self):
    ...        super().__init__()
    ...        self.fc1 = torch.nn.LazyLinear(10)
    ...        self.relu1 = torch.nn.ReLU()
    ...        self.fc2 = torch.nn.LazyLinear(1)
    ...        self.relu2 = torch.nn.ReLU()
    ...
    ...    def forward(self, input):
    ...        x = self.relu1(self.fc1(input))
    ...        y = self.relu2(self.fc2(x))
    ...        return y
    >>> # constructs a network with lazy modules
    >>> lazy_mlp = LazyMLP()
    >>> # transforms the network's device and dtype
    >>> # NOTE: these transforms can and should be applied after construction and before any 'dry runs'
    >>> lazy_mlp = lazy_mlp.cuda().double()
    >>> lazy_mlp
    LazyMLP( (fc1): LazyLinear(in_features=0, out_features=10, bias=True)
      (relu1): ReLU()
      (fc2): LazyLinear(in_features=0, out_features=1, bias=True)
      (relu2): ReLU()
    )
    >>> # performs a dry run to initialize the network's lazy modules
    >>> lazy_mlp(torch.ones(10,10).cuda())
    >>> # after initialization, LazyLinear modules become regular Linear modules
    >>> lazy_mlp
    LazyMLP(
      (fc1): Linear(in_features=10, out_features=10, bias=True)
      (relu1): ReLU()
      (fc2): Linear(in_features=10, out_features=1, bias=True)
      (relu2): ReLU()
    )
    >>> # attaches an optimizer, since parameters can now be used as usual
    >>> optim = torch.optim.SGD(lazy_mlp.parameters(), lr=0.01)

    A final caveat when using lazy modules is that the order of initialization of a network's
    parameters may change, since the lazy modules are always initialized after other modules.
    For example, if the LazyMLP class defined above had a :class:`torch.nn.LazyLinear` module
    first and then a regular :class:`torch.nn.Linear` second, the second module would be
    initialized on construction and the first module would be initialized during the first dry run.
    This can cause the parameters of a network using lazy modules to be initialized differently
    than the parameters of a network without lazy modules as the order of parameter initializations,
    which often depends on a stateful random number generator, is different.
    Check :doc:`/notes/randomness` for more details.

    Lazy modules can be serialized with a state dict like other modules. For example:

    >>> lazy_mlp = LazyMLP()
    >>> # The state dict shows the uninitialized parameters
    >>> lazy_mlp.state_dict()
    OrderedDict([('fc1.weight', Uninitialized parameter),
                 ('fc1.bias',
                  tensor([-1.8832e+25,  4.5636e-41, -1.8832e+25,  4.5636e-41, -6.1598e-30,
                           4.5637e-41, -1.8788e+22,  4.5636e-41, -2.0042e-31,  4.5637e-41])),
                 ('fc2.weight', Uninitialized parameter),
                 ('fc2.bias', tensor([0.0019]))])


    Lazy modules can load regular :class:`torch.nn.Parameter` s (i.e. you can serialize/deserialize
    initialized LazyModules and they will remain initialized)


    >>> full_mlp = LazyMLP()
    >>> # Dry run to initialize another module
    >>> full_mlp.forward(torch.ones(10, 1))
    >>> # Load an initialized state into a lazy module
    >>> lazy_mlp.load_state_dict(full_mlp.state_dict())
    >>> # The state dict now holds valid values
    >>> lazy_mlp.state_dict()
    OrderedDict([('fc1.weight',
                  tensor([[-0.3837],
                          [ 0.0907],
                          [ 0.6708],
                          [-0.5223],
                          [-0.9028],
                          [ 0.2851],
                          [-0.4537],
                          [ 0.6813],
                          [ 0.5766],
                          [-0.8678]])),
                 ('fc1.bias',
                  tensor([-1.8832e+25,  4.5636e-41, -1.8832e+25,  4.5636e-41, -6.1598e-30,
                           4.5637e-41, -1.8788e+22,  4.5636e-41, -2.0042e-31,  4.5637e-41])),
                 ('fc2.weight',
                  tensor([[ 0.1320,  0.2938,  0.0679,  0.2793,  0.1088, -0.1795, -0.2301,  0.2807,
                            0.2479,  0.1091]])),
                 ('fc2.bias', tensor([0.0019]))])

    Note, however, that the loaded parameters will not be replaced when doing a "dry run" if they are initialized
    when the state is loaded. This prevents using initialized modules in different contexts.
    """

    # modules inheriting from this will change their __class__ to the specified
    # one after they are fully initialized
    cls_to_become = None

    def __init__(self: _LazyProtocol, *args, **kwargs):
        # Mypy doesnt like this super call in a mixin
        super().__init__(*args, **kwargs)  # type: ignore[misc]
        # Hooks are kept as attributes so _infer_parameters can remove them
        # once the module has been fully initialized.
        self._load_hook = self._register_load_state_dict_pre_hook(self._lazy_load_hook)
        self._initialize_hook = self.register_forward_pre_hook(self._infer_parameters)
        warnings.warn('Lazy modules are a new feature under heavy development '
                      'so changes to the API or functionality can happen at any moment.')

    def _save_to_state_dict(self: _LazyProtocol, destination, prefix, keep_vars):
        # This should be ideally implemented as a hook,
        # but we should override `detach` in the UninitializedParameter to return itself
        # which is not clean
        for name, param in self._parameters.items():
            if param is not None:
                # Uninitialized params cannot be detached, so they are stored as-is.
                if not (is_lazy(param) or keep_vars):
                    param = param.detach()
                destination[prefix + name] = param
        for name, buf in self._buffers.items():
            if buf is not None and name not in self._non_persistent_buffers_set:
                if not (is_lazy(buf) or keep_vars):
                    buf = buf.detach()
                destination[prefix + name] = buf

    def _lazy_load_hook(
            self: _LazyProtocol, state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs):
        """load_state_dict pre-hook function for lazy buffers and parameters.

        The purpose of this hook is to adjust the current state and/or
        ``state_dict`` being loaded so that a module instance serialized in
        both un/initialized state can be deserialized onto both un/initialized
        module instance.
        See comment in ``torch.nn.Module._register_load_state_dict_pre_hook``
        for the details of the hook specification.
        """
        for name, param in itertools.chain(self._parameters.items(), self._buffers.items()):
            key = prefix + name
            if key in state_dict and param is not None:
                input_param = state_dict[key]
                if is_lazy(param):
                    # The current parameter is not initialized but the one being loaded one is
                    # create a new parameter based on the uninitialized one
                    if not is_lazy(input_param):
                        with torch.no_grad():
                            param.materialize(input_param.shape)

    def initialize_parameters(self: _LazyProtocol, *args, **kwargs):
        r"""Initialize parameters according to the input batch properties.
        This adds an interface to isolate parameter initialization from the
        forward pass when doing parameter shape inference.
        """
        raise NotImplementedError(f'initialize_parameters is not implemented for {self.__class__.__name__}')

    def has_uninitialized_params(self: _LazyProtocol):
        r"""Check if a module has parameters that are not initialized
        """
        # This is to avoid the JIT to track this parameter and force
        # custom modules __setstate__ to add it
        params = self._parameters.values()
        buffers = self._buffers.values()
        for param in itertools.chain(params, buffers):
            if is_lazy(param):
                return True
        return False

    def _infer_parameters(self: _LazyProtocol, module, input):
        r"""Infers the size and initializes the parameters according to the
        provided input batch.
        Given a module that contains parameters that were declared inferrable
        using :class:`torch.nn.parameter.ParameterMode.Infer`, runs a forward pass
        in the complete module using the provided input to initialize all the parameters
        as needed.
        The module is set into evaluation mode before running the forward pass in order
        to avoid saving statistics or calculating gradients
        """
        module.initialize_parameters(*input)
        if module.has_uninitialized_params():
            raise RuntimeError(f'module {self._get_name()} has not been fully initialized')
        # Initialization is one-shot: drop both hooks so subsequent forwards
        # and state-dict loads behave like a regular module.
        module._initialize_hook.remove()
        module._load_hook.remove()
        delattr(module, '_initialize_hook')
        delattr(module, '_load_hook')
        if module.cls_to_become is not None:
            # Swap the instance's class in place, e.g. LazyLinear -> Linear.
            module.__class__ = module.cls_to_become


    def _replicate_for_data_parallel(self: _LazyProtocol):
        raise RuntimeError('Modules with uninitialized parameters can\'t be used with `DataParallel`. '
                           'Run a dummy forward pass to correctly initialize the modules')
mgm/lib/python3.10/site-packages/torch/nn/modules/linear.py ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from typing import Any
3
+
4
+ import torch
5
+ from torch import Tensor
6
+ from torch.nn.parameter import Parameter, UninitializedParameter
7
+ from .. import functional as F
8
+ from .. import init
9
+ from .module import Module
10
+ from .lazy import LazyModuleMixin
11
+
12
+
13
# Public API of this module (entries are kept sorted alphabetically).
__all__ = [
    'Bilinear',
    'Identity',
    'LazyLinear',
    'Linear',
]
19
+
20
+
21
class Identity(Module):
    r"""A placeholder identity operator that is argument-insensitive.

    Whatever is passed to :meth:`forward` is returned unchanged; constructor
    arguments are accepted only so the module can be dropped in wherever a
    configurable layer is expected.

    Args:
        args: any argument (unused)
        kwargs: any keyword argument (unused)

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Output: :math:`(*)`, same shape as the input.

    Examples::

        >>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 20])

    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # All positional and keyword arguments are deliberately ignored.
        super().__init__()

    def forward(self, input: Tensor) -> Tensor:
        # Pass the input through untouched.
        return input
46
+
47
+
48
class Linear(Module):
    r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        bias: If set to ``False``, the layer will not learn an additive bias.
            Default: ``True``

    Shape:
        - Input: :math:`(*, H_{in})` where :math:`*` means any number of
          dimensions including none and :math:`H_{in} = \text{in\_features}`.
        - Output: :math:`(*, H_{out})` where all but the last dimension
          are the same shape as the input and :math:`H_{out} = \text{out\_features}`.

    Attributes:
        weight: the learnable weights of the module of shape
            :math:`(\text{out\_features}, \text{in\_features})`. The values are
            initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
            :math:`k = \frac{1}{\text{in\_features}}`
        bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
                If :attr:`bias` is ``True``, the values are initialized from
                :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                :math:`k = \frac{1}{\text{in\_features}}`

    Examples::

        >>> m = nn.Linear(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """
    __constants__ = ['in_features', 'out_features']
    in_features: int
    out_features: int
    weight: Tensor

    def __init__(self, in_features: int, out_features: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Weight is stored transposed, (out_features, in_features), so the
        # forward pass computes x @ weight.T + bias.
        self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
        if not bias:
            # Register a None entry so 'bias' still appears in _parameters.
            self.register_parameter('bias', None)
        else:
            self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        # Setting a=sqrt(5) in kaiming_uniform is the same as initializing with
        # uniform(-1/sqrt(in_features), 1/sqrt(in_features)). For details, see
        # https://github.com/pytorch/pytorch/issues/57109
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is None:
            return
        fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
        bound = 0 if fan_in <= 0 else 1 / math.sqrt(fan_in)
        init.uniform_(self.bias, -bound, bound)

    def forward(self, input: Tensor) -> Tensor:
        return F.linear(input, self.weight, self.bias)

    def extra_repr(self) -> str:
        # Rendered inside repr(), e.g. Linear(in_features=20, out_features=30, bias=True)
        return (f'in_features={self.in_features}, '
                f'out_features={self.out_features}, '
                f'bias={self.bias is not None}')
118
+
119
+
120
+ # This class exists solely to avoid triggering an obscure error when scripting
121
+ # an improperly quantized attention layer. See this issue for details:
122
+ # https://github.com/pytorch/pytorch/issues/58969
123
+ # TODO: fail fast on quantization API usage error, then remove this class
124
+ # and replace uses of it with plain Linear
125
class NonDynamicallyQuantizableLinear(Linear):
    """A :class:`Linear` that is behaviorally identical to its parent.

    Exists only so that scripting an improperly quantized attention layer
    does not trigger an obscure error (pytorch/pytorch#58969); it adds no
    functionality of its own.
    """

    def __init__(self, in_features: int, out_features: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        # Forward everything to Linear unchanged.
        super().__init__(in_features, out_features, bias=bias, device=device, dtype=dtype)
130
+
131
+
132
class Bilinear(Module):
    r"""Applies a bilinear transformation to the incoming data:
    :math:`y = x_1^T A x_2 + b`

    Args:
        in1_features: size of each first input sample
        in2_features: size of each second input sample
        out_features: size of each output sample
        bias: If set to False, the layer will not learn an additive bias.
            Default: ``True``

    Shape:
        - Input1: :math:`(*, H_{in1})` where :math:`H_{in1}=\text{in1\_features}` and
          :math:`*` means any number of additional dimensions including none. All but the last dimension
          of the inputs should be the same.
        - Input2: :math:`(*, H_{in2})` where :math:`H_{in2}=\text{in2\_features}`.
        - Output: :math:`(*, H_{out})` where :math:`H_{out}=\text{out\_features}`
          and all but the last dimension are the same shape as the input.

    Attributes:
        weight: the learnable weights of the module of shape
            :math:`(\text{out\_features}, \text{in1\_features}, \text{in2\_features})`.
            The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
            :math:`k = \frac{1}{\text{in1\_features}}`
        bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
            If :attr:`bias` is ``True``, the values are initialized from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
            :math:`k = \frac{1}{\text{in1\_features}}`

    Examples::

        >>> m = nn.Bilinear(20, 30, 40)
        >>> input1 = torch.randn(128, 20)
        >>> input2 = torch.randn(128, 30)
        >>> output = m(input1, input2)
        >>> print(output.size())
        torch.Size([128, 40])
    """

    __constants__ = ['in1_features', 'in2_features', 'out_features']
    in1_features: int
    in2_features: int
    out_features: int
    weight: Tensor

    def __init__(self, in1_features: int, in2_features: int, out_features: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.weight = Parameter(torch.empty((out_features, in1_features, in2_features), **factory_kwargs))

        if bias:
            self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        # weight.size(1) == in1_features, so both weight and bias are drawn
        # from U(-1/sqrt(in1_features), 1/sqrt(in1_features)).
        bound = 1 / math.sqrt(self.weight.size(1))
        init.uniform_(self.weight, -bound, bound)
        if self.bias is not None:
            init.uniform_(self.bias, -bound, bound)

    def forward(self, input1: Tensor, input2: Tensor) -> Tensor:
        return F.bilinear(input1, input2, self.weight, self.bias)

    def extra_repr(self) -> str:
        # f-string for consistency with Linear.extra_repr (was str.format).
        return (f'in1_features={self.in1_features}, in2_features={self.in2_features}, '
                f'out_features={self.out_features}, bias={self.bias is not None}')
204
+
205
+
206
class LazyLinear(LazyModuleMixin, Linear):
    r"""A :class:`torch.nn.Linear` module where `in_features` is inferred.

    The ``weight`` and ``bias`` start out as
    :class:`torch.nn.UninitializedParameter`. They are materialized on the
    first call to ``forward``, with ``in_features`` taken from
    ``input.shape[-1]``, after which the module turns into a regular
    :class:`torch.nn.Linear`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further
    documentation on lazy modules and their limitations.

    Args:
        out_features: size of each output sample
        bias: If set to ``False``, the layer will not learn an additive bias.
            Default: ``True``

    Attributes:
        weight: learnable weights of shape
            :math:`(\text{out\_features}, \text{in\_features})`, initialized from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` with
            :math:`k = \frac{1}{\text{in\_features}}`
        bias: learnable bias of shape :math:`(\text{out\_features})`. When
            :attr:`bias` is ``True``, initialized from the same
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` distribution.
    """

    cls_to_become = Linear  # type: ignore[assignment]
    weight: UninitializedParameter
    bias: UninitializedParameter  # type: ignore[assignment]

    def __init__(self, out_features: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        # Pass dummy sizes and bias=False so the base class allocates no real
        # tensors that would immediately be thrown away.
        super().__init__(0, 0, False)
        self.weight = UninitializedParameter(**factory_kwargs)
        self.out_features = out_features
        if bias:
            self.bias = UninitializedParameter(**factory_kwargs)

    def reset_parameters(self) -> None:
        # Meaningful only once the parameters have been materialized.
        if not self.has_uninitialized_params() and self.in_features != 0:
            super().reset_parameters()

    def initialize_parameters(self, input) -> None:  # type: ignore[override]
        if not self.has_uninitialized_params():
            return
        with torch.no_grad():
            # Infer the input width from the last dimension of the first input.
            self.in_features = input.shape[-1]
            self.weight.materialize((self.out_features, self.in_features))
            if self.bias is not None:
                self.bias.materialize((self.out_features,))
            self.reset_parameters()
262
+ # TODO: PartialLinear - maybe in sparse?
mgm/lib/python3.10/site-packages/torch/nn/modules/padding.py ADDED
@@ -0,0 +1,800 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .module import Module
2
+ from .utils import _pair, _quadruple, _ntuple
3
+ from .. import functional as F
4
+
5
+ from torch import Tensor
6
+ from ..common_types import _size_2_t, _size_4_t, _size_6_t
7
+ from typing import Sequence, Tuple
8
+
9
+
10
+ # TODO: grad_output size asserts in THNN
11
+
12
+ __all__ = ['CircularPad1d', 'CircularPad2d', 'CircularPad3d', 'ConstantPad1d', 'ConstantPad2d',
13
+ 'ConstantPad3d', 'ReflectionPad1d', 'ReflectionPad2d', 'ReflectionPad3d',
14
+ 'ReplicationPad1d', 'ReplicationPad2d', 'ReplicationPad3d', 'ZeroPad1d', 'ZeroPad2d', 'ZeroPad3d']
15
+
16
+
17
+ class _CircularPadNd(Module):
18
+ __constants__ = ['padding']
19
+ padding: Sequence[int]
20
+
21
+ def _check_input_dim(self, input):
22
+ raise NotImplementedError
23
+
24
+ def forward(self, input: Tensor) -> Tensor:
25
+ self._check_input_dim(input)
26
+ return F.pad(input, self.padding, 'circular')
27
+
28
+ def extra_repr(self) -> str:
29
+ return f'{self.padding}'
30
+
31
+
32
+ class CircularPad1d(_CircularPadNd):
33
+ r"""Pads the input tensor using circular padding of the input boundary.
34
+
35
+ Tensor values at the beginning of the dimension are used to pad the end,
36
+ and values at the end are used to pad the beginning. If negative padding is
37
+ applied then the ends of the tensor get removed.
38
+
39
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
40
+
41
+ Args:
42
+ padding (int, tuple): the size of the padding. If is `int`, uses the same
43
+ padding in all boundaries. If a 2-`tuple`, uses
44
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
45
+
46
+ Shape:
47
+ - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
48
+ - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
49
+
50
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
51
+
52
+ Examples::
53
+
54
+ >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
55
+ >>> m = nn.CircularPad1d(2)
56
+ >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
57
+ >>> input
58
+ tensor([[[0., 1., 2., 3.],
59
+ [4., 5., 6., 7.]]])
60
+ >>> m(input)
61
+ tensor([[[2., 3., 0., 1., 2., 3., 0., 1.],
62
+ [6., 7., 4., 5., 6., 7., 4., 5.]]])
63
+ >>> # using different paddings for different sides
64
+ >>> m = nn.CircularPad1d((3, 1))
65
+ >>> m(input)
66
+ tensor([[[1., 2., 3., 0., 1., 2., 3., 0.],
67
+ [5., 6., 7., 4., 5., 6., 7., 4.]]])
68
+
69
+ """
70
+ padding: Tuple[int, int]
71
+
72
+ def __init__(self, padding: _size_2_t) -> None:
73
+ super().__init__()
74
+ self.padding = _pair(padding)
75
+
76
+ def _check_input_dim(self, input):
77
+ if input.dim() != 2 and input.dim() != 3:
78
+ raise ValueError(
79
+ f"expected 2D or 3D input (got {input.dim()}D input)"
80
+ )
81
+
82
+
83
+ class CircularPad2d(_CircularPadNd):
84
+ r"""Pads the input tensor using circular padding of the input boundary.
85
+
86
+ Tensor values at the beginning of the dimension are used to pad the end,
87
+ and values at the end are used to pad the beginning. If negative padding is
88
+ applied then the ends of the tensor get removed.
89
+
90
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
91
+
92
+ Args:
93
+ padding (int, tuple): the size of the padding. If is `int`, uses the same
94
+ padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
95
+ :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
96
+
97
+ Shape:
98
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
99
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
100
+
101
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
102
+
103
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
104
+
105
+ Examples::
106
+
107
+ >>> m = nn.CircularPad2d(2)
108
+ >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
109
+ >>> input
110
+ tensor([[[[0., 1., 2.],
111
+ [3., 4., 5.],
112
+ [6., 7., 8.]]]])
113
+ >>> m(input)
114
+ tensor([[[[4., 5., 3., 4., 5., 3., 4.],
115
+ [7., 8., 6., 7., 8., 6., 7.],
116
+ [1., 2., 0., 1., 2., 0., 1.],
117
+ [4., 5., 3., 4., 5., 3., 4.],
118
+ [7., 8., 6., 7., 8., 6., 7.],
119
+ [1., 2., 0., 1., 2., 0., 1.],
120
+ [4., 5., 3., 4., 5., 3., 4.]]]])
121
+ >>> # using different paddings for different sides
122
+ >>> m = nn.CircularPad2d((1, 1, 2, 0))
123
+ >>> m(input)
124
+ tensor([[[[5., 3., 4., 5., 3.],
125
+ [8., 6., 7., 8., 6.],
126
+ [2., 0., 1., 2., 0.],
127
+ [5., 3., 4., 5., 3.],
128
+ [8., 6., 7., 8., 6.]]]])
129
+
130
+ """
131
+ padding: Tuple[int, int, int, int]
132
+
133
+ def __init__(self, padding: _size_4_t) -> None:
134
+ super().__init__()
135
+ self.padding = _quadruple(padding)
136
+
137
+ def _check_input_dim(self, input):
138
+ if input.dim() != 3 and input.dim() != 4:
139
+ raise ValueError(
140
+ f"expected 3D or 4D input (got {input.dim()}D input)"
141
+ )
142
+
143
+
144
+ class CircularPad3d(_CircularPadNd):
145
+ r"""Pads the input tensor using circular padding of the input boundary.
146
+
147
+ Tensor values at the beginning of the dimension are used to pad the end,
148
+ and values at the end are used to pad the beginning. If negative padding is
149
+ applied then the ends of the tensor get removed.
150
+
151
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
152
+
153
+ Args:
154
+ padding (int, tuple): the size of the padding. If is `int`, uses the same
155
+ padding in all boundaries. If a 6-`tuple`, uses
156
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
157
+ :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
158
+ :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
159
+
160
+ Shape:
161
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
162
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
163
+ where
164
+
165
+ :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
166
+
167
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
168
+
169
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
170
+
171
+ Examples::
172
+
173
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
174
+ >>> m = nn.CircularPad3d(3)
175
+ >>> input = torch.randn(16, 3, 8, 320, 480)
176
+ >>> output = m(input)
177
+ >>> # using different paddings for different sides
178
+ >>> m = nn.CircularPad3d((3, 3, 6, 6, 1, 1))
179
+ >>> output = m(input)
180
+
181
+ """
182
+ padding: Tuple[int, int, int, int, int, int]
183
+
184
+ def __init__(self, padding: _size_6_t) -> None:
185
+ super().__init__()
186
+ self.padding = _ntuple(6)(padding)
187
+
188
+ def _check_input_dim(self, input):
189
+ if input.dim() != 4 and input.dim() != 5:
190
+ raise ValueError(
191
+ f"expected 4D or 5D input (got {input.dim()}D input)"
192
+ )
193
+
194
+
195
+ class _ConstantPadNd(Module):
196
+ __constants__ = ['padding', 'value']
197
+ value: float
198
+ padding: Sequence[int]
199
+
200
+ def __init__(self, value: float) -> None:
201
+ super().__init__()
202
+ self.value = value
203
+
204
+ def forward(self, input: Tensor) -> Tensor:
205
+ return F.pad(input, self.padding, 'constant', self.value)
206
+
207
+ def extra_repr(self) -> str:
208
+ return f'padding={self.padding}, value={self.value}'
209
+
210
+
211
class ConstantPad1d(_ConstantPadNd):
    r"""Pads both ends of the last dimension with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size. An `int` pads both sides equally;
            a 2-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`).

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`.
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t, value: float):
        # The base class stores the fill value; we only normalize the padding.
        super().__init__(value)
        self.padding = _pair(padding)
260
+
261
+
262
class ConstantPad2d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ConstantPad2d(2, 3.5)
        >>> input = torch.randn(1, 2, 2)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
        >>> output = m(input)

    """
    # NOTE: __constants__ is inherited from _ConstantPadNd; re-declaring
    # ['padding', 'value'] here was redundant (ConstantPad1d/3d don't), so the
    # duplicate declaration has been removed.
    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t, value: float) -> None:
        super().__init__(value)
        self.padding = _quadruple(padding)
311
+
312
+
313
class ConstantPad3d(_ConstantPadNd):
    r"""Pads the last three dimensions of the input with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size. An `int` pads every side equally;
            a 6-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`).

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where
          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`,
          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` and
          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`.
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t, value: float) -> None:
        # The base class stores the fill value; we only normalize the padding.
        super().__init__(value)
        self.padding = _ntuple(6)(padding)
351
+
352
+
353
+ class _ReflectionPadNd(Module):
354
+ __constants__ = ['padding']
355
+ padding: Sequence[int]
356
+
357
+ def forward(self, input: Tensor) -> Tensor:
358
+ return F.pad(input, self.padding, 'reflect')
359
+
360
+ def extra_repr(self) -> str:
361
+ return f'{self.padding}'
362
+
363
+
364
class ReflectionPad1d(_ReflectionPadNd):
    r"""Pads the last dimension using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size. An `int` pads both sides equally;
            a 2-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`).

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`.
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        # Normalize an int into a (left, right) pair.
        self.padding = _pair(padding)
403
+
404
+
405
class ReflectionPad2d(_ReflectionPadNd):
    r"""Pads the last two dimensions using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size. An `int` pads every side equally;
            a 4-`tuple` gives (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`,
            :math:`\text{padding\_bottom}`).

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` and
          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`.
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        # Normalize an int into a (left, right, top, bottom) 4-tuple.
        self.padding = _quadruple(padding)
455
+
456
+
457
class ReflectionPad3d(_ReflectionPadNd):
    r"""Pads the last three dimensions using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size. An `int` pads every side equally;
            a 6-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`).

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where
          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`,
          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` and
          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`.
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        # Normalize an int into a 6-tuple of per-side padding.
        self.padding = _ntuple(6)(padding)
508
+
509
+
510
+ class _ReplicationPadNd(Module):
511
+ __constants__ = ['padding']
512
+ padding: Sequence[int]
513
+
514
+ def forward(self, input: Tensor) -> Tensor:
515
+ return F.pad(input, self.padding, 'replicate')
516
+
517
+ def extra_repr(self) -> str:
518
+ return f'{self.padding}'
519
+
520
+
521
class ReplicationPad1d(_ReplicationPadNd):
    r"""Pads the last dimension by replicating the input boundary values.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size. An `int` pads both sides equally;
            a 2-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`).

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`.
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        # Normalize an int into a (left, right) pair.
        self.padding = _pair(padding)
560
+
561
+
562
class ReplicationPad2d(_ReplicationPadNd):
    r"""Pads the last two dimensions by replicating the input boundary values.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size. An `int` pads every side equally;
            a 4-`tuple` gives (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`,
            :math:`\text{padding\_bottom}`).

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` and
          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`.
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        # Normalize an int into a (left, right, top, bottom) 4-tuple.
        self.padding = _quadruple(padding)
612
+
613
+
614
class ReplicationPad3d(_ReplicationPadNd):
    r"""Pads the last three dimensions by replicating the input boundary values.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size. An `int` pads every side equally;
            a 6-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`).

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where
          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`,
          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}` and
          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`.
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        # Normalize an int into a 6-tuple of per-side padding.
        self.padding = _ntuple(6)(padding)
653
+
654
+
655
class ZeroPad1d(ConstantPad1d):
    r"""Zero-pads the input tensor along its last dimension.

    A thin wrapper over :class:`ConstantPad1d` with the fill value fixed to
    ``0.``.  For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the
            same padding in both boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ZeroPad1d(2)
        >>> input = torch.randn(1, 2, 4)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad1d((3, 1))
        >>> output = m(input)

    """
    # (left, right) padding amounts.
    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        # Delegate to ConstantPad1d with a constant fill value of zero.
        super().__init__(padding, 0.)

    def extra_repr(self) -> str:
        # Only the padding sizes matter; the pad value is always zero.
        return str(self.padding)
706
+
707
class ZeroPad2d(ConstantPad2d):
    r"""Zero-pads the input tensor along its last two dimensions.

    A thin wrapper over :class:`ConstantPad2d` with the fill value fixed to
    ``0.``.  For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ZeroPad2d(2)
        >>> input = torch.randn(1, 1, 3, 3)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad2d((1, 1, 2, 0))
        >>> output = m(input)

    """
    # (left, right, top, bottom) padding amounts.
    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        # Delegate to ConstantPad2d with a constant fill value of zero.
        super().__init__(padding, 0.)

    def extra_repr(self) -> str:
        # Only the padding sizes matter; the pad value is always zero.
        return str(self.padding)
759
+
760
class ZeroPad3d(ConstantPad3d):
    r"""Zero-pads the input tensor along its last three dimensions.

    A thin wrapper over :class:`ConstantPad3d` with the fill value fixed to
    ``0.``.  For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ZeroPad3d(3)
        >>> input = torch.randn(16, 3, 10, 20, 30)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1))
        >>> output = m(input)

    """

    # (left, right, top, bottom, front, back) padding amounts.
    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        # Delegate to ConstantPad3d with a constant fill value of zero.
        super().__init__(padding, 0.)

    def extra_repr(self) -> str:
        # Only the padding sizes matter; the pad value is always zero.
        return str(self.padding)
mgm/lib/python3.10/site-packages/torch/nn/modules/pixelshuffle.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .module import Module
2
+ from .. import functional as F
3
+
4
+ from torch import Tensor
5
+
6
+ __all__ = ['PixelShuffle', 'PixelUnshuffle']
7
+
8
class PixelShuffle(Module):
    r"""Rearranges a tensor of shape :math:`(*, C \times r^2, H, W)` into one
    of shape :math:`(*, C, H \times r, W \times r)`, where ``r`` is the
    upscale factor.

    This implements efficient sub-pixel convolution with a stride of
    :math:`1/r`, as described in
    `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
    by Shi et. al (2016).

    Args:
        upscale_factor (int): factor to increase spatial resolution by

    Shape:
        - Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions
        - Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where

        .. math::
            C_{out} = C_{in} \div \text{upscale\_factor}^2

        .. math::
            H_{out} = H_{in} \times \text{upscale\_factor}

        .. math::
            W_{out} = W_{in} \times \text{upscale\_factor}

    Examples::

        >>> pixel_shuffle = nn.PixelShuffle(3)
        >>> output = pixel_shuffle(torch.randn(1, 9, 4, 4))
        >>> print(output.size())
        torch.Size([1, 1, 12, 12])

    .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
        https://arxiv.org/abs/1609.05158
    """
    __constants__ = ['upscale_factor']
    upscale_factor: int

    def __init__(self, upscale_factor: int) -> None:
        super().__init__()
        self.upscale_factor = upscale_factor

    def forward(self, input: Tensor) -> Tensor:
        # The rearrangement itself is delegated to the functional op.
        factor = self.upscale_factor
        return F.pixel_shuffle(input, factor)

    def extra_repr(self) -> str:
        return 'upscale_factor={}'.format(self.upscale_factor)
58
+
59
+
60
class PixelUnshuffle(Module):
    r"""Reverses :class:`~torch.nn.PixelShuffle`: rearranges a tensor of shape
    :math:`(*, C, H \times r, W \times r)` into one of shape
    :math:`(*, C \times r^2, H, W)`, where ``r`` is the downscale factor.

    See `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
    by Shi et. al (2016) for details.

    Args:
        downscale_factor (int): factor to decrease spatial resolution by

    Shape:
        - Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions
        - Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where

        .. math::
            C_{out} = C_{in} \times \text{downscale\_factor}^2

        .. math::
            H_{out} = H_{in} \div \text{downscale\_factor}

        .. math::
            W_{out} = W_{in} \div \text{downscale\_factor}

    Examples::

        >>> pixel_unshuffle = nn.PixelUnshuffle(3)
        >>> output = pixel_unshuffle(torch.randn(1, 1, 12, 12))
        >>> print(output.size())
        torch.Size([1, 9, 4, 4])

    .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
        https://arxiv.org/abs/1609.05158
    """
    __constants__ = ['downscale_factor']
    downscale_factor: int

    def __init__(self, downscale_factor: int) -> None:
        super().__init__()
        self.downscale_factor = downscale_factor

    def forward(self, input: Tensor) -> Tensor:
        # The rearrangement itself is delegated to the functional op.
        factor = self.downscale_factor
        return F.pixel_unshuffle(input, factor)

    def extra_repr(self) -> str:
        return 'downscale_factor={}'.format(self.downscale_factor)
mgm/lib/python3.10/site-packages/torch/nn/modules/pooling.py ADDED
@@ -0,0 +1,1233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+
3
+ from torch import Tensor
4
+ from .module import Module
5
+ from .utils import _single, _pair, _triple
6
+ from .. import functional as F
7
+
8
+ from ..common_types import (_size_any_t, _size_1_t, _size_2_t, _size_3_t,
9
+ _ratio_3_t, _ratio_2_t, _size_any_opt_t, _size_2_opt_t, _size_3_opt_t)
10
+
11
+ __all__ = ['MaxPool1d', 'MaxPool2d', 'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d',
12
+ 'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'FractionalMaxPool2d', 'FractionalMaxPool3d', 'LPPool1d',
13
+ 'LPPool2d', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d',
14
+ 'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d']
15
+
16
class _MaxPoolNd(Module):
    # Shared constructor/state for the MaxPool{1,2,3}d front-ends; the
    # dimension-specific subclasses only add a `forward`.
    __constants__ = ['kernel_size', 'stride', 'padding', 'dilation',
                     'return_indices', 'ceil_mode']
    return_indices: bool
    ceil_mode: bool

    def __init__(self, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None,
                 padding: _size_any_t = 0, dilation: _size_any_t = 1,
                 return_indices: bool = False, ceil_mode: bool = False) -> None:
        super().__init__()
        self.kernel_size = kernel_size
        # A missing stride defaults to the kernel size (non-overlapping windows).
        self.stride = kernel_size if stride is None else stride
        self.padding = padding
        self.dilation = dilation
        self.return_indices = return_indices
        self.ceil_mode = ceil_mode

    def extra_repr(self) -> str:
        return (f'kernel_size={self.kernel_size}, stride={self.stride}, '
                f'padding={self.padding}, dilation={self.dilation}, '
                f'ceil_mode={self.ceil_mode}')
36
+
37
+
38
class MaxPool1d(_MaxPoolNd):
    r"""Applies a 1D max pooling over an input signal composed of several input
    planes.

    For input :math:`(N, C, L)` and output :math:`(N, C, L_{out})`:

    .. math::
        out(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel\_size} - 1}
                input(N_i, C_j, stride \times k + m)

    If :attr:`padding` is non-zero, the input is implicitly padded with
    negative infinity on both sides for :attr:`padding` number of points.
    :attr:`dilation` is the stride between the elements within the sliding
    window. This `link`_ has a nice visualization of the pooling parameters.

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
        or the input. Sliding windows that would start in the right padded region are ignored.

    Args:
        kernel_size: The size of the sliding window, must be > 0.
        stride: The stride of the sliding window, must be > 0. Default value is :attr:`kernel_size`.
        padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
        dilation: The stride between elements within a sliding window, must be > 0.
        return_indices: If ``True``, will return the argmax along with the max values.
            Useful for :class:`torch.nn.MaxUnpool1d` later
        ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape.

    Shape:
        - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
        - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where

          .. math::
              L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation}
                    \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor

    Examples::

        >>> # pool of size=3, stride=2
        >>> m = nn.MaxPool1d(3, stride=2)
        >>> output = m(torch.randn(20, 16, 50))

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    kernel_size: _size_1_t
    stride: _size_1_t
    padding: _size_1_t
    dilation: _size_1_t

    def forward(self, input: Tensor):
        # All configuration is held on the module; the functional op does
        # the actual pooling (and optionally returns argmax indices).
        return F.max_pool1d(
            input,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            ceil_mode=self.ceil_mode,
            return_indices=self.return_indices,
        )
95
+
96
+
97
class MaxPool2d(_MaxPoolNd):
    r"""Applies a 2D max pooling over an input signal composed of several input
    planes.

    For input :math:`(N, C, H, W)`, output :math:`(N, C, H_{out}, W_{out})`
    and :attr:`kernel_size` :math:`(kH, kW)`:

    .. math::
        \begin{aligned}
            out(N_i, C_j, h, w) ={} & \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
                                    & \text{input}(N_i, C_j, \text{stride[0]} \times h + m,
                                                   \text{stride[1]} \times w + n)
        \end{aligned}

    If :attr:`padding` is non-zero, the input is implicitly padded with
    negative infinity on both sides for :attr:`padding` number of points.
    :attr:`dilation` controls the spacing between the kernel points; this
    `link`_ has a nice visualization of what :attr:`dilation` does.

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
        or the input. Sliding windows that would start in the right padded region are ignored.

    Each of :attr:`kernel_size`, :attr:`stride`, :attr:`padding`,
    :attr:`dilation` is either a single ``int`` (used for both height and
    width) or a ``tuple`` of two ints ``(height, width)``.

    Args:
        kernel_size: the size of the window to take a max over
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: Implicit negative infinity padding to be added on both sides
        dilation: a parameter that controls the stride of elements in the window
        return_indices: if ``True``, will return the max indices along with the outputs.
            Useful for :class:`torch.nn.MaxUnpool2d` later
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]}
                    \times (\text{kernel\_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]}
                    \times (\text{kernel\_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor

    Examples::

        >>> # pool of square window of size=3, stride=2
        >>> m = nn.MaxPool2d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.MaxPool2d((3, 2), stride=(2, 1))
        >>> output = m(torch.randn(20, 16, 50, 32))

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    kernel_size: _size_2_t
    stride: _size_2_t
    padding: _size_2_t
    dilation: _size_2_t

    def forward(self, input: Tensor):
        # All configuration is held on the module; the functional op does
        # the actual pooling (and optionally returns argmax indices).
        return F.max_pool2d(
            input,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            ceil_mode=self.ceil_mode,
            return_indices=self.return_indices,
        )
169
+
170
+
171
class MaxPool3d(_MaxPoolNd):
    r"""Applies a 3D max pooling over an input signal composed of several input
    planes.

    For input :math:`(N, C, D, H, W)`, output
    :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size`
    :math:`(kD, kH, kW)`:

    .. math::
        \begin{aligned}
            \text{out}(N_i, C_j, d, h, w) ={} & \max_{k=0, \ldots, kD-1} \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
                                              & \text{input}(N_i, C_j, \text{stride[0]} \times d + k,
                                                             \text{stride[1]} \times h + m, \text{stride[2]} \times w + n)
        \end{aligned}

    If :attr:`padding` is non-zero, the input is implicitly padded with
    negative infinity on both sides for :attr:`padding` number of points.
    :attr:`dilation` controls the spacing between the kernel points; this
    `link`_ has a nice visualization of what :attr:`dilation` does.

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
        or the input. Sliding windows that would start in the right padded region are ignored.

    Each of :attr:`kernel_size`, :attr:`stride`, :attr:`padding`,
    :attr:`dilation` is either a single ``int`` (used for depth, height and
    width) or a ``tuple`` of three ints ``(depth, height, width)``.

    Args:
        kernel_size: the size of the window to take a max over
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: Implicit negative infinity padding to be added on all three sides
        dilation: a parameter that controls the stride of elements in the window
        return_indices: if ``True``, will return the max indices along with the outputs.
            Useful for :class:`torch.nn.MaxUnpool3d` later
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where

          .. math::
              D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times
                (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times
                (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2] \times
                (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor

    Examples::

        >>> # pool of square window of size=3, stride=2
        >>> m = nn.MaxPool3d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2))
        >>> output = m(torch.randn(20, 16, 50, 44, 31))

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """  # noqa: E501

    kernel_size: _size_3_t
    stride: _size_3_t
    padding: _size_3_t
    dilation: _size_3_t

    def forward(self, input: Tensor):
        # All configuration is held on the module; the functional op does
        # the actual pooling (and optionally returns argmax indices).
        return F.max_pool3d(
            input,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            ceil_mode=self.ceil_mode,
            return_indices=self.return_indices,
        )
247
+
248
+
249
class _MaxUnpoolNd(Module):
    # Common repr for the MaxUnpool{1,2,3}d modules; subclasses set
    # kernel_size / stride / padding in their own __init__.

    def extra_repr(self) -> str:
        return ('kernel_size={}, stride={}, padding={}'
                .format(self.kernel_size, self.stride, self.padding))
253
+
254
+
255
class MaxUnpool1d(_MaxUnpoolNd):
    r"""Computes a partial inverse of :class:`MaxPool1d`.

    :class:`MaxPool1d` is not fully invertible, since the non-maximal values
    are lost.  :class:`MaxUnpool1d` takes in as input the output of
    :class:`MaxPool1d` including the indices of the maximal values and
    computes a partial inverse in which all non-maximal values are set to
    zero.

    Note:
        This operation may behave nondeterministically when the input indices has repeat values.
        See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information.

    .. note:: :class:`MaxPool1d` can map several input sizes to the same output
              sizes. Hence, the inversion process can get ambiguous.
              To accommodate this, you can provide the needed output size
              as an additional argument :attr:`output_size` in the forward call.
              See the Inputs and Example below.

    Args:
        kernel_size (int or tuple): Size of the max pooling window.
        stride (int or tuple): Stride of the max pooling window.
            It is set to :attr:`kernel_size` by default.
        padding (int or tuple): Padding that was added to the input

    Inputs:
        - `input`: the input Tensor to invert
        - `indices`: the indices given out by :class:`~torch.nn.MaxPool1d`
        - `output_size` (optional): the targeted output size

    Shape:
        - Input: :math:`(N, C, H_{in})` or :math:`(C, H_{in})`.
        - Output: :math:`(N, C, H_{out})` or :math:`(C, H_{out})`, where

          .. math::
              H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0]

          or as given by :attr:`output_size` in the call operator

    Example::

        >>> pool = nn.MaxPool1d(2, stride=2, return_indices=True)
        >>> unpool = nn.MaxUnpool1d(2, stride=2)
        >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8]]])
        >>> output, indices = pool(input)
        >>> unpool(output, indices)
        tensor([[[ 0.,  2.,  0.,  4.,  0.,  6.,  0., 8.]]])

        >>> # Example showcasing the use of output_size
        >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8, 9]]])
        >>> output, indices = pool(input)
        >>> unpool(output, indices, output_size=input.size())
        tensor([[[ 0.,  2.,  0.,  4.,  0.,  6.,  0., 8.,  0.]]])
    """

    kernel_size: _size_1_t
    stride: _size_1_t
    padding: _size_1_t

    def __init__(self, kernel_size: _size_1_t, stride: Optional[_size_1_t] = None, padding: _size_1_t = 0) -> None:
        super().__init__()
        # Normalize each setting to a 1-tuple; an omitted stride mirrors
        # MaxPool1d and defaults to the kernel size.
        self.kernel_size = _single(kernel_size)
        self.stride = _single(kernel_size if stride is None else stride)
        self.padding = _single(padding)

    def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
        return F.max_unpool1d(input, indices, self.kernel_size,
                              self.stride, self.padding, output_size)
327
+
328
+
329
class MaxUnpool2d(_MaxUnpoolNd):
    r"""Computes a partial inverse of :class:`MaxPool2d`.

    :class:`MaxPool2d` is not fully invertible, since the non-maximal values
    are lost.  :class:`MaxUnpool2d` takes in as input the output of
    :class:`MaxPool2d` including the indices of the maximal values and
    computes a partial inverse in which all non-maximal values are set to
    zero.

    Note:
        This operation may behave nondeterministically when the input indices has repeat values.
        See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information.

    .. note:: :class:`MaxPool2d` can map several input sizes to the same output
              sizes. Hence, the inversion process can get ambiguous.
              To accommodate this, you can provide the needed output size
              as an additional argument :attr:`output_size` in the forward call.
              See the Inputs and Example below.

    Args:
        kernel_size (int or tuple): Size of the max pooling window.
        stride (int or tuple): Stride of the max pooling window.
            It is set to :attr:`kernel_size` by default.
        padding (int or tuple): Padding that was added to the input

    Inputs:
        - `input`: the input Tensor to invert
        - `indices`: the indices given out by :class:`~torch.nn.MaxPool2d`
        - `output_size` (optional): the targeted output size

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          .. math::
            H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}

          .. math::
            W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}

          or as given by :attr:`output_size` in the call operator

    Example::

        >>> pool = nn.MaxPool2d(2, stride=2, return_indices=True)
        >>> unpool = nn.MaxUnpool2d(2, stride=2)
        >>> input = torch.tensor([[[[ 1.,  2.,  3.,  4.],
                                    [ 5.,  6.,  7.,  8.],
                                    [ 9., 10., 11., 12.],
                                    [13., 14., 15., 16.]]]])
        >>> output, indices = pool(input)
        >>> unpool(output, indices)
        tensor([[[[  0.,   0.,   0.,   0.],
                  [  0.,   6.,   0.,   8.],
                  [  0.,   0.,   0.,   0.],
                  [  0.,  14.,   0.,  16.]]]])
        >>> # an ambiguous inverse size is resolved with output_size
        >>> input = torch.torch.tensor([[[[ 1.,  2.,  3., 4., 5.],
                                          [ 6.,  7.,  8., 9., 10.],
                                          [11., 12., 13., 14., 15.],
                                          [16., 17., 18., 19., 20.]]]])
        >>> output, indices = pool(input)
        >>> # This call will not work without specifying output_size
        >>> unpool(output, indices, output_size=input.size())
        tensor([[[[ 0.,  0.,  0.,  0.,  0.],
                  [ 0.,  7.,  0.,  9.,  0.],
                  [ 0.,  0.,  0.,  0.,  0.],
                  [ 0., 17.,  0., 19.,  0.]]]])
    """

    kernel_size: _size_2_t
    stride: _size_2_t
    padding: _size_2_t

    def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0) -> None:
        super().__init__()
        # Normalize each setting to a 2-tuple; an omitted stride mirrors
        # MaxPool2d and defaults to the kernel size.
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(kernel_size if stride is None else stride)
        self.padding = _pair(padding)

    def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
        return F.max_unpool2d(input, indices, self.kernel_size,
                              self.stride, self.padding, output_size)
414
+
415
+
416
class MaxUnpool3d(_MaxUnpoolNd):
    r"""Computes a partial inverse of :class:`MaxPool3d`.

    :class:`MaxPool3d` discards all non-maximal values, so it is not fully
    invertible.  This module takes the pooled output together with the indices
    returned by :class:`MaxPool3d` and scatters the values back to their
    original positions, filling every other location with zero.

    Note:
        When ``indices`` contains repeated values the result may be
        nondeterministic.  See https://github.com/pytorch/pytorch/issues/80827
        and :doc:`/notes/randomness` for details.

    .. note:: Several input sizes can map to one pooled size, so the inverse
        is ambiguous; pass ``output_size`` in the forward call to pick the
        desired shape.

    Args:
        kernel_size (int or tuple): size of the max-pooling window.
        stride (int or tuple): stride of the max-pooling window.
            Defaults to :attr:`kernel_size`.
        padding (int or tuple): padding that was added to the input.

    Inputs:
        - `input`: the tensor to invert
        - `indices`: indices produced by :class:`~torch.nn.MaxPool3d`
        - `output_size` (optional): the targeted output size

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where each spatial dim follows
          :math:`(\text{in} - 1) \times \text{stride} - 2 \times \text{padding} + \text{kernel\_size}`,
          or the shape given by :attr:`output_size`.

    Example::

        >>> pool = nn.MaxPool3d(3, stride=2, return_indices=True)
        >>> unpool = nn.MaxUnpool3d(3, stride=2)
        >>> output, indices = pool(torch.randn(20, 16, 51, 33, 15))
        >>> unpool(output, indices).size()
        torch.Size([20, 16, 51, 33, 15])
    """

    kernel_size: _size_3_t
    stride: _size_3_t
    padding: _size_3_t

    def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0) -> None:
        super().__init__()
        self.kernel_size = _triple(kernel_size)
        # A missing stride defaults to the kernel size, mirroring MaxPool3d.
        self.stride = _triple(kernel_size if stride is None else stride)
        self.padding = _triple(padding)

    def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
        return F.max_unpool3d(
            input, indices, self.kernel_size, self.stride, self.padding, output_size
        )
484
+
485
+
486
class _AvgPoolNd(Module):
    """Shared base class for the N-dimensional average-pooling modules."""

    __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad']

    def extra_repr(self) -> str:
        # Summarize the pooling geometry for repr()/print(module).
        return 'kernel_size={}, stride={}, padding={}'.format(
            self.kernel_size, self.stride, self.padding
        )
491
+
492
+
493
class AvgPool1d(_AvgPoolNd):
    r"""Applies a 1D average pooling over an input signal composed of several
    input planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C, L)`, output :math:`(N, C, L_{out})` and :attr:`kernel_size`
    :math:`k` can be precisely described as:

    .. math::

        \text{out}(N_i, C_j, l) = \frac{1}{k} \sum_{m=0}^{k-1}
                               \text{input}(N_i, C_j, \text{stride} \times l + m)

    If :attr:`padding` is non-zero, then the input is implicitly zero-padded on
    both sides for :attr:`padding` number of points.

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if
        they start within the left padding or the input. Sliding windows that
        would start in the right padded region are ignored.

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can
    each be an ``int`` or a one-element tuple.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on both sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
        count_include_pad: when True, will include the zero-padding in the averaging calculation

    Shape:
        - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
        - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where

          .. math::
              L_{out} = \left\lfloor \frac{L_{in} +
              2 \times \text{padding} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor

    Examples::

        >>> # pool with window of size=3, stride=2
        >>> m = nn.AvgPool1d(3, stride=2)
        >>> m(torch.tensor([[[1., 2, 3, 4, 5, 6, 7]]]))
        tensor([[[2., 4., 6.]]])
    """

    kernel_size: _size_1_t
    stride: _size_1_t
    padding: _size_1_t
    ceil_mode: bool
    count_include_pad: bool

    # Fix: `stride` defaults to None, so it must be annotated Optional[...]
    # (matches the Optional annotation used by AvgPool2d/AvgPool3d).
    def __init__(self, kernel_size: _size_1_t, stride: Optional[_size_1_t] = None, padding: _size_1_t = 0,
                 ceil_mode: bool = False, count_include_pad: bool = True) -> None:
        super().__init__()
        self.kernel_size = _single(kernel_size)
        # A missing stride defaults to the kernel size (non-overlapping windows).
        self.stride = _single(stride if stride is not None else kernel_size)
        self.padding = _single(padding)
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad

    def forward(self, input: Tensor) -> Tensor:
        return F.avg_pool1d(
            input, self.kernel_size, self.stride, self.padding, self.ceil_mode,
            self.count_include_pad)
558
+
559
+
560
class AvgPool2d(_AvgPoolNd):
    r"""Applies a 2D average pooling over an input signal composed of several
    input planes.

    With input :math:`(N, C, H, W)` and :attr:`kernel_size` :math:`(kH, kW)`,
    each output element is the mean of the corresponding :math:`kH \times kW`
    window of the input:

    .. math::

        out(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
                               input(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)

    A non-zero :attr:`padding` implicitly zero-pads both sides of the input.

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if
        they start within the left padding or the input. Sliding windows that
        would start in the right padded region are ignored.

    :attr:`kernel_size`, :attr:`stride` and :attr:`padding` may each be a
    single ``int`` (applied to both spatial dims) or a tuple of two ints
    ``(height, width)``.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on both sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
        count_include_pad: when True, will include the zero-padding in the averaging calculation
        divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in}  + 2 \times \text{padding}[0] -
                \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in}  + 2 \times \text{padding}[1] -
                \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor

    Examples::

        >>> # pool of square window of size=3, stride=2
        >>> m = nn.AvgPool2d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
        >>> input = torch.randn(20, 16, 50, 32)
        >>> output = m(input)
    """

    __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']

    kernel_size: _size_2_t
    stride: _size_2_t
    padding: _size_2_t
    ceil_mode: bool
    count_include_pad: bool

    def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0,
                 ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> None:
        super().__init__()
        # The functional op accepts ints or tuples directly, so no _pair() here.
        self.kernel_size = kernel_size
        self.stride = kernel_size if stride is None else stride
        self.padding = padding
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad
        self.divisor_override = divisor_override

    def forward(self, input: Tensor) -> Tensor:
        return F.avg_pool2d(
            input, self.kernel_size, self.stride, self.padding,
            self.ceil_mode, self.count_include_pad, self.divisor_override
        )
637
+
638
+
639
class AvgPool3d(_AvgPoolNd):
    r"""Applies a 3D average pooling over an input signal composed of several
    input planes.

    With input :math:`(N, C, D, H, W)` and :attr:`kernel_size`
    :math:`(kD, kH, kW)`, each output element is the mean over the
    corresponding :math:`kD \times kH \times kW` window:

    .. math::
        \begin{aligned}
            \text{out}(N_i, C_j, d, h, w) ={} & \sum_{k=0}^{kD-1} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} \\
                                              & \frac{\text{input}(N_i, C_j, \text{stride}[0] \times d + k,
                                                      \text{stride}[1] \times h + m, \text{stride}[2] \times w + n)}
                                                     {kD \times kH \times kW}
        \end{aligned}

    A non-zero :attr:`padding` implicitly zero-pads the input on all three sides.

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if
        they start within the left padding or the input. Sliding windows that
        would start in the right padded region are ignored.

    :attr:`kernel_size` and :attr:`stride` may each be a single ``int`` or a
    tuple of three ints ``(depth, height, width)``.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on all three sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
        count_include_pad: when True, will include the zero-padding in the averaging calculation
        divisor_override: if specified, it will be used as divisor, otherwise :attr:`kernel_size` will be used

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where each spatial dim follows

          .. math::
              \left\lfloor\frac{\text{in} + 2 \times \text{padding} -
                    \text{kernel\_size}}{\text{stride}} + 1\right\rfloor

    Examples::

        >>> # pool of square window of size=3, stride=2
        >>> m = nn.AvgPool3d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2))
        >>> input = torch.randn(20, 16, 50, 44, 31)
        >>> output = m(input)
    """

    __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']

    kernel_size: _size_3_t
    stride: _size_3_t
    padding: _size_3_t
    ceil_mode: bool
    count_include_pad: bool

    def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0,
                 ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> None:
        super().__init__()
        # Arguments are forwarded as-is; F.avg_pool3d accepts ints or tuples.
        self.kernel_size = kernel_size
        self.stride = kernel_size if stride is None else stride
        self.padding = padding
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad
        self.divisor_override = divisor_override

    def forward(self, input: Tensor) -> Tensor:
        return F.avg_pool3d(
            input, self.kernel_size, self.stride, self.padding,
            self.ceil_mode, self.count_include_pad, self.divisor_override
        )

    def __setstate__(self, d):
        super().__setstate__(d)
        # Checkpoints written by older versions may lack these attributes;
        # backfill them with the historical defaults.
        for name, default in (('padding', 0),
                              ('ceil_mode', False),
                              ('count_include_pad', True)):
            self.__dict__.setdefault(name, default)
729
+
730
+
731
class FractionalMaxPool2d(Module):
    r"""Applies a 2D fractional max pooling over an input signal composed of several input planes.

    Fractional MaxPooling is described in detail in the paper
    `Fractional MaxPooling`_ by Ben Graham.

    Max pooling is applied in :math:`kH \times kW` regions placed with a
    stochastic step size chosen so the result matches the requested output
    size.  The number of output channels equals the number of input planes.

    .. note:: Exactly one of ``output_size`` or ``output_ratio`` must be defined.

    Args:
        kernel_size: the size of the window to take a max over.
                     Can be a single number k (for a square kernel of k x k) or a tuple `(kh, kw)`
        output_size: the target output size of the image of the form `oH x oW`.
                     Can be a tuple `(oH, oW)` or a single number oH for a square image `oH x oH`
        output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
                      This has to be a number or tuple in the range (0, 1)
        return_indices: if ``True``, will return the indices along with the outputs.
                        Useful to pass to :meth:`nn.MaxUnpool2d`. Default: ``False``

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
          :math:`(H_{out}, W_{out})=\text{output\_size}` or
          :math:`(H_{out}, W_{out})=\text{output\_ratio} \times (H_{in}, W_{in})`.

    Examples:
        >>> # pool of square window of size=3, and target output size 13x12
        >>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12))
        >>> # pool of square window and target output size being half of input image size
        >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
        >>> input = torch.randn(20, 16, 50, 32)
        >>> output = m(input)

    .. _Fractional MaxPooling:
        https://arxiv.org/abs/1412.6071
    """

    __constants__ = ['kernel_size', 'return_indices', 'output_size',
                     'output_ratio']

    kernel_size: _size_2_t
    return_indices: bool
    output_size: _size_2_t
    output_ratio: _ratio_2_t

    def __init__(self, kernel_size: _size_2_t, output_size: Optional[_size_2_t] = None,
                 output_ratio: Optional[_ratio_2_t] = None,
                 return_indices: bool = False, _random_samples=None) -> None:
        super().__init__()
        self.kernel_size = _pair(kernel_size)
        self.return_indices = return_indices
        # _random_samples is kept as a buffer so it moves with .to()/.cuda()
        # and is saved in the state dict; None means "draw fresh samples".
        self.register_buffer('_random_samples', _random_samples)
        self.output_size = None if output_size is None else _pair(output_size)
        self.output_ratio = None if output_ratio is None else _pair(output_ratio)
        # Exactly one of the two sizing modes must be given.
        if output_size is None and output_ratio is None:
            raise ValueError("FractionalMaxPool2d requires specifying either "
                             "an output size, or a pooling ratio")
        if output_size is not None and output_ratio is not None:
            raise ValueError("only one of output_size and output_ratio may be specified")
        if self.output_ratio is not None:
            if not all(0 < ratio < 1 for ratio in self.output_ratio):
                raise ValueError(f"output_ratio must be between 0 and 1 (got {output_ratio})")

    def forward(self, input: Tensor):
        return F.fractional_max_pool2d(
            input, self.kernel_size, self.output_size, self.output_ratio,
            self.return_indices,
            _random_samples=self._random_samples)
800
+
801
+
802
+ class FractionalMaxPool3d(Module):
803
+ r"""Applies a 3D fractional max pooling over an input signal composed of several input planes.
804
+
805
+ Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
806
+
807
+ The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic
808
+ step size determined by the target output size.
809
+ The number of output features is equal to the number of input planes.
810
+
811
+ .. note:: Exactly one of ``output_size`` or ``output_ratio`` must be defined.
812
+
813
+ Args:
814
+ kernel_size: the size of the window to take a max over.
815
+ Can be a single number k (for a square kernel of k x k x k) or a tuple `(kt x kh x kw)`
816
+ output_size: the target output size of the image of the form `oT x oH x oW`.
817
+ Can be a tuple `(oT, oH, oW)` or a single number oH for a square image `oH x oH x oH`
818
+ output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
819
+ This has to be a number or tuple in the range (0, 1)
820
+ return_indices: if ``True``, will return the indices along with the outputs.
821
+ Useful to pass to :meth:`nn.MaxUnpool3d`. Default: ``False``
822
+
823
+ Shape:
824
+ - Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or :math:`(C, T_{in}, H_{in}, W_{in})`.
825
+ - Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or :math:`(C, T_{out}, H_{out}, W_{out})`, where
826
+ :math:`(T_{out}, H_{out}, W_{out})=\text{output\_size}` or
827
+ :math:`(T_{out}, H_{out}, W_{out})=\text{output\_ratio} \times (T_{in}, H_{in}, W_{in})`
828
+
829
+ Examples:
830
+ >>> # pool of cubic window of size=3, and target output size 13x12x11
831
+ >>> m = nn.FractionalMaxPool3d(3, output_size=(13, 12, 11))
832
+ >>> # pool of cubic window and target output size being half of input size
833
+ >>> m = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5))
834
+ >>> input = torch.randn(20, 16, 50, 32, 16)
835
+ >>> output = m(input)
836
+
837
+ .. _Fractional MaxPooling:
838
+ https://arxiv.org/abs/1412.6071
839
+ """
840
+ __constants__ = ['kernel_size', 'return_indices', 'output_size',
841
+ 'output_ratio']
842
+ kernel_size: _size_3_t
843
+ return_indices: bool
844
+ output_size: _size_3_t
845
+ output_ratio: _ratio_3_t
846
+
847
+ def __init__(self, kernel_size: _size_3_t, output_size: Optional[_size_3_t] = None,
848
+ output_ratio: Optional[_ratio_3_t] = None,
849
+ return_indices: bool = False, _random_samples=None) -> None:
850
+ super().__init__()
851
+ self.kernel_size = _triple(kernel_size)
852
+ self.return_indices = return_indices
853
+ self.register_buffer('_random_samples', _random_samples)
854
+ self.output_size = _triple(output_size) if output_size is not None else None
855
+ self.output_ratio = _triple(output_ratio) if output_ratio is not None else None
856
+ if output_size is None and output_ratio is None:
857
+ raise ValueError("FractionalMaxPool3d requires specifying either "
858
+ "an output size, or a pooling ratio")
859
+ if output_size is not None and output_ratio is not None:
860
+ raise ValueError("only one of output_size and output_ratio may be specified")
861
+ if self.output_ratio is not None:
862
+ if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1 and 0 < self.output_ratio[2] < 1):
863
+ raise ValueError(f"output_ratio must be between 0 and 1 (got {output_ratio})")
864
+
865
+ def forward(self, input: Tensor):
866
+ return F.fractional_max_pool3d(
867
+ input, self.kernel_size, self.output_size, self.output_ratio,
868
+ self.return_indices,
869
+ _random_samples=self._random_samples)
870
+
871
+
872
class _LPPoolNd(Module):
    """Shared base class for the N-dimensional power-average pooling modules."""

    __constants__ = ['norm_type', 'kernel_size', 'stride', 'ceil_mode']

    norm_type: float
    ceil_mode: bool

    def __init__(self, norm_type: float, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None,
                 ceil_mode: bool = False) -> None:
        super().__init__()
        # Stored without normalization; the functional layer accepts ints,
        # tuples, or a None stride (meaning "same as kernel_size").
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.stride = stride
        self.ceil_mode = ceil_mode

    def extra_repr(self) -> str:
        # Keep the exact legacy repr format.
        return (f'norm_type={self.norm_type}, kernel_size={self.kernel_size}, '
                f'stride={self.stride}, ceil_mode={self.ceil_mode}')
889
+
890
+
891
class LPPool1d(_LPPoolNd):
    r"""Applies a 1D power-average pooling over an input signal composed of
    several input planes.

    On each window, the function computed is:

    .. math::
        f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}

    - At p = :math:`\infty`, one gets Max Pooling
    - At p = 1, one gets Sum Pooling (which is proportional to Average Pooling)

    .. note:: If the sum to the power of `p` is zero, the gradient of this function is
              not defined. This implementation will set the gradient to zero in this case.

    Args:
        kernel_size: a single int, the size of the window
        stride: a single int, the stride of the window. Default value is :attr:`kernel_size`
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape

    Shape:
        - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
        - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where

          .. math::
              L_{out} = \left\lfloor\frac{L_{in} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor

    Examples::
        >>> # power-2 pool of window of length 3, with stride 2.
        >>> m = nn.LPPool1d(2, 3, stride=2)
        >>> input = torch.randn(20, 16, 50)
        >>> output = m(input)
    """

    kernel_size: _size_1_t
    stride: _size_1_t

    def forward(self, input: Tensor) -> Tensor:
        # norm_type may have been passed as an int; the functional op wants float.
        p = float(self.norm_type)
        return F.lp_pool1d(input, p, self.kernel_size, self.stride, self.ceil_mode)
931
+
932
+
933
class LPPool2d(_LPPoolNd):
    r"""Applies a 2D power-average pooling over an input signal composed of
    several input planes.

    On each window, the function computed is:

    .. math::
        f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}

    - At p = :math:`\infty`, one gets Max Pooling
    - At p = 1, one gets Sum Pooling (which is proportional to average pooling)

    :attr:`kernel_size` and :attr:`stride` may each be a single ``int``
    (applied to both spatial dims) or a tuple of two ints ``(height, width)``.

    .. note:: If the sum to the power of `p` is zero, the gradient of this function is
              not defined. This implementation will set the gradient to zero in this case.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})`, where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor

    Examples::

        >>> # power-2 pool of square window of size=3, stride=2
        >>> m = nn.LPPool2d(2, 3, stride=2)
        >>> # pool of non-square window of power 1.2
        >>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1))
        >>> input = torch.randn(20, 16, 50, 32)
        >>> output = m(input)
    """

    kernel_size: _size_2_t
    stride: _size_2_t

    def forward(self, input: Tensor) -> Tensor:
        # norm_type may have been passed as an int; the functional op wants float.
        p = float(self.norm_type)
        return F.lp_pool2d(input, p, self.kernel_size, self.stride, self.ceil_mode)
986
+
987
+
988
class _AdaptiveMaxPoolNd(Module):
    """Shared base class for the N-dimensional adaptive max-pooling modules."""

    __constants__ = ['output_size', 'return_indices']
    return_indices: bool

    def __init__(self, output_size: _size_any_opt_t, return_indices: bool = False) -> None:
        super().__init__()
        self.output_size = output_size
        self.return_indices = return_indices

    def extra_repr(self) -> str:
        # Only the target size is shown in repr(), matching historical output.
        return 'output_size={}'.format(self.output_size)
999
+
1000
+ # FIXME (by @ssnl): Improve adaptive pooling docs: specify what the input and
1001
+ # output shapes are, and how the operation computes output.
1002
+
1003
+
1004
class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd):
    r"""Applies a 1D adaptive max pooling over an input signal composed of several input planes.

    The output size is :math:`L_{out}`, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size :math:`L_{out}`.
        return_indices: if ``True``, will return the indices along with the outputs.
                        Useful to pass to nn.MaxUnpool1d. Default: ``False``

    Shape:
        - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
        - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
          :math:`L_{out}=\text{output\_size}`.

    Examples:
        >>> # target output size of 5
        >>> m = nn.AdaptiveMaxPool1d(5)
        >>> input = torch.randn(1, 64, 8)
        >>> output = m(input)
    """

    output_size: _size_1_t

    def forward(self, input: Tensor) -> Tensor:
        # Delegates entirely to the functional op; window sizes are derived
        # from output_size at call time.
        return F.adaptive_max_pool1d(input, self.output_size, self.return_indices)
1032
+
1033
+
1034
class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
    r"""Applies a 2D adaptive max pooling over an input signal composed of several input planes.

    The output is of size :math:`H_{out} \times W_{out}`, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size of the image of the form :math:`H_{out} \times W_{out}`.
                     Can be a tuple :math:`(H_{out}, W_{out})` or a single :math:`H_{out}` for a
                     square image :math:`H_{out} \times H_{out}`. :math:`H_{out}` and :math:`W_{out}`
                     can be either a ``int``, or ``None`` which means the size will be the same as that
                     of the input.
        return_indices: if ``True``, will return the indices along with the outputs.
                        Useful to pass to nn.MaxUnpool2d. Default: ``False``

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
          :math:`(H_{out}, W_{out})=\text{output\_size}`.

    Examples:
        >>> # target output size of 5x7
        >>> m = nn.AdaptiveMaxPool2d((5, 7))
        >>> input = torch.randn(1, 64, 8, 9)
        >>> output = m(input)
        >>> # target output size of 7x7 (square)
        >>> m = nn.AdaptiveMaxPool2d(7)
        >>> input = torch.randn(1, 64, 10, 9)
        >>> output = m(input)
        >>> # target output size of 10x7
        >>> m = nn.AdaptiveMaxPool2d((None, 7))
        >>> input = torch.randn(1, 64, 10, 9)
        >>> output = m(input)
    """

    output_size: _size_2_opt_t

    def forward(self, input: Tensor):
        # No return annotation: the result is a Tensor, or a (Tensor, Tensor)
        # pair when return_indices is set.
        return F.adaptive_max_pool2d(input, self.output_size, self.return_indices)
1074
+
1075
+
1076
class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd):
    r"""Applies a 3D adaptive max pooling over an input signal composed of several input planes.

    The output is of size :math:`D_{out} \times H_{out} \times W_{out}`, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size of the image of the form :math:`D_{out} \times H_{out} \times W_{out}`.
                     Can be a tuple :math:`(D_{out}, H_{out}, W_{out})` or a single
                     :math:`D_{out}` for a cube :math:`D_{out} \times D_{out} \times D_{out}`.
                     :math:`D_{out}`, :math:`H_{out}` and :math:`W_{out}` can be either a
                     ``int``, or ``None`` which means the size will be the same as that of the input.

        return_indices: if ``True``, will return the indices along with the outputs.
                        Useful to pass to nn.MaxUnpool3d. Default: ``False``

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where :math:`(D_{out}, H_{out}, W_{out})=\text{output\_size}`.

    Examples:
        >>> # target output size of 5x7x9
        >>> m = nn.AdaptiveMaxPool3d((5, 7, 9))
        >>> input = torch.randn(1, 64, 8, 9, 10)
        >>> output = m(input)
        >>> # target output size of 7x7x7 (cube)
        >>> m = nn.AdaptiveMaxPool3d(7)
        >>> input = torch.randn(1, 64, 10, 9, 8)
        >>> output = m(input)
        >>> # target output size of 7x9x8
        >>> m = nn.AdaptiveMaxPool3d((7, None, None))
        >>> input = torch.randn(1, 64, 10, 9, 8)
        >>> output = m(input)
    """

    output_size: _size_3_opt_t

    def forward(self, input: Tensor):
        # No return annotation: the result is a Tensor, or a (Tensor, Tensor)
        # pair when return_indices is set.
        return F.adaptive_max_pool3d(input, self.output_size, self.return_indices)
1117
+
1118
+
1119
class _AdaptiveAvgPoolNd(Module):
    """Shared base class for the N-dimensional adaptive average-pooling modules."""

    __constants__ = ['output_size']

    def __init__(self, output_size: _size_any_opt_t) -> None:
        super().__init__()
        self.output_size = output_size

    def extra_repr(self) -> str:
        # Only the target size is shown in repr(), matching historical output.
        return 'output_size={}'.format(self.output_size)
1128
+
1129
+
1130
class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd):
    r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes.

    The output size is :math:`L_{out}`, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size :math:`L_{out}`.

    Shape:
        - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
        - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
          :math:`L_{out}=\text{output\_size}`.

    Examples:
        >>> # target output size of 5
        >>> m = nn.AdaptiveAvgPool1d(5)
        >>> input = torch.randn(1, 64, 8)
        >>> output = m(input)
    """

    output_size: _size_1_t

    def forward(self, input: Tensor) -> Tensor:
        # Delegates entirely to the functional op; window sizes are derived
        # from output_size at call time.
        return F.adaptive_avg_pool1d(input, self.output_size)
1156
+
1157
+
1158
class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
    r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes.

    The output is of size H x W, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size of the image of the form H x W.
                     Can be a tuple (H, W) or a single H for a square image H x H.
                     H and W can be either a ``int``, or ``None`` which means the size will
                     be the same as that of the input.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, S_{0}, S_{1})` or :math:`(C, S_{0}, S_{1})`, where
          :math:`S=\text{output\_size}`.

    Examples:
        >>> # target output size of 5x7
        >>> m = nn.AdaptiveAvgPool2d((5, 7))
        >>> input = torch.randn(1, 64, 8, 9)
        >>> output = m(input)
        >>> # target output size of 7x7 (square)
        >>> m = nn.AdaptiveAvgPool2d(7)
        >>> input = torch.randn(1, 64, 10, 9)
        >>> output = m(input)
        >>> # target output size of 10x7
        >>> m = nn.AdaptiveAvgPool2d((None, 7))
        >>> input = torch.randn(1, 64, 10, 9)
        >>> output = m(input)
    """

    output_size: _size_2_opt_t

    def forward(self, input: Tensor) -> Tensor:
        # Delegates entirely to the functional op; window sizes are derived
        # from output_size at call time.
        return F.adaptive_avg_pool2d(input, self.output_size)
1195
+
1196
+
1197
class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd):
    r"""Applies a 3D adaptive average pooling over an input signal composed of several input planes.

    The output is of size D x H x W, for any input size.
    The number of output features is equal to the number of input planes.

    Args:
        output_size: the target output size of the form D x H x W.
                     Can be a tuple (D, H, W) or a single number D for a cube D x D x D.
                     D, H and W can be either a ``int``, or ``None`` which means the size will
                     be the same as that of the input.

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, S_{0}, S_{1}, S_{2})` or :math:`(C, S_{0}, S_{1}, S_{2})`,
          where :math:`S=\text{output\_size}`.

    Examples:
        >>> # target output size of 5x7x9
        >>> m = nn.AdaptiveAvgPool3d((5, 7, 9))
        >>> input = torch.randn(1, 64, 8, 9, 10)
        >>> output = m(input)
        >>> # target output size of 7x7x7 (cube)
        >>> m = nn.AdaptiveAvgPool3d(7)
        >>> input = torch.randn(1, 64, 10, 9, 8)
        >>> output = m(input)
        >>> # target output size of 7x9x8
        >>> m = nn.AdaptiveAvgPool3d((7, None, None))
        >>> input = torch.randn(1, 64, 10, 9, 8)
        >>> output = m(input)
    """

    output_size: _size_3_opt_t

    def forward(self, input: Tensor) -> Tensor:
        # Delegates entirely to the functional op; window sizes are derived
        # from output_size at call time.
        return F.adaptive_avg_pool3d(input, self.output_size)
mgm/lib/python3.10/site-packages/torch/nn/modules/sparse.py ADDED
@@ -0,0 +1,454 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ import torch
4
+ from torch import Tensor
5
+ from torch.nn.parameter import Parameter
6
+
7
+ from .module import Module
8
+ from .. import functional as F
9
+ from .. import init
10
+
11
+ __all__ = ['Embedding', 'EmbeddingBag']
12
+
13
class Embedding(Module):
    r"""A lookup table mapping integer indices to dense embedding vectors.

    Holds a ``(num_embeddings, embedding_dim)`` weight matrix initialized
    from :math:`\mathcal{N}(0, 1)`; the forward pass gathers the rows
    selected by the input indices. Commonly used for word embeddings.

    Args:
        num_embeddings (int): size of the dictionary of embeddings.
        embedding_dim (int): the size of each embedding vector.
        padding_idx (int, optional): if given, the entry at this index does
            not contribute to the gradient, so its row stays fixed during
            training (a "pad" vector). A freshly constructed module zero-fills
            that row, but it may be overwritten with another value afterwards.
        max_norm (float, optional): if given, every looked-up vector whose
            norm exceeds this value is renormalized (in-place on ``weight``).
        norm_type (float, optional): the p of the p-norm used for
            ``max_norm``. Default ``2``.
        scale_grad_by_freq (bool, optional): scale gradients by the inverse
            frequency of the indices in the mini-batch. Default ``False``.
        sparse (bool, optional): if ``True``, the gradient w.r.t. ``weight``
            is a sparse tensor; only some optimizers support this
            (:class:`optim.SGD`, :class:`optim.SparseAdam`,
            :class:`optim.Adagrad` on CPU). Default ``False``.

    Attributes:
        weight (Tensor): learnable weights of shape
            ``(num_embeddings, embedding_dim)``.

    Shape:
        - Input: :math:`(*)`, IntTensor or LongTensor of arbitrary shape.
        - Output: :math:`(*, H)` where :math:`H=\text{embedding\_dim}`.

    .. note::
        When ``max_norm`` is not ``None``, the forward pass modifies
        ``weight`` in-place. Clone ``weight`` before any differentiable use
        that must precede the lookup.
    """

    __constants__ = ['num_embeddings', 'embedding_dim', 'padding_idx', 'max_norm',
                     'norm_type', 'scale_grad_by_freq', 'sparse']

    num_embeddings: int
    embedding_dim: int
    padding_idx: Optional[int]
    max_norm: Optional[float]
    norm_type: float
    scale_grad_by_freq: bool
    weight: Tensor
    freeze: bool
    sparse: bool

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
                 max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
                 sparse: bool = False, _weight: Optional[Tensor] = None, _freeze: bool = False,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        # Canonicalize padding_idx: validate range and map a negative index
        # onto its positive equivalent so later code sees one form only.
        if padding_idx is not None and padding_idx != 0:
            if padding_idx > 0:
                assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
            else:
                assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.sparse = sparse
        if _weight is not None:
            # Adopt the caller-provided matrix verbatim; no re-initialization.
            assert list(_weight.shape) == [num_embeddings, embedding_dim], \
                'Shape of weight does not match num_embeddings and embedding_dim'
            self.weight = Parameter(_weight, requires_grad=not _freeze)
        else:
            self.weight = Parameter(torch.empty((num_embeddings, embedding_dim), **factory_kwargs),
                                    requires_grad=not _freeze)
            self.reset_parameters()

    def reset_parameters(self) -> None:
        """Re-draw the weights from N(0, 1) and zero the padding row."""
        init.normal_(self.weight)
        self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        # Zero the pad row without recording the write on the autograd tape.
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        """Look up the embedding rows for the integer indices in ``input``."""
        return F.embedding(
            input, self.weight, self.padding_idx, self.max_norm,
            self.norm_type, self.scale_grad_by_freq, self.sparse)

    def extra_repr(self) -> str:
        """Summarize constructor arguments, omitting those left at defaults."""
        parts = [f'{self.num_embeddings}, {self.embedding_dim}']
        if self.padding_idx is not None:
            parts.append(f'padding_idx={self.padding_idx}')
        if self.max_norm is not None:
            parts.append(f'max_norm={self.max_norm}')
        if self.norm_type != 2:
            parts.append(f'norm_type={self.norm_type}')
        if self.scale_grad_by_freq is not False:
            parts.append(f'scale_grad_by_freq={self.scale_grad_by_freq}')
        if self.sparse is not False:
            parts.append('sparse=True')
        return ', '.join(parts)

    @classmethod
    def from_pretrained(cls, embeddings, freeze=True, padding_idx=None,
                        max_norm=None, norm_type=2., scale_grad_by_freq=False,
                        sparse=False):
        r"""Create an :class:`Embedding` from a given 2-dimensional FloatTensor.

        Args:
            embeddings (Tensor): tensor of shape
                ``(num_embeddings, embedding_dim)`` to use as the weights.
            freeze (bool, optional): if ``True`` (default) the weights are not
                updated during training (``requires_grad=False``).
            padding_idx (int, optional): see the class documentation.
            max_norm (float, optional): see the class documentation.
            norm_type (float, optional): see the class documentation. Default ``2``.
            scale_grad_by_freq (bool, optional): see the class documentation.
                Default ``False``.
            sparse (bool, optional): see the class documentation.

        Examples::

            >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
            >>> embedding = nn.Embedding.from_pretrained(weight)
            >>> input = torch.LongTensor([1])
            >>> # xdoctest: +IGNORE_WANT("non-deterministic")
            >>> embedding(input)
            tensor([[ 4.0000,  5.1000,  6.3000]])
        """
        assert embeddings.dim() == 2, \
            'Embeddings parameter is expected to be 2-dimensional'
        num_rows, num_cols = embeddings.shape
        return cls(
            num_embeddings=num_rows,
            embedding_dim=num_cols,
            _weight=embeddings,
            _freeze=freeze,
            padding_idx=padding_idx,
            max_norm=max_norm,
            norm_type=norm_type,
            scale_grad_by_freq=scale_grad_by_freq,
            sparse=sparse)
224
+
225
+
226
class EmbeddingBag(Module):
    r"""Computes sums, means or maxes of 'bags' of embeddings without
    materializing the intermediate per-index embeddings.

    For constant-length bags, no :attr:`per_sample_weights`, no indices equal
    to :attr:`padding_idx`, and 2D inputs, this is equivalent to
    :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=1)`` /
    ``torch.mean(dim=1)`` / ``torch.max(dim=1)`` for ``mode`` ``"sum"`` /
    ``"mean"`` / ``"max"``, but considerably faster and more memory efficient.

    ``per_sample_weights`` may be passed to the forward call to scale each
    embedding before the reduction; this is only supported with
    ``mode="sum"``.

    Args:
        num_embeddings (int): size of the dictionary of embeddings.
        embedding_dim (int): the size of each embedding vector.
        max_norm (float, optional): if given, rows whose norm exceeds this
            value are renormalized during lookup.
        norm_type (float, optional): the p of the p-norm for ``max_norm``.
            Default ``2``.
        scale_grad_by_freq (bool, optional): scale gradients by the inverse
            index frequency in the mini-batch. Default ``False``.
            Not supported with ``mode="max"``.
        mode (str, optional): ``"sum"``, ``"mean"`` or ``"max"`` reduction
            over each bag. Default: ``"mean"``.
        sparse (bool, optional): if ``True``, the gradient w.r.t. ``weight``
            is sparse. Not supported with ``mode="max"``.
        include_last_offset (bool, optional): if ``True``, ``offsets`` carries
            one extra trailing element equal to ``len(indices)`` (CSR format).
        padding_idx (int, optional): entries at this index are excluded from
            the reduction and their gradient contribution is zero; the row is
            zero-filled on construction but may be overwritten afterwards.

    Attributes:
        weight (Tensor): learnable weights of shape
            ``(num_embeddings, embedding_dim)``, initialized from
            :math:`\mathcal{N}(0, 1)`.

    Examples::

        >>> # an EmbeddingBag module containing 10 tensors of size 3
        >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum')
        >>> input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
        >>> offsets = torch.tensor([0, 4], dtype=torch.long)
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> embedding_sum(input, offsets)
        tensor([[-0.8861, -5.4350, -0.0523],
                [ 1.1306, -2.5798, -1.0044]])
    """

    __constants__ = ['num_embeddings', 'embedding_dim', 'max_norm', 'norm_type',
                     'scale_grad_by_freq', 'mode', 'sparse', 'include_last_offset',
                     'padding_idx']

    num_embeddings: int
    embedding_dim: int
    max_norm: Optional[float]
    norm_type: float
    scale_grad_by_freq: bool
    weight: Tensor
    mode: str
    sparse: bool
    include_last_offset: bool
    padding_idx: Optional[int]

    def __init__(self, num_embeddings: int, embedding_dim: int,
                 max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
                 mode: str = 'mean', sparse: bool = False, _weight: Optional[Tensor] = None,
                 include_last_offset: bool = False, padding_idx: Optional[int] = None,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        # Canonicalize padding_idx: validate range and map a negative index
        # onto its positive equivalent.
        if padding_idx is not None and padding_idx != 0:
            if padding_idx > 0:
                assert padding_idx < self.num_embeddings, 'padding_idx must be within num_embeddings'
            else:
                assert padding_idx >= -self.num_embeddings, 'padding_idx must be within num_embeddings'
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        if _weight is not None:
            # Adopt the caller-provided matrix verbatim; no re-initialization.
            assert list(_weight.shape) == [num_embeddings, embedding_dim], \
                'Shape of weight does not match num_embeddings and embedding_dim'
            self.weight = Parameter(_weight)
        else:
            self.weight = Parameter(torch.empty((num_embeddings, embedding_dim), **factory_kwargs))
            self.reset_parameters()
        self.mode = mode
        self.sparse = sparse
        self.include_last_offset = include_last_offset

    def reset_parameters(self) -> None:
        """Re-draw the weights from N(0, 1) and zero the padding row."""
        init.normal_(self.weight)
        self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        # Zero the pad row without recording the write on the autograd tape.
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor, offsets: Optional[Tensor] = None, per_sample_weights: Optional[Tensor] = None) -> Tensor:
        """Forward pass of EmbeddingBag.

        Args:
            input (Tensor): bags of indices into the embedding matrix. Either
                2D of shape ``(B, N)`` — ``B`` bags of fixed length ``N``,
                with ``offsets`` required to be ``None`` — or 1D of shape
                ``(N)``, a concatenation of bags whose start positions are
                given by ``offsets``. ``input`` and ``offsets`` must share
                the same integer dtype.
            offsets (Tensor, optional): only used with 1D ``input``; 1D
                tensor of bag start positions. Empty (0-length) bags yield
                zero vectors.
            per_sample_weights (Tensor, optional): float/double weights with
                exactly the same shape as ``input``, or ``None`` for all-ones.
                Only supported for ``mode='sum'``.

        Returns:
            Tensor of shape ``(B, embedding_dim)``.
        """
        return F.embedding_bag(input, self.weight, offsets,
                               self.max_norm, self.norm_type,
                               self.scale_grad_by_freq, self.mode, self.sparse,
                               per_sample_weights, self.include_last_offset,
                               self.padding_idx)

    def extra_repr(self) -> str:
        """Summarize constructor arguments (repr form), omitting defaults."""
        parts = [f'{self.num_embeddings!r}, {self.embedding_dim!r}']
        if self.max_norm is not None:
            parts.append(f'max_norm={self.max_norm!r}')
        if self.norm_type != 2:
            parts.append(f'norm_type={self.norm_type!r}')
        if self.scale_grad_by_freq is not False:
            parts.append(f'scale_grad_by_freq={self.scale_grad_by_freq!r}')
        parts.append(f'mode={self.mode!r}')
        if self.padding_idx is not None:
            parts.append(f'padding_idx={self.padding_idx!r}')
        return ', '.join(parts)

    @classmethod
    def from_pretrained(cls, embeddings: Tensor, freeze: bool = True, max_norm: Optional[float] = None,
                        norm_type: float = 2., scale_grad_by_freq: bool = False,
                        mode: str = 'mean', sparse: bool = False, include_last_offset: bool = False,
                        padding_idx: Optional[int] = None) -> 'EmbeddingBag':
        r"""Create an :class:`EmbeddingBag` from a given 2-dimensional FloatTensor.

        Args:
            embeddings (Tensor): tensor of shape
                ``(num_embeddings, embedding_dim)`` to use as the weights.
            freeze (bool, optional): if ``True`` (default) the weights are
                not updated during training (``requires_grad=False``).
            max_norm (float, optional): see the class documentation. Default: ``None``.
            norm_type (float, optional): see the class documentation. Default ``2``.
            scale_grad_by_freq (bool, optional): see the class documentation. Default ``False``.
            mode (str, optional): see the class documentation. Default: ``"mean"``.
            sparse (bool, optional): see the class documentation. Default: ``False``.
            include_last_offset (bool, optional): see the class documentation. Default: ``False``.
            padding_idx (int, optional): see the class documentation. Default: ``None``.

        Examples::

            >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
            >>> embeddingbag = nn.EmbeddingBag.from_pretrained(weight)
            >>> input = torch.LongTensor([[1, 0]])
            >>> # xdoctest: +IGNORE_WANT("non-deterministic")
            >>> embeddingbag(input)
            tensor([[ 2.5000,  3.7000,  4.6500]])
        """
        assert embeddings.dim() == 2, \
            'Embeddings parameter is expected to be 2-dimensional'
        num_rows, num_cols = embeddings.shape
        bag = cls(
            num_embeddings=num_rows,
            embedding_dim=num_cols,
            _weight=embeddings,
            max_norm=max_norm,
            norm_type=norm_type,
            scale_grad_by_freq=scale_grad_by_freq,
            mode=mode,
            sparse=sparse,
            include_last_offset=include_last_offset,
            padding_idx=padding_idx)
        # The _freeze keyword does not exist here; toggle requires_grad after
        # construction instead, matching the documented `freeze` semantics.
        bag.weight.requires_grad = not freeze
        return bag
mgm/lib/python3.10/site-packages/torch/nn/modules/transformer.py ADDED
@@ -0,0 +1,931 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ from typing import Optional, Any, Union, Callable
3
+
4
+ import torch
5
+ import warnings
6
+ from torch import Tensor
7
+ from .. import functional as F
8
+ from .module import Module
9
+ from .activation import MultiheadAttention
10
+ from .container import ModuleList
11
+ from ..init import xavier_uniform_
12
+ from .dropout import Dropout
13
+ from .linear import Linear
14
+ from .normalization import LayerNorm
15
+
16
+ __all__ = ['Transformer', 'TransformerEncoder', 'TransformerDecoder', 'TransformerEncoderLayer', 'TransformerDecoderLayer']
17
+
18
+ def _generate_square_subsequent_mask(
19
+ sz: int,
20
+ device: torch.device = torch.device(torch._C._get_default_device()), # torch.device('cpu'),
21
+ dtype: torch.dtype = torch.get_default_dtype(),
22
+ ) -> Tensor:
23
+ r"""Generate a square causal mask for the sequence. The masked positions are filled with float('-inf').
24
+ Unmasked positions are filled with float(0.0).
25
+ """
26
+ return torch.triu(
27
+ torch.full((sz, sz), float('-inf'), dtype=dtype, device=device),
28
+ diagonal=1,
29
+ )
30
+
31
+
32
+ def _get_seq_len(
33
+ src: Tensor,
34
+ batch_first: bool
35
+ ) -> Optional[int]:
36
+
37
+ if src.is_nested:
38
+ return None
39
+ else:
40
+ src_size = src.size()
41
+ if len(src_size) == 2:
42
+ # unbatched: S, E
43
+ return src_size[0]
44
+ else:
45
+ # batched: B, S, E if batch_first else S, B, E
46
+ seq_len_pos = 1 if batch_first else 0
47
+ return src_size[seq_len_pos]
48
+
49
+
50
+ class Transformer(Module):
51
+ r"""A transformer model. User is able to modify the attributes as needed. The architecture
52
+ is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
53
+ Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
54
+ Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information
55
+ Processing Systems, pages 6000-6010.
56
+
57
+ Args:
58
+ d_model: the number of expected features in the encoder/decoder inputs (default=512).
59
+ nhead: the number of heads in the multiheadattention models (default=8).
60
+ num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
61
+ num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6).
62
+ dim_feedforward: the dimension of the feedforward network model (default=2048).
63
+ dropout: the dropout value (default=0.1).
64
+ activation: the activation function of encoder/decoder intermediate layer, can be a string
65
+ ("relu" or "gelu") or a unary callable. Default: relu
66
+ custom_encoder: custom encoder (default=None).
67
+ custom_decoder: custom decoder (default=None).
68
+ layer_norm_eps: the eps value in layer normalization components (default=1e-5).
69
+ batch_first: If ``True``, then the input and output tensors are provided
70
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
71
+ norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before
72
+ other attention and feedforward operations, otherwise after. Default: ``False`` (after).
73
+ bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
74
+ bias. Default: ``True``.
75
+
76
+ Examples::
77
+ >>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12)
78
+ >>> src = torch.rand((10, 32, 512))
79
+ >>> tgt = torch.rand((20, 32, 512))
80
+ >>> out = transformer_model(src, tgt)
81
+
82
+ Note: A full example to apply nn.Transformer module for the word language model is available in
83
+ https://github.com/pytorch/examples/tree/master/word_language_model
84
+ """
85
+
86
+ def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,
87
+ num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,
88
+ activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
89
+ custom_encoder: Optional[Any] = None, custom_decoder: Optional[Any] = None,
90
+ layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
91
+ bias: bool = True, device=None, dtype=None) -> None:
92
+ factory_kwargs = {'device': device, 'dtype': dtype}
93
+ super().__init__()
94
+ torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}")
95
+
96
+ if custom_encoder is not None:
97
+ self.encoder = custom_encoder
98
+ else:
99
+ encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
100
+ activation, layer_norm_eps, batch_first, norm_first,
101
+ bias, **factory_kwargs)
102
+ encoder_norm = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
103
+ self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
104
+
105
+ if custom_decoder is not None:
106
+ self.decoder = custom_decoder
107
+ else:
108
+ decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
109
+ activation, layer_norm_eps, batch_first, norm_first,
110
+ bias, **factory_kwargs)
111
+ decoder_norm = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
112
+ self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm)
113
+
114
+ self._reset_parameters()
115
+
116
+ self.d_model = d_model
117
+ self.nhead = nhead
118
+
119
+ self.batch_first = batch_first
120
+
121
+ def forward(self, src: Tensor, tgt: Tensor, src_mask: Optional[Tensor] = None, tgt_mask: Optional[Tensor] = None,
122
+ memory_mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None,
123
+ tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None,
124
+ src_is_causal: Optional[bool] = None, tgt_is_causal: Optional[bool] = None,
125
+ memory_is_causal: bool = False) -> Tensor:
126
+ r"""Take in and process masked source/target sequences.
127
+
128
+ Args:
129
+ src: the sequence to the encoder (required).
130
+ tgt: the sequence to the decoder (required).
131
+ src_mask: the additive mask for the src sequence (optional).
132
+ tgt_mask: the additive mask for the tgt sequence (optional).
133
+ memory_mask: the additive mask for the encoder output (optional).
134
+ src_key_padding_mask: the Tensor mask for src keys per batch (optional).
135
+ tgt_key_padding_mask: the Tensor mask for tgt keys per batch (optional).
136
+ memory_key_padding_mask: the Tensor mask for memory keys per batch (optional).
137
+ src_is_causal: If specified, applies a causal mask as ``src_mask``.
138
+ Default: ``None``; try to detect a causal mask.
139
+ Warning:
140
+ ``src_is_causal`` provides a hint that ``src_mask`` is
141
+ the causal mask. Providing incorrect hints can result in
142
+ incorrect execution, including forward and backward
143
+ compatibility.
144
+ tgt_is_causal: If specified, applies a causal mask as ``tgt_mask``.
145
+ Default: ``None``; try to detect a causal mask.
146
+ Warning:
147
+ ``tgt_is_causal`` provides a hint that ``tgt_mask`` is
148
+ the causal mask. Providing incorrect hints can result in
149
+ incorrect execution, including forward and backward
150
+ compatibility.
151
+ memory_is_causal: If specified, applies a causal mask as
152
+ ``memory_mask``.
153
+ Default: ``False``.
154
+ Warning:
155
+ ``memory_is_causal`` provides a hint that
156
+ ``memory_mask`` is the causal mask. Providing incorrect
157
+ hints can result in incorrect execution, including
158
+ forward and backward compatibility.
159
+
160
+ Shape:
161
+ - src: :math:`(S, E)` for unbatched input, :math:`(S, N, E)` if `batch_first=False` or
162
+ `(N, S, E)` if `batch_first=True`.
163
+ - tgt: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or
164
+ `(N, T, E)` if `batch_first=True`.
165
+ - src_mask: :math:`(S, S)` or :math:`(N\cdot\text{num\_heads}, S, S)`.
166
+ - tgt_mask: :math:`(T, T)` or :math:`(N\cdot\text{num\_heads}, T, T)`.
167
+ - memory_mask: :math:`(T, S)`.
168
+ - src_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`.
169
+ - tgt_key_padding_mask: :math:`(T)` for unbatched input otherwise :math:`(N, T)`.
170
+ - memory_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`.
171
+
172
+ Note: [src/tgt/memory]_mask ensures that position i is allowed to attend the unmasked
173
+ positions. If a BoolTensor is provided, positions with ``True``
174
+ are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
175
+ is provided, it will be added to the attention weight.
176
+ [src/tgt/memory]_key_padding_mask provides specified elements in the key to be ignored by
177
+ the attention. If a BoolTensor is provided, the positions with the
178
+ value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
179
+
180
+ - output: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or
181
+ `(N, T, E)` if `batch_first=True`.
182
+
183
+ Note: Due to the multi-head attention architecture in the transformer model,
184
+ the output sequence length of a transformer is same as the input sequence
185
+ (i.e. target) length of the decoder.
186
+
187
+ where S is the source sequence length, T is the target sequence length, N is the
188
+ batch size, E is the feature number
189
+
190
+ Examples:
191
+ >>> # xdoctest: +SKIP
192
+ >>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
193
+ """
194
+
195
+ is_batched = src.dim() == 3
196
+ if not self.batch_first and src.size(1) != tgt.size(1) and is_batched:
197
+ raise RuntimeError("the batch number of src and tgt must be equal")
198
+ elif self.batch_first and src.size(0) != tgt.size(0) and is_batched:
199
+ raise RuntimeError("the batch number of src and tgt must be equal")
200
+
201
+ if src.size(-1) != self.d_model or tgt.size(-1) != self.d_model:
202
+ raise RuntimeError("the feature number of src and tgt must be equal to d_model")
203
+
204
+ memory = self.encoder(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask,
205
+ is_causal=src_is_causal)
206
+ output = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_mask=memory_mask,
207
+ tgt_key_padding_mask=tgt_key_padding_mask,
208
+ memory_key_padding_mask=memory_key_padding_mask,
209
+ tgt_is_causal=tgt_is_causal, memory_is_causal=memory_is_causal)
210
+ return output
211
+
212
+ @staticmethod
213
+ def generate_square_subsequent_mask(
214
+ sz: int,
215
+ device: torch.device = torch.device(torch._C._get_default_device()), # torch.device('cpu'),
216
+ dtype: torch.dtype = torch.get_default_dtype(),
217
+ ) -> Tensor:
218
+ r"""Generate a square causal mask for the sequence. The masked positions are filled with float('-inf').
219
+ Unmasked positions are filled with float(0.0).
220
+ """
221
+ return _generate_square_subsequent_mask(sz, dtype=dtype, device=device)
222
+
223
+ def _reset_parameters(self):
224
+ r"""Initiate parameters in the transformer model."""
225
+
226
+ for p in self.parameters():
227
+ if p.dim() > 1:
228
+ xavier_uniform_(p)
229
+
230
+
231
class TransformerEncoder(Module):
    r"""TransformerEncoder is a stack of N encoder layers. Users can build the
    BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.

    Args:
        encoder_layer: an instance of the TransformerEncoderLayer() class (required).
        num_layers: the number of sub-encoder-layers in the encoder (required).
        norm: the layer normalization component (optional).
        enable_nested_tensor: if True, input will automatically convert to nested tensor
            (and convert back on output). This will improve the overall performance of
            TransformerEncoder when padding rate is high. Default: ``True`` (enabled).
        mask_check: if True (default), forward() verifies that ``src_key_padding_mask``
            is left-aligned before converting the input to a nested tensor.

    Examples::
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
        >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
        >>> src = torch.rand(10, 32, 512)
        >>> out = transformer_encoder(src)
    """
    __constants__ = ['norm']

    def __init__(self, encoder_layer, num_layers, norm=None, enable_nested_tensor=True, mask_check=True):
        super().__init__()
        torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}")
        # Each stacked layer is an independent deep copy of encoder_layer.
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        # this attribute saves the value provided at object construction
        self.enable_nested_tensor = enable_nested_tensor
        # this attribute controls whether nested tensors are used
        self.use_nested_tensor = enable_nested_tensor
        self.mask_check = mask_check

        # Decide once, at construction time, whether the nested-tensor
        # sparsity fast path can ever apply to this encoder_layer. Any
        # non-empty reason string disables it (with a warning below).
        enc_layer = "encoder_layer"
        why_not_sparsity_fast_path = ''
        if not isinstance(encoder_layer, torch.nn.TransformerEncoderLayer):
            why_not_sparsity_fast_path = f"{enc_layer} was not TransformerEncoderLayer"
        elif encoder_layer.norm_first :
            why_not_sparsity_fast_path = f"{enc_layer}.norm_first was True"
        elif not encoder_layer.self_attn.batch_first:
            why_not_sparsity_fast_path = (f"{enc_layer}.self_attn.batch_first was not True" +
                                          "(use batch_first for better inference performance)")
        elif not encoder_layer.self_attn._qkv_same_embed_dim:
            why_not_sparsity_fast_path = f"{enc_layer}.self_attn._qkv_same_embed_dim was not True"
        elif not encoder_layer.activation_relu_or_gelu:
            why_not_sparsity_fast_path = f"{enc_layer}.activation_relu_or_gelu was not True"
        elif not (encoder_layer.norm1.eps == encoder_layer.norm2.eps) :
            why_not_sparsity_fast_path = f"{enc_layer}.norm1.eps was not equal to {enc_layer}.norm2.eps"
        elif encoder_layer.self_attn.num_heads % 2 == 1:
            why_not_sparsity_fast_path = f"{enc_layer}.self_attn.num_heads is odd"

        if enable_nested_tensor and why_not_sparsity_fast_path:
            warnings.warn(f"enable_nested_tensor is True, but self.use_nested_tensor is False because {why_not_sparsity_fast_path}")
            self.use_nested_tensor = False


    def forward(
            self,
            src: Tensor,
            mask: Optional[Tensor] = None,
            src_key_padding_mask: Optional[Tensor] = None,
            is_causal: Optional[bool] = None) -> Tensor:
        r"""Pass the input through the encoder layers in turn.

        Args:
            src: the sequence to the encoder (required).
            mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).
            is_causal: If specified, applies a causal mask as ``mask``.
                Default: ``None``; try to detect a causal mask.
                Warning:
                ``is_causal`` provides a hint that ``mask`` is the
                causal mask. Providing incorrect hints can result in
                incorrect execution, including forward and backward
                compatibility.

        Shape:
            see the docs in Transformer class.
        """
        # Normalize both masks to a canonical (float, additive) form and
        # validate that their dtypes are mutually consistent.
        src_key_padding_mask = F._canonical_mask(
            mask=src_key_padding_mask,
            mask_name="src_key_padding_mask",
            other_type=F._none_or_dtype(mask),
            other_name="mask",
            target_type=src.dtype
        )

        mask = F._canonical_mask(
            mask=mask,
            mask_name="mask",
            other_type=None,
            other_name="",
            target_type=src.dtype,
            check_other=False,
        )

        output = src
        convert_to_nested = False
        first_layer = self.layers[0]
        src_key_padding_mask_for_layers = src_key_padding_mask
        # Runtime (per-call) eligibility checks for the nested-tensor fast
        # path; construction-time checks already ran in __init__ and are
        # reflected in self.use_nested_tensor.
        why_not_sparsity_fast_path = ''
        str_first_layer = "self.layers[0]"
        batch_first = first_layer.self_attn.batch_first
        if not hasattr(self, "use_nested_tensor"):
            why_not_sparsity_fast_path = "use_nested_tensor attribute not present"
        elif not self.use_nested_tensor:
            why_not_sparsity_fast_path = "self.use_nested_tensor (set in init) was not True"
        elif first_layer.training:
            why_not_sparsity_fast_path = f"{str_first_layer} was in training mode"
        elif not src.dim() == 3:
            why_not_sparsity_fast_path = f"input not batched; expected src.dim() of 3 but got {src.dim()}"
        elif src_key_padding_mask is None:
            why_not_sparsity_fast_path = "src_key_padding_mask was None"
        elif (((not hasattr(self, "mask_check")) or self.mask_check)
                and not torch._nested_tensor_from_mask_left_aligned(src, src_key_padding_mask.logical_not())):
            why_not_sparsity_fast_path = "mask_check enabled, and src and src_key_padding_mask was not left aligned"
        elif output.is_nested:
            why_not_sparsity_fast_path = "NestedTensor input is not supported"
        elif mask is not None:
            why_not_sparsity_fast_path = "src_key_padding_mask and mask were both supplied"
        elif torch.is_autocast_enabled():
            why_not_sparsity_fast_path = "autocast is enabled"

        if not why_not_sparsity_fast_path:
            # All tensors the fast path touches, for the device /
            # torch_function / requires_grad checks below.
            tensor_args = (
                src,
                first_layer.self_attn.in_proj_weight,
                first_layer.self_attn.in_proj_bias,
                first_layer.self_attn.out_proj.weight,
                first_layer.self_attn.out_proj.bias,
                first_layer.norm1.weight,
                first_layer.norm1.bias,
                first_layer.norm2.weight,
                first_layer.norm2.bias,
                first_layer.linear1.weight,
                first_layer.linear1.bias,
                first_layer.linear2.weight,
                first_layer.linear2.bias,
            )
            _supported_device_type = ["cpu", "cuda", torch.utils.backend_registration._privateuse1_backend_name]
            if torch.overrides.has_torch_function(tensor_args):
                why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
            elif src.device.type not in _supported_device_type:
                why_not_sparsity_fast_path = f"src device is neither one of {_supported_device_type}"
            elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args):
                why_not_sparsity_fast_path = ("grad is enabled and at least one of query or the "
                                              "input/output projection weights or biases requires_grad")

            if (not why_not_sparsity_fast_path) and (src_key_padding_mask is not None):
                # Fast path taken: pack the padded batch into a nested
                # tensor so padding is skipped entirely inside the layers;
                # the key-padding mask is then no longer needed per layer.
                convert_to_nested = True
                output = torch._nested_tensor_from_mask(output, src_key_padding_mask.logical_not(), mask_check=False)
                src_key_padding_mask_for_layers = None

        # Resolve the causal hint once so every layer receives the same flag.
        seq_len = _get_seq_len(src, batch_first)
        is_causal = _detect_is_causal_mask(mask, is_causal, seq_len)

        for mod in self.layers:
            output = mod(output, src_mask=mask, is_causal=is_causal, src_key_padding_mask=src_key_padding_mask_for_layers)

        if convert_to_nested:
            # Undo the nested-tensor packing: pad back to the input's shape.
            output = output.to_padded_tensor(0., src.size())

        if self.norm is not None:
            output = self.norm(output)

        return output
396
+
397
+
398
+ class TransformerDecoder(Module):
399
+ r"""TransformerDecoder is a stack of N decoder layers
400
+
401
+ Args:
402
+ decoder_layer: an instance of the TransformerDecoderLayer() class (required).
403
+ num_layers: the number of sub-decoder-layers in the decoder (required).
404
+ norm: the layer normalization component (optional).
405
+
406
+ Examples::
407
+ >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
408
+ >>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
409
+ >>> memory = torch.rand(10, 32, 512)
410
+ >>> tgt = torch.rand(20, 32, 512)
411
+ >>> out = transformer_decoder(tgt, memory)
412
+ """
413
+ __constants__ = ['norm']
414
+
415
+ def __init__(self, decoder_layer, num_layers, norm=None):
416
+ super().__init__()
417
+ torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}")
418
+ self.layers = _get_clones(decoder_layer, num_layers)
419
+ self.num_layers = num_layers
420
+ self.norm = norm
421
+
422
+ def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None,
423
+ memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None,
424
+ memory_key_padding_mask: Optional[Tensor] = None, tgt_is_causal: Optional[bool] = None,
425
+ memory_is_causal: bool = False) -> Tensor:
426
+ r"""Pass the inputs (and mask) through the decoder layer in turn.
427
+
428
+ Args:
429
+ tgt: the sequence to the decoder (required).
430
+ memory: the sequence from the last layer of the encoder (required).
431
+ tgt_mask: the mask for the tgt sequence (optional).
432
+ memory_mask: the mask for the memory sequence (optional).
433
+ tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
434
+ memory_key_padding_mask: the mask for the memory keys per batch (optional).
435
+ tgt_is_causal: If specified, applies a causal mask as ``tgt mask``.
436
+ Default: ``None``; try to detect a causal mask.
437
+ Warning:
438
+ ``tgt_is_causal`` provides a hint that ``tgt_mask`` is
439
+ the causal mask. Providing incorrect hints can result in
440
+ incorrect execution, including forward and backward
441
+ compatibility.
442
+ memory_is_causal: If specified, applies a causal mask as
443
+ ``memory mask``.
444
+ Default: ``False``.
445
+ Warning:
446
+ ``memory_is_causal`` provides a hint that
447
+ ``memory_mask`` is the causal mask. Providing incorrect
448
+ hints can result in incorrect execution, including
449
+ forward and backward compatibility.
450
+
451
+ Shape:
452
+ see the docs in Transformer class.
453
+ """
454
+ output = tgt
455
+
456
+ seq_len = _get_seq_len(tgt, self.layers[0].self_attn.batch_first)
457
+ tgt_is_causal = _detect_is_causal_mask(tgt_mask, tgt_is_causal, seq_len)
458
+
459
+ for mod in self.layers:
460
+ output = mod(output, memory, tgt_mask=tgt_mask,
461
+ memory_mask=memory_mask,
462
+ tgt_key_padding_mask=tgt_key_padding_mask,
463
+ memory_key_padding_mask=memory_key_padding_mask,
464
+ tgt_is_causal=tgt_is_causal,
465
+ memory_is_causal=memory_is_causal)
466
+
467
+ if self.norm is not None:
468
+ output = self.norm(output)
469
+
470
+ return output
471
+
472
class TransformerEncoderLayer(Module):
    r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
    This standard encoder layer is based on the paper "Attention Is All You Need".
    Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
    Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
    Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
    in a different way during application.

    TransformerEncoderLayer can handle either traditional torch.tensor inputs,
    or Nested Tensor inputs. Derived classes are expected to similarly accept
    both input formats. (Not all combinations of inputs are currently
    supported by TransformerEncoderLayer while Nested Tensor is in prototype
    state.)

    If you are implementing a custom layer, you may derive it either from
    the Module or TransformerEncoderLayer class. If your custom layer
    supports both torch.Tensors and Nested Tensors inputs, make its
    implementation a derived class of TransformerEncoderLayer. If your custom
    Layer supports only torch.Tensor inputs, derive its implementation from
    Module.

    Args:
        d_model: the number of expected features in the input (required).
        nhead: the number of heads in the multiheadattention models (required).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        activation: the activation function of the intermediate layer, can be a string
            ("relu" or "gelu") or a unary callable. Default: relu
        layer_norm_eps: the eps value in layer normalization components (default=1e-5).
        batch_first: If ``True``, then the input and output tensors are provided
            as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
        norm_first: if ``True``, layer norm is done prior to attention and feedforward
            operations, respectively. Otherwise it's done after. Default: ``False`` (after).
        bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
            bias. Default: ``True``.

    Examples::
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
        >>> src = torch.rand(10, 32, 512)
        >>> out = encoder_layer(src)

    Alternatively, when ``batch_first`` is ``True``:
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
        >>> src = torch.rand(32, 10, 512)
        >>> out = encoder_layer(src)

    Fast path:
        forward() will use a special optimized implementation described in
        `FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`_ if all of the following
        conditions are met:

        - Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor
          argument ``requires_grad``
        - training is disabled (using ``.eval()``)
        - batch_first is ``True`` and the input is batched (i.e., ``src.dim() == 3``)
        - activation is one of: ``"relu"``, ``"gelu"``, ``torch.functional.relu``, or ``torch.functional.gelu``
        - at most one of ``src_mask`` and ``src_key_padding_mask`` is passed
        - if src is a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_, neither ``src_mask``
          nor ``src_key_padding_mask`` is passed
        - the two ``LayerNorm`` instances have a consistent ``eps`` value (this will naturally be the case
          unless the caller has manually modified one without modifying the other)

        If the optimized implementation is in use, a
        `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be
        passed for ``src`` to represent padding more efficiently than using a padding
        mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ will be
        returned, and an additional speedup proportional to the fraction of the input that
        is padding can be expected.

    .. _`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`:
        https://arxiv.org/abs/2205.14135

    """
    __constants__ = ['norm_first']

    def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1,
                 activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
                 layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
                 bias: bool = True, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout,
                                            bias=bias, batch_first=batch_first,
                                            **factory_kwargs)
        # Implementation of Feedforward model
        self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs)
        self.dropout = Dropout(dropout)
        self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs)

        self.norm_first = norm_first
        self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
        self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
        self.dropout1 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)

        # Legacy string support for activation function.
        if isinstance(activation, str):
            activation = _get_activation_fn(activation)

        # We can't test self.activation in forward() in TorchScript,
        # so stash some information about it instead.
        # 0 = other, 1 = relu, 2 = gelu; used by the fast-path eligibility
        # check and passed to the fused kernel as a "use gelu" flag.
        if activation is F.relu or isinstance(activation, torch.nn.ReLU):
            self.activation_relu_or_gelu = 1
        elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
            self.activation_relu_or_gelu = 2
        else:
            self.activation_relu_or_gelu = 0
        self.activation = activation

    def __setstate__(self, state):
        # Checkpoints created before `activation` existed default to ReLU.
        super().__setstate__(state)
        if not hasattr(self, 'activation'):
            self.activation = F.relu


    def forward(
            self,
            src: Tensor,
            src_mask: Optional[Tensor] = None,
            src_key_padding_mask: Optional[Tensor] = None,
            is_causal: bool = False) -> Tensor:
        r"""Pass the input through the encoder layer.

        Args:
            src: the sequence to the encoder layer (required).
            src_mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).
            is_causal: If specified, applies a causal mask as ``src mask``.
                Default: ``False``.
                Warning:
                ``is_causal`` provides a hint that ``src_mask`` is the
                causal mask. Providing incorrect hints can result in
                incorrect execution, including forward and backward
                compatibility.

        Shape:
            see the docs in Transformer class.
        """
        # Normalize both masks to a canonical (float, additive) form and
        # validate that their dtypes are mutually consistent.
        src_key_padding_mask = F._canonical_mask(
            mask=src_key_padding_mask,
            mask_name="src_key_padding_mask",
            other_type=F._none_or_dtype(src_mask),
            other_name="src_mask",
            target_type=src.dtype
        )

        src_mask = F._canonical_mask(
            mask=src_mask,
            mask_name="src_mask",
            other_type=None,
            other_name="",
            target_type=src.dtype,
            check_other=False,
        )

        # see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
        # Per-call eligibility checks for the fused-kernel fast path; any
        # non-empty reason string falls back to the reference implementation.
        why_not_sparsity_fast_path = ''
        if not src.dim() == 3:
            why_not_sparsity_fast_path = f"input not batched; expected src.dim() of 3 but got {src.dim()}"
        elif self.training:
            why_not_sparsity_fast_path = "training is enabled"
        elif not self.self_attn.batch_first :
            why_not_sparsity_fast_path = "self_attn.batch_first was not True"
        elif not self.self_attn._qkv_same_embed_dim :
            why_not_sparsity_fast_path = "self_attn._qkv_same_embed_dim was not True"
        elif not self.activation_relu_or_gelu:
            why_not_sparsity_fast_path = "activation_relu_or_gelu was not True"
        elif not (self.norm1.eps == self.norm2.eps):
            why_not_sparsity_fast_path = "norm1.eps is not equal to norm2.eps"
        elif src.is_nested and (src_key_padding_mask is not None or src_mask is not None):
            why_not_sparsity_fast_path = "neither src_key_padding_mask nor src_mask are not supported with NestedTensor input"
        elif self.self_attn.num_heads % 2 == 1:
            why_not_sparsity_fast_path = "num_head is odd"
        elif torch.is_autocast_enabled():
            why_not_sparsity_fast_path = "autocast is enabled"
        if not why_not_sparsity_fast_path:
            # All tensors the fused kernel touches, for the device /
            # torch_function / requires_grad checks below.
            tensor_args = (
                src,
                self.self_attn.in_proj_weight,
                self.self_attn.in_proj_bias,
                self.self_attn.out_proj.weight,
                self.self_attn.out_proj.bias,
                self.norm1.weight,
                self.norm1.bias,
                self.norm2.weight,
                self.norm2.bias,
                self.linear1.weight,
                self.linear1.bias,
                self.linear2.weight,
                self.linear2.bias,
            )

            # We have to use list comprehensions below because TorchScript does not support
            # generator expressions.
            _supported_device_type = ["cpu", "cuda", torch.utils.backend_registration._privateuse1_backend_name]
            if torch.overrides.has_torch_function(tensor_args):
                why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
            elif not all((x.device.type in _supported_device_type) for x in tensor_args):
                why_not_sparsity_fast_path = ("some Tensor argument's device is neither one of "
                                              f"{_supported_device_type}")
            elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args):
                why_not_sparsity_fast_path = ("grad is enabled and at least one of query or the "
                                              "input/output projection weights or biases requires_grad")

            if not why_not_sparsity_fast_path:
                # Fast path: dispatch the whole layer to the fused C++ kernel.
                merged_mask, mask_type = self.self_attn.merge_masks(src_mask, src_key_padding_mask, src)
                return torch._transformer_encoder_layer_fwd(
                    src,
                    self.self_attn.embed_dim,
                    self.self_attn.num_heads,
                    self.self_attn.in_proj_weight,
                    self.self_attn.in_proj_bias,
                    self.self_attn.out_proj.weight,
                    self.self_attn.out_proj.bias,
                    self.activation_relu_or_gelu == 2,
                    self.norm_first,
                    self.norm1.eps,
                    self.norm1.weight,
                    self.norm1.bias,
                    self.norm2.weight,
                    self.norm2.bias,
                    self.linear1.weight,
                    self.linear1.bias,
                    self.linear2.weight,
                    self.linear2.bias,
                    merged_mask,
                    mask_type,
                )


        # Reference (slow path) implementation: pre-norm or post-norm
        # residual blocks around self-attention and the feedforward network.
        x = src
        if self.norm_first:
            x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask, is_causal=is_causal)
            x = x + self._ff_block(self.norm2(x))
        else:
            x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask, is_causal=is_causal))
            x = self.norm2(x + self._ff_block(x))

        return x

    # self-attention block
    def _sa_block(self, x: Tensor,
                  attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False) -> Tensor:
        x = self.self_attn(x, x, x,
                           attn_mask=attn_mask,
                           key_padding_mask=key_padding_mask,
                           need_weights=False, is_causal=is_causal)[0]
        return self.dropout1(x)

    # feed forward block
    def _ff_block(self, x: Tensor) -> Tensor:
        x = self.linear2(self.dropout(self.activation(self.linear1(x))))
        return self.dropout2(x)
725
+
726
+
727
+ class TransformerDecoderLayer(Module):
728
+ r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
729
+ This standard decoder layer is based on the paper "Attention Is All You Need".
730
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
731
+ Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
732
+ Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
733
+ in a different way during application.
734
+
735
+ Args:
736
+ d_model: the number of expected features in the input (required).
737
+ nhead: the number of heads in the multiheadattention models (required).
738
+ dim_feedforward: the dimension of the feedforward network model (default=2048).
739
+ dropout: the dropout value (default=0.1).
740
+ activation: the activation function of the intermediate layer, can be a string
741
+ ("relu" or "gelu") or a unary callable. Default: relu
742
+ layer_norm_eps: the eps value in layer normalization components (default=1e-5).
743
+ batch_first: If ``True``, then the input and output tensors are provided
744
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
745
+ norm_first: if ``True``, layer norm is done prior to self attention, multihead
746
+ attention and feedforward operations, respectively. Otherwise it's done after.
747
+ Default: ``False`` (after).
748
+ bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
749
+ bias. Default: ``True``.
750
+
751
+ Examples::
752
+ >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
753
+ >>> memory = torch.rand(10, 32, 512)
754
+ >>> tgt = torch.rand(20, 32, 512)
755
+ >>> out = decoder_layer(tgt, memory)
756
+
757
+ Alternatively, when ``batch_first`` is ``True``:
758
+ >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=True)
759
+ >>> memory = torch.rand(32, 10, 512)
760
+ >>> tgt = torch.rand(32, 20, 512)
761
+ >>> out = decoder_layer(tgt, memory)
762
+ """
763
+ __constants__ = ['norm_first']
764
+
765
+ def __init__(self, d_model: int, nhead: int, dim_feedforward: int = 2048, dropout: float = 0.1,
766
+ activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
767
+ layer_norm_eps: float = 1e-5, batch_first: bool = False, norm_first: bool = False,
768
+ bias: bool = True, device=None, dtype=None) -> None:
769
+ factory_kwargs = {'device': device, 'dtype': dtype}
770
+ super().__init__()
771
+ self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
772
+ bias=bias, **factory_kwargs)
773
+ self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first,
774
+ bias=bias, **factory_kwargs)
775
+ # Implementation of Feedforward model
776
+ self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs)
777
+ self.dropout = Dropout(dropout)
778
+ self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs)
779
+
780
+ self.norm_first = norm_first
781
+ self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
782
+ self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
783
+ self.norm3 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
784
+ self.dropout1 = Dropout(dropout)
785
+ self.dropout2 = Dropout(dropout)
786
+ self.dropout3 = Dropout(dropout)
787
+
788
+ # Legacy string support for activation function.
789
+ if isinstance(activation, str):
790
+ self.activation = _get_activation_fn(activation)
791
+ else:
792
+ self.activation = activation
793
+
794
+ def __setstate__(self, state):
795
+ if 'activation' not in state:
796
+ state['activation'] = F.relu
797
+ super().__setstate__(state)
798
+
799
+ def forward(
800
+ self,
801
+ tgt: Tensor,
802
+ memory: Tensor,
803
+ tgt_mask: Optional[Tensor] = None,
804
+ memory_mask: Optional[Tensor] = None,
805
+ tgt_key_padding_mask: Optional[Tensor] = None,
806
+ memory_key_padding_mask: Optional[Tensor] = None,
807
+ tgt_is_causal: bool = False,
808
+ memory_is_causal: bool = False,
809
+ ) -> Tensor:
810
+ r"""Pass the inputs (and mask) through the decoder layer.
811
+
812
+ Args:
813
+ tgt: the sequence to the decoder layer (required).
814
+ memory: the sequence from the last layer of the encoder (required).
815
+ tgt_mask: the mask for the tgt sequence (optional).
816
+ memory_mask: the mask for the memory sequence (optional).
817
+ tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
818
+ memory_key_padding_mask: the mask for the memory keys per batch (optional).
819
+ tgt_is_causal: If specified, applies a causal mask as ``tgt mask``.
820
+ Default: ``False``.
821
+ Warning:
822
+ ``tgt_is_causal`` provides a hint that ``tgt_mask`` is
823
+ the causal mask. Providing incorrect hints can result in
824
+ incorrect execution, including forward and backward
825
+ compatibility.
826
+ memory_is_causal: If specified, applies a causal mask as
827
+ ``memory mask``.
828
+ Default: ``False``.
829
+ Warning:
830
+ ``memory_is_causal`` provides a hint that
831
+ ``memory_mask`` is the causal mask. Providing incorrect
832
+ hints can result in incorrect execution, including
833
+ forward and backward compatibility.
834
+
835
+ Shape:
836
+ see the docs in Transformer class.
837
+ """
838
+ # see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
839
+
840
+ x = tgt
841
+ if self.norm_first:
842
+ x = x + self._sa_block(self.norm1(x), tgt_mask, tgt_key_padding_mask, tgt_is_causal)
843
+ x = x + self._mha_block(self.norm2(x), memory, memory_mask, memory_key_padding_mask, memory_is_causal)
844
+ x = x + self._ff_block(self.norm3(x))
845
+ else:
846
+ x = self.norm1(x + self._sa_block(x, tgt_mask, tgt_key_padding_mask, tgt_is_causal))
847
+ x = self.norm2(x + self._mha_block(x, memory, memory_mask, memory_key_padding_mask, memory_is_causal))
848
+ x = self.norm3(x + self._ff_block(x))
849
+
850
+ return x
851
+
852
+ # self-attention block
853
+ def _sa_block(self, x: Tensor,
854
+ attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False) -> Tensor:
855
+ x = self.self_attn(x, x, x,
856
+ attn_mask=attn_mask,
857
+ key_padding_mask=key_padding_mask,
858
+ is_causal=is_causal,
859
+ need_weights=False)[0]
860
+ return self.dropout1(x)
861
+
862
+ # multihead attention block
863
+ def _mha_block(self, x: Tensor, mem: Tensor,
864
+ attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], is_causal: bool = False) -> Tensor:
865
+ x = self.multihead_attn(x, mem, mem,
866
+ attn_mask=attn_mask,
867
+ key_padding_mask=key_padding_mask,
868
+ is_causal=is_causal,
869
+ need_weights=False)[0]
870
+ return self.dropout2(x)
871
+
872
+ # feed forward block
873
+ def _ff_block(self, x: Tensor) -> Tensor:
874
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
875
+ return self.dropout3(x)
876
+
877
+
878
+ def _get_clones(module, N):
879
+ # FIXME: copy.deepcopy() is not defined on nn.module
880
+ return ModuleList([copy.deepcopy(module) for i in range(N)])
881
+
882
+
883
+ def _get_activation_fn(activation: str) -> Callable[[Tensor], Tensor]:
884
+ if activation == "relu":
885
+ return F.relu
886
+ elif activation == "gelu":
887
+ return F.gelu
888
+
889
+ raise RuntimeError(f"activation should be relu/gelu, not {activation}")
890
+
891
+
892
+ def _detect_is_causal_mask(
893
+ mask: Optional[Tensor],
894
+ is_causal: Optional[bool] = None,
895
+ size: Optional[int] = None,
896
+ ) -> bool:
897
+ """Return whether the given attention mask is causal.
898
+
899
+ Warning:
900
+ If ``is_causal`` is not ``None``, its value will be returned as is. If a
901
+ user supplies an incorrect ``is_causal`` hint,
902
+
903
+ ``is_causal=False`` when the mask is in fact a causal attention.mask
904
+ may lead to reduced performance relative to what would be achievable
905
+ with ``is_causal=True``;
906
+ ``is_causal=True`` when the mask is in fact not a causal attention.mask
907
+ may lead to incorrect and unpredictable execution - in some scenarios,
908
+ a causal mask may be applied based on the hint, in other execution
909
+ scenarios the specified mask may be used. The choice may not appear
910
+ to be deterministic, in that a number of factors like alignment,
911
+ hardware SKU, etc influence the decision whether to use a mask or
912
+ rely on the hint.
913
+ ``size`` if not None, check whether the mask is a causal mask of the provided size
914
+ Otherwise, checks for any causal mask.
915
+ """
916
+ # Prevent type refinement
917
+ make_causal = (is_causal is True)
918
+
919
+ if is_causal is None and mask is not None:
920
+ sz = size if size is not None else mask.size(-2)
921
+ causal_comparison = _generate_square_subsequent_mask(
922
+ sz, device=mask.device, dtype=mask.dtype)
923
+
924
+ # Do not use `torch.equal` so we handle batched masks by
925
+ # broadcasting the comparison.
926
+ if mask.size() == causal_comparison.size():
927
+ make_causal = bool((mask == causal_comparison).all())
928
+ else:
929
+ make_causal = False
930
+
931
+ return make_causal
mgm/lib/python3.10/site-packages/torch/nn/modules/upsampling.py ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .module import Module
2
+ from .. import functional as F
3
+
4
+ from torch import Tensor
5
+ from typing import Optional
6
+ from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t
7
+
8
+ __all__ = ['Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d']
9
+
10
+
11
class Upsample(Module):
    r"""Upsample a multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) input.

    The input is expected in the form
    `minibatch x channels x [optional depth] x [optional height] x width`,
    i.e. a 3D, 4D or 5D tensor. The available algorithms are nearest
    neighbor, and linear, bilinear, bicubic and trilinear interpolation for
    3D, 4D and 5D inputs respectively.

    Exactly one of :attr:`size` (the target output size) or
    :attr:`scale_factor` (a multiplier on the spatial size) should be given;
    supplying both is ambiguous.

    Args:
        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional):
            output spatial sizes
        scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional):
            multiplier for spatial size. Has to match input size if it is a tuple.
        mode (str, optional): the upsampling algorithm: one of ``'nearest'``,
            ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``.
            Default: ``'nearest'``
        align_corners (bool, optional): if ``True``, the corner pixels of the
            input and output tensors are aligned, preserving the values at
            those pixels. Only effective when :attr:`mode` is ``'linear'``,
            ``'bilinear'``, ``'bicubic'``, or ``'trilinear'``.
            Default: ``False``
        recompute_scale_factor (bool, optional): recompute the scale_factor for
            use in the interpolation calculation. If ``True``, `scale_factor`
            must be passed in and is used to compute the output `size`, from
            which new interpolation scales are inferred; for floating-point
            `scale_factor` these may differ from the original due to rounding
            and precision. If ``False``, `size` or `scale_factor` is used
            directly for interpolation.

    Shape:
        - Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, W_{out})`, :math:`(N, C, H_{out}, W_{out})`
          or :math:`(N, C, D_{out}, H_{out}, W_{out})`, where

    .. math::
        D_{out} = \left\lfloor D_{in} \times \text{scale\_factor} \right\rfloor

    .. math::
        H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor

    .. math::
        W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor

    .. warning::
        With ``align_corners = True``, the linearly interpolating modes
        (`linear`, `bilinear`, `bicubic`, and `trilinear`) don't proportionally
        align the output and input pixels, so output values can depend on the
        input size. This was the default behavior for these modes up to
        version 0.3.1; since then the default is ``align_corners = False``.

    .. note::
        If you want downsampling/general resizing, you should use
        :func:`~nn.functional.interpolate`.

    Examples::

        >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
        >>> m = nn.Upsample(scale_factor=2, mode='nearest')
        >>> m(input)
        tensor([[[[1., 1., 2., 2.],
                  [1., 1., 2., 2.],
                  [3., 3., 4., 4.],
                  [3., 3., 4., 4.]]]])

        >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        >>> m(input)
        tensor([[[[1.0000, 1.3333, 1.6667, 2.0000],
                  [1.6667, 2.0000, 2.3333, 2.6667],
                  [2.3333, 2.6667, 3.0000, 3.3333],
                  [3.0000, 3.3333, 3.6667, 4.0000]]]])
    """
    __constants__ = ['size', 'scale_factor', 'mode', 'align_corners', 'name', 'recompute_scale_factor']
    name: str
    size: Optional[_size_any_t]
    scale_factor: Optional[_ratio_any_t]
    mode: str
    align_corners: Optional[bool]
    recompute_scale_factor: Optional[bool]

    def __init__(self, size: Optional[_size_any_t] = None, scale_factor: Optional[_ratio_any_t] = None,
                 mode: str = 'nearest', align_corners: Optional[bool] = None,
                 recompute_scale_factor: Optional[bool] = None) -> None:
        super().__init__()
        self.name = type(self).__name__
        self.size = size
        # Normalize scale_factor to float(s); a falsy scalar becomes None.
        if isinstance(scale_factor, tuple):
            self.scale_factor = tuple(float(s) for s in scale_factor)
        elif scale_factor:
            self.scale_factor = float(scale_factor)
        else:
            self.scale_factor = None
        self.mode = mode
        self.align_corners = align_corners
        self.recompute_scale_factor = recompute_scale_factor

    def forward(self, input: Tensor) -> Tensor:
        # Thin wrapper over the functional API; all parameters come from __init__.
        return F.interpolate(
            input, self.size, self.scale_factor, self.mode,
            self.align_corners, recompute_scale_factor=self.recompute_scale_factor,
        )

    def __setstate__(self, state):
        # Checkpoints predating `recompute_scale_factor` behaved as if it were True.
        state.setdefault('recompute_scale_factor', True)
        super().__setstate__(state)

    def extra_repr(self) -> str:
        if self.scale_factor is None:
            head = f'size={self.size!r}'
        else:
            head = f'scale_factor={self.scale_factor!r}'
        return head + f', mode={self.mode!r}'
172
+
173
+
174
class UpsamplingNearest2d(Upsample):
    r"""Apply 2D nearest-neighbor upsampling to a multi-channel input signal.

    The scale is specified through either the :attr:`size` or the
    :attr:`scale_factor` constructor argument. When :attr:`size` is given, it
    is the output size of the image `(h, w)`.

    Args:
        size (int or Tuple[int, int], optional): output spatial sizes
        scale_factor (float or Tuple[float, float], optional): multiplier for
            spatial size.

    .. warning::
        This class is deprecated in favor of :func:`~nn.functional.interpolate`.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where

    .. math::
        H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor

    .. math::
        W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor

    Examples::

        >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
        >>> m = nn.UpsamplingNearest2d(scale_factor=2)
        >>> m(input)
        tensor([[[[1., 1., 2., 2.],
                  [1., 1., 2., 2.],
                  [3., 3., 4., 4.],
                  [3., 3., 4., 4.]]]])
    """
    def __init__(self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None) -> None:
        # Fixed-mode specialization of Upsample.
        super().__init__(size=size, scale_factor=scale_factor, mode='nearest')
217
+
218
+
219
class UpsamplingBilinear2d(Upsample):
    r"""Apply 2D bilinear upsampling to a multi-channel input signal.

    The scale is specified through either the :attr:`size` or the
    :attr:`scale_factor` constructor argument. When :attr:`size` is given, it
    is the output size of the image `(h, w)`.

    Args:
        size (int or Tuple[int, int], optional): output spatial sizes
        scale_factor (float or Tuple[float, float], optional): multiplier for
            spatial size.

    .. warning::
        This class is deprecated in favor of :func:`~nn.functional.interpolate`.
        It is equivalent to
        ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where

    .. math::
        H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor

    .. math::
        W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor

    Examples::

        >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
        >>> m = nn.UpsamplingBilinear2d(scale_factor=2)
        >>> m(input)
        tensor([[[[1.0000, 1.3333, 1.6667, 2.0000],
                  [1.6667, 2.0000, 2.3333, 2.6667],
                  [2.3333, 2.6667, 3.0000, 3.3333],
                  [3.0000, 3.3333, 3.6667, 4.0000]]]])
    """
    def __init__(self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None) -> None:
        # Fixed-mode specialization of Upsample; bilinear implies align_corners=True here.
        super().__init__(size=size, scale_factor=scale_factor, mode='bilinear', align_corners=True)
mgm/lib/python3.10/site-packages/torch/nn/parameter.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch._C import _disabled_torch_function_impl
3
+ from collections import OrderedDict
4
+
5
+ # Metaclass to combine _TensorMeta and the instance check override for Parameter.
6
+ class _ParameterMeta(torch._C._TensorMeta):
7
+ # Make `isinstance(t, Parameter)` return True for custom tensor instances that have the _is_param flag.
8
+ def __instancecheck__(self, instance):
9
+ return super().__instancecheck__(instance) or (
10
+ isinstance(instance, torch.Tensor) and getattr(instance, '_is_param', False))
11
+
12
+
13
+ class Parameter(torch.Tensor, metaclass=_ParameterMeta):
14
+ r"""A kind of Tensor that is to be considered a module parameter.
15
+
16
+ Parameters are :class:`~torch.Tensor` subclasses, that have a
17
+ very special property when used with :class:`Module` s - when they're
18
+ assigned as Module attributes they are automatically added to the list of
19
+ its parameters, and will appear e.g. in :meth:`~Module.parameters` iterator.
20
+ Assigning a Tensor doesn't have such effect. This is because one might
21
+ want to cache some temporary state, like last hidden state of the RNN, in
22
+ the model. If there was no such class as :class:`Parameter`, these
23
+ temporaries would get registered too.
24
+
25
+ Args:
26
+ data (Tensor): parameter tensor.
27
+ requires_grad (bool, optional): if the parameter requires gradient. Note that
28
+ the torch.no_grad() context does NOT affect the default behavior of
29
+ Parameter creation--the Parameter will still have `requires_grad=True` in
30
+ :class:`~no_grad` mode. See :ref:`locally-disable-grad-doc` for more
31
+ details. Default: `True`
32
+ """
33
+ def __new__(cls, data=None, requires_grad=True):
34
+ if data is None:
35
+ data = torch.empty(0)
36
+ if type(data) is torch.Tensor or type(data) is Parameter:
37
+ # For ease of BC maintenance, keep this path for standard Tensor.
38
+ # Eventually (tm), we should change the behavior for standard Tensor to match.
39
+ return torch.Tensor._make_subclass(cls, data, requires_grad)
40
+
41
+ # Path for custom tensors: set a flag on the instance to indicate parameter-ness.
42
+ t = data.detach().requires_grad_(requires_grad)
43
+ if type(t) is not type(data):
44
+ raise RuntimeError(f"Creating a Parameter from an instance of type {type(data).__name__} "
45
+ "requires that detach() returns an instance of the same type, but return "
46
+ f"type {type(t).__name__} was found instead. To use the type as a "
47
+ "Parameter, please correct the detach() semantics defined by "
48
+ "its __torch_dispatch__() implementation.")
49
+ t._is_param = True
50
+ return t
51
+
52
+ # Note: the 3 methods below only apply to standard Tensor. Parameters of custom tensor types
53
+ # are still considered that custom tensor type and these methods will not be called for them.
54
+ def __deepcopy__(self, memo):
55
+ if id(self) in memo:
56
+ return memo[id(self)]
57
+ else:
58
+ result = type(self)(self.data.clone(memory_format=torch.preserve_format), self.requires_grad)
59
+ memo[id(self)] = result
60
+ return result
61
+
62
+ def __repr__(self):
63
+ return 'Parameter containing:\n' + super().__repr__()
64
+
65
+ def __reduce_ex__(self, proto):
66
+ state = torch._utils._get_obj_state(self)
67
+
68
+ # See Note [Don't serialize hooks]
69
+ hooks = OrderedDict()
70
+ if not state:
71
+ return (
72
+ torch._utils._rebuild_parameter,
73
+ (self.data, self.requires_grad, hooks)
74
+ )
75
+
76
+ return (
77
+ torch._utils._rebuild_parameter_with_state,
78
+ (self.data, self.requires_grad, hooks, state)
79
+ )
80
+
81
+ __torch_function__ = _disabled_torch_function_impl
82
+
83
+
84
+ class UninitializedTensorMixin:
85
+ _allowed_methods = [
86
+ torch.Tensor.__hash__,
87
+ torch.Tensor.size,
88
+ torch.Tensor.copy_,
89
+ torch.Tensor.is_floating_point,
90
+ torch.Tensor.half,
91
+ torch.Tensor.float,
92
+ torch.Tensor.double,
93
+ torch.Tensor.char,
94
+ torch.Tensor.short,
95
+ torch.Tensor.int,
96
+ torch.Tensor.long,
97
+ torch.Tensor.cuda,
98
+ torch.Tensor.cpu,
99
+ torch.Tensor.to,
100
+ torch.Tensor.get_device,
101
+ torch._has_compatible_shallow_copy_type,
102
+ ]
103
+
104
+ def materialize(self, shape, device=None, dtype=None):
105
+ r"""Create a Parameter or Tensor with the same properties of the uninitialized one.
106
+ Given a shape, it materializes a parameter in the same device
107
+ and with the same `dtype` as the current one or the specified ones in the
108
+ arguments.
109
+
110
+ Args:
111
+ shape : (tuple): the shape for the materialized tensor.
112
+ device (:class:`torch.device`): the desired device of the parameters
113
+ and buffers in this module. Optional.
114
+ dtype (:class:`torch.dtype`): the desired floating point type of
115
+ the floating point parameters and buffers in this module. Optional.
116
+ """
117
+ if device is None:
118
+ device = self.data.device
119
+ if dtype is None:
120
+ dtype = self.data.dtype
121
+ self.data = torch.empty(shape, device=device, dtype=dtype)
122
+ self.__class__ = self.cls_to_become
123
+
124
+ @property
125
+ def shape(self):
126
+ raise RuntimeError(
127
+ 'Can\'t access the shape of an uninitialized parameter or buffer. '
128
+ 'This error usually happens in `load_state_dict` when trying to load '
129
+ 'an uninitialized parameter into an initialized one. '
130
+ 'Call `forward` to initialize the parameters before accessing their attributes.')
131
+
132
+ def share_memory_(self):
133
+ raise RuntimeError(
134
+ 'Can\'t share memory on an uninitialized parameter or buffer. '
135
+ 'Call `forward` to initialize the parameters before calling '
136
+ '`module.share_memory()`.')
137
+
138
+ def __repr__(self):
139
+ return f'<{self.__class__.__name__}>'
140
+
141
+ def __reduce_ex__(self, proto):
142
+ # See Note [Don't serialize hooks]
143
+ return (
144
+ self.__class__,
145
+ (self.requires_grad,)
146
+ )
147
+
148
+ @classmethod
149
+ def __torch_function__(cls, func, types, args=(), kwargs=None):
150
+ # method-wrapper is to detect access to Tensor properties that are
151
+ # wrapped in descriptors
152
+ if func in cls._allowed_methods or func.__class__.__name__ == 'method-wrapper':
153
+ if kwargs is None:
154
+ kwargs = {}
155
+ return super().__torch_function__(func, types, args, kwargs)
156
+ raise ValueError(
157
+ f'Attempted to use an uninitialized parameter in {func}. '
158
+ 'This error happens when you are using a `LazyModule` or '
159
+ f'explicitly manipulating `torch.nn.parameter.{cls.__name__}` '
160
+ 'objects. When using LazyModules Call `forward` with a dummy batch '
161
+ 'to initialize the parameters before calling torch functions')
162
+
163
+
164
+ def is_lazy(param):
165
+ return isinstance(param, UninitializedTensorMixin)
166
+
167
+
168
+ class UninitializedParameter(UninitializedTensorMixin, Parameter):
169
+ r"""A parameter that is not initialized.
170
+
171
+ Uninitialized Parameters are a a special case of :class:`torch.nn.Parameter`
172
+ where the shape of the data is still unknown.
173
+
174
+ Unlike a :class:`torch.nn.Parameter`, uninitialized parameters
175
+ hold no data and attempting to access some properties, like their shape,
176
+ will throw a runtime error. The only operations that can be performed on a uninitialized
177
+ parameter are changing its datatype, moving it to a different device and
178
+ converting it to a regular :class:`torch.nn.Parameter`.
179
+
180
+ The default device or dtype to use when the parameter is materialized can be set
181
+ during construction using e.g. ``device='cuda'``.
182
+ """
183
+
184
+ cls_to_become = Parameter
185
+
186
+ def __new__(cls, requires_grad=True, device=None, dtype=None) -> None:
187
+ factory_kwargs = {'device': device, 'dtype': dtype}
188
+ data = torch.empty(0, **factory_kwargs)
189
+ return torch.Tensor._make_subclass(cls, data, requires_grad)
190
+
191
+ def __deepcopy__(self, memo):
192
+ if id(self) in memo:
193
+ return memo[id(self)]
194
+ else:
195
+ result = type(self)(self.requires_grad, self.data.device, self.data.dtype)
196
+ memo[id(self)] = result
197
+ return result
198
+
199
+ class UninitializedBuffer(UninitializedTensorMixin, torch.Tensor):
200
+ r"""A buffer that is not initialized.
201
+
202
+ Uninitialized Buffer is a a special case of :class:`torch.Tensor`
203
+ where the shape of the data is still unknown.
204
+
205
+ Unlike a :class:`torch.Tensor`, uninitialized parameters
206
+ hold no data and attempting to access some properties, like their shape,
207
+ will throw a runtime error. The only operations that can be performed on a uninitialized
208
+ parameter are changing its datatype, moving it to a different device and
209
+ converting it to a regular :class:`torch.Tensor`.
210
+
211
+ The default device or dtype to use when the buffer is materialized can be set
212
+ during construction using e.g. ``device='cuda'``.
213
+ """
214
+
215
+ cls_to_become = torch.Tensor
216
+
217
+ def __new__(cls, requires_grad=False, device=None, dtype=None) -> None:
218
+ factory_kwargs = {'device': device, 'dtype': dtype}
219
+ data = torch.empty(0, **factory_kwargs)
220
+ return torch.Tensor._make_subclass(cls, data, requires_grad)
mgm/lib/python3.10/site-packages/torch/nn/quantized/__init__.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Pull in the subpackages and re-export the quantized module classes so that
# `torch.nn.quantized` keeps its historical public surface.
from . import dynamic  # noqa: F403
from . import functional  # noqa: F403
from . import modules  # noqa: F403
from .modules import *  # noqa: F403
from .modules import MaxPool2d

# Explicit public API of this package.
__all__ = [
    'BatchNorm2d',
    'BatchNorm3d',
    'Conv1d',
    'Conv2d',
    'Conv3d',
    'ConvTranspose1d',
    'ConvTranspose2d',
    'ConvTranspose3d',
    'DeQuantize',
    'Dropout',
    'ELU',
    'Embedding',
    'EmbeddingBag',
    'GroupNorm',
    'Hardswish',
    'InstanceNorm1d',
    'InstanceNorm2d',
    'InstanceNorm3d',
    'LayerNorm',
    'LeakyReLU',
    'Linear',
    'LSTM',
    'MultiheadAttention',
    'PReLU',
    'Quantize',
    'ReLU6',
    'Sigmoid',
    'Softmax',
    # Wrapper modules
    'FloatFunctional',
    'FXFloatFunctional',
    'QFunctional',
]
mgm/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (699 Bytes). View file
 
mgm/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/functional.cpython-310.pyc ADDED
Binary file (435 Bytes). View file
 
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .modules import * # noqa: F403
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (202 Bytes). View file
 
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__init__.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""Quantized Reference Modules
3
+
4
+ This module is in the process of migration to
5
+ `torch/ao/nn/quantized/reference`, and is kept here for
6
+ compatibility while the migration process is ongoing.
7
+ If you are adding a new entry/functionality, please, add it to the
8
+ appropriate file under the `torch/ao/nn/quantized/reference`,
9
+ while adding an import statement here.
10
+ """
11
+
12
+ from torch.ao.nn.quantized.reference.modules.linear import Linear
13
+ from torch.ao.nn.quantized.reference.modules.conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
14
+ from torch.ao.nn.quantized.reference.modules.rnn import RNNCell, LSTMCell, GRUCell, LSTM
15
+ from torch.ao.nn.quantized.reference.modules.sparse import Embedding, EmbeddingBag
16
+
17
+ __all__ = [
18
+ 'Linear',
19
+ 'Conv1d',
20
+ 'Conv2d',
21
+ 'Conv3d',
22
+ 'ConvTranspose1d',
23
+ 'ConvTranspose2d',
24
+ 'ConvTranspose3d',
25
+ 'RNNCell',
26
+ 'LSTMCell',
27
+ 'GRUCell',
28
+ 'LSTM',
29
+ 'Embedding',
30
+ 'EmbeddingBag',
31
+ ]
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/conv.cpython-310.pyc ADDED
Binary file (871 Bytes). View file
 
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (634 Bytes). View file
 
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (781 Bytes). View file
 
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/sparse.cpython-310.pyc ADDED
Binary file (672 Bytes). View file
 
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/utils.cpython-310.pyc ADDED
Binary file (832 Bytes). View file
 
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/conv.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""Quantized Reference Modules
3
+
4
+ This module is in the process of migration to
5
+ `torch/ao/nn/quantized/reference`, and is kept here for
6
+ compatibility while the migration process is ongoing.
7
+ If you are adding a new entry/functionality, please, add it to the
8
+ appropriate file under the `torch/ao/nn/quantized/reference`,
9
+ while adding an import statement here.
10
+ """
11
+
12
+ from torch.ao.nn.quantized.reference.modules.conv import _ConvNd
13
+ from torch.ao.nn.quantized.reference.modules.conv import Conv1d
14
+ from torch.ao.nn.quantized.reference.modules.conv import Conv2d
15
+ from torch.ao.nn.quantized.reference.modules.conv import Conv3d
16
+ from torch.ao.nn.quantized.reference.modules.conv import _ConvTransposeNd
17
+ from torch.ao.nn.quantized.reference.modules.conv import ConvTranspose1d
18
+ from torch.ao.nn.quantized.reference.modules.conv import ConvTranspose2d
19
+ from torch.ao.nn.quantized.reference.modules.conv import ConvTranspose3d
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/linear.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""Quantized Reference Modules
3
+
4
+ This module is in the process of migration to
5
+ `torch/ao/nn/quantized/reference`, and is kept here for
6
+ compatibility while the migration process is ongoing.
7
+ If you are adding a new entry/functionality, please, add it to the
8
+ appropriate file under the `torch/ao/nn/quantized/reference`,
9
+ while adding an import statement here.
10
+ """
11
+
12
+ from torch.ao.nn.quantized.reference.modules.linear import Linear
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/rnn.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""Quantized Reference Modules
3
+
4
+ This module is in the process of migration to
5
+ `torch/ao/nn/quantized/reference`, and is kept here for
6
+ compatibility while the migration process is ongoing.
7
+ If you are adding a new entry/functionality, please, add it to the
8
+ appropriate file under the `torch/ao/nn/quantized/reference`,
9
+ while adding an import statement here.
10
+ """
11
+
12
+ from torch.ao.nn.quantized.reference.modules.rnn import RNNCellBase
13
+ from torch.ao.nn.quantized.reference.modules.rnn import RNNCell
14
+ from torch.ao.nn.quantized.reference.modules.rnn import LSTMCell
15
+ from torch.ao.nn.quantized.reference.modules.rnn import GRUCell
16
+ from torch.ao.nn.quantized.reference.modules.rnn import RNNBase
17
+ from torch.ao.nn.quantized.reference.modules.rnn import LSTM
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/sparse.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""Quantized Reference Modules
3
+
4
+ This module is in the process of migration to
5
+ `torch/ao/nn/quantized/reference`, and is kept here for
6
+ compatibility while the migration process is ongoing.
7
+ If you are adding a new entry/functionality, please, add it to the
8
+ appropriate file under the `torch/ao/nn/quantized/reference`,
9
+ while adding an import statement here.
10
+ """
11
+
12
+ from torch.ao.nn.quantized.reference.modules.sparse import Embedding
13
+ from torch.ao.nn.quantized.reference.modules.sparse import EmbeddingBag
mgm/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/utils.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""Quantized Reference Modules
3
+
4
+ This module is in the process of migration to
5
+ `torch/ao/nn/quantized/reference`, and is kept here for
6
+ compatibility while the migration process is ongoing.
7
+ If you are adding a new entry/functionality, please, add it to the
8
+ appropriate file under the `torch/ao/nn/quantized/reference`,
9
+ while adding an import statement here.
10
+ """
11
+ from torch.ao.nn.quantized.reference.modules.utils import _quantize_weight
12
+ from torch.ao.nn.quantized.reference.modules.utils import _quantize_and_dequantize_weight
13
+ from torch.ao.nn.quantized.reference.modules.utils import _save_weight_qparams
14
+ from torch.ao.nn.quantized.reference.modules.utils import _get_weight_qparam_keys
15
+ from torch.ao.nn.quantized.reference.modules.utils import ReferenceQuantizedModule
mgm/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from torch.ao.nn.quantized.dynamic import * # noqa: F403
mgm/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (221 Bytes). View file
 
mgm/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__init__.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""Quantized Dynamic Modules
3
+
4
+ This file is in the process of migration to `torch/ao/nn/quantized/dynamic`,
5
+ and is kept here for compatibility while the migration process is ongoing.
6
+ If you are adding a new entry/functionality, please, add it to the
7
+ appropriate file under the `torch/ao/nn/quantized/dynamic`,
8
+ while adding an import statement here.
9
+ """
10
+
11
+ from torch.ao.nn.quantized.dynamic.modules import conv
12
+ from torch.ao.nn.quantized.dynamic.modules import linear
13
+ from torch.ao.nn.quantized.dynamic.modules import rnn
14
+
15
+ from torch.ao.nn.quantized.dynamic.modules.conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
16
+ from torch.ao.nn.quantized.dynamic.modules.linear import Linear
17
+ from torch.ao.nn.quantized.dynamic.modules.rnn import LSTM, GRU, LSTMCell, RNNCell, GRUCell
18
+
19
+ __all__ = [
20
+ 'Linear',
21
+ 'LSTM',
22
+ 'GRU',
23
+ 'LSTMCell',
24
+ 'RNNCell',
25
+ 'GRUCell',
26
+ 'Conv1d',
27
+ 'Conv2d',
28
+ 'Conv3d',
29
+ 'ConvTranspose1d',
30
+ 'ConvTranspose2d',
31
+ 'ConvTranspose3d',
32
+ ]
mgm/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.15 kB). View file
 
mgm/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc ADDED
Binary file (848 Bytes). View file
 
mgm/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (629 Bytes). View file