Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.xz +3 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl +3 -0
- evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl +3 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc +3 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/_reduction.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/common_types.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/__init__.py +35 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__init__.py +13 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__init__.py +12 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/bn_relu.py +7 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/conv_relu.py +9 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/linear_relu.py +5 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/__init__.py +68 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/batchnorm.py +841 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/distance.py +89 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/dropout.py +294 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/flatten.py +144 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/linear.py +264 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/module.py +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/padding.py +800 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/pooling.py +1229 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/sparse.py +455 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__init__.py +3 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/embedding_ops.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__init__.py +31 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_deprecation_utils.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_named_member_accessor.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_per_sample_grad.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/clip_grad.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/convert_parameters.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/fusion.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/init.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/memory_format.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrizations.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrize.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/prune.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/rnn.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/spectral_norm.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/stateless.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/weight_norm.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/_deprecation_utils.py +45 -0
.gitattributes
CHANGED
|
@@ -602,3 +602,4 @@ evalkit_tf449/lib/python3.10/site-packages/triton/backends/nvidia/lib/cupti/libn
|
|
| 602 |
evalkit_internvl/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_pt_objects.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 603 |
evalkit_internvl/lib/python3.10/site-packages/transformers/__pycache__/modeling_outputs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 604 |
evalkit_internvl/lib/python3.10/site-packages/sympy/parsing/latex/_antlr/__pycache__/latexparser.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 602 |
evalkit_internvl/lib/python3.10/site-packages/transformers/utils/__pycache__/dummy_pt_objects.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 603 |
evalkit_internvl/lib/python3.10/site-packages/transformers/__pycache__/modeling_outputs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 604 |
evalkit_internvl/lib/python3.10/site-packages/sympy/parsing/latex/_antlr/__pycache__/latexparser.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 605 |
+
evalkit_internvl/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.xz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0e9a63dcc7df38ab0a1137a9b44b436b13cebfa300eb19dba4ae4bce50d0fa81
|
| 3 |
+
size 752
|
evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2f29d7f1d2ceca07f10df172c0e826ef08163a14b12c6ef3fa80ec53a5fcdc3c
|
| 3 |
+
size 670
|
evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8a538100e6ae94b16f2ab0f7d92d4d7e7a622be2dfcc0f6b0b73b623bc513ae2
|
| 3 |
+
size 691
|
evalkit_internvl/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0bf83ffe5d957c8c7fcb728e8b1e0bb990dee6f4834d95669fde40e2f348ef4e
|
| 3 |
+
size 137856
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.11 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/_reduction.cpython-310.pyc
ADDED
|
Binary file (1.29 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/common_types.cpython-310.pyc
ADDED
|
Binary file (1.02 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc
ADDED
|
Binary file (3.45 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc
ADDED
|
Binary file (8.45 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc
ADDED
|
Binary file (19.3 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc
ADDED
|
Binary file (8.98 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/__init__.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.ao.nn.intrinsic import ConvBn1d
|
| 2 |
+
from torch.ao.nn.intrinsic import ConvBn2d
|
| 3 |
+
from torch.ao.nn.intrinsic import ConvBn3d
|
| 4 |
+
from torch.ao.nn.intrinsic import ConvBnReLU1d
|
| 5 |
+
from torch.ao.nn.intrinsic import ConvBnReLU2d
|
| 6 |
+
from torch.ao.nn.intrinsic import ConvBnReLU3d
|
| 7 |
+
from torch.ao.nn.intrinsic import ConvReLU1d
|
| 8 |
+
from torch.ao.nn.intrinsic import ConvReLU2d
|
| 9 |
+
from torch.ao.nn.intrinsic import ConvReLU3d
|
| 10 |
+
from torch.ao.nn.intrinsic import LinearReLU
|
| 11 |
+
from torch.ao.nn.intrinsic import BNReLU2d
|
| 12 |
+
from torch.ao.nn.intrinsic import BNReLU3d
|
| 13 |
+
from torch.ao.nn.intrinsic import LinearBn1d
|
| 14 |
+
from torch.ao.nn.intrinsic.modules.fused import _FusedModule # noqa: F401
|
| 15 |
+
|
| 16 |
+
# Include the subpackages in case user imports from it directly
|
| 17 |
+
from . import modules # noqa: F401
|
| 18 |
+
from . import qat # noqa: F401
|
| 19 |
+
from . import quantized # noqa: F401
|
| 20 |
+
|
| 21 |
+
__all__ = [
|
| 22 |
+
'ConvBn1d',
|
| 23 |
+
'ConvBn2d',
|
| 24 |
+
'ConvBn3d',
|
| 25 |
+
'ConvBnReLU1d',
|
| 26 |
+
'ConvBnReLU2d',
|
| 27 |
+
'ConvBnReLU3d',
|
| 28 |
+
'ConvReLU1d',
|
| 29 |
+
'ConvReLU2d',
|
| 30 |
+
'ConvReLU3d',
|
| 31 |
+
'LinearReLU',
|
| 32 |
+
'BNReLU2d',
|
| 33 |
+
'BNReLU3d',
|
| 34 |
+
'LinearBn1d',
|
| 35 |
+
]
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .modules import * # noqa: F403
|
| 2 |
+
# to ensure customers can use the module below
|
| 3 |
+
# without importing it directly
|
| 4 |
+
import torch.nn.intrinsic.quantized.dynamic
|
| 5 |
+
|
| 6 |
+
__all__ = [
|
| 7 |
+
'BNReLU2d',
|
| 8 |
+
'BNReLU3d',
|
| 9 |
+
'ConvReLU1d',
|
| 10 |
+
'ConvReLU2d',
|
| 11 |
+
'ConvReLU3d',
|
| 12 |
+
'LinearReLU',
|
| 13 |
+
]
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .linear_relu import LinearReLU
|
| 2 |
+
from .conv_relu import ConvReLU1d, ConvReLU2d, ConvReLU3d
|
| 3 |
+
from .bn_relu import BNReLU2d, BNReLU3d
|
| 4 |
+
|
| 5 |
+
__all__ = [
|
| 6 |
+
'LinearReLU',
|
| 7 |
+
'ConvReLU1d',
|
| 8 |
+
'ConvReLU2d',
|
| 9 |
+
'ConvReLU3d',
|
| 10 |
+
'BNReLU2d',
|
| 11 |
+
'BNReLU3d',
|
| 12 |
+
]
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc
ADDED
|
Binary file (325 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc
ADDED
|
Binary file (291 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/bn_relu.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.ao.nn.intrinsic.quantized import BNReLU2d
|
| 2 |
+
from torch.ao.nn.intrinsic.quantized import BNReLU3d
|
| 3 |
+
|
| 4 |
+
__all__ = [
|
| 5 |
+
'BNReLU2d',
|
| 6 |
+
'BNReLU3d',
|
| 7 |
+
]
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/conv_relu.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.ao.nn.intrinsic.quantized import ConvReLU1d
|
| 2 |
+
from torch.ao.nn.intrinsic.quantized import ConvReLU2d
|
| 3 |
+
from torch.ao.nn.intrinsic.quantized import ConvReLU3d
|
| 4 |
+
|
| 5 |
+
__all__ = [
|
| 6 |
+
'ConvReLU1d',
|
| 7 |
+
'ConvReLU2d',
|
| 8 |
+
'ConvReLU3d',
|
| 9 |
+
]
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/linear_relu.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.ao.nn.intrinsic.quantized import LinearReLU
|
| 2 |
+
|
| 3 |
+
__all__ = [
|
| 4 |
+
'LinearReLU',
|
| 5 |
+
]
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/__init__.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .module import Module
|
| 2 |
+
from .linear import Identity, Linear, Bilinear, LazyLinear
|
| 3 |
+
from .conv import Conv1d, Conv2d, Conv3d, \
|
| 4 |
+
ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
|
| 5 |
+
LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
|
| 6 |
+
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
|
| 7 |
+
Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
|
| 8 |
+
Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
|
| 9 |
+
Hardsigmoid, Hardswish, SiLU, Mish
|
| 10 |
+
from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
|
| 11 |
+
CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
|
| 12 |
+
MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
|
| 13 |
+
SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
|
| 14 |
+
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
|
| 15 |
+
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
|
| 16 |
+
MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, \
|
| 17 |
+
AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
|
| 18 |
+
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
|
| 19 |
+
LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
|
| 20 |
+
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
|
| 21 |
+
LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
|
| 22 |
+
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
|
| 23 |
+
from .dropout import Dropout, Dropout1d, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
|
| 24 |
+
from .padding import ReflectionPad1d, ReflectionPad2d, ReflectionPad3d, ReplicationPad1d, ReplicationPad2d, \
|
| 25 |
+
ReplicationPad3d, ZeroPad1d, ZeroPad2d, ZeroPad3d, ConstantPad1d, ConstantPad2d, ConstantPad3d, \
|
| 26 |
+
CircularPad1d, CircularPad2d, CircularPad3d
|
| 27 |
+
from .sparse import Embedding, EmbeddingBag
|
| 28 |
+
from .rnn import RNNBase, RNN, LSTM, GRU, \
|
| 29 |
+
RNNCellBase, RNNCell, LSTMCell, GRUCell
|
| 30 |
+
from .pixelshuffle import PixelShuffle, PixelUnshuffle
|
| 31 |
+
from .upsampling import UpsamplingNearest2d, UpsamplingBilinear2d, Upsample
|
| 32 |
+
from .distance import PairwiseDistance, CosineSimilarity
|
| 33 |
+
from .fold import Fold, Unfold
|
| 34 |
+
from .adaptive import AdaptiveLogSoftmaxWithLoss
|
| 35 |
+
from .transformer import TransformerEncoder, TransformerDecoder, \
|
| 36 |
+
TransformerEncoderLayer, TransformerDecoderLayer, Transformer
|
| 37 |
+
from .flatten import Flatten, Unflatten
|
| 38 |
+
from .channelshuffle import ChannelShuffle
|
| 39 |
+
|
| 40 |
+
__all__ = [
|
| 41 |
+
'Module', 'Identity', 'Linear', 'Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d',
|
| 42 |
+
'ConvTranspose2d', 'ConvTranspose3d', 'Threshold', 'ReLU', 'Hardtanh', 'ReLU6',
|
| 43 |
+
'Sigmoid', 'Tanh', 'Softmax', 'Softmax2d', 'LogSoftmax', 'ELU', 'SELU', 'CELU', 'GLU', 'GELU', 'Hardshrink',
|
| 44 |
+
'LeakyReLU', 'LogSigmoid', 'Softplus', 'Softshrink', 'MultiheadAttention', 'PReLU', 'Softsign', 'Softmin',
|
| 45 |
+
'Tanhshrink', 'RReLU', 'L1Loss', 'NLLLoss', 'KLDivLoss', 'MSELoss', 'BCELoss', 'BCEWithLogitsLoss',
|
| 46 |
+
'NLLLoss2d', 'PoissonNLLLoss', 'CosineEmbeddingLoss', 'CTCLoss', 'HingeEmbeddingLoss', 'MarginRankingLoss',
|
| 47 |
+
'MultiLabelMarginLoss', 'MultiLabelSoftMarginLoss', 'MultiMarginLoss', 'SmoothL1Loss', 'GaussianNLLLoss',
|
| 48 |
+
'HuberLoss', 'SoftMarginLoss', 'CrossEntropyLoss', 'Container', 'Sequential', 'ModuleList', 'ModuleDict',
|
| 49 |
+
'ParameterList', 'ParameterDict', 'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'MaxPool1d', 'MaxPool2d',
|
| 50 |
+
'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'FractionalMaxPool2d', "FractionalMaxPool3d",
|
| 51 |
+
'LPPool1d', 'LPPool2d', 'LocalResponseNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d', 'InstanceNorm1d',
|
| 52 |
+
'InstanceNorm2d', 'InstanceNorm3d', 'LayerNorm', 'GroupNorm', 'SyncBatchNorm',
|
| 53 |
+
'Dropout', 'Dropout1d', 'Dropout2d', 'Dropout3d', 'AlphaDropout', 'FeatureAlphaDropout',
|
| 54 |
+
'ReflectionPad1d', 'ReflectionPad2d', 'ReflectionPad3d', 'ReplicationPad2d', 'ReplicationPad1d', 'ReplicationPad3d',
|
| 55 |
+
'CrossMapLRN2d', 'Embedding', 'EmbeddingBag', 'RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell',
|
| 56 |
+
'LSTMCell', 'GRUCell', 'PixelShuffle', 'PixelUnshuffle', 'Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d',
|
| 57 |
+
'PairwiseDistance', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d',
|
| 58 |
+
'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d', 'TripletMarginLoss', 'ZeroPad1d', 'ZeroPad2d', 'ZeroPad3d',
|
| 59 |
+
'ConstantPad1d', 'ConstantPad2d', 'ConstantPad3d', 'Bilinear', 'CosineSimilarity', 'Unfold', 'Fold',
|
| 60 |
+
'AdaptiveLogSoftmaxWithLoss', 'TransformerEncoder', 'TransformerDecoder',
|
| 61 |
+
'TransformerEncoderLayer', 'TransformerDecoderLayer', 'Transformer',
|
| 62 |
+
'LazyLinear', 'LazyConv1d', 'LazyConv2d', 'LazyConv3d',
|
| 63 |
+
'LazyConvTranspose1d', 'LazyConvTranspose2d', 'LazyConvTranspose3d',
|
| 64 |
+
'LazyBatchNorm1d', 'LazyBatchNorm2d', 'LazyBatchNorm3d',
|
| 65 |
+
'LazyInstanceNorm1d', 'LazyInstanceNorm2d', 'LazyInstanceNorm3d',
|
| 66 |
+
'Flatten', 'Unflatten', 'Hardsigmoid', 'Hardswish', 'SiLU', 'Mish', 'TripletMarginWithDistanceLoss', 'ChannelShuffle',
|
| 67 |
+
'CircularPad1d', 'CircularPad2d', 'CircularPad3d'
|
| 68 |
+
]
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/batchnorm.py
ADDED
|
@@ -0,0 +1,841 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, Any
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch import Tensor
|
| 5 |
+
from torch.nn.parameter import Parameter, UninitializedParameter, UninitializedBuffer
|
| 6 |
+
|
| 7 |
+
from .. import functional as F
|
| 8 |
+
from .. import init
|
| 9 |
+
from ._functions import SyncBatchNorm as sync_batch_norm
|
| 10 |
+
from .lazy import LazyModuleMixin
|
| 11 |
+
from .module import Module
|
| 12 |
+
|
| 13 |
+
__all__ = ['BatchNorm1d', 'LazyBatchNorm1d', 'BatchNorm2d', 'LazyBatchNorm2d', 'BatchNorm3d',
|
| 14 |
+
'LazyBatchNorm3d', 'SyncBatchNorm']
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class _NormBase(Module):
|
| 18 |
+
"""Common base of _InstanceNorm and _BatchNorm"""
|
| 19 |
+
|
| 20 |
+
_version = 2
|
| 21 |
+
__constants__ = ["track_running_stats", "momentum", "eps", "num_features", "affine"]
|
| 22 |
+
num_features: int
|
| 23 |
+
eps: float
|
| 24 |
+
momentum: float
|
| 25 |
+
affine: bool
|
| 26 |
+
track_running_stats: bool
|
| 27 |
+
# WARNING: weight and bias purposely not defined here.
|
| 28 |
+
# See https://github.com/pytorch/pytorch/issues/39670
|
| 29 |
+
|
| 30 |
+
def __init__(
|
| 31 |
+
self,
|
| 32 |
+
num_features: int,
|
| 33 |
+
eps: float = 1e-5,
|
| 34 |
+
momentum: float = 0.1,
|
| 35 |
+
affine: bool = True,
|
| 36 |
+
track_running_stats: bool = True,
|
| 37 |
+
device=None,
|
| 38 |
+
dtype=None
|
| 39 |
+
) -> None:
|
| 40 |
+
factory_kwargs = {'device': device, 'dtype': dtype}
|
| 41 |
+
super().__init__()
|
| 42 |
+
self.num_features = num_features
|
| 43 |
+
self.eps = eps
|
| 44 |
+
self.momentum = momentum
|
| 45 |
+
self.affine = affine
|
| 46 |
+
self.track_running_stats = track_running_stats
|
| 47 |
+
if self.affine:
|
| 48 |
+
self.weight = Parameter(torch.empty(num_features, **factory_kwargs))
|
| 49 |
+
self.bias = Parameter(torch.empty(num_features, **factory_kwargs))
|
| 50 |
+
else:
|
| 51 |
+
self.register_parameter("weight", None)
|
| 52 |
+
self.register_parameter("bias", None)
|
| 53 |
+
if self.track_running_stats:
|
| 54 |
+
self.register_buffer('running_mean', torch.zeros(num_features, **factory_kwargs))
|
| 55 |
+
self.register_buffer('running_var', torch.ones(num_features, **factory_kwargs))
|
| 56 |
+
self.running_mean: Optional[Tensor]
|
| 57 |
+
self.running_var: Optional[Tensor]
|
| 58 |
+
self.register_buffer('num_batches_tracked',
|
| 59 |
+
torch.tensor(0, dtype=torch.long,
|
| 60 |
+
**{k: v for k, v in factory_kwargs.items() if k != 'dtype'}))
|
| 61 |
+
self.num_batches_tracked: Optional[Tensor]
|
| 62 |
+
else:
|
| 63 |
+
self.register_buffer("running_mean", None)
|
| 64 |
+
self.register_buffer("running_var", None)
|
| 65 |
+
self.register_buffer("num_batches_tracked", None)
|
| 66 |
+
self.reset_parameters()
|
| 67 |
+
|
| 68 |
+
def reset_running_stats(self) -> None:
|
| 69 |
+
if self.track_running_stats:
|
| 70 |
+
# running_mean/running_var/num_batches... are registered at runtime depending
|
| 71 |
+
# if self.track_running_stats is on
|
| 72 |
+
self.running_mean.zero_() # type: ignore[union-attr]
|
| 73 |
+
self.running_var.fill_(1) # type: ignore[union-attr]
|
| 74 |
+
self.num_batches_tracked.zero_() # type: ignore[union-attr,operator]
|
| 75 |
+
|
| 76 |
+
def reset_parameters(self) -> None:
|
| 77 |
+
self.reset_running_stats()
|
| 78 |
+
if self.affine:
|
| 79 |
+
init.ones_(self.weight)
|
| 80 |
+
init.zeros_(self.bias)
|
| 81 |
+
|
| 82 |
+
def _check_input_dim(self, input):
|
| 83 |
+
raise NotImplementedError
|
| 84 |
+
|
| 85 |
+
def extra_repr(self):
|
| 86 |
+
return (
|
| 87 |
+
"{num_features}, eps={eps}, momentum={momentum}, affine={affine}, "
|
| 88 |
+
"track_running_stats={track_running_stats}".format(**self.__dict__)
|
| 89 |
+
)
|
| 90 |
+
|
| 91 |
+
def _load_from_state_dict(
|
| 92 |
+
self,
|
| 93 |
+
state_dict,
|
| 94 |
+
prefix,
|
| 95 |
+
local_metadata,
|
| 96 |
+
strict,
|
| 97 |
+
missing_keys,
|
| 98 |
+
unexpected_keys,
|
| 99 |
+
error_msgs,
|
| 100 |
+
):
|
| 101 |
+
version = local_metadata.get("version", None)
|
| 102 |
+
|
| 103 |
+
if (version is None or version < 2) and self.track_running_stats:
|
| 104 |
+
# at version 2: added num_batches_tracked buffer
|
| 105 |
+
# this should have a default value of 0
|
| 106 |
+
num_batches_tracked_key = prefix + "num_batches_tracked"
|
| 107 |
+
if num_batches_tracked_key not in state_dict:
|
| 108 |
+
state_dict[num_batches_tracked_key] = (
|
| 109 |
+
self.num_batches_tracked
|
| 110 |
+
if self.num_batches_tracked is not None
|
| 111 |
+
else torch.tensor(0, dtype=torch.long)
|
| 112 |
+
)
|
| 113 |
+
|
| 114 |
+
super()._load_from_state_dict(
|
| 115 |
+
state_dict,
|
| 116 |
+
prefix,
|
| 117 |
+
local_metadata,
|
| 118 |
+
strict,
|
| 119 |
+
missing_keys,
|
| 120 |
+
unexpected_keys,
|
| 121 |
+
error_msgs,
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class _BatchNorm(_NormBase):
|
| 126 |
+
def __init__(
|
| 127 |
+
self,
|
| 128 |
+
num_features: int,
|
| 129 |
+
eps: float = 1e-5,
|
| 130 |
+
momentum: float = 0.1,
|
| 131 |
+
affine: bool = True,
|
| 132 |
+
track_running_stats: bool = True,
|
| 133 |
+
device=None,
|
| 134 |
+
dtype=None
|
| 135 |
+
) -> None:
|
| 136 |
+
factory_kwargs = {'device': device, 'dtype': dtype}
|
| 137 |
+
super().__init__(
|
| 138 |
+
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
|
| 139 |
+
)
|
| 140 |
+
|
| 141 |
+
def forward(self, input: Tensor) -> Tensor:
|
| 142 |
+
self._check_input_dim(input)
|
| 143 |
+
|
| 144 |
+
# exponential_average_factor is set to self.momentum
|
| 145 |
+
# (when it is available) only so that it gets updated
|
| 146 |
+
# in ONNX graph when this node is exported to ONNX.
|
| 147 |
+
if self.momentum is None:
|
| 148 |
+
exponential_average_factor = 0.0
|
| 149 |
+
else:
|
| 150 |
+
exponential_average_factor = self.momentum
|
| 151 |
+
|
| 152 |
+
if self.training and self.track_running_stats:
|
| 153 |
+
# TODO: if statement only here to tell the jit to skip emitting this when it is None
|
| 154 |
+
if self.num_batches_tracked is not None: # type: ignore[has-type]
|
| 155 |
+
self.num_batches_tracked.add_(1) # type: ignore[has-type]
|
| 156 |
+
if self.momentum is None: # use cumulative moving average
|
| 157 |
+
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
|
| 158 |
+
else: # use exponential moving average
|
| 159 |
+
exponential_average_factor = self.momentum
|
| 160 |
+
|
| 161 |
+
r"""
|
| 162 |
+
Decide whether the mini-batch stats should be used for normalization rather than the buffers.
|
| 163 |
+
Mini-batch stats are used in training mode, and in eval mode when buffers are None.
|
| 164 |
+
"""
|
| 165 |
+
if self.training:
|
| 166 |
+
bn_training = True
|
| 167 |
+
else:
|
| 168 |
+
bn_training = (self.running_mean is None) and (self.running_var is None)
|
| 169 |
+
|
| 170 |
+
r"""
|
| 171 |
+
Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
|
| 172 |
+
passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
|
| 173 |
+
used for normalization (i.e. in eval mode when buffers are not None).
|
| 174 |
+
"""
|
| 175 |
+
return F.batch_norm(
|
| 176 |
+
input,
|
| 177 |
+
# If buffers are not to be tracked, ensure that they won't be updated
|
| 178 |
+
self.running_mean
|
| 179 |
+
if not self.training or self.track_running_stats
|
| 180 |
+
else None,
|
| 181 |
+
self.running_var if not self.training or self.track_running_stats else None,
|
| 182 |
+
self.weight,
|
| 183 |
+
self.bias,
|
| 184 |
+
bn_training,
|
| 185 |
+
exponential_average_factor,
|
| 186 |
+
self.eps,
|
| 187 |
+
)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class _LazyNormBase(LazyModuleMixin, _NormBase):
    """Mixin base for norm layers that infer ``num_features`` lazily.

    ``weight``, ``bias``, ``running_mean`` and ``running_var`` start out as
    uninitialized placeholders and are materialized on the first forward
    call, once the channel count can be read from the input.
    """

    weight: UninitializedParameter  # type: ignore[assignment]
    bias: UninitializedParameter  # type: ignore[assignment]

    def __init__(self, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        # Call the parent with num_features=0 and affine/track_running_stats
        # hardcoded to False so it does not allocate tensors that would be
        # immediately replaced by the uninitialized placeholders below.
        super().__init__(0, eps, momentum, False, False, **factory_kwargs)
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            self.weight = UninitializedParameter(**factory_kwargs)
            self.bias = UninitializedParameter(**factory_kwargs)
        if self.track_running_stats:
            self.running_mean = UninitializedBuffer(**factory_kwargs)
            self.running_var = UninitializedBuffer(**factory_kwargs)
            # The step counter is always a long tensor: only the device from
            # the factory kwargs applies, never the floating dtype.
            self.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=device)

    def reset_parameters(self) -> None:
        """Reset parameters, but only once they have been materialized."""
        if self.has_uninitialized_params() or self.num_features == 0:
            return
        super().reset_parameters()

    def initialize_parameters(self, input) -> None:  # type: ignore[override]
        """Materialize all lazy tensors from the channel dim of ``input``."""
        if not self.has_uninitialized_params():
            return
        self.num_features = input.shape[1]
        if self.affine:
            assert isinstance(self.weight, UninitializedParameter)
            assert isinstance(self.bias, UninitializedParameter)
            self.weight.materialize((self.num_features,))
            self.bias.materialize((self.num_features,))
        if self.track_running_stats:
            self.running_mean.materialize((self.num_features,))  # type:ignore[union-attr]
            self.running_var.materialize((self.num_features,))  # type:ignore[union-attr]
        self.reset_parameters()
|
| 237 |
+
class BatchNorm1d(_BatchNorm):
    r"""Applies Batch Normalization over a 2D or 3D input, as described in
    `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .

    .. math::

        y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension over the
    mini-batches; :math:`\gamma` and :math:`\beta` are learnable parameter
    vectors of size `C` (the number of features or channels of the input),
    initialized to 1 and 0 respectively. At train time in the forward pass
    the standard-deviation uses the biased estimator, equivalent to
    ``torch.var(input, unbiased=False)``; the running estimate of the
    standard-deviation, however, is updated with the unbiased estimator,
    equivalent to ``torch.var(input, unbiased=True)``.

    Also by default, during training this layer keeps running estimates of
    its computed mean and variance (with a default :attr:`momentum` of 0.1)
    which are then used for normalization during evaluation. If
    :attr:`track_running_stats` is set to ``False``, no running estimates
    are kept and batch statistics are used during evaluation as well.

    .. note::
        This :attr:`momentum` argument differs from the optimizer notion of
        momentum. The update rule for running statistics is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is
        the new observed value.

    Because the Batch Normalization is done over the `C` dimension,
    computing statistics on `(N, L)` slices, it is common terminology to
    call this Temporal Batch Normalization.

    Args:
        num_features: number of features or channels :math:`C` of the input
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving
            average (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance; when ``False`` the
            :attr:`running_mean` and :attr:`running_var` buffers are
            ``None`` and batch statistics are always used, in both training
            and eval modes. Default: ``True``

    Shape:
        - Input: :math:`(N, C)` or :math:`(N, C, L)`, where :math:`N` is the
          batch size, :math:`C` the number of features or channels, and
          :math:`L` the sequence length
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)

    Examples::

        >>> # With Learnable Parameters
        >>> m = nn.BatchNorm1d(100)
        >>> # Without Learnable Parameters
        >>> m = nn.BatchNorm1d(100, affine=False)
        >>> input = torch.randn(20, 100)
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        # Only (N, C) and (N, C, L) layouts are valid for 1d batch norm.
        if input.dim() not in (2, 3):
            raise ValueError(
                f"expected 2D or 3D input (got {input.dim()}D input)"
            )
|
| 313 |
+
class LazyBatchNorm1d(_LazyNormBase, _BatchNorm):
    r"""A :class:`torch.nn.BatchNorm1d` module whose ``num_features``
    argument is inferred lazily from ``input.size(1)``.

    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further
    documentation on lazy modules and their limitations.

    Args:
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving
            average (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance; when ``False`` the
            :attr:`running_mean` and :attr:`running_var` buffers are
            ``None`` and batch statistics are always used, in both training
            and eval modes. Default: ``True``
    """

    # Once materialized, the instance is mutated into a regular BatchNorm1d.
    cls_to_become = BatchNorm1d  # type: ignore[assignment]

    def _check_input_dim(self, input):
        # Same constraint as BatchNorm1d: (N, C) or (N, C, L).
        if input.dim() not in (2, 3):
            raise ValueError(
                f"expected 2D or 3D input (got {input.dim()}D input)"
            )
|
| 348 |
+
class BatchNorm2d(_BatchNorm):
    r"""Applies Batch Normalization over a 4D input (a mini-batch of 2D
    inputs with an additional channel dimension) as described in the paper
    `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension over the
    mini-batches; :math:`\gamma` and :math:`\beta` are learnable parameter
    vectors of size `C` (the input size), initialized to 1 and 0
    respectively. At train time in the forward pass the standard-deviation
    uses the biased estimator, equivalent to
    ``torch.var(input, unbiased=False)``; the running estimate of the
    standard-deviation, however, is updated with the unbiased estimator,
    equivalent to ``torch.var(input, unbiased=True)``.

    Also by default, during training this layer keeps running estimates of
    its computed mean and variance (with a default :attr:`momentum` of 0.1)
    which are then used for normalization during evaluation. If
    :attr:`track_running_stats` is set to ``False``, no running estimates
    are kept and batch statistics are used during evaluation as well.

    .. note::
        This :attr:`momentum` argument differs from the optimizer notion of
        momentum. The update rule for running statistics is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is
        the new observed value.

    Because the Batch Normalization is done over the `C` dimension,
    computing statistics on `(N, H, W)` slices, it is common terminology to
    call this Spatial Batch Normalization.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, H, W)`
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving
            average (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance; when ``False`` the
            :attr:`running_mean` and :attr:`running_var` buffers are
            ``None`` and batch statistics are always used, in both training
            and eval modes. Default: ``True``

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Examples::

        >>> # With Learnable Parameters
        >>> m = nn.BatchNorm2d(100)
        >>> # Without Learnable Parameters
        >>> m = nn.BatchNorm2d(100, affine=False)
        >>> input = torch.randn(20, 100, 35, 45)
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        # Only (N, C, H, W) inputs are valid for 2d batch norm.
        if input.dim() == 4:
            return
        raise ValueError(f"expected 4D input (got {input.dim()}D input)")
|
| 423 |
+
class LazyBatchNorm2d(_LazyNormBase, _BatchNorm):
    r"""A :class:`torch.nn.BatchNorm2d` module whose ``num_features``
    argument is inferred lazily from ``input.size(1)``.

    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further
    documentation on lazy modules and their limitations.

    Args:
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving
            average (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance; when ``False`` the
            :attr:`running_mean` and :attr:`running_var` buffers are
            ``None`` and batch statistics are always used, in both training
            and eval modes. Default: ``True``
    """

    # Once materialized, the instance is mutated into a regular BatchNorm2d.
    cls_to_become = BatchNorm2d  # type: ignore[assignment]

    def _check_input_dim(self, input):
        # Same constraint as BatchNorm2d: (N, C, H, W).
        if input.dim() == 4:
            return
        raise ValueError(f"expected 4D input (got {input.dim()}D input)")
|
| 456 |
+
class BatchNorm3d(_BatchNorm):
    r"""Applies Batch Normalization over a 5D input (a mini-batch of 3D
    inputs with an additional channel dimension) as described in the paper
    `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension over the
    mini-batches; :math:`\gamma` and :math:`\beta` are learnable parameter
    vectors of size `C` (the input size), initialized to 1 and 0
    respectively. At train time in the forward pass the standard-deviation
    uses the biased estimator, equivalent to
    ``torch.var(input, unbiased=False)``; the running estimate of the
    standard-deviation, however, is updated with the unbiased estimator,
    equivalent to ``torch.var(input, unbiased=True)``.

    Also by default, during training this layer keeps running estimates of
    its computed mean and variance (with a default :attr:`momentum` of 0.1)
    which are then used for normalization during evaluation. If
    :attr:`track_running_stats` is set to ``False``, no running estimates
    are kept and batch statistics are used during evaluation as well.

    .. note::
        This :attr:`momentum` argument differs from the optimizer notion of
        momentum. The update rule for running statistics is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is
        the new observed value.

    Because the Batch Normalization is done over the `C` dimension,
    computing statistics on `(N, D, H, W)` slices, it is common terminology
    to call this Volumetric Batch Normalization or Spatio-temporal Batch
    Normalization.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, D, H, W)`
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving
            average (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance; when ``False`` the
            :attr:`running_mean` and :attr:`running_var` buffers are
            ``None`` and batch statistics are always used, in both training
            and eval modes. Default: ``True``

    Shape:
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)

    Examples::

        >>> # With Learnable Parameters
        >>> m = nn.BatchNorm3d(100)
        >>> # Without Learnable Parameters
        >>> m = nn.BatchNorm3d(100, affine=False)
        >>> input = torch.randn(20, 100, 35, 45, 10)
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        # Only (N, C, D, H, W) inputs are valid for 3d batch norm.
        if input.dim() == 5:
            return
        raise ValueError(f"expected 5D input (got {input.dim()}D input)")
|
| 532 |
+
class LazyBatchNorm3d(_LazyNormBase, _BatchNorm):
    r"""A :class:`torch.nn.BatchNorm3d` module whose ``num_features``
    argument is inferred lazily from ``input.size(1)``.

    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further
    documentation on lazy modules and their limitations.

    Args:
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving
            average (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance; when ``False`` the
            :attr:`running_mean` and :attr:`running_var` buffers are
            ``None`` and batch statistics are always used, in both training
            and eval modes. Default: ``True``
    """

    # Once materialized, the instance is mutated into a regular BatchNorm3d.
    cls_to_become = BatchNorm3d  # type: ignore[assignment]

    def _check_input_dim(self, input):
        # Same constraint as BatchNorm3d: (N, C, D, H, W).
        if input.dim() == 5:
            return
        raise ValueError(f"expected 5D input (got {input.dim()}D input)")
|
| 565 |
+
class SyncBatchNorm(_BatchNorm):
|
| 566 |
+
r"""Applies Batch Normalization over a N-Dimensional input (a mini-batch of [N-2]D inputs
|
| 567 |
+
with additional channel dimension) as described in the paper
|
| 568 |
+
`Batch Normalization: Accelerating Deep Network Training by Reducing
|
| 569 |
+
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
|
| 570 |
+
|
| 571 |
+
.. math::
|
| 572 |
+
|
| 573 |
+
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
|
| 574 |
+
|
| 575 |
+
The mean and standard-deviation are calculated per-dimension over all
|
| 576 |
+
mini-batches of the same process groups. :math:`\gamma` and :math:`\beta`
|
| 577 |
+
are learnable parameter vectors of size `C` (where `C` is the input size).
|
| 578 |
+
By default, the elements of :math:`\gamma` are sampled from
|
| 579 |
+
:math:`\mathcal{U}(0, 1)` and the elements of :math:`\beta` are set to 0.
|
| 580 |
+
The standard-deviation is calculated via the biased estimator, equivalent to
|
| 581 |
+
`torch.var(input, unbiased=False)`.
|
| 582 |
+
|
| 583 |
+
Also by default, during training this layer keeps running estimates of its
|
| 584 |
+
computed mean and variance, which are then used for normalization during
|
| 585 |
+
evaluation. The running estimates are kept with a default :attr:`momentum`
|
| 586 |
+
of 0.1.
|
| 587 |
+
|
| 588 |
+
If :attr:`track_running_stats` is set to ``False``, this layer then does not
|
| 589 |
+
keep running estimates, and batch statistics are instead used during
|
| 590 |
+
evaluation time as well.
|
| 591 |
+
|
| 592 |
+
.. note::
|
| 593 |
+
This :attr:`momentum` argument is different from one used in optimizer
|
| 594 |
+
classes and the conventional notion of momentum. Mathematically, the
|
| 595 |
+
update rule for running statistics here is
|
| 596 |
+
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
|
| 597 |
+
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
|
| 598 |
+
new observed value.
|
| 599 |
+
|
| 600 |
+
Because the Batch Normalization is done for each channel in the ``C`` dimension, computing
|
| 601 |
+
statistics on ``(N, +)`` slices, it's common terminology to call this Volumetric Batch
|
| 602 |
+
Normalization or Spatio-temporal Batch Normalization.
|
| 603 |
+
|
| 604 |
+
Currently :class:`SyncBatchNorm` only supports
|
| 605 |
+
:class:`~torch.nn.DistributedDataParallel` (DDP) with single GPU per process. Use
|
| 606 |
+
:meth:`torch.nn.SyncBatchNorm.convert_sync_batchnorm()` to convert
|
| 607 |
+
:attr:`BatchNorm*D` layer to :class:`SyncBatchNorm` before wrapping
|
| 608 |
+
Network with DDP.
|
| 609 |
+
|
| 610 |
+
Args:
|
| 611 |
+
num_features: :math:`C` from an expected input of size
|
| 612 |
+
:math:`(N, C, +)`
|
| 613 |
+
eps: a value added to the denominator for numerical stability.
|
| 614 |
+
Default: ``1e-5``
|
| 615 |
+
momentum: the value used for the running_mean and running_var
|
| 616 |
+
computation. Can be set to ``None`` for cumulative moving average
|
| 617 |
+
(i.e. simple average). Default: 0.1
|
| 618 |
+
affine: a boolean value that when set to ``True``, this module has
|
| 619 |
+
learnable affine parameters. Default: ``True``
|
| 620 |
+
track_running_stats: a boolean value that when set to ``True``, this
|
| 621 |
+
module tracks the running mean and variance, and when set to ``False``,
|
| 622 |
+
this module does not track such statistics, and initializes statistics
|
| 623 |
+
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
|
| 624 |
+
When these buffers are ``None``, this module always uses batch statistics.
|
| 625 |
+
in both training and eval modes. Default: ``True``
|
| 626 |
+
process_group: synchronization of stats happen within each process group
|
| 627 |
+
individually. Default behavior is synchronization across the whole
|
| 628 |
+
world
|
| 629 |
+
|
| 630 |
+
Shape:
|
| 631 |
+
- Input: :math:`(N, C, +)`
|
| 632 |
+
- Output: :math:`(N, C, +)` (same shape as input)
|
| 633 |
+
|
| 634 |
+
.. note::
|
| 635 |
+
Synchronization of batchnorm statistics occurs only while training, i.e.
|
| 636 |
+
synchronization is disabled when ``model.eval()`` is set or if
|
| 637 |
+
``self.training`` is otherwise ``False``.
|
| 638 |
+
|
| 639 |
+
Examples::
|
| 640 |
+
|
| 641 |
+
>>> # xdoctest: +SKIP
|
| 642 |
+
>>> # With Learnable Parameters
|
| 643 |
+
>>> m = nn.SyncBatchNorm(100)
|
| 644 |
+
>>> # creating process group (optional)
|
| 645 |
+
>>> # ranks is a list of int identifying rank ids.
|
| 646 |
+
>>> ranks = list(range(8))
|
| 647 |
+
>>> r1, r2 = ranks[:4], ranks[4:]
|
| 648 |
+
>>> # Note: every rank calls into new_group for every
|
| 649 |
+
>>> # process group created, even if that rank is not
|
| 650 |
+
>>> # part of the group.
|
| 651 |
+
>>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
|
| 652 |
+
>>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
|
| 653 |
+
>>> # Without Learnable Parameters
|
| 654 |
+
>>> m = nn.BatchNorm3d(100, affine=False, process_group=process_group)
|
| 655 |
+
>>> input = torch.randn(20, 100, 35, 45, 10)
|
| 656 |
+
>>> output = m(input)
|
| 657 |
+
|
| 658 |
+
>>> # network is nn.BatchNorm layer
|
| 659 |
+
>>> sync_bn_network = nn.SyncBatchNorm.convert_sync_batchnorm(network, process_group)
|
| 660 |
+
>>> # only single gpu per process is currently supported
|
| 661 |
+
>>> ddp_sync_bn_network = torch.nn.parallel.DistributedDataParallel(
|
| 662 |
+
>>> sync_bn_network,
|
| 663 |
+
>>> device_ids=[args.local_rank],
|
| 664 |
+
>>> output_device=args.local_rank)
|
| 665 |
+
"""
|
| 666 |
+
|
| 667 |
+
def __init__(
    self,
    num_features: int,
    eps: float = 1e-5,
    momentum: float = 0.1,
    affine: bool = True,
    track_running_stats: bool = True,
    process_group: Optional[Any] = None,
    device=None,
    dtype=None
) -> None:
    """Construct the layer.

    ``process_group`` scopes the synchronization of statistics; when
    ``None``, stats are synchronized across the whole world.
    """
    super().__init__(
        num_features,
        eps,
        momentum,
        affine,
        track_running_stats,
        device=device,
        dtype=dtype,
    )
    self.process_group = process_group
|
| 684 |
+
def _check_input_dim(self, input):
|
| 685 |
+
if input.dim() < 2:
|
| 686 |
+
raise ValueError(
|
| 687 |
+
f"expected at least 2D input (got {input.dim()}D input)"
|
| 688 |
+
)
|
| 689 |
+
|
| 690 |
+
def _check_non_zero_input_channels(self, input):
|
| 691 |
+
if input.size(1) == 0:
|
| 692 |
+
raise ValueError(
|
| 693 |
+
"SyncBatchNorm number of input channels should be non-zero"
|
| 694 |
+
)
|
| 695 |
+
|
| 696 |
+
def forward(self, input: Tensor) -> Tensor:
    """Normalize ``input``, synchronizing batch statistics across the
    process group when training under an initialized ``torch.distributed``.

    Falls back to the regular framework batch norm whenever no
    synchronization is needed (eval mode, distributed not initialized, or
    world size 1).
    """
    # Validate shape before any stat computation.
    self._check_input_dim(input)
    self._check_non_zero_input_channels(input)

    # exponential_average_factor is set to self.momentum
    # (when it is available) only so that it gets updated
    # in ONNX graph when this node is exported to ONNX.
    if self.momentum is None:
        exponential_average_factor = 0.0
    else:
        exponential_average_factor = self.momentum

    # Running-stat bookkeeping happens only in training with tracking on.
    if self.training and self.track_running_stats:
        assert self.num_batches_tracked is not None
        self.num_batches_tracked.add_(1)
        if self.momentum is None:  # use cumulative moving average
            # factor 1/k gives a simple average over all batches seen so far
            exponential_average_factor = 1.0 / self.num_batches_tracked.item()
        else:  # use exponential moving average
            exponential_average_factor = self.momentum

    r"""
    Decide whether the mini-batch stats should be used for normalization rather than the buffers.
    Mini-batch stats are used in training mode, and in eval mode when buffers are None.
    """
    if self.training:
        bn_training = True
    else:
        bn_training = (self.running_mean is None) and (self.running_var is None)

    r"""
    Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
    passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
    used for normalization (i.e. in eval mode when buffers are not None).
    """
    # If buffers are not to be tracked, ensure that they won't be updated
    running_mean = (
        self.running_mean if not self.training or self.track_running_stats else None
    )
    running_var = (
        self.running_var if not self.training or self.track_running_stats else None
    )

    # Don't sync batchnorm stats in inference mode (model.eval()).
    need_sync = (bn_training and self.training and
                 torch.distributed.is_available() and torch.distributed.is_initialized())
    if need_sync:
        # currently only GPU/PrivateUse1 input is supported
        if input.device.type not in ["cuda", torch._C._get_privateuse1_backend_name()]:
            raise ValueError("SyncBatchNorm expected input tensor to be on GPU or "
                             f"{torch._C._get_privateuse1_backend_name()}")

        # Default to the world group unless the layer was scoped to one.
        process_group = torch.distributed.group.WORLD
        if self.process_group:
            process_group = self.process_group
        world_size = torch.distributed.get_world_size(process_group)
        # A single-rank group has nothing to synchronize with.
        need_sync = world_size > 1

    # fallback to framework BN when synchronization is not necessary
    if not need_sync:
        return F.batch_norm(
            input,
            running_mean,
            running_var,
            self.weight,
            self.bias,
            bn_training,
            exponential_average_factor,
            self.eps,
        )
    else:
        # Synchronization only ever happens while computing batch stats.
        assert bn_training
        return sync_batch_norm.apply(
            input,
            self.weight,
            self.bias,
            running_mean,
            running_var,
            self.eps,
            exponential_average_factor,
            process_group,
            world_size,
        )
|
| 779 |
+
@classmethod
def convert_sync_batchnorm(cls, module, process_group=None):
    r"""Helper function to convert all :attr:`BatchNorm*D` layers in the model to
    :class:`torch.nn.SyncBatchNorm` layers.

    Args:
        module (nn.Module): module containing one or more :attr:`BatchNorm*D` layers
        process_group (optional): process group to scope synchronization,
            default is the whole world

    Returns:
        The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm`
        layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
        a new :class:`torch.nn.SyncBatchNorm` layer object will be returned
        instead.
    """
    module_output = module
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        module_output = torch.nn.SyncBatchNorm(
            module.num_features,
            module.eps,
            module.momentum,
            module.affine,
            module.track_running_stats,
            process_group,
        )
        if module.affine:
            # Reuse the existing parameter objects (no copy) so optimizer
            # state attached to them stays valid.
            with torch.no_grad():
                module_output.weight = module.weight
                module_output.bias = module.bias
        module_output.running_mean = module.running_mean
        module_output.running_var = module.running_var
        module_output.num_batches_tracked = module.num_batches_tracked
        module_output.training = module.training
        if hasattr(module, "qconfig"):
            module_output.qconfig = module.qconfig
    # Recurse into children so nested BatchNorm layers are converted too.
    for name, child in module.named_children():
        module_output.add_module(
            name, cls.convert_sync_batchnorm(child, process_group)
        )
    del module
    return module_output
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/distance.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .module import Module
|
| 2 |
+
from .. import functional as F
|
| 3 |
+
|
| 4 |
+
from torch import Tensor
|
| 5 |
+
|
| 6 |
+
__all__ = ['PairwiseDistance', 'CosineSimilarity']
|
| 7 |
+
|
| 8 |
+
class PairwiseDistance(Module):
    r"""
    Computes the pairwise distance between input vectors, or between columns of input matrices.

    Distances are computed using ``p``-norm, with constant ``eps`` added to avoid division by zero
    if ``p`` is negative, i.e.:

    .. math ::
        \mathrm{dist}\left(x, y\right) = \left\Vert x-y + \epsilon e \right\Vert_p,

    where :math:`e` is the vector of ones and the ``p``-norm is given by.

    .. math ::
        \Vert x \Vert _p = \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}.

    Args:
        p (real, optional): the norm degree. Can be negative. Default: 2
        eps (float, optional): Small value to avoid division by zero. Default: 1e-6
        keepdim (bool, optional): Determines whether or not to keep the vector dimension.
            Default: False

    Shape:
        - Input1: :math:`(N, D)` or :math:`(D)` where `N = batch dimension` and `D = vector dimension`
        - Input2: :math:`(N, D)` or :math:`(D)`, same shape as the Input1
        - Output: :math:`(N)` or :math:`()` based on input dimension.
          If :attr:`keepdim` is ``True``, then :math:`(N, 1)` or :math:`(1)` based on input dimension.

    Examples::
        >>> pdist = nn.PairwiseDistance(p=2)
        >>> input1 = torch.randn(100, 128)
        >>> input2 = torch.randn(100, 128)
        >>> output = pdist(input1, input2)
    """

    __constants__ = ['norm', 'eps', 'keepdim']
    norm: float
    eps: float
    keepdim: bool

    def __init__(self, p: float = 2., eps: float = 1e-6, keepdim: bool = False) -> None:
        super().__init__()
        # Stored as `norm` (not `p`) for historical/TorchScript compatibility.
        self.norm = p
        self.eps = eps
        self.keepdim = keepdim

    def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
        return F.pairwise_distance(x1, x2, self.norm, self.eps, self.keepdim)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class CosineSimilarity(Module):
    r"""Returns cosine similarity between :math:`x_1` and :math:`x_2`, computed along `dim`.

    .. math ::
        \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}.

    Args:
        dim (int, optional): Dimension where cosine similarity is computed. Default: 1
        eps (float, optional): Small value to avoid division by zero. Default: 1e-8

    Shape:
        - Input1: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`
        - Input2: :math:`(\ast_1, D, \ast_2)`, same number of dimensions as x1, matching x1 size
          at dimension `dim`, and broadcastable with x1 at other dimensions.
        - Output: :math:`(\ast_1, \ast_2)`

    Examples::
        >>> input1 = torch.randn(100, 128)
        >>> input2 = torch.randn(100, 128)
        >>> cos = nn.CosineSimilarity(dim=1, eps=1e-6)
        >>> output = cos(input1, input2)
    """

    __constants__ = ['dim', 'eps']
    dim: int
    eps: float

    def __init__(self, dim: int = 1, eps: float = 1e-8) -> None:
        super().__init__()
        self.dim = dim
        self.eps = eps

    def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
        return F.cosine_similarity(x1, x2, self.dim, self.eps)
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/dropout.py
ADDED
|
@@ -0,0 +1,294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .module import Module
|
| 2 |
+
from .. import functional as F
|
| 3 |
+
|
| 4 |
+
from torch import Tensor
|
| 5 |
+
|
| 6 |
+
__all__ = ['Dropout', 'Dropout1d', 'Dropout2d', 'Dropout3d', 'AlphaDropout', 'FeatureAlphaDropout']
|
| 7 |
+
|
| 8 |
+
class _DropoutNd(Module):
|
| 9 |
+
__constants__ = ['p', 'inplace']
|
| 10 |
+
p: float
|
| 11 |
+
inplace: bool
|
| 12 |
+
|
| 13 |
+
def __init__(self, p: float = 0.5, inplace: bool = False) -> None:
|
| 14 |
+
super().__init__()
|
| 15 |
+
if p < 0 or p > 1:
|
| 16 |
+
raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}")
|
| 17 |
+
self.p = p
|
| 18 |
+
self.inplace = inplace
|
| 19 |
+
|
| 20 |
+
def extra_repr(self) -> str:
|
| 21 |
+
return f'p={self.p}, inplace={self.inplace}'
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class Dropout(_DropoutNd):
    r"""During training, randomly zeroes some of the elements of the input tensor with probability :attr:`p`.

    The zeroed elements are chosen independently for each forward call and are
    sampled from a Bernoulli distribution. Furthermore, the outputs are scaled
    by a factor of :math:`\frac{1}{1-p}` during training, so during evaluation
    the module simply computes an identity function.

    See `Improving neural networks by preventing co-adaptation of feature
    detectors <https://arxiv.org/abs/1207.0580>`_.

    Args:
        p: probability of an element to be zeroed. Default: 0.5
        inplace: If set to ``True``, will do this operation in-place. Default: ``False``

    Shape:
        - Input: :math:`(*)`. Input can be of any shape
        - Output: :math:`(*)`. Output is of the same shape as input

    Examples::

        >>> m = nn.Dropout(p=0.2)
        >>> input = torch.randn(20, 16)
        >>> output = m(input)
    """

    def forward(self, input: Tensor) -> Tensor:
        # self.training toggles stochastic (train) vs identity (eval) behavior.
        return F.dropout(input, self.p, self.training, self.inplace)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class Dropout1d(_DropoutNd):
    r"""Randomly zero out entire channels.

    A channel is a 1D feature map, e.g., the :math:`j`-th channel of the
    :math:`i`-th sample in the batched input is a 1D tensor
    :math:`\text{input}[i, j]`. Each channel will be zeroed out independently
    on every forward call with probability :attr:`p` using samples from a
    Bernoulli distribution.

    Usually the input comes from :class:`nn.Conv1d` modules. As described in
    `Efficient Object Localization Using Convolutional Networks
    <https://arxiv.org/abs/1411.4280>`_, when adjacent pixels within feature
    maps are strongly correlated, i.i.d. dropout will not regularize the
    activations; channel-wise dropout should be used instead.

    Args:
        p (float, optional): probability of an element to be zero-ed.
        inplace (bool, optional): If set to ``True``, will do this operation in-place

    Shape:
        - Input: :math:`(N, C, L)` or :math:`(C, L)`.
        - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input).

    Examples::

        >>> m = nn.Dropout1d(p=0.2)
        >>> input = torch.randn(20, 16, 32)
        >>> output = m(input)
    """

    def forward(self, input: Tensor) -> Tensor:
        return F.dropout1d(input, self.p, self.training, self.inplace)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class Dropout2d(_DropoutNd):
    r"""Randomly zero out entire channels.

    A channel is a 2D feature map, e.g., the :math:`j`-th channel of the
    :math:`i`-th sample in the batched input is a 2D tensor
    :math:`\text{input}[i, j]`. Each channel will be zeroed out independently
    on every forward call with probability :attr:`p` using samples from a
    Bernoulli distribution.

    Usually the input comes from :class:`nn.Conv2d` modules. As described in
    `Efficient Object Localization Using Convolutional Networks
    <https://arxiv.org/abs/1411.4280>`_, when adjacent pixels within feature
    maps are strongly correlated, i.i.d. dropout will not regularize the
    activations; channel-wise dropout should be used instead.

    Args:
        p (float, optional): probability of an element to be zero-ed.
        inplace (bool, optional): If set to ``True``, will do this operation in-place

    .. warning ::
        Due to historical reasons, this class will perform 1D channel-wise dropout
        for 3D inputs (as done by :class:`nn.Dropout1d`). Thus, it currently does NOT
        support inputs without a batch dimension of shape :math:`(C, H, W)`. This
        behavior will change in a future release to interpret 3D inputs as no-batch-dim
        inputs. To maintain the old behavior, switch to :class:`nn.Dropout1d`.

    Shape:
        - Input: :math:`(N, C, H, W)` or :math:`(N, C, L)`.
        - Output: :math:`(N, C, H, W)` or :math:`(N, C, L)` (same shape as input).

    Examples::

        >>> m = nn.Dropout2d(p=0.2)
        >>> input = torch.randn(20, 16, 32, 32)
        >>> output = m(input)
    """

    def forward(self, input: Tensor) -> Tensor:
        return F.dropout2d(input, self.p, self.training, self.inplace)
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
class Dropout3d(_DropoutNd):
    r"""Randomly zero out entire channels.

    A channel is a 3D feature map, e.g., the :math:`j`-th channel of the
    :math:`i`-th sample in the batched input is a 3D tensor
    :math:`\text{input}[i, j]`. Each channel will be zeroed out independently
    on every forward call with probability :attr:`p` using samples from a
    Bernoulli distribution.

    Usually the input comes from :class:`nn.Conv3d` modules. As described in
    `Efficient Object Localization Using Convolutional Networks
    <https://arxiv.org/abs/1411.4280>`_, when adjacent pixels within feature
    maps are strongly correlated, i.i.d. dropout will not regularize the
    activations; channel-wise dropout should be used instead.

    Args:
        p (float, optional): probability of an element to be zeroed.
        inplace (bool, optional): If set to ``True``, will do this operation in-place

    Shape:
        - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`.
        - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input).

    Examples::

        >>> m = nn.Dropout3d(p=0.2)
        >>> input = torch.randn(20, 16, 4, 32, 32)
        >>> output = m(input)
    """

    def forward(self, input: Tensor) -> Tensor:
        return F.dropout3d(input, self.p, self.training, self.inplace)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
class AlphaDropout(_DropoutNd):
    r"""Applies Alpha Dropout over the input.

    Alpha Dropout is a type of Dropout that maintains the self-normalizing
    property: for an input with zero mean and unit standard deviation, the
    output maintains the original mean and standard deviation. It goes
    hand-in-hand with the SELU activation function.

    During training, it randomly masks some elements of the input tensor with
    probability *p* using samples from a Bernoulli distribution; masked
    elements are randomized on every forward call, and the output is scaled
    and shifted to maintain zero mean and unit standard deviation. During
    evaluation the module simply computes an identity function.

    See `Self-Normalizing Neural Networks <https://arxiv.org/abs/1706.02515>`_.

    Args:
        p (float): probability of an element to be dropped. Default: 0.5
        inplace (bool, optional): If set to ``True``, will do this operation in-place

    Shape:
        - Input: :math:`(*)`. Input can be of any shape
        - Output: :math:`(*)`. Output is of the same shape as input

    Examples::

        >>> m = nn.AlphaDropout(p=0.2)
        >>> input = torch.randn(20, 16)
        >>> output = m(input)
    """

    def forward(self, input: Tensor) -> Tensor:
        # NOTE: the functional call ignores self.inplace (matches upstream behavior).
        return F.alpha_dropout(input, self.p, self.training)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
class FeatureAlphaDropout(_DropoutNd):
    r"""Randomly masks out entire channels.

    A channel is a feature map, e.g. the :math:`j`-th channel of the
    :math:`i`-th sample in the batch input is a tensor
    :math:`\text{input}[i, j]`. Instead of setting activations to zero, as in
    regular Dropout, the activations are set to the negative saturation value
    of the SELU activation function
    (`Self-Normalizing Neural Networks <https://arxiv.org/abs/1706.02515>`_).

    Each channel is masked independently on every forward call with
    probability :attr:`p` using samples from a Bernoulli distribution; the
    output is scaled and shifted to maintain zero mean and unit variance. See
    `Efficient Object Localization Using Convolutional Networks
    <https://arxiv.org/abs/1411.4280>`_ for why channel-wise masking helps
    when feature-map activations are strongly correlated.

    Args:
        p (float, optional): probability of an element to be zeroed. Default: 0.5
        inplace (bool, optional): If set to ``True``, will do this operation in-place

    Shape:
        - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`.
        - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input).

    Examples::

        >>> m = nn.FeatureAlphaDropout(p=0.2)
        >>> input = torch.randn(20, 16, 4, 32, 32)
        >>> output = m(input)
    """

    def forward(self, input: Tensor) -> Tensor:
        # NOTE: the functional call ignores self.inplace (matches upstream behavior).
        return F.feature_alpha_dropout(input, self.p, self.training)
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/flatten.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .module import Module
|
| 2 |
+
|
| 3 |
+
from typing import Tuple, Union
|
| 4 |
+
from torch import Tensor
|
| 5 |
+
from torch.types import _size
|
| 6 |
+
|
| 7 |
+
__all__ = ['Flatten', 'Unflatten']
|
| 8 |
+
|
| 9 |
+
class Flatten(Module):
    r"""
    Flattens a contiguous range of dims into a tensor.

    For use with :class:`~nn.Sequential`; see :meth:`torch.flatten` for details.

    Shape:
        - Input: :math:`(*, S_{\text{start}},..., S_{i}, ..., S_{\text{end}}, *)`,
          where :math:`S_{i}` is the size at dimension :math:`i` and :math:`*` means any
          number of dimensions including none.
        - Output: :math:`(*, \prod_{i=\text{start}}^{\text{end}} S_{i}, *)`.

    Args:
        start_dim: first dim to flatten (default = 1).
        end_dim: last dim to flatten (default = -1).

    Examples::
        >>> input = torch.randn(32, 1, 5, 5)
        >>> m = nn.Flatten()
        >>> m(input).size()
        torch.Size([32, 25])
        >>> m = nn.Flatten(0, 2)
        >>> m(input).size()
        torch.Size([160, 5])
    """

    __constants__ = ['start_dim', 'end_dim']
    start_dim: int
    end_dim: int

    def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None:
        super().__init__()
        self.start_dim = start_dim
        self.end_dim = end_dim

    def forward(self, input: Tensor) -> Tensor:
        return input.flatten(self.start_dim, self.end_dim)

    def extra_repr(self) -> str:
        return f'start_dim={self.start_dim}, end_dim={self.end_dim}'
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class Unflatten(Module):
    r"""
    Unflattens a tensor dim expanding it to a desired shape. For use with :class:`~nn.Sequential`.

    * :attr:`dim` specifies the dimension of the input tensor to be unflattened, and it can
      be either `int` or `str` when `Tensor` or `NamedTensor` is used, respectively.

    * :attr:`unflattened_size` is the new shape of the unflattened dimension of the tensor and it can be
      a `tuple` of ints or a `list` of ints or `torch.Size` for `Tensor` input; a `NamedShape`
      (tuple of `(name, size)` tuples) for `NamedTensor` input.

    Shape:
        - Input: :math:`(*, S_{\text{dim}}, *)`, where :math:`S_{\text{dim}}` is the size at
          dimension :attr:`dim` and :math:`*` means any number of dimensions including none.
        - Output: :math:`(*, U_1, ..., U_n, *)`, where :math:`U` = :attr:`unflattened_size` and
          :math:`\prod_{i=1}^n U_i = S_{\text{dim}}`.

    Args:
        dim (Union[int, str]): Dimension to be unflattened
        unflattened_size (Union[torch.Size, Tuple, List, NamedShape]): New shape of the unflattened dimension

    Examples:
        >>> input = torch.randn(2, 50)
        >>> m = nn.Sequential(nn.Linear(50, 50), nn.Unflatten(1, (2, 5, 5)))
        >>> m(input).size()
        torch.Size([2, 2, 5, 5])
    """

    NamedShape = Tuple[Tuple[str, int]]

    __constants__ = ['dim', 'unflattened_size']
    dim: Union[int, str]
    unflattened_size: Union[_size, NamedShape]

    def __init__(self, dim: Union[int, str], unflattened_size: Union[_size, NamedShape]) -> None:
        super().__init__()

        # Validate the target-shape argument up front; the accepted form
        # depends on whether `dim` is positional (int) or named (str).
        if isinstance(dim, int):
            self._require_tuple_int(unflattened_size)
        elif isinstance(dim, str):
            self._require_tuple_tuple(unflattened_size)
        else:
            raise TypeError("invalid argument type for dim parameter")

        self.dim = dim
        self.unflattened_size = unflattened_size

    def _require_tuple_tuple(self, input):
        # Named-dim form: every element must itself be a (name, size) tuple.
        if (isinstance(input, tuple)):
            for idx, elem in enumerate(input):
                if not isinstance(elem, tuple):
                    raise TypeError("unflattened_size must be tuple of tuples, " +
                                    f"but found element of type {type(elem).__name__} at pos {idx}")
            return
        raise TypeError("unflattened_size must be a tuple of tuples, " +
                        f"but found type {type(input).__name__}")

    def _require_tuple_int(self, input):
        # Positional-dim form: a tuple/list of plain ints (torch.Size qualifies).
        if (isinstance(input, (tuple, list))):
            for idx, elem in enumerate(input):
                if not isinstance(elem, int):
                    raise TypeError("unflattened_size must be tuple of ints, " +
                                    f"but found element of type {type(elem).__name__} at pos {idx}")
            return
        raise TypeError(f"unflattened_size must be a tuple of ints, but found type {type(input).__name__}")

    def forward(self, input: Tensor) -> Tensor:
        return input.unflatten(self.dim, self.unflattened_size)

    def extra_repr(self) -> str:
        return f'dim={self.dim}, unflattened_size={self.unflattened_size}'
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/linear.py
ADDED
|
@@ -0,0 +1,264 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from typing import Any
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch import Tensor
|
| 6 |
+
from torch.nn.parameter import Parameter, UninitializedParameter
|
| 7 |
+
from .. import functional as F
|
| 8 |
+
from .. import init
|
| 9 |
+
from .module import Module
|
| 10 |
+
from .lazy import LazyModuleMixin
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
__all__ = [
|
| 14 |
+
'Bilinear',
|
| 15 |
+
'Identity',
|
| 16 |
+
'LazyLinear',
|
| 17 |
+
'Linear',
|
| 18 |
+
]
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class Identity(Module):
    r"""A placeholder identity operator that is argument-insensitive.

    All constructor arguments are accepted and ignored, so ``Identity`` can be
    dropped in as a no-op replacement for any module regardless of the
    replaced module's constructor signature.

    Args:
        args: any argument (unused)
        kwargs: any keyword argument (unused)

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Output: :math:`(*)`, same shape as the input.

    Examples::

        >>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 20])
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__()

    def forward(self, input: Tensor) -> Tensor:
        return input
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class Linear(Module):
    r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        bias: If set to ``False``, the layer will not learn an additive bias.
            Default: ``True``

    Shape:
        - Input: :math:`(*, H_{in})` where :math:`*` means any number of
          dimensions including none and :math:`H_{in} = \text{in\_features}`.
        - Output: :math:`(*, H_{out})` where all but the last dimension
          are the same shape as the input and :math:`H_{out} = \text{out\_features}`.

    Attributes:
        weight: learnable weights of shape
            :math:`(\text{out\_features}, \text{in\_features})`, initialized from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` with
            :math:`k = \frac{1}{\text{in\_features}}`
        bias: learnable bias of shape :math:`(\text{out\_features})`; when
            :attr:`bias` is ``True`` it is initialized from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` with
            :math:`k = \frac{1}{\text{in\_features}}`

    Examples::

        >>> m = nn.Linear(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """

    __constants__ = ['in_features', 'out_features']
    in_features: int
    out_features: int
    weight: Tensor

    def __init__(self, in_features: int, out_features: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Weight holds A (not A^T); forward applies x @ A^T.
        self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
        # Registering ``None`` keeps the 'bias' slot present so that
        # ``self.bias`` is always a valid attribute lookup.
        self.register_parameter(
            'bias',
            Parameter(torch.empty(out_features, **factory_kwargs)) if bias else None,
        )
        self.reset_parameters()

    def reset_parameters(self) -> None:
        # Setting a=sqrt(5) in kaiming_uniform is the same as initializing with
        # uniform(-1/sqrt(in_features), 1/sqrt(in_features)). For details, see
        # https://github.com/pytorch/pytorch/issues/57109
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in = init._calculate_fan_in_and_fan_out(self.weight)[0]
            # Guard against a degenerate zero-fan layer (in_features == 0).
            limit = 0 if fan_in == 0 else 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -limit, limit)

    def forward(self, input: Tensor) -> Tensor:
        return F.linear(input, self.weight, self.bias)

    def extra_repr(self) -> str:
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
# Workaround subclass: scripting an improperly quantized attention layer used
# to trigger an obscure TorchScript error, and routing through this class
# avoids it. See https://github.com/pytorch/pytorch/issues/58969 for details.
# TODO: fail fast on quantization API usage error, then remove this class
# and replace uses of it with plain Linear.
class NonDynamicallyQuantizableLinear(Linear):
    def __init__(self, in_features: int, out_features: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        # Pure delegation -- behavior is identical to Linear.
        super().__init__(in_features, out_features, bias=bias, device=device, dtype=dtype)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
class Bilinear(Module):
    r"""Applies a bilinear transformation to the incoming data: :math:`y = x_1^T A x_2 + b`.

    Args:
        in1_features: size of each first input sample
        in2_features: size of each second input sample
        out_features: size of each output sample
        bias: If set to False, the layer will not learn an additive bias.
            Default: ``True``

    Shape:
        - Input1: :math:`(*, H_{in1})` where :math:`H_{in1}=\text{in1\_features}` and
          :math:`*` means any number of additional dimensions including none. All but the last dimension
          of the inputs should be the same.
        - Input2: :math:`(*, H_{in2})` where :math:`H_{in2}=\text{in2\_features}`.
        - Output: :math:`(*, H_{out})` where :math:`H_{out}=\text{out\_features}`
          and all but the last dimension are the same shape as the input.

    Attributes:
        weight: the learnable weights of the module of shape
            :math:`(\text{out\_features}, \text{in1\_features}, \text{in2\_features})`.
            The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
            :math:`k = \frac{1}{\text{in1\_features}}`
        bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
            If :attr:`bias` is ``True``, the values are initialized from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
            :math:`k = \frac{1}{\text{in1\_features}}`

    Examples::

        >>> m = nn.Bilinear(20, 30, 40)
        >>> input1 = torch.randn(128, 20)
        >>> input2 = torch.randn(128, 30)
        >>> output = m(input1, input2)
        >>> print(output.size())
        torch.Size([128, 40])
    """

    __constants__ = ['in1_features', 'in2_features', 'out_features']
    in1_features: int
    in2_features: int
    out_features: int
    weight: Tensor

    def __init__(self, in1_features: int, in2_features: int, out_features: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.weight = Parameter(torch.empty((out_features, in1_features, in2_features), **factory_kwargs))

        if bias:
            self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
        else:
            # Keep the 'bias' attribute present (as None) even when disabled.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        # weight.size(1) == in1_features, so both weight and bias are drawn
        # from U(-1/sqrt(in1_features), 1/sqrt(in1_features)).
        bound = 1 / math.sqrt(self.weight.size(1))
        init.uniform_(self.weight, -bound, bound)
        if self.bias is not None:
            init.uniform_(self.bias, -bound, bound)

    def forward(self, input1: Tensor, input2: Tensor) -> Tensor:
        return F.bilinear(input1, input2, self.weight, self.bias)

    def extra_repr(self) -> str:
        # f-string for consistency with Linear.extra_repr (same output text).
        return (f'in1_features={self.in1_features}, in2_features={self.in2_features}, '
                f'out_features={self.out_features}, bias={self.bias is not None}')
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
class LazyLinear(LazyModuleMixin, Linear):
    r"""A :class:`torch.nn.Linear` module where `in_features` is inferred.

    In this module, the `weight` and `bias` are of :class:`torch.nn.UninitializedParameter`
    class. They will be initialized after the first call to ``forward`` is done and the
    module will become a regular :class:`torch.nn.Linear` module. The ``in_features`` argument
    of the :class:`Linear` is inferred from the ``input.shape[-1]``.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        out_features: size of each output sample
        bias: If set to ``False``, the layer will not learn an additive bias.
            Default: ``True``

    Attributes:
        weight: the learnable weights of the module of shape
            :math:`(\text{out\_features}, \text{in\_features})`. The values are
            initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
            :math:`k = \frac{1}{\text{in\_features}}`
        bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
            If :attr:`bias` is ``True``, the values are initialized from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{1}{\text{in\_features}}`


    """

    # After materialization, LazyModuleMixin mutates the instance's class to
    # this type, turning it into a plain Linear.
    cls_to_become = Linear  # type: ignore[assignment]
    # Both parameters start uninitialized and are materialized on first forward.
    weight: UninitializedParameter
    bias: UninitializedParameter  # type: ignore[assignment]

    def __init__(self, out_features: int, bias: bool = True,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        # bias is hardcoded to False to avoid creating tensor
        # that will soon be overwritten.
        # in/out features of 0 mark the parent Linear as not yet sized.
        super().__init__(0, 0, False)
        # Replace the (0, 0) placeholder weight with an uninitialized one.
        self.weight = UninitializedParameter(**factory_kwargs)
        self.out_features = out_features
        if bias:
            self.bias = UninitializedParameter(**factory_kwargs)

    def reset_parameters(self) -> None:
        # Skip initialization while parameters are still uninitialized or the
        # input size is unknown; Linear.reset_parameters runs after
        # initialize_parameters has materialized real tensors.
        if not self.has_uninitialized_params() and self.in_features != 0:
            super().reset_parameters()

    def initialize_parameters(self, input) -> None:  # type: ignore[override]
        # Called by LazyModuleMixin before the first real forward pass.
        if self.has_uninitialized_params():
            with torch.no_grad():
                # Infer in_features from the trailing dimension of the input.
                self.in_features = input.shape[-1]
                self.weight.materialize((self.out_features, self.in_features))
                if self.bias is not None:
                    self.bias.materialize((self.out_features,))
                # Now that real storage exists, apply the standard init.
                self.reset_parameters()
|
| 264 |
+
# TODO: PartialLinear - maybe in sparse?
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/module.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/padding.py
ADDED
|
@@ -0,0 +1,800 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .module import Module
|
| 2 |
+
from .utils import _pair, _quadruple, _ntuple
|
| 3 |
+
from .. import functional as F
|
| 4 |
+
|
| 5 |
+
from torch import Tensor
|
| 6 |
+
from ..common_types import _size_2_t, _size_4_t, _size_6_t
|
| 7 |
+
from typing import Sequence, Tuple
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# TODO: grad_output size asserts in THNN
|
| 11 |
+
|
| 12 |
+
# Public API of this module; names re-exported via ``from ... import *``.
__all__ = ['CircularPad1d', 'CircularPad2d', 'CircularPad3d', 'ConstantPad1d', 'ConstantPad2d',
           'ConstantPad3d', 'ReflectionPad1d', 'ReflectionPad2d', 'ReflectionPad3d',
           'ReplicationPad1d', 'ReplicationPad2d', 'ReplicationPad3d', 'ZeroPad1d', 'ZeroPad2d', 'ZeroPad3d']
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class _CircularPadNd(Module):
    """Shared base for the ``CircularPad{1,2,3}d`` modules.

    Subclasses fill in :attr:`padding` and implement
    :meth:`_check_input_dim`; the padding itself is delegated to
    :func:`torch.nn.functional.pad` in ``'circular'`` mode.
    """

    __constants__ = ['padding']
    padding: Sequence[int]

    def _check_input_dim(self, input):
        # Dimensionality validation is subclass-specific.
        raise NotImplementedError

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)
        return F.pad(input, self.padding, 'circular')

    def extra_repr(self) -> str:
        # Same text an f-string over the padding tuple would produce.
        return str(self.padding)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class CircularPad1d(_CircularPadNd):
|
| 33 |
+
r"""Pads the input tensor using circular padding of the input boundary.
|
| 34 |
+
|
| 35 |
+
Tensor values at the beginning of the dimension are used to pad the end,
|
| 36 |
+
and values at the end are used to pad the beginning. If negative padding is
|
| 37 |
+
applied then the ends of the tensor get removed.
|
| 38 |
+
|
| 39 |
+
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
|
| 40 |
+
|
| 41 |
+
Args:
|
| 42 |
+
padding (int, tuple): the size of the padding. If is `int`, uses the same
|
| 43 |
+
padding in all boundaries. If a 2-`tuple`, uses
|
| 44 |
+
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
|
| 45 |
+
|
| 46 |
+
Shape:
|
| 47 |
+
- Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
|
| 48 |
+
- Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
|
| 49 |
+
|
| 50 |
+
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
|
| 51 |
+
|
| 52 |
+
Examples::
|
| 53 |
+
|
| 54 |
+
>>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
|
| 55 |
+
>>> m = nn.CircularPad1d(2)
|
| 56 |
+
>>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
|
| 57 |
+
>>> input
|
| 58 |
+
tensor([[[0., 1., 2., 3.],
|
| 59 |
+
[4., 5., 6., 7.]]])
|
| 60 |
+
>>> m(input)
|
| 61 |
+
tensor([[[2., 3., 0., 1., 2., 3., 0., 1.],
|
| 62 |
+
[6., 7., 4., 5., 6., 7., 4., 5.]]])
|
| 63 |
+
>>> # using different paddings for different sides
|
| 64 |
+
>>> m = nn.CircularPad1d((3, 1))
|
| 65 |
+
>>> m(input)
|
| 66 |
+
tensor([[[1., 2., 3., 0., 1., 2., 3., 0.],
|
| 67 |
+
[5., 6., 7., 4., 5., 6., 7., 4.]]])
|
| 68 |
+
"""
|
| 69 |
+
|
| 70 |
+
padding: Tuple[int, int]
|
| 71 |
+
|
| 72 |
+
def __init__(self, padding: _size_2_t) -> None:
|
| 73 |
+
super().__init__()
|
| 74 |
+
self.padding = _pair(padding)
|
| 75 |
+
|
| 76 |
+
def _check_input_dim(self, input):
|
| 77 |
+
if input.dim() != 2 and input.dim() != 3:
|
| 78 |
+
raise ValueError(
|
| 79 |
+
f"expected 2D or 3D input (got {input.dim()}D input)"
|
| 80 |
+
)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class CircularPad2d(_CircularPadNd):
|
| 84 |
+
r"""Pads the input tensor using circular padding of the input boundary.
|
| 85 |
+
|
| 86 |
+
Tensor values at the beginning of the dimension are used to pad the end,
|
| 87 |
+
and values at the end are used to pad the beginning. If negative padding is
|
| 88 |
+
applied then the ends of the tensor get removed.
|
| 89 |
+
|
| 90 |
+
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
|
| 91 |
+
|
| 92 |
+
Args:
|
| 93 |
+
padding (int, tuple): the size of the padding. If is `int`, uses the same
|
| 94 |
+
padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
|
| 95 |
+
:math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
|
| 96 |
+
|
| 97 |
+
Shape:
|
| 98 |
+
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
|
| 99 |
+
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
|
| 100 |
+
|
| 101 |
+
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
|
| 102 |
+
|
| 103 |
+
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
|
| 104 |
+
|
| 105 |
+
Examples::
|
| 106 |
+
|
| 107 |
+
>>> m = nn.CircularPad2d(2)
|
| 108 |
+
>>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
|
| 109 |
+
>>> input
|
| 110 |
+
tensor([[[[0., 1., 2.],
|
| 111 |
+
[3., 4., 5.],
|
| 112 |
+
[6., 7., 8.]]]])
|
| 113 |
+
>>> m(input)
|
| 114 |
+
tensor([[[[4., 5., 3., 4., 5., 3., 4.],
|
| 115 |
+
[7., 8., 6., 7., 8., 6., 7.],
|
| 116 |
+
[1., 2., 0., 1., 2., 0., 1.],
|
| 117 |
+
[4., 5., 3., 4., 5., 3., 4.],
|
| 118 |
+
[7., 8., 6., 7., 8., 6., 7.],
|
| 119 |
+
[1., 2., 0., 1., 2., 0., 1.],
|
| 120 |
+
[4., 5., 3., 4., 5., 3., 4.]]]])
|
| 121 |
+
>>> # using different paddings for different sides
|
| 122 |
+
>>> m = nn.CircularPad2d((1, 1, 2, 0))
|
| 123 |
+
>>> m(input)
|
| 124 |
+
tensor([[[[5., 3., 4., 5., 3.],
|
| 125 |
+
[8., 6., 7., 8., 6.],
|
| 126 |
+
[2., 0., 1., 2., 0.],
|
| 127 |
+
[5., 3., 4., 5., 3.],
|
| 128 |
+
[8., 6., 7., 8., 6.]]]])
|
| 129 |
+
"""
|
| 130 |
+
|
| 131 |
+
padding: Tuple[int, int, int, int]
|
| 132 |
+
|
| 133 |
+
def __init__(self, padding: _size_4_t) -> None:
|
| 134 |
+
super().__init__()
|
| 135 |
+
self.padding = _quadruple(padding)
|
| 136 |
+
|
| 137 |
+
def _check_input_dim(self, input):
|
| 138 |
+
if input.dim() != 3 and input.dim() != 4:
|
| 139 |
+
raise ValueError(
|
| 140 |
+
f"expected 3D or 4D input (got {input.dim()}D input)"
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
class CircularPad3d(_CircularPadNd):
|
| 145 |
+
r"""Pads the input tensor using circular padding of the input boundary.
|
| 146 |
+
|
| 147 |
+
Tensor values at the beginning of the dimension are used to pad the end,
|
| 148 |
+
and values at the end are used to pad the beginning. If negative padding is
|
| 149 |
+
applied then the ends of the tensor get removed.
|
| 150 |
+
|
| 151 |
+
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
|
| 152 |
+
|
| 153 |
+
Args:
|
| 154 |
+
padding (int, tuple): the size of the padding. If is `int`, uses the same
|
| 155 |
+
padding in all boundaries. If a 6-`tuple`, uses
|
| 156 |
+
(:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
|
| 157 |
+
:math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
|
| 158 |
+
:math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
|
| 159 |
+
|
| 160 |
+
Shape:
|
| 161 |
+
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
|
| 162 |
+
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
|
| 163 |
+
where
|
| 164 |
+
|
| 165 |
+
:math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
|
| 166 |
+
|
| 167 |
+
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
|
| 168 |
+
|
| 169 |
+
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
|
| 170 |
+
|
| 171 |
+
Examples::
|
| 172 |
+
|
| 173 |
+
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
|
| 174 |
+
>>> m = nn.CircularPad3d(3)
|
| 175 |
+
>>> input = torch.randn(16, 3, 8, 320, 480)
|
| 176 |
+
>>> output = m(input)
|
| 177 |
+
>>> # using different paddings for different sides
|
| 178 |
+
>>> m = nn.CircularPad3d((3, 3, 6, 6, 1, 1))
|
| 179 |
+
>>> output = m(input)
|
| 180 |
+
"""
|
| 181 |
+
|
| 182 |
+
padding: Tuple[int, int, int, int, int, int]
|
| 183 |
+
|
| 184 |
+
def __init__(self, padding: _size_6_t) -> None:
|
| 185 |
+
super().__init__()
|
| 186 |
+
self.padding = _ntuple(6)(padding)
|
| 187 |
+
|
| 188 |
+
def _check_input_dim(self, input):
|
| 189 |
+
if input.dim() != 4 and input.dim() != 5:
|
| 190 |
+
raise ValueError(
|
| 191 |
+
f"expected 4D or 5D input (got {input.dim()}D input)"
|
| 192 |
+
)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
class _ConstantPadNd(Module):
    """Shared base for the ``ConstantPad{1,2,3}d`` modules.

    Stores the fill :attr:`value`; subclasses supply :attr:`padding`.
    """

    __constants__ = ['padding', 'value']
    value: float
    padding: Sequence[int]

    def __init__(self, value: float) -> None:
        super().__init__()
        self.value = value

    def forward(self, input: Tensor) -> Tensor:
        # Delegate to the functional implementation in 'constant' mode.
        return F.pad(input, self.padding, 'constant', self.value)

    def extra_repr(self) -> str:
        return 'padding={}, value={}'.format(self.padding, self.value)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
class ConstantPad1d(_ConstantPadNd):
    r"""Pads the last dimension of the input with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. An `int` pads both
            boundaries equally; a 2-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ConstantPad1d(2, 3.5)
        >>> input = torch.randn(1, 2, 3)
        >>> input
        tensor([[[ 1.6616,  1.4523, -1.1255],
                 [-3.6372,  0.1182, -1.8652]]])
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000,  3.5000],
                 [ 3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000,  3.5000]]])
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad1d((3, 1), 3.5)
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000],
                 [ 3.5000,  3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t, value: float):
        super().__init__(value)
        # Normalize an int to a symmetric (left, right) pair.
        self.padding = _pair(padding)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
class ConstantPad2d(_ConstantPadNd):
    r"""Pads the last two dimensions of the input with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. An `int` pads all
            boundaries equally; a 4-`tuple` gives (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ConstantPad2d(2, 3.5)
        >>> input = torch.randn(1, 2, 2)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
        >>> output = m(input)
    """

    __constants__ = ['padding', 'value']
    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t, value: float) -> None:
        super().__init__(value)
        # Normalize an int to a (left, right, top, bottom) quadruple.
        self.padding = _quadruple(padding)
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
class ConstantPad3d(_ConstantPadNd):
    r"""Pads the last three dimensions of the input with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. An `int` pads all
            boundaries equally; a 6-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ConstantPad3d(3, 3.5)
        >>> input = torch.randn(16, 3, 10, 20, 30)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5)
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t, value: float) -> None:
        super().__init__(value)
        # Normalize an int to a 6-tuple (left, right, top, bottom, front, back).
        self.padding = _ntuple(6)(padding)
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
class _ReflectionPadNd(Module):
    """Shared base for the ``ReflectionPad{1,2,3}d`` modules.

    Subclasses supply :attr:`padding`; the padding itself is delegated to
    :func:`torch.nn.functional.pad` in ``'reflect'`` mode.
    """

    __constants__ = ['padding']
    padding: Sequence[int]

    def forward(self, input: Tensor) -> Tensor:
        return F.pad(input, self.padding, 'reflect')

    def extra_repr(self) -> str:
        # Same text an f-string over the padding tuple would produce.
        return str(self.padding)
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
class ReflectionPad1d(_ReflectionPadNd):
    r"""Pads a 1D input by reflecting it about its edge elements.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. An `int` pads both
            sides equally; a 2-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`).

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> m = nn.ReflectionPad1d((3, 1))
        >>> m(input)
        tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
                 [7., 6., 5., 4., 5., 6., 7., 6.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        # Normalize an int to (left, right) so forward always sees a pair.
        self.padding = _pair(padding)
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
class ReflectionPad2d(_ReflectionPadNd):
    r"""Pads a 2D input by reflecting it about its edge elements.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. An `int` pads every
            boundary equally; a 4-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`).

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})` where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> m = nn.ReflectionPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[7., 6., 7., 8., 7.],
                  [4., 3., 4., 5., 4.],
                  [1., 0., 1., 2., 1.],
                  [4., 3., 4., 5., 4.],
                  [7., 6., 7., 8., 7.]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        # Normalize an int to (left, right, top, bottom).
        self.padding = _quadruple(padding)
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
class ReflectionPad3d(_ReflectionPadNd):
    r"""Pads a 3D input by reflecting it about its edge elements.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. An `int` pads every
            boundary equally; a 6-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`).

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReflectionPad3d(1)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 1, 2, 2, 2)
        >>> output = m(input)
        >>> output.shape
        torch.Size([1, 1, 4, 4, 4])
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        # Normalize an int to the full 6-tuple F.pad expects.
        to_sextuple = _ntuple(6)
        self.padding = to_sextuple(padding)
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
class _ReplicationPadNd(Module):
|
| 512 |
+
__constants__ = ['padding']
|
| 513 |
+
padding: Sequence[int]
|
| 514 |
+
|
| 515 |
+
def forward(self, input: Tensor) -> Tensor:
|
| 516 |
+
return F.pad(input, self.padding, 'replicate')
|
| 517 |
+
|
| 518 |
+
def extra_repr(self) -> str:
|
| 519 |
+
return f'{self.padding}'
|
| 520 |
+
|
| 521 |
+
|
| 522 |
+
class ReplicationPad1d(_ReplicationPadNd):
    r"""Pads a 1D input by replicating its edge elements.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. An `int` pads both
            sides equally; a 2-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`).

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> m = nn.ReplicationPad1d((3, 1))
        >>> m(input)
        tensor([[[0., 0., 0., 0., 1., 2., 3., 3.],
                 [4., 4., 4., 4., 5., 6., 7., 7.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        # Normalize an int to (left, right) so forward always sees a pair.
        self.padding = _pair(padding)
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
class ReplicationPad2d(_ReplicationPadNd):
    r"""Pads a 2D input by replicating its edge elements.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. An `int` pads every
            boundary equally; a 4-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`).

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> m = nn.ReplicationPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[0., 0., 1., 2., 2.],
                  [0., 0., 1., 2., 2.],
                  [0., 0., 1., 2., 2.],
                  [3., 3., 4., 5., 5.],
                  [6., 6., 7., 8., 8.]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        # Normalize an int to (left, right, top, bottom).
        self.padding = _quadruple(padding)
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
class ReplicationPad3d(_ReplicationPadNd):
    r"""Pads a 3D input by replicating its edge elements.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. An `int` pads every
            boundary equally; a 6-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`).

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
        >>> input = torch.randn(16, 3, 8, 320, 480)
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        # Normalize an int to the full 6-tuple F.pad expects.
        to_sextuple = _ntuple(6)
        self.padding = to_sextuple(padding)
|
| 654 |
+
|
| 655 |
+
|
| 656 |
+
class ZeroPad1d(ConstantPad1d):
    r"""Pads the boundaries of a 1D input with zeros.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. An `int` pads both
            sides equally; a 2-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`).

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ZeroPad1d((3, 1))
        >>> input = torch.ones(1, 1, 2)
        >>> m(input)
        tensor([[[0., 0., 0., 1., 1., 0.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        # Zero padding is constant padding with the value fixed at 0.
        super().__init__(padding, 0.)

    def extra_repr(self) -> str:
        # Hide the (always-zero) constant value from the repr.
        return f'{self.padding}'
|
| 707 |
+
|
| 708 |
+
class ZeroPad2d(ConstantPad2d):
    r"""Pads the boundaries of a 2D input with zeros.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. An `int` pads every
            boundary equally; a 4-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`).

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ZeroPad2d((1, 1, 2, 0))
        >>> input = torch.ones(1, 1, 2, 2)
        >>> m(input)
        tensor([[[[0., 0., 0., 0.],
                  [0., 0., 0., 0.],
                  [0., 1., 1., 0.],
                  [0., 1., 1., 0.]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        # Zero padding is constant padding with the value fixed at 0.
        super().__init__(padding, 0.)

    def extra_repr(self) -> str:
        # Hide the (always-zero) constant value from the repr.
        return f'{self.padding}'
|
| 760 |
+
|
| 761 |
+
class ZeroPad3d(ConstantPad3d):
    r"""Pads the boundaries of a 3D input with zeros.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. An `int` pads every
            boundary equally; a 6-`tuple` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`).

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1))
        >>> input = torch.randn(16, 3, 10, 20, 30)
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        # Zero padding is constant padding with the value fixed at 0.
        super().__init__(padding, 0.)

    def extra_repr(self) -> str:
        # Hide the (always-zero) constant value from the repr.
        return f'{self.padding}'
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/pooling.py
ADDED
|
@@ -0,0 +1,1229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Optional
|
| 2 |
+
|
| 3 |
+
from torch import Tensor
|
| 4 |
+
from .module import Module
|
| 5 |
+
from .utils import _single, _pair, _triple
|
| 6 |
+
from .. import functional as F
|
| 7 |
+
|
| 8 |
+
from ..common_types import (_size_any_t, _size_1_t, _size_2_t, _size_3_t,
|
| 9 |
+
_ratio_3_t, _ratio_2_t, _size_any_opt_t, _size_2_opt_t, _size_3_opt_t)
|
| 10 |
+
|
| 11 |
+
__all__ = ['MaxPool1d', 'MaxPool2d', 'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d',
|
| 12 |
+
'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'FractionalMaxPool2d', 'FractionalMaxPool3d', 'LPPool1d',
|
| 13 |
+
'LPPool2d', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d',
|
| 14 |
+
'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d']
|
| 15 |
+
|
| 16 |
+
class _MaxPoolNd(Module):
|
| 17 |
+
__constants__ = ['kernel_size', 'stride', 'padding', 'dilation',
|
| 18 |
+
'return_indices', 'ceil_mode']
|
| 19 |
+
return_indices: bool
|
| 20 |
+
ceil_mode: bool
|
| 21 |
+
|
| 22 |
+
def __init__(self, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None,
|
| 23 |
+
padding: _size_any_t = 0, dilation: _size_any_t = 1,
|
| 24 |
+
return_indices: bool = False, ceil_mode: bool = False) -> None:
|
| 25 |
+
super().__init__()
|
| 26 |
+
self.kernel_size = kernel_size
|
| 27 |
+
self.stride = stride if (stride is not None) else kernel_size
|
| 28 |
+
self.padding = padding
|
| 29 |
+
self.dilation = dilation
|
| 30 |
+
self.return_indices = return_indices
|
| 31 |
+
self.ceil_mode = ceil_mode
|
| 32 |
+
|
| 33 |
+
def extra_repr(self) -> str:
|
| 34 |
+
return 'kernel_size={kernel_size}, stride={stride}, padding={padding}' \
|
| 35 |
+
', dilation={dilation}, ceil_mode={ceil_mode}'.format(**self.__dict__)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class MaxPool1d(_MaxPoolNd):
    r"""Applies a 1D max pooling over an input signal composed of several input planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C, L)` and output :math:`(N, C, L_{out})` can be precisely
    described as:

    .. math::
        out(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel\_size} - 1}
                input(N_i, C_j, stride \times k + m)

    If :attr:`padding` is non-zero, then the input is implicitly padded with
    negative infinity on both sides for :attr:`padding` number of points.
    :attr:`dilation` is the stride between the elements within the sliding
    window. This `link`_ has a nice visualization of the pooling parameters.

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if
        they start within the left padding or the input. Sliding windows that
        would start in the right padded region are ignored.

    Args:
        kernel_size: The size of the sliding window, must be > 0.
        stride: The stride of the sliding window, must be > 0. Default value
            is :attr:`kernel_size`.
        padding: Implicit negative infinity padding to be added on both sides,
            must be >= 0 and <= kernel_size / 2.
        dilation: The stride between elements within a sliding window, must be > 0.
        return_indices: If ``True``, will return the argmax along with the max
            values. Useful for :class:`torch.nn.MaxUnpool1d` later
        ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute
            the output shape. This ensures that every element in the input
            tensor is covered by a sliding window.

    Shape:
        - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
        - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where

          .. math::
              L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation}
                    \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor

    Examples::

        >>> # pool of size=3, stride=2
        >>> m = nn.MaxPool1d(3, stride=2)
        >>> input = torch.randn(20, 16, 50)
        >>> output = m(input)

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    kernel_size: _size_1_t
    stride: _size_1_t
    padding: _size_1_t
    dilation: _size_1_t

    def forward(self, input: Tensor):
        # Delegate to the functional API; when return_indices is set the
        # result is a (values, indices) pair instead of a bare tensor.
        return F.max_pool1d(
            input,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            ceil_mode=self.ceil_mode,
            return_indices=self.return_indices,
        )
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class MaxPool2d(_MaxPoolNd):
    r"""Applies a 2D max pooling over an input signal composed of several input planes.

    For an input of size :math:`(N, C, H, W)` each output element is the
    maximum over a :math:`kH \times kW` window:

    .. math::
        out(N_i, C_j, h, w) = \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1}
            \text{input}(N_i, C_j, \text{stride[0]} \times h + m,
                         \text{stride[1]} \times w + n)

    A non-zero :attr:`padding` implicitly pads the input with negative
    infinity on both sides, and :attr:`dilation` controls the spacing
    between kernel points (see the `link`_ for a visualization).

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if
        they start within the left padding or the input. Sliding windows that
        would start in the right padded region are ignored.

    Each of :attr:`kernel_size`, :attr:`stride`, :attr:`padding` and
    :attr:`dilation` may be a single ``int`` (used for both dimensions) or a
    ``tuple`` of two ints ``(height, width)``.

    Args:
        kernel_size: the size of the window to take a max over
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: Implicit negative infinity padding to be added on both sides
        dilation: a parameter that controls the stride of elements in the window
        return_indices: if ``True``, will return the max indices along with the
            outputs. Useful for :class:`torch.nn.MaxUnpool2d` later
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the
            output shape

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]}
                    \times (\text{kernel\_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]}
                    \times (\text{kernel\_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor

    Examples::

        >>> # pool of square window of size=3, stride=2
        >>> m = nn.MaxPool2d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.MaxPool2d((3, 2), stride=(2, 1))
        >>> input = torch.randn(20, 16, 50, 32)
        >>> output = m(input)

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    kernel_size: _size_2_t
    stride: _size_2_t
    padding: _size_2_t
    dilation: _size_2_t

    def forward(self, input: Tensor):
        # Hyper-parameters were validated and stored by _MaxPoolNd.__init__;
        # the functional op accepts ints or pairs directly.
        return F.max_pool2d(
            input,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            ceil_mode=self.ceil_mode,
            return_indices=self.return_indices,
        )
+
class MaxPool3d(_MaxPoolNd):
    r"""Applies a 3D max pooling over an input signal composed of several input planes.

    For an input of size :math:`(N, C, D, H, W)` each output element is the
    maximum over a :math:`kD \times kH \times kW` window:

    .. math::
        \text{out}(N_i, C_j, d, h, w) =
            \max_{k=0, \ldots, kD-1} \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1}
            \text{input}(N_i, C_j, \text{stride[0]} \times d + k,
                         \text{stride[1]} \times h + m,
                         \text{stride[2]} \times w + n)

    A non-zero :attr:`padding` implicitly pads the input with negative
    infinity on both sides, and :attr:`dilation` controls the spacing
    between kernel points (see the `link`_ for a visualization).

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if
        they start within the left padding or the input. Sliding windows that
        would start in the right padded region are ignored.

    Each of :attr:`kernel_size`, :attr:`stride`, :attr:`padding` and
    :attr:`dilation` may be a single ``int`` (used for depth, height and
    width) or a ``tuple`` of three ints ``(depth, height, width)``.

    Args:
        kernel_size: the size of the window to take a max over
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: Implicit negative infinity padding to be added on all three sides
        dilation: a parameter that controls the stride of elements in the window
        return_indices: if ``True``, will return the max indices along with the
            outputs. Useful for :class:`torch.nn.MaxUnpool3d` later
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the
            output shape

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          .. math::
              D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
                    \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
                    \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2]
                    \times (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor

    Examples::

        >>> # pool of square window of size=3, stride=2
        >>> m = nn.MaxPool3d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2))
        >>> input = torch.randn(20, 16, 50, 44, 31)
        >>> output = m(input)

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    kernel_size: _size_3_t
    stride: _size_3_t
    padding: _size_3_t
    dilation: _size_3_t

    def forward(self, input: Tensor):
        # Hyper-parameters were validated and stored by _MaxPoolNd.__init__.
        return F.max_pool3d(
            input,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            ceil_mode=self.ceil_mode,
            return_indices=self.return_indices,
        )
class _MaxUnpoolNd(Module):
|
| 247 |
+
|
| 248 |
+
def extra_repr(self) -> str:
|
| 249 |
+
return f'kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}'
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
class MaxUnpool1d(_MaxUnpoolNd):
    r"""Computes a partial inverse of :class:`MaxPool1d`.

    :class:`MaxPool1d` is not fully invertible, since the non-maximal values
    are lost. :class:`MaxUnpool1d` takes in as input the output of
    :class:`MaxPool1d` including the indices of the maximal values and
    computes a partial inverse in which all non-maximal values are set to
    zero.

    Note:
        This operation may behave nondeterministically when the input indices
        has repeat values. See https://github.com/pytorch/pytorch/issues/80827
        and :doc:`/notes/randomness` for more information.

    .. note:: :class:`MaxPool1d` can map several input sizes to the same
              output sizes, so the inversion is ambiguous. To resolve this,
              pass the desired size as the optional :attr:`output_size`
              argument in the forward call. See the Inputs and Example below.

    Args:
        kernel_size (int or tuple): Size of the max pooling window.
        stride (int or tuple): Stride of the max pooling window.
            It is set to :attr:`kernel_size` by default.
        padding (int or tuple): Padding that was added to the input

    Inputs:
        - `input`: the input Tensor to invert
        - `indices`: the indices given out by :class:`~torch.nn.MaxPool1d`
        - `output_size` (optional): the targeted output size

    Shape:
        - Input: :math:`(N, C, H_{in})` or :math:`(C, H_{in})`.
        - Output: :math:`(N, C, H_{out})` or :math:`(C, H_{out})`, where

          .. math::
              H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0]

          or as given by :attr:`output_size` in the call operator

    Example::

        >>> # xdoctest: +IGNORE_WANT("do other tests modify the global state?")
        >>> pool = nn.MaxPool1d(2, stride=2, return_indices=True)
        >>> unpool = nn.MaxUnpool1d(2, stride=2)
        >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8]]])
        >>> output, indices = pool(input)
        >>> unpool(output, indices)
        tensor([[[ 0.,  2.,  0.,  4.,  0.,  6.,  0., 8.]]])

        >>> # Example showcasing the use of output_size
        >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8, 9]]])
        >>> output, indices = pool(input)
        >>> unpool(output, indices, output_size=input.size())
        tensor([[[ 0.,  2.,  0.,  4.,  0.,  6.,  0., 8.,  0.]]])

        >>> unpool(output, indices)
        tensor([[[ 0.,  2.,  0.,  4.,  0.,  6.,  0., 8.]]])
    """

    kernel_size: _size_1_t
    stride: _size_1_t
    padding: _size_1_t

    def __init__(self, kernel_size: _size_1_t, stride: Optional[_size_1_t] = None, padding: _size_1_t = 0) -> None:
        super().__init__()
        # A stride of None means "same as kernel_size", mirroring MaxPool1d.
        self.kernel_size = _single(kernel_size)
        self.stride = _single(kernel_size if stride is None else stride)
        self.padding = _single(padding)

    def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
        """Scatter ``input`` values back to the positions in ``indices``."""
        return F.max_unpool1d(
            input, indices, self.kernel_size, self.stride, self.padding, output_size
        )
class MaxUnpool2d(_MaxUnpoolNd):
    r"""Computes a partial inverse of :class:`MaxPool2d`.

    :class:`MaxPool2d` is not fully invertible, since the non-maximal values
    are lost. :class:`MaxUnpool2d` takes in as input the output of
    :class:`MaxPool2d` including the indices of the maximal values and
    computes a partial inverse in which all non-maximal values are set to
    zero.

    Note:
        This operation may behave nondeterministically when the input indices
        has repeat values. See https://github.com/pytorch/pytorch/issues/80827
        and :doc:`/notes/randomness` for more information.

    .. note:: :class:`MaxPool2d` can map several input sizes to the same
              output sizes, so the inversion is ambiguous. To resolve this,
              pass the desired size as the optional :attr:`output_size`
              argument in the forward call. See the Inputs and Example below.

    Args:
        kernel_size (int or tuple): Size of the max pooling window.
        stride (int or tuple): Stride of the max pooling window.
            It is set to :attr:`kernel_size` by default.
        padding (int or tuple): Padding that was added to the input

    Inputs:
        - `input`: the input Tensor to invert
        - `indices`: the indices given out by :class:`~torch.nn.MaxPool2d`
        - `output_size` (optional): the targeted output size

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          .. math::
            H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}

          .. math::
            W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}

          or as given by :attr:`output_size` in the call operator

    Example::

        >>> pool = nn.MaxPool2d(2, stride=2, return_indices=True)
        >>> unpool = nn.MaxUnpool2d(2, stride=2)
        >>> input = torch.tensor([[[[ 1.,  2.,  3.,  4.],
                                    [ 5.,  6.,  7.,  8.],
                                    [ 9., 10., 11., 12.],
                                    [13., 14., 15., 16.]]]])
        >>> output, indices = pool(input)
        >>> unpool(output, indices)
        tensor([[[[  0.,   0.,   0.,   0.],
                  [  0.,   6.,   0.,   8.],
                  [  0.,   0.,   0.,   0.],
                  [  0.,  14.,   0.,  16.]]]])
        >>> # Now using output_size to resolve an ambiguous size for the inverse
        >>> input = torch.tensor([[[[ 1.,  2.,  3.,  4.,  5.],
                                    [ 6.,  7.,  8.,  9., 10.],
                                    [11., 12., 13., 14., 15.],
                                    [16., 17., 18., 19., 20.]]]])
        >>> output, indices = pool(input)
        >>> # This call will not work without specifying output_size
        >>> unpool(output, indices, output_size=input.size())
        tensor([[[[ 0.,  0.,  0.,  0.,  0.],
                  [ 0.,  7.,  0.,  9.,  0.],
                  [ 0.,  0.,  0.,  0.,  0.],
                  [ 0., 17.,  0., 19.,  0.]]]])
    """

    kernel_size: _size_2_t
    stride: _size_2_t
    padding: _size_2_t

    def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0) -> None:
        super().__init__()
        # A stride of None means "same as kernel_size", mirroring MaxPool2d.
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(kernel_size if stride is None else stride)
        self.padding = _pair(padding)

    def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
        """Scatter ``input`` values back to the positions in ``indices``."""
        return F.max_unpool2d(
            input, indices, self.kernel_size, self.stride, self.padding, output_size
        )
class MaxUnpool3d(_MaxUnpoolNd):
    r"""Computes a partial inverse of :class:`MaxPool3d`.

    :class:`MaxPool3d` is not fully invertible, since the non-maximal values
    are lost. :class:`MaxUnpool3d` takes in as input the output of
    :class:`MaxPool3d` including the indices of the maximal values and
    computes a partial inverse in which all non-maximal values are set to
    zero.

    Note:
        This operation may behave nondeterministically when the input indices
        has repeat values. See https://github.com/pytorch/pytorch/issues/80827
        and :doc:`/notes/randomness` for more information.

    .. note:: :class:`MaxPool3d` can map several input sizes to the same
              output sizes, so the inversion is ambiguous. To resolve this,
              pass the desired size as the optional :attr:`output_size`
              argument in the forward call. See the Inputs section below.

    Args:
        kernel_size (int or tuple): Size of the max pooling window.
        stride (int or tuple): Stride of the max pooling window.
            It is set to :attr:`kernel_size` by default.
        padding (int or tuple): Padding that was added to the input

    Inputs:
        - `input`: the input Tensor to invert
        - `indices`: the indices given out by :class:`~torch.nn.MaxPool3d`
        - `output_size` (optional): the targeted output size

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where

          .. math::
              D_{out} = (D_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}

          .. math::
              H_{out} = (H_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}

          .. math::
              W_{out} = (W_{in} - 1) \times \text{stride[2]} - 2 \times \text{padding[2]} + \text{kernel\_size[2]}

          or as given by :attr:`output_size` in the call operator

    Example::

        >>> # pool of square window of size=3, stride=2
        >>> pool = nn.MaxPool3d(3, stride=2, return_indices=True)
        >>> unpool = nn.MaxUnpool3d(3, stride=2)
        >>> output, indices = pool(torch.randn(20, 16, 51, 33, 15))
        >>> unpooled_output = unpool(output, indices)
        >>> unpooled_output.size()
        torch.Size([20, 16, 51, 33, 15])
    """

    kernel_size: _size_3_t
    stride: _size_3_t
    padding: _size_3_t

    def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0) -> None:
        super().__init__()
        # A stride of None means "same as kernel_size", mirroring MaxPool3d.
        self.kernel_size = _triple(kernel_size)
        self.stride = _triple(kernel_size if stride is None else stride)
        self.padding = _triple(padding)

    def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
        """Scatter ``input`` values back to the positions in ``indices``."""
        return F.max_unpool3d(
            input, indices, self.kernel_size, self.stride, self.padding, output_size
        )
class _AvgPoolNd(Module):
|
| 484 |
+
__constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad']
|
| 485 |
+
|
| 486 |
+
def extra_repr(self) -> str:
|
| 487 |
+
return f'kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}'
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
class AvgPool1d(_AvgPoolNd):
    r"""Applies a 1D average pooling over an input signal composed of several input planes.

    In the simplest case, the output value of the layer with input size :math:`(N, C, L)`,
    output :math:`(N, C, L_{out})` and :attr:`kernel_size` :math:`k`
    can be precisely described as:

    .. math::

        \text{out}(N_i, C_j, l) = \frac{1}{k} \sum_{m=0}^{k-1}
                               \text{input}(N_i, C_j, \text{stride} \times l + m)

    If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
    for :attr:`padding` number of points.

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
        or the input. Sliding windows that would start in the right padded region are ignored.

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can each be
    an ``int`` or a one-element tuple.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on both sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
        count_include_pad: when True, will include the zero-padding in the averaging calculation

    Shape:
        - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
        - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where

          .. math::
              L_{out} = \left\lfloor \frac{L_{in} +
              2 \times \text{padding} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor

    Examples::

        >>> # pool with window of size=3, stride=2
        >>> m = nn.AvgPool1d(3, stride=2)
        >>> m(torch.tensor([[[1., 2, 3, 4, 5, 6, 7]]]))
        tensor([[[2., 4., 6.]]])
    """

    kernel_size: _size_1_t
    stride: _size_1_t
    padding: _size_1_t
    ceil_mode: bool
    count_include_pad: bool

    # NOTE: stride is annotated Optional[_size_1_t] (it defaults to None,
    # meaning "same as kernel_size"); the previous `stride: _size_1_t = None`
    # annotation was invalid and inconsistent with AvgPool2d/AvgPool3d.
    def __init__(self, kernel_size: _size_1_t, stride: Optional[_size_1_t] = None, padding: _size_1_t = 0,
                 ceil_mode: bool = False, count_include_pad: bool = True) -> None:
        super().__init__()
        self.kernel_size = _single(kernel_size)
        self.stride = _single(stride if stride is not None else kernel_size)
        self.padding = _single(padding)
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad

    def forward(self, input: Tensor) -> Tensor:
        """Average-pool ``input`` with this module's stored hyper-parameters."""
        return F.avg_pool1d(
            input, self.kernel_size, self.stride, self.padding, self.ceil_mode,
            self.count_include_pad)
class AvgPool2d(_AvgPoolNd):
    r"""Applies a 2D average pooling over an input signal composed of several input planes.

    In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
    output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
    can be precisely described as:

    .. math::

        out(N_i, C_j, h, w)  = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
                               input(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)

    If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
    for :attr:`padding` number of points.

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
        or the input. Sliding windows that would start in the right padded region are ignored.

    Each of :attr:`kernel_size`, :attr:`stride` and :attr:`padding` may be a
    single ``int`` (used for both dimensions) or a ``tuple`` of two ints
    ``(height, width)``.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on both sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
        count_include_pad: when True, will include the zero-padding in the averaging calculation
        divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in}  + 2 \times \text{padding}[0] -
                \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in}  + 2 \times \text{padding}[1] -
                \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor

    Examples::

        >>> # pool of square window of size=3, stride=2
        >>> m = nn.AvgPool2d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
        >>> input = torch.randn(20, 16, 50, 32)
        >>> output = m(input)
    """

    __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']

    kernel_size: _size_2_t
    stride: _size_2_t
    padding: _size_2_t
    ceil_mode: bool
    count_include_pad: bool

    def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0,
                 ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> None:
        super().__init__()
        # Values are stored as given (int or tuple); F.avg_pool2d accepts both.
        self.kernel_size = kernel_size
        self.stride = kernel_size if stride is None else stride
        self.padding = padding
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad
        self.divisor_override = divisor_override

    def forward(self, input: Tensor) -> Tensor:
        """Average-pool ``input`` with this module's stored hyper-parameters."""
        return F.avg_pool2d(
            input,
            self.kernel_size,
            self.stride,
            self.padding,
            self.ceil_mode,
            self.count_include_pad,
            self.divisor_override,
        )
class AvgPool3d(_AvgPoolNd):
    r"""Applies a 3D average pooling over an input signal composed of several input planes.

    In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
    output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
    can be precisely described as:

    .. math::
        \begin{aligned}
            \text{out}(N_i, C_j, d, h, w) ={} & \sum_{k=0}^{kD-1} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} \\
                                              & \frac{\text{input}(N_i, C_j, \text{stride}[0] \times d + k,
                                                      \text{stride}[1] \times h + m, \text{stride}[2] \times w + n)}
                                                     {kD \times kH \times kW}
        \end{aligned}

    If :attr:`padding` is non-zero, then the input is implicitly zero-padded on all three sides
    for :attr:`padding` number of points.

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
        or the input. Sliding windows that would start in the right padded region are ignored.

    Each of :attr:`kernel_size` and :attr:`stride` may be a single ``int``
    (used for depth, height and width) or a ``tuple`` of three ints
    ``(depth, height, width)``.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on all three sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
        count_include_pad: when True, will include the zero-padding in the averaging calculation
        divisor_override: if specified, it will be used as divisor, otherwise :attr:`kernel_size` will be used

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          .. math::
              D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] -
                    \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] -
                    \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] -
                    \text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor

    Examples::

        >>> # pool of square window of size=3, stride=2
        >>> m = nn.AvgPool3d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2))
        >>> input = torch.randn(20, 16, 50, 44, 31)
        >>> output = m(input)
    """

    __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']

    kernel_size: _size_3_t
    stride: _size_3_t
    padding: _size_3_t
    ceil_mode: bool
    count_include_pad: bool

    def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0,
                 ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> None:
        super().__init__()
        # Values are stored as given (int or tuple); F.avg_pool3d accepts both.
        self.kernel_size = kernel_size
        self.stride = kernel_size if stride is None else stride
        self.padding = padding
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad
        self.divisor_override = divisor_override

    def forward(self, input: Tensor) -> Tensor:
        """Average-pool ``input`` with this module's stored hyper-parameters."""
        return F.avg_pool3d(
            input,
            self.kernel_size,
            self.stride,
            self.padding,
            self.ceil_mode,
            self.count_include_pad,
            self.divisor_override,
        )

    def __setstate__(self, d):
        # Older serialized checkpoints may predate some attributes; restore
        # them with the constructor defaults so unpickled modules still work.
        super().__setstate__(d)
        self.__dict__.setdefault('padding', 0)
        self.__dict__.setdefault('ceil_mode', False)
        self.__dict__.setdefault('count_include_pad', True)
class FractionalMaxPool2d(Module):
|
| 728 |
+
r"""Applies a 2D fractional max pooling over an input signal composed of several input planes.
|
| 729 |
+
|
| 730 |
+
Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
|
| 731 |
+
|
| 732 |
+
The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic
|
| 733 |
+
step size determined by the target output size.
|
| 734 |
+
The number of output features is equal to the number of input planes.
|
| 735 |
+
|
| 736 |
+
.. note:: Exactly one of ``output_size`` or ``output_ratio`` must be defined.
|
| 737 |
+
|
| 738 |
+
Args:
|
| 739 |
+
kernel_size: the size of the window to take a max over.
|
| 740 |
+
Can be a single number k (for a square kernel of k x k) or a tuple `(kh, kw)`
|
| 741 |
+
output_size: the target output size of the image of the form `oH x oW`.
|
| 742 |
+
Can be a tuple `(oH, oW)` or a single number oH for a square image `oH x oH`
|
| 743 |
+
output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
|
| 744 |
+
This has to be a number or tuple in the range (0, 1)
|
| 745 |
+
return_indices: if ``True``, will return the indices along with the outputs.
|
| 746 |
+
Useful to pass to :meth:`nn.MaxUnpool2d`. Default: ``False``
|
| 747 |
+
|
| 748 |
+
Shape:
|
| 749 |
+
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
|
| 750 |
+
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
|
| 751 |
+
:math:`(H_{out}, W_{out})=\text{output\_size}` or
|
| 752 |
+
:math:`(H_{out}, W_{out})=\text{output\_ratio} \times (H_{in}, W_{in})`.
|
| 753 |
+
|
| 754 |
+
Examples:
|
| 755 |
+
>>> # pool of square window of size=3, and target output size 13x12
|
| 756 |
+
>>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12))
|
| 757 |
+
>>> # pool of square window and target output size being half of input image size
|
| 758 |
+
>>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
|
| 759 |
+
>>> input = torch.randn(20, 16, 50, 32)
|
| 760 |
+
>>> output = m(input)
|
| 761 |
+
|
| 762 |
+
.. _Fractional MaxPooling:
|
| 763 |
+
https://arxiv.org/abs/1412.6071
|
| 764 |
+
"""
|
| 765 |
+
|
| 766 |
+
__constants__ = ['kernel_size', 'return_indices', 'output_size',
|
| 767 |
+
'output_ratio']
|
| 768 |
+
|
| 769 |
+
kernel_size: _size_2_t
|
| 770 |
+
return_indices: bool
|
| 771 |
+
output_size: _size_2_t
|
| 772 |
+
output_ratio: _ratio_2_t
|
| 773 |
+
|
| 774 |
+
def __init__(self, kernel_size: _size_2_t, output_size: Optional[_size_2_t] = None,
|
| 775 |
+
output_ratio: Optional[_ratio_2_t] = None,
|
| 776 |
+
return_indices: bool = False, _random_samples=None) -> None:
|
| 777 |
+
super().__init__()
|
| 778 |
+
self.kernel_size = _pair(kernel_size)
|
| 779 |
+
self.return_indices = return_indices
|
| 780 |
+
self.register_buffer('_random_samples', _random_samples)
|
| 781 |
+
self.output_size = _pair(output_size) if output_size is not None else None
|
| 782 |
+
self.output_ratio = _pair(output_ratio) if output_ratio is not None else None
|
| 783 |
+
if output_size is None and output_ratio is None:
|
| 784 |
+
raise ValueError("FractionalMaxPool2d requires specifying either "
|
| 785 |
+
"an output size, or a pooling ratio")
|
| 786 |
+
if output_size is not None and output_ratio is not None:
|
| 787 |
+
raise ValueError("only one of output_size and output_ratio may be specified")
|
| 788 |
+
if self.output_ratio is not None:
|
| 789 |
+
if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1):
|
| 790 |
+
raise ValueError(f"output_ratio must be between 0 and 1 (got {output_ratio})")
|
| 791 |
+
|
| 792 |
+
def forward(self, input: Tensor):
|
| 793 |
+
return F.fractional_max_pool2d(
|
| 794 |
+
input, self.kernel_size, self.output_size, self.output_ratio,
|
| 795 |
+
self.return_indices,
|
| 796 |
+
_random_samples=self._random_samples)
|
| 797 |
+
|
| 798 |
+
|
| 799 |
+
class FractionalMaxPool3d(Module):
|
| 800 |
+
r"""Applies a 3D fractional max pooling over an input signal composed of several input planes.
|
| 801 |
+
|
| 802 |
+
Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
|
| 803 |
+
|
| 804 |
+
The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic
|
| 805 |
+
step size determined by the target output size.
|
| 806 |
+
The number of output features is equal to the number of input planes.
|
| 807 |
+
|
| 808 |
+
.. note:: Exactly one of ``output_size`` or ``output_ratio`` must be defined.
|
| 809 |
+
|
| 810 |
+
Args:
|
| 811 |
+
kernel_size: the size of the window to take a max over.
|
| 812 |
+
Can be a single number k (for a square kernel of k x k x k) or a tuple `(kt x kh x kw)`
|
| 813 |
+
output_size: the target output size of the image of the form `oT x oH x oW`.
|
| 814 |
+
Can be a tuple `(oT, oH, oW)` or a single number oH for a square image `oH x oH x oH`
|
| 815 |
+
output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
|
| 816 |
+
This has to be a number or tuple in the range (0, 1)
|
| 817 |
+
return_indices: if ``True``, will return the indices along with the outputs.
|
| 818 |
+
Useful to pass to :meth:`nn.MaxUnpool3d`. Default: ``False``
|
| 819 |
+
|
| 820 |
+
Shape:
|
| 821 |
+
- Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or :math:`(C, T_{in}, H_{in}, W_{in})`.
|
| 822 |
+
- Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or :math:`(C, T_{out}, H_{out}, W_{out})`, where
|
| 823 |
+
:math:`(T_{out}, H_{out}, W_{out})=\text{output\_size}` or
|
| 824 |
+
:math:`(T_{out}, H_{out}, W_{out})=\text{output\_ratio} \times (T_{in}, H_{in}, W_{in})`
|
| 825 |
+
|
| 826 |
+
Examples:
|
| 827 |
+
>>> # pool of cubic window of size=3, and target output size 13x12x11
|
| 828 |
+
>>> m = nn.FractionalMaxPool3d(3, output_size=(13, 12, 11))
|
| 829 |
+
>>> # pool of cubic window and target output size being half of input size
|
| 830 |
+
>>> m = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5))
|
| 831 |
+
>>> input = torch.randn(20, 16, 50, 32, 16)
|
| 832 |
+
>>> output = m(input)
|
| 833 |
+
|
| 834 |
+
.. _Fractional MaxPooling:
|
| 835 |
+
https://arxiv.org/abs/1412.6071
|
| 836 |
+
"""
|
| 837 |
+
|
| 838 |
+
__constants__ = ['kernel_size', 'return_indices', 'output_size',
|
| 839 |
+
'output_ratio']
|
| 840 |
+
kernel_size: _size_3_t
|
| 841 |
+
return_indices: bool
|
| 842 |
+
output_size: _size_3_t
|
| 843 |
+
output_ratio: _ratio_3_t
|
| 844 |
+
|
| 845 |
+
def __init__(self, kernel_size: _size_3_t, output_size: Optional[_size_3_t] = None,
|
| 846 |
+
output_ratio: Optional[_ratio_3_t] = None,
|
| 847 |
+
return_indices: bool = False, _random_samples=None) -> None:
|
| 848 |
+
super().__init__()
|
| 849 |
+
self.kernel_size = _triple(kernel_size)
|
| 850 |
+
self.return_indices = return_indices
|
| 851 |
+
self.register_buffer('_random_samples', _random_samples)
|
| 852 |
+
self.output_size = _triple(output_size) if output_size is not None else None
|
| 853 |
+
self.output_ratio = _triple(output_ratio) if output_ratio is not None else None
|
| 854 |
+
if output_size is None and output_ratio is None:
|
| 855 |
+
raise ValueError("FractionalMaxPool3d requires specifying either "
|
| 856 |
+
"an output size, or a pooling ratio")
|
| 857 |
+
if output_size is not None and output_ratio is not None:
|
| 858 |
+
raise ValueError("only one of output_size and output_ratio may be specified")
|
| 859 |
+
if self.output_ratio is not None:
|
| 860 |
+
if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1 and 0 < self.output_ratio[2] < 1):
|
| 861 |
+
raise ValueError(f"output_ratio must be between 0 and 1 (got {output_ratio})")
|
| 862 |
+
|
| 863 |
+
def forward(self, input: Tensor):
|
| 864 |
+
return F.fractional_max_pool3d(
|
| 865 |
+
input, self.kernel_size, self.output_size, self.output_ratio,
|
| 866 |
+
self.return_indices,
|
| 867 |
+
_random_samples=self._random_samples)
|
| 868 |
+
|
| 869 |
+
|
| 870 |
+
class _LPPoolNd(Module):
|
| 871 |
+
__constants__ = ['norm_type', 'kernel_size', 'stride', 'ceil_mode']
|
| 872 |
+
|
| 873 |
+
norm_type: float
|
| 874 |
+
ceil_mode: bool
|
| 875 |
+
|
| 876 |
+
def __init__(self, norm_type: float, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None,
|
| 877 |
+
ceil_mode: bool = False) -> None:
|
| 878 |
+
super().__init__()
|
| 879 |
+
self.norm_type = norm_type
|
| 880 |
+
self.kernel_size = kernel_size
|
| 881 |
+
self.stride = stride
|
| 882 |
+
self.ceil_mode = ceil_mode
|
| 883 |
+
|
| 884 |
+
def extra_repr(self) -> str:
|
| 885 |
+
return 'norm_type={norm_type}, kernel_size={kernel_size}, stride={stride}, ' \
|
| 886 |
+
'ceil_mode={ceil_mode}'.format(**self.__dict__)
|
| 887 |
+
|
| 888 |
+
|
| 889 |
+
class LPPool1d(_LPPoolNd):
|
| 890 |
+
r"""Applies a 1D power-average pooling over an input signal composed of several input planes.
|
| 891 |
+
|
| 892 |
+
On each window, the function computed is:
|
| 893 |
+
|
| 894 |
+
.. math::
|
| 895 |
+
f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
|
| 896 |
+
|
| 897 |
+
- At p = :math:`\infty`, one gets Max Pooling
|
| 898 |
+
- At p = 1, one gets Sum Pooling (which is proportional to Average Pooling)
|
| 899 |
+
|
| 900 |
+
.. note:: If the sum to the power of `p` is zero, the gradient of this function is
|
| 901 |
+
not defined. This implementation will set the gradient to zero in this case.
|
| 902 |
+
|
| 903 |
+
Args:
|
| 904 |
+
kernel_size: a single int, the size of the window
|
| 905 |
+
stride: a single int, the stride of the window. Default value is :attr:`kernel_size`
|
| 906 |
+
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
|
| 907 |
+
|
| 908 |
+
Shape:
|
| 909 |
+
- Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
|
| 910 |
+
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
|
| 911 |
+
|
| 912 |
+
.. math::
|
| 913 |
+
L_{out} = \left\lfloor\frac{L_{in} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
|
| 914 |
+
|
| 915 |
+
Examples::
|
| 916 |
+
>>> # power-2 pool of window of length 3, with stride 2.
|
| 917 |
+
>>> m = nn.LPPool1d(2, 3, stride=2)
|
| 918 |
+
>>> input = torch.randn(20, 16, 50)
|
| 919 |
+
>>> output = m(input)
|
| 920 |
+
"""
|
| 921 |
+
|
| 922 |
+
kernel_size: _size_1_t
|
| 923 |
+
stride: _size_1_t
|
| 924 |
+
|
| 925 |
+
def forward(self, input: Tensor) -> Tensor:
|
| 926 |
+
return F.lp_pool1d(input, float(self.norm_type), self.kernel_size,
|
| 927 |
+
self.stride, self.ceil_mode)
|
| 928 |
+
|
| 929 |
+
|
| 930 |
+
class LPPool2d(_LPPoolNd):
|
| 931 |
+
r"""Applies a 2D power-average pooling over an input signal composed of several input planes.
|
| 932 |
+
|
| 933 |
+
On each window, the function computed is:
|
| 934 |
+
|
| 935 |
+
.. math::
|
| 936 |
+
f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
|
| 937 |
+
|
| 938 |
+
- At p = :math:`\infty`, one gets Max Pooling
|
| 939 |
+
- At p = 1, one gets Sum Pooling (which is proportional to average pooling)
|
| 940 |
+
|
| 941 |
+
The parameters :attr:`kernel_size`, :attr:`stride` can either be:
|
| 942 |
+
|
| 943 |
+
- a single ``int`` -- in which case the same value is used for the height and width dimension
|
| 944 |
+
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
|
| 945 |
+
and the second `int` for the width dimension
|
| 946 |
+
|
| 947 |
+
.. note:: If the sum to the power of `p` is zero, the gradient of this function is
|
| 948 |
+
not defined. This implementation will set the gradient to zero in this case.
|
| 949 |
+
|
| 950 |
+
Args:
|
| 951 |
+
kernel_size: the size of the window
|
| 952 |
+
stride: the stride of the window. Default value is :attr:`kernel_size`
|
| 953 |
+
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
|
| 954 |
+
|
| 955 |
+
Shape:
|
| 956 |
+
- Input: :math:`(N, C, H_{in}, W_{in})`
|
| 957 |
+
- Output: :math:`(N, C, H_{out}, W_{out})`, where
|
| 958 |
+
|
| 959 |
+
.. math::
|
| 960 |
+
H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
|
| 961 |
+
|
| 962 |
+
.. math::
|
| 963 |
+
W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
|
| 964 |
+
|
| 965 |
+
Examples::
|
| 966 |
+
|
| 967 |
+
>>> # power-2 pool of square window of size=3, stride=2
|
| 968 |
+
>>> m = nn.LPPool2d(2, 3, stride=2)
|
| 969 |
+
>>> # pool of non-square window of power 1.2
|
| 970 |
+
>>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1))
|
| 971 |
+
>>> input = torch.randn(20, 16, 50, 32)
|
| 972 |
+
>>> output = m(input)
|
| 973 |
+
|
| 974 |
+
"""
|
| 975 |
+
|
| 976 |
+
kernel_size: _size_2_t
|
| 977 |
+
stride: _size_2_t
|
| 978 |
+
|
| 979 |
+
def forward(self, input: Tensor) -> Tensor:
|
| 980 |
+
return F.lp_pool2d(input, float(self.norm_type), self.kernel_size,
|
| 981 |
+
self.stride, self.ceil_mode)
|
| 982 |
+
|
| 983 |
+
|
| 984 |
+
class _AdaptiveMaxPoolNd(Module):
|
| 985 |
+
__constants__ = ['output_size', 'return_indices']
|
| 986 |
+
return_indices: bool
|
| 987 |
+
|
| 988 |
+
def __init__(self, output_size: _size_any_opt_t, return_indices: bool = False) -> None:
|
| 989 |
+
super().__init__()
|
| 990 |
+
self.output_size = output_size
|
| 991 |
+
self.return_indices = return_indices
|
| 992 |
+
|
| 993 |
+
def extra_repr(self) -> str:
|
| 994 |
+
return f'output_size={self.output_size}'
|
| 995 |
+
|
| 996 |
+
# FIXME (by @ssnl): Improve adaptive pooling docs: specify what the input and
|
| 997 |
+
# output shapes are, and how the operation computes output.
|
| 998 |
+
|
| 999 |
+
|
| 1000 |
+
class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd):
|
| 1001 |
+
r"""Applies a 1D adaptive max pooling over an input signal composed of several input planes.
|
| 1002 |
+
|
| 1003 |
+
The output size is :math:`L_{out}`, for any input size.
|
| 1004 |
+
The number of output features is equal to the number of input planes.
|
| 1005 |
+
|
| 1006 |
+
Args:
|
| 1007 |
+
output_size: the target output size :math:`L_{out}`.
|
| 1008 |
+
return_indices: if ``True``, will return the indices along with the outputs.
|
| 1009 |
+
Useful to pass to nn.MaxUnpool1d. Default: ``False``
|
| 1010 |
+
|
| 1011 |
+
Shape:
|
| 1012 |
+
- Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
|
| 1013 |
+
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
|
| 1014 |
+
:math:`L_{out}=\text{output\_size}`.
|
| 1015 |
+
|
| 1016 |
+
Examples:
|
| 1017 |
+
>>> # target output size of 5
|
| 1018 |
+
>>> m = nn.AdaptiveMaxPool1d(5)
|
| 1019 |
+
>>> input = torch.randn(1, 64, 8)
|
| 1020 |
+
>>> output = m(input)
|
| 1021 |
+
|
| 1022 |
+
"""
|
| 1023 |
+
|
| 1024 |
+
output_size: _size_1_t
|
| 1025 |
+
|
| 1026 |
+
def forward(self, input: Tensor) -> Tensor:
|
| 1027 |
+
return F.adaptive_max_pool1d(input, self.output_size, self.return_indices)
|
| 1028 |
+
|
| 1029 |
+
|
| 1030 |
+
class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
|
| 1031 |
+
r"""Applies a 2D adaptive max pooling over an input signal composed of several input planes.
|
| 1032 |
+
|
| 1033 |
+
The output is of size :math:`H_{out} \times W_{out}`, for any input size.
|
| 1034 |
+
The number of output features is equal to the number of input planes.
|
| 1035 |
+
|
| 1036 |
+
Args:
|
| 1037 |
+
output_size: the target output size of the image of the form :math:`H_{out} \times W_{out}`.
|
| 1038 |
+
Can be a tuple :math:`(H_{out}, W_{out})` or a single :math:`H_{out}` for a
|
| 1039 |
+
square image :math:`H_{out} \times H_{out}`. :math:`H_{out}` and :math:`W_{out}`
|
| 1040 |
+
can be either a ``int``, or ``None`` which means the size will be the same as that
|
| 1041 |
+
of the input.
|
| 1042 |
+
return_indices: if ``True``, will return the indices along with the outputs.
|
| 1043 |
+
Useful to pass to nn.MaxUnpool2d. Default: ``False``
|
| 1044 |
+
|
| 1045 |
+
Shape:
|
| 1046 |
+
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
|
| 1047 |
+
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
|
| 1048 |
+
:math:`(H_{out}, W_{out})=\text{output\_size}`.
|
| 1049 |
+
|
| 1050 |
+
Examples:
|
| 1051 |
+
>>> # target output size of 5x7
|
| 1052 |
+
>>> m = nn.AdaptiveMaxPool2d((5, 7))
|
| 1053 |
+
>>> input = torch.randn(1, 64, 8, 9)
|
| 1054 |
+
>>> output = m(input)
|
| 1055 |
+
>>> # target output size of 7x7 (square)
|
| 1056 |
+
>>> m = nn.AdaptiveMaxPool2d(7)
|
| 1057 |
+
>>> input = torch.randn(1, 64, 10, 9)
|
| 1058 |
+
>>> output = m(input)
|
| 1059 |
+
>>> # target output size of 10x7
|
| 1060 |
+
>>> m = nn.AdaptiveMaxPool2d((None, 7))
|
| 1061 |
+
>>> input = torch.randn(1, 64, 10, 9)
|
| 1062 |
+
>>> output = m(input)
|
| 1063 |
+
|
| 1064 |
+
"""
|
| 1065 |
+
|
| 1066 |
+
output_size: _size_2_opt_t
|
| 1067 |
+
|
| 1068 |
+
def forward(self, input: Tensor):
|
| 1069 |
+
return F.adaptive_max_pool2d(input, self.output_size, self.return_indices)
|
| 1070 |
+
|
| 1071 |
+
|
| 1072 |
+
class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd):
|
| 1073 |
+
r"""Applies a 3D adaptive max pooling over an input signal composed of several input planes.
|
| 1074 |
+
|
| 1075 |
+
The output is of size :math:`D_{out} \times H_{out} \times W_{out}`, for any input size.
|
| 1076 |
+
The number of output features is equal to the number of input planes.
|
| 1077 |
+
|
| 1078 |
+
Args:
|
| 1079 |
+
output_size: the target output size of the image of the form :math:`D_{out} \times H_{out} \times W_{out}`.
|
| 1080 |
+
Can be a tuple :math:`(D_{out}, H_{out}, W_{out})` or a single
|
| 1081 |
+
:math:`D_{out}` for a cube :math:`D_{out} \times D_{out} \times D_{out}`.
|
| 1082 |
+
:math:`D_{out}`, :math:`H_{out}` and :math:`W_{out}` can be either a
|
| 1083 |
+
``int``, or ``None`` which means the size will be the same as that of the input.
|
| 1084 |
+
|
| 1085 |
+
return_indices: if ``True``, will return the indices along with the outputs.
|
| 1086 |
+
Useful to pass to nn.MaxUnpool3d. Default: ``False``
|
| 1087 |
+
|
| 1088 |
+
Shape:
|
| 1089 |
+
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
|
| 1090 |
+
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
|
| 1091 |
+
where :math:`(D_{out}, H_{out}, W_{out})=\text{output\_size}`.
|
| 1092 |
+
|
| 1093 |
+
Examples:
|
| 1094 |
+
>>> # target output size of 5x7x9
|
| 1095 |
+
>>> m = nn.AdaptiveMaxPool3d((5, 7, 9))
|
| 1096 |
+
>>> input = torch.randn(1, 64, 8, 9, 10)
|
| 1097 |
+
>>> output = m(input)
|
| 1098 |
+
>>> # target output size of 7x7x7 (cube)
|
| 1099 |
+
>>> m = nn.AdaptiveMaxPool3d(7)
|
| 1100 |
+
>>> input = torch.randn(1, 64, 10, 9, 8)
|
| 1101 |
+
>>> output = m(input)
|
| 1102 |
+
>>> # target output size of 7x9x8
|
| 1103 |
+
>>> m = nn.AdaptiveMaxPool3d((7, None, None))
|
| 1104 |
+
>>> input = torch.randn(1, 64, 10, 9, 8)
|
| 1105 |
+
>>> output = m(input)
|
| 1106 |
+
|
| 1107 |
+
"""
|
| 1108 |
+
|
| 1109 |
+
output_size: _size_3_opt_t
|
| 1110 |
+
|
| 1111 |
+
def forward(self, input: Tensor):
|
| 1112 |
+
return F.adaptive_max_pool3d(input, self.output_size, self.return_indices)
|
| 1113 |
+
|
| 1114 |
+
|
| 1115 |
+
class _AdaptiveAvgPoolNd(Module):
|
| 1116 |
+
__constants__ = ['output_size']
|
| 1117 |
+
|
| 1118 |
+
def __init__(self, output_size: _size_any_opt_t) -> None:
|
| 1119 |
+
super().__init__()
|
| 1120 |
+
self.output_size = output_size
|
| 1121 |
+
|
| 1122 |
+
def extra_repr(self) -> str:
|
| 1123 |
+
return f'output_size={self.output_size}'
|
| 1124 |
+
|
| 1125 |
+
|
| 1126 |
+
class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd):
|
| 1127 |
+
r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes.
|
| 1128 |
+
|
| 1129 |
+
The output size is :math:`L_{out}`, for any input size.
|
| 1130 |
+
The number of output features is equal to the number of input planes.
|
| 1131 |
+
|
| 1132 |
+
Args:
|
| 1133 |
+
output_size: the target output size :math:`L_{out}`.
|
| 1134 |
+
|
| 1135 |
+
Shape:
|
| 1136 |
+
- Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
|
| 1137 |
+
- Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
|
| 1138 |
+
:math:`L_{out}=\text{output\_size}`.
|
| 1139 |
+
|
| 1140 |
+
Examples:
|
| 1141 |
+
>>> # target output size of 5
|
| 1142 |
+
>>> m = nn.AdaptiveAvgPool1d(5)
|
| 1143 |
+
>>> input = torch.randn(1, 64, 8)
|
| 1144 |
+
>>> output = m(input)
|
| 1145 |
+
|
| 1146 |
+
"""
|
| 1147 |
+
|
| 1148 |
+
output_size: _size_1_t
|
| 1149 |
+
|
| 1150 |
+
def forward(self, input: Tensor) -> Tensor:
|
| 1151 |
+
return F.adaptive_avg_pool1d(input, self.output_size)
|
| 1152 |
+
|
| 1153 |
+
|
| 1154 |
+
class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
|
| 1155 |
+
r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes.
|
| 1156 |
+
|
| 1157 |
+
The output is of size H x W, for any input size.
|
| 1158 |
+
The number of output features is equal to the number of input planes.
|
| 1159 |
+
|
| 1160 |
+
Args:
|
| 1161 |
+
output_size: the target output size of the image of the form H x W.
|
| 1162 |
+
Can be a tuple (H, W) or a single H for a square image H x H.
|
| 1163 |
+
H and W can be either a ``int``, or ``None`` which means the size will
|
| 1164 |
+
be the same as that of the input.
|
| 1165 |
+
|
| 1166 |
+
Shape:
|
| 1167 |
+
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
|
| 1168 |
+
- Output: :math:`(N, C, S_{0}, S_{1})` or :math:`(C, S_{0}, S_{1})`, where
|
| 1169 |
+
:math:`S=\text{output\_size}`.
|
| 1170 |
+
|
| 1171 |
+
Examples:
|
| 1172 |
+
>>> # target output size of 5x7
|
| 1173 |
+
>>> m = nn.AdaptiveAvgPool2d((5, 7))
|
| 1174 |
+
>>> input = torch.randn(1, 64, 8, 9)
|
| 1175 |
+
>>> output = m(input)
|
| 1176 |
+
>>> # target output size of 7x7 (square)
|
| 1177 |
+
>>> m = nn.AdaptiveAvgPool2d(7)
|
| 1178 |
+
>>> input = torch.randn(1, 64, 10, 9)
|
| 1179 |
+
>>> output = m(input)
|
| 1180 |
+
>>> # target output size of 10x7
|
| 1181 |
+
>>> m = nn.AdaptiveAvgPool2d((None, 7))
|
| 1182 |
+
>>> input = torch.randn(1, 64, 10, 9)
|
| 1183 |
+
>>> output = m(input)
|
| 1184 |
+
|
| 1185 |
+
"""
|
| 1186 |
+
|
| 1187 |
+
output_size: _size_2_opt_t
|
| 1188 |
+
|
| 1189 |
+
def forward(self, input: Tensor) -> Tensor:
|
| 1190 |
+
return F.adaptive_avg_pool2d(input, self.output_size)
|
| 1191 |
+
|
| 1192 |
+
|
| 1193 |
+
class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd):
|
| 1194 |
+
r"""Applies a 3D adaptive average pooling over an input signal composed of several input planes.
|
| 1195 |
+
|
| 1196 |
+
The output is of size D x H x W, for any input size.
|
| 1197 |
+
The number of output features is equal to the number of input planes.
|
| 1198 |
+
|
| 1199 |
+
Args:
|
| 1200 |
+
output_size: the target output size of the form D x H x W.
|
| 1201 |
+
Can be a tuple (D, H, W) or a single number D for a cube D x D x D.
|
| 1202 |
+
D, H and W can be either a ``int``, or ``None`` which means the size will
|
| 1203 |
+
be the same as that of the input.
|
| 1204 |
+
|
| 1205 |
+
Shape:
|
| 1206 |
+
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
|
| 1207 |
+
- Output: :math:`(N, C, S_{0}, S_{1}, S_{2})` or :math:`(C, S_{0}, S_{1}, S_{2})`,
|
| 1208 |
+
where :math:`S=\text{output\_size}`.
|
| 1209 |
+
|
| 1210 |
+
Examples:
|
| 1211 |
+
>>> # target output size of 5x7x9
|
| 1212 |
+
>>> m = nn.AdaptiveAvgPool3d((5, 7, 9))
|
| 1213 |
+
>>> input = torch.randn(1, 64, 8, 9, 10)
|
| 1214 |
+
>>> output = m(input)
|
| 1215 |
+
>>> # target output size of 7x7x7 (cube)
|
| 1216 |
+
>>> m = nn.AdaptiveAvgPool3d(7)
|
| 1217 |
+
>>> input = torch.randn(1, 64, 10, 9, 8)
|
| 1218 |
+
>>> output = m(input)
|
| 1219 |
+
>>> # target output size of 7x9x8
|
| 1220 |
+
>>> m = nn.AdaptiveAvgPool3d((7, None, None))
|
| 1221 |
+
>>> input = torch.randn(1, 64, 10, 9, 8)
|
| 1222 |
+
>>> output = m(input)
|
| 1223 |
+
|
| 1224 |
+
"""
|
| 1225 |
+
|
| 1226 |
+
output_size: _size_3_opt_t
|
| 1227 |
+
|
| 1228 |
+
def forward(self, input: Tensor) -> Tensor:
|
| 1229 |
+
return F.adaptive_avg_pool3d(input, self.output_size)
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/modules/sparse.py
ADDED
|
@@ -0,0 +1,455 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch import Tensor
|
| 5 |
+
from torch.nn.parameter import Parameter
|
| 6 |
+
|
| 7 |
+
from .module import Module
|
| 8 |
+
from .. import functional as F
|
| 9 |
+
from .. import init
|
| 10 |
+
|
| 11 |
+
__all__ = ['Embedding', 'EmbeddingBag']
|
| 12 |
+
|
| 13 |
+
class Embedding(Module):
|
| 14 |
+
r"""A simple lookup table that stores embeddings of a fixed dictionary and size.
|
| 15 |
+
|
| 16 |
+
This module is often used to store word embeddings and retrieve them using indices.
|
| 17 |
+
The input to the module is a list of indices, and the output is the corresponding
|
| 18 |
+
word embeddings.
|
| 19 |
+
|
| 20 |
+
Args:
|
| 21 |
+
num_embeddings (int): size of the dictionary of embeddings
|
| 22 |
+
embedding_dim (int): the size of each embedding vector
|
| 23 |
+
padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
|
| 24 |
+
therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
|
| 25 |
+
i.e. it remains as a fixed "pad". For a newly constructed Embedding,
|
| 26 |
+
the embedding vector at :attr:`padding_idx` will default to all zeros,
|
| 27 |
+
but can be updated to another value to be used as the padding vector.
|
| 28 |
+
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
|
| 29 |
+
is renormalized to have norm :attr:`max_norm`.
|
| 30 |
+
norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
|
| 31 |
+
scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of
|
| 32 |
+
the words in the mini-batch. Default ``False``.
|
| 33 |
+
sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor.
|
| 34 |
+
See Notes for more details regarding sparse gradients.
|
| 35 |
+
|
| 36 |
+
Attributes:
|
| 37 |
+
weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)
|
| 38 |
+
initialized from :math:`\mathcal{N}(0, 1)`
|
| 39 |
+
|
| 40 |
+
Shape:
|
| 41 |
+
- Input: :math:`(*)`, IntTensor or LongTensor of arbitrary shape containing the indices to extract
|
| 42 |
+
- Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}`
|
| 43 |
+
|
| 44 |
+
.. note::
|
| 45 |
+
Keep in mind that only a limited number of optimizers support
|
| 46 |
+
sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`),
|
| 47 |
+
:class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`)
|
| 48 |
+
|
| 49 |
+
.. note::
|
| 50 |
+
When :attr:`max_norm` is not ``None``, :class:`Embedding`'s forward method will modify the
|
| 51 |
+
:attr:`weight` tensor in-place. Since tensors needed for gradient computations cannot be
|
| 52 |
+
modified in-place, performing a differentiable operation on ``Embedding.weight`` before
|
| 53 |
+
calling :class:`Embedding`'s forward method requires cloning ``Embedding.weight`` when
|
| 54 |
+
:attr:`max_norm` is not ``None``. For example::
|
| 55 |
+
|
| 56 |
+
n, d, m = 3, 5, 7
|
| 57 |
+
embedding = nn.Embedding(n, d, max_norm=True)
|
| 58 |
+
W = torch.randn((m, d), requires_grad=True)
|
| 59 |
+
idx = torch.tensor([1, 2])
|
| 60 |
+
a = embedding.weight.clone() @ W.t() # weight must be cloned for this to be differentiable
|
| 61 |
+
b = embedding(idx) @ W.t() # modifies weight in-place
|
| 62 |
+
out = (a.unsqueeze(0) + b.unsqueeze(1))
|
| 63 |
+
loss = out.sigmoid().prod()
|
| 64 |
+
loss.backward()
|
| 65 |
+
|
| 66 |
+
Examples::
|
| 67 |
+
|
| 68 |
+
>>> # an Embedding module containing 10 tensors of size 3
|
| 69 |
+
>>> embedding = nn.Embedding(10, 3)
|
| 70 |
+
>>> # a batch of 2 samples of 4 indices each
|
| 71 |
+
>>> input = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
|
| 72 |
+
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
|
| 73 |
+
>>> embedding(input)
|
| 74 |
+
tensor([[[-0.0251, -1.6902, 0.7172],
|
| 75 |
+
[-0.6431, 0.0748, 0.6969],
|
| 76 |
+
[ 1.4970, 1.3448, -0.9685],
|
| 77 |
+
[-0.3677, -2.7265, -0.1685]],
|
| 78 |
+
|
| 79 |
+
[[ 1.4970, 1.3448, -0.9685],
|
| 80 |
+
[ 0.4362, -0.4004, 0.9400],
|
| 81 |
+
[-0.6431, 0.0748, 0.6969],
|
| 82 |
+
[ 0.9124, -2.3616, 1.1151]]])
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
>>> # example with padding_idx
|
| 86 |
+
>>> embedding = nn.Embedding(10, 3, padding_idx=0)
|
| 87 |
+
>>> input = torch.LongTensor([[0, 2, 0, 5]])
|
| 88 |
+
>>> embedding(input)
|
| 89 |
+
tensor([[[ 0.0000, 0.0000, 0.0000],
|
| 90 |
+
[ 0.1535, -2.0309, 0.9315],
|
| 91 |
+
[ 0.0000, 0.0000, 0.0000],
|
| 92 |
+
[-0.1655, 0.9897, 0.0635]]])
|
| 93 |
+
|
| 94 |
+
>>> # example of changing `pad` vector
|
| 95 |
+
>>> padding_idx = 0
|
| 96 |
+
>>> embedding = nn.Embedding(3, 3, padding_idx=padding_idx)
|
| 97 |
+
>>> embedding.weight
|
| 98 |
+
Parameter containing:
|
| 99 |
+
tensor([[ 0.0000, 0.0000, 0.0000],
|
| 100 |
+
[-0.7895, -0.7089, -0.0364],
|
| 101 |
+
[ 0.6778, 0.5803, 0.2678]], requires_grad=True)
|
| 102 |
+
>>> with torch.no_grad():
|
| 103 |
+
... embedding.weight[padding_idx] = torch.ones(3)
|
| 104 |
+
>>> embedding.weight
|
| 105 |
+
Parameter containing:
|
| 106 |
+
tensor([[ 1.0000, 1.0000, 1.0000],
|
| 107 |
+
[-0.7895, -0.7089, -0.0364],
|
| 108 |
+
[ 0.6778, 0.5803, 0.2678]], requires_grad=True)
|
| 109 |
+
"""
|
| 110 |
+
|
| 111 |
+
__constants__ = ['num_embeddings', 'embedding_dim', 'padding_idx', 'max_norm',
|
| 112 |
+
'norm_type', 'scale_grad_by_freq', 'sparse']
|
| 113 |
+
|
| 114 |
+
num_embeddings: int
|
| 115 |
+
embedding_dim: int
|
| 116 |
+
padding_idx: Optional[int]
|
| 117 |
+
max_norm: Optional[float]
|
| 118 |
+
norm_type: float
|
| 119 |
+
scale_grad_by_freq: bool
|
| 120 |
+
weight: Tensor
|
| 121 |
+
freeze: bool
|
| 122 |
+
sparse: bool
|
| 123 |
+
|
| 124 |
+
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
             max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
             sparse: bool = False, _weight: Optional[Tensor] = None, _freeze: bool = False,
             device=None, dtype=None) -> None:
    """Set up the embedding lookup table and record its configuration.

    The weight matrix is either freshly allocated (then initialized via
    ``reset_parameters``) or taken verbatim from ``_weight``; in both cases
    ``requires_grad`` is the negation of ``_freeze``.
    """
    factory_kwargs = {'device': device, 'dtype': dtype}
    super().__init__()
    self.num_embeddings = num_embeddings
    self.embedding_dim = embedding_dim
    self.max_norm = max_norm
    self.norm_type = norm_type
    self.scale_grad_by_freq = scale_grad_by_freq
    self.sparse = sparse
    if padding_idx is not None:
        # A negative index counts from the end of the table; bounds-check,
        # then canonicalize it to its non-negative equivalent.
        if padding_idx > 0:
            assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
        elif padding_idx < 0:
            assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
            padding_idx = self.num_embeddings + padding_idx
    self.padding_idx = padding_idx
    if _weight is not None:
        assert list(_weight.shape) == [num_embeddings, embedding_dim], \
            'Shape of weight does not match num_embeddings and embedding_dim'
        self.weight = Parameter(_weight, requires_grad=not _freeze)
    else:
        self.weight = Parameter(torch.empty((num_embeddings, embedding_dim), **factory_kwargs),
                                requires_grad=not _freeze)
        self.reset_parameters()
|
| 152 |
+
|
| 153 |
+
def reset_parameters(self) -> None:
    """Reinitialize :attr:`weight` from the standard normal distribution.

    After drawing fresh values, the row at ``padding_idx`` (if one is set)
    is zeroed again so the padding vector stays all-zero.
    """
    init.normal_(self.weight)
    self._fill_padding_idx_with_zero()
|
| 156 |
+
|
| 157 |
+
def _fill_padding_idx_with_zero(self) -> None:
|
| 158 |
+
if self.padding_idx is not None:
|
| 159 |
+
with torch.no_grad():
|
| 160 |
+
self.weight[self.padding_idx].fill_(0)
|
| 161 |
+
|
| 162 |
+
def forward(self, input: Tensor) -> Tensor:
    """Look up rows of :attr:`weight` for each index in ``input``.

    Delegates entirely to :func:`torch.nn.functional.embedding`, forwarding
    this module's stored configuration.
    """
    return F.embedding(
        input,
        self.weight,
        padding_idx=self.padding_idx,
        max_norm=self.max_norm,
        norm_type=self.norm_type,
        scale_grad_by_freq=self.scale_grad_by_freq,
        sparse=self.sparse,
    )
|
| 166 |
+
|
| 167 |
+
def extra_repr(self) -> str:
    """Summarize the constructor arguments shown in this module's ``repr``."""
    pieces = ['{num_embeddings}, {embedding_dim}']
    if self.padding_idx is not None:
        pieces.append(', padding_idx={padding_idx}')
    if self.max_norm is not None:
        pieces.append(', max_norm={max_norm}')
    if self.norm_type != 2:
        pieces.append(', norm_type={norm_type}')
    if self.scale_grad_by_freq is not False:
        pieces.append(', scale_grad_by_freq={scale_grad_by_freq}')
    if self.sparse is not False:
        # ``sparse`` is rendered as a fixed literal rather than formatted
        # from the instance dict.
        pieces.append(', sparse=True')
    return ''.join(pieces).format(**self.__dict__)
|
| 180 |
+
|
| 181 |
+
@classmethod
def from_pretrained(cls, embeddings, freeze=True, padding_idx=None,
                    max_norm=None, norm_type=2., scale_grad_by_freq=False,
                    sparse=False):
    r"""Build an :class:`Embedding` whose weight is the given 2D tensor.

    Args:
        embeddings (Tensor): FloatTensor holding the pretrained weights; its
            first dimension becomes ``num_embeddings`` and its second
            ``embedding_dim``.
        freeze (bool, optional): If ``True`` (the default) the weight is
            excluded from gradient updates, i.e.
            ``embedding.weight.requires_grad = False``.
        padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
            therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
            i.e. it remains as a fixed "pad".
        max_norm (float, optional): See module initialization documentation.
        norm_type (float, optional): See module initialization documentation. Default ``2``.
        scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``.
        sparse (bool, optional): See module initialization documentation.

    Examples::

        >>> # FloatTensor containing pretrained weights
        >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
        >>> embedding = nn.Embedding.from_pretrained(weight)
        >>> # Get embeddings for index 1
        >>> input = torch.LongTensor([1])
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> embedding(input)
        tensor([[ 4.0000,  5.1000,  6.3000]])
    """
    assert embeddings.dim() == 2, \
        'Embeddings parameter is expected to be 2-dimensional'
    rows, cols = embeddings.shape
    # Route everything through the regular constructor; ``_freeze`` controls
    # requires_grad on the resulting Parameter.
    return cls(
        num_embeddings=rows,
        embedding_dim=cols,
        _weight=embeddings,
        _freeze=freeze,
        padding_idx=padding_idx,
        max_norm=max_norm,
        norm_type=norm_type,
        scale_grad_by_freq=scale_grad_by_freq,
        sparse=sparse)
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
class EmbeddingBag(Module):
    r"""Compute sums or means of 'bags' of embeddings, without instantiating the intermediate embeddings.

    For bags of constant length, no :attr:`per_sample_weights`, no indices equal to :attr:`padding_idx`,
    and with 2D inputs, this class

        * with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=1)``,
        * with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=1)``,
        * with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=1)``.

    However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these
    operations.

    EmbeddingBag also supports per-sample weights as an argument to the forward
    pass. This scales the output of the Embedding before performing a weighted
    reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the
    only supported ``mode`` is ``"sum"``, which computes a weighted sum according to
    :attr:`per_sample_weights`.

    Args:
        num_embeddings (int): size of the dictionary of embeddings
        embedding_dim (int): the size of each embedding vector
        max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
                                    is renormalized to have norm :attr:`max_norm`.
        norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
        scale_grad_by_freq (bool, optional): if given, this will scale gradients by the inverse of frequency of
                                                the words in the mini-batch. Default ``False``.
                                                Note: this option is not supported when ``mode="max"``.
        mode (str, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
                                 ``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights`
                                 into consideration. ``"mean"`` computes the average of the values
                                 in the bag, ``"max"`` computes the max value over each bag.
                                 Default: ``"mean"``
        sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See
                                 Notes for more details regarding sparse gradients. Note: this option is not
                                 supported when ``mode="max"``.
        include_last_offset (bool, optional): if ``True``, :attr:`offsets` has one additional element, where the last element
                                              is equivalent to the size of `indices`. This matches the CSR format.
        padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the
                                     gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated
                                     during training, i.e. it remains as a fixed "pad". For a newly constructed
                                     EmbeddingBag, the embedding vector at :attr:`padding_idx` will default to all
                                     zeros, but can be updated to another value to be used as the padding vector.
                                     Note that the embedding vector at :attr:`padding_idx` is excluded from the
                                     reduction.

    Attributes:
        weight (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)`
                         initialized from :math:`\mathcal{N}(0, 1)`.

    Examples::

        >>> # an EmbeddingBag module containing 10 tensors of size 3
        >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum')
        >>> # a batch of 2 samples of 4 indices each
        >>> input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
        >>> offsets = torch.tensor([0, 4], dtype=torch.long)
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> embedding_sum(input, offsets)
        tensor([[-0.8861, -5.4350, -0.0523],
                [ 1.1306, -2.5798, -1.0044]])

        >>> # Example with padding_idx
        >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum', padding_idx=2)
        >>> input = torch.tensor([2, 2, 2, 2, 4, 3, 2, 9], dtype=torch.long)
        >>> offsets = torch.tensor([0, 4], dtype=torch.long)
        >>> embedding_sum(input, offsets)
        tensor([[ 0.0000,  0.0000,  0.0000],
                [-0.7082,  3.2145, -2.6251]])

        >>> # An EmbeddingBag can be loaded from an Embedding like so
        >>> embedding = nn.Embedding(10, 3, padding_idx=2)
        >>> embedding_sum = nn.EmbeddingBag.from_pretrained(
                embedding.weight,
                padding_idx=embedding.padding_idx,
                mode='sum')
    """

    # Names exposed to TorchScript as compile-time constants.
    __constants__ = ['num_embeddings', 'embedding_dim', 'max_norm', 'norm_type',
                     'scale_grad_by_freq', 'mode', 'sparse', 'include_last_offset',
                     'padding_idx']

    num_embeddings: int
    embedding_dim: int
    max_norm: Optional[float]
    norm_type: float
    scale_grad_by_freq: bool
    weight: Tensor
    mode: str
    sparse: bool
    include_last_offset: bool
    padding_idx: Optional[int]

    def __init__(self, num_embeddings: int, embedding_dim: int,
                 max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
                 mode: str = 'mean', sparse: bool = False, _weight: Optional[Tensor] = None,
                 include_last_offset: bool = False, padding_idx: Optional[int] = None,
                 device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        if padding_idx is not None:
            # Bounds-check padding_idx and canonicalize a negative index to
            # its non-negative equivalent before storing it.
            if padding_idx > 0:
                assert padding_idx < self.num_embeddings, 'padding_idx must be within num_embeddings'
            elif padding_idx < 0:
                assert padding_idx >= -self.num_embeddings, 'padding_idx must be within num_embeddings'
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        if _weight is None:
            # Fresh table: allocate, then initialize from N(0, 1) and zero
            # the padding row via reset_parameters().
            self.weight = Parameter(torch.empty((num_embeddings, embedding_dim), **factory_kwargs))
            self.reset_parameters()
        else:
            assert list(_weight.shape) == [num_embeddings, embedding_dim], \
                'Shape of weight does not match num_embeddings and embedding_dim'
            self.weight = Parameter(_weight)
        self.mode = mode
        self.sparse = sparse
        self.include_last_offset = include_last_offset

    def reset_parameters(self) -> None:
        """Reinitialize :attr:`weight` from N(0, 1) and re-zero the padding row."""
        init.normal_(self.weight)
        self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        # In-place write, kept out of the autograd graph.
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor, offsets: Optional[Tensor] = None, per_sample_weights: Optional[Tensor] = None) -> Tensor:
        """Forward pass of EmbeddingBag.

        Args:
            input (Tensor): Tensor containing bags of indices into the embedding matrix.
            offsets (Tensor, optional): Only used when :attr:`input` is 1D. :attr:`offsets` determines
                the starting index position of each bag (sequence) in :attr:`input`.
            per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
                to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights`
                must have exactly the same shape as input and is treated as having the same
                :attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.

        Returns:
            Tensor output shape of `(B, embedding_dim)`.

        .. note::

            A few notes about ``input`` and ``offsets``:

            - :attr:`input` and :attr:`offsets` have to be of the same type, either int or long

            - If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B`` bags (sequences)
              each of fixed length ``N``, and this will return ``B`` values aggregated in a way
              depending on the :attr:`mode`. :attr:`offsets` is ignored and required to be ``None`` in this case.

            - If :attr:`input` is 1D of shape `(N)`, it will be treated as a concatenation of
              multiple bags (sequences). :attr:`offsets` is required to be a 1D tensor containing the
              starting index positions of each bag in :attr:`input`. Therefore, for :attr:`offsets` of shape `(B)`,
              :attr:`input` will be viewed as having ``B`` bags. Empty bags (i.e., having 0-length) will have
              returned vectors filled by zeros.
        """
        # All reduction logic lives in the functional implementation; this
        # module only stores configuration and the weight table.
        return F.embedding_bag(input, self.weight, offsets,
                               self.max_norm, self.norm_type,
                               self.scale_grad_by_freq, self.mode, self.sparse,
                               per_sample_weights, self.include_last_offset,
                               self.padding_idx)

    def extra_repr(self) -> str:
        # Unlike Embedding.extra_repr, values are formatted with repr(), so
        # e.g. ``mode`` is rendered with quotes.
        s = '{num_embeddings}, {embedding_dim}'
        if self.max_norm is not None:
            s += ', max_norm={max_norm}'
        if self.norm_type != 2:
            s += ', norm_type={norm_type}'
        if self.scale_grad_by_freq is not False:
            s += ', scale_grad_by_freq={scale_grad_by_freq}'
        s += ', mode={mode}'
        if self.padding_idx is not None:
            s += ', padding_idx={padding_idx}'
        return s.format(**{k: repr(v) for k, v in self.__dict__.items()})

    @classmethod
    def from_pretrained(cls, embeddings: Tensor, freeze: bool = True, max_norm: Optional[float] = None,
                        norm_type: float = 2., scale_grad_by_freq: bool = False,
                        mode: str = 'mean', sparse: bool = False, include_last_offset: bool = False,
                        padding_idx: Optional[int] = None) -> 'EmbeddingBag':
        r"""Create EmbeddingBag instance from given 2-dimensional FloatTensor.

        Args:
            embeddings (Tensor): FloatTensor containing weights for the EmbeddingBag.
                First dimension is being passed to EmbeddingBag as 'num_embeddings', second as 'embedding_dim'.
            freeze (bool, optional): If ``True``, the tensor does not get updated in the learning process.
                Equivalent to ``embeddingbag.weight.requires_grad = False``. Default: ``True``
            max_norm (float, optional): See module initialization documentation. Default: ``None``
            norm_type (float, optional): See module initialization documentation. Default ``2``.
            scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``.
            mode (str, optional): See module initialization documentation. Default: ``"mean"``
            sparse (bool, optional): See module initialization documentation. Default: ``False``.
            include_last_offset (bool, optional): See module initialization documentation. Default: ``False``.
            padding_idx (int, optional): See module initialization documentation. Default: ``None``.

        Examples::

            >>> # FloatTensor containing pretrained weights
            >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
            >>> embeddingbag = nn.EmbeddingBag.from_pretrained(weight)
            >>> # Get embeddings for index 1
            >>> input = torch.LongTensor([[1, 0]])
            >>> # xdoctest: +IGNORE_WANT("non-deterministic")
            >>> embeddingbag(input)
            tensor([[ 2.5000,  3.7000,  4.6500]])
        """
        assert embeddings.dim() == 2, \
            'Embeddings parameter is expected to be 2-dimensional'
        rows, cols = embeddings.shape
        embeddingbag = cls(
            num_embeddings=rows,
            embedding_dim=cols,
            _weight=embeddings,
            max_norm=max_norm,
            norm_type=norm_type,
            scale_grad_by_freq=scale_grad_by_freq,
            mode=mode,
            sparse=sparse,
            include_last_offset=include_last_offset,
            padding_idx=padding_idx)
        # NOTE: unlike Embedding.from_pretrained (which passes _freeze to the
        # constructor), freezing is applied after construction here.
        embeddingbag.weight.requires_grad = not freeze
        return embeddingbag
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .linear import Linear
|
| 2 |
+
|
| 3 |
+
__all__ = ["Linear"]
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/embedding_ops.cpython-310.pyc
ADDED
|
Binary file (654 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__init__.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import rnn
|
| 2 |
+
from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
|
| 3 |
+
from .weight_norm import weight_norm, remove_weight_norm
|
| 4 |
+
from .convert_parameters import parameters_to_vector, vector_to_parameters
|
| 5 |
+
from .spectral_norm import spectral_norm, remove_spectral_norm
|
| 6 |
+
from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights, fuse_linear_bn_eval, fuse_linear_bn_weights
|
| 7 |
+
from .memory_format import convert_conv2d_weight_memory_format
|
| 8 |
+
from . import parametrizations
|
| 9 |
+
from .init import skip_init
|
| 10 |
+
from . import stateless
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
"clip_grad_norm",
|
| 14 |
+
"clip_grad_norm_",
|
| 15 |
+
"clip_grad_value_",
|
| 16 |
+
"convert_conv2d_weight_memory_format",
|
| 17 |
+
"fuse_conv_bn_eval",
|
| 18 |
+
"fuse_conv_bn_weights",
|
| 19 |
+
"fuse_linear_bn_eval",
|
| 20 |
+
"fuse_linear_bn_weights",
|
| 21 |
+
"parameters_to_vector",
|
| 22 |
+
"parametrizations",
|
| 23 |
+
"remove_spectral_norm",
|
| 24 |
+
"remove_weight_norm",
|
| 25 |
+
"rnn",
|
| 26 |
+
"skip_init",
|
| 27 |
+
"spectral_norm",
|
| 28 |
+
"stateless",
|
| 29 |
+
"vector_to_parameters",
|
| 30 |
+
"weight_norm",
|
| 31 |
+
]
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (964 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_deprecation_utils.cpython-310.pyc
ADDED
|
Binary file (1.84 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_named_member_accessor.cpython-310.pyc
ADDED
|
Binary file (12.1 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_per_sample_grad.cpython-310.pyc
ADDED
|
Binary file (5.29 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/clip_grad.cpython-310.pyc
ADDED
|
Binary file (5.84 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/convert_parameters.cpython-310.pyc
ADDED
|
Binary file (2.49 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/fusion.cpython-310.pyc
ADDED
|
Binary file (4.9 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/init.cpython-310.pyc
ADDED
|
Binary file (2.37 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/memory_format.cpython-310.pyc
ADDED
|
Binary file (3.9 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrizations.cpython-310.pyc
ADDED
|
Binary file (17.9 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrize.cpython-310.pyc
ADDED
|
Binary file (23.1 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/prune.cpython-310.pyc
ADDED
|
Binary file (46.6 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/rnn.cpython-310.pyc
ADDED
|
Binary file (19.5 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/spectral_norm.cpython-310.pyc
ADDED
|
Binary file (9.79 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/stateless.cpython-310.pyc
ADDED
|
Binary file (8.59 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/__pycache__/weight_norm.cpython-310.pyc
ADDED
|
Binary file (5.79 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/torch/nn/utils/_deprecation_utils.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Callable
|
| 2 |
+
import importlib
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
_MESSAGE_TEMPLATE = r"Usage of '{old_location}' is deprecated; please use '{new_location}' instead."


def lazy_deprecated_import(all: List[str], old_module: str, new_module: str) -> Callable:
    r"""Build a module-level ``__getattr__`` that lazily redirects to a new location.

    Accessing any name listed in *all* emits a deprecation warning (built from
    `_MESSAGE_TEMPLATE`) and resolves the attribute from *new_module* instead
    of the deprecated *old_module*.

    Args:
        all: The list of the functions that are imported. Generally, the module's
            __all__ list of the module.
        old_module: Old module location
        new_module: New module location / Migrated location

    Returns:
        Callable to assign to the `__getattr__`

    Usage:

        # In the `torch/nn/quantized/functional.py`
        from torch.nn.utils._deprecation_utils import lazy_deprecated_import
        _MIGRATED_TO = "torch.ao.nn.quantized.functional"
        __getattr__ = lazy_deprecated_import(
            all=__all__,
            old_module=__name__,
            new_module=_MIGRATED_TO)
    """
    warning_message = _MESSAGE_TEMPLATE.format(
        old_location=old_module, new_location=new_module)

    def getattr_dunder(name):
        if name not in all:
            raise AttributeError(f"Module {new_module!r} has no attribute {name!r}.")
        # RuntimeWarning is deliberately used so the deprecation message is
        # shown even under the default warning filters.
        warnings.warn(warning_message, RuntimeWarning)
        return getattr(importlib.import_module(new_module), name)

    return getattr_dunder
|