diff --git a/.gitattributes b/.gitattributes index 1f7b9ccd600ac8a46585f97cb338382f88074192..e0089606536af5db965423c1a70004b229064422 100644 --- a/.gitattributes +++ b/.gitattributes @@ -214,3 +214,4 @@ wemm/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310 wemm/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text wemm/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text wemm/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text +wemm/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/wemm/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so b/wemm/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e8b928cb105c8504dc4a7ca736380db23de8d4d9 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f317d5ffb73de685f0b086df5593d098347cb711e611d7cd23ab2a859b7b7735 +size 2627272 diff --git a/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/common_types.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/common_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e58da5cd196e98554d02f60044bb4b4b378a034e Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/common_types.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1d604960d5e6f9f2662e9d7481230d5dd5a2224 Binary files /dev/null and 
b/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42354413d75c38c2276ea0497252351f4789ecf4 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59d013602ffb5956347b49a428d059b401f4b808 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40c49c552f8fb09322fe4ca24fd6f651d09dd2ad Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/backends/__init__.py b/wemm/lib/python3.10/site-packages/torch/nn/backends/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/wemm/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91341e0e3674606ac929d89208c5bc08ebf102dd Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/wemm/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a991020375e9ce7bf13f4cd2d0264629b713dff6 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/modules/fused.py b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/modules/fused.py new file mode 100644 index 0000000000000000000000000000000000000000..dc962f956427ec6f6e6b1d0580a1d5c73bd9cd29 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/modules/fused.py @@ -0,0 +1,30 @@ +from torch.ao.nn.intrinsic import BNReLU2d +from torch.ao.nn.intrinsic import BNReLU3d +from torch.ao.nn.intrinsic import ConvBn1d +from torch.ao.nn.intrinsic import ConvBn2d +from torch.ao.nn.intrinsic import ConvBn3d +from torch.ao.nn.intrinsic import ConvBnReLU1d +from torch.ao.nn.intrinsic import ConvBnReLU2d +from torch.ao.nn.intrinsic import ConvBnReLU3d +from torch.ao.nn.intrinsic import ConvReLU1d +from torch.ao.nn.intrinsic import ConvReLU2d +from torch.ao.nn.intrinsic import ConvReLU3d +from torch.ao.nn.intrinsic import LinearBn1d +from torch.ao.nn.intrinsic import LinearReLU +from torch.ao.nn.intrinsic.modules.fused import _FusedModule # noqa: F401 + +__all__ = [ + 'BNReLU2d', + 'BNReLU3d', + 'ConvBn1d', + 'ConvBn2d', + 'ConvBn3d', + 'ConvBnReLU1d', + 'ConvBnReLU2d', + 'ConvBnReLU3d', + 'ConvReLU1d', + 'ConvReLU2d', + 'ConvReLU3d', + 'LinearBn1d', + 'LinearReLU', +] diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8e5fe658f43f5d9be5f60222417c852d24c3ad1 Binary files 
/dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39f1a72643deca1d9e21fc1d6b33dacd87ef1d19 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/conv_fused.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5b52545f057ce6ce98dd5a4ccf67e300c832f03 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_relu.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__init__.py b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d79bdbfe83209f18b17cc8c7b245f322871d6c0 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__init__.py @@ -0,0 +1 @@ +from .modules import * # noqa: F403 diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8f87737ddc5fa819489073d8a389d836a5fc44c Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc differ 
diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__init__.py b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea1885a6aec4e570a8eed81bd0cce61bf0e8390a --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__init__.py @@ -0,0 +1,5 @@ +from .linear_relu import LinearReLU + +__all__ = [ + 'LinearReLU', +] diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f623bc9ee980be869de8605d3621a22c89089b9 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3613477853d67f93b150aea91a553351523b337a Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/linear_relu.py b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/linear_relu.py new file mode 100644 index 0000000000000000000000000000000000000000..63cc8609e2d8580b994203c4fe58e0d2328dc7de --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/linear_relu.py @@ -0,0 +1,5 @@ +from 
torch.ao.nn.intrinsic.quantized.dynamic import LinearReLU + +__all__ = [ + 'LinearReLU', +] diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60d9a9823d08f979f1b3aa01cf5157b0a36cf6c8 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45ec47b6037f30cd58baadb9ba3b5d40997d9dc3 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b06fbe7948d7f717f6e2904926c12ff1923e12e9 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e96e40042b6e5af5e1a536e9be5a54c5c8d4f698 Binary files /dev/null and 
b/wemm/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/activation.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/activation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..873676ae595711ff3e485bc9f5e13615173125fb Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/activation.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pixelshuffle.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pixelshuffle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7255c79f440766bc43766ae21e07b34529d12649 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pixelshuffle.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/qat/__init__.py b/wemm/lib/python3.10/site-packages/torch/nn/qat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6b4c4a181adae1c40ca6bf2225e78699fcd9e246 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/qat/__init__.py @@ -0,0 +1,18 @@ +# flake8: noqa: F401 +r"""QAT Dynamic Modules + +This package is in the process of being deprecated. +Please, use `torch.ao.nn.qat.dynamic` instead. +""" +from . import dynamic # noqa: F403 +from . 
import modules # noqa: F403 +from .modules import * # noqa: F403 + +__all__ = [ + "Linear", + "Conv1d", + "Conv2d", + "Conv3d", + "Embedding", + "EmbeddingBag", +] diff --git a/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..902ffd2c9b9e7bc2835cd8f2145f6e6ca52ee843 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/embedding_ops.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/embedding_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4f4037adc000b4011c137dc02d4fb50cf1d890d Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/embedding_ops.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/linear.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ae4a0e00f71684506466d7c68b5857e9aefd18d Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/linear.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/conv.py b/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..a64b6ac6da97d2a6436222c0a719f69a56366e93 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/conv.py @@ -0,0 +1,12 @@ +# flake8: noqa: F401 +r"""QAT Modules + +This file is in the process of migration to `torch/ao/nn/qat`, and +is kept here for compatibility while the migration 
process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/qat/modules`, +while adding an import statement here. +""" +from torch.ao.nn.qat.modules.conv import Conv1d +from torch.ao.nn.qat.modules.conv import Conv2d +from torch.ao.nn.qat.modules.conv import Conv3d diff --git a/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/embedding_ops.py b/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/embedding_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..88c7f2dfd45cd7124c9dcf6c17b2bc3c0e0f43fc --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/embedding_ops.py @@ -0,0 +1,14 @@ +# flake8: noqa: F401 +r"""QAT Modules + +This file is in the process of migration to `torch/ao/nn/qat`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/qat/modules`, +while adding an import statement here. +""" + +__all__ = ['Embedding', 'EmbeddingBag'] + +from torch.ao.nn.qat.modules.embedding_ops import Embedding +from torch.ao.nn.qat.modules.embedding_ops import EmbeddingBag diff --git a/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/linear.py b/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..a35f3f8d7e0eef2dacbf8c43c092fbe0290425c3 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/qat/modules/linear.py @@ -0,0 +1,10 @@ +# flake8: noqa: F401 +r"""QAT Modules + +This file is in the process of migration to `torch/ao/nn/qat`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/qat/modules`, +while adding an import statement here. 
+""" +from torch.ao.nn.qat.modules.linear import Linear diff --git a/wemm/lib/python3.10/site-packages/torch/nn/quantizable/__init__.py b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d79bdbfe83209f18b17cc8c7b245f322871d6c0 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/__init__.py @@ -0,0 +1 @@ +from .modules import * # noqa: F403 diff --git a/wemm/lib/python3.10/site-packages/torch/nn/quantizable/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..168cf10fa137f6baf8cc581b3fab514251c7ea0a Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/__init__.py b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a1257b404b7346c6a96c4de3adb45c6e63564fac --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/__init__.py @@ -0,0 +1,9 @@ +from torch.ao.nn.quantizable.modules.activation import MultiheadAttention +from torch.ao.nn.quantizable.modules.rnn import LSTM +from torch.ao.nn.quantizable.modules.rnn import LSTMCell + +__all__ = [ + 'LSTM', + 'LSTMCell', + 'MultiheadAttention', +] diff --git a/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cca3dd41c75473d4ffd8e5a437aca88d12a85452 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc differ diff 
--git a/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25f07ac1cad6a7a723cf4dfe2cc5bc8ddc68899d Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/activation.py b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/activation.py new file mode 100644 index 0000000000000000000000000000000000000000..e854414ec8ca6e131ae96067cd88051ab51b96db --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/activation.py @@ -0,0 +1,10 @@ +# flake8: noqa: F401 +r"""Quantizable Modules + +This file is in the process of migration to `torch/ao/nn/quantizable`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantizable/modules`, +while adding an import statement here. +""" +from torch.ao.nn.quantizable.modules.activation import MultiheadAttention diff --git a/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/rnn.py b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..b3449bf71611e0b27ae7c5797a99b5f25da28318 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/quantizable/modules/rnn.py @@ -0,0 +1,11 @@ +# flake8: noqa: F401 +r"""Quantizable Modules + +This file is in the process of migration to `torch/ao/nn/quantizable`, and +is kept here for compatibility while the migration process is ongoing. 
+If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantizable/modules`, +while adding an import statement here. +""" +from torch.ao.nn.quantizable.modules.rnn import LSTM +from torch.ao.nn.quantizable.modules.rnn import LSTMCell diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61672c74b3230584ea4b4c988f215155b01b61ae Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_deprecation_utils.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_deprecation_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..750b0a5b3991fd946418d3b66f79a42a63f0392b Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_deprecation_utils.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_named_member_accessor.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_named_member_accessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..716e1859ea3dffa20ca40f819aa9a95a62e78268 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_named_member_accessor.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_per_sample_grad.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_per_sample_grad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59abfd26bd9d749904cd221c6a60559da1b32cbe Binary files /dev/null and 
b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_per_sample_grad.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/clip_grad.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/clip_grad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4a2d3ef863b3bfaa045771f009ae8837744a36f Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/clip_grad.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/convert_parameters.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/convert_parameters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39d3ceee7a93637a0285def81662eba217eda40d Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/convert_parameters.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/fusion.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/fusion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81bfea48b43e0ef3e695a8d3f8db90c9c3aabab8 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/fusion.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/init.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d524f008c9a6e339a0c814956d43a6507679f948 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/init.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/memory_format.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/memory_format.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..2bd94384cf78c42c68f2745d471287d08f880a43 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/memory_format.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrizations.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrizations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4796c099cbd525459149bba28d56f42e34174f2b Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrizations.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrize.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ff661277507f83024cc8af2ada48e4588a50ac9 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/parametrize.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/prune.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/prune.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77577c0e837e4802a4072b533751bfe20b169c64 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/prune.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/rnn.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7e972e2249bb612623a3219c2a14bb80d33ac6d Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/rnn.cpython-310.pyc differ diff --git 
a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/spectral_norm.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/spectral_norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b49e2e3aa684690209a87f7122a701c7a9fe9f1a Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/spectral_norm.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/stateless.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/stateless.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc9157882c31c36694d533a2ed96803af34dbeaf Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/stateless.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/weight_norm.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/weight_norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fce2857c9d06ec2b9dd4322c83e1f4ab380eb32f Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/__pycache__/weight_norm.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__init__.py b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..102474614238efec588ea4dc69d1d568d4fc60bb --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__init__.py @@ -0,0 +1,9 @@ +from .conv_expanded_weights import ConvPerSampleGrad +from .embedding_expanded_weights import EmbeddingPerSampleGrad +from .group_norm_expanded_weights import GroupNormPerSampleGrad +from .instance_norm_expanded_weights import InstanceNormPerSampleGrad +from .layer_norm_expanded_weights import LayerNormPerSampleGrad +from 
.linear_expanded_weights import LinearPerSampleGrad +from .expanded_weights_impl import ExpandedWeight + +__all__ = ['ExpandedWeight'] diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/__init__.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f8d8003a342a7190c31982bc8c9ae1a5d9d29ab Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/__init__.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_expanded_weights.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_expanded_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..734cc1d6a115faf779bdff2c136d0fdeea164653 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_expanded_weights.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_utils.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd71e66af04a7cd02098b7b53b836629a125717b Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_utils.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/embedding_expanded_weights.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/embedding_expanded_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1e2aa625d2032001794a1933fd078d7ca2f2bf1 Binary files /dev/null and 
b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/embedding_expanded_weights.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/expanded_weights_impl.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/expanded_weights_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59b7f75af8605236b31bfc0696c5e312fc5325b7 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/expanded_weights_impl.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/expanded_weights_utils.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/expanded_weights_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b5bd228d5dab93b3dc510e811556de36e104ba9 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/expanded_weights_utils.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/group_norm_expanded_weights.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/group_norm_expanded_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c050b70fd89f5526286e8fb971fed15ef3b1e327 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/group_norm_expanded_weights.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/instance_norm_expanded_weights.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/instance_norm_expanded_weights.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..68dddcf2bd51b442f4298dff5123c784c38bb820 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/instance_norm_expanded_weights.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/layer_norm_expanded_weights.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/layer_norm_expanded_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c312c38a98f54531c4a0febb5d03fece8b98c1a7 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/layer_norm_expanded_weights.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/linear_expanded_weights.cpython-310.pyc b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/linear_expanded_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9eb9c9192824001623d0be721f6d392b2e57620 Binary files /dev/null and b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/linear_expanded_weights.cpython-310.pyc differ diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_expanded_weights.py b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_expanded_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..c10ccb90ae92f1f57513de5c0ab7a56c26996298 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_expanded_weights.py @@ -0,0 +1,52 @@ +import torch +import torch.nn.functional as F + +from .conv_utils import conv_backward, conv_args_and_kwargs, conv_picker, conv_input_for_string_padding +from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads +from .expanded_weights_utils 
import forward_helper + +@implements_per_sample_grads(F.conv1d) +@implements_per_sample_grads(F.conv2d) +@implements_per_sample_grads(F.conv3d) +class ConvPerSampleGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, kwarg_names, conv_fn, *expanded_args_and_kwargs): + expanded_args, expanded_kwargs = conv_args_and_kwargs(kwarg_names, expanded_args_and_kwargs) + orig_input = expanded_args[0] + was_same_padding = expanded_kwargs['padding'] == "same" + + if isinstance(expanded_kwargs['padding'], str): + # if padding is a string, we'll do the necessary padding (slowly) using F.pad + kernel_size = expanded_args[1].shape[2:] + padding, dilation = expanded_kwargs['padding'], expanded_kwargs['dilation'] + input = conv_input_for_string_padding(conv_fn, padding, expanded_args[0], dilation, kernel_size) + expanded_args = (input, expanded_args[1]) + # since we've already done the padding, don't need any more + expanded_kwargs['padding'] = 0 + + output = forward_helper(conv_fn, expanded_args, expanded_kwargs) + input, weight = expanded_args + batched_dim_size = conv_picker(conv_fn, 3, 4, 5) + if input.dim() != batched_dim_size: + raise RuntimeError(f"Expanded Weights only support convolution with batched input, got {conv_fn} with an" + f"unbatched input of dim {input.dim()}, expected input of dim {batched_dim_size}") + + ctx.conv_fn = conv_fn + + ctx.batch_size = orig_input.shape[0] + ctx.input_required_grad = orig_input.requires_grad + ctx.orig_input_shape = orig_input.shape + ctx.was_same_padding = was_same_padding + ctx.stride, ctx.padding = expanded_kwargs['stride'], expanded_kwargs['padding'] + ctx.dilation, ctx.groups = expanded_kwargs['dilation'], expanded_kwargs['groups'] + + if isinstance(weight, ExpandedWeight): + ctx.input = input + ctx.weight = weight + ctx.bias = expanded_kwargs['bias'] + + return output + + @staticmethod + def backward(ctx, grad_output): + return conv_backward(ctx.conv_fn, ctx, grad_output) diff --git 
a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_utils.py b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5cea94b4493334590a7b8f62e248cebf9d8ac7a0 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_utils.py @@ -0,0 +1,240 @@ +import torch +import torch.nn.functional as F + +import numpy as np +from typing import List, Optional + +from .expanded_weights_utils import \ + set_grad_sample_if_exists, unpack_expanded_weight_or_tensor + +THRESHOLD = 32 + + +def conv_picker(func, conv1dOpt, conv2dOpt, conv3dOpt): + if func == F.conv1d: + return conv1dOpt + if func == F.conv2d: + return conv2dOpt + else: + assert func == F.conv3d + return conv3dOpt + + +def conv_args_and_kwargs(kwarg_names, expanded_args_and_kwargs): + args = expanded_args_and_kwargs[:len(expanded_args_and_kwargs) - len(kwarg_names)] + kwargs = expanded_args_and_kwargs[len(expanded_args_and_kwargs) - len(kwarg_names):] + kwargs = {name: arg for (name, arg) in zip(kwarg_names, kwargs)} + + return conv_normalizer(*args, **kwargs) + + +def conv_normalizer(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): + return (input, weight), {'bias': bias, 'stride': stride, 'padding': padding, 'dilation': dilation, 'groups': groups} + + +def conv_input_for_string_padding(func, padding_style, input, dilation, kernel_size): + if padding_style == "valid": + return input + else: + padding = int_padding_for_string_padding(func, padding_style, dilation, kernel_size) + return F.pad(input, padding) + + +def int_padding_for_string_padding(func, padding_style, dilation, kernel_size): + def get_dilation(i): + return dilation[i] if isinstance(dilation, tuple) else dilation + + if padding_style == "same": + padding: List[int] = [] + # F.pad needs the padding in reverse order from what conv expects + for i in range(conv_picker(func, 0, 1, 2), -1, 
-1): + padding += conv_padding_for_same(get_dilation(i), kernel_size[i]) + return padding + elif padding_style == "valid": + return conv_picker(func, 2, 4, 6) * (0,) + else: + raise RuntimeError(f"got padding type of {padding_style}, only accept 'same' or 'valid'") + + +def conv_padding_for_same(dilation, kernel_size): + total_pad = dilation * (kernel_size - 1) + left_pad = total_pad // 2 + right_pad = total_pad - left_pad + return left_pad, right_pad + + +def conv_backward(func, ctx, grad_output): + + def weight_grad_sample(weight): + if (batch_size < THRESHOLD and groups == 1): + return conv_group_weight_grad_sample(ctx.input, grad_output, weight_shape, stride, padding, dilation, batch_size, func) + else: + return conv_unfold_weight_grad_sample(ctx.input, grad_output, weight_shape, kernel_size, + stride, padding, dilation, groups, func) + + def expand(param): + if isinstance(param, int): + return conv_picker(func, (param,), (param, param), (param, param, param)) + else: + return param + + def calc_total_padding(func, was_same, padding, dilation, kernel_size): + if was_same: + all_padding = int_padding_for_string_padding(func, "same", dilation, kernel_size) + # F.pad needs the padding in reverse order from what conv expects + total_padding = tuple(all_padding[i] + all_padding[i - 1] for i in range(len(all_padding) - 1, -1, -2)) + return total_padding + else: + return tuple(2 * pad for pad in padding) + + weight_shape = ctx.weight.shape + stride, padding, dilation, groups = expand(ctx.stride), expand(ctx.padding), expand(ctx.dilation), ctx.groups + + kernel_size = [] + for i in range(2, conv_picker(func, 3, 4, 5)): + kernel_size.append(weight_shape[i]) + + batch_size = ctx.batch_size + results: List[Optional[torch.Tensor]] = [] + results.append(None) # for kwarg names + results.append(None) # for op reference + + # "same" padding may give uneven padding on either side so we need to separate the "padding" attr and total padding + total_padding = 
calc_total_padding(func, ctx.was_same_padding, padding, dilation, kernel_size) + + if ctx.input_required_grad: + output_padding = [] + input_dims = conv_picker(func, 1, 2, 3) + for i in range(input_dims): + input_dim = ctx.orig_input_shape[2 + i] + output_padding.append((total_padding[i] + input_dim - (kernel_size[i] * dilation[i] - dilation[i] + 1)) % stride[i]) + weight_ = unpack_expanded_weight_or_tensor(ctx.weight) + transpose_func = conv_picker(func, F.conv_transpose1d, F.conv_transpose2d, F.conv_transpose3d) + out = transpose_func(grad_output, weight_, None, stride, padding, tuple(output_padding), groups, dilation) + + if ctx.was_same_padding: + for i in range(len(total_padding)): + out = torch.narrow(out, 2 + i, total_padding[i] // 2, ctx.orig_input_shape[2 + i]) + + results.append(out) + else: + results.append(None) + # weight and bias don't compute batched gradients; no other arguments are differentiable + results = results + [None] * 6 + + # set grad_sample field for weight and bias with per sample gradients + set_grad_sample_if_exists(ctx.weight, weight_grad_sample) + set_grad_sample_if_exists(ctx.bias, lambda _: grad_output.reshape(*grad_output.shape[:2], -1).sum(dim=2)) + return tuple(results) + + +def conv_unfold_weight_grad_sample(input, grad_output, weight_shape, kernel_size, stride, padding, dilation, groups, func): + n = input.shape[0] + in_channels = input.shape[1] + + unfold_func = conv_picker( + func, + lambda: F.unfold(input.unsqueeze(-2), + kernel_size=(1, kernel_size[0]), + dilation=(1, dilation[0]), + padding=(0, padding[0]), + stride=(1, stride[0])), + lambda: F.unfold(input, kernel_size, dilation=dilation, padding=padding, stride=stride), + lambda: unfold3d(input, kernel_size, padding, stride, dilation) + ) + + input = unfold_func() + grad_output = grad_output.reshape(n, -1, input.shape[-1]) + + # n=batch_sz; o=num_out_channels; p=(num_in_channels/groups)*kernel_sz + weight_grad_sample = torch.einsum("noq,npq->nop", grad_output, input) + 
# rearrange the above tensor and extract diagonals. + weight_grad_sample = weight_grad_sample.view( + n, + groups, + -1, + groups, + int(in_channels / groups), + np.prod(kernel_size), + ) + weight_grad_sample = torch.einsum("ngrg...->ngr...", weight_grad_sample).contiguous() + shape = [n] + list(weight_shape) + weight_grad_sample = weight_grad_sample.view(shape) + return weight_grad_sample + + +def conv_group_weight_grad_sample(input, grad_output, weight_shape, stride, padding, dilation, batch_size, func): + I = input.shape[1] + O = grad_output.shape[1] + + input_ = input.transpose(0, 1) + grad_output_ = grad_output.view(grad_output.shape[0] * grad_output.shape[1], 1, *grad_output.shape[2:]) + + weight_grad_sample = func(input_, grad_output_, None, stride=dilation, padding=padding, dilation=stride, groups=batch_size) + input_dims = conv_picker(func, 3, 4, 5) + for i in range(2, input_dims): + weight_grad_sample = weight_grad_sample.narrow(i, 0, weight_shape[i]) + weight_grad_sample = weight_grad_sample.view(I, batch_size, O, *weight_grad_sample.shape[2:]) + weight_grad_sample = weight_grad_sample.movedim(0, 2) + return weight_grad_sample + + +def unfold3d( + tensor, + kernel_size, + padding, + stride, + dilation, +): + r""" + Extracts sliding local blocks from an batched input tensor. + :class:`torch.nn.Unfold` only supports 4D inputs (batched image-like tensors). + This method implements the same action for 5D inputs + Args: + tensor: An input tensor of shape ``(B, C, D, H, W)``. + kernel_size: the size of the sliding blocks + padding: implicit zero padding to be added on both sides of input + stride: the stride of the sliding blocks in the input spatial dimensions + dilation: the spacing between the kernel points. + Returns: + A tensor of shape ``(B, C * np.product(kernel_size), L)``, where L - output spatial dimensions. 
+ See :class:`torch.nn.Unfold` for more details + Example: + >>> # xdoctest: +SKIP + >>> B, C, D, H, W = 3, 4, 5, 6, 7 + >>> tensor = torch.arange(1, B * C * D * H * W + 1.).view(B, C, D, H, W) + >>> unfold3d(tensor, kernel_size=2, padding=0, stride=1).shape + torch.Size([3, 32, 120]) + """ + + if len(tensor.shape) != 5: + raise ValueError( + f"Input tensor must be of the shape [B, C, D, H, W]. Got{tensor.shape}" + ) + + if dilation != (1, 1, 1): + raise NotImplementedError(f"dilation={dilation} not supported.") + + batch_size, channels, _, _, _ = tensor.shape + + # Input shape: (B, C, D, H, W) + tensor = F.pad( + tensor, (padding[2], padding[2], padding[1], padding[1], padding[0], padding[0]) + ) + # Output shape: (B, C, D+2*padding[2], H+2*padding[1], W+2*padding[0]) + + tensor = tensor.unfold(dimension=2, size=kernel_size[0], step=stride[0]) + tensor = tensor.unfold(dimension=3, size=kernel_size[1], step=stride[1]) + tensor = tensor.unfold(dimension=4, size=kernel_size[2], step=stride[2]) + # Output shape: (B, C, D_out, H_out, W_out, kernel_size[0], kernel_size[1], kernel_size[2]) + # For D_out, H_out, W_out definitions see :class:`torch.nn.Unfold` + + tensor = tensor.permute(0, 2, 3, 4, 1, 5, 6, 7) + # Output shape: (B, D_out, H_out, W_out, C, kernel_size[0], kernel_size[1], kernel_size[2]) + + tensor = tensor.reshape(batch_size, -1, channels * np.prod(kernel_size)).transpose( + 1, 2 + ) + # Output shape: (B, D_out * H_out * W_out, C * kernel_size[0] * kernel_size[1] * kernel_size[2] + + return tensor diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/expanded_weights_impl.py b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/expanded_weights_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..a39c2bda09e3768d5a0bd2ae4dfb95222ec81ba6 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/expanded_weights_impl.py @@ -0,0 +1,154 @@ +from contextlib import 
contextmanager + +from torch._C import _TensorBase +import torch +import functools +from torch._decomp import decomposition_table + +from typing import Callable, Dict, cast + +from torch.utils._pytree import tree_map_only + +HANDLED_FUNCTIONS: Dict[Callable, torch.autograd.Function] = {} + +aten = torch._ops.ops.aten +# __torch_function__ runs before the pydispatcher so we need to manually use the same +# decompositions indexed by their torch equivalent +expanded_weights_rnn_decomps = { + # func: (input_decomp, data_decomp) + torch.rnn_relu: (decomposition_table[aten.rnn_relu.input], decomposition_table[aten.rnn_relu.data]), + torch.rnn_tanh: (decomposition_table[aten.rnn_tanh.input], decomposition_table[aten.rnn_tanh.data]), + torch.lstm: (decomposition_table[aten.lstm.input], decomposition_table[aten.lstm.data]), + torch.gru: (decomposition_table[aten.gru.input], decomposition_table[aten.gru.data]), +} + +# all of the RNN decomps run linear with the batch dimension second, even if batch_first was set +@contextmanager +def batch_second(args, kwargs): + def set_batch_second(ew): + ew.set_batch_first(False) + + def reset_batch_first(ew): + ew.set_batch_first(True) + + tree_map_only(ExpandedWeight, set_batch_second, args) + tree_map_only(ExpandedWeight, set_batch_second, kwargs) + try: + yield + finally: + tree_map_only(ExpandedWeight, reset_batch_first, args) + tree_map_only(ExpandedWeight, reset_batch_first, kwargs) + +# to support packed sequences, we need to allow for smaller batches. 
Expanded weights represents the largest batch +@contextmanager +def allow_smaller_batches(args, kwargs): + def allow(ew): + ew.set_allow_smaller_batches(True) + + def reset(ew): + ew.set_allow_smaller_batches(False) + + tree_map_only(ExpandedWeight, allow, args) + tree_map_only(ExpandedWeight, allow, kwargs) + try: + yield + finally: + tree_map_only(ExpandedWeight, reset, args) + tree_map_only(ExpandedWeight, reset, kwargs) + +@contextmanager +def setup_rnn(use_input_variant, args, kwargs): + with batch_second(args, kwargs) if use_input_variant else allow_smaller_batches(args, kwargs): + yield + + +def implements_per_sample_grads(torch_function): + @functools.wraps(torch_function) + def decorator(autograd_func): + HANDLED_FUNCTIONS[torch_function] = autograd_func + return autograd_func + return decorator + +# ExpandedWeight represents a weight (parameter) Tensor that has an expanded +# batch dimension. Operations on the ExpandedWeight Tensor act exactly like +# those without an expanded batch dimension but a call to .backward() populates +# the original (unexpanded) tensor with per-sample-gradients for in the grad_sample field +# +# ExpandedWeight has a fallback that always fails since we cannot know what the batch +# dimension of the input tensor is and therefore cannot know if this is a valid call +# +# This is a __torch_function__ object but it could have also been a Tensor Extension +# with a dispatch key. 
+# +# Needs to be a tensor subclass to allow reparamaterization +class ExpandedWeight(torch.Tensor): + def __init__(self, orig_weight, batch_size, loss_reduction): + self.batch_size = batch_size + self.batch_first = True + self.allow_smaller_batches = False + self.orig_weight = orig_weight + self.loss_reduction = loss_reduction + + handled_functions = HANDLED_FUNCTIONS + + def __new__(cls, orig_weight, batch_size, loss_reduction): + if not isinstance(orig_weight, torch.Tensor): + raise RuntimeError(f"Can only make Expanded Weights of Tensors, got {type(orig_weight).__name__}") + if not orig_weight.requires_grad: + raise RuntimeError("Can only build ExpandedWeights objects of tensors that require_grad") + ret = torch.Tensor._make_subclass(cast(_TensorBase, cls), orig_weight, True) + return ret + + @classmethod + def __torch_function__(cls, func, _, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + if func in expanded_weights_rnn_decomps: + # in aten, choosing the input or data variants is done by parsing logic. This mimics some of that + decomp_opts = expanded_weights_rnn_decomps[func] + use_input_variant = isinstance(args[2], list) # data variant uses a list here + decomp = decomp_opts[0] if use_input_variant else decomp_opts[1] + + if decomp is not None: + with setup_rnn(use_input_variant, args, kwargs): + return decomp(*args, **kwargs) + if func == torch._cudnn_rnn_flatten_weight: + # since we aren't using the fused cuda kernels for RNNs, don't do this + return + if func in cls.handled_functions: + return cls.handled_functions[func].apply(tuple(kwargs.keys()), func, *(args + tuple(kwargs.values()))) + # We cannot use a fallback here because we do not know the batch dimension for any regular tensor inputs, + # i.e. 
torch.add(torch.Tensor, ExpandedWeight) + raise RuntimeError(f"Expanded Weights encountered but cannot handle function {func.__name__}") + + @property + def dtype(self): + return self.orig_weight.dtype + + @property + def data(self): + return self.orig_weight.data + + @property + def shape(self): + return self.orig_weight.shape + + @property + def device(self): + return self.orig_weight.device + + @property + def is_cuda(self): + return self.orig_weight.is_cuda + + def data_ptr(self): + return self.orig_weight.data_ptr() + + def get_device(self): + return self.orig_weight.get_device() + + def set_allow_smaller_batches(self, is_allow_smaller_batches): + self.allow_smaller_batches = is_allow_smaller_batches + + def set_batch_first(self, is_batch_first=True): + self.batch_first = is_batch_first diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/expanded_weights_utils.py b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/expanded_weights_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b3c91481c18c7e5a440bf212d3122d826453f3df --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/expanded_weights_utils.py @@ -0,0 +1,142 @@ +from typing import Optional + +import torch +from .expanded_weights_impl import ExpandedWeight + +def is_batch_first(expanded_args_and_kwargs): + batch_first = None + for arg in expanded_args_and_kwargs: + if not isinstance(arg, ExpandedWeight): + continue + + if not batch_first: + batch_first = arg.batch_first + elif arg.batch_first != batch_first: + raise RuntimeError("Got conflicting batch_first arguments in the same layer") + return batch_first + +def standard_kwargs(kwarg_names, expanded_args): + r'''Most `__torch_function__`s standardize the kwargs that they give, so this will separate + the args and kwargs they pass. 
Functions that don't are linear and convND + ''' + kwarg_values = expanded_args[len(expanded_args) - len(kwarg_names):] + expanded_args_without_kwargs = expanded_args[:len(expanded_args) - len(kwarg_names)] + expanded_kwargs = {name: value for (name, value) in zip(kwarg_names, kwarg_values)} + return expanded_args_without_kwargs, expanded_kwargs + +def forward_helper(func, expanded_args, expanded_kwargs): + r'''Forward helper computes the forward pass for a function that has expanded weight(s) + passed to it. It will run the forward pass where all ExpandedWeights are their original + weight. It runs checks on the given arguments and detaches the outputs. + + .. note:: First argument in :attr:`expanded_args` must be the input with the batch + dimension as the first element of the shape + + .. note:: :attr:`func` must return a Tensor or tuple of Tensors + + Args: + func: The function to be called + expanded_args: Arguments to be passed to :attr:`func`. Will include arguments + that need to be unpacked because they are ExpandedWeights + expanded_kwargs: Keyword arguments to be passed to :attr:`func`. + Similar to :attr:`expanded_args`. + ''' + unexpanded_args, unexpanded_kwargs = _check_and_unexpand_args(func, expanded_args, expanded_kwargs) + return func(*unexpanded_args, **unexpanded_kwargs) + +def _check_and_unexpand_args(func, expanded_args, expanded_kwargs): + # input must be the first argument passed + input = expanded_args[0] + if isinstance(input, ExpandedWeight): + raise RuntimeError("Expanded Weights do not support inputs that are also ExpandedWeights. 
" + f"Input must be a Tensor, got {type(input).__name__} in function {func.__name__}") + if not isinstance(input, torch.Tensor): + raise RuntimeError("Expanded Weights requires a Tensor as the first input to get the batch dimension, " + f"got {type(input).__name__} in function {func.__name__}") + if len(input.shape) == 0: + raise RuntimeError(f"Expanded Weights requires a batch dimension but got an input of size 0 in function {func.__name__}") + if input.shape[0] == 0: + raise RuntimeError("0 is not a valid batch size for Expanded Weights but got input tensor of " + f"{input} in function {func.__name__}") + for arg in expanded_args + tuple(expanded_kwargs.values()): + if not isinstance(arg, ExpandedWeight): + continue + batch_size = input.shape[0] if arg.batch_first else input.shape[1] + if (arg.allow_smaller_batches and batch_size > arg.batch_size) or \ + (not arg.allow_smaller_batches and arg.batch_size != batch_size): + raise RuntimeError("Expected ExpandedWeights to have batch size matching input but got " + f"input batch size of {batch_size} with ExpandedWeight of batch size {arg.batch_size}") + + loss_reduction: Optional[str] = None + for arg in expanded_args + tuple(expanded_kwargs.values()): + if isinstance(arg, ExpandedWeight): + if loss_reduction is None: + loss_reduction = arg.loss_reduction + elif loss_reduction != arg.loss_reduction: + raise RuntimeError("Expected ExpandedWeights to all have the same loss_reduction argument but got one" + f"with {loss_reduction} and one with {arg.loss_reduction}") + + unexpanded_args = tuple(arg.orig_weight if isinstance(arg, ExpandedWeight) else arg for arg in expanded_args) + unexpanded_kwargs = {name: arg.orig_weight if isinstance(arg, ExpandedWeight) else arg + for (name, arg) in expanded_kwargs.items()} + return unexpanded_args, unexpanded_kwargs + +def maybe_scale_by_batch_size(grad_sample, expanded_weight): + if expanded_weight.loss_reduction == "mean": + return grad_sample * expanded_weight.batch_size + else: + 
return grad_sample + +def set_grad_sample_if_exists(maybe_expanded_weight, per_sample_grad_fn): + unpacked = unpack_expanded_weight_or_tensor(maybe_expanded_weight) + if isinstance(maybe_expanded_weight, ExpandedWeight): + grad_sample_contribution = maybe_scale_by_batch_size(per_sample_grad_fn(unpacked), maybe_expanded_weight) + + if maybe_expanded_weight.batch_size > grad_sample_contribution.shape[0]: + # this only passes the other checks if the arg allows smaller batch sizes + intermediate = torch.zeros(maybe_expanded_weight.batch_size, *grad_sample_contribution.shape[1:], + dtype=grad_sample_contribution.dtype, + device=grad_sample_contribution.device) + intermediate[:grad_sample_contribution.shape[0]] = grad_sample_contribution + grad_sample_contribution = intermediate + + if hasattr(unpacked, "grad_sample") and unpacked.grad_sample is not None: + unpacked.grad_sample = unpacked.grad_sample + grad_sample_contribution + else: + unpacked.grad_sample = grad_sample_contribution + +def unpack_expanded_weight_or_tensor(maybe_expanded_weight, func=lambda x: x): + if isinstance(maybe_expanded_weight, ExpandedWeight): + orig_weight = maybe_expanded_weight.orig_weight + return func(orig_weight) + elif isinstance(maybe_expanded_weight, torch.Tensor) and not maybe_expanded_weight.requires_grad: + return func(maybe_expanded_weight) + elif isinstance(maybe_expanded_weight, torch.Tensor): + raise RuntimeError("ExpandedWeights currently does not support a mixture of ExpandedWeight parameters " + "and normal Parameters. Please file and issue with pytorch/pytorch") + + + +def sum_over_all_but_batch_and_last_n( + tensor: torch.Tensor, n_dims: int +) -> torch.Tensor: + r""" + Calculates the sum over all dimensions, except the first + (batch dimension), and excluding the last n_dims. + This function will ignore the first dimension and it will + not aggregate over the last n_dims dimensions. + Args: + tensor: An input tensor of shape ``(B, ..., X[n_dims-1])``. 
+ n_dims: Number of dimensions to keep. + Example: + >>> tensor = torch.ones(1, 2, 3, 4, 5) + >>> sum_over_all_but_batch_and_last_n(tensor, n_dims=2).shape + torch.Size([1, 4, 5]) + Returns: + A tensor of shape ``(B, ..., X[n_dims-1])`` + """ + if tensor.dim() == n_dims + 1: + return tensor + else: + dims = list(range(1, tensor.dim() - n_dims)) + return tensor.sum(dim=dims) diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/group_norm_expanded_weights.py b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/group_norm_expanded_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..fe29b1eafbe2c0be87a96f4e24d8c026b310b3d7 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/group_norm_expanded_weights.py @@ -0,0 +1,64 @@ +from functools import reduce +import operator +import torch +import torch.nn.functional as F +from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads +from .expanded_weights_utils import standard_kwargs, \ + forward_helper, set_grad_sample_if_exists, unpack_expanded_weight_or_tensor +from typing import List, Optional + +@implements_per_sample_grads(F.group_norm) +class GroupNormPerSampleGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs): + expanded_args, expanded_kwargs = standard_kwargs(kwarg_names, expanded_args_and_kwargs) + input, num_groups = expanded_args + N = input.shape[0] + C = input.shape[1] + HxW = reduce(operator.mul, input.shape[2:], 1) + weight, bias, eps = expanded_kwargs['weight'], expanded_kwargs['bias'], expanded_kwargs['eps'] + output, mean, rstd = forward_helper(torch.native_group_norm, (input, weight, bias, N, C, HxW, num_groups, eps), {}) + ctx.input, ctx.num_groups = input, num_groups + ctx.weight, ctx.eps = weight, eps + ctx.mean, ctx.rstd = mean, rstd + if isinstance(bias, ExpandedWeight): + ctx.bias = bias + if input.requires_grad and 
isinstance(weight, ExpandedWeight): + ctx.weight = weight + return output + + @staticmethod + def backward(ctx, grad_output): + input, num_groups = ctx.input, ctx.num_groups + weight, bias, eps = ctx.weight, ctx.bias, ctx.eps + mean, rstd = ctx.mean, ctx.rstd + + results: List[Optional[torch.Tensor]] = [] + results.append(None) # for kwarg names + results.append(None) # for op reference + + if input.requires_grad: + weight_c = unpack_expanded_weight_or_tensor(weight, lambda t: t.contiguous()) + input_c = input.contiguous() + grad_output_c = grad_output.contiguous() if grad_output is not None else None + N = input.shape[0] + C = input.shape[1] + HxW = 1 + for s in input.shape[2:]: + HxW *= s + bw_fn = torch.ops.aten.native_group_norm_backward + results.append(bw_fn(grad_output_c, input_c, + mean, rstd, weight_c, N, C, HxW, num_groups, (True, False, False))[0]) + else: + results.append(None) + + # weight and bias don't compute batched gradients; no other arguments are differentiable + results = results + [None] * 4 + + # set grad_sample field for weight and bias with per sample gradients + if hasattr(ctx, "weight"): + set_grad_sample_if_exists(weight, + lambda _: torch.einsum("ni...->ni", F.group_norm(input, num_groups, eps=eps) * grad_output)) + if hasattr(ctx, "bias"): + set_grad_sample_if_exists(bias, lambda _: torch.einsum("ni...->ni", grad_output)) + return tuple(results) diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/layer_norm_expanded_weights.py b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/layer_norm_expanded_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..f2ead2d4c08fb03aafec2469d86c672ebe9bb222 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/layer_norm_expanded_weights.py @@ -0,0 +1,59 @@ + +import torch +import torch.nn.functional as F +from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads +from 
.expanded_weights_utils import forward_helper, set_grad_sample_if_exists, \ + standard_kwargs, sum_over_all_but_batch_and_last_n, unpack_expanded_weight_or_tensor +from typing import List, Optional + +@implements_per_sample_grads(F.layer_norm) +class LayerNormPerSampleGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs): + expanded_args, expanded_kwargs = standard_kwargs(kwarg_names, expanded_args_and_kwargs) + input = expanded_args[0] + normalized_shape = expanded_args[1] + if len(input.shape) <= len(normalized_shape): + raise RuntimeError("Expanded Weights: Layer norm should not normalize over batch dimension for per sample gradient" + f"computations but got that normalized shape, {normalized_shape}, matched input shape.") + output, mean, rstd = forward_helper(torch.native_layer_norm, expanded_args, expanded_kwargs) + ctx.args = expanded_args + + if input.requires_grad or isinstance(expanded_kwargs['weight'], ExpandedWeight): + ctx.weight = expanded_kwargs['weight'] + if input.requires_grad or isinstance(expanded_kwargs['bias'], ExpandedWeight): + ctx.bias = expanded_kwargs['bias'] + ctx.eps = expanded_kwargs['eps'] + ctx.mean, ctx.rstd = mean, rstd + return output + + + @staticmethod + def backward(ctx, grad_output): + + def weight_per_sample_grad(weight): + return sum_over_all_but_batch_and_last_n(F.layer_norm(input, normalized_shape, eps=ctx.eps) * grad_output, weight.dim()) + + input, normalized_shape = ctx.args + mean, rstd = ctx.mean, ctx.rstd + + results: List[Optional[torch.Tensor]] = [] + results.append(None) # for kwarg names + results.append(None) # for op reference + if input.requires_grad: + weight_ = unpack_expanded_weight_or_tensor(ctx.weight) + bias_ = unpack_expanded_weight_or_tensor(ctx.bias) + results.append(torch.ops.aten.native_layer_norm_backward( + grad_output, input, normalized_shape, mean, rstd, weight_, bias_, (True, False, False))[0]) + else: + results.append(None) + + # weight and 
bias don't compute batched gradients; no other arguments are differentiable + results = results + [None] * 4 + + # set grad_sample field for weight and bias with per sample gradients + if hasattr(ctx, "weight"): + set_grad_sample_if_exists(ctx.weight, weight_per_sample_grad) + if hasattr(ctx, "bias"): + set_grad_sample_if_exists(ctx.bias, lambda bias: sum_over_all_but_batch_and_last_n(grad_output, bias.dim())) + return tuple(results) diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/linear_expanded_weights.py b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/linear_expanded_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..c2cbae63f33651a0f44e287cb0fa6d5d4a25bc62 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/linear_expanded_weights.py @@ -0,0 +1,44 @@ +import torch +import torch.nn.functional as F +from .expanded_weights_impl import implements_per_sample_grads +from .expanded_weights_utils import \ + forward_helper, set_grad_sample_if_exists, unpack_expanded_weight_or_tensor, is_batch_first +from typing import List, Optional + +@implements_per_sample_grads(F.linear) +class LinearPerSampleGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, _, __, *expanded_args_and_kwargs): + if len(expanded_args_and_kwargs[0].shape) <= 1: + raise RuntimeError("Input does not have a batch dimension. 
Expanded Weights expected input " + f"of at least rank 2, got of rank {len(expanded_args_and_kwargs[0].shape)}") + expanded_kwargs = {'bias': expanded_args_and_kwargs[2] if len(expanded_args_and_kwargs) == 3 else None} + expanded_args = expanded_args_and_kwargs[:2] + ctx.batch_first = is_batch_first(expanded_args_and_kwargs) + output = forward_helper(F.linear, expanded_args, expanded_kwargs) + ctx.args = expanded_args + ctx.kwargs = expanded_kwargs + return output + + @staticmethod + def backward(ctx, grad_output): + input, weight = ctx.args + bias = ctx.kwargs['bias'] + results: List[Optional[torch.Tensor]] = [] + results.append(None) # for kwarg_names + results.append(None) # for op reference + + if input.requires_grad: + results.append(grad_output.matmul(unpack_expanded_weight_or_tensor(weight))) + else: + results.append(None) + results.extend([None] * 2) # weight and bias don't compute batched gradients + + if not ctx.batch_first: + grad_output = grad_output.transpose(0, 1) + input = input.transpose(0, 1) + + # weight and bias get their grad_sample fields set directly if they exist + set_grad_sample_if_exists(weight, lambda _: torch.einsum("n...i,n...j->nij", grad_output, input)) + set_grad_sample_if_exists(bias, lambda _: torch.einsum("n...k->nk", grad_output)) + return tuple(results) diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/parametrizations.py b/wemm/lib/python3.10/site-packages/torch/nn/utils/parametrizations.py new file mode 100644 index 0000000000000000000000000000000000000000..7b097f66767136556ead729e879203d08f5ee136 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/utils/parametrizations.py @@ -0,0 +1,492 @@ +from enum import Enum, auto + +import torch +from torch import Tensor +from ..utils import parametrize +from ..modules import Module +from .. 
def _is_orthogonal(Q, eps=None):
    """Return True if ``Q`` has (approximately) orthonormal columns, i.e. ``Q^H Q = Id``.

    Args:
        Q (Tensor): matrix or batch of matrices of shape ``(..., n, k)``
        eps (float, optional): absolute tolerance used in the comparison.
            If ``None``, defaults to ``10 * n * finfo(Q.dtype).eps``.

    Note: previously the ``eps`` argument was accepted but unconditionally
    overwritten; it is now honored when explicitly provided.
    """
    n, k = Q.size(-2), Q.size(-1)
    Id = torch.eye(k, dtype=Q.dtype, device=Q.device)
    if eps is None:
        # A reasonable default eps, but not too large
        eps = 10. * n * torch.finfo(Q.dtype).eps
    return torch.allclose(Q.mH @ Q, Id, atol=eps)


def _make_orthogonal(A):
    """ Assume that A is a tall matrix.
    Compute the Q factor s.t. A = QR (A may be complex) and diag(R) is real and non-negative
    """
    X, tau = torch.geqrf(A)
    Q = torch.linalg.householder_product(X, tau)
    # The diagonal of X is the diagonal of R (which is always real) so we normalise by its signs
    Q *= X.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)
    return Q


class _OrthMaps(Enum):
    # The three supported maps from an unconstrained tensor to an orthogonal matrix.
    matrix_exp = auto()
    cayley = auto()
    householder = auto()


class _Orthogonal(Module):
    """Parametrization module mapping an unconstrained tensor to an orthogonal matrix.

    Supports the matrix exponential, Cayley, and Householder maps; with
    ``use_trivialization=True`` an extra ``base`` buffer implements the
    dynamic trivialization framework.
    """
    base: Tensor

    def __init__(self,
                 weight,
                 orthogonal_map: _OrthMaps,
                 *,
                 use_trivialization=True) -> None:
        super().__init__()

        # Note [Householder complex]
        # For complex tensors, it is not possible to compute the tensor `tau` necessary for
        # linalg.householder_product from the reflectors.
        # To see this, note that the reflectors have a shape like:
        # 0 0 0
        # * 0 0
        # * * 0
        # which, for complex matrices, give n(n-1) (real) parameters. Now, you need n^2 parameters
        # to parametrize the unitary matrices. Saving tau on its own does not work either, because
        # not every combination of `(A, tau)` gives a unitary matrix, meaning that if we optimise
        # them as independent tensors we would not maintain the constraint
        # An equivalent reasoning holds for rectangular matrices
        if weight.is_complex() and orthogonal_map == _OrthMaps.householder:
            raise ValueError("The householder parametrization does not support complex tensors.")

        self.shape = weight.shape
        self.orthogonal_map = orthogonal_map
        if use_trivialization:
            # `base` is filled in later by right_inverse(); registered as None for now.
            self.register_buffer("base", None)

    def forward(self, X: torch.Tensor) -> torch.Tensor:
        n, k = X.size(-2), X.size(-1)
        transposed = n < k
        if transposed:
            X = X.mT
            n, k = k, n
        # Here n > k and X is a tall matrix
        if self.orthogonal_map == _OrthMaps.matrix_exp or self.orthogonal_map == _OrthMaps.cayley:
            # We just need n x k - k(k-1)/2 parameters
            X = X.tril()
            if n != k:
                # Embed into a square matrix
                X = torch.cat([X, X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1)
            A = X - X.mH
            # A is skew-symmetric (or skew-hermitian)
            if self.orthogonal_map == _OrthMaps.matrix_exp:
                Q = torch.matrix_exp(A)
            elif self.orthogonal_map == _OrthMaps.cayley:
                # Computes the Cayley retraction (I+A/2)(I-A/2)^{-1}
                Id = torch.eye(n, dtype=A.dtype, device=A.device)
                Q = torch.linalg.solve(torch.add(Id, A, alpha=-0.5), torch.add(Id, A, alpha=0.5))
            # Q is now orthogonal (or unitary) of size (..., n, n)
            if n != k:
                Q = Q[..., :k]
            # Q is now the size of the X (albeit perhaps transposed)
        else:
            # X is real here, as we do not support householder with complex numbers
            A = X.tril(diagonal=-1)
            tau = 2. / (1. + (A * A).sum(dim=-2))
            Q = torch.linalg.householder_product(A, tau)
            # The diagonal of X is 1's and -1's
            # We do not want to differentiate through this or update the diagonal of X hence the casting
            Q = Q * X.diagonal(dim1=-2, dim2=-1).int().unsqueeze(-2)

        if hasattr(self, "base"):
            Q = self.base @ Q
        if transposed:
            Q = Q.mT
        return Q

    @torch.autograd.no_grad()
    def right_inverse(self, Q: torch.Tensor) -> torch.Tensor:
        if Q.shape != self.shape:
            raise ValueError(f"Expected a matrix or batch of matrices of shape {self.shape}. "
                             f"Got a tensor of shape {Q.shape}.")

        Q_init = Q
        n, k = Q.size(-2), Q.size(-1)
        transpose = n < k
        if transpose:
            Q = Q.mT
            n, k = k, n

        # We always make sure to always copy Q in every path
        if not hasattr(self, "base"):
            # Note [right_inverse expm cayley]
            # If we do not have use_trivialization=True, we just implement the inverse of the forward
            # map for the Householder. To see why, think that for the Cayley map,
            # we would need to find the matrix X \in R^{n x k} such that:
            # Y = torch.cat([X.tril(), X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1)
            # A = Y - Y.mH
            # cayley(A)[:, :k]
            # gives the original tensor. It is not clear how to do this.
            # Perhaps via some algebraic manipulation involving the QR like that of
            # Corollary 2.2 in Edelman, Arias and Smith?
            if self.orthogonal_map == _OrthMaps.cayley or self.orthogonal_map == _OrthMaps.matrix_exp:
                raise NotImplementedError("It is not possible to assign to the matrix exponential "
                                          "or the Cayley parametrizations when use_trivialization=False.")

            # If parametrization == _OrthMaps.householder, make Q orthogonal via the QR decomposition.
            # Here Q is always real because we do not support householder and complex matrices.
            # See note [Householder complex]
            A, tau = torch.geqrf(Q)
            # We want to have a decomposition X = QR with diag(R) > 0, as otherwise we could
            # decompose an orthogonal matrix Q as Q = (-Q)@(-Id), which is a valid QR decomposition
            # The diagonal of Q is the diagonal of R from the qr decomposition
            A.diagonal(dim1=-2, dim2=-1).sign_()
            # Equality with zero is ok because LAPACK returns exactly zero when it does not want
            # to use a particular reflection
            A.diagonal(dim1=-2, dim2=-1)[tau == 0.] *= -1
            return A.mT if transpose else A
        else:
            if n == k:
                # We check whether Q is orthogonal
                if not _is_orthogonal(Q):
                    Q = _make_orthogonal(Q)
                else:  # Is orthogonal
                    Q = Q.clone()
            else:
                # Complete Q into a full n x n orthogonal matrix
                N = torch.randn(*(Q.size()[:-2] + (n, n - k)), dtype=Q.dtype, device=Q.device)
                Q = torch.cat([Q, N], dim=-1)
                Q = _make_orthogonal(Q)
            self.base = Q

            # It is necessary to return the -Id, as we use the diagonal for the
            # Householder parametrization. Using -Id makes:
            # householder(torch.zeros(m,n)) == torch.eye(m,n)
            # Poor man's version of eye_like
            neg_Id = torch.zeros_like(Q_init)
            neg_Id.diagonal(dim1=-2, dim2=-1).fill_(-1.)
            return neg_Id
def orthogonal(module: Module,
               name: str = 'weight',
               orthogonal_map: Optional[str] = None,
               *,
               use_trivialization: bool = True) -> Module:
    r"""Registers an orthogonal (or unitary) parametrization on ``module[name]``.

    After registration, accessing ``getattr(module, name)`` yields a matrix
    :math:`Q` with orthonormal columns (when ``m >= n``) or orthonormal rows
    (when ``m < n``); tensors with more than two dimensions are treated as
    batches of matrices of shape ``(..., m, n)``.

    The constraint is realized through one of three maps (``orthogonal_map``):
    ``"matrix_exp"``, ``"cayley"`` (both applied to a skew-symmetric matrix) or
    ``"householder"`` (a product of Householder reflectors, real tensors only).
    When ``orthogonal_map`` is not given, ``"matrix_exp"`` is chosen for square
    or complex weights and ``"householder"`` otherwise.

    With ``use_trivialization=True`` (default) an auxiliary matrix ``base`` is
    stored under ``module.parametrizations.weight[0].base`` ("Dynamic
    Trivialization Framework"), trading some memory for better convergence.

    Implemented on top of :func:`~torch.nn.utils.parametrize.register_parametrization`.

    Args:
        module (nn.Module): module on which to register the parametrization.
        name (str, optional): name of the tensor to make orthogonal. Default: ``"weight"``.
        orthogonal_map (str, optional): ``"matrix_exp"``, ``"cayley"`` or ``"householder"``.
        use_trivialization (bool, optional): whether to use the dynamic
            trivialization framework. Default: ``True``.

    Returns:
        The original module with the parametrization registered.
    """
    tensor = getattr(module, name, None)
    if not isinstance(tensor, Tensor):
        raise ValueError(
            "Module '{}' has no parameter or buffer with name '{}'".format(module, name)
        )

    # 1-dim tensors could in principle be parametrized as points on the sphere,
    # but that would surprise more users than it would help.
    if tensor.ndim < 2:
        raise ValueError("Expected a matrix or batch of matrices. "
                         f"Got a tensor of {tensor.ndim} dimensions.")

    if orthogonal_map is None:
        square_or_complex = tensor.size(-2) == tensor.size(-1) or tensor.is_complex()
        orthogonal_map = "matrix_exp" if square_or_complex else "householder"

    mapping = getattr(_OrthMaps, orthogonal_map, None)
    if mapping is None:
        raise ValueError('orthogonal_map has to be one of "matrix_exp", "cayley", "householder". '
                         f'Got: {orthogonal_map}')

    parametrization = _Orthogonal(tensor, mapping, use_trivialization=use_trivialization)
    # unsafe=True: the orthogonal map may not be an exact right inverse, so the
    # registration-time consistency checks are skipped.
    parametrize.register_parametrization(module, name, parametrization, unsafe=True)
    return module
class _SpectralNorm(Module):
    """Parametrization module that divides a weight by its largest singular value.

    The spectral norm is estimated with the power method; the left/right
    singular-vector estimates are kept in the ``_u`` / ``_v`` buffers.
    """
    def __init__(
        self,
        weight: torch.Tensor,
        n_power_iterations: int = 1,
        dim: int = 0,
        eps: float = 1e-12
    ) -> None:
        super().__init__()
        ndim = weight.ndim
        if dim >= ndim or dim < -ndim:
            raise IndexError("Dimension out of range (expected to be in range of "
                             f"[-{ndim}, {ndim - 1}] but got {dim})")

        if n_power_iterations <= 0:
            raise ValueError('Expected n_power_iterations to be positive, but '
                             'got n_power_iterations={}'.format(n_power_iterations))
        # Normalize negative dims to their positive equivalent.
        self.dim = dim if dim >= 0 else dim + ndim
        self.eps = eps
        if ndim > 1:
            # For ndim == 1 we do not need to approximate anything (see _SpectralNorm.forward)
            self.n_power_iterations = n_power_iterations
            weight_mat = self._reshape_weight_to_matrix(weight)
            h, w = weight_mat.size()

            # Random initial singular-vector estimates, normalized to unit length.
            u = weight_mat.new_empty(h).normal_(0, 1)
            v = weight_mat.new_empty(w).normal_(0, 1)
            self.register_buffer('_u', F.normalize(u, dim=0, eps=self.eps))
            self.register_buffer('_v', F.normalize(v, dim=0, eps=self.eps))

            # Start with u, v initialized to some reasonable values by performing a number
            # of iterations of the power method
            self._power_method(weight_mat, 15)

    def _reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor:
        """Flatten ``weight`` to 2D, with ``self.dim`` moved to the front as rows."""
        # Precondition
        assert weight.ndim > 1

        if self.dim != 0:
            # permute dim to front
            weight = weight.permute(self.dim, *(d for d in range(weight.dim()) if d != self.dim))

        return weight.flatten(1)

    @torch.autograd.no_grad()
    def _power_method(self, weight_mat: torch.Tensor, n_power_iterations: int) -> None:
        """Run ``n_power_iterations`` power-method steps, updating ``_u``/``_v`` in place."""
        # See original note at torch/nn/utils/spectral_norm.py
        # NB: If `do_power_iteration` is set, the `u` and `v` vectors are
        #     updated in power iteration **in-place**. This is very important
        #     because in `DataParallel` forward, the vectors (being buffers) are
        #     broadcast from the parallelized module to each module replica,
        #     which is a new module object created on the fly. And each replica
        #     runs its own spectral norm power iteration. So simply assigning
        #     the updated vectors to the module this function runs on will cause
        #     the update to be lost forever. And the next time the parallelized
        #     module is replicated, the same randomly initialized vectors are
        #     broadcast and used!
        #
        #     Therefore, to make the change propagate back, we rely on two
        #     important behaviors (also enforced via tests):
        #     1. `DataParallel` doesn't clone storage if the broadcast tensor
        #        is already on correct device; and it makes sure that the
        #        parallelized module is already on `device[0]`.
        #     2. If the out tensor in `out=` kwarg has correct shape, it will
        #        just fill in the values.
        #     Therefore, since the same power iteration is performed on all
        #     devices, simply updating the tensors in-place will make sure that
        #     the module replica on `device[0]` will update the _u vector on the
        #     parallized module (by shared storage).
        #
        #    However, after we update `u` and `v` in-place, we need to **clone**
        #    them before using them to normalize the weight. This is to support
        #    backproping through two forward passes, e.g., the common pattern in
        #    GAN training: loss = D(real) - D(fake). Otherwise, engine will
        #    complain that variables needed to do backward for the first forward
        #    (i.e., the `u` and `v` vectors) are changed in the second forward.

        # Precondition
        assert weight_mat.ndim > 1

        for _ in range(n_power_iterations):
            # Spectral norm of weight equals to `u^T W v`, where `u` and `v`
            # are the first left and right singular vectors.
            # This power iteration produces approximations of `u` and `v`.
            self._u = F.normalize(torch.mv(weight_mat, self._v),      # type: ignore[has-type]
                                  dim=0, eps=self.eps, out=self._u)   # type: ignore[has-type]
            self._v = F.normalize(torch.mv(weight_mat.t(), self._u),
                                  dim=0, eps=self.eps, out=self._v)   # type: ignore[has-type]

    def forward(self, weight: torch.Tensor) -> torch.Tensor:
        """Return ``weight`` divided by its (estimated) spectral norm."""
        if weight.ndim == 1:
            # Faster and more exact path, no need to approximate anything
            return F.normalize(weight, dim=0, eps=self.eps)
        else:
            weight_mat = self._reshape_weight_to_matrix(weight)
            if self.training:
                # Refresh the singular-vector estimates only in training mode.
                self._power_method(weight_mat, self.n_power_iterations)
            # See above on why we need to clone
            u = self._u.clone(memory_format=torch.contiguous_format)
            v = self._v.clone(memory_format=torch.contiguous_format)
            # The proper way of computing this should be through F.bilinear, but
            # it seems to have some efficiency issues:
            # https://github.com/pytorch/pytorch/issues/58093
            sigma = torch.dot(u, torch.mv(weight_mat, v))
            return weight / sigma

    def right_inverse(self, value: torch.Tensor) -> torch.Tensor:
        """Identity: assigned values are assumed to already satisfy the constraint."""
        # we may want to assert here that the passed value already
        # satisfies constraints
        return value
def spectral_norm(module: Module,
                  name: str = 'weight',
                  n_power_iterations: int = 1,
                  eps: float = 1e-12,
                  dim: Optional[int] = None) -> Module:
    r"""Applies spectral normalization to a parameter in the given module.

    .. math::
        \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})},
        \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}

    For a 1-D tensor this reduces to plain :math:`\ell_2` normalization.
    Spectral normalization stabilizes the training of GAN discriminators
    (critics) by bounding the Lipschitz constant of the model;
    :math:`\sigma` is approximated by the power method, with the weight
    reshaped to 2D when it has more than two dimensions.

    See `Spectral Normalization for Generative Adversarial Networks`_ .

    .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957

    .. note::
        Implemented on top of
        :func:`~torch.nn.utils.parametrize.register_parametrization`;
        it is a reimplementation of :func:`torch.nn.utils.spectral_norm`.
        The singular-vector estimates are refined by
        :attr:`n_power_iterations` power-method steps each time the tensor
        is accessed while the module is in training mode.

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter. Default: ``"weight"``.
        n_power_iterations (int, optional): number of power iterations to
            calculate spectral norm. Default: ``1``.
        eps (float, optional): epsilon for numerical stability in
            calculating norms. Default: ``1e-12``.
        dim (int, optional): dimension corresponding to number of outputs.
            Default: ``0``, except ``1`` for ``ConvTranspose{1,2,3}d`` modules.

    Returns:
        The original module with the spectral-norm parametrization registered.
    """
    parametrized_tensor = getattr(module, name, None)
    if not isinstance(parametrized_tensor, Tensor):
        raise ValueError(
            "Module '{}' has no parameter or buffer with name '{}'".format(module, name)
        )

    if dim is None:
        # Transposed convolutions keep their output channels on dim 1.
        transposed_convs = (torch.nn.ConvTranspose1d,
                            torch.nn.ConvTranspose2d,
                            torch.nn.ConvTranspose3d)
        dim = 1 if isinstance(module, transposed_convs) else 0

    parametrize.register_parametrization(
        module, name, _SpectralNorm(parametrized_tensor, n_power_iterations, dim, eps))
    return module
str], Optional[Tensor]] = {} + + +@contextmanager +def cached(): + r"""Context manager that enables the caching system within parametrizations + registered with :func:`register_parametrization`. + + The value of the parametrized objects is computed and cached the first time + they are required when this context manager is active. The cached values are + discarded when leaving the context manager. + + This is useful when using a parametrized parameter more than once in the forward pass. + An example of this is when parametrizing the recurrent kernel of an RNN or when + sharing weights. + + The simplest way to activate the cache is by wrapping the forward pass of the neural network + + .. code-block:: python + + import torch.nn.utils.parametrize as P + ... + with P.cached(): + output = model(inputs) + + in training and evaluation. One may also wrap the parts of the modules that use + several times the parametrized tensors. For example, the loop of an RNN with a + parametrized recurrent kernel: + + .. code-block:: python + + with P.cached(): + for x in xs: + out_rnn = self.rnn_cell(x, out_rnn) + """ + global _cache + global _cache_enabled + _cache_enabled += 1 + try: + yield + finally: + _cache_enabled -= 1 + if not _cache_enabled: + _cache = {} + + +def _register_parameter_or_buffer(module, name, X): + if isinstance(X, Parameter): + module.register_parameter(name, X) + else: + module.register_buffer(name, X) + + +class ParametrizationList(ModuleList): + r"""A sequential container that holds and manages the ``original`` or ``original0``, ``original1``, ... + parameters or buffers of a parametrized :class:`torch.nn.Module`. + + It is the type of ``module.parametrizations[tensor_name]`` when ``module[tensor_name]`` + has been parametrized with :func:`register_parametrization`. 
class ParametrizationList(ModuleList):
    r"""A sequential container that holds and manages the ``original`` or ``original0``, ``original1``, ...
    parameters or buffers of a parametrized :class:`torch.nn.Module`.

    It is the type of ``module.parametrizations[tensor_name]`` when ``module[tensor_name]``
    has been parametrized with :func:`register_parametrization`.

    If the first registered parametrization has a ``right_inverse`` that returns one tensor or
    does not have a ``right_inverse`` (in which case we assume that ``right_inverse`` is the identity),
    it will hold the tensor under the name ``original``.
    If it has a ``right_inverse`` that returns more than one tensor, these will be registered as
    ``original0``, ``original1``, ...

    .. warning::
        This class is used internally by :func:`register_parametrization`. It is documented
        here for completeness. It shall not be instantiated by the user.

    Args:
        modules (sequence): sequence of modules representing the parametrizations
        original (Parameter or Tensor): parameter or buffer that is parametrized
        unsafe (bool): a boolean flag that denotes whether the parametrization
            may change the dtype and shape of the tensor. Default: `False`
            Warning: the parametrization is not checked for consistency upon registration.
            Enable this flag at your own risk.
    """
    # `original` only exists when the first parametrization takes one tensor;
    # otherwise `original0`, `original1`, ... are registered instead.
    original: Tensor
    unsafe: bool

    def __init__(
        self, modules: Sequence[Module], original: Union[Tensor, Parameter], unsafe: bool = False
    ) -> None:
        # We require this because we need to treat differently the first parametrization
        # This should never throw, unless this class is used from the outside
        if len(modules) == 0:
            raise ValueError("ParametrizationList requires one or more modules.")

        super().__init__(modules)
        self.unsafe = unsafe

        # In plain words:
        # module.weight must keep its dtype and shape.
        # Furthermore, if there is no right_inverse or the right_inverse returns a tensor,
        # this should be of the same dtype as the original tensor
        #
        # We check that the following invariants hold:
        #    X = module.weight
        #    Y = param.right_inverse(X)
        #    assert isinstance(Y, Tensor) or
        #           (isinstance(Y, collections.abc.Sequence) and all(isinstance(t, Tensor) for t in Y))
        #    Z = param(Y) if isinstance(Y, Tensor) else param(*Y)
        #    # Consistency checks
        #    assert X.dtype == Z.dtype and X.shape == Z.shape
        #    # If it has one input, this allows to be able to use set_ to be able to
        #    # move data to/from the original tensor without changing its id (which is what the
        #    # optimizer uses to track parameters)
        #    if isinstance(Y, Tensor)
        #      assert X.dtype == Y.dtype
        # Below we use original = X, new = Y

        original_shape = original.shape
        original_dtype = original.dtype

        # Compute new: map the unparametrized tensor back through every
        # right_inverse, last-registered parametrization first.
        with torch.no_grad():
            new = original
            for module in reversed(self):  # type: ignore[call-overload]
                if hasattr(module, "right_inverse"):
                    try:
                        new = module.right_inverse(new)
                    except NotImplementedError:
                        pass
                # else, or if it throws, we assume that right_inverse is the identity

        if not isinstance(new, Tensor) and not isinstance(new, collections.abc.Sequence):
            raise ValueError("'right_inverse' must return a Tensor or a Sequence of tensors (list, tuple...). "
                             f"Got {type(new).__name__}")

        # Set the number of original tensors
        self.is_tensor = isinstance(new, Tensor)
        self.ntensors = 1 if self.is_tensor else len(new)

        # Register the tensor(s)
        if self.is_tensor:
            if original.dtype != new.dtype:
                raise ValueError(
                    "When `right_inverse` outputs one tensor, it may not change the dtype.\n"
                    f"original.dtype: {original.dtype}\n"
                    f"right_inverse(original).dtype: {new.dtype}"
                )
            # Copy `new` into the original tensor in place (set_) so that the user
            # does not need to re-register the parameter manually in the optimiser
            with torch.no_grad():
                original.set_(new)  # type: ignore[call-overload]
            _register_parameter_or_buffer(self, "original", original)
        else:
            for i, originali in enumerate(new):
                if not isinstance(originali, Tensor):
                    raise ValueError("'right_inverse' must return a Tensor or a Sequence of tensors "
                                     "(list, tuple...). "
                                     f"Got element {i} of the sequence with type {type(originali).__name__}.")

                # If the original tensor was a Parameter that required grad, we expect the user to
                # add the new parameters to the optimizer after registering the parametrization
                # (this is documented)
                if isinstance(original, Parameter):
                    originali = Parameter(originali)
                originali.requires_grad_(original.requires_grad)
                _register_parameter_or_buffer(self, f"original{i}", originali)

        if not self.unsafe:
            # Consistency checks:
            # Since f : A -> B, right_inverse : B -> A, Z and original should live in B
            # Z = forward(right_inverse(original))
            Z = self()
            if not isinstance(Z, Tensor):
                raise ValueError(
                    f"A parametrization must return a tensor. Got {type(Z).__name__}."
                )
            if Z.dtype != original_dtype:
                raise ValueError(
                    "Registering a parametrization may not change the dtype of the tensor, unless `unsafe` flag is enabled.\n"
                    f"unparametrized dtype: {original_dtype}\n"
                    f"parametrized dtype: {Z.dtype}"
                )
            if Z.shape != original_shape:
                raise ValueError(
                    "Registering a parametrization may not change the shape of the tensor, unless `unsafe` flag is enabled.\n"
                    f"unparametrized shape: {original_shape}\n"
                    f"parametrized shape: {Z.shape}"
                )

    def right_inverse(self, value: Tensor) -> None:
        r"""Calls the methods ``right_inverse`` (see :func:`register_parametrization`)
        of the parametrizations in the inverse order they were registered in.
        Then, it stores the result in ``self.original`` if ``right_inverse`` outputs one tensor
        or in ``self.original0``, ``self.original1``, ... if it outputs several.

        Args:
            value (Tensor): Value to which initialize the module
        """
        # All the exceptions in this function should almost never throw.
        # They could throw if, for example, right_inverse function returns a different
        # dtype when given a different input, which should most likely be caused by a
        # bug in the user's code

        with torch.no_grad():
            # See https://github.com/pytorch/pytorch/issues/53103
            for module in reversed(self):  # type: ignore[call-overload]
                if hasattr(module, "right_inverse"):
                    value = module.right_inverse(value)
                else:
                    raise RuntimeError(f"parametrization {type(module).__name__} does not implement "
                                       "right_inverse.")
            if self.is_tensor:
                # These exceptions should only throw when a right_inverse function does not
                # return the same dtype for every input, which should most likely be caused by a bug
                if not isinstance(value, Tensor):
                    raise ValueError(
                        f"`right_inverse` should return a tensor. Got {type(value).__name__}"
                    )
                if value.dtype != self.original.dtype:
                    raise ValueError(
                        f"The tensor returned by `right_inverse` has dtype {value.dtype} "
                        f"while `original` has dtype {self.original.dtype}"
                    )
                # We know that the result is going to have the same dtype
                self.original.set_(value)  # type: ignore[call-overload]
            else:
                if not isinstance(value, collections.abc.Sequence):
                    raise ValueError(
                        "'right_inverse' must return a sequence of tensors. "
                        f"Got {type(value).__name__}."
                    )
                if len(value) != self.ntensors:
                    raise ValueError(
                        "'right_inverse' must return a sequence of tensors of length "
                        f"{self.ntensors}. Got a sequence of length {len(value)}."
                    )
                for i, tensor in enumerate(value):
                    original_i = getattr(self, f"original{i}")
                    if not isinstance(tensor, Tensor):
                        raise ValueError(
                            f"`right_inverse` must return a sequence of tensors. "
                            f"Got element {i} of type {type(tensor).__name__}"
                        )
                    if original_i.dtype != tensor.dtype:
                        raise ValueError(
                            f"Tensor {i} returned by `right_inverse` has dtype {tensor.dtype} "
                            f"while `original{i}` has dtype {original_i.dtype}"
                        )
                    original_i.set_(tensor)

    def forward(self) -> Tensor:
        """Composes all registered parametrizations over the stored original tensor(s)."""
        if torch.jit.is_scripting():
            raise RuntimeError('Parametrization is not working with scripting.')
        # Unpack the originals for the first parametrization
        if self.is_tensor:
            x = self[0](self.original)
        else:
            originals = (getattr(self, f"original{i}") for i in range(self.ntensors))
            x = self[0](*originals)
        # It's not possible to call self[1:] here, so we have to be a bit more cryptic
        # Also we want to skip all non-integer keys
        curr_idx = 1
        while hasattr(self, str(curr_idx)):
            x = self[curr_idx](x)
            curr_idx += 1
        return x
def _inject_new_class(module: Module) -> None:
    r"""Sets up a module to be parametrized.

    This works by substituting the class of the module by a class
    that extends it to be able to inject a property

    Args:
        module (nn.Module): module into which to inject the property
    """
    cls = module.__class__

    def default_deepcopy(self, memo):
        # Just emulate a standard deepcopy procedure when __deepcopy__ doesn't exist in the current class.
        obj = memo.get(id(self), None)
        if obj is not None:
            return obj
        replica = self.__new__(self.__class__)
        memo[id(self)] = replica
        replica.__dict__ = deepcopy(self.__dict__, memo)
        # Also save all slots if they exist.
        slots_to_save = copyreg._slotnames(self.__class__)  # type: ignore[attr-defined]
        for slot in slots_to_save:
            if hasattr(self, slot):
                setattr(replica, slot, deepcopy(getattr(self, slot), memo))
        return replica

    def getstate(self):
        # Pickling the dynamically-created Parametrized* class is not supported;
        # direct users to state_dict() instead.
        raise RuntimeError(
            "Serialization of parametrized modules is only "
            "supported through state_dict(). See:\n"
            "https://pytorch.org/tutorials/beginner/saving_loading_models.html"
            "#saving-loading-a-general-checkpoint-for-inference-and-or-resuming-training"
        )

    dct = {"__getstate__": getstate}
    # We don't allow serialization of parametrized modules but should still allow deepcopying.
    # Default 'deepcopy' function invokes __deepcopy__ method instead of __getstate__ when it exists.
    if not hasattr(cls, "__deepcopy__"):
        dct["__deepcopy__"] = default_deepcopy  # type: ignore[assignment]

    # Create a one-off subclass of the module's class so a property can be
    # injected later without affecting other instances of `cls`.
    param_cls = type(
        f"Parametrized{cls.__name__}",
        (cls,),
        dct,
    )

    module.__class__ = param_cls


def _inject_property(module: Module, tensor_name: str) -> None:
    r"""Injects a property into module[tensor_name].

    It assumes that the class in the module has already been modified from its
    original one using _inject_new_class and that the tensor under :attr:`tensor_name`
    has already been moved out

    Args:
        module (nn.Module): module into which to inject the property
        tensor_name (str): name of the property to create
    """
    # We check the precondition.
    # This should never fire if register_parametrization is correctly implemented
    assert not hasattr(module, tensor_name)

    @torch.jit.unused
    def get_cached_parametrization(parametrization) -> Tensor:
        global _cache
        # Cache key is (module identity, tensor name); entries live only while
        # the `cached()` context manager is active.
        key = (id(module), tensor_name)
        tensor = _cache.get(key)
        if tensor is None:
            tensor = parametrization()
            _cache[key] = tensor
        return tensor

    def get_parametrized(self) -> Tensor:
        if torch.jit.is_scripting():
            raise RuntimeError('Parametrization is not working with scripting.')
        parametrization = self.parametrizations[tensor_name]
        if _cache_enabled:
            if torch.jit.is_scripting():
                # Scripting
                raise RuntimeError('Caching is not implemented for scripting. '
                                   'Either disable caching or avoid scripting.')
            elif torch._C._get_tracing_state() is not None:
                # Tracing
                raise RuntimeError('Cannot trace a model while caching parametrizations.')
            else:
                return get_cached_parametrization(parametrization)
        else:
            # If caching is not active, this function just evaluates the parametrization
            return parametrization()

    def set_original(self, value: Tensor) -> None:
        # Assignment to the parametrized attribute routes through right_inverse,
        # updating the stored original tensor(s).
        if torch.jit.is_scripting():
            raise RuntimeError('Parametrization is not working with scripting.')
        self.parametrizations[tensor_name].right_inverse(value)

    setattr(module.__class__, tensor_name, property(get_parametrized, set_original))
When accessing ``module.weight``, + the module will return the parametrized version ``parametrization(module.weight)``. + If the original tensor requires a gradient, the backward pass will differentiate + through :attr:`parametrization`, and the optimizer will update the tensor accordingly. + + The first time that a module registers a parametrization, this function will add an attribute + ``parametrizations`` to the module of type :class:`~ParametrizationList`. + + The list of parametrizations on the tensor ``weight`` will be accessible under + ``module.parametrizations.weight``. + + The original tensor will be accessible under + ``module.parametrizations.weight.original``. + + Parametrizations may be concatenated by registering several parametrizations + on the same attribute. + + The training mode of a registered parametrization is updated on registration + to match the training mode of the host module + + Parametrized parameters and buffers have an inbuilt caching system that can be activated + using the context manager :func:`cached`. + + A :attr:`parametrization` may optionally implement a method with signature + + .. code-block:: python + + def right_inverse(self, X: Tensor) -> Union[Tensor, Sequence[Tensor]] + + This method is called on the unparametrized tensor when the first parametrization + is registered to compute the initial value of the original tensor. + If this method is not implemented, the original tensor will be just the unparametrized tensor. + + If all the parametrizations registered on a tensor implement `right_inverse` it is possible + to initialize a parametrized tensor by assigning to it, as shown in the example below. + + It is possible for the first parametrization to depend on several inputs. + This may be implemented returning a tuple of tensors from ``right_inverse`` + (see the example implementation of a ``RankOne`` parametrization below). 
+ + In this case, the unconstrained tensors are also located under ``module.parametrizations.weight`` + with names ``original0``, ``original1``,... + + .. note:: + + If unsafe=False (default) both the forward and right_inverse methods will be called + once to perform a number of consistency checks. + If unsafe=True, then right_inverse will be called if the tensor is not parametrized, + and nothing will be called otherwise. + + .. note:: + + In most situations, ``right_inverse`` will be a function such that + ``forward(right_inverse(X)) == X`` (see + `right inverse `_). + Sometimes, when the parametrization is not surjective, it may be reasonable + to relax this. + + .. warning:: + + If a parametrization depends on several inputs, :func:`~register_parametrization` + will register a number of new parameters. If such parametrization is registered + after the optimizer is created, these new parameters will need to be added manually + to the optimizer. See :meth:`torch.Optimizer.add_param_group`. + + Args: + module (nn.Module): module on which to register the parametrization + tensor_name (str): name of the parameter or buffer on which to register + the parametrization + parametrization (nn.Module): the parametrization to register + Keyword args: + unsafe (bool): a boolean flag that denotes whether the parametrization + may change the dtype and shape of the tensor. Default: `False` + Warning: the parametrization is not checked for consistency upon registration. + Enable this flag at your own risk. 
+ + Raises: + ValueError: if the module does not have a parameter or a buffer named :attr:`tensor_name` + + Examples: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) + >>> import torch + >>> import torch.nn as nn + >>> import torch.nn.utils.parametrize as P + >>> + >>> class Symmetric(nn.Module): + >>> def forward(self, X): + >>> return X.triu() + X.triu(1).T # Return a symmetric matrix + >>> + >>> def right_inverse(self, A): + >>> return A.triu() + >>> + >>> m = nn.Linear(5, 5) + >>> P.register_parametrization(m, "weight", Symmetric()) + >>> print(torch.allclose(m.weight, m.weight.T)) # m.weight is now symmetric + True + >>> A = torch.rand(5, 5) + >>> A = A + A.T # A is now symmetric + >>> m.weight = A # Initialize the weight to be the symmetric matrix A + >>> print(torch.allclose(m.weight, A)) + True + + >>> class RankOne(nn.Module): + >>> def forward(self, x, y): + >>> # Form a rank 1 matrix multiplying two vectors + >>> return x.unsqueeze(-1) @ y.unsqueeze(-2) + >>> + >>> def right_inverse(self, Z): + >>> # Project Z onto the rank 1 matrices + >>> U, S, Vh = torch.linalg.svd(Z, full_matrices=False) + >>> # Return rescaled singular vectors + >>> s0_sqrt = S[0].sqrt().unsqueeze(-1) + >>> return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt + >>> + >>> linear_rank_one = P.register_parametrization(nn.Linear(4, 4), "weight", RankOne()) + >>> print(torch.linalg.matrix_rank(linear_rank_one.weight).item()) + 1 + + """ + parametrization.train(module.training) + if is_parametrized(module, tensor_name): + # Correctness checks. + # If A is the space of tensors with shape and dtype equal to module.weight + # we check that parametrization.forward and parametrization.right_inverse are + # functions from A to A + if not unsafe: + Y = getattr(module, tensor_name) + X = parametrization(Y) + if not isinstance(X, Tensor): + raise ValueError( + f"A parametrization must return a tensor. Got {type(X).__name__}." 
+ ) + if X.dtype != Y.dtype: + raise ValueError( + "Registering a parametrization may not change the dtype of the tensor, unless the `unsafe` flag is enabled.\n" + f"module.{tensor_name}.dtype: {Y.dtype}\n" + f"parametrization(module.{tensor_name}).dtype: {X.dtype}" + ) + if X.shape != Y.shape: + raise ValueError( + "Registering a parametrization may not change the shape of the tensor, unless the `unsafe` flag is enabled.\n" + f"module.{tensor_name}.shape: {Y.shape}\n" + f"parametrization(module.{tensor_name}).shape: {X.shape}" + ) + if hasattr(parametrization, "right_inverse"): + try: + Z = parametrization.right_inverse(X) # type: ignore[operator] + except NotImplementedError: + pass + else: + if not isinstance(Z, Tensor): + raise ValueError( + f"parametrization.right_inverse must return a tensor. Got: {type(Z).__name__}" + ) + if Z.dtype != Y.dtype: + raise ValueError( + "The tensor returned by parametrization.right_inverse must have the same dtype " + f"as module.{tensor_name}, unless the `unsafe` flag is enabled.\n" + f"module.{tensor_name}.dtype: {Y.dtype}\n" + f"returned dtype: {Z.dtype}" + ) + if Z.shape != Y.shape: + raise ValueError( + "The tensor returned by parametrization.right_inverse must have the same shape " + f"as module.{tensor_name}, unless the `unsafe` flag is enabled.\n" + f"module.{tensor_name}.shape: {Y.shape}\n" + f"returned shape: {Z.shape}" + ) + # else right_inverse is assumed to be the identity + + # add the new parametrization to the parametrization list + assert isinstance(module.parametrizations, ModuleDict) # Make mypy happy + module.parametrizations[tensor_name].append(parametrization) + # If unsafe was True in previous parametrization, keep it enabled + module.parametrizations[tensor_name].unsafe |= unsafe # type: ignore[index, union-attr] + elif tensor_name in module._buffers or tensor_name in module._parameters: + # Set the parametrization mechanism + # Fetch the original buffer or parameter + original = getattr(module, 
tensor_name) + # We create this early to check for possible errors + parametrizations = ParametrizationList([parametrization], original, unsafe=unsafe) + # Delete the previous parameter or buffer + delattr(module, tensor_name) + # If this is the first parametrization registered on the module, + # we prepare the module to inject the property + if not is_parametrized(module): + # Change the class + _inject_new_class(module) + # Inject a ``ModuleDict`` into the instance under module.parametrizations + module.parametrizations = ModuleDict() + # Add a property into the class + _inject_property(module, tensor_name) + # Add a ParametrizationList + assert isinstance(module.parametrizations, ModuleDict) # Make mypy happy + module.parametrizations[tensor_name] = parametrizations + else: + raise ValueError( + f"Module '{module}' does not have a parameter, a buffer, or a " + f"parametrized element with name '{tensor_name}'" + ) + return module + + +def is_parametrized(module: Module, tensor_name: Optional[str] = None) -> bool: + r"""Returns ``True`` if module has an active parametrization. + + If the argument :attr:`tensor_name` is specified, returns ``True`` if + ``module[tensor_name]`` is parametrized. + + Args: + module (nn.Module): module to query + tensor_name (str, optional): attribute in the module to query + Default: ``None`` + """ + parametrizations = getattr(module, "parametrizations", None) + if parametrizations is None or not isinstance(parametrizations, ModuleDict): + return False + if tensor_name is None: + # Check that there is at least one parametrized buffer or Parameter + return len(parametrizations) > 0 + else: + return tensor_name in parametrizations + +def remove_parametrizations( + module: Module, tensor_name: str, leave_parametrized: bool = True +) -> Module: + r"""Removes the parametrizations on a tensor in a module. + + - If ``leave_parametrized=True``, ``module[tensor_name]`` will be set to + its current output. 
In this case, the parametrization shall not change the ``dtype`` + of the tensor. + - If ``leave_parametrized=False``, ``module[tensor_name]`` will be set to + the unparametrised tensor in ``module.parametrizations[tensor_name].original``. + This is only possible when the parametrization depends on just one tensor. + + Args: + module (nn.Module): module from which remove the parametrization + tensor_name (str): name of the parametrization to be removed + leave_parametrized (bool, optional): leave the attribute :attr:`tensor_name` parametrized. + Default: ``True`` + + Returns: + Module: module + + Raises: + ValueError: if ``module[tensor_name]`` is not parametrized + ValueError: if ``leave_parametrized=False`` and the parametrization depends on several tensors + """ + + if not is_parametrized(module, tensor_name): + raise ValueError(f"Module {module} does not have a parametrization on {tensor_name}") + + # Fetch the original tensor + assert isinstance(module.parametrizations, ModuleDict) # Make mypy happy + parametrizations = module.parametrizations[tensor_name] + if parametrizations.is_tensor: + original = parametrizations.original + if leave_parametrized: + with torch.no_grad(): + t = getattr(module, tensor_name) + # We know they have the same dtype because we have checked this when registering the + # parametrizations. As such, we can use set_ + # We do this so that the parameter does not to change the id() + # This way the user does not need to update the optimizer + with torch.no_grad(): + if type(original) is torch.Tensor: + original.set_(t) + else: + try: + original.set_(t) + except RuntimeError as e: + # TODO: Fix this for tensor subclasses that are parameters: + # RuntimeError: set_storage is not allowed on a Tensor created from .data or .detach(). 
+ raise RuntimeError("Calling remove_parametrizations() with leave_parametrized=True " + "for a parameter that is an instance of a tensor subclass requires " + "set_() to be implemented correctly for the tensor subclass. Either " + "set leave_parametrized=False or provide a working implementation for " + "set_() in the tensor subclass.") from e + else: + if leave_parametrized: + # We cannot use no_grad because we need to know whether one or more + # original tensors required grad + t = getattr(module, tensor_name) + # We'll have to trust the user to add it to the optimizer + original = Parameter(t) if t.requires_grad else t + else: + raise ValueError("Cannot leave unparametrized (`leave_parametrized=False`) a tensor " + "that is parametrized in terms of a sequence of tensors.") + + # Delete the property that manages the parametrization + delattr(module.__class__, tensor_name) + # Delete the ParametrizationList + del module.parametrizations[tensor_name] + + # Restore the parameter / buffer into the main class + _register_parameter_or_buffer(module, tensor_name, original) + + # Roll back the parametrized class if no other buffer or parameter + # is currently parametrized in this class + if not is_parametrized(module): + delattr(module, "parametrizations") + # Restore class + orig_cls = module.__class__.__bases__[0] + module.__class__ = orig_cls + return module + +def type_before_parametrizations(module: Module) -> type: + r"""Returns the module type before parametrizations were applied and if not, + then it returns the module type. + + Args: + module (nn.Module): module to get type of + """ + if is_parametrized(module): + return module.__class__.__bases__[0] + else: + return type(module) + +def transfer_parametrizations_and_params( + from_module: Module, to_module: Module, tensor_name: Optional[str] = None +) -> Module: + r"""Transfers parametrizations and the parameters they parametrize from from_module + to to_module. 
If tensor_name is specified, only transfers the specified parameter, otherwise + transfers all parametrized parameters. If those parameters do not exist in to_module, it will create them. + Does nothing if from_module is not parametrized. + + Args: + from_module (nn.Module): module to transfer from + to_module (nn.Module): module to transfer to + tensor_name (str, optional): parameter to transfer + + Returns: + Module: to_module + """ + if is_parametrized(from_module): + assert isinstance(from_module.parametrizations, ModuleDict) # for mypy + + # get list of all params or the single param to transfer + parameters_to_transfer: Union[list, ModuleDict] = ( + from_module.parametrizations if tensor_name is None else [tensor_name] + ) + + assert hasattr(parameters_to_transfer, "__iter__") # for mypy + for parameter_name in parameters_to_transfer: + + # initialize the to-be-transfered param in to_module if it doesn't exist already + if not hasattr(to_module, parameter_name): + setattr( + to_module, + parameter_name, + Parameter(getattr(from_module, parameter_name)), + ) + + # apply the params's parametrizations to to_module + for param_func in from_module.parametrizations[parameter_name]: + register_parametrization(to_module, parameter_name, param_func) + assert isinstance(to_module.parametrizations, ModuleDict) # for mypy + + # make values match, original values can be stored in either original or + # original0, original1..., need to check both cases + if hasattr(from_module.parametrizations[parameter_name], "original"): + to_module.parametrizations[parameter_name].original = \ + from_module.parametrizations[parameter_name].original + else: + num = 0 + orig_num = "original" + str(num) + # loop through each original# until all values have been set + while hasattr(from_module.parametrizations[parameter_name], orig_num): + setattr( + to_module.parametrizations[parameter_name], + orig_num, + getattr(from_module.parametrizations[parameter_name], orig_num), + ) + num = num + 1 
+ orig_num = "original" + str(num) + + return to_module diff --git a/wemm/lib/python3.10/site-packages/torch/nn/utils/rnn.py b/wemm/lib/python3.10/site-packages/torch/nn/utils/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..b9db6a5f1a9c488b0fc52fd84b9eb218bffbb740 --- /dev/null +++ b/wemm/lib/python3.10/site-packages/torch/nn/utils/rnn.py @@ -0,0 +1,519 @@ +from collections import namedtuple +import warnings + +import torch +from torch import Tensor +from ... import _VF +from ..._jit_internal import Optional + +from typing import List, Tuple, Union, Iterable + + +__all__ = ['PackedSequence', 'invert_permutation', 'pack_padded_sequence', 'pad_packed_sequence', 'pad_sequence', + 'unpad_sequence', 'pack_sequence', 'unpack_sequence'] + +PackedSequence_ = namedtuple('PackedSequence_', + ['data', 'batch_sizes', 'sorted_indices', 'unsorted_indices']) + +# type annotation for PackedSequence_ to make it compatible with TorchScript +PackedSequence_.__annotations__ = {'data': torch.Tensor, 'batch_sizes': torch.Tensor, + 'sorted_indices': Optional[torch.Tensor], + 'unsorted_indices': Optional[torch.Tensor]} + + +def bind(optional, fn): + if optional is None: + return None + return fn(optional) + + +class PackedSequence(PackedSequence_): + r"""Holds the data and list of :attr:`batch_sizes` of a packed sequence. + + All RNN modules accept packed sequences as inputs. + + Note: + Instances of this class should never be created manually. They are meant + to be instantiated by functions like :func:`pack_padded_sequence`. + + Batch sizes represent the number elements at each sequence step in + the batch, not the varying sequence lengths passed to + :func:`pack_padded_sequence`. For instance, given data ``abc`` and ``x`` + the :class:`PackedSequence` would contain data ``axbc`` with + ``batch_sizes=[2,1,1]``. 
+ + Attributes: + data (Tensor): Tensor containing packed sequence + batch_sizes (Tensor): Tensor of integers holding + information about the batch size at each sequence step + sorted_indices (Tensor, optional): Tensor of integers holding how this + :class:`PackedSequence` is constructed from sequences. + unsorted_indices (Tensor, optional): Tensor of integers holding how this + to recover the original sequences with correct order. + + .. note:: + :attr:`data` can be on arbitrary device and of arbitrary dtype. + :attr:`sorted_indices` and :attr:`unsorted_indices` must be ``torch.int64`` + tensors on the same device as :attr:`data`. + + However, :attr:`batch_sizes` should always be a CPU ``torch.int64`` tensor. + + This invariant is maintained throughout :class:`PackedSequence` class, + and all functions that construct a `:class:PackedSequence` in PyTorch + (i.e., they only pass in tensors conforming to this constraint). + + """ + def __new__(cls, data, batch_sizes=None, sorted_indices=None, unsorted_indices=None): + return super(PackedSequence, cls).__new__( + cls, + *_packed_sequence_init_args(data, batch_sizes, sorted_indices, + unsorted_indices)) + + # NOTE [ device and dtype of a PackedSequence ] + # + # See the note above in doc string (starting with ":attr:`data` can be on + # arbitrary device..."). + def pin_memory(self): + # Why not convert `batch_sizes`? 
+ # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.pin_memory(), self.batch_sizes, + bind(self.sorted_indices, lambda t: t.pin_memory()), + bind(self.unsorted_indices, lambda t: t.pin_memory())) + + def cuda(self, *args, **kwargs): + # Tests to see if 'cuda' should be added to kwargs + ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs) + if ex.is_cuda: + return self.to(*args, **kwargs) + return self.to(*args, device='cuda', **kwargs) + + def cpu(self, *args, **kwargs): + + ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs) + if ex.device.type == 'cpu': + return self.to(*args, **kwargs) + return self.to(*args, device='cpu', **kwargs) + + def double(self): + return self.to(dtype=torch.double) + + def float(self): + return self.to(dtype=torch.float) + + def half(self): + return self.to(dtype=torch.half) + + def long(self): + return self.to(dtype=torch.long) + + def int(self): + return self.to(dtype=torch.int) + + def short(self): + return self.to(dtype=torch.short) + + def char(self): + return self.to(dtype=torch.int8) + + def byte(self): + return self.to(dtype=torch.uint8) + + def to(self, *args, **kwargs): + r"""Performs dtype and/or device conversion on `self.data`. + + It has similar signature as :meth:`torch.Tensor.to`, except optional + arguments like `non_blocking` and `copy` should be passed as kwargs, + not args, or they will not apply to the index tensors. + + .. note:: + + If the ``self.data`` Tensor already has the correct :class:`torch.dtype` + and :class:`torch.device`, then ``self`` is returned. + Otherwise, returns a copy with the desired configuration. + """ + + # Why not convert `batch_sizes`? 
+ # See NOTE [ device and dtype of a PackedSequence ] + data = self.data.to(*args, **kwargs) + if data is self.data: + return self + else: + # Does not forward device or dtype arg/kwargs, device is set from data.device + kwargs = {k : v for k, v in filter(lambda t: t[0] != 'device' and t[0] != 'dtype', kwargs.items())} + sorted_indices = bind(self.sorted_indices, lambda t: t.to(data.device, **kwargs)) + unsorted_indices = bind(self.unsorted_indices, lambda t: t.to(data.device, **kwargs)) + return type(self)(data, self.batch_sizes, sorted_indices, unsorted_indices) + + @property + def is_cuda(self): + r"""Returns true if `self.data` stored on a gpu""" + return self.data.is_cuda + + def is_pinned(self): + r"""Returns true if `self.data` stored on in pinned memory""" + return self.data.is_pinned() + + +# TorchScript doesn't support constructors on named tuples, so we use this helper +# method to construct PackedSequence +def _packed_sequence_init_args( + data: Tensor, + batch_sizes: Optional[Tensor] = None, + sorted_indices: Optional[Tensor] = None, + unsorted_indices: Optional[Tensor] = None, +) -> Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]: + # NB: if unsorted_indices is provided, it should be the inverse permutation + # to sorted_indices. Don't assert it here because the PackedSequence ctor + # should only be used internally. + + if unsorted_indices is None: + unsorted_indices = invert_permutation(sorted_indices) + + # support being called as `PackedSequence(data, batch_sizes, sorted_indices)` + if batch_sizes is not None: + # TODO: Re-enable this check (.type isn't supported in TorchScript) + if batch_sizes.device.type != 'cpu': + raise ValueError( + "batch_sizes should always be on CPU. " + "Instances of PackedSequence should never be created manually. " + "They should be instantiated by functions like pack_sequence " + "and pack_padded_sequences in nn.utils.rnn. 
" + "https://pytorch.org/docs/stable/nn.html#torch.nn.utils.rnn.pack_sequence") + return data, batch_sizes, sorted_indices, unsorted_indices + + # support being called as `PackedSequence((data, batch_sizes), *, sorted_indices)` + else: + assert isinstance(data, (list, tuple)) and len(data) == 2 + return data[0], data[1], sorted_indices, unsorted_indices + + +def _packed_sequence_init( + data: Tensor, + batch_sizes: Optional[Tensor] = None, + sorted_indices: Optional[Tensor] = None, + unsorted_indices: Optional[Tensor] = None, +) -> PackedSequence: + data, batch_sizes, sorted_indices, unsorted_indices = _packed_sequence_init_args( + data, batch_sizes, sorted_indices, unsorted_indices) + return PackedSequence(data, batch_sizes, sorted_indices, unsorted_indices) + + +def invert_permutation(permutation: Optional[Tensor]) -> Optional[Tensor]: + if permutation is None: + return None + output = torch.empty_like(permutation, memory_format=torch.legacy_contiguous_format) + output.scatter_(0, permutation, + torch.arange(0, permutation.numel(), device=permutation.device)) + return output + + +def pack_padded_sequence( + input: Tensor, + lengths: Tensor, + batch_first: bool = False, + enforce_sorted: bool = True, +) -> PackedSequence: + r"""Packs a Tensor containing padded sequences of variable length. + + :attr:`input` can be of size ``T x B x *`` where `T` is the length of the + longest sequence (equal to ``lengths[0]``), ``B`` is the batch size, and + ``*`` is any number of dimensions (including 0). If ``batch_first`` is + ``True``, ``B x T x *`` :attr:`input` is expected. + + For unsorted sequences, use `enforce_sorted = False`. If :attr:`enforce_sorted` is + ``True``, the sequences should be sorted by length in a decreasing order, i.e. + ``input[:,0]`` should be the longest sequence, and ``input[:,B-1]`` the shortest + one. `enforce_sorted = True` is only necessary for ONNX export. + + Note: + This function accepts any input that has at least two dimensions. 
You + can apply it to pack the labels, and use the output of the RNN with + them to compute the loss directly. A Tensor can be retrieved from + a :class:`PackedSequence` object by accessing its ``.data`` attribute. + + Args: + input (Tensor): padded batch of variable length sequences. + lengths (Tensor or list(int)): list of sequence lengths of each batch + element (must be on the CPU if provided as a tensor). + batch_first (bool, optional): if ``True``, the input is expected in ``B x T x *`` + format. + enforce_sorted (bool, optional): if ``True``, the input is expected to + contain sequences sorted by length in a decreasing order. If + ``False``, the input will get sorted unconditionally. Default: ``True``. + + Returns: + a :class:`PackedSequence` object + """ + if torch._C._get_tracing_state() and not isinstance(lengths, torch.Tensor): + warnings.warn('pack_padded_sequence has been called with a Python list of ' + 'sequence lengths. The tracer cannot track the data flow of Python ' + 'values, and it will treat them as constants, likely rendering ' + 'the trace incorrect for any other combination of lengths.', + stacklevel=2) + lengths = torch.as_tensor(lengths, dtype=torch.int64) + if enforce_sorted: + sorted_indices = None + else: + lengths, sorted_indices = torch.sort(lengths, descending=True) + sorted_indices = sorted_indices.to(input.device) + batch_dim = 0 if batch_first else 1 + input = input.index_select(batch_dim, sorted_indices) + + data, batch_sizes = \ + _VF._pack_padded_sequence(input, lengths, batch_first) + return _packed_sequence_init(data, batch_sizes, sorted_indices, None) + + +def pad_packed_sequence( + sequence: PackedSequence, + batch_first: bool = False, + padding_value: float = 0.0, + total_length: Optional[int] = None, +) -> Tuple[Tensor, Tensor]: + r"""Pads a packed batch of variable length sequences. + + It is an inverse operation to :func:`pack_padded_sequence`. 
+ + The returned Tensor's data will be of size ``T x B x *``, where `T` is the length + of the longest sequence and `B` is the batch size. If ``batch_first`` is True, + the data will be transposed into ``B x T x *`` format. + + Example: + >>> from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence + >>> seq = torch.tensor([[1, 2, 0], [3, 0, 0], [4, 5, 6]]) + >>> lens = [2, 1, 3] + >>> packed = pack_padded_sequence(seq, lens, batch_first=True, enforce_sorted=False) + >>> packed + PackedSequence(data=tensor([4, 1, 3, 5, 2, 6]), batch_sizes=tensor([3, 2, 1]), + sorted_indices=tensor([2, 0, 1]), unsorted_indices=tensor([1, 2, 0])) + >>> seq_unpacked, lens_unpacked = pad_packed_sequence(packed, batch_first=True) + >>> seq_unpacked + tensor([[1, 2, 0], + [3, 0, 0], + [4, 5, 6]]) + >>> lens_unpacked + tensor([2, 1, 3]) + + .. note:: + :attr:`total_length` is useful to implement the + ``pack sequence -> recurrent network -> unpack sequence`` pattern in a + :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`. + See :ref:`this FAQ section ` for + details. + + Args: + sequence (PackedSequence): batch to pad + batch_first (bool, optional): if ``True``, the output will be in ``B x T x *`` + format. + padding_value (float, optional): values for padded elements. + total_length (int, optional): if not ``None``, the output will be padded to + have length :attr:`total_length`. This method will throw :class:`ValueError` + if :attr:`total_length` is less than the max sequence length in + :attr:`sequence`. + + Returns: + Tuple of Tensor containing the padded sequence, and a Tensor + containing the list of lengths of each sequence in the batch. + Batch elements will be re-ordered as they were ordered originally when + the batch was passed to ``pack_padded_sequence`` or ``pack_sequence``. 
+ + + + + """ + max_seq_length = sequence.batch_sizes.size(0) + if total_length is not None: + if total_length < max_seq_length: + raise ValueError("Expected total_length to be at least the length " + "of the longest sequence in input, but got " + "total_length={} and max sequence length being {}" + .format(total_length, max_seq_length)) + max_seq_length = total_length + padded_output, lengths = _VF._pad_packed_sequence( + sequence.data, sequence.batch_sizes, batch_first, padding_value, max_seq_length) + unsorted_indices = sequence.unsorted_indices + if unsorted_indices is not None: + batch_dim = 0 if batch_first else 1 + return padded_output.index_select(batch_dim, unsorted_indices), lengths[unsorted_indices.cpu()] + return padded_output, lengths + + +def pad_sequence( + sequences: Union[Tensor, List[Tensor]], + batch_first: bool = False, + padding_value: float = 0.0, +) -> Tensor: + r"""Pad a list of variable length Tensors with ``padding_value`` + + ``pad_sequence`` stacks a list of Tensors along a new dimension, + and pads them to equal length. For example, if the input is list of + sequences with size ``L x *`` and if batch_first is False, and ``T x B x *`` + otherwise. + + `B` is batch size. It is equal to the number of elements in ``sequences``. + `T` is length of the longest sequence. + `L` is length of the sequence. + `*` is any number of trailing dimensions, including none. + + Example: + >>> from torch.nn.utils.rnn import pad_sequence + >>> a = torch.ones(25, 300) + >>> b = torch.ones(22, 300) + >>> c = torch.ones(15, 300) + >>> pad_sequence([a, b, c]).size() + torch.Size([25, 3, 300]) + + Note: + This function returns a Tensor of size ``T x B x *`` or ``B x T x *`` + where `T` is the length of the longest sequence. This function assumes + trailing dimensions and type of all the Tensors in sequences are same. + + Args: + sequences (list[Tensor]): list of variable length sequences. 
+ batch_first (bool, optional): output will be in ``B x T x *`` if True, or in + ``T x B x *`` otherwise. Default: False. + padding_value (float, optional): value for padded elements. Default: 0. + + Returns: + Tensor of size ``T x B x *`` if :attr:`batch_first` is ``False``. + Tensor of size ``B x T x *`` otherwise + """ + + if not (torch.jit.is_tracing() or torch.jit.is_scripting()): + # JIT doesn't support `Iterable` + if not isinstance(sequences, Iterable): + msg = ('pad_sequence: Expected iterable for input sequences, but got arg of type: ' + f'{type(sequences)}') + raise RuntimeError(msg) + + # In JIT context this leads to, + # RuntimeError: cannot statically infer the expected size of a list in this context + sequences = tuple(sequences) + else: + # For JIT, we only support Union[Tensor, Tuple[Tensor]] + if isinstance(sequences, torch.Tensor): + sequences = sequences.unbind(0) + + # assuming trailing dimensions and type of all the Tensors + # in sequences are same and fetching those from sequences[0] + return torch._C._nn.pad_sequence(sequences, batch_first, padding_value) + + +def unpad_sequence( + padded_sequences: Tensor, + lengths: Tensor, + batch_first: bool = False, +) -> List[Tensor]: + r"""Unpad padded Tensor into a list of variable length Tensors + + ``unpad_sequence`` unstacks padded Tensor into a list of variable length Tensors. + + Example: + >>> from torch.nn.utils.rnn import pad_sequence, unpad_sequence + >>> a = torch.ones(25, 300) + >>> b = torch.ones(22, 300) + >>> c = torch.ones(15, 300) + >>> sequences = [a, b, c] + >>> padded_sequences = pad_sequence(sequences) + >>> lengths = torch.as_tensor([v.size(0) for v in sequences]) + >>> unpadded_sequences = unpad_sequence(padded_sequences, lengths) + >>> torch.allclose(sequences[0], unpadded_sequences[0]) + True + >>> torch.allclose(sequences[1], unpadded_sequences[1]) + True + >>> torch.allclose(sequences[2], unpadded_sequences[2]) + True + + Args: + padded_sequences (Tensor): padded sequences. 
+ lengths (Tensor): length of original (unpadded) sequences. + batch_first (bool, optional): whether batch dimension first or not. Default: False. + + Returns: + a list of :class:`Tensor` objects + """ + + unpadded_sequences = [] + + if not batch_first: + padded_sequences.transpose_(0, 1) + + max_length = padded_sequences.shape[1] + idx = torch.arange(max_length) + + for seq, length in zip(padded_sequences, lengths): + mask = idx < length + unpacked_seq = seq[mask] + unpadded_sequences.append(unpacked_seq) + + return unpadded_sequences + + +def pack_sequence(sequences: List[Tensor], enforce_sorted: bool = True) -> PackedSequence: + r"""Packs a list of variable length Tensors + + Consecutive call of the next functions: ``pad_sequence``, ``pack_padded_sequence``. + + ``sequences`` should be a list of Tensors of size ``L x *``, where `L` is + the length of a sequence and `*` is any number of trailing dimensions, + including zero. + + For unsorted sequences, use `enforce_sorted = False`. If ``enforce_sorted`` + is ``True``, the sequences should be sorted in the order of decreasing length. + ``enforce_sorted = True`` is only necessary for ONNX export. + + + Example: + >>> from torch.nn.utils.rnn import pack_sequence + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5]) + >>> c = torch.tensor([6]) + >>> pack_sequence([a, b, c]) + PackedSequence(data=tensor([1, 4, 6, 2, 5, 3]), batch_sizes=tensor([3, 2, 1]), sorted_indices=None, unsorted_indices=None) + + + Args: + sequences (list[Tensor]): A list of sequences of decreasing length. + enforce_sorted (bool, optional): if ``True``, checks that the input + contains sequences sorted by length in a decreasing order. If + ``False``, this condition is not checked. Default: ``True``. 
+ + Returns: + a :class:`PackedSequence` object + """ + lengths = torch.as_tensor([v.size(0) for v in sequences]) + return pack_padded_sequence(pad_sequence(sequences), lengths, enforce_sorted=enforce_sorted) + + +def unpack_sequence(packed_sequences: PackedSequence) -> List[Tensor]: + r"""Unpacks PackedSequence into a list of variable length Tensors + + ``packed_sequences`` should be a PackedSequence object. + + + Example: + >>> from torch.nn.utils.rnn import pack_sequence, unpack_sequence + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5]) + >>> c = torch.tensor([6]) + >>> sequences = [a, b, c] + >>> print(sequences) + [tensor([1, 2, 3]), tensor([4, 5]), tensor([6])] + >>> packed_sequences = pack_sequence(sequences) + >>> print(packed_sequences) + PackedSequence(data=tensor([1, 4, 6, 2, 5, 3]), batch_sizes=tensor([3, 2, 1]), sorted_indices=None, unsorted_indices=None) + >>> unpacked_sequences = unpack_sequence(packed_sequences) + >>> print(unpacked_sequences) + [tensor([1, 2, 3]), tensor([4, 5]), tensor([6])] + + + Args: + packed_sequences (PackedSequence): A PackedSequence object. + + Returns: + a list of :class:`Tensor` objects + """ + + padded_sequences, lengths = pad_packed_sequence(packed_sequences, batch_first=True) + unpacked_sequences = unpad_sequence(padded_sequences, lengths, batch_first=True) + return unpacked_sequences