ZTWHHH committed on
Commit
2e431c6
·
verified ·
1 Parent(s): 4013689

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. janus/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc +3 -0
  3. janus/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc +3 -0
  4. janus/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/__init__.cpython-310.pyc +0 -0
  5. janus/lib/python3.10/site-packages/torch/multiprocessing/_atfork.py +35 -0
  6. janus/lib/python3.10/site-packages/torch/multiprocessing/queue.py +43 -0
  7. janus/lib/python3.10/site-packages/torch/multiprocessing/reductions.py +647 -0
  8. janus/lib/python3.10/site-packages/torch/nn/__init__.py +62 -0
  9. janus/lib/python3.10/site-packages/torch/nn/_reduction.py +60 -0
  10. janus/lib/python3.10/site-packages/torch/nn/backends/__init__.py +0 -0
  11. janus/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc +0 -0
  12. janus/lib/python3.10/site-packages/torch/nn/common_types.py +44 -0
  13. janus/lib/python3.10/site-packages/torch/nn/functional.py +0 -0
  14. janus/lib/python3.10/site-packages/torch/nn/functional.pyi +691 -0
  15. janus/lib/python3.10/site-packages/torch/nn/grad.py +298 -0
  16. janus/lib/python3.10/site-packages/torch/nn/modules/__init__.py +334 -0
  17. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/activation.cpython-310.pyc +0 -0
  18. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/adaptive.cpython-310.pyc +0 -0
  19. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/channelshuffle.cpython-310.pyc +0 -0
  20. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/container.cpython-310.pyc +0 -0
  21. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/conv.cpython-310.pyc +0 -0
  22. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/dropout.cpython-310.pyc +0 -0
  23. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/instancenorm.cpython-310.pyc +0 -0
  24. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/lazy.cpython-310.pyc +0 -0
  25. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/loss.cpython-310.pyc +0 -0
  26. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/module.cpython-310.pyc +0 -0
  27. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/padding.cpython-310.pyc +0 -0
  28. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  29. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/sparse.cpython-310.pyc +0 -0
  30. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/transformer.cpython-310.pyc +0 -0
  31. janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/upsampling.cpython-310.pyc +0 -0
  32. janus/lib/python3.10/site-packages/torch/nn/modules/batchnorm.py +883 -0
  33. janus/lib/python3.10/site-packages/torch/nn/modules/container.py +976 -0
  34. janus/lib/python3.10/site-packages/torch/nn/modules/lazy.py +289 -0
  35. janus/lib/python3.10/site-packages/torch/nn/modules/linear.py +293 -0
  36. janus/lib/python3.10/site-packages/torch/nn/modules/padding.py +813 -0
  37. janus/lib/python3.10/site-packages/torch/nn/modules/pooling.py +1494 -0
  38. janus/lib/python3.10/site-packages/torch/nn/modules/rnn.py +1824 -0
  39. janus/lib/python3.10/site-packages/torch/nn/modules/sparse.py +546 -0
  40. janus/lib/python3.10/site-packages/torch/nn/modules/transformer.py +1198 -0
  41. janus/lib/python3.10/site-packages/torch/nn/quantizable/__pycache__/__init__.cpython-310.pyc +0 -0
  42. janus/lib/python3.10/site-packages/torch/nn/quantizable/modules/__init__.py +9 -0
  43. janus/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  44. janus/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc +0 -0
  45. janus/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  46. janus/lib/python3.10/site-packages/torch/nn/quantizable/modules/activation.py +10 -0
  47. janus/lib/python3.10/site-packages/torch/nn/quantizable/modules/rnn.py +11 -0
  48. janus/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
  49. janus/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  50. janus/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -451,3 +451,5 @@ janus/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_
451
  janus/lib/python3.10/site-packages/sympy/printing/__pycache__/latex.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
452
  janus/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
453
  janus/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
451
  janus/lib/python3.10/site-packages/sympy/printing/__pycache__/latex.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
452
  janus/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
453
  janus/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
454
+ janus/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
455
+ janus/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
janus/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:554deaf6e39d37e541deda7a176b2807bbeb1185466af92b5241524b42e87a0d
3
+ size 111973
janus/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf29f3154afe9c9673ed42ca2fcbe239138077c20b759d6681c548ed458bebb1
3
+ size 137705
janus/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.76 kB). View file
 
janus/lib/python3.10/site-packages/torch/multiprocessing/_atfork.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import sys
3
+
4
+
5
+ __all__ = ["register_after_fork"]
6
+
7
+ if sys.platform == "win32":
8
+ import multiprocessing.util as _util
9
+
10
+ def _register(func):
11
+ def wrapper(arg):
12
+ func()
13
+
14
+ _util.register_after_fork(_register, wrapper)
15
+
16
+ else:
17
+ import os
18
+
19
+ def _register(func):
20
+ os.register_at_fork(after_in_child=func)
21
+
22
+
23
+ def register_after_fork(func):
24
+ """Register a callable to be executed in the child process after a fork.
25
+
26
+ Note:
27
+ In python < 3.7 this will only work with processes created using the
28
+ ``multiprocessing`` module. In python >= 3.7 it also works with
29
+ ``os.fork()``.
30
+
31
+ Args:
32
+ func (function): Function taking no arguments to be called in the child after fork
33
+
34
+ """
35
+ _register(func)
janus/lib/python3.10/site-packages/torch/multiprocessing/queue.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import io
3
+ import multiprocessing.queues
4
+ import pickle
5
+ from multiprocessing.reduction import ForkingPickler
6
+
7
+
8
class ConnectionWrapper:
    """Proxy class for _multiprocessing.Connection which uses ForkingPickler for object serialization."""

    def __init__(self, conn):
        self.conn = conn

    def send(self, obj):
        # Pickle into an in-memory buffer first, then ship the raw bytes
        # through the wrapped connection's send_bytes.
        payload = io.BytesIO()
        ForkingPickler(payload, pickle.HIGHEST_PROTOCOL).dump(obj)
        self.send_bytes(payload.getvalue())

    def recv(self):
        # Symmetric to send(): pull raw bytes and unpickle them.
        return pickle.loads(self.recv_bytes())

    def __getattr__(self, name):
        # Delegate everything we don't define (send_bytes, recv_bytes, poll,
        # fileno, ...) to the wrapped connection.
        if "conn" in self.__dict__:
            return getattr(self.conn, name)
        raise AttributeError(f"'{type(self).__name__}' object has no attribute 'conn'")
27
+
28
+
29
class Queue(multiprocessing.queues.Queue):
    """A multiprocessing Queue whose pipe endpoints serialize via ForkingPickler.

    Wrapping the reader/writer in ConnectionWrapper makes every enqueued
    object go through ForkingPickler, which is where torch registers its
    custom tensor/storage reducers (see reductions.py).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Replace the raw pipe connections with ForkingPickler-backed proxies.
        self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
        self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
        # The base class caches bound _send/_recv callables; repoint them at
        # the wrapped connections so the wrappers are actually used.
        self._send = self._writer.send
        self._recv = self._reader.recv
36
+
37
+
38
class SimpleQueue(multiprocessing.queues.SimpleQueue):
    """A multiprocessing SimpleQueue that serializes via ForkingPickler-wrapped connections."""

    def _make_methods(self):
        # _make_methods may run more than once (e.g. after unpickling the
        # queue), so only wrap the raw connections the first time through.
        if not isinstance(self._reader, ConnectionWrapper):
            self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
            self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
        super()._make_methods()  # type: ignore[misc]
janus/lib/python3.10/site-packages/torch/multiprocessing/reductions.py ADDED
@@ -0,0 +1,647 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import multiprocessing
3
+ import os
4
+ import threading
5
+ from multiprocessing.reduction import ForkingPickler
6
+ from multiprocessing.util import register_after_fork
7
+ from typing import Union
8
+
9
+ import torch
10
+ from torch._namedtensor_internals import check_serializing_named_tensor
11
+
12
+
13
+ try:
14
+ # Early load resource_sharer to prevent a partially initialized instance
15
+ # from being inherited in a forked child process. The reduce_storage method
16
+ # requires this module indirectly through DupFd(). The built-in mp.Queue
17
+ # class pickles arguments in a background thread which may overlap with the
18
+ # fork.
19
+ import multiprocessing.resource_sharer
20
+ except ImportError:
21
+ pass
22
+
23
+
24
class StorageWeakRef:
    r"""A weak reference to a Storage.

    The cdata member is a Python number containing the integer representation of
    the Storage pointer.
    """

    __slots__ = ["cdata", "_free_weak_ref"]

    def __init__(self, storage):
        self.cdata = storage._weak_ref()
        # Save a direct reference to _free_weak_ref because the `torch` module
        # might be cleared during Python shutdown before this module is cleared.
        self._free_weak_ref = torch.Storage._free_weak_ref  # type: ignore[attr-defined]

    @classmethod
    def from_weakref(cls, cdata):
        # Alternate constructor: wrap an already-obtained weak-ref handle
        # without going through a live storage object.
        instance = cls.__new__(cls)
        instance.cdata = cdata
        instance._free_weak_ref = torch.Storage._free_weak_ref  # type: ignore[attr-defined]
        return instance

    def expired(self):
        # True once the referenced Storage has been deallocated.
        return torch.Storage._expired(self.cdata)  # type: ignore[attr-defined]

    def __del__(self):
        self._free_weak_ref(self.cdata)

    def __hash__(self):
        return self.cdata

    def __eq__(self, other):
        # Identity fast path, then compare the underlying storage pointers.
        if id(self) == id(other):
            return True
        return self.cdata == other.cdata
59
+
60
+
61
class SharedCache(dict):
    """Dictionary from multiprocessing handles to StorageWeakRef."""

    def __init__(self) -> None:
        # Threshold at which free_dead_references() runs; it is rescaled to
        # track the number of entries that are still alive.
        self.limit = 128
        # `fork` inherits lock state, so in case we fork when the lock is held,
        # we register a function to reset the lock to a new object to avoid
        # possible deadlocks, following python multiprocessing library design.
        self._after_fork()
        register_after_fork(self, SharedCache._after_fork)

    def _after_fork(self):
        # Fresh lock in the (possibly forked) process.
        self.lock = threading.Lock()

    def get(self, key):
        with self.lock:
            return super().get(key)

    def __setitem__(self, key, storage_ref):
        with self.lock:
            super().__setitem__(key, storage_ref)
            if len(self) > self.limit:
                self.free_dead_references()

    def free_dead_references(self):
        # Drop entries whose storage has been deallocated, then rescale the
        # purge threshold to twice the surviving population (minimum 128).
        dead_keys = [key for key, ref in list(self.items()) if ref.expired()]
        for key in dead_keys:
            del self[key]
        self.limit = max(128, 2 * len(self))
95
+
96
+
97
+ # mapping from handles to StorageWeakRef objects
98
+ shared_cache = SharedCache()
99
+
100
+
101
def rebuild_event(device, handle):
    # Reconstruct a CUDA event in the receiving process from its IPC handle.
    return torch.cuda.Event.from_ipc_handle(device, handle)


def reduce_event(event):
    # ForkingPickler reducer for torch.cuda.Event: ship the IPC handle plus
    # the device so the child process can rebuild the event.
    handle = event.ipc_handle()
    return (rebuild_event, (event.device, handle))
108
+
109
+
110
def rebuild_tensor(cls, storage, metadata):
    """Recreate a tensor of type *cls* on top of *storage* from pickled metadata."""
    storage_offset, size, stride, requires_grad = metadata
    tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
    if cls != torch.nn.parameter.Parameter:
        tensor.requires_grad = requires_grad
        return tensor
    # requires_grad must go through the Parameter constructor rather than be
    # assigned afterwards: the constructor is where the check lives that
    # integer tensors must have requires_grad=False.
    return torch.nn.parameter.Parameter(tensor, requires_grad=requires_grad)
121
+
122
+
123
def rebuild_meta_tensor(
    tensor_cls,
    tensor_size,
    tensor_stride,
    tensor_offset,
    dtype,
    storage_size_bytes,
    requires_grad,
):
    """Recreate a meta-device tensor from its pickled geometry.

    Meta tensors carry no data, so an appropriately sized meta storage plus
    size/stride/offset metadata fully describes the tensor.
    """
    raw_storage = torch.UntypedStorage(storage_size_bytes, device="meta")

    wrapped_storage = torch.TypedStorage(
        wrap_storage=raw_storage, dtype=dtype, _internal=True
    )

    tensor = torch._utils._rebuild_tensor(
        wrapped_storage,
        tensor_offset,
        tensor_size,
        tensor_stride,
    )

    if tensor_cls != torch.nn.parameter.Parameter:
        tensor.requires_grad = requires_grad
        return tensor
    # Integer tensors must get requires_grad=False via the constructor, not
    # by attribute assignment afterwards.
    return torch.nn.parameter.Parameter(tensor, requires_grad=requires_grad)
153
+
154
+
155
def rebuild_cuda_tensor(
    tensor_cls,
    tensor_size,
    tensor_stride,
    tensor_offset,
    storage_cls,
    dtype,
    storage_device,
    storage_handle,
    storage_size_bytes,
    storage_offset_bytes,
    requires_grad,
    ref_counter_handle,
    ref_counter_offset,
    event_handle,
    event_sync_required,
):
    """Reconstruct a CUDA tensor in the receiving process from IPC metadata.

    Counterpart of the CUDA branch of reduce_tensor(); see
    Note [CUDA IPC and the caching allocator] there for why the storage is
    addressed as (allocation handle, byte offset).
    """
    # If storage_handle is None, storage points to nullptr.
    if storage_handle is None or storage_size_bytes == 0:
        storage = storage_cls(0, dtype=dtype, device=storage_device, _internal=True)
    else:
        # Reuse an already-opened mapping of this allocation if we have one;
        # a cudaIpcMemHandle may only be opened once per device per process.
        storage = storage_from_cache(
            storage_cls, (storage_handle, storage_offset_bytes)
        )
        if storage is None:
            torch.cuda._lazy_init()
            storage = storage_cls._new_shared_cuda(
                storage_device,
                storage_handle,
                storage_size_bytes,
                storage_offset_bytes,
                ref_counter_handle,
                ref_counter_offset,
                event_handle,
                event_sync_required,
            )
            shared_cache[(storage_handle, storage_offset_bytes)] = StorageWeakRef(
                storage
            )
        else:
            # We are already ref-counting this Storage, but the producer needs its new ref-counters to be released.
            storage_cls._release_ipc_counter(
                ref_counter_handle, ref_counter_offset, device=storage_device
            )

    _storage = (
        storage
        if isinstance(storage, torch.UntypedStorage)
        else storage._untyped_storage
    )

    t = torch._utils._rebuild_tensor(
        torch.storage.TypedStorage(wrap_storage=_storage, dtype=dtype, _internal=True),
        tensor_offset,
        tensor_size,
        tensor_stride,
    )

    if tensor_cls == torch.nn.parameter.Parameter:
        # It is crucial for integer tensors to receive
        # the requires_grad=False as an argument in the constructor
        t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
    else:
        t.requires_grad = requires_grad

    return t
221
+
222
+
223
def reduce_tensor(tensor):
    """ForkingPickler reducer for tensors.

    Dispatches to specialized reducers for nested and sparse tensors, to an
    IPC-handle-based path for CUDA tensors, to a metadata-only path for meta
    tensors, and otherwise falls back to reducing the (typed) storage plus
    offset/size/stride/requires_grad metadata.
    """
    if tensor.requires_grad and not tensor.is_leaf:
        raise RuntimeError(
            "Cowardly refusing to serialize non-leaf tensor which requires_grad, "
            "since autograd does not support crossing process boundaries. "
            "If you just want to transfer the data, call detach() on the tensor "
            "before serializing (e.g., putting it on the queue)."
        )

    check_serializing_named_tensor(tensor)
    torch.utils.hooks.warn_if_has_hooks(tensor)

    # Note [CUDA IPC and the caching allocator]
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # When you send a CUDA tensor over IPC, you might expect that you will
    # get out the same storage from the other end. However, the CUDA caching
    # allocator makes it difficult to preserve this invariant. Consider
    # the following situation: a tensor of size 0x100 points to offset 0x20 of
    # a storage at 0xA100 of size 0x100. (For simplicity, all of these
    # sizes are given in bytes). HOWEVER, with the caching allocator, this storage
    # might be part of a larger cudaMalloc allocation 0xA000 of size 0x4000.
    #
    # When we want to send this CUDA tensor over IPC, we must send the
    # *entire* cudaMalloc allocation, i.e., the 0xA000 region, not just
    # the storage 0xA100 (because that is what CUDA supports). So, on the
    # other end, there simply isn't any way to say, "Wait, you gave me
    # a bigger region (0xA000) than the one I wanted (0xA100)".
    #
    # OK, so if you sent the cudaMalloc allocation, can you just wrap that up as
    # one storage itself? No, because this cudaMalloc allocation might contain
    # storages of mixed types: float, bytes, double... If you make the entire
    # allocation a single storage of a type A, we'll hit an error when constructing
    # a tensor of type B on the storage.
    #
    # cudaIpcMemHandle is an identifier to access the sender cudaMalloc allocation on the
    # receiver side. However, cudaIpcMemHandles from each device in a given process may
    # only be opened by one context per device per other process.
    # If we open and close a memory handle multiple times in a process, CUDA is allowed
    # to give it a different address; similarly, once we close the memory, we're not
    # allowed to access it (and the storage/tensor built on top of it), even if it is
    # still live in the original process. As we cannot make a cudaMalloc allocation
    # to a single storage in one go, this requires us to cache the device pointer for
    # each cudaIpcMemHandle on the C++ side to reconstruct types of storages, while
    # keeping the old ones alive.
    # See [https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html]
    #
    # This is fine, because all we need to do is to save our position in the allocation,
    # and reconstruct storage and tensor from it.
    # 0xA000 ->  -------CUDA Allocation------
    #            |                          |
    #            |                          |
    #            |                          |
    #            |                          |
    # 0xA100 ->  --------storage1 begin------
    #            |                          |
    # 0xA120 ->  --------tensor1 begin ------
    #            |                          |
    #            |                          |
    #            |                          |
    #            |                          |
    #            |                          |
    # 0xA160 ->  --------tensor1 end---------
    #            |                          |
    #            |                          |
    #            |                          |
    # 0xA200 ->  --------storage1 end--------
    #            |                          |
    # 0xE000 ->  --------CUDA allocation-----
    #
    # To send tensor1, the following info are required from sender to receiver for
    # storage reconstruction.
    # 1. cudaIpcMemHandle of 0xA000 (which can be mapped to a basePtr in receiver process).
    #    basePtr may not be exactly 0xA000 since it's a different process.
    # 2. offset (0xA100) of storage1 in the CUDA allocation.
    # 3. size of storage1 (0x100).
    #
    # On receiver side:
    # 1. Get the devPtr of the MemHandle to access the memory, reconstruct a storage
    #    of the same type using (basePtr, offset, size).
    # 2. we can reconstruct the tensor on top of the reconstructed storage
    #    Tensor(size=0x040, offset=0x020, storage=Storage(data=basePtr+0xA100, size=0x0100))
    #
    # This strategy has a few implications:
    #
    # 1. When we serialize a CUDA tensor for IPC, we cannot do it all in one
    #    go (non-compositionally), and this requires to have a global map
    #    memHandle -> devPtr for each process.
    #
    # 2. We MUST NOT let the new IPC tensor be resizable. Originally, a resize
    #    of the storage beyond 0x100 would merely have caused us to do a
    #    reallocation. You don't really want to do this, but if you did,
    #    all that would happen is that you would lose IPC sharing. But if
    #    you do this in the new world, we will happily let you write out of
    #    bounds of your "allocation", clobbering unrelated data in the cached
    #    allocator block. BAD!
    #
    # By the way, in old versions of PyTorch, we supported this situation
    # natively using a "storage view", which permitted multiple storages to be
    # views on each other. But this was the *only* use of storage views, so we
    # eliminated it so that we could just use tensor views to implement the same
    # thing.
    #

    # TODO: Handle distinguishing between subclass and non-subclass versions of NT better
    # https://github.com/pytorch/pytorch/issues/110543
    from torch.nested._internal.nested_tensor import NestedTensor

    if tensor.is_nested and not isinstance(tensor, NestedTensor):
        return reduce_nested_tensor(tensor)

    if tensor.layout in {
        torch.sparse_coo,
        torch.sparse_csr,
        torch.sparse_bsr,
        torch.sparse_csc,
        torch.sparse_bsc,
    }:
        return reduce_sparse_tensor(tensor)

    storage = tensor._typed_storage()

    if storage._untyped_storage.device.type == "cuda":
        (
            device,
            handle,
            storage_size_bytes,
            storage_offset_bytes,
            ref_counter_handle,
            ref_counter_offset,
            event_handle,
            event_sync_required,
        ) = storage._share_cuda_()
        tensor_offset = tensor.storage_offset()
        shared_cache[handle] = StorageWeakRef(storage)
        # _backward_hooks purposely omitted here, see
        # Note [Don't serialize hooks]
        return (
            rebuild_cuda_tensor,
            (
                type(tensor),
                tensor.size(),
                tensor.stride(),
                tensor_offset,  # tensor offset in its storage
                type(storage),
                tensor.dtype,
                device,
                handle,  # identifier which CUDA allocation is the storage in.
                storage_size_bytes,  # size(in bytes) of the storage
                storage_offset_bytes,  # offset(in bytes) of the storage in the CUDA allocation
                tensor.requires_grad,
                ref_counter_handle,
                ref_counter_offset,
                event_handle,
                event_sync_required,
            ),
        )
    elif storage._untyped_storage.device.type == "meta":
        # Meta tensors have no data; geometry metadata alone is enough.
        return (
            rebuild_meta_tensor,
            (
                type(tensor),
                tensor.size(),
                tensor.stride(),
                tensor.storage_offset(),
                tensor.dtype,
                tensor.untyped_storage().size(),
                tensor.requires_grad,
            ),
        )

    # _backward_hooks purposely omitted here, see Note [Don't serialize hooks]
    metadata = (
        tensor.storage_offset(),
        tensor.size(),
        tensor.stride(),
        tensor.requires_grad,
    )
    return (rebuild_tensor, (type(tensor), storage, metadata))
401
+
402
+
403
def rebuild_nested_tensor(
    rebuild_buffer_func,
    rebuild_buffer_args,
    rebuild_sizes_func,
    rebuild_sizes_args,
    rebuild_strides_func,
    rebuild_strides_args,
    rebuild_offsets_func,
    rebuild_offsets_args,
):
    # Reassemble a nested tensor from its independently reduced parts: the
    # flat data buffer plus the per-component sizes/strides/storage offsets
    # (each pair produced by reduce_tensor in reduce_nested_tensor).
    buffer = rebuild_buffer_func(*rebuild_buffer_args)
    sizes = rebuild_sizes_func(*rebuild_sizes_args)
    strides = rebuild_strides_func(*rebuild_strides_args)
    offsets = rebuild_offsets_func(*rebuild_offsets_args)
    return torch._nested_view_from_buffer_copy(buffer, sizes, strides, offsets)
418
+
419
+
420
def reduce_nested_tensor(nt):
    # Reduce each constituent tensor of the nested tensor with the regular
    # tensor reducer, so per-device sharing (e.g. CUDA IPC) applies to each.
    rebuild_buffer_func, rebuild_buffer_args = reduce_tensor(nt.values())
    rebuild_sizes_func, rebuild_sizes_args = reduce_tensor(nt._nested_tensor_size())
    rebuild_strides_func, rebuild_strides_args = reduce_tensor(
        nt._nested_tensor_strides()
    )
    rebuild_offsets_func, rebuild_offsets_args = reduce_tensor(
        nt._nested_tensor_storage_offsets()
    )

    return (
        rebuild_nested_tensor,
        (
            rebuild_buffer_func,
            rebuild_buffer_args,
            rebuild_sizes_func,
            rebuild_sizes_args,
            rebuild_strides_func,
            rebuild_strides_args,
            rebuild_offsets_func,
            rebuild_offsets_args,
        ),
    )
443
+
444
+
445
+ def rebuild_sparse_coo_tensor(
446
+ rebuild_indices_func,
447
+ rebuild_indices_args,
448
+ rebuild_values_func,
449
+ rebuild_values_args,
450
+ shape,
451
+ is_coalesced,
452
+ ):
453
+ indices = rebuild_indices_func(*rebuild_indices_args)
454
+ values = rebuild_values_func(*rebuild_values_args)
455
+ return torch.sparse_coo_tensor(indices, values, shape, is_coalesced=is_coalesced)
456
+
457
+
458
def rebuild_sparse_compressed_tensor(
    rebuild_compressed_indices_func,
    rebuild_compressed_indices_args,
    rebuild_plain_indices_func,
    rebuild_plain_indices_args,
    rebuild_values_func,
    rebuild_values_args,
    shape,
    layout,
):
    """Reassemble a sparse CSR/CSC/BSR/BSC tensor from its pickled components."""
    return torch.sparse_compressed_tensor(
        rebuild_compressed_indices_func(*rebuild_compressed_indices_args),
        rebuild_plain_indices_func(*rebuild_plain_indices_args),
        rebuild_values_func(*rebuild_values_args),
        shape,
        layout=layout,
    )
476
+
477
+
478
def reduce_sparse_tensor(sparse):
    """Reduce a sparse tensor by reducing each component tensor separately.

    COO tensors ship (indices, values); compressed layouts ship
    (compressed indices, plain indices, values). Each component goes through
    reduce_tensor, so device-specific sharing still applies per component.
    """
    if sparse.layout is torch.sparse_coo:
        rebuild_indices_func, rebuild_indices_args = reduce_tensor(sparse._indices())
        rebuild_values_func, rebuild_values_args = reduce_tensor(sparse._values())
        return (
            rebuild_sparse_coo_tensor,
            (
                rebuild_indices_func,
                rebuild_indices_args,
                rebuild_values_func,
                rebuild_values_args,
                sparse.shape,
                sparse.is_coalesced(),
            ),
        )
    else:
        # Row- vs column-compressed layouts expose their index tensors under
        # different accessor names.
        if sparse.layout in {torch.sparse_csr, torch.sparse_bsr}:
            compressed_indices = sparse.crow_indices()
            plain_indices = sparse.col_indices()
        elif sparse.layout in {torch.sparse_csc, torch.sparse_bsc}:
            compressed_indices = sparse.ccol_indices()
            plain_indices = sparse.row_indices()
        else:
            raise NotImplementedError(sparse.layout)
        (
            rebuild_compressed_indices_func,
            rebuild_compressed_indices_args,
        ) = reduce_tensor(compressed_indices)
        rebuild_plain_indices_func, rebuild_plain_indices_args = reduce_tensor(
            plain_indices
        )
        rebuild_values_func, rebuild_values_args = reduce_tensor(sparse.values())
        return (
            rebuild_sparse_compressed_tensor,
            (
                rebuild_compressed_indices_func,
                rebuild_compressed_indices_args,
                rebuild_plain_indices_func,
                rebuild_plain_indices_args,
                rebuild_values_func,
                rebuild_values_args,
                sparse.shape,
                sparse.layout,
            ),
        )
523
+
524
+
525
def fd_id(fd):
    """Return an (inode, device) pair that uniquely identifies an open file.

    On Mac OS this scheme does not work for shared memory handles, which is
    why the "file_descriptor" sharing method is unsupported on that platform.
    """
    info = os.fstat(fd)
    return (info.st_ino, info.st_dev)
531
+
532
+
533
def storage_from_cache(cls, key):
    # Look up a previously shared storage by its handle in the module-level
    # shared_cache; returns None when the handle is unknown (the weak ref may
    # still have expired — callers handle a dead storage themselves).
    storage_ref = shared_cache.get(key)
    if storage_ref is None:
        return None
    return torch.UntypedStorage._new_with_weak_ptr(storage_ref.cdata)
538
+
539
+
540
def rebuild_storage_fd(cls, df, size):
    # Materialize a CPU storage shared via file descriptor. `df` is a
    # multiprocessing DupFd; detach() hands us our own copy of the fd, which
    # must always be closed, even if the rebuild fails.
    fd = df.detach()
    try:
        # Reuse an existing mapping of the same file if we already have one.
        storage = storage_from_cache(cls, fd_id(fd))
        if storage is not None:
            return storage
        storage = cls._new_shared_fd_cpu(fd, size)
        shared_cache[fd_id(fd)] = StorageWeakRef(storage)
        return storage
    finally:
        os.close(fd)
551
+
552
+
553
def rebuild_storage_filename(cls, manager, handle, size, dtype=None):
    """Materialize a CPU storage shared via the "file_system" strategy.

    `dtype` is only present when the sender pickled a TypedStorage; `size` is
    then in elements rather than bytes. Always decrefs the shared manager
    refcount that the sender increfed in reduce_storage.
    """
    storage: Union[torch.TypedStorage, torch.UntypedStorage] = storage_from_cache(
        cls, handle
    )
    if storage is not None:
        return storage._shared_decref()
    if dtype is None:
        storage = torch.UntypedStorage._new_shared_filename_cpu(manager, handle, size)
    else:
        # Typed path: convert the element count to bytes, map the untyped
        # storage, then re-wrap it with the dtype.
        byte_size = size * torch._utils._element_size(dtype)
        untyped_storage: torch.UntypedStorage = (
            torch.UntypedStorage._new_shared_filename_cpu(manager, handle, byte_size)
        )
        storage = torch.TypedStorage(
            wrap_storage=untyped_storage, dtype=dtype, _internal=True
        )
    shared_cache[handle] = StorageWeakRef(storage)
    return storage._shared_decref()
571
+
572
+
573
def rebuild_storage_empty(cls):
    # Size-0 storages cannot be mmapped, so they are simply re-created from
    # scratch on the receiving side (see the special case in reduce_storage).
    return cls()
575
+
576
+
577
def rebuild_typed_storage(storage, dtype):
    # Re-wrap a received untyped storage with its dtype.
    return torch.storage.TypedStorage(wrap_storage=storage, dtype=dtype, _internal=True)


# Use for torch.storage.TypedStorage
def reduce_typed_storage(storage):
    # Ship the underlying untyped storage (which has its own registered
    # reducer) together with the dtype needed to re-wrap it.
    return (rebuild_typed_storage, (storage._untyped_storage, storage.dtype))


def rebuild_typed_storage_child(storage, storage_type):
    # Reconstruct a legacy typed-storage subclass (e.g. torch.FloatStorage).
    return storage_type(wrap_storage=storage, _internal=True)


# Use for child classes of torch.storage.TypedStorage, like torch.FloatStorage
def reduce_typed_storage_child(storage):
    return (rebuild_typed_storage_child, (storage._untyped_storage, type(storage)))
593
+
594
+
595
def reduce_storage(storage):
    """ForkingPickler reducer for CPU storages.

    CUDA and meta storages must be pickled as tensors (their reducers carry
    the extra metadata IPC needs). CPU storages are shared either through the
    "file_system" strategy (named shared-memory file) or the default
    file-descriptor strategy.
    """
    from . import get_sharing_strategy

    if storage.is_cuda:
        raise RuntimeError(
            "Cannot pickle CUDA storage; try pickling a CUDA tensor instead"
        )
    elif storage.device.type == "meta":
        raise RuntimeError(
            "Cannot pickle meta storage; try pickling a meta tensor instead"
        )
    elif get_sharing_strategy() == "file_system":
        metadata = storage._share_filename_cpu_()
        cache_key = metadata[1]
        rebuild = rebuild_storage_filename
        if isinstance(storage, torch.TypedStorage):
            # The receiver needs the dtype to re-wrap the untyped bytes.
            metadata += (storage.dtype,)
        storage._shared_incref()
    elif storage.size() == 0:
        # This is special cased because Empty tensors
        # (with size 0) cannot be mmapped.
        return (rebuild_storage_empty, (type(storage),))
    else:
        fd, size = storage._share_fd_cpu_()
        # DupFd transports the fd to the child via the resource sharer.
        df = multiprocessing.reduction.DupFd(fd)
        cache_key = fd_id(fd)
        metadata = (df, size)
        rebuild = rebuild_storage_fd  # type: ignore[assignment]

    shared_cache[cache_key] = StorageWeakRef(storage)
    return (rebuild, (type(storage),) + metadata)
626
+
627
+
628
def init_reductions():
    """Register all torch-specific reducers with ForkingPickler.

    Called once by torch.multiprocessing so that tensors, storages, and CUDA
    events are shared (rather than copied) when sent across processes.
    """
    ForkingPickler.register(torch.cuda.Event, reduce_event)

    for t in torch._storage_classes:
        if t.__name__ == "UntypedStorage":
            ForkingPickler.register(t, reduce_storage)
        else:
            # Legacy typed-storage subclasses (torch.FloatStorage, ...).
            ForkingPickler.register(t, reduce_typed_storage_child)

    ForkingPickler.register(torch.storage.TypedStorage, reduce_typed_storage)

    for t in torch._tensor_classes:
        ForkingPickler.register(t, reduce_tensor)

    # TODO: Maybe this should be in tensor_classes? :)
    ForkingPickler.register(torch.Tensor, reduce_tensor)

    from torch.nn.parameter import Parameter

    ForkingPickler.register(Parameter, reduce_tensor)
janus/lib/python3.10/site-packages/torch/nn/__init__.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from torch.nn.parameter import ( # usort: skip
3
+ Buffer as Buffer,
4
+ Parameter as Parameter,
5
+ UninitializedBuffer as UninitializedBuffer,
6
+ UninitializedParameter as UninitializedParameter,
7
+ )
8
+ from torch.nn.modules import * # usort: skip # noqa: F403
9
+ from torch.nn import (
10
+ attention as attention,
11
+ functional as functional,
12
+ init as init,
13
+ modules as modules,
14
+ parallel as parallel,
15
+ parameter as parameter,
16
+ utils as utils,
17
+ )
18
+ from torch.nn.parallel import DataParallel as DataParallel
19
+
20
+
21
def factory_kwargs(kwargs):
    r"""Return a canonicalized dict of factory kwargs.

    Given kwargs, returns a canonicalized dict of factory kwargs that can be directly passed
    to factory functions like torch.empty, or errors if unrecognized kwargs are present.

    This function makes it simple to write code like this::

        class MyModule(nn.Module):
            def __init__(self, **kwargs):
                factory_kwargs = torch.nn.factory_kwargs(kwargs)
                self.weight = Parameter(torch.empty(10, **factory_kwargs))

    Why should you use this function instead of just passing `kwargs` along directly?

    1. This function does error validation, so if there are unexpected kwargs we will
    immediately report an error, instead of deferring it to the factory call
    2. This function supports a special `factory_kwargs` argument, which can be used to
    explicitly specify a kwarg to be used for factory functions, in the event one of the
    factory kwargs conflicts with an already existing argument in the signature (e.g.
    in the signature ``def f(dtype, **kwargs)``, you can specify ``dtype`` for factory
    functions, as distinct from the dtype argument, by saying
    ``f(dtype1, factory_kwargs={"dtype": dtype2})``)
    """
    if kwargs is None:
        return {}

    simple_keys = {"device", "dtype", "memory_format"}
    # Reject anything that is neither a recognized factory kwarg nor the
    # escape-hatch `factory_kwargs` dict.
    unexpected = kwargs.keys() - (simple_keys | {"factory_kwargs"})
    if unexpected:
        raise TypeError(f"unexpected kwargs {unexpected}")

    # Copy the explicit `factory_kwargs` dict so the caller's input is never
    # mutated.
    canonical = dict(kwargs.get("factory_kwargs", {}))
    for key in ("device", "dtype", "memory_format"):
        if key not in kwargs:
            continue
        if key in canonical:
            raise TypeError(
                f"{key} specified twice, in **kwargs and in factory_kwargs"
            )
        canonical[key] = kwargs[key]

    return canonical
janus/lib/python3.10/site-packages/torch/nn/_reduction.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from typing import Optional
3
+
4
+
5
+ # NB: Keep this file in sync with enums in aten/src/ATen/core/Reduction.h
6
+
7
+
8
def get_enum(reduction: str) -> int:
    """Translate a ``reduction`` string into the integer enum used by ATen.

    Accepted values: ``"none"`` -> 0, ``"mean"`` -> 1, ``"sum"`` -> 2.
    ``"elementwise_mean"`` is a deprecated alias for ``"mean"`` and emits a
    warning. Any other string raises ``ValueError``.

    NOTE: the single-exit shape (assigning ``ret`` before raising) is kept
    deliberately; see the TODO below about JIT control-flow support.
    """
    if reduction == "none":
        ret = 0
    elif reduction == "mean":
        ret = 1
    elif reduction == "elementwise_mean":
        warnings.warn(
            "reduction='elementwise_mean' is deprecated. "
            "Please use reduction='mean' instead."
        )
        ret = 1
    elif reduction == "sum":
        ret = 2
    else:
        ret = -1  # TODO: remove once JIT exceptions support control flow
        raise ValueError(f"{reduction} is not a valid value for reduction")
    return ret
25
+
26
+
27
+ # In order to support previous versions, accept boolean size_average and reduce
28
+ # and convert them into the new constants for now
29
+
30
+
31
+ # We use these functions in torch/legacy as well, in which case we'll silence the warning
32
def legacy_get_string(
    size_average: Optional[bool],
    reduce: Optional[bool],
    emit_warning: bool = True,
) -> str:
    """Convert the legacy ``size_average``/``reduce`` flags to a reduction string.

    Both flags historically defaulted to ``True`` when unspecified. The
    mapping is: ``reduce=False`` -> ``"none"``; otherwise ``size_average=True``
    -> ``"mean"`` and ``size_average=False`` -> ``"sum"``.

    Args:
        size_average: legacy averaging flag; ``None`` is treated as ``True``.
        reduce: legacy reduction flag; ``None`` is treated as ``True``.
        emit_warning: when ``True``, warn that these args are deprecated
            (silenced for internal/legacy callers).
    """
    if size_average is None:
        size_average = True
    if reduce is None:
        reduce = True

    if not reduce:
        ret = "none"
    elif size_average:
        ret = "mean"
    else:
        ret = "sum"

    if emit_warning:
        warnings.warn(
            "size_average and reduce args will be deprecated, "
            f"please use reduction='{ret}' instead."
        )
    return ret
53
+
54
+
55
def legacy_get_enum(
    size_average: Optional[bool],
    reduce: Optional[bool],
    emit_warning: bool = True,
) -> int:
    """Convert legacy ``size_average``/``reduce`` flags to the ATen reduction enum."""
    reduction = legacy_get_string(size_average, reduce, emit_warning)
    return get_enum(reduction)
janus/lib/python3.10/site-packages/torch/nn/backends/__init__.py ADDED
File without changes
janus/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (168 Bytes). View file
 
janus/lib/python3.10/site-packages/torch/nn/common_types.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, Tuple, TypeVar, Union
2
+
3
+ from torch import Tensor
4
+
5
+
6
+ # Create some useful type aliases
7
+
8
+ # Template for arguments which can be supplied as a tuple, or which can be a scalar which PyTorch will internally
9
+ # broadcast to a tuple.
10
+ # Comes in several variants: A tuple of unknown size, and a fixed-size tuple for 1d, 2d, or 3d operations.
11
T = TypeVar("T")
# Subscripting any of these aliases (e.g. ``_scalar_or_tuple_2_t[int]``)
# binds ``T`` to produce a concrete "scalar or fixed-arity tuple" union.
_scalar_or_tuple_any_t = Union[T, Tuple[T, ...]]
_scalar_or_tuple_1_t = Union[T, Tuple[T]]
_scalar_or_tuple_2_t = Union[T, Tuple[T, T]]
_scalar_or_tuple_3_t = Union[T, Tuple[T, T, T]]
_scalar_or_tuple_4_t = Union[T, Tuple[T, T, T, T]]
_scalar_or_tuple_5_t = Union[T, Tuple[T, T, T, T, T]]
_scalar_or_tuple_6_t = Union[T, Tuple[T, T, T, T, T, T]]

# For arguments which represent size parameters (eg, kernel size, padding)
_size_any_t = _scalar_or_tuple_any_t[int]
_size_1_t = _scalar_or_tuple_1_t[int]
_size_2_t = _scalar_or_tuple_2_t[int]
_size_3_t = _scalar_or_tuple_3_t[int]
_size_4_t = _scalar_or_tuple_4_t[int]
_size_5_t = _scalar_or_tuple_5_t[int]
_size_6_t = _scalar_or_tuple_6_t[int]

# For arguments which represent optional size parameters (eg, adaptive pool
# parameters, where ``None`` means "inherit that dimension from the input")
_size_any_opt_t = _scalar_or_tuple_any_t[Optional[int]]
_size_2_opt_t = _scalar_or_tuple_2_t[Optional[int]]
_size_3_opt_t = _scalar_or_tuple_3_t[Optional[int]]

# For arguments that represent a ratio to adjust each dimension of an input
# with (eg, upsampling parameters)
_ratio_2_t = _scalar_or_tuple_2_t[float]
_ratio_3_t = _scalar_or_tuple_3_t[float]
_ratio_any_t = _scalar_or_tuple_any_t[float]

_tensor_list_t = _scalar_or_tuple_any_t[Tensor]

# For the return value of max pooling operations that may or may not return
# indices (a single Tensor, or a (values, indices) pair). With the proposed
# 'Literal' feature to Python typing, it might be possible to eventually
# eliminate this.
_maybe_indices_t = _scalar_or_tuple_2_t[Tensor]
janus/lib/python3.10/site-packages/torch/nn/functional.py ADDED
The diff for this file is too large to render. See raw diff
 
janus/lib/python3.10/site-packages/torch/nn/functional.pyi ADDED
@@ -0,0 +1,691 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @generated by tools/pyi/gen_pyi.py from torch/nn/functional.pyi.in
2
+ # mypy: allow-untyped-defs
3
+
4
+ from typing import (
5
+ Any,
6
+ Callable,
7
+ Dict,
8
+ List,
9
+ Literal,
10
+ Optional,
11
+ overload,
12
+ Sequence,
13
+ Tuple,
14
+ Union,
15
+ )
16
+
17
+ from torch import Tensor
18
+ from torch.types import _dtype, _int, _size
19
+
20
+ from .common_types import (
21
+ _ratio_any_t,
22
+ _size_1_t,
23
+ _size_2_opt_t,
24
+ _size_2_t,
25
+ _size_3_opt_t,
26
+ _size_3_t,
27
+ _size_any_t,
28
+ )
29
+
30
+ # 'TypedDict' is a new accepted type that represents a dictionary with a fixed set of allowed keys.
31
+ # It is standards-track but not in `typing` yet. We leave this hear to be uncommented once the feature
32
+ # is wide-spread.
33
+
34
+ # from mypy_extensions import TypedDict
35
+
36
+ # GRID_SAMPLE_INTERPOLATION_MODES = TypedDict('GRID_SAMPLE_INTERPOLATION_MODES', {'bilinear': int, 'nearest': int})
37
+ # GRID_SAMPLE_PADDING_MODES = TypedDict('GRID_SAMPLE_PADDING_MODES', {'zeros': int, 'border': int, 'reflection': int})
38
+
39
+ GRID_SAMPLE_INTERPOLATION_MODES = Dict[str, int]
40
+ GRID_SAMPLE_PADDING_MODES = Dict[str, int]
41
+
42
+ # These stubs were generated by running stubgen (`stubgen --parse-only functional.py`), followed by manual cleaning.
43
+ #
44
+ # The 'BroadcastingList{1,2,3}' types were replaced by `_size` or _output_ratio, as appropriate.
45
+ # This was necessary since the JIT uses BroadcastingList* types but static checking with mypy etc requires a `Sequence`
46
+ # type. There is no way to express the expected lengths of these lists in the current Python typing system.
47
+ #
48
+ # Functions created via `_add_docstr` in `functional.py` where merely typed as `Any` by `stubgen`, so those were
49
+ # deleted from the stub and replaced by generated declarations. See `gen_pyi` for the implementation of the code
50
+ # generation logic for those functions. In the future, it might be worth looking into using the mypy plugin system
51
+ # to encode the type semantics of `_add_docstr`, should that system ever become widespread.
52
+ def fractional_max_pool2d_with_indices(
53
+ input: Tensor,
54
+ kernel_size: _size,
55
+ output_size: Optional[_size] = ...,
56
+ output_ratio: Optional[_ratio_any_t] = ...,
57
+ return_indices: bool = ...,
58
+ _random_samples: Optional[Tensor] = ...,
59
+ ) -> Tuple[Tensor, Tensor]: ...
60
+ def fractional_max_pool3d_with_indices(
61
+ input: Tensor,
62
+ kernel_size: _size,
63
+ output_size: Optional[_size] = ...,
64
+ output_ratio: Optional[_ratio_any_t] = ...,
65
+ return_indices: bool = ...,
66
+ _random_samples: Optional[Tensor] = ...,
67
+ ) -> Tuple[Tensor, Tensor]: ...
68
+ def max_pool1d_with_indices(
69
+ input: Tensor,
70
+ kernel_size: _size,
71
+ stride: Optional[_size] = ...,
72
+ padding: _size = ...,
73
+ dilation: _size = ...,
74
+ ceil_mode: bool = ...,
75
+ return_indices: bool = ...,
76
+ ) -> Tuple[Tensor, Tensor]: ...
77
+ def max_pool2d_with_indices(
78
+ input: Tensor,
79
+ kernel_size: _size,
80
+ stride: Optional[_size] = ...,
81
+ padding: _size = ...,
82
+ dilation: _size = ...,
83
+ ceil_mode: bool = ...,
84
+ return_indices: bool = ...,
85
+ ) -> Tuple[Tensor, Tensor]: ...
86
+ def max_pool3d_with_indices(
87
+ input: Tensor,
88
+ kernel_size: _size,
89
+ stride: Optional[_size] = ...,
90
+ padding: _size = ...,
91
+ dilation: _size = ...,
92
+ ceil_mode: bool = ...,
93
+ return_indices: bool = ...,
94
+ ) -> Tuple[Tensor, Tensor]: ...
95
+ def max_unpool1d(
96
+ input: Tensor,
97
+ indices: Tensor,
98
+ kernel_size: _size,
99
+ stride: Optional[_size] = ...,
100
+ padding: _size = ...,
101
+ output_size: Optional[_size] = ...,
102
+ ) -> Tensor: ...
103
+ def max_unpool2d(
104
+ input: Tensor,
105
+ indices: Tensor,
106
+ kernel_size: _size,
107
+ stride: Optional[_size] = ...,
108
+ padding: _size = ...,
109
+ output_size: Optional[_size] = ...,
110
+ ) -> Tensor: ...
111
+ def max_unpool3d(
112
+ input: Tensor,
113
+ indices: Tensor,
114
+ kernel_size: _size,
115
+ stride: Optional[_size] = ...,
116
+ padding: _size = ...,
117
+ output_size: Optional[_size] = ...,
118
+ ) -> Tensor: ...
119
+ def lp_pool1d(
120
+ input: Tensor,
121
+ norm_type: float,
122
+ kernel_size: _size_1_t,
123
+ stride: Union[Optional[_size], Optional[int]] = ...,
124
+ ceil_mode: bool = ...,
125
+ ) -> Tensor: ...
126
+ def lp_pool2d(
127
+ input: Tensor,
128
+ norm_type: float,
129
+ kernel_size: _size_2_t,
130
+ stride: Union[Optional[_size], Optional[int]] = ...,
131
+ ceil_mode: bool = ...,
132
+ ) -> Tensor: ...
133
+ def lp_pool3d(
134
+ input: Tensor,
135
+ norm_type: float,
136
+ kernel_size: _size_3_t,
137
+ stride: Union[Optional[_size], Optional[int]] = ...,
138
+ ceil_mode: bool = ...,
139
+ ) -> Tensor: ...
140
+ def adaptive_max_pool1d_with_indices(
141
+ input: Tensor,
142
+ output_size: _size,
143
+ return_indices: bool = ...,
144
+ ) -> Tuple[Tensor, Tensor]: ...
145
+ def adaptive_max_pool2d_with_indices(
146
+ input: Tensor,
147
+ output_size: _size_2_opt_t,
148
+ return_indices: bool = ...,
149
+ ) -> Tuple[Tensor, Tensor]: ...
150
+ def adaptive_max_pool3d_with_indices(
151
+ input: Tensor,
152
+ output_size: _size_3_opt_t,
153
+ return_indices: bool = ...,
154
+ ) -> Tuple[Tensor, Tensor]: ...
155
+ def adaptive_avg_pool2d(input: Tensor, output_size: _size_2_opt_t) -> Tensor: ...
156
+ def adaptive_avg_pool3d(input: Tensor, output_size: _size_3_opt_t) -> Tensor: ...
157
+ def dropout(
158
+ input: Tensor,
159
+ p: float = ...,
160
+ training: bool = ...,
161
+ inplace: bool = ...,
162
+ ) -> Tensor: ...
163
+ def alpha_dropout(
164
+ input: Tensor,
165
+ p: float = ...,
166
+ training: bool = ...,
167
+ inplace: bool = ...,
168
+ ) -> Tensor: ...
169
+ def dropout1d(
170
+ input: Tensor,
171
+ p: float = ...,
172
+ training: bool = ...,
173
+ inplace: bool = ...,
174
+ ) -> Tensor: ...
175
+ def dropout2d(
176
+ input: Tensor,
177
+ p: float = ...,
178
+ training: bool = ...,
179
+ inplace: bool = ...,
180
+ ) -> Tensor: ...
181
+ def dropout3d(
182
+ input: Tensor,
183
+ p: float = ...,
184
+ training: bool = ...,
185
+ inplace: bool = ...,
186
+ ) -> Tensor: ...
187
+ def feature_alpha_dropout(
188
+ input: Tensor,
189
+ p: float = ...,
190
+ training: bool = ...,
191
+ inplace: bool = ...,
192
+ ) -> Tensor: ...
193
+ def threshold(
194
+ input: Tensor,
195
+ threshold: float,
196
+ value: float,
197
+ inplace: bool = ...,
198
+ ) -> Tensor: ...
199
+ def relu(input: Tensor, inplace: bool = ...) -> Tensor: ...
200
+ def glu(input: Tensor, dim: int = ...) -> Tensor: ...
201
+ def hardtanh(
202
+ input: Tensor,
203
+ min_val: float = ...,
204
+ max_val: float = ...,
205
+ inplace: bool = ...,
206
+ ) -> Tensor: ...
207
+ def relu6(input: Tensor, inplace: bool = ...) -> Tensor: ...
208
+ def elu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ...
209
+ def selu(input: Tensor, inplace: bool = ...) -> Tensor: ...
210
+ def celu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ...
211
+ def leaky_relu(
212
+ input: Tensor,
213
+ negative_slope: float = ...,
214
+ inplace: bool = ...,
215
+ ) -> Tensor: ...
216
+ def rrelu(
217
+ input: Tensor,
218
+ lower: float = ...,
219
+ upper: float = ...,
220
+ training: bool = ...,
221
+ inplace: bool = ...,
222
+ ) -> Tensor: ...
223
+ def tanhshrink(input: Any): ...
224
+ def softsign(input: Any): ...
225
+ def softmin(
226
+ input: Tensor,
227
+ dim: Optional[int] = ...,
228
+ _stacklevel: int = ...,
229
+ dtype: Optional[_dtype] = ...,
230
+ ) -> Tensor: ...
231
+ def softmax(
232
+ input: Tensor,
233
+ dim: Optional[int] = ...,
234
+ _stacklevel: int = ...,
235
+ dtype: Optional[_dtype] = ...,
236
+ ) -> Tensor: ...
237
+ def gumbel_softmax(
238
+ logits: Tensor,
239
+ tau: float = ...,
240
+ hard: bool = ...,
241
+ eps: float = ...,
242
+ dim: int = ...,
243
+ ) -> Tensor: ...
244
+ def log_softmax(
245
+ input: Tensor,
246
+ dim: Optional[int] = ...,
247
+ _stacklevel: int = ...,
248
+ dtype: Optional[_dtype] = ...,
249
+ ) -> Tensor: ...
250
+ def tanh(input: Any): ...
251
+ def sigmoid(input: Any) -> Tensor: ...
252
+ def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor: ...
253
+ def silu(input: Tensor, inplace: bool = False) -> Tensor: ...
254
+ def mish(input: Tensor, inplace: bool = False) -> Tensor: ...
255
+ def hardswish(input: Tensor, inplace: bool = False) -> Tensor: ...
256
+ def embedding(
257
+ input: Tensor,
258
+ weight: Tensor,
259
+ padding_idx: Optional[int] = ...,
260
+ max_norm: Optional[float] = ...,
261
+ norm_type: float = ...,
262
+ scale_grad_by_freq: bool = ...,
263
+ sparse: bool = ...,
264
+ ) -> Tensor: ...
265
+ def embedding_bag(
266
+ input: Tensor,
267
+ weight: Tensor,
268
+ offsets: Optional[Tensor] = ...,
269
+ max_norm: Optional[float] = ...,
270
+ norm_type: float = ...,
271
+ scale_grad_by_freq: bool = ...,
272
+ mode: str = ...,
273
+ sparse: bool = ...,
274
+ per_sample_weights: Optional[Tensor] = ...,
275
+ include_last_offset: bool = ...,
276
+ padding_idx: Optional[int] = ...,
277
+ ) -> Tensor: ...
278
+ def batch_norm(
279
+ input: Tensor,
280
+ running_mean: Optional[Tensor],
281
+ running_var: Optional[Tensor],
282
+ weight: Optional[Tensor] = ...,
283
+ bias: Optional[Tensor] = ...,
284
+ training: bool = ...,
285
+ momentum: float = ...,
286
+ eps: float = ...,
287
+ ) -> Tensor: ...
288
+ def instance_norm(
289
+ input: Tensor,
290
+ running_mean: Optional[Tensor] = ...,
291
+ running_var: Optional[Tensor] = ...,
292
+ weight: Optional[Tensor] = ...,
293
+ bias: Optional[Tensor] = ...,
294
+ use_input_stats: bool = ...,
295
+ momentum: float = ...,
296
+ eps: float = ...,
297
+ ) -> Tensor: ...
298
+ def layer_norm(
299
+ input: Tensor,
300
+ normalized_shape: Sequence[int],
301
+ weight: Optional[Tensor] = ...,
302
+ bias: Optional[Tensor] = ...,
303
+ eps: float = ...,
304
+ ) -> Tensor: ...
305
+ def rms_norm(
306
+ input: Tensor,
307
+ normalized_shape: Sequence[int],
308
+ weight: Optional[Tensor] = ...,
309
+ eps: Optional[float] = ...,
310
+ ) -> Tensor: ...
311
+ def group_norm(
312
+ input: Tensor,
313
+ num_groups: int,
314
+ weight: Optional[Tensor] = ...,
315
+ bias: Optional[Tensor] = ...,
316
+ eps: float = ...,
317
+ ) -> Tensor: ...
318
+ def local_response_norm(
319
+ input: Tensor,
320
+ size: int,
321
+ alpha: float = ...,
322
+ beta: float = ...,
323
+ k: float = ...,
324
+ ) -> Tensor: ...
325
+ def ctc_loss(
326
+ log_probs: Tensor,
327
+ targets: Tensor,
328
+ input_lengths: Tensor,
329
+ target_lengths: Tensor,
330
+ blank: int = ...,
331
+ reduction: str = ...,
332
+ zero_infinity: bool = ...,
333
+ ) -> Tensor: ...
334
+ def nll_loss(
335
+ input: Tensor,
336
+ target: Tensor,
337
+ weight: Optional[Tensor] = ...,
338
+ size_average: Optional[bool] = ...,
339
+ ignore_index: int = ...,
340
+ reduce: Optional[bool] = ...,
341
+ reduction: str = ...,
342
+ ) -> Tensor: ...
343
+ def poisson_nll_loss(
344
+ input: Tensor,
345
+ target: Tensor,
346
+ log_input: bool = ...,
347
+ full: bool = ...,
348
+ size_average: Optional[bool] = ...,
349
+ eps: float = ...,
350
+ reduce: Optional[bool] = ...,
351
+ reduction: str = ...,
352
+ ) -> Tensor: ...
353
+ def gaussian_nll_loss(
354
+ input: Tensor,
355
+ target: Tensor,
356
+ var: Tensor,
357
+ full: Optional[bool] = ...,
358
+ eps: Optional[float] = ...,
359
+ reduction: Optional[str] = ...,
360
+ ) -> Tensor: ...
361
+ def kl_div(
362
+ input: Tensor,
363
+ target: Tensor,
364
+ size_average: Optional[bool] = ...,
365
+ reduce: Optional[bool] = ...,
366
+ reduction: str = ...,
367
+ log_target: bool = ...,
368
+ ) -> Tensor: ...
369
+ def cross_entropy(
370
+ input: Tensor,
371
+ target: Tensor,
372
+ weight: Optional[Tensor] = ...,
373
+ size_average: Optional[bool] = ...,
374
+ ignore_index: int = ...,
375
+ reduce: Optional[bool] = ...,
376
+ reduction: str = ...,
377
+ label_smoothing: float = ...,
378
+ ) -> Tensor: ...
379
+ def binary_cross_entropy(
380
+ input: Tensor,
381
+ target: Tensor,
382
+ weight: Optional[Tensor] = ...,
383
+ size_average: Optional[bool] = ...,
384
+ reduce: Optional[bool] = ...,
385
+ reduction: str = ...,
386
+ ) -> Tensor: ...
387
+ def binary_cross_entropy_with_logits(
388
+ input: Tensor,
389
+ target: Tensor,
390
+ weight: Optional[Tensor] = ...,
391
+ size_average: Optional[bool] = ...,
392
+ reduce: Optional[bool] = ...,
393
+ reduction: str = ...,
394
+ pos_weight: Optional[Tensor] = ...,
395
+ ) -> Tensor: ...
396
+ def smooth_l1_loss(
397
+ input: Tensor,
398
+ target: Tensor,
399
+ size_average: Optional[bool] = ...,
400
+ reduce: Optional[bool] = ...,
401
+ reduction: str = ...,
402
+ beta: float = ...,
403
+ ) -> Tensor: ...
404
+ def huber_loss(
405
+ input: Tensor,
406
+ target: Tensor,
407
+ reduction: str = ...,
408
+ delta: float = ...,
409
+ ) -> Tensor: ...
410
+ def l1_loss(
411
+ input: Tensor,
412
+ target: Tensor,
413
+ size_average: Optional[bool] = ...,
414
+ reduce: Optional[bool] = ...,
415
+ reduction: str = ...,
416
+ ) -> Tensor: ...
417
+ def mse_loss(
418
+ input: Tensor,
419
+ target: Tensor,
420
+ size_average: Optional[bool] = ...,
421
+ reduce: Optional[bool] = ...,
422
+ reduction: str = ...,
423
+ ) -> Tensor: ...
424
+ def margin_ranking_loss(
425
+ input1: Tensor,
426
+ input2: Tensor,
427
+ target: Tensor,
428
+ margin: float = ...,
429
+ size_average: Optional[bool] = ...,
430
+ reduce: Optional[bool] = ...,
431
+ reduction: str = ...,
432
+ ) -> Tensor: ...
433
+ def hinge_embedding_loss(
434
+ input: Tensor,
435
+ target: Tensor,
436
+ margin: float = ...,
437
+ size_average: Optional[bool] = ...,
438
+ reduce: Optional[bool] = ...,
439
+ reduction: str = ...,
440
+ ) -> Tensor: ...
441
+ def multilabel_margin_loss(
442
+ input: Tensor,
443
+ target: Tensor,
444
+ size_average: Optional[bool] = ...,
445
+ reduce: Optional[bool] = ...,
446
+ reduction: str = ...,
447
+ ) -> Tensor: ...
448
+ def soft_margin_loss(
449
+ input: Tensor,
450
+ target: Tensor,
451
+ size_average: Optional[bool] = ...,
452
+ reduce: Optional[bool] = ...,
453
+ reduction: str = ...,
454
+ ) -> Tensor: ...
455
+ def multilabel_soft_margin_loss(
456
+ input: Tensor,
457
+ target: Tensor,
458
+ weight: Optional[Tensor] = ...,
459
+ size_average: Optional[bool] = ...,
460
+ reduce: Optional[bool] = ...,
461
+ reduction: str = ...,
462
+ ) -> Tensor: ...
463
+ def cosine_embedding_loss(
464
+ input1: Tensor,
465
+ input2: Tensor,
466
+ target: Tensor,
467
+ margin: float = ...,
468
+ size_average: Optional[bool] = ...,
469
+ reduce: Optional[bool] = ...,
470
+ reduction: str = ...,
471
+ ) -> Tensor: ...
472
+ def multi_margin_loss(
473
+ input: Tensor,
474
+ target: Tensor,
475
+ p: int = ...,
476
+ margin: float = ...,
477
+ weight: Optional[Tensor] = ...,
478
+ size_average: Optional[bool] = ...,
479
+ reduce: Optional[bool] = ...,
480
+ reduction: str = ...,
481
+ ) -> Tensor: ...
482
+ def upsample(
483
+ input: Any,
484
+ size: Optional[Any] = ...,
485
+ scale_factor: Optional[Any] = ...,
486
+ mode: str = ...,
487
+ align_corners: Optional[Any] = ...,
488
+ ): ...
489
+ def interpolate(
490
+ input: Any,
491
+ size: Optional[Any] = ...,
492
+ scale_factor: Optional[Any] = ...,
493
+ mode: str = ...,
494
+ align_corners: Optional[Any] = ...,
495
+ recompute_scale_factor: Optional[Any] = ...,
496
+ antialias: bool = ...,
497
+ ): ...
498
+ def upsample_nearest(
499
+ input: Any,
500
+ size: Optional[Any] = ...,
501
+ scale_factor: Optional[Any] = ...,
502
+ ): ...
503
+ def upsample_bilinear(
504
+ input: Any,
505
+ size: Optional[Any] = ...,
506
+ scale_factor: Optional[Any] = ...,
507
+ ): ...
508
+ def grid_sample(
509
+ input: Tensor,
510
+ grid: Tensor,
511
+ mode: str = ...,
512
+ padding_mode: str = ...,
513
+ align_corners: Optional[Any] = ...,
514
+ ) -> Tensor: ...
515
+ def affine_grid(
516
+ theta: Tensor,
517
+ size: List[int],
518
+ align_corners: Optional[Any] = ...,
519
+ ) -> Tensor: ...
520
+ def triplet_margin_loss(
521
+ anchor: Tensor,
522
+ positive: Tensor,
523
+ negative: Tensor,
524
+ margin: float = ...,
525
+ p: float = ...,
526
+ eps: float = ...,
527
+ swap: bool = ...,
528
+ size_average: Optional[bool] = ...,
529
+ reduce: Optional[bool] = ...,
530
+ reduction: str = ...,
531
+ ) -> Tensor: ...
532
+ def triplet_margin_with_distance_loss(
533
+ anchor: Tensor,
534
+ positive: Tensor,
535
+ negative: Tensor,
536
+ *,
537
+ distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = ...,
538
+ margin: float = ...,
539
+ swap: bool = ...,
540
+ reduction: str = ...,
541
+ ) -> Tensor: ...
542
+ def normalize(
543
+ input: Tensor,
544
+ p: float = ...,
545
+ dim: int = ...,
546
+ eps: float = ...,
547
+ out: Optional[Tensor] = ...,
548
+ ) -> Tensor: ...
549
+ def assert_int_or_pair(
550
+ arg: Any,
551
+ arg_name: Any,
552
+ message: Any,
553
+ ) -> None: ...
554
+ def unfold(
555
+ input: Tensor,
556
+ kernel_size: _size_any_t,
557
+ dilation: _size_any_t = ...,
558
+ padding: _size_any_t = ...,
559
+ stride: _size_any_t = ...,
560
+ ) -> Tensor: ...
561
+ def fold(
562
+ input: Tensor,
563
+ output_size: _size_any_t,
564
+ kernel_size: _size_any_t,
565
+ dilation: _size_any_t = ...,
566
+ padding: _size_any_t = ...,
567
+ stride: _size_any_t = ...,
568
+ ) -> Tensor: ...
569
+ def _canonical_mask(
570
+ mask: Optional[Tensor],
571
+ mask_name: str,
572
+ other_type: Optional[_dtype],
573
+ other_name: str,
574
+ target_type: _dtype,
575
+ check_other: bool = True,
576
+ ) -> Optional[Tensor]: ...
577
+ def _none_or_dtype(input: Optional[Tensor]) -> Optional[_dtype]: ...
578
+ def multi_head_attention_forward(
579
+ query: Tensor,
580
+ key: Tensor,
581
+ value: Tensor,
582
+ embed_dim_to_check: int,
583
+ num_heads: int,
584
+ in_proj_weight: Optional[Tensor],
585
+ in_proj_bias: Optional[Tensor],
586
+ bias_k: Optional[Tensor],
587
+ bias_v: Optional[Tensor],
588
+ add_zero_attn: bool,
589
+ dropout_p: float,
590
+ out_proj_weight: Tensor,
591
+ out_proj_bias: Optional[Tensor],
592
+ training: bool = True,
593
+ key_padding_mask: Optional[Tensor] = None,
594
+ need_weights: bool = True,
595
+ attn_mask: Optional[Tensor] = None,
596
+ use_separate_proj_weight: bool = False,
597
+ q_proj_weight: Optional[Tensor] = None,
598
+ k_proj_weight: Optional[Tensor] = None,
599
+ v_proj_weight: Optional[Tensor] = None,
600
+ static_k: Optional[Tensor] = None,
601
+ static_v: Optional[Tensor] = None,
602
+ average_attn_weights: bool = True,
603
+ is_causal: bool = False,
604
+ ) -> Tuple[Tensor, Optional[Tensor]]: ...
605
+
606
+ from torch import conv1d as conv1d
607
+ from torch import conv2d as conv2d
608
+ from torch import conv3d as conv3d
609
+ from torch import conv_transpose1d as conv_transpose1d
610
+ from torch import conv_transpose2d as conv_transpose2d
611
+ from torch import conv_transpose3d as conv_transpose3d
612
+ from torch import conv_tbc as conv_tbc
613
+ from torch import avg_pool1d as avg_pool1d
614
+ from torch import adaptive_avg_pool1d as adaptive_avg_pool1d
615
+ from torch import relu_ as relu_
616
+ from torch import selu_ as selu_
617
+ from torch import celu_ as celu_
618
+ from torch import prelu as prelu
619
+ from torch import rrelu_ as rrelu_
620
+ from torch import hardshrink as hardshrink
621
+ from torch import bilinear as bilinear
622
+ from torch import pixel_shuffle as pixel_shuffle
623
+ from torch import pixel_unshuffle as pixel_unshuffle
624
+ from torch import channel_shuffle as channel_shuffle
625
+ from torch import native_channel_shuffle as native_channel_shuffle
626
+ from torch import pairwise_distance as pairwise_distance
627
+ from torch import pdist as pdist
628
+ from torch import cosine_similarity as cosine_similarity
629
+ from torch._C._nn import avg_pool2d as avg_pool2d
630
+ from torch._C._nn import avg_pool3d as avg_pool3d
631
+ from torch._C._nn import hardtanh_ as hardtanh_
632
+ from torch._C._nn import elu_ as elu_
633
+ from torch._C._nn import leaky_relu_ as leaky_relu_
634
+ from torch._C._nn import gelu as gelu
635
+ from torch._C._nn import softplus as softplus
636
+ from torch._C._nn import softshrink as softshrink
637
+ from torch._C._nn import linear as linear
638
+ from torch._C._nn import pad as pad
639
+ from torch._C._nn import one_hot as one_hot
640
+ from torch._C._nn import scaled_dot_product_attention as scaled_dot_product_attention
641
+ from torch._C._nn import log_sigmoid
642
+ logsigmoid = log_sigmoid
643
+
644
+ @overload
645
+ def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ...
646
+ @overload
647
+ def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
648
+ @overload
649
+ def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
650
+ @overload
651
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ...
652
+ @overload
653
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
654
+ @overload
655
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
656
+ @overload
657
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ...
658
+ @overload
659
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
660
+ @overload
661
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
662
+ @overload
663
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, return_indices: Literal[False] = False, _random_samples: Optional[Tensor] = None) -> Tensor: ...
664
+ @overload
665
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]], output_ratio: Optional[_ratio_any_t], return_indices: Literal[True], /, _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
666
+ @overload
667
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, *, return_indices: Literal[True], _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
668
+ @overload
669
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, return_indices: Literal[False] = False, _random_samples: Optional[Tensor] = None) -> Tensor: ...
670
+ @overload
671
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]], output_ratio: Optional[_ratio_any_t], return_indices: Literal[True], /, _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
672
+ @overload
673
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, *, return_indices: Literal[True], _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
674
+ @overload
675
+ def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ...
676
+ @overload
677
+ def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
678
+ @overload
679
+ def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
680
+ @overload
681
+ def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ...
682
+ @overload
683
+ def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
684
+ @overload
685
+ def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
686
+ @overload
687
+ def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ...
688
+ @overload
689
+ def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
690
+ @overload
691
+ def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
janus/lib/python3.10/site-packages/torch/nn/grad.py ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ """Gradient interface."""
3
+
4
+ import torch
5
+ from torch.nn.modules.utils import _pair, _single, _triple
6
+
7
+
8
def conv1d_input(
    input_size,
    weight,
    grad_output,
    stride=1,
    padding=0,
    dilation=1,
    groups=1,
):
    r"""Compute the gradient of conv1d with respect to the input of the convolution.

    Under the hood this is the 1D transposed convolution, but the shape of the
    gradient w.r.t. the input must be supplied explicitly via ``input_size``.

    Args:
        input_size : Shape of the input gradient tensor
        weight: weight tensor (out_channels x in_channels/groups x kW)
        grad_output : output gradient tensor (minibatch x out_channels x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1

    Examples::

        >>> input = torch.randn(1, 1, 3, requires_grad=True)
        >>> weight = torch.randn(1, 1, 1, requires_grad=True)
        >>> output = F.conv1d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> grad_input = torch.autograd.grad(output, input, grad_output)
        >>> F.grad.conv1d_input(input.shape, weight, grad_output)

    """
    # convolution_backward only inspects the *shape* of the forward input, so
    # a one-element tensor expanded to ``input_size`` serves as a zero-storage
    # stand-in that matches grad_output's dtype and device.
    shape_proxy = grad_output.new_empty(1).expand(input_size)

    grad_input, _, _ = torch.ops.aten.convolution_backward(
        grad_output,
        shape_proxy,
        weight,
        None,  # bias sizes: bias gradient is not requested
        _single(stride),
        _single(padding),
        _single(dilation),
        False,  # transposed
        [0],  # output_padding
        groups,
        (True, False, False),  # compute only the gradient w.r.t. the input
    )
    return grad_input
56
+
57
+
58
def conv1d_weight(
    input,
    weight_size,
    grad_output,
    stride=1,
    padding=0,
    dilation=1,
    groups=1,
):
    r"""Compute the gradient of conv1d with respect to the weight of the convolution.

    Args:
        input: input tensor of shape (minibatch x in_channels x iW)
        weight_size : Shape of the weight gradient tensor
        grad_output : output gradient tensor (minibatch x out_channels x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1

    Examples::

        >>> input = torch.randn(1, 1, 3, requires_grad=True)
        >>> weight = torch.randn(1, 1, 1, requires_grad=True)
        >>> output = F.conv1d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> # xdoctest: +SKIP
        >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
        >>> F.grad.conv1d_weight(input, weight.shape, grad_output)

    """
    # convolution_backward only needs the *shape* of the weight, so a
    # one-element tensor expanded to ``weight_size`` is a zero-storage proxy.
    weight = grad_output.new_empty(1).expand(weight_size)

    return torch.ops.aten.convolution_backward(
        grad_output,
        input,
        weight,
        None,  # bias sizes: bias gradient is not requested
        _single(stride),
        _single(padding),
        _single(dilation),
        False,  # transposed
        [0],  # output_padding
        groups,
        (False, True, False),  # compute only the gradient w.r.t. the weight
    )[1]
104
+
105
+
106
def conv2d_input(
    input_size,
    weight,
    grad_output,
    stride=1,
    padding=0,
    dilation=1,
    groups=1,
):
    r"""Compute the gradient of conv2d with respect to the input of the convolution.

    Under the hood this is the 2D transposed convolution, but the shape of the
    gradient w.r.t. the input must be supplied explicitly via ``input_size``.

    Args:
        input_size : Shape of the input gradient tensor
        weight: weight tensor (out_channels x in_channels/groups x kH x kW)
        grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1

    Examples::

        >>> input = torch.randn(1, 1, 3, 3, requires_grad=True)
        >>> weight = torch.randn(1, 1, 1, 2, requires_grad=True)
        >>> output = F.conv2d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> grad_input = torch.autograd.grad(output, input, grad_output)
        >>> F.grad.conv2d_input(input.shape, weight, grad_output)

    """
    # convolution_backward only inspects the *shape* of the forward input, so
    # a one-element tensor expanded to ``input_size`` serves as a zero-storage
    # stand-in that matches grad_output's dtype and device.
    shape_proxy = grad_output.new_empty(1).expand(input_size)

    grad_input, _, _ = torch.ops.aten.convolution_backward(
        grad_output,
        shape_proxy,
        weight,
        None,  # bias sizes: bias gradient is not requested
        _pair(stride),
        _pair(padding),
        _pair(dilation),
        False,  # transposed
        [0],  # output_padding
        groups,
        (True, False, False),  # compute only the gradient w.r.t. the input
    )
    return grad_input
154
+
155
+
156
def conv2d_weight(
    input,
    weight_size,
    grad_output,
    stride=1,
    padding=0,
    dilation=1,
    groups=1,
):
    r"""Compute the gradient of conv2d with respect to the weight of the convolution.

    Args:
        input: input tensor of shape (minibatch x in_channels x iH x iW)
        weight_size : Shape of the weight gradient tensor
        grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1

    Examples::

        >>> input = torch.randn(1, 1, 3, 3, requires_grad=True)
        >>> weight = torch.randn(1, 1, 1, 2, requires_grad=True)
        >>> output = F.conv2d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> # xdoctest: +SKIP
        >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
        >>> F.grad.conv2d_weight(input, weight.shape, grad_output)

    """
    # convolution_backward only needs the *shape* of the weight, so a
    # one-element tensor expanded to ``weight_size`` is a zero-storage proxy.
    weight = grad_output.new_empty(1).expand(weight_size)

    return torch.ops.aten.convolution_backward(
        grad_output,
        input,
        weight,
        None,  # bias sizes: bias gradient is not requested
        _pair(stride),
        _pair(padding),
        _pair(dilation),
        False,  # transposed
        [0],  # output_padding
        groups,
        (False, True, False),  # compute only the gradient w.r.t. the weight
    )[1]
202
+
203
+
204
def conv3d_input(
    input_size,
    weight,
    grad_output,
    stride=1,
    padding=0,
    dilation=1,
    groups=1,
):
    r"""Compute the gradient of conv3d with respect to the input of the convolution.

    Under the hood this is the 3D transposed convolution, but the shape of the
    gradient w.r.t. the input must be supplied explicitly via ``input_size``.

    Args:
        input_size : Shape of the input gradient tensor
        weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)
        grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1

    Examples::

        >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
        >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
        >>> output = F.conv3d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> grad_input = torch.autograd.grad(output, input, grad_output)
        >>> F.grad.conv3d_input(input.shape, weight, grad_output)

    """
    # convolution_backward only inspects the *shape* of the forward input, so
    # a one-element tensor expanded to ``input_size`` serves as a zero-storage
    # stand-in that matches grad_output's dtype and device.
    shape_proxy = grad_output.new_empty(1).expand(input_size)

    grad_input, _, _ = torch.ops.aten.convolution_backward(
        grad_output,
        shape_proxy,
        weight,
        None,  # bias sizes: bias gradient is not requested
        _triple(stride),
        _triple(padding),
        _triple(dilation),
        False,  # transposed
        [0],  # output_padding
        groups,
        (True, False, False),  # compute only the gradient w.r.t. the input
    )
    return grad_input
252
+
253
+
254
def conv3d_weight(
    input,
    weight_size,
    grad_output,
    stride=1,
    padding=0,
    dilation=1,
    groups=1,
):
    r"""Compute the gradient of conv3d with respect to the weight of the convolution.

    Args:
        input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
        weight_size : Shape of the weight gradient tensor
        grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1

    Examples::

        >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
        >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
        >>> output = F.conv3d(input, weight)
        >>> grad_output = torch.randn(output.shape)
        >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
        >>> F.grad.conv3d_weight(input, weight.shape, grad_output)

    """
    # convolution_backward only needs the *shape* of the weight, so a
    # one-element tensor expanded to ``weight_size`` is a zero-storage proxy.
    shape_proxy = grad_output.new_empty(1).expand(weight_size)

    _, grad_weight, _ = torch.ops.aten.convolution_backward(
        grad_output,
        input,
        shape_proxy,
        None,  # bias sizes: bias gradient is not requested
        _triple(stride),
        _triple(padding),
        _triple(dilation),
        False,  # transposed
        [0],  # output_padding
        groups,
        (False, True, False),  # compute only the gradient w.r.t. the weight
    )
    return grad_weight
janus/lib/python3.10/site-packages/torch/nn/modules/__init__.py ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .module import Module # usort: skip
2
+ from .linear import Bilinear, Identity, LazyLinear, Linear # usort: skip
3
+ from .activation import (
4
+ CELU,
5
+ ELU,
6
+ GELU,
7
+ GLU,
8
+ Hardshrink,
9
+ Hardsigmoid,
10
+ Hardswish,
11
+ Hardtanh,
12
+ LeakyReLU,
13
+ LogSigmoid,
14
+ LogSoftmax,
15
+ Mish,
16
+ MultiheadAttention,
17
+ PReLU,
18
+ ReLU,
19
+ ReLU6,
20
+ RReLU,
21
+ SELU,
22
+ Sigmoid,
23
+ SiLU,
24
+ Softmax,
25
+ Softmax2d,
26
+ Softmin,
27
+ Softplus,
28
+ Softshrink,
29
+ Softsign,
30
+ Tanh,
31
+ Tanhshrink,
32
+ Threshold,
33
+ )
34
+ from .adaptive import AdaptiveLogSoftmaxWithLoss
35
+ from .batchnorm import (
36
+ BatchNorm1d,
37
+ BatchNorm2d,
38
+ BatchNorm3d,
39
+ LazyBatchNorm1d,
40
+ LazyBatchNorm2d,
41
+ LazyBatchNorm3d,
42
+ SyncBatchNorm,
43
+ )
44
+ from .channelshuffle import ChannelShuffle
45
+ from .container import (
46
+ Container,
47
+ ModuleDict,
48
+ ModuleList,
49
+ ParameterDict,
50
+ ParameterList,
51
+ Sequential,
52
+ )
53
+ from .conv import (
54
+ Conv1d,
55
+ Conv2d,
56
+ Conv3d,
57
+ ConvTranspose1d,
58
+ ConvTranspose2d,
59
+ ConvTranspose3d,
60
+ LazyConv1d,
61
+ LazyConv2d,
62
+ LazyConv3d,
63
+ LazyConvTranspose1d,
64
+ LazyConvTranspose2d,
65
+ LazyConvTranspose3d,
66
+ )
67
+ from .distance import CosineSimilarity, PairwiseDistance
68
+ from .dropout import (
69
+ AlphaDropout,
70
+ Dropout,
71
+ Dropout1d,
72
+ Dropout2d,
73
+ Dropout3d,
74
+ FeatureAlphaDropout,
75
+ )
76
+ from .flatten import Flatten, Unflatten
77
+ from .fold import Fold, Unfold
78
+ from .instancenorm import (
79
+ InstanceNorm1d,
80
+ InstanceNorm2d,
81
+ InstanceNorm3d,
82
+ LazyInstanceNorm1d,
83
+ LazyInstanceNorm2d,
84
+ LazyInstanceNorm3d,
85
+ )
86
+ from .loss import (
87
+ BCELoss,
88
+ BCEWithLogitsLoss,
89
+ CosineEmbeddingLoss,
90
+ CrossEntropyLoss,
91
+ CTCLoss,
92
+ GaussianNLLLoss,
93
+ HingeEmbeddingLoss,
94
+ HuberLoss,
95
+ KLDivLoss,
96
+ L1Loss,
97
+ MarginRankingLoss,
98
+ MSELoss,
99
+ MultiLabelMarginLoss,
100
+ MultiLabelSoftMarginLoss,
101
+ MultiMarginLoss,
102
+ NLLLoss,
103
+ NLLLoss2d,
104
+ PoissonNLLLoss,
105
+ SmoothL1Loss,
106
+ SoftMarginLoss,
107
+ TripletMarginLoss,
108
+ TripletMarginWithDistanceLoss,
109
+ )
110
+ from .normalization import (
111
+ CrossMapLRN2d,
112
+ GroupNorm,
113
+ LayerNorm,
114
+ LocalResponseNorm,
115
+ RMSNorm,
116
+ )
117
+ from .padding import (
118
+ CircularPad1d,
119
+ CircularPad2d,
120
+ CircularPad3d,
121
+ ConstantPad1d,
122
+ ConstantPad2d,
123
+ ConstantPad3d,
124
+ ReflectionPad1d,
125
+ ReflectionPad2d,
126
+ ReflectionPad3d,
127
+ ReplicationPad1d,
128
+ ReplicationPad2d,
129
+ ReplicationPad3d,
130
+ ZeroPad1d,
131
+ ZeroPad2d,
132
+ ZeroPad3d,
133
+ )
134
+ from .pixelshuffle import PixelShuffle, PixelUnshuffle
135
+ from .pooling import (
136
+ AdaptiveAvgPool1d,
137
+ AdaptiveAvgPool2d,
138
+ AdaptiveAvgPool3d,
139
+ AdaptiveMaxPool1d,
140
+ AdaptiveMaxPool2d,
141
+ AdaptiveMaxPool3d,
142
+ AvgPool1d,
143
+ AvgPool2d,
144
+ AvgPool3d,
145
+ FractionalMaxPool2d,
146
+ FractionalMaxPool3d,
147
+ LPPool1d,
148
+ LPPool2d,
149
+ LPPool3d,
150
+ MaxPool1d,
151
+ MaxPool2d,
152
+ MaxPool3d,
153
+ MaxUnpool1d,
154
+ MaxUnpool2d,
155
+ MaxUnpool3d,
156
+ )
157
+ from .rnn import GRU, GRUCell, LSTM, LSTMCell, RNN, RNNBase, RNNCell, RNNCellBase
158
+ from .sparse import Embedding, EmbeddingBag
159
+ from .transformer import (
160
+ Transformer,
161
+ TransformerDecoder,
162
+ TransformerDecoderLayer,
163
+ TransformerEncoder,
164
+ TransformerEncoderLayer,
165
+ )
166
+ from .upsampling import Upsample, UpsamplingBilinear2d, UpsamplingNearest2d
167
+
168
+
169
+ __all__ = [
170
+ "AdaptiveAvgPool1d",
171
+ "AdaptiveAvgPool2d",
172
+ "AdaptiveAvgPool3d",
173
+ "AdaptiveLogSoftmaxWithLoss",
174
+ "AdaptiveMaxPool1d",
175
+ "AdaptiveMaxPool2d",
176
+ "AdaptiveMaxPool3d",
177
+ "AlphaDropout",
178
+ "AvgPool1d",
179
+ "AvgPool2d",
180
+ "AvgPool3d",
181
+ "BCELoss",
182
+ "BCEWithLogitsLoss",
183
+ "BatchNorm1d",
184
+ "BatchNorm2d",
185
+ "BatchNorm3d",
186
+ "Bilinear",
187
+ "CELU",
188
+ "CTCLoss",
189
+ "ChannelShuffle",
190
+ "CircularPad1d",
191
+ "CircularPad2d",
192
+ "CircularPad3d",
193
+ "ConstantPad1d",
194
+ "ConstantPad2d",
195
+ "ConstantPad3d",
196
+ "Container",
197
+ "Conv1d",
198
+ "Conv2d",
199
+ "Conv3d",
200
+ "ConvTranspose1d",
201
+ "ConvTranspose2d",
202
+ "ConvTranspose3d",
203
+ "CosineEmbeddingLoss",
204
+ "CosineSimilarity",
205
+ "CrossEntropyLoss",
206
+ "CrossMapLRN2d",
207
+ "Dropout",
208
+ "Dropout1d",
209
+ "Dropout2d",
210
+ "Dropout3d",
211
+ "ELU",
212
+ "Embedding",
213
+ "EmbeddingBag",
214
+ "FeatureAlphaDropout",
215
+ "Flatten",
216
+ "Fold",
217
+ "FractionalMaxPool2d",
218
+ "FractionalMaxPool3d",
219
+ "GELU",
220
+ "GLU",
221
+ "GRU",
222
+ "GRUCell",
223
+ "GaussianNLLLoss",
224
+ "GroupNorm",
225
+ "Hardshrink",
226
+ "Hardsigmoid",
227
+ "Hardswish",
228
+ "Hardtanh",
229
+ "HingeEmbeddingLoss",
230
+ "HuberLoss",
231
+ "Identity",
232
+ "InstanceNorm1d",
233
+ "InstanceNorm2d",
234
+ "InstanceNorm3d",
235
+ "KLDivLoss",
236
+ "L1Loss",
237
+ "LPPool1d",
238
+ "LPPool2d",
239
+ "LPPool3d",
240
+ "LSTM",
241
+ "LSTMCell",
242
+ "LayerNorm",
243
+ "LazyBatchNorm1d",
244
+ "LazyBatchNorm2d",
245
+ "LazyBatchNorm3d",
246
+ "LazyConv1d",
247
+ "LazyConv2d",
248
+ "LazyConv3d",
249
+ "LazyConvTranspose1d",
250
+ "LazyConvTranspose2d",
251
+ "LazyConvTranspose3d",
252
+ "LazyInstanceNorm1d",
253
+ "LazyInstanceNorm2d",
254
+ "LazyInstanceNorm3d",
255
+ "LazyLinear",
256
+ "LeakyReLU",
257
+ "Linear",
258
+ "LocalResponseNorm",
259
+ "LogSigmoid",
260
+ "LogSoftmax",
261
+ "MSELoss",
262
+ "MarginRankingLoss",
263
+ "MaxPool1d",
264
+ "MaxPool2d",
265
+ "MaxPool3d",
266
+ "MaxUnpool1d",
267
+ "MaxUnpool2d",
268
+ "MaxUnpool3d",
269
+ "Mish",
270
+ "Module",
271
+ "ModuleDict",
272
+ "ModuleList",
273
+ "MultiLabelMarginLoss",
274
+ "MultiLabelSoftMarginLoss",
275
+ "MultiMarginLoss",
276
+ "MultiheadAttention",
277
+ "NLLLoss",
278
+ "NLLLoss2d",
279
+ "PReLU",
280
+ "PairwiseDistance",
281
+ "ParameterDict",
282
+ "ParameterList",
283
+ "PixelShuffle",
284
+ "PixelUnshuffle",
285
+ "PoissonNLLLoss",
286
+ "RMSNorm",
287
+ "RNN",
288
+ "RNNBase",
289
+ "RNNCell",
290
+ "RNNCellBase",
291
+ "RReLU",
292
+ "ReLU",
293
+ "ReLU6",
294
+ "ReflectionPad1d",
295
+ "ReflectionPad2d",
296
+ "ReflectionPad3d",
297
+ "ReplicationPad1d",
298
+ "ReplicationPad2d",
299
+ "ReplicationPad3d",
300
+ "SELU",
301
+ "Sequential",
302
+ "SiLU",
303
+ "Sigmoid",
304
+ "SmoothL1Loss",
305
+ "SoftMarginLoss",
306
+ "Softmax",
307
+ "Softmax2d",
308
+ "Softmin",
309
+ "Softplus",
310
+ "Softshrink",
311
+ "Softsign",
312
+ "SyncBatchNorm",
313
+ "Tanh",
314
+ "Tanhshrink",
315
+ "Threshold",
316
+ "Transformer",
317
+ "TransformerDecoder",
318
+ "TransformerDecoderLayer",
319
+ "TransformerEncoder",
320
+ "TransformerEncoderLayer",
321
+ "TripletMarginLoss",
322
+ "TripletMarginWithDistanceLoss",
323
+ "Unflatten",
324
+ "Unfold",
325
+ "Upsample",
326
+ "UpsamplingBilinear2d",
327
+ "UpsamplingNearest2d",
328
+ "ZeroPad1d",
329
+ "ZeroPad2d",
330
+ "ZeroPad3d",
331
+ ]
332
+
333
+ # Please keep this list sorted
334
+ assert __all__ == sorted(__all__)
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/activation.cpython-310.pyc ADDED
Binary file (55 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/adaptive.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/channelshuffle.cpython-310.pyc ADDED
Binary file (2.2 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/container.cpython-310.pyc ADDED
Binary file (34.6 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/conv.cpython-310.pyc ADDED
Binary file (59.1 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/dropout.cpython-310.pyc ADDED
Binary file (12.3 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/instancenorm.cpython-310.pyc ADDED
Binary file (20.6 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/lazy.cpython-310.pyc ADDED
Binary file (11.8 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/loss.cpython-310.pyc ADDED
Binary file (94.2 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/module.cpython-310.pyc ADDED
Binary file (95 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/padding.cpython-310.pyc ADDED
Binary file (33.5 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (55.4 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/sparse.cpython-310.pyc ADDED
Binary file (21.4 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/transformer.cpython-310.pyc ADDED
Binary file (37.6 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/__pycache__/upsampling.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/modules/batchnorm.py ADDED
@@ -0,0 +1,883 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import Any, Optional
3
+
4
+ import torch
5
+ from torch import Tensor
6
+ from torch.nn import functional as F, init
7
+ from torch.nn.parameter import Parameter, UninitializedBuffer, UninitializedParameter
8
+
9
+ from ._functions import SyncBatchNorm as sync_batch_norm
10
+ from .lazy import LazyModuleMixin
11
+ from .module import Module
12
+
13
+
14
+ __all__ = [
15
+ "BatchNorm1d",
16
+ "LazyBatchNorm1d",
17
+ "BatchNorm2d",
18
+ "LazyBatchNorm2d",
19
+ "BatchNorm3d",
20
+ "LazyBatchNorm3d",
21
+ "SyncBatchNorm",
22
+ ]
23
+
24
+
25
class _NormBase(Module):
    """Common base of _InstanceNorm and _BatchNorm."""

    # Bumped to 2 when the ``num_batches_tracked`` buffer was introduced;
    # ``_load_from_state_dict`` uses it to migrate version-1 checkpoints.
    _version = 2
    __constants__ = ["track_running_stats", "momentum", "eps", "num_features", "affine"]
    num_features: int
    eps: float
    momentum: Optional[float]
    affine: bool
    track_running_stats: bool
    # WARNING: weight and bias purposely not defined here.
    # See https://github.com/pytorch/pytorch/issues/39670

    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
        momentum: Optional[float] = 0.1,
        affine: bool = True,
        track_running_stats: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        # Learnable per-channel scale (weight) and shift (bias) exist only in
        # the affine case; otherwise they are registered as None placeholders
        # so the attribute names always exist.
        if self.affine:
            self.weight = Parameter(torch.empty(num_features, **factory_kwargs))
            self.bias = Parameter(torch.empty(num_features, **factory_kwargs))
        else:
            self.register_parameter("weight", None)
            self.register_parameter("bias", None)
        if self.track_running_stats:
            self.register_buffer(
                "running_mean", torch.zeros(num_features, **factory_kwargs)
            )
            self.register_buffer(
                "running_var", torch.ones(num_features, **factory_kwargs)
            )
            self.running_mean: Optional[Tensor]
            self.running_var: Optional[Tensor]
            # The batch counter is always integral, so the ``dtype`` entry of
            # factory_kwargs is deliberately dropped (it stays torch.long).
            self.register_buffer(
                "num_batches_tracked",
                torch.tensor(
                    0,
                    dtype=torch.long,
                    **{k: v for k, v in factory_kwargs.items() if k != "dtype"},
                ),
            )
            self.num_batches_tracked: Optional[Tensor]
        else:
            self.register_buffer("running_mean", None)
            self.register_buffer("running_var", None)
            self.register_buffer("num_batches_tracked", None)
        self.reset_parameters()

    def reset_running_stats(self) -> None:
        """Reset running mean/var to their initial (0/1) values and zero the counter."""
        if self.track_running_stats:
            # running_mean/running_var/num_batches... are registered at runtime depending
            # if self.track_running_stats is on
            self.running_mean.zero_()  # type: ignore[union-attr]
            self.running_var.fill_(1)  # type: ignore[union-attr]
            self.num_batches_tracked.zero_()  # type: ignore[union-attr,operator]

    def reset_parameters(self) -> None:
        """Reset running statistics and, when affine, re-init weight=1, bias=0."""
        self.reset_running_stats()
        if self.affine:
            init.ones_(self.weight)
            init.zeros_(self.bias)

    def _check_input_dim(self, input):
        # Dimensionality validation is delegated to the concrete subclass.
        raise NotImplementedError

    def extra_repr(self):
        # Summarize the constructor arguments for repr(); pulled straight
        # from the instance __dict__.
        return (
            "{num_features}, eps={eps}, momentum={momentum}, affine={affine}, "
            "track_running_stats={track_running_stats}".format(**self.__dict__)
        )

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        version = local_metadata.get("version", None)

        if (version is None or version < 2) and self.track_running_stats:
            # at version 2: added num_batches_tracked buffer
            # this should have a default value of 0
            num_batches_tracked_key = prefix + "num_batches_tracked"
            if num_batches_tracked_key not in state_dict:
                # Reuse the module's own counter when it holds real data;
                # otherwise (buffer is None or lives on the meta device)
                # fall back to a fresh zero counter.
                state_dict[num_batches_tracked_key] = (
                    self.num_batches_tracked
                    if self.num_batches_tracked is not None
                    and self.num_batches_tracked.device != torch.device("meta")
                    else torch.tensor(0, dtype=torch.long)
                )

        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )
141
+
142
+
143
class _BatchNorm(_NormBase):
    """Shared forward pass for the concrete BatchNorm modules.

    Subclasses supply ``_check_input_dim``; parameter and running-statistic
    handling comes from ``_NormBase``.
    """

    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
        momentum: Optional[float] = 0.1,
        affine: bool = True,
        track_running_stats: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__(
            num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
        )

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)

        # exponential_average_factor is set to self.momentum
        # (when it is available) only so that it gets updated
        # in ONNX graph when this node is exported to ONNX.
        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum

        if self.training and self.track_running_stats:
            # TODO: if statement only here to tell the jit to skip emitting this when it is None
            if self.num_batches_tracked is not None:  # type: ignore[has-type]
                self.num_batches_tracked.add_(1)  # type: ignore[has-type]
                if self.momentum is None:  # use cumulative moving average
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:  # use exponential moving average
                    exponential_average_factor = self.momentum

        r"""
        Decide whether the mini-batch stats should be used for normalization rather than the buffers.
        Mini-batch stats are used in training mode, and in eval mode when buffers are None.
        """
        if self.training:
            bn_training = True
        else:
            bn_training = (self.running_mean is None) and (self.running_var is None)

        r"""
        Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
        passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
        used for normalization (i.e. in eval mode when buffers are not None).
        """
        return F.batch_norm(
            input,
            # If buffers are not to be tracked, ensure that they won't be updated
            self.running_mean
            if not self.training or self.track_running_stats
            else None,
            self.running_var if not self.training or self.track_running_stats else None,
            self.weight,
            self.bias,
            bn_training,
            exponential_average_factor,
            self.eps,
        )
206
+
207
+
208
class _LazyNormBase(LazyModuleMixin, _NormBase):
    """Base for lazy norm modules: ``num_features`` is inferred from the first input.

    Parameters and running buffers start uninitialized and are materialized in
    ``initialize_parameters`` on the first forward pass (via LazyModuleMixin).
    """

    weight: UninitializedParameter  # type: ignore[assignment]
    bias: UninitializedParameter  # type: ignore[assignment]

    def __init__(
        self,
        eps=1e-5,
        momentum=0.1,
        affine=True,
        track_running_stats=True,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__(
            # affine and track_running_stats are hardcoded to False to
            # avoid creating tensors that will soon be overwritten.
            0,
            eps,
            momentum,
            False,
            False,
            **factory_kwargs,
        )
        # Restore the requested flags, then install uninitialized
        # placeholders that ``initialize_parameters`` will materialize.
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            self.weight = UninitializedParameter(**factory_kwargs)
            self.bias = UninitializedParameter(**factory_kwargs)
        if self.track_running_stats:
            self.running_mean = UninitializedBuffer(**factory_kwargs)
            self.running_var = UninitializedBuffer(**factory_kwargs)
            # Integral counter: the ``dtype`` entry of factory_kwargs is
            # deliberately dropped so it stays torch.long.
            self.num_batches_tracked = torch.tensor(
                0,
                dtype=torch.long,
                **{k: v for k, v in factory_kwargs.items() if k != "dtype"},
            )

    def reset_parameters(self) -> None:
        # Skip resetting while shapes are still unknown; num_features == 0 is
        # the placeholder value passed to _NormBase.__init__ above.
        if not self.has_uninitialized_params() and self.num_features != 0:
            super().reset_parameters()

    def initialize_parameters(self, input) -> None:  # type: ignore[override]
        # Invoked by LazyModuleMixin on the first forward call: infer the
        # channel count from dim 1 of the input, materialize every
        # uninitialized tensor to that size, then run the normal reset.
        if self.has_uninitialized_params():
            self.num_features = input.shape[1]
            if self.affine:
                assert isinstance(self.weight, UninitializedParameter)
                assert isinstance(self.bias, UninitializedParameter)
                self.weight.materialize((self.num_features,))
                self.bias.materialize((self.num_features,))
            if self.track_running_stats:
                self.running_mean.materialize(  # type:ignore[union-attr]
                    (self.num_features,)
                )
                self.running_var.materialize(  # type:ignore[union-attr]
                    (self.num_features,)
                )
            self.reset_parameters()
266
+
267
+
268
class BatchNorm1d(_BatchNorm):
    r"""Applies Batch Normalization over a 2D or 3D input.

    Method described in the paper
    `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .

    .. math::

        y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension over
    the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size `C` (where `C` is the number of features or channels of the input). By default, the
    elements of :math:`\gamma` are set to 1 and the elements of :math:`\beta` are set to 0.
    At train time in the forward pass, the standard-deviation is calculated via the biased estimator,
    equivalent to ``torch.var(input, unbiased=False)``. However, the value stored in the
    moving average of the standard-deviation is calculated via the unbiased estimator, equivalent to
    ``torch.var(input, unbiased=True)``.

    Also by default, during training this layer keeps running estimates of its
    computed mean and variance, which are then used for normalization during
    evaluation. The running estimates are kept with a default :attr:`momentum`
    of 0.1.

    If :attr:`track_running_stats` is set to ``False``, this layer then does not
    keep running estimates, and batch statistics are instead used during
    evaluation time as well.

    .. note::
        This :attr:`momentum` argument is different from one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    Because the Batch Normalization is done over the `C` dimension, computing statistics
    on `(N, L)` slices, it's common terminology to call this Temporal Batch Normalization.

    Args:
        num_features: number of features or channels :math:`C` of the input
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving average
            (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics, and initializes statistics
            buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
            When these buffers are ``None``, this module always uses batch statistics
            in both training and eval modes. Default: ``True``

    Shape:
        - Input: :math:`(N, C)` or :math:`(N, C, L)`, where :math:`N` is the batch size,
          :math:`C` is the number of features or channels, and :math:`L` is the sequence length
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)

    Examples::

        >>> # With Learnable Parameters
        >>> m = nn.BatchNorm1d(100)
        >>> # Without Learnable Parameters
        >>> m = nn.BatchNorm1d(100, affine=False)
        >>> input = torch.randn(20, 100)
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        # Normalization runs over dim 1, so inputs must be (N, C) or (N, C, L).
        if input.dim() != 2 and input.dim() != 3:
            raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")
+
343
+
344
class LazyBatchNorm1d(_LazyNormBase, _BatchNorm):
    r"""A :class:`torch.nn.BatchNorm1d` module with lazy initialization.

    Lazy initialization based on the ``num_features`` argument of the :class:`BatchNorm1d` that is inferred
    from the ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving average
            (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics, and initializes statistics
            buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
            When these buffers are ``None``, this module always uses batch statistics
            in both training and eval modes. Default: ``True``
    """

    # After materialization the lazy module mutates into this concrete class.
    cls_to_become = BatchNorm1d  # type: ignore[assignment]

    def _check_input_dim(self, input):
        if input.dim() != 2 and input.dim() != 3:
            raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")
376
+
377
+
378
class BatchNorm2d(_BatchNorm):
    r"""Applies Batch Normalization over a 4D input.

    4D is a mini-batch of 2D inputs
    with additional channel dimension. Method described in the paper
    `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension over
    the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set
    to 1 and the elements of :math:`\beta` are set to 0. At train time in the forward pass, the
    standard-deviation is calculated via the biased estimator, equivalent to
    ``torch.var(input, unbiased=False)``. However, the value stored in the moving average of the
    standard-deviation is calculated via the unbiased estimator, equivalent to
    ``torch.var(input, unbiased=True)``.

    Also by default, during training this layer keeps running estimates of its
    computed mean and variance, which are then used for normalization during
    evaluation. The running estimates are kept with a default :attr:`momentum`
    of 0.1.

    If :attr:`track_running_stats` is set to ``False``, this layer then does not
    keep running estimates, and batch statistics are instead used during
    evaluation time as well.

    .. note::
        This :attr:`momentum` argument is different from one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    Because the Batch Normalization is done over the `C` dimension, computing statistics
    on `(N, H, W)` slices, it's common terminology to call this Spatial Batch Normalization.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, H, W)`
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving average
            (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics, and initializes statistics
            buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
            When these buffers are ``None``, this module always uses batch statistics
            in both training and eval modes. Default: ``True``

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Examples::

        >>> # With Learnable Parameters
        >>> m = nn.BatchNorm2d(100)
        >>> # Without Learnable Parameters
        >>> m = nn.BatchNorm2d(100, affine=False)
        >>> input = torch.randn(20, 100, 35, 45)
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        # Normalization runs over dim 1, so inputs must be (N, C, H, W).
        if input.dim() != 4:
            raise ValueError(f"expected 4D input (got {input.dim()}D input)")
453
+
454
+
455
class LazyBatchNorm2d(_LazyNormBase, _BatchNorm):
    r"""A :class:`torch.nn.BatchNorm2d` module with lazy initialization.

    Lazy initialization is done for the ``num_features`` argument of the :class:`BatchNorm2d` that is inferred
    from the ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving average
            (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics, and initializes statistics
            buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
            When these buffers are ``None``, this module always uses batch statistics
            in both training and eval modes. Default: ``True``
    """

    # After materialization the lazy module mutates into this concrete class.
    cls_to_become = BatchNorm2d  # type: ignore[assignment]

    def _check_input_dim(self, input):
        if input.dim() != 4:
            raise ValueError(f"expected 4D input (got {input.dim()}D input)")
487
+
488
+
489
class BatchNorm3d(_BatchNorm):
    r"""Applies Batch Normalization over a 5D input.

    5D is a mini-batch of 3D inputs with additional channel dimension as described in the paper
    `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension over
    the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size `C` (where `C` is the input size). By default, the elements of :math:`\gamma` are set
    to 1 and the elements of :math:`\beta` are set to 0. At train time in the forward pass, the
    standard-deviation is calculated via the biased estimator, equivalent to
    ``torch.var(input, unbiased=False)``. However, the value stored in the moving average of the
    standard-deviation is calculated via the unbiased estimator, equivalent to
    ``torch.var(input, unbiased=True)``.

    Also by default, during training this layer keeps running estimates of its
    computed mean and variance, which are then used for normalization during
    evaluation. The running estimates are kept with a default :attr:`momentum`
    of 0.1.

    If :attr:`track_running_stats` is set to ``False``, this layer then does not
    keep running estimates, and batch statistics are instead used during
    evaluation time as well.

    .. note::
        This :attr:`momentum` argument is different from one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    Because the Batch Normalization is done over the `C` dimension, computing statistics
    on `(N, D, H, W)` slices, it's common terminology to call this Volumetric Batch Normalization
    or Spatio-temporal Batch Normalization.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, D, H, W)`
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving average
            (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics, and initializes statistics
            buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
            When these buffers are ``None``, this module always uses batch statistics
            in both training and eval modes. Default: ``True``

    Shape:
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)

    Examples::

        >>> # With Learnable Parameters
        >>> m = nn.BatchNorm3d(100)
        >>> # Without Learnable Parameters
        >>> m = nn.BatchNorm3d(100, affine=False)
        >>> input = torch.randn(20, 100, 35, 45, 10)
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        # Normalization runs over dim 1, so inputs must be (N, C, D, H, W).
        if input.dim() != 5:
            raise ValueError(f"expected 5D input (got {input.dim()}D input)")
564
+
565
+
566
class LazyBatchNorm3d(_LazyNormBase, _BatchNorm):
    r"""A :class:`torch.nn.BatchNorm3d` module with lazy initialization.

    Lazy initialization is done for the ``num_features`` argument of the :class:`BatchNorm3d` that is inferred
    from the ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving average
            (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics, and initializes statistics
            buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
            When these buffers are ``None``, this module always uses batch statistics
            in both training and eval modes. Default: ``True``
    """

    # After materialization the lazy module mutates into this concrete class.
    cls_to_become = BatchNorm3d  # type: ignore[assignment]

    def _check_input_dim(self, input):
        if input.dim() != 5:
            raise ValueError(f"expected 5D input (got {input.dim()}D input)")
598
+
599
+
600
class SyncBatchNorm(_BatchNorm):
    r"""Applies Batch Normalization over a N-Dimensional input.

    The N-D input is a mini-batch of [N-2]D inputs with additional channel dimension, as described in the paper
    `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension over all
    mini-batches of the same process groups. :math:`\gamma` and :math:`\beta`
    are learnable parameter vectors of size `C` (where `C` is the input size).
    By default, the elements of :math:`\gamma` are sampled from
    :math:`\mathcal{U}(0, 1)` and the elements of :math:`\beta` are set to 0.
    The standard-deviation is calculated via the biased estimator, equivalent to
    `torch.var(input, unbiased=False)`.

    Also by default, during training this layer keeps running estimates of its
    computed mean and variance, which are then used for normalization during
    evaluation. The running estimates are kept with a default :attr:`momentum`
    of 0.1.

    If :attr:`track_running_stats` is set to ``False``, this layer then does not
    keep running estimates, and batch statistics are instead used during
    evaluation time as well.

    .. note::
        This :attr:`momentum` argument is different from one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    Because the Batch Normalization is done for each channel in the ``C`` dimension, computing
    statistics on ``(N, +)`` slices, it's common terminology to call this Volumetric Batch
    Normalization or Spatio-temporal Batch Normalization.

    Currently :class:`SyncBatchNorm` only supports
    :class:`~torch.nn.DistributedDataParallel` (DDP) with single GPU per process. Use
    :meth:`torch.nn.SyncBatchNorm.convert_sync_batchnorm()` to convert
    :attr:`BatchNorm*D` layer to :class:`SyncBatchNorm` before wrapping
    Network with DDP.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, +)`
        eps: a value added to the denominator for numerical stability.
            Default: ``1e-5``
        momentum: the value used for the running_mean and running_var
            computation. Can be set to ``None`` for cumulative moving average
            (i.e. simple average). Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters. Default: ``True``
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics, and initializes statistics
            buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
            When these buffers are ``None``, this module always uses batch statistics
            in both training and eval modes. Default: ``True``
        process_group: synchronization of stats happen within each process group
            individually. Default behavior is synchronization across the whole
            world

    Shape:
        - Input: :math:`(N, C, +)`
        - Output: :math:`(N, C, +)` (same shape as input)

    .. note::
        Synchronization of batchnorm statistics occurs only while training, i.e.
        synchronization is disabled when ``model.eval()`` is set or if
        ``self.training`` is otherwise ``False``.

    Examples::

        >>> # xdoctest: +SKIP
        >>> # With Learnable Parameters
        >>> m = nn.SyncBatchNorm(100)
        >>> # creating process group (optional)
        >>> # ranks is a list of int identifying rank ids.
        >>> ranks = list(range(8))
        >>> r1, r2 = ranks[:4], ranks[4:]
        >>> # Note: every rank calls into new_group for every
        >>> # process group created, even if that rank is not
        >>> # part of the group.
        >>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
        >>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
        >>> # Without Learnable Parameters
        >>> m = nn.SyncBatchNorm(100, affine=False, process_group=process_group)
        >>> input = torch.randn(20, 100, 35, 45, 10)
        >>> output = m(input)

        >>> # network is nn.BatchNorm layer
        >>> sync_bn_network = nn.SyncBatchNorm.convert_sync_batchnorm(network, process_group)
        >>> # only single gpu per process is currently supported
        >>> ddp_sync_bn_network = torch.nn.parallel.DistributedDataParallel(
        >>>                         sync_bn_network,
        >>>                         device_ids=[args.local_rank],
        >>>                         output_device=args.local_rank)
    """

    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
        momentum: Optional[float] = 0.1,
        affine: bool = True,
        track_running_stats: bool = True,
        process_group: Optional[Any] = None,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__(
            num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
        )
        # Stats are synchronized only within this group (whole world if None).
        self.process_group = process_group

    def _check_input_dim(self, input):
        # SyncBatchNorm is dimension-agnostic beyond requiring a channel dim.
        if input.dim() < 2:
            raise ValueError(f"expected at least 2D input (got {input.dim()}D input)")

    def _check_non_zero_input_channels(self, input):
        if input.size(1) == 0:
            raise ValueError(
                "SyncBatchNorm number of input channels should be non-zero"
            )

    def forward(self, input: Tensor) -> Tensor:
        """Normalize ``input``, synchronizing batch statistics across the
        process group while training under an initialized distributed backend;
        otherwise falls back to regular (local) batch norm."""
        self._check_input_dim(input)
        self._check_non_zero_input_channels(input)

        # exponential_average_factor is set to self.momentum
        # (when it is available) only so that it gets updated
        # in ONNX graph when this node is exported to ONNX.
        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum

        if self.training and self.track_running_stats:
            assert self.num_batches_tracked is not None
            self.num_batches_tracked.add_(1)
            if self.momentum is None:  # use cumulative moving average
                exponential_average_factor = 1.0 / self.num_batches_tracked.item()
            else:  # use exponential moving average
                exponential_average_factor = self.momentum

        r"""
        Decide whether the mini-batch stats should be used for normalization rather than the buffers.
        Mini-batch stats are used in training mode, and in eval mode when buffers are None.
        """
        if self.training:
            bn_training = True
        else:
            bn_training = (self.running_mean is None) and (self.running_var is None)

        r"""
        Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
        passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
        used for normalization (i.e. in eval mode when buffers are not None).
        """
        # If buffers are not to be tracked, ensure that they won't be updated
        running_mean = (
            self.running_mean if not self.training or self.track_running_stats else None
        )
        running_var = (
            self.running_var if not self.training or self.track_running_stats else None
        )

        # Don't sync batchnorm stats in inference mode (model.eval()).
        need_sync = (
            bn_training
            and self.training
            and torch.distributed.is_available()
            and torch.distributed.is_initialized()
        )
        if need_sync:
            # currently only GPU/PrivateUse1 input is supported
            if input.device.type not in [
                "cuda",
                torch._C._get_privateuse1_backend_name(),
            ]:
                raise ValueError(
                    "SyncBatchNorm expected input tensor to be on GPU or "
                    f"{torch._C._get_privateuse1_backend_name()}"
                )

            process_group = torch.distributed.group.WORLD
            if self.process_group:
                process_group = self.process_group
            world_size = torch.distributed.get_world_size(process_group)
            # A single-rank group has nothing to synchronize with.
            need_sync = world_size > 1

        # fallback to framework BN when synchronization is not necessary
        if not need_sync:
            return F.batch_norm(
                input,
                running_mean,
                running_var,
                self.weight,
                self.bias,
                bn_training,
                exponential_average_factor,
                self.eps,
            )
        else:
            assert bn_training
            return sync_batch_norm.apply(
                input,
                self.weight,
                self.bias,
                running_mean,
                running_var,
                self.eps,
                exponential_average_factor,
                process_group,  # type: ignore[possibly-undefined]
                world_size,  # type: ignore[possibly-undefined]
            )

    @classmethod
    def convert_sync_batchnorm(cls, module, process_group=None):
        r"""Converts all :attr:`BatchNorm*D` layers in the model to :class:`torch.nn.SyncBatchNorm` layers.

        Args:
            module (nn.Module): module containing one or more :attr:`BatchNorm*D` layers
            process_group (optional): process group to scope synchronization,
                default is the whole world

        Returns:
            The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm`
            layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
            a new :class:`torch.nn.SyncBatchNorm` layer object will be returned
            instead.

        Example::

            >>> # Network with nn.BatchNorm layer
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
            >>> module = torch.nn.Sequential(
            >>>     torch.nn.Linear(20, 100),
            >>>     torch.nn.BatchNorm1d(100),
            >>> ).cuda()
            >>> # creating process group (optional)
            >>> # ranks is a list of int identifying rank ids.
            >>> ranks = list(range(8))
            >>> r1, r2 = ranks[:4], ranks[4:]
            >>> # Note: every rank calls into new_group for every
            >>> # process group created, even if that rank is not
            >>> # part of the group.
            >>> # xdoctest: +SKIP("distributed")
            >>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
            >>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
            >>> sync_bn_module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module, process_group)

        """
        module_output = module
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            module_output = torch.nn.SyncBatchNorm(
                module.num_features,
                module.eps,
                module.momentum,
                module.affine,
                module.track_running_stats,
                process_group,
            )
            if module.affine:
                # Share (not copy) the affine parameters with the new module.
                with torch.no_grad():
                    module_output.weight = module.weight
                    module_output.bias = module.bias
            module_output.running_mean = module.running_mean
            module_output.running_var = module.running_var
            module_output.num_batches_tracked = module.num_batches_tracked
            module_output.training = module.training
            if hasattr(module, "qconfig"):
                module_output.qconfig = module.qconfig
        # Recurse into children so nested BatchNorm layers are converted too.
        for name, child in module.named_children():
            module_output.add_module(
                name, cls.convert_sync_batchnorm(child, process_group)
            )
        del module
        return module_output
janus/lib/python3.10/site-packages/torch/nn/modules/container.py ADDED
@@ -0,0 +1,976 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-decorators
2
+ # mypy: allow-untyped-defs
3
+ import operator
4
+ from collections import abc as container_abcs, OrderedDict
5
+ from itertools import chain, islice
6
+ from typing import (
7
+ Any,
8
+ Dict,
9
+ Iterable,
10
+ Iterator,
11
+ Mapping,
12
+ Optional,
13
+ overload,
14
+ Tuple,
15
+ TypeVar,
16
+ Union,
17
+ )
18
+ from typing_extensions import deprecated, Self
19
+
20
+ import torch
21
+ from torch._jit_internal import _copy_to_script_wrapper
22
+ from torch.nn.parameter import Parameter
23
+
24
+ from .module import Module
25
+
26
+
27
+ __all__ = [
28
+ "Container",
29
+ "Sequential",
30
+ "ModuleList",
31
+ "ModuleDict",
32
+ "ParameterList",
33
+ "ParameterDict",
34
+ ]
35
+
36
+ T = TypeVar("T", bound=Module)
37
+
38
+
39
+ # Copied from torch.nn.modules.module, required for a custom __repr__ for ModuleList
40
+ def _addindent(s_, numSpaces):
41
+ s = s_.split("\n")
42
+ # don't do anything for single-line stuff
43
+ if len(s) == 1:
44
+ return s_
45
+ first = s.pop(0)
46
+ s = [(numSpaces * " ") + line for line in s]
47
+ s = "\n".join(s)
48
+ s = first + "\n" + s
49
+ return s
50
+
51
+
52
@deprecated(
    "`nn.Container` is deprecated. "
    "All of it's functionality is now implemented in `nn.Module`. Subclass that instead.",
    category=FutureWarning,
)
class Container(Module):
    # Deprecated grab-bag container kept only for backward compatibility:
    # every keyword argument is registered as a named child module.
    def __init__(self, **kwargs: Any) -> None:
        super().__init__()
        for key, value in kwargs.items():
            self.add_module(key, value)
62
+
63
+
64
class Sequential(Module):
    r"""A sequential container.

    Modules will be added to it in the order they are passed in the
    constructor. Alternatively, an ``OrderedDict`` of modules can be
    passed in. The ``forward()`` method of ``Sequential`` accepts any
    input and forwards it to the first module it contains. It then
    "chains" outputs to inputs sequentially for each subsequent module,
    finally returning the output of the last module.

    The value a ``Sequential`` provides over manually calling a sequence
    of modules is that it allows treating the whole container as a
    single module, such that performing a transformation on the
    ``Sequential`` applies to each of the modules it stores (which are
    each a registered submodule of the ``Sequential``).

    What's the difference between a ``Sequential`` and a
    :class:`torch.nn.ModuleList`? A ``ModuleList`` is exactly what it
    sounds like--a list for storing ``Module`` s! On the other hand,
    the layers in a ``Sequential`` are connected in a cascading way.

    Example::

        # Using Sequential to create a small model. When `model` is run,
        # input will first be passed to `Conv2d(1,20,5)`. The output of
        # `Conv2d(1,20,5)` will be used as the input to the first
        # `ReLU`; the output of the first `ReLU` will become the input
        # for `Conv2d(20,64,5)`. Finally, the output of
        # `Conv2d(20,64,5)` will be used as input to the second `ReLU`
        model = nn.Sequential(
                  nn.Conv2d(1,20,5),
                  nn.ReLU(),
                  nn.Conv2d(20,64,5),
                  nn.ReLU()
                )

        # Using Sequential with OrderedDict. This is functionally the
        # same as the above code
        model = nn.Sequential(OrderedDict([
                  ('conv1', nn.Conv2d(1,20,5)),
                  ('relu1', nn.ReLU()),
                  ('conv2', nn.Conv2d(20,64,5)),
                  ('relu2', nn.ReLU())
                ]))
    """

    _modules: Dict[str, Module]  # type: ignore[assignment]

    @overload
    def __init__(self, *args: Module) -> None:
        ...

    @overload
    def __init__(self, arg: "OrderedDict[str, Module]") -> None:
        ...

    def __init__(self, *args):
        super().__init__()
        # A single OrderedDict argument supplies explicit names; otherwise
        # modules are registered under their positional index as the name.
        if len(args) == 1 and isinstance(args[0], OrderedDict):
            for key, module in args[0].items():
                self.add_module(key, module)
        else:
            for idx, module in enumerate(args):
                self.add_module(str(idx), module)

    def _get_item_by_idx(self, iterator, idx) -> T:  # type: ignore[misc, type-var]
        """Get the idx-th item of the iterator."""
        size = len(self)
        idx = operator.index(idx)
        if not -size <= idx < size:
            raise IndexError(f"index {idx} is out of range")
        # Wrap negative indices into [0, size).
        idx %= size
        return next(islice(iterator, idx, None))

    @_copy_to_script_wrapper
    def __getitem__(self, idx: Union[slice, int]) -> Union["Sequential", T]:
        # A slice returns a new Sequential preserving the original keys;
        # an int returns the module itself.
        if isinstance(idx, slice):
            return self.__class__(OrderedDict(list(self._modules.items())[idx]))
        else:
            return self._get_item_by_idx(self._modules.values(), idx)

    def __setitem__(self, idx: int, module: Module) -> None:
        # Reuses the existing key at position idx so ordering is unchanged.
        key: str = self._get_item_by_idx(self._modules.keys(), idx)
        return setattr(self, key, module)

    def __delitem__(self, idx: Union[slice, int]) -> None:
        if isinstance(idx, slice):
            for key in list(self._modules.keys())[idx]:
                delattr(self, key)
        else:
            key = self._get_item_by_idx(self._modules.keys(), idx)
            delattr(self, key)
        # To preserve numbering: re-key remaining modules as "0".."n-1".
        str_indices = [str(i) for i in range(len(self._modules))]
        self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))

    @_copy_to_script_wrapper
    def __len__(self) -> int:
        return len(self._modules)

    def __add__(self, other) -> "Sequential":
        # Concatenation: a fresh Sequential holding self's layers then other's.
        if isinstance(other, Sequential):
            ret = Sequential()
            for layer in self:
                ret.append(layer)
            for layer in other:
                ret.append(layer)
            return ret
        else:
            raise ValueError(
                "add operator supports only objects "
                f"of Sequential class, but {str(type(other))} is given."
            )

    def pop(self, key: Union[int, slice]) -> Module:
        # Remove and return the module(s) at `key`.
        v = self[key]
        del self[key]
        return v

    def __iadd__(self, other) -> Self:
        if isinstance(other, Sequential):
            offset = len(self)
            for i, module in enumerate(other):
                self.add_module(str(i + offset), module)
            return self
        else:
            raise ValueError(
                "add operator supports only objects "
                f"of Sequential class, but {str(type(other))} is given."
            )

    def __mul__(self, other: int) -> "Sequential":
        # `seq * n`: a new Sequential containing the layers repeated n times.
        # NOTE: the repeated entries reference the SAME module objects.
        if not isinstance(other, int):
            raise TypeError(
                f"unsupported operand type(s) for *: {type(self)} and {type(other)}"
            )
        elif other <= 0:
            raise ValueError(
                f"Non-positive multiplication factor {other} for {type(self)}"
            )
        else:
            combined = Sequential()
            offset = 0
            for _ in range(other):
                for module in self:
                    combined.add_module(str(offset), module)
                    offset += 1
            return combined

    def __rmul__(self, other: int) -> "Sequential":
        return self.__mul__(other)

    def __imul__(self, other: int) -> Self:
        # In-place repetition: append (other - 1) extra copies of the
        # original modules (shared references, as with __mul__).
        if not isinstance(other, int):
            raise TypeError(
                f"unsupported operand type(s) for *: {type(self)} and {type(other)}"
            )
        elif other <= 0:
            raise ValueError(
                f"Non-positive multiplication factor {other} for {type(self)}"
            )
        else:
            len_original = len(self)
            offset = len(self)
            for _ in range(other - 1):
                for i in range(len_original):
                    self.add_module(str(i + offset), self._modules[str(i)])
                offset += len_original
            return self

    @_copy_to_script_wrapper
    def __dir__(self):
        # Hide the numeric child-module attributes from dir().
        keys = super().__dir__()
        keys = [key for key in keys if not key.isdigit()]
        return keys

    @_copy_to_script_wrapper
    def __iter__(self) -> Iterator[Module]:
        return iter(self._modules.values())

    # NB: We can't really type check this function as the type of input
    # may change dynamically (as is tested in
    # TestScript.test_sequential_intermediary_types). Cannot annotate
    # with Any as TorchScript expects a more precise type
    def forward(self, input):
        for module in self:
            input = module(input)
        return input

    def append(self, module: Module) -> "Sequential":
        r"""Append a given module to the end.

        Args:
            module (nn.Module): module to append
        """
        self.add_module(str(len(self)), module)
        return self

    def insert(self, index: int, module: Module) -> "Sequential":
        # Insert `module` before `index`, shifting subsequent keys up by one.
        if not isinstance(module, Module):
            raise AssertionError(f"module should be of type: {Module}")
        n = len(self._modules)
        if not (-n <= index <= n):
            raise IndexError(f"Index out of range: {index}")
        if index < 0:
            index += n
        for i in range(n, index, -1):
            self._modules[str(i)] = self._modules[str(i - 1)]
        self._modules[str(index)] = module
        return self

    def extend(self, sequential) -> "Sequential":
        # Append every layer of `sequential` (any iterable of modules) in order.
        for layer in sequential:
            self.append(layer)
        return self
279
+
280
+
281
class ModuleList(Module):
    r"""Holds submodules in a list.

    :class:`~torch.nn.ModuleList` can be indexed like a regular Python list, but
    modules it contains are properly registered, and will be visible by all
    :class:`~torch.nn.Module` methods.

    Args:
        modules (iterable, optional): an iterable of modules to add

    Example::

        class MyModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])

            def forward(self, x):
                # ModuleList can act as an iterable, or be indexed using ints
                for i, l in enumerate(self.linears):
                    x = self.linears[i // 2](x) + l(x)
                return x
    """

    _modules: Dict[str, Module]  # type: ignore[assignment]

    def __init__(self, modules: Optional[Iterable[Module]] = None) -> None:
        super().__init__()
        if modules is not None:
            self += modules

    def _get_abs_string_index(self, idx):
        """Get the absolute index for the list of modules."""
        idx = operator.index(idx)
        if not (-len(self) <= idx < len(self)):
            raise IndexError(f"index {idx} is out of range")
        if idx < 0:
            idx += len(self)
        # Child modules are stored under their string index.
        return str(idx)

    @overload
    def __getitem__(self, idx: slice) -> "ModuleList":
        ...

    @overload
    def __getitem__(self, idx: int) -> Module:
        ...

    @_copy_to_script_wrapper
    def __getitem__(self, idx: Union[int, slice]) -> Union[Module, "ModuleList"]:
        if isinstance(idx, slice):
            return self.__class__(list(self._modules.values())[idx])
        else:
            return self._modules[self._get_abs_string_index(idx)]

    def __setitem__(self, idx: int, module: Module) -> None:
        idx = self._get_abs_string_index(idx)
        return setattr(self, str(idx), module)

    def __delitem__(self, idx: Union[int, slice]) -> None:
        if isinstance(idx, slice):
            for k in range(len(self._modules))[idx]:
                delattr(self, str(k))
        else:
            delattr(self, self._get_abs_string_index(idx))
        # To preserve numbering, self._modules is being reconstructed with modules after deletion
        str_indices = [str(i) for i in range(len(self._modules))]
        self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))

    @_copy_to_script_wrapper
    def __len__(self) -> int:
        return len(self._modules)

    @_copy_to_script_wrapper
    def __iter__(self) -> Iterator[Module]:
        return iter(self._modules.values())

    def __iadd__(self, modules: Iterable[Module]) -> Self:
        return self.extend(modules)

    def __add__(self, other: Iterable[Module]) -> "ModuleList":
        combined = ModuleList()
        for i, module in enumerate(chain(self, other)):
            combined.add_module(str(i), module)
        return combined

    def __repr__(self):
        """Return a custom repr for ModuleList that compresses repeated module representations."""
        list_of_reprs = [repr(item) for item in self]
        if len(list_of_reprs) == 0:
            return self._get_name() + "()"

        # Group consecutive identical reprs into [start, end] index ranges so
        # e.g. ten identical Linears print as "(0-9): 10 x Linear(...)".
        start_end_indices = [[0, 0]]
        repeated_blocks = [list_of_reprs[0]]
        for i, r in enumerate(list_of_reprs[1:], 1):
            if r == repeated_blocks[-1]:
                start_end_indices[-1][1] += 1
                continue

            start_end_indices.append([i, i])
            repeated_blocks.append(r)

        lines = []
        main_str = self._get_name() + "("
        for (start_id, end_id), b in zip(start_end_indices, repeated_blocks):
            local_repr = f"({start_id}): {b}"  # default repr

            if start_id != end_id:
                n = end_id - start_id + 1
                local_repr = f"({start_id}-{end_id}): {n} x {b}"

            local_repr = _addindent(local_repr, 2)
            lines.append(local_repr)

        main_str += "\n  " + "\n  ".join(lines) + "\n"
        main_str += ")"
        return main_str

    @_copy_to_script_wrapper
    def __dir__(self):
        # Hide the numeric child-module attributes from dir().
        keys = super().__dir__()
        keys = [key for key in keys if not key.isdigit()]
        return keys

    def insert(self, index: int, module: Module) -> None:
        r"""Insert a given module before a given index in the list.

        Args:
            index (int): index to insert.
            module (nn.Module): module to insert
        """
        # Shift entries at index and beyond up by one, then place `module`.
        for i in range(len(self._modules), index, -1):
            self._modules[str(i)] = self._modules[str(i - 1)]
        self._modules[str(index)] = module

    def append(self, module: Module) -> "ModuleList":
        r"""Append a given module to the end of the list.

        Args:
            module (nn.Module): module to append
        """
        self.add_module(str(len(self)), module)
        return self

    def pop(self, key: Union[int, slice]) -> Module:
        # Remove and return the module(s) at `key`.
        v = self[key]
        del self[key]
        return v

    def extend(self, modules: Iterable[Module]) -> Self:
        r"""Append modules from a Python iterable to the end of the list.

        Args:
            modules (iterable): iterable of modules to append
        """
        if not isinstance(modules, container_abcs.Iterable):
            raise TypeError(
                "ModuleList.extend should be called with an "
                "iterable, but got " + type(modules).__name__
            )
        offset = len(self)
        for i, module in enumerate(modules):
            self.add_module(str(offset + i), module)
        return self

    # remove forward altogether to fallback on Module's _forward_unimplemented
445
+
446
+ # remove forward alltogether to fallback on Module's _forward_unimplemented
447
+
448
+
449
class ModuleDict(Module):
    r"""Holds submodules in a dictionary.

    :class:`~torch.nn.ModuleDict` can be indexed like a regular Python dictionary,
    but modules it contains are properly registered, and will be visible by all
    :class:`~torch.nn.Module` methods.

    :class:`~torch.nn.ModuleDict` is an **ordered** dictionary that respects

    * the order of insertion, and

    * in :meth:`~torch.nn.ModuleDict.update`, the order of the merged
      ``OrderedDict``, ``dict`` (started from Python 3.6) or another
      :class:`~torch.nn.ModuleDict` (the argument to
      :meth:`~torch.nn.ModuleDict.update`).

    Note that :meth:`~torch.nn.ModuleDict.update` with other unordered mapping
    types (e.g., Python's plain ``dict`` before Python version 3.6) does not
    preserve the order of the merged mapping.

    Args:
        modules (iterable, optional): a mapping (dictionary) of (string: module)
            or an iterable of key-value pairs of type (string, module)

    Example::

        class MyModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.choices = nn.ModuleDict({
                        'conv': nn.Conv2d(10, 10, 3),
                        'pool': nn.MaxPool2d(3)
                })
                self.activations = nn.ModuleDict([
                        ['lrelu', nn.LeakyReLU()],
                        ['prelu', nn.PReLU()]
                ])

            def forward(self, x, choice, act):
                x = self.choices[choice](x)
                x = self.activations[act](x)
                return x
    """

    _modules: Dict[str, Module]  # type: ignore[assignment]

    def __init__(self, modules: Optional[Mapping[str, Module]] = None) -> None:
        super().__init__()
        if modules is not None:
            self.update(modules)

    @_copy_to_script_wrapper
    def __getitem__(self, key: str) -> Module:
        return self._modules[key]

    def __setitem__(self, key: str, module: Module) -> None:
        self.add_module(key, module)

    def __delitem__(self, key: str) -> None:
        del self._modules[key]

    @_copy_to_script_wrapper
    def __len__(self) -> int:
        return len(self._modules)

    @_copy_to_script_wrapper
    def __iter__(self) -> Iterator[str]:
        return iter(self._modules)

    @_copy_to_script_wrapper
    def __contains__(self, key: str) -> bool:
        return key in self._modules

    def clear(self) -> None:
        """Remove all items from the ModuleDict."""
        self._modules.clear()

    def pop(self, key: str) -> Module:
        r"""Remove key from the ModuleDict and return its module.

        Args:
            key (str): key to pop from the ModuleDict
        """
        v = self[key]
        del self[key]
        return v

    @_copy_to_script_wrapper
    def keys(self) -> Iterable[str]:
        r"""Return an iterable of the ModuleDict keys."""
        return self._modules.keys()

    @_copy_to_script_wrapper
    def items(self) -> Iterable[Tuple[str, Module]]:
        r"""Return an iterable of the ModuleDict key/value pairs."""
        return self._modules.items()

    @_copy_to_script_wrapper
    def values(self) -> Iterable[Module]:
        r"""Return an iterable of the ModuleDict values."""
        return self._modules.values()

    def update(self, modules: Mapping[str, Module]) -> None:
        r"""Update the :class:`~torch.nn.ModuleDict` with key-value pairs from a mapping, overwriting existing keys.

        .. note::
            If :attr:`modules` is an ``OrderedDict``, a :class:`~torch.nn.ModuleDict`, or
            an iterable of key-value pairs, the order of new elements in it is preserved.

        Args:
            modules (iterable): a mapping (dictionary) from string to :class:`~torch.nn.Module`,
                or an iterable of key-value pairs of type (string, :class:`~torch.nn.Module`)
        """
        if not isinstance(modules, container_abcs.Iterable):
            raise TypeError(
                "ModuleDict.update should be called with an "
                "iterable of key/value pairs, but got " + type(modules).__name__
            )

        if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):
            for key, module in modules.items():
                self[key] = module
        else:
            # modules here can be a list with two items
            for j, m in enumerate(modules):
                if not isinstance(m, container_abcs.Iterable):
                    raise TypeError(
                        "ModuleDict update sequence element "
                        "#" + str(j) + " should be Iterable; is" + type(m).__name__
                    )
                if not len(m) == 2:
                    raise ValueError(
                        "ModuleDict update sequence element "
                        "#" + str(j) + " has length " + str(len(m)) + "; 2 is required"
                    )
                # modules can be Mapping (what it's typed at), or a list: [(name1, module1), (name2, module2)]
                # that's too cumbersome to type correctly with overloads, so we add an ignore here
                self[m[0]] = m[1]  # type: ignore[assignment]

    # remove forward altogether to fallback on Module's _forward_unimplemented
587
+
588
+ # remove forward alltogether to fallback on Module's _forward_unimplemented
589
+
590
+
591
class ParameterList(Module):
    r"""Holds parameters in a list.

    :class:`~torch.nn.ParameterList` can be used like a regular Python
    list, but Tensors that are :class:`~torch.nn.Parameter` are properly registered,
    and will be visible by all :class:`~torch.nn.Module` methods.

    Note that the constructor, assigning an element of the list, the
    :meth:`~torch.nn.ParameterList.append` method and the :meth:`~torch.nn.ParameterList.extend`
    method will convert any :class:`~torch.Tensor` into :class:`~torch.nn.Parameter`.

    Args:
        parameters (iterable, optional): an iterable of elements to add to the list.

    Example::

        class MyModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.params = nn.ParameterList([nn.Parameter(torch.randn(10, 10)) for i in range(10)])

            def forward(self, x):
                # ParameterList can act as an iterable, or be indexed using ints
                for i, p in enumerate(self.params):
                    x = self.params[i // 2].mm(x) + p.mm(x)
                return x
    """

    def __init__(self, values: Optional[Iterable[Any]] = None) -> None:
        super().__init__()
        # Length is tracked explicitly because entries live as numbered
        # attributes (parameters or arbitrary objects), not in one container.
        self._size = 0
        if values is not None:
            self += values

    def _get_abs_string_index(self, idx):
        """Get the absolute index for the list of modules."""
        idx = operator.index(idx)
        if not (-len(self) <= idx < len(self)):
            raise IndexError(f"index {idx} is out of range")
        if idx < 0:
            idx += len(self)
        return str(idx)

    @overload
    def __getitem__(self, idx: int) -> Any:
        ...

    @overload
    def __getitem__(self: T, idx: slice) -> T:
        ...

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            # Build a new ParameterList holding the selected entries.
            start, stop, step = idx.indices(len(self))
            out = self.__class__()
            for i in range(start, stop, step):
                out.append(self[i])
            return out
        else:
            idx = self._get_abs_string_index(idx)
            return getattr(self, str(idx))

    def __setitem__(self, idx: int, param: Any) -> None:
        # Note that all other function that add an entry to the list part of
        # the ParameterList end up here. So this is the only place where we need
        # to wrap things into Parameter if needed.
        # Objects added via setattr() are not in the list part and thus won't
        # call into this function.
        idx = self._get_abs_string_index(idx)
        if isinstance(param, torch.Tensor) and not isinstance(param, Parameter):
            param = Parameter(param)
        return setattr(self, str(idx), param)

    def __len__(self) -> int:
        return self._size

    def __iter__(self) -> Iterator[Any]:
        return iter(self[i] for i in range(len(self)))

    def __iadd__(self, parameters: Iterable[Any]) -> Self:
        return self.extend(parameters)

    def __dir__(self):
        # Hide the numeric entry attributes from dir().
        keys = super().__dir__()
        keys = [key for key in keys if not key.isdigit()]
        return keys

    def append(self, value: Any) -> "ParameterList":
        """Append a given value at the end of the list.

        Args:
            value (Any): value to append
        """
        # Grow first so __setitem__'s bounds check accepts the new index.
        new_idx = len(self)
        self._size += 1
        self[new_idx] = value
        return self

    def extend(self, values: Iterable[Any]) -> Self:
        """Append values from a Python iterable to the end of the list.

        Args:
            values (iterable): iterable of values to append
        """
        # Tensor is an iterable but we never want to unpack it here
        if not isinstance(values, container_abcs.Iterable) or isinstance(
            values, torch.Tensor
        ):
            raise TypeError(
                "ParameterList.extend should be called with an "
                "iterable, but got " + type(values).__name__
            )
        for value in values:
            self.append(value)
        return self

    def extra_repr(self) -> str:
        # One line per entry: tensors show dtype/size/device, anything else
        # just its type name.
        child_lines = []
        for k, p in enumerate(self):
            if isinstance(p, torch.Tensor):
                size_str = "x".join(str(size) for size in p.size())
                if p.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
                    device_str = f" ({p.device})"
                else:
                    device_str = ""
                parastr = "{} containing: [{} of size {}{}]".format(
                    "Parameter" if isinstance(p, Parameter) else "Tensor",
                    p.dtype,
                    size_str,
                    device_str,
                )
                child_lines.append("  (" + str(k) + "): " + parastr)
            else:
                child_lines.append(
                    "  (" + str(k) + "): Object of type: " + type(p).__name__
                )

        tmpstr = "\n".join(child_lines)
        return tmpstr

    def __call__(self, *args, **kwargs):
        # A ParameterList stores data only; it has no forward computation.
        raise RuntimeError("ParameterList should not be called.")
733
+
734
+
735
class ParameterDict(Module):
    r"""Holds parameters in a dictionary.

    ParameterDict can be indexed like a regular Python dictionary, but Parameters it
    contains are properly registered, and will be visible by all Module methods.
    Other objects are treated as would be done by a regular Python dictionary

    :class:`~torch.nn.ParameterDict` is an **ordered** dictionary.
    :meth:`~torch.nn.ParameterDict.update` with other unordered mapping
    types (e.g., Python's plain ``dict``) does not preserve the order of the
    merged mapping. On the other hand, ``OrderedDict`` or another :class:`~torch.nn.ParameterDict`
    will preserve their ordering.

    Note that the constructor, assigning an element of the dictionary and the
    :meth:`~torch.nn.ParameterDict.update` method will convert any :class:`~torch.Tensor` into
    :class:`~torch.nn.Parameter`.

    Args:
        values (iterable, optional): a mapping (dictionary) of
            (string : Any) or an iterable of key-value pairs
            of type (string, Any)

    Example::

        class MyModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.params = nn.ParameterDict({
                        'left': nn.Parameter(torch.randn(5, 10)),
                        'right': nn.Parameter(torch.randn(5, 10))
                })

            def forward(self, x, choice):
                x = self.params[choice].mm(x)
                return x
    """

    def __init__(self, parameters: Any = None) -> None:
        super().__init__()
        # Insertion order is tracked in this dict-of-None keys; the values
        # themselves live as attributes on the module.
        self._keys: Dict[str, None] = {}
        if parameters is not None:
            self.update(parameters)

    def _key_to_attr(self, key: str) -> str:
        if not isinstance(key, str):
            raise TypeError(
                "Index given to ParameterDict cannot be used as a key as it is "
                f"not a string (type is '{type(key).__name__}'). Open an issue on "
                "github if you need non-string keys."
            )
        else:
            # Use the key as-is so that `.named_parameters()` returns the right thing
            return key

    def __getitem__(self, key: str) -> Any:
        attr = self._key_to_attr(key)
        return getattr(self, attr)

    def __setitem__(self, key: str, value: Any) -> None:
        # Note that all other function that add an entry to the dictionary part of
        # the ParameterDict end up here. So this is the only place where we need
        # to wrap things into Parameter if needed.
        # Objects added via setattr() are not in the dictionary part and thus won't
        # call into this function.
        self._keys[key] = None
        attr = self._key_to_attr(key)
        if isinstance(value, torch.Tensor) and not isinstance(value, Parameter):
            value = Parameter(value)
        setattr(self, attr, value)

    def __delitem__(self, key: str) -> None:
        del self._keys[key]
        attr = self._key_to_attr(key)
        delattr(self, attr)

    def __len__(self) -> int:
        return len(self._keys)

    def __iter__(self) -> Iterator[str]:
        return iter(self._keys)

    def __reversed__(self) -> Iterator[str]:
        return reversed(list(self._keys))

    def copy(self) -> "ParameterDict":
        """Return a copy of this :class:`~torch.nn.ParameterDict` instance."""
        # We have to use an OrderedDict because the ParameterDict constructor
        # behaves differently on plain dict vs OrderedDict
        return ParameterDict(OrderedDict((k, self[k]) for k in self._keys))

    def __contains__(self, key: str) -> bool:
        return key in self._keys

    def setdefault(self, key: str, default: Optional[Any] = None) -> Any:
        """Set the default for a key in the Parameterdict.

        If key is in the ParameterDict, return its value.
        If not, insert `key` with a parameter `default` and return `default`.
        `default` defaults to `None`.

        Args:
            key (str): key to set default for
            default (Any): the parameter set to the key
        """
        if key not in self:
            self[key] = default
        return self[key]

    def clear(self) -> None:
        """Remove all items from the ParameterDict."""
        # Iterate over a copy: __delitem__ mutates self._keys.
        for k in self._keys.copy():
            del self[k]

    def pop(self, key: str) -> Any:
        r"""Remove key from the ParameterDict and return its parameter.

        Args:
            key (str): key to pop from the ParameterDict
        """
        v = self[key]
        del self[key]
        return v

    def popitem(self) -> Tuple[str, Any]:
        """Remove and return the last inserted `(key, parameter)` pair from the ParameterDict."""
        k, _ = self._keys.popitem()
        # We need the key in the _keys to be able to access/del
        self._keys[k] = None
        val = self[k]
        del self[k]
        return k, val

    def get(self, key: str, default: Optional[Any] = None) -> Any:
        r"""Return the parameter associated with key if present. Otherwise return default if provided, None if not.

        Args:
            key (str): key to get from the ParameterDict
            default (Parameter, optional): value to return if key not present
        """
        return self[key] if key in self else default

    def fromkeys(
        self, keys: Iterable[str], default: Optional[Any] = None
    ) -> "ParameterDict":
        r"""Return a new ParameterDict with the keys provided.

        Args:
            keys (iterable, string): keys to make the new ParameterDict from
            default (Parameter, optional): value to set for all keys
        """
        return ParameterDict((k, default) for k in keys)

    def keys(self) -> Iterable[str]:
        r"""Return an iterable of the ParameterDict keys."""
        return self._keys.keys()

    def items(self) -> Iterable[Tuple[str, Any]]:
        r"""Return an iterable of the ParameterDict key/value pairs."""
        return ((k, self[k]) for k in self._keys)

    def values(self) -> Iterable[Any]:
        r"""Return an iterable of the ParameterDict values."""
        return (self[k] for k in self._keys)

    def update(self, parameters: Union[Mapping[str, Any], "ParameterDict"]) -> None:
        r"""Update the :class:`~torch.nn.ParameterDict` with key-value pairs from ``parameters``, overwriting existing keys.

        .. note::
            If :attr:`parameters` is an ``OrderedDict``, a :class:`~torch.nn.ParameterDict`, or
            an iterable of key-value pairs, the order of new elements in it is preserved.

        Args:
            parameters (iterable): a mapping (dictionary) from string to
                :class:`~torch.nn.Parameter`, or an iterable of
                key-value pairs of type (string, :class:`~torch.nn.Parameter`)
        """
        if not isinstance(parameters, container_abcs.Iterable):
            raise TypeError(
                "ParametersDict.update should be called with an "
                "iterable of key/value pairs, but got " + type(parameters).__name__
            )

        if isinstance(parameters, (OrderedDict, ParameterDict)):
            # Ordered sources: preserve their order.
            for key, parameter in parameters.items():
                self[key] = parameter
        elif isinstance(parameters, container_abcs.Mapping):
            # Plain mappings: insert in sorted-key order for determinism.
            for key, parameter in sorted(parameters.items()):
                self[key] = parameter
        else:
            for j, p in enumerate(parameters):
                if not isinstance(p, container_abcs.Iterable):
                    raise TypeError(
                        "ParameterDict update sequence element "
                        "#" + str(j) + " should be Iterable; is" + type(p).__name__
                    )
                if not len(p) == 2:
                    raise ValueError(
                        "ParameterDict update sequence element "
                        "#" + str(j) + " has length " + str(len(p)) + "; 2 is required"
                    )
                # parameters as length-2 list too cumbersome to type, see ModuleDict.update comment
                self[p[0]] = p[1]  # type: ignore[assignment]

    def extra_repr(self) -> str:
        # NOTE(review): this prints torch.typename(p) where ParameterList's
        # extra_repr prints p.dtype — confirm the asymmetry is intentional.
        child_lines = []
        for k, p in self.items():
            if isinstance(p, torch.Tensor):
                size_str = "x".join(str(size) for size in p.size())
                if p.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]:
                    device_str = f" ({p.device})"
                else:
                    device_str = ""
                parastr = "{} containing: [{} of size {}{}]".format(
                    "Parameter" if isinstance(p, Parameter) else "Tensor",
                    torch.typename(p),
                    size_str,
                    device_str,
                )
                child_lines.append("  (" + str(k) + "): " + parastr)
            else:
                child_lines.append(
                    "  (" + str(k) + "): Object of type: " + type(p).__name__
                )
        tmpstr = "\n".join(child_lines)
        return tmpstr

    def __call__(self, input):
        # A ParameterDict stores data only; it has no forward computation.
        raise RuntimeError("ParameterDict should not be called.")

    def __or__(self, other: "ParameterDict") -> "ParameterDict":
        copy = self.copy()
        copy.update(other)
        return copy

    def __ror__(self, other: "ParameterDict") -> "ParameterDict":
        copy = other.copy()
        copy.update(self)
        return copy

    def __ior__(self, other: "ParameterDict") -> Self:
        self.update(other)
        return self
janus/lib/python3.10/site-packages/torch/nn/modules/lazy.py ADDED
@@ -0,0 +1,289 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import itertools
3
+ from typing import Any, Optional, Protocol, Type
4
+
5
+ import torch
6
+ from torch.nn.parameter import is_lazy
7
+
8
+
9
+ __all__ = ["LazyModuleMixin"]
10
+
11
+
12
class _LazyProtocol(Protocol):
    """Structural type listing the attributes a LazyModuleMixin host must expose.

    Exists purely so mypy accepts the mixin's attribute accesses; see
    https://mypy.readthedocs.io/en/latest/more_types.html#mixin-classes
    """

    # -- methods provided by the concrete nn.Module host --
    def _register_load_state_dict_pre_hook(self, hook): ...

    def register_forward_pre_hook(self, hook, *, prepend=False, with_kwargs=False): ...

    def _lazy_load_hook(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ): ...

    def _get_name(self): ...

    def _infer_parameters(self, module, input): ...

    # -- state provided by the concrete nn.Module host --
    @property
    def _parameters(self): ...

    @property
    def _buffers(self): ...

    @property
    def _non_persistent_buffers_set(self): ...

    @property
    def _load_hook(self): ...

    @property
    def _initialize_hook(self): ...
61
+
62
+
63
class LazyModuleMixin:
    r"""A mixin for modules that lazily initialize parameters, also known as "lazy modules".

    .. warning::
        Lazy modules are an experimental new feature under active development,
        and their API is likely to change.

    Modules that lazily initialize parameters, or "lazy modules",
    derive the shapes of their parameters from the first input(s)
    to their forward method. Until that first forward they contain
    :class:`torch.nn.UninitializedParameter` s that should not be accessed
    or used, and afterward they contain regular :class:`torch.nn.Parameter` s.
    Lazy modules are convenient since they don't require computing some
    module arguments, like the :attr:`in_features` argument of a
    typical :class:`torch.nn.Linear`.

    After construction, networks with lazy modules should first
    be converted to the desired dtype and placed on the expected device.
    This is because lazy modules only perform shape inference so the usual dtype
    and device placement behavior applies.
    The lazy modules should then perform "dry runs" to initialize all the components in the module.
    These "dry runs" send inputs of the correct size, dtype, and device through
    the network and to each one of its lazy modules. After this the network can be used as usual.

    >>> # xdoctest: +SKIP
    >>> class LazyMLP(torch.nn.Module):
    ...     def __init__(self) -> None:
    ...         super().__init__()
    ...         self.fc1 = torch.nn.LazyLinear(10)
    ...         self.relu1 = torch.nn.ReLU()
    ...         self.fc2 = torch.nn.LazyLinear(1)
    ...         self.relu2 = torch.nn.ReLU()
    ...
    ...     def forward(self, input):
    ...         x = self.relu1(self.fc1(input))
    ...         y = self.relu2(self.fc2(x))
    ...         return y
    >>> # constructs a network with lazy modules
    >>> lazy_mlp = LazyMLP()
    >>> # transforms the network's device and dtype
    >>> # NOTE: these transforms can and should be applied after construction and before any 'dry runs'
    >>> lazy_mlp = lazy_mlp.cuda().double()
    >>> lazy_mlp
    LazyMLP( (fc1): LazyLinear(in_features=0, out_features=10, bias=True)
      (relu1): ReLU()
      (fc2): LazyLinear(in_features=0, out_features=1, bias=True)
      (relu2): ReLU()
    )
    >>> # performs a dry run to initialize the network's lazy modules
    >>> lazy_mlp(torch.ones(10,10).cuda())
    >>> # after initialization, LazyLinear modules become regular Linear modules
    >>> lazy_mlp
    LazyMLP(
      (fc1): Linear(in_features=10, out_features=10, bias=True)
      (relu1): ReLU()
      (fc2): Linear(in_features=10, out_features=1, bias=True)
      (relu2): ReLU()
    )
    >>> # attaches an optimizer, since parameters can now be used as usual
    >>> optim = torch.optim.SGD(lazy_mlp.parameters(), lr=0.01)

    A final caveat when using lazy modules is that the order of initialization of a network's
    parameters may change, since the lazy modules are always initialized after other modules.
    For example, if the LazyMLP class defined above had a :class:`torch.nn.LazyLinear` module
    first and then a regular :class:`torch.nn.Linear` second, the second module would be
    initialized on construction and the first module would be initialized during the first dry run.
    This can cause the parameters of a network using lazy modules to be initialized differently
    than the parameters of a network without lazy modules as the order of parameter initializations,
    which often depends on a stateful random number generator, is different.
    Check :doc:`/notes/randomness` for more details.

    Lazy modules can be serialized with a state dict like other modules. For example:

    >>> lazy_mlp = LazyMLP()
    >>> # The state dict shows the uninitialized parameters
    >>> lazy_mlp.state_dict()
    OrderedDict([('fc1.weight', Uninitialized parameter),
                 ('fc1.bias',
                  tensor([-1.8832e+25,  4.5636e-41, -1.8832e+25,  4.5636e-41, -6.1598e-30,
                           4.5637e-41, -1.8788e+22,  4.5636e-41, -2.0042e-31,  4.5637e-41])),
                 ('fc2.weight', Uninitialized parameter),
                 ('fc2.bias', tensor([0.0019]))])


    Lazy modules can load regular :class:`torch.nn.Parameter` s (i.e. you can serialize/deserialize
    initialized LazyModules and they will remain initialized)


    >>> full_mlp = LazyMLP()
    >>> # Dry run to initialize another module
    >>> full_mlp.forward(torch.ones(10, 1))
    >>> # Load an initialized state into a lazy module
    >>> lazy_mlp.load_state_dict(full_mlp.state_dict())
    >>> # The state dict now holds valid values
    >>> lazy_mlp.state_dict()
    OrderedDict([('fc1.weight',
                  tensor([[-0.3837],
                          [ 0.0907],
                          [ 0.6708],
                          [-0.5223],
                          [-0.9028],
                          [ 0.2851],
                          [-0.4537],
                          [ 0.6813],
                          [ 0.5766],
                          [-0.8678]])),
                 ('fc1.bias',
                  tensor([-1.8832e+25,  4.5636e-41, -1.8832e+25,  4.5636e-41, -6.1598e-30,
                           4.5637e-41, -1.8788e+22,  4.5636e-41, -2.0042e-31,  4.5637e-41])),
                 ('fc2.weight',
                  tensor([[ 0.1320,  0.2938,  0.0679,  0.2793,  0.1088, -0.1795, -0.2301,  0.2807,
                            0.2479,  0.1091]])),
                 ('fc2.bias', tensor([0.0019]))])

    Note, however, that the loaded parameters will not be replaced when doing a "dry run" if they are initialized
    when the state is loaded. This prevents using initialized modules in different contexts.
    """

    # modules inheriting from this will change their __class__ to the specified
    # one after they are fully initialized
    cls_to_become: Optional[Type[Any]] = None

    def __init__(self: _LazyProtocol, *args, **kwargs):
        # Mypy doesn't like this super call in a mixin
        super().__init__(*args, **kwargs)  # type: ignore[misc]
        # Pre-hook reconciling lazy state with an incoming state_dict on load.
        self._load_hook = self._register_load_state_dict_pre_hook(self._lazy_load_hook)
        # Forward pre-hook that materializes parameters before the first forward.
        self._initialize_hook = self.register_forward_pre_hook(
            self._infer_parameters, with_kwargs=True
        )

    def _save_to_state_dict(self: _LazyProtocol, destination, prefix, keep_vars):
        """Serialize parameters and persistent buffers, storing still-uninitialized ones as-is."""
        # This should be ideally implemented as a hook,
        # but we should override `detach` in the UninitializedParameter to return itself
        # which is not clean
        for name, param in self._parameters.items():
            if param is not None:
                # Uninitialized parameters cannot be detached; store them directly.
                if not (is_lazy(param) or keep_vars):
                    param = param.detach()
                destination[prefix + name] = param
        for name, buf in self._buffers.items():
            if buf is not None and name not in self._non_persistent_buffers_set:
                if not (is_lazy(buf) or keep_vars):
                    buf = buf.detach()
                destination[prefix + name] = buf

    def _lazy_load_hook(
        self: _LazyProtocol,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        """load_state_dict pre-hook function for lazy buffers and parameters.

        The purpose of this hook is to adjust the current state and/or
        ``state_dict`` being loaded so that a module instance serialized in
        both un/initialized state can be deserialized onto both un/initialized
        module instance.
        See comment in ``torch.nn.Module._register_load_state_dict_pre_hook``
        for the details of the hook specification.
        """
        for name, param in itertools.chain(
            self._parameters.items(), self._buffers.items()
        ):
            key = prefix + name
            if key in state_dict and param is not None:
                input_param = state_dict[key]
                if is_lazy(param):
                    # The current parameter is not initialized but the one being loaded is:
                    # create a new parameter based on the uninitialized one
                    if not is_lazy(input_param):
                        with torch.no_grad():
                            param.materialize(input_param.shape)

    def initialize_parameters(self: _LazyProtocol, *args, **kwargs):
        r"""Initialize parameters according to the input batch properties.

        This adds an interface to isolate parameter initialization from the
        forward pass when doing parameter shape inference.
        """
        raise NotImplementedError(
            f"initialize_parameters is not implemented for {self.__class__.__name__}"
        )

    def has_uninitialized_params(self: _LazyProtocol):
        r"""Check if a module has parameters that are not initialized."""
        # This is to avoid the JIT to track this parameter and force
        # custom modules __setstate__ to add it
        params = self._parameters.values()
        buffers = self._buffers.values()
        for param in itertools.chain(params, buffers):
            if is_lazy(param):
                return True
        return False

    # torchrec tests the code consistency with the following code
    # fmt: off
    def _infer_parameters(self: _LazyProtocol, module, args, kwargs=None):
        r"""Infers the size and initializes the parameters according to the provided input batch.

        Given a module that contains parameters that were declared inferrable
        using :class:`torch.nn.parameter.ParameterMode.Infer`, runs a forward pass
        in the complete module using the provided input to initialize all the parameters
        as needed.
        The module is set into evaluation mode before running the forward pass in order
        to avoid saving statistics or calculating gradients
        """
        kwargs = kwargs if kwargs else {}
        module.initialize_parameters(*args, **kwargs)
        if module.has_uninitialized_params():
            raise RuntimeError(f'module {self._get_name()} has not been fully initialized')
        module._initialize_hook.remove()
        module._load_hook.remove()
        delattr(module, '_initialize_hook')
        delattr(module, '_load_hook')
        if module.cls_to_become is not None:
            module.__class__ = module.cls_to_become
    # fmt: on

    def _replicate_for_data_parallel(self: _LazyProtocol):
        # Replication would copy uninitialized storage, so refuse until a dry run has happened.
        raise RuntimeError(
            "Modules with uninitialized parameters can't be used with `DataParallel`. "
            "Run a dummy forward pass to correctly initialize the modules"
        )
janus/lib/python3.10/site-packages/torch/nn/modules/linear.py ADDED
@@ -0,0 +1,293 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import math
3
+ from typing import Any
4
+
5
+ import torch
6
+ from torch import Tensor
7
+ from torch.nn import functional as F, init
8
+ from torch.nn.parameter import Parameter, UninitializedParameter
9
+
10
+ from .lazy import LazyModuleMixin
11
+ from .module import Module
12
+
13
+
14
+ __all__ = [
15
+ "Bilinear",
16
+ "Identity",
17
+ "LazyLinear",
18
+ "Linear",
19
+ ]
20
+
21
+
22
class Identity(Module):
    r"""A no-op module that returns its input unchanged.

    Constructor arguments are accepted and ignored, which makes this a
    convenient argument-compatible stand-in for other layers.

    Args:
        args: any argument (unused)
        kwargs: any keyword argument (unused)

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Output: :math:`(*)`, same shape as the input.

    Examples::

        >>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 20])
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Arguments exist solely for drop-in interface compatibility.
        super().__init__()

    def forward(self, input: Tensor) -> Tensor:
        # Pass the input through untouched.
        return input
48
+
49
+
50
class Linear(Module):
    r"""Applies an affine linear transformation to the incoming data: :math:`y = xA^T + b`.

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`. On certain ROCm
    devices, float16 inputs use :ref:`different precision<fp16_on_mi200>` for
    backward.

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        bias: If set to ``False``, the layer will not learn an additive bias.
            Default: ``True``

    Shape:
        - Input: :math:`(*, H_{in})` where :math:`*` means any number of
          dimensions including none and :math:`H_{in} = \text{in\_features}`.
        - Output: :math:`(*, H_{out})` where all but the last dimension
          match the input and :math:`H_{out} = \text{out\_features}`.

    Attributes:
        weight: learnable weights of shape
            :math:`(\text{out\_features}, \text{in\_features})`, initialized from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` with
            :math:`k = \frac{1}{\text{in\_features}}`
        bias: learnable bias of shape :math:`(\text{out\_features})`, initialized
            the same way when :attr:`bias` is ``True``

    Examples::

        >>> m = nn.Linear(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    """

    __constants__ = ["in_features", "out_features"]
    in_features: int
    out_features: int
    weight: Tensor

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        tensor_opts = {"device": device, "dtype": dtype}
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Weight is stored transposed: one row per output feature.
        self.weight = Parameter(
            torch.empty((out_features, in_features), **tensor_opts)
        )
        if not bias:
            self.register_parameter("bias", None)
        else:
            self.bias = Parameter(torch.empty(out_features, **tensor_opts))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Re-initialize ``weight`` (and ``bias``) in place."""
        # kaiming_uniform_ with a=sqrt(5) is equivalent to
        # uniform(-1/sqrt(in_features), 1/sqrt(in_features)); see
        # https://github.com/pytorch/pytorch/issues/57109
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is None:
            return
        fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
        limit = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
        init.uniform_(self.bias, -limit, limit)

    def forward(self, input: Tensor) -> Tensor:
        # Delegates to the fused functional implementation.
        return F.linear(input, self.weight, self.bias)

    def extra_repr(self) -> str:
        has_bias = self.bias is not None
        return f"in_features={self.in_features}, out_features={self.out_features}, bias={has_bias}"
129
+
130
+
131
+ # This class exists solely to avoid triggering an obscure error when scripting
132
+ # an improperly quantized attention layer. See this issue for details:
133
+ # https://github.com/pytorch/pytorch/issues/58969
134
+ # TODO: fail fast on quantization API usage error, then remove this class
135
+ # and replace uses of it with plain Linear
136
class NonDynamicallyQuantizableLinear(Linear):
    """A ``Linear`` subtype used to avoid an obscure scripting error with
    improperly quantized attention layers; see
    https://github.com/pytorch/pytorch/issues/58969.

    Behavior is exactly that of :class:`Linear`; only the type differs.
    TODO: fail fast on quantization API usage error, then remove this class.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        # Forward everything unchanged to the plain Linear constructor.
        super().__init__(
            in_features, out_features, bias=bias, device=device, dtype=dtype
        )
148
+
149
+
150
class Bilinear(Module):
    r"""Applies a bilinear transformation to the incoming data: :math:`y = x_1^T A x_2 + b`.

    Args:
        in1_features: size of each first input sample
        in2_features: size of each second input sample
        out_features: size of each output sample
        bias: If set to False, the layer will not learn an additive bias.
            Default: ``True``

    Shape:
        - Input1: :math:`(*, H_{in1})` where :math:`H_{in1}=\text{in1\_features}` and
          :math:`*` means any number of additional dimensions including none.
          All but the last dimension of the inputs should be the same.
        - Input2: :math:`(*, H_{in2})` where :math:`H_{in2}=\text{in2\_features}`.
        - Output: :math:`(*, H_{out})` where :math:`H_{out}=\text{out\_features}`
          and all but the last dimension match the inputs.

    Attributes:
        weight: learnable weights of shape
            :math:`(\text{out\_features}, \text{in1\_features}, \text{in2\_features})`,
            initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` with
            :math:`k = \frac{1}{\text{in1\_features}}`
        bias: learnable bias of shape :math:`(\text{out\_features})`, initialized
            the same way when :attr:`bias` is ``True``

    Examples::

        >>> m = nn.Bilinear(20, 30, 40)
        >>> input1 = torch.randn(128, 20)
        >>> input2 = torch.randn(128, 30)
        >>> output = m(input1, input2)
        >>> print(output.size())
        torch.Size([128, 40])
    """

    __constants__ = ["in1_features", "in2_features", "out_features"]
    in1_features: int
    in2_features: int
    out_features: int
    weight: Tensor

    def __init__(
        self,
        in1_features: int,
        in2_features: int,
        out_features: int,
        bias: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        tensor_opts = {"device": device, "dtype": dtype}
        super().__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        # One (in1 x in2) interaction matrix per output feature.
        self.weight = Parameter(
            torch.empty((out_features, in1_features, in2_features), **tensor_opts)
        )

        if not bias:
            self.register_parameter("bias", None)
        else:
            self.bias = Parameter(torch.empty(out_features, **tensor_opts))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Uniformly re-initialize weight/bias within ±1/sqrt(in1_features)."""
        limit = 1 / math.sqrt(self.weight.size(1))
        init.uniform_(self.weight, -limit, limit)
        if self.bias is not None:
            init.uniform_(self.bias, -limit, limit)

    def forward(self, input1: Tensor, input2: Tensor) -> Tensor:
        # Delegates to the functional bilinear implementation.
        return F.bilinear(input1, input2, self.weight, self.bias)

    def extra_repr(self) -> str:
        return (
            f"in1_features={self.in1_features}, in2_features={self.in2_features}, "
            f"out_features={self.out_features}, bias={self.bias is not None}"
        )
232
+
233
+
234
class LazyLinear(LazyModuleMixin, Linear):
    r"""A :class:`torch.nn.Linear` module where `in_features` is inferred.

    ``weight`` and ``bias`` start out as :class:`torch.nn.UninitializedParameter`
    and are materialized during the first call to ``forward``, after which the
    module becomes a regular :class:`torch.nn.Linear`. ``in_features`` is taken
    from ``input.shape[-1]``.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further
    documentation on lazy modules and their limitations.

    Args:
        out_features: size of each output sample
        bias: If set to ``False``, the layer will not learn an additive bias.
            Default: ``True``

    Attributes:
        weight: learnable weights of shape
            :math:`(\text{out\_features}, \text{in\_features})` once materialized,
            initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` with
            :math:`k = \frac{1}{\text{in\_features}}`
        bias: learnable bias of shape :math:`(\text{out\_features})`, initialized
            the same way when :attr:`bias` is ``True``
    """

    cls_to_become = Linear  # type: ignore[assignment]
    weight: UninitializedParameter
    bias: UninitializedParameter  # type: ignore[assignment]

    def __init__(
        self, out_features: int, bias: bool = True, device=None, dtype=None
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        # Build the base Linear with dummy sizes and bias=False so no tensor
        # storage is allocated that would immediately be thrown away.
        super().__init__(0, 0, False)
        self.weight = UninitializedParameter(**factory_kwargs)
        self.out_features = out_features
        if bias:
            self.bias = UninitializedParameter(**factory_kwargs)

    def reset_parameters(self) -> None:
        """Reset weights only once the parameters have been materialized."""
        if self.has_uninitialized_params() or self.in_features == 0:
            return
        super().reset_parameters()

    def initialize_parameters(self, input) -> None:  # type: ignore[override]
        """Materialize weight/bias using the last dimension of ``input`` as ``in_features``."""
        if not self.has_uninitialized_params():
            return
        with torch.no_grad():
            self.in_features = input.shape[-1]
            self.weight.materialize((self.out_features, self.in_features))
            if self.bias is not None:
                self.bias.materialize((self.out_features,))
            self.reset_parameters()
291
+
292
+
293
+ # TODO: PartialLinear - maybe in sparse?
janus/lib/python3.10/site-packages/torch/nn/modules/padding.py ADDED
@@ -0,0 +1,813 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import Sequence, Tuple
3
+
4
+ import torch.nn.functional as F
5
+ from torch import Tensor
6
+ from torch.nn.common_types import _size_2_t, _size_4_t, _size_6_t
7
+
8
+ from .module import Module
9
+ from .utils import _ntuple, _pair, _quadruple
10
+
11
+
12
+ # TODO: grad_output size asserts in THNN
13
+
14
+ __all__ = [
15
+ "CircularPad1d",
16
+ "CircularPad2d",
17
+ "CircularPad3d",
18
+ "ConstantPad1d",
19
+ "ConstantPad2d",
20
+ "ConstantPad3d",
21
+ "ReflectionPad1d",
22
+ "ReflectionPad2d",
23
+ "ReflectionPad3d",
24
+ "ReplicationPad1d",
25
+ "ReplicationPad2d",
26
+ "ReplicationPad3d",
27
+ "ZeroPad1d",
28
+ "ZeroPad2d",
29
+ "ZeroPad3d",
30
+ ]
31
+
32
+
33
class _CircularPadNd(Module):
    """Shared base for circular padding modules; subclasses set ``padding``
    and validate the input rank."""

    __constants__ = ["padding"]
    padding: Sequence[int]

    def _check_input_dim(self, input):
        # Rank validation is delegated to the dimension-specific subclass.
        raise NotImplementedError

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)
        # Wrap-around padding via the functional implementation.
        return F.pad(input, self.padding, "circular")

    def extra_repr(self) -> str:
        return f"{self.padding}"
46
+
47
+
48
+ class CircularPad1d(_CircularPadNd):
49
+ r"""Pads the input tensor using circular padding of the input boundary.
50
+
51
+ Tensor values at the beginning of the dimension are used to pad the end,
52
+ and values at the end are used to pad the beginning. If negative padding is
53
+ applied then the ends of the tensor get removed.
54
+
55
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
56
+
57
+ Args:
58
+ padding (int, tuple): the size of the padding. If is `int`, uses the same
59
+ padding in all boundaries. If a 2-`tuple`, uses
60
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)
61
+
62
+ Shape:
63
+ - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
64
+ - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where
65
+
66
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
67
+
68
+ Examples::
69
+
70
+ >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
71
+ >>> m = nn.CircularPad1d(2)
72
+ >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
73
+ >>> input
74
+ tensor([[[0., 1., 2., 3.],
75
+ [4., 5., 6., 7.]]])
76
+ >>> m(input)
77
+ tensor([[[2., 3., 0., 1., 2., 3., 0., 1.],
78
+ [6., 7., 4., 5., 6., 7., 4., 5.]]])
79
+ >>> # using different paddings for different sides
80
+ >>> m = nn.CircularPad1d((3, 1))
81
+ >>> m(input)
82
+ tensor([[[1., 2., 3., 0., 1., 2., 3., 0.],
83
+ [5., 6., 7., 4., 5., 6., 7., 4.]]])
84
+ """
85
+
86
+ padding: Tuple[int, int]
87
+
88
+ def __init__(self, padding: _size_2_t) -> None:
89
+ super().__init__()
90
+ self.padding = _pair(padding)
91
+
92
+ def _check_input_dim(self, input):
93
+ if input.dim() != 2 and input.dim() != 3:
94
+ raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")
95
+
96
+
97
+ class CircularPad2d(_CircularPadNd):
98
+ r"""Pads the input tensor using circular padding of the input boundary.
99
+
100
+ Tensor values at the beginning of the dimension are used to pad the end,
101
+ and values at the end are used to pad the beginning. If negative padding is
102
+ applied then the ends of the tensor get removed.
103
+
104
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
105
+
106
+ Args:
107
+ padding (int, tuple): the size of the padding. If is `int`, uses the same
108
+ padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
109
+ :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
110
+
111
+ Shape:
112
+ - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
113
+ - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
114
+
115
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
116
+
117
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
118
+
119
+ Examples::
120
+
121
+ >>> m = nn.CircularPad2d(2)
122
+ >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
123
+ >>> input
124
+ tensor([[[[0., 1., 2.],
125
+ [3., 4., 5.],
126
+ [6., 7., 8.]]]])
127
+ >>> m(input)
128
+ tensor([[[[4., 5., 3., 4., 5., 3., 4.],
129
+ [7., 8., 6., 7., 8., 6., 7.],
130
+ [1., 2., 0., 1., 2., 0., 1.],
131
+ [4., 5., 3., 4., 5., 3., 4.],
132
+ [7., 8., 6., 7., 8., 6., 7.],
133
+ [1., 2., 0., 1., 2., 0., 1.],
134
+ [4., 5., 3., 4., 5., 3., 4.]]]])
135
+ >>> # using different paddings for different sides
136
+ >>> m = nn.CircularPad2d((1, 1, 2, 0))
137
+ >>> m(input)
138
+ tensor([[[[5., 3., 4., 5., 3.],
139
+ [8., 6., 7., 8., 6.],
140
+ [2., 0., 1., 2., 0.],
141
+ [5., 3., 4., 5., 3.],
142
+ [8., 6., 7., 8., 6.]]]])
143
+ """
144
+
145
+ padding: Tuple[int, int, int, int]
146
+
147
+ def __init__(self, padding: _size_4_t) -> None:
148
+ super().__init__()
149
+ self.padding = _quadruple(padding)
150
+
151
+ def _check_input_dim(self, input):
152
+ if input.dim() != 3 and input.dim() != 4:
153
+ raise ValueError(f"expected 3D or 4D input (got {input.dim()}D input)")
154
+
155
+
156
+ class CircularPad3d(_CircularPadNd):
157
+ r"""Pads the input tensor using circular padding of the input boundary.
158
+
159
+ Tensor values at the beginning of the dimension are used to pad the end,
160
+ and values at the end are used to pad the beginning. If negative padding is
161
+ applied then the ends of the tensor get removed.
162
+
163
+ For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
164
+
165
+ Args:
166
+ padding (int, tuple): the size of the padding. If is `int`, uses the same
167
+ padding in all boundaries. If a 6-`tuple`, uses
168
+ (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
169
+ :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
170
+ :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)
171
+
172
+ Shape:
173
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
174
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
175
+ where
176
+
177
+ :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`
178
+
179
+ :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
180
+
181
+ :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
182
+
183
+ Examples::
184
+
185
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
186
+ >>> m = nn.CircularPad3d(3)
187
+ >>> input = torch.randn(16, 3, 8, 320, 480)
188
+ >>> output = m(input)
189
+ >>> # using different paddings for different sides
190
+ >>> m = nn.CircularPad3d((3, 3, 6, 6, 1, 1))
191
+ >>> output = m(input)
192
+ """
193
+
194
+ padding: Tuple[int, int, int, int, int, int]
195
+
196
+ def __init__(self, padding: _size_6_t) -> None:
197
+ super().__init__()
198
+ self.padding = _ntuple(6)(padding)
199
+
200
+ def _check_input_dim(self, input):
201
+ if input.dim() != 4 and input.dim() != 5:
202
+ raise ValueError(f"expected 4D or 5D input (got {input.dim()}D input)")
203
+
204
+
205
class _ConstantPadNd(Module):
    """Shared implementation of the ConstantPad{1,2,3}d modules.

    Subclasses are only responsible for normalizing ``padding`` into the flat
    (left, right, top, bottom, ...) sequence expected by
    :func:`torch.nn.functional.pad`; the fill value and the forward pass are
    handled here.
    """

    __constants__ = ["padding", "value"]
    value: float
    padding: Sequence[int]

    def __init__(self, value: float) -> None:
        super().__init__()
        # Constant written into every padded element.
        self.value = value

    def forward(self, input: Tensor) -> Tensor:
        # Delegate to the functional API in "constant" mode.
        return F.pad(input, self.padding, mode="constant", value=self.value)

    def extra_repr(self) -> str:
        return f"padding={self.padding}, value={self.value}"
219
+
220
+
221
class ConstantPad1d(_ConstantPadNd):
    r"""Pad the last dimension of the input tensor with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size.  An ``int`` pads both ends by the
            same amount; a 2-``tuple`` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`).
        value (float): value written into every padded element.

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ConstantPad1d(2, 3.5)
        >>> input = torch.randn(1, 2, 4)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad1d((3, 1), 3.5)
        >>> output = m(input)
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t, value: float):
        super().__init__(value)
        # Canonicalize to a (left, right) pair.
        self.padding = _pair(padding)
270
+
271
+
272
class ConstantPad2d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
        value (float): the constant fill value.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ConstantPad2d(2, 3.5)
        >>> input = torch.randn(1, 2, 2)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
        >>> output = m(input)
    """

    # NOTE: ``__constants__`` is inherited from ``_ConstantPadNd`` (which already
    # declares "padding" and "value"); re-declaring the identical list here was
    # redundant and inconsistent with ConstantPad1d/ConstantPad3d, so it is omitted.
    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t, value: float) -> None:
        super().__init__(value)
        # Expand an int / short tuple into the canonical
        # (left, right, top, bottom) 4-tuple.
        self.padding = _quadruple(padding)
321
+
322
+
323
class ConstantPad3d(_ConstantPadNd):
    r"""Pad the three trailing (spatial) dimensions of the input with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size.  An ``int`` pads every boundary by
            the same amount; a 6-``tuple`` is interpreted as
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`).
        value (float): value written into every padded element.

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ConstantPad3d(3, 3.5)
        >>> input = torch.randn(16, 3, 10, 20, 30)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5)
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t, value: float) -> None:
        super().__init__(value)
        # Expand to the canonical (left, right, top, bottom, front, back) 6-tuple.
        self.padding = _ntuple(6)(padding)
361
+
362
+
363
class _ReflectionPadNd(Module):
    """Common forward/repr logic shared by the ReflectionPad{1,2,3}d modules.

    Subclasses set ``padding`` to the flat pad sequence understood by
    :func:`torch.nn.functional.pad`.
    """

    __constants__ = ["padding"]
    padding: Sequence[int]

    def forward(self, input: Tensor) -> Tensor:
        # Mirror the tensor's border values (excluding the edge itself)
        # into the padded region.
        return F.pad(input, self.padding, mode="reflect")

    def extra_repr(self) -> str:
        return str(self.padding)
372
+
373
+
374
class ReflectionPad1d(_ReflectionPadNd):
    r"""Pad the last dimension of the input by mirroring its boundary values.

    The reflection excludes the edge element itself, so each side's padding
    must be smaller than the input width.  For `N`-dimensional padding, use
    :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size.  An ``int`` pads both ends by the
            same amount; a 2-``tuple`` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`).

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReflectionPad1d(2)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> m(input)
        tensor([[[2., 1., 0., 1., 2., 3., 2., 1.],
                 [6., 5., 4., 5., 6., 7., 6., 5.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        # Canonicalize to a (left, right) pair.
        self.padding = _pair(padding)
413
+
414
+
415
class ReflectionPad2d(_ReflectionPadNd):
    r"""Pad the two trailing (spatial) dimensions by mirroring the boundary values.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size.  An ``int`` pads every boundary by
            the same amount; a 4-``tuple`` gives (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`,
            :math:`\text{padding\_bottom}`).  Each padding size must be
            smaller than the corresponding input dimension.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReflectionPad2d((1, 1, 2, 0))
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> m(input)
        tensor([[[[7., 6., 7., 8., 7.],
                  [4., 3., 4., 5., 4.],
                  [1., 0., 1., 2., 1.],
                  [4., 3., 4., 5., 4.],
                  [7., 6., 7., 8., 7.]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        # Canonicalize to a (left, right, top, bottom) 4-tuple.
        self.padding = _quadruple(padding)
466
+
467
+
468
class ReflectionPad3d(_ReflectionPadNd):
    r"""Pad the three trailing (spatial) dimensions by mirroring the boundary values.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size.  An ``int`` pads every boundary by
            the same amount; a 6-``tuple`` is interpreted as
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`).

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReflectionPad3d(1)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 1, 2, 2, 2)
        >>> output = m(input)
        >>> output.shape
        torch.Size([1, 1, 4, 4, 4])
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        # Canonicalize to the (left, right, top, bottom, front, back) 6-tuple.
        self.padding = _ntuple(6)(padding)
520
+
521
+
522
class _ReplicationPadNd(Module):
    """Common forward/repr logic shared by the ReplicationPad{1,2,3}d modules.

    Subclasses set ``padding`` to the flat pad sequence understood by
    :func:`torch.nn.functional.pad`.
    """

    __constants__ = ["padding"]
    padding: Sequence[int]

    def forward(self, input: Tensor) -> Tensor:
        # Repeat the tensor's edge values into the padded region.
        return F.pad(input, self.padding, mode="replicate")

    def extra_repr(self) -> str:
        return str(self.padding)
531
+
532
+
533
class ReplicationPad1d(_ReplicationPadNd):
    r"""Pad the last dimension of the input by repeating its edge values.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size.  An ``int`` pads both ends by the
            same amount; a 2-``tuple`` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`).

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReplicationPad1d(2)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> m(input)
        tensor([[[0., 0., 0., 1., 2., 3., 3., 3.],
                 [4., 4., 4., 5., 6., 7., 7., 7.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        # Canonicalize to a (left, right) pair.
        self.padding = _pair(padding)
572
+
573
+
574
class ReplicationPad2d(_ReplicationPadNd):
    r"""Pad the two trailing (spatial) dimensions by repeating the edge values.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size.  An ``int`` pads every boundary by
            the same amount; a 4-``tuple`` gives (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`,
            :math:`\text{padding\_bottom}`).

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReplicationPad2d((1, 1, 2, 0))
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> m(input)
        tensor([[[[0., 0., 1., 2., 2.],
                  [0., 0., 1., 2., 2.],
                  [0., 0., 1., 2., 2.],
                  [3., 3., 4., 5., 5.],
                  [6., 6., 7., 8., 8.]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        # Canonicalize to a (left, right, top, bottom) 4-tuple.
        self.padding = _quadruple(padding)
624
+
625
+
626
class ReplicationPad3d(_ReplicationPadNd):
    r"""Pad the three trailing (spatial) dimensions by repeating the edge values.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size.  An ``int`` pads every boundary by
            the same amount; a 6-``tuple`` is interpreted as
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`).

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ReplicationPad3d(3)
        >>> input = torch.randn(16, 3, 8, 320, 480)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        # Canonicalize to the (left, right, top, bottom, front, back) 6-tuple.
        self.padding = _ntuple(6)(padding)
665
+
666
+
667
class ZeroPad1d(ConstantPad1d):
    r"""Pad the last dimension of the input with zeros.

    Thin wrapper over :class:`ConstantPad1d` with a fill value fixed at ``0``.
    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size.  An ``int`` pads both ends by the
            same amount; a 2-``tuple`` gives
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`).

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ZeroPad1d(2)
        >>> input = torch.randn(1, 2, 4)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad1d((3, 1))
        >>> output = m(input)
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__(padding, 0.0)

    def extra_repr(self) -> str:
        # Unlike ConstantPad1d, the (always-zero) fill value is not shown.
        return str(self.padding)
718
+
719
+
720
class ZeroPad2d(ConstantPad2d):
    r"""Pad the two trailing (spatial) dimensions of the input with zeros.

    Thin wrapper over :class:`ConstantPad2d` with a fill value fixed at ``0``.
    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size.  An ``int`` pads every boundary by
            the same amount; a 4-``tuple`` gives (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`,
            :math:`\text{padding\_bottom}`).

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ZeroPad2d(2)
        >>> input = torch.randn(1, 1, 3, 3)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad2d((1, 1, 2, 0))
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__(padding, 0.0)

    def extra_repr(self) -> str:
        # Unlike ConstantPad2d, the (always-zero) fill value is not shown.
        return str(self.padding)
772
+
773
+
774
class ZeroPad3d(ConstantPad3d):
    r"""Pad the three trailing (spatial) dimensions of the input with zeros.

    Thin wrapper over :class:`ConstantPad3d` with a fill value fixed at ``0``.
    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): padding size.  An ``int`` pads every boundary by
            the same amount; a 6-``tuple`` is interpreted as
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`).

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ZeroPad3d(3)
        >>> input = torch.randn(16, 3, 10, 20, 30)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1))
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__(padding, 0.0)

    def extra_repr(self) -> str:
        # Unlike ConstantPad3d, the (always-zero) fill value is not shown.
        return str(self.padding)
janus/lib/python3.10/site-packages/torch/nn/modules/pooling.py ADDED
@@ -0,0 +1,1494 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+
3
+ import torch.nn.functional as F
4
+ from torch import Tensor
5
+ from torch.nn.common_types import (
6
+ _ratio_2_t,
7
+ _ratio_3_t,
8
+ _size_1_t,
9
+ _size_2_opt_t,
10
+ _size_2_t,
11
+ _size_3_opt_t,
12
+ _size_3_t,
13
+ _size_any_opt_t,
14
+ _size_any_t,
15
+ )
16
+
17
+ from .module import Module
18
+ from .utils import _pair, _single, _triple
19
+
20
+
21
# Public API of this module: the pooling layers re-exported through torch.nn.
__all__ = [
    "MaxPool1d",
    "MaxPool2d",
    "MaxPool3d",
    "MaxUnpool1d",
    "MaxUnpool2d",
    "MaxUnpool3d",
    "AvgPool1d",
    "AvgPool2d",
    "AvgPool3d",
    "FractionalMaxPool2d",
    "FractionalMaxPool3d",
    "LPPool1d",
    "LPPool2d",
    "LPPool3d",
    "AdaptiveMaxPool1d",
    "AdaptiveMaxPool2d",
    "AdaptiveMaxPool3d",
    "AdaptiveAvgPool1d",
    "AdaptiveAvgPool2d",
    "AdaptiveAvgPool3d",
]
43
+
44
+
45
class _MaxPoolNd(Module):
    """Base class storing the hyper-parameters shared by MaxPool1d/2d/3d.

    Subclasses supply the dimension-specific ``forward`` that dispatches to
    the matching ``torch.nn.functional.max_poolNd`` call.
    """

    __constants__ = [
        "kernel_size",
        "stride",
        "padding",
        "dilation",
        "return_indices",
        "ceil_mode",
    ]
    return_indices: bool
    ceil_mode: bool

    def __init__(
        self,
        kernel_size: _size_any_t,
        stride: Optional[_size_any_t] = None,
        padding: _size_any_t = 0,
        dilation: _size_any_t = 1,
        return_indices: bool = False,
        ceil_mode: bool = False,
    ) -> None:
        super().__init__()
        self.kernel_size = kernel_size
        # Default to non-overlapping windows: a stride equal to the kernel size.
        self.stride = kernel_size if stride is None else stride
        self.padding = padding
        self.dilation = dilation
        self.return_indices = return_indices
        self.ceil_mode = ceil_mode

    def extra_repr(self) -> str:
        return (
            f"kernel_size={self.kernel_size}, stride={self.stride}, "
            f"padding={self.padding}, dilation={self.dilation}, "
            f"ceil_mode={self.ceil_mode}"
        )
79
+
80
+
81
class MaxPool1d(_MaxPoolNd):
    r"""Applies a 1D max pooling over an input signal composed of several input planes.

    Each output element is the maximum over a sliding window of
    :attr:`kernel_size` elements taken every :attr:`stride` positions:

    .. math::
        out(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel\_size} - 1}
                input(N_i, C_j, stride \times k + m)

    A non-zero :attr:`padding` implicitly pads both ends with ``-inf``, and
    :attr:`dilation` spaces out the elements sampled within each window.
    This `link`_ has a nice visualization of the pooling parameters.

    Note:
        When ``ceil_mode=True``, sliding windows may run off the right edge
        provided they start inside the input or its left padding; windows
        that would start entirely in the right padded region are ignored.

    Args:
        kernel_size: size of the sliding window, must be > 0.
        stride: step between windows, must be > 0.  Defaults to :attr:`kernel_size`.
        padding: implicit ``-inf`` padding on both sides, must be >= 0 and <= kernel_size / 2.
        dilation: spacing between elements inside a window, must be > 0.
        return_indices: if ``True``, also return the argmax indices
            (useful for :class:`torch.nn.MaxUnpool1d` later).
        ceil_mode: if ``True``, use `ceil` instead of `floor` for the output
            length, so every input element is covered by some window.

    Shape:
        - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
        - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where

          .. math::
              L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation}
                    \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor

    Examples::

        >>> # pool of size=3, stride=2
        >>> m = nn.MaxPool1d(3, stride=2)
        >>> input = torch.randn(20, 16, 50)
        >>> output = m(input)

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    kernel_size: _size_1_t
    stride: _size_1_t
    padding: _size_1_t
    dilation: _size_1_t

    def forward(self, input: Tensor):
        # All hyper-parameters were stored by _MaxPoolNd.__init__;
        # just forward them to the functional implementation.
        return F.max_pool1d(
            input,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            ceil_mode=self.ceil_mode,
            return_indices=self.return_indices,
        )
143
+
144
+
145
class MaxPool2d(_MaxPoolNd):
    r"""Applies a 2D max pooling over an input signal composed of several input planes.

    With :attr:`kernel_size` :math:`(kH, kW)`, each output element is the
    maximum over a :math:`kH \times kW` window of the input:

    .. math::
        \begin{aligned}
            out(N_i, C_j, h, w) ={} & \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
                                    & \text{input}(N_i, C_j, \text{stride[0]} \times h + m,
                                                   \text{stride[1]} \times w + n)
        \end{aligned}

    A non-zero :attr:`padding` implicitly pads the input with ``-inf`` on all
    sides, and :attr:`dilation` controls the spacing between the kernel
    points; this `link`_ has a nice visualization of what :attr:`dilation` does.

    Note:
        When ``ceil_mode=True``, sliding windows may run off-bounds provided
        they start inside the input or the left padding; windows that would
        start entirely in the right padded region are ignored.

    :attr:`kernel_size`, :attr:`stride`, :attr:`padding` and :attr:`dilation`
    each accept either a single ``int`` (applied to both height and width) or
    a ``tuple`` of two ints ``(height, width)``.

    Args:
        kernel_size: the size of the window to take a max over
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit ``-inf`` padding added on both sides
        dilation: spacing between elements sampled within each window
        return_indices: if ``True``, also return the argmax indices
            (useful for :class:`torch.nn.MaxUnpool2d` later)
        ceil_mode: when ``True``, use `ceil` instead of `floor` to compute the
            output shape

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]}
                    \times (\text{kernel\_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]}
                    \times (\text{kernel\_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor

    Examples::

        >>> # pool of square window of size=3, stride=2
        >>> m = nn.MaxPool2d(3, stride=2)
        >>> # pool of non-square window
        >>> m = nn.MaxPool2d((3, 2), stride=(2, 1))
        >>> input = torch.randn(20, 16, 50, 32)
        >>> output = m(input)

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    kernel_size: _size_2_t
    stride: _size_2_t
    padding: _size_2_t
    dilation: _size_2_t

    def forward(self, input: Tensor):
        # All hyper-parameters were stored by _MaxPoolNd.__init__;
        # just forward them to the functional implementation.
        return F.max_pool2d(
            input,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            ceil_mode=self.ceil_mode,
            return_indices=self.return_indices,
        )
222
+
223
+
224
class MaxPool3d(_MaxPoolNd):
    r"""Applies a 3D max pooling over an input signal composed of several input planes.

    For an input of size :math:`(N, C, D, H, W)` each output element is the
    maximum over a :math:`kD \times kH \times kW` window of the input, stepped
    by :attr:`stride`.  A non-zero :attr:`padding` implicitly pads the input
    with negative infinity on all three sides, and :attr:`dilation` spaces out
    the kernel points.

    Note:
        When ``ceil_mode=True``, sliding windows are allowed to go off-bounds
        if they start within the left padding or the input.  Windows that
        would start in the right padded region are ignored.

    Each of :attr:`kernel_size`, :attr:`stride`, :attr:`padding` and
    :attr:`dilation` may be a single ``int`` (used for depth, height and
    width alike) or a tuple of three ints ``(d, h, w)``.

    Args:
        kernel_size: the size of the window to take a max over
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit negative infinity padding added on all three sides
        dilation: controls the spacing between kernel elements
        return_indices: if ``True``, will return the max indices along with
            the outputs. Useful for :class:`torch.nn.MaxUnpool3d` later
        ceil_mode: when ``True``, use `ceil` instead of `floor` to compute the
            output shape

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where each spatial extent is

          .. math::
              X_{out} = \left\lfloor\frac{X_{in} + 2 \times \text{padding} -
              \text{dilation} \times (\text{kernel\_size} - 1) - 1}{\text{stride}}
              + 1\right\rfloor

    Examples::

        >>> # pool of non-square window
        >>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2))
        >>> output = m(torch.randn(20, 16, 50, 44, 31))
    """

    kernel_size: _size_3_t
    stride: _size_3_t
    padding: _size_3_t
    dilation: _size_3_t

    def forward(self, input: Tensor):
        # Delegate to the functional op with this module's stored settings.
        return F.max_pool3d(
            input,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            ceil_mode=self.ceil_mode,
            return_indices=self.return_indices,
        )
305
+
306
+
307
+ class _MaxUnpoolNd(Module):
308
+ def extra_repr(self) -> str:
309
+ return f"kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}"
310
+
311
+
312
class MaxUnpool1d(_MaxUnpoolNd):
    r"""Computes a partial inverse of :class:`MaxPool1d`.

    :class:`MaxPool1d` is not fully invertible because non-maximal values are
    discarded.  This module takes the pooled output together with the argmax
    indices produced by :class:`MaxPool1d` and reconstructs a tensor in which
    every non-maximal position is zero.

    Note:
        This operation may behave nondeterministically when the input indices
        contain repeated values.  See
        https://github.com/pytorch/pytorch/issues/80827 and
        :doc:`/notes/randomness` for more information.

    .. note:: Several input sizes can map to the same pooled size, so the
        inversion is ambiguous.  Pass the desired size via the optional
        :attr:`output_size` argument of the forward call to disambiguate.

    Args:
        kernel_size (int or tuple): Size of the max pooling window.
        stride (int or tuple): Stride of the max pooling window.
            It is set to :attr:`kernel_size` by default.
        padding (int or tuple): Padding that was added to the input

    Inputs:
        - `input`: the input Tensor to invert
        - `indices`: the indices given out by :class:`~torch.nn.MaxPool1d`
        - `output_size` (optional): the targeted output size

    Shape:
        - Input: :math:`(N, C, H_{in})` or :math:`(C, H_{in})`.
        - Output: :math:`(N, C, H_{out})` or :math:`(C, H_{out})`, where

          .. math::
              H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0]

          or as given by :attr:`output_size` in the call operator

    Example::

        >>> pool = nn.MaxPool1d(2, stride=2, return_indices=True)
        >>> unpool = nn.MaxUnpool1d(2, stride=2)
        >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8]]])
        >>> output, indices = pool(input)
        >>> unpool(output, indices)
        tensor([[[ 0.,  2.,  0.,  4.,  0.,  6.,  0., 8.]]])
    """

    kernel_size: _size_1_t
    stride: _size_1_t
    padding: _size_1_t

    def __init__(
        self,
        kernel_size: _size_1_t,
        stride: Optional[_size_1_t] = None,
        padding: _size_1_t = 0,
    ) -> None:
        super().__init__()
        # A missing stride defaults to the kernel size, mirroring MaxPool1d.
        effective_stride = kernel_size if stride is None else stride
        self.kernel_size = _single(kernel_size)
        self.stride = _single(effective_stride)
        self.padding = _single(padding)

    def forward(
        self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None
    ) -> Tensor:
        return F.max_unpool1d(
            input, indices, self.kernel_size, self.stride, self.padding, output_size
        )
392
+
393
+
394
class MaxUnpool2d(_MaxUnpoolNd):
    r"""Computes a partial inverse of :class:`MaxPool2d`.

    :class:`MaxPool2d` is not fully invertible because non-maximal values are
    discarded.  This module takes the pooled output together with the argmax
    indices produced by :class:`MaxPool2d` and reconstructs a tensor in which
    every non-maximal position is zero.

    Note:
        This operation may behave nondeterministically when the input indices
        contain repeated values.  See
        https://github.com/pytorch/pytorch/issues/80827 and
        :doc:`/notes/randomness` for more information.

    .. note:: Several input sizes can map to the same pooled size, so the
        inversion is ambiguous.  Pass the desired size via the optional
        :attr:`output_size` argument of the forward call to disambiguate.

    Args:
        kernel_size (int or tuple): Size of the max pooling window.
        stride (int or tuple): Stride of the max pooling window.
            It is set to :attr:`kernel_size` by default.
        padding (int or tuple): Padding that was added to the input

    Inputs:
        - `input`: the input Tensor to invert
        - `indices`: the indices given out by :class:`~torch.nn.MaxPool2d`
        - `output_size` (optional): the targeted output size

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          .. math::
              H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}

          .. math::
              W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}

          or as given by :attr:`output_size` in the call operator

    Example::

        >>> pool = nn.MaxPool2d(2, stride=2, return_indices=True)
        >>> unpool = nn.MaxUnpool2d(2, stride=2)
        >>> input = torch.tensor([[[[ 1.,  2.,  3.,  4.],
                                    [ 5.,  6.,  7.,  8.],
                                    [ 9., 10., 11., 12.],
                                    [13., 14., 15., 16.]]]])
        >>> output, indices = pool(input)
        >>> unpool(output, indices)
        tensor([[[[  0.,   0.,   0.,   0.],
                  [  0.,   6.,   0.,   8.],
                  [  0.,   0.,   0.,   0.],
                  [  0.,  14.,   0.,  16.]]]])
        >>> # use output_size to resolve an ambiguous inverse size
        >>> output, indices = pool(torch.randn(1, 1, 4, 5))
        >>> unpool(output, indices, output_size=(1, 1, 4, 5)).shape
        torch.Size([1, 1, 4, 5])
    """

    kernel_size: _size_2_t
    stride: _size_2_t
    padding: _size_2_t

    def __init__(
        self,
        kernel_size: _size_2_t,
        stride: Optional[_size_2_t] = None,
        padding: _size_2_t = 0,
    ) -> None:
        super().__init__()
        # A missing stride defaults to the kernel size, mirroring MaxPool2d.
        effective_stride = kernel_size if stride is None else stride
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(effective_stride)
        self.padding = _pair(padding)

    def forward(
        self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None
    ) -> Tensor:
        return F.max_unpool2d(
            input, indices, self.kernel_size, self.stride, self.padding, output_size
        )
487
+
488
+
489
class MaxUnpool3d(_MaxUnpoolNd):
    r"""Computes a partial inverse of :class:`MaxPool3d`.

    :class:`MaxPool3d` is not fully invertible because non-maximal values are
    discarded.  This module takes the pooled output together with the argmax
    indices produced by :class:`MaxPool3d` and reconstructs a tensor in which
    every non-maximal position is zero.

    Note:
        This operation may behave nondeterministically when the input indices
        contain repeated values.  See
        https://github.com/pytorch/pytorch/issues/80827 and
        :doc:`/notes/randomness` for more information.

    .. note:: Several input sizes can map to the same pooled size, so the
        inversion is ambiguous.  Pass the desired size via the optional
        :attr:`output_size` argument of the forward call to disambiguate.

    Args:
        kernel_size (int or tuple): Size of the max pooling window.
        stride (int or tuple): Stride of the max pooling window.
            It is set to :attr:`kernel_size` by default.
        padding (int or tuple): Padding that was added to the input

    Inputs:
        - `input`: the input Tensor to invert
        - `indices`: the indices given out by :class:`~torch.nn.MaxPool3d`
        - `output_size` (optional): the targeted output size

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where each spatial extent is

          .. math::
              X_{out} = (X_{in} - 1) \times \text{stride} - 2 \times \text{padding} + \text{kernel\_size}

          or as given by :attr:`output_size` in the call operator

    Example::

        >>> # pool of square window of size=3, stride=2
        >>> pool = nn.MaxPool3d(3, stride=2, return_indices=True)
        >>> unpool = nn.MaxUnpool3d(3, stride=2)
        >>> output, indices = pool(torch.randn(20, 16, 51, 33, 15))
        >>> unpool(output, indices).size()
        torch.Size([20, 16, 51, 33, 15])
    """

    kernel_size: _size_3_t
    stride: _size_3_t
    padding: _size_3_t

    def __init__(
        self,
        kernel_size: _size_3_t,
        stride: Optional[_size_3_t] = None,
        padding: _size_3_t = 0,
    ) -> None:
        super().__init__()
        # A missing stride defaults to the kernel size, mirroring MaxPool3d.
        effective_stride = kernel_size if stride is None else stride
        self.kernel_size = _triple(kernel_size)
        self.stride = _triple(effective_stride)
        self.padding = _triple(padding)

    def forward(
        self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None
    ) -> Tensor:
        return F.max_unpool3d(
            input, indices, self.kernel_size, self.stride, self.padding, output_size
        )
565
+
566
+
567
+ class _AvgPoolNd(Module):
568
+ __constants__ = [
569
+ "kernel_size",
570
+ "stride",
571
+ "padding",
572
+ "ceil_mode",
573
+ "count_include_pad",
574
+ ]
575
+
576
+ def extra_repr(self) -> str:
577
+ return f"kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}"
578
+
579
+
580
class AvgPool1d(_AvgPoolNd):
    r"""Applies a 1D average pooling over an input signal composed of several input planes.

    In the simplest case, the output value of the layer with input size :math:`(N, C, L)`,
    output :math:`(N, C, L_{out})` and :attr:`kernel_size` :math:`k`
    can be precisely described as:

    .. math::

        \text{out}(N_i, C_j, l) = \frac{1}{k} \sum_{m=0}^{k-1}
                               \text{input}(N_i, C_j, \text{stride} \times l + m)

    If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
    for :attr:`padding` number of points.

    Note:
        When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
        or the input. Sliding windows that would start in the right padded region are ignored.

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can each be
    an ``int`` or a one-element tuple.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on both sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
        count_include_pad: when True, will include the zero-padding in the averaging calculation

    Shape:
        - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
        - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where

          .. math::
              L_{out} = \left\lfloor \frac{L_{in} +
              2 \times \text{padding} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor

          Per the note above, if ``ceil_mode`` is True and :math:`(L_{out} - 1) \times \text{stride} \geq L_{in}
          + \text{padding}`, we skip the last window as it would start in the right padded region, resulting in
          :math:`L_{out}` being reduced by one.

    Examples::

        >>> # pool with window of size=3, stride=2
        >>> m = nn.AvgPool1d(3, stride=2)
        >>> m(torch.tensor([[[1., 2, 3, 4, 5, 6, 7]]]))
        tensor([[[2., 4., 6.]]])
    """

    kernel_size: _size_1_t
    stride: _size_1_t
    padding: _size_1_t
    ceil_mode: bool
    count_include_pad: bool

    def __init__(
        self,
        kernel_size: _size_1_t,
        # Fixed annotation: `None` is a valid default (meaning "use
        # kernel_size"), so the type must be Optional — this matches the
        # sibling AvgPool2d/AvgPool3d signatures.
        stride: Optional[_size_1_t] = None,
        padding: _size_1_t = 0,
        ceil_mode: bool = False,
        count_include_pad: bool = True,
    ) -> None:
        super().__init__()
        self.kernel_size = _single(kernel_size)
        # A missing stride defaults to the kernel size (non-overlapping windows).
        self.stride = _single(stride if stride is not None else kernel_size)
        self.padding = _single(padding)
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad

    def forward(self, input: Tensor) -> Tensor:
        return F.avg_pool1d(
            input,
            self.kernel_size,
            self.stride,
            self.padding,
            self.ceil_mode,
            self.count_include_pad,
        )
659
+
660
+
661
class AvgPool2d(_AvgPoolNd):
    r"""Applies a 2D average pooling over an input signal composed of several input planes.

    For an input of size :math:`(N, C, H, W)` each output element is the mean
    over a :math:`kH \times kW` window of the input, stepped by
    :attr:`stride`.  A non-zero :attr:`padding` implicitly zero-pads the input
    on both sides.

    Note:
        When ``ceil_mode=True``, sliding windows are allowed to go off-bounds
        if they start within the left padding or the input.  Windows that
        would start in the right padded region are ignored.

    Each of :attr:`kernel_size`, :attr:`stride` and :attr:`padding` may be a
    single ``int`` (used for height and width alike) or a tuple of two ints
    ``(h, w)``.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on both sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
        count_include_pad: when True, will include the zero-padding in the averaging calculation
        divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] -
                \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] -
                \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor

          Per the note above, if ``ceil_mode`` is True and a last window would
          start in the padded region, it is skipped, reducing the
          corresponding output extent by one.

    Examples::

        >>> # pool of non-square window
        >>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
        >>> output = m(torch.randn(20, 16, 50, 32))
    """

    __constants__ = [
        "kernel_size",
        "stride",
        "padding",
        "ceil_mode",
        "count_include_pad",
        "divisor_override",
    ]

    kernel_size: _size_2_t
    stride: _size_2_t
    padding: _size_2_t
    ceil_mode: bool
    count_include_pad: bool

    def __init__(
        self,
        kernel_size: _size_2_t,
        stride: Optional[_size_2_t] = None,
        padding: _size_2_t = 0,
        ceil_mode: bool = False,
        count_include_pad: bool = True,
        divisor_override: Optional[int] = None,
    ) -> None:
        super().__init__()
        # Values are stored exactly as given (no int -> pair expansion here);
        # F.avg_pool2d accepts both forms.
        self.kernel_size = kernel_size
        self.stride = kernel_size if stride is None else stride
        self.padding = padding
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad
        self.divisor_override = divisor_override

    def forward(self, input: Tensor) -> Tensor:
        return F.avg_pool2d(
            input,
            self.kernel_size,
            self.stride,
            self.padding,
            self.ceil_mode,
            self.count_include_pad,
            self.divisor_override,
        )
765
+
766
+
767
class AvgPool3d(_AvgPoolNd):
    r"""Applies a 3D average pooling over an input signal composed of several input planes.

    For an input of size :math:`(N, C, D, H, W)` each output element is the
    mean over a :math:`kD \times kH \times kW` window of the input, stepped by
    :attr:`stride`.  A non-zero :attr:`padding` implicitly zero-pads the input
    on all three sides.

    Note:
        When ``ceil_mode=True``, sliding windows are allowed to go off-bounds
        if they start within the left padding or the input.  Windows that
        would start in the right padded region are ignored.

    :attr:`kernel_size` and :attr:`stride` may each be a single ``int`` (used
    for depth, height and width alike) or a tuple of three ints ``(d, h, w)``.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default value is :attr:`kernel_size`
        padding: implicit zero padding to be added on all three sides
        ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
        count_include_pad: when True, will include the zero-padding in the averaging calculation
        divisor_override: if specified, it will be used as divisor, otherwise :attr:`kernel_size` will be used

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where each spatial extent is

          .. math::
              X_{out} = \left\lfloor\frac{X_{in} + 2 \times \text{padding} -
                \text{kernel\_size}}{\text{stride}} + 1\right\rfloor

          Per the note above, if ``ceil_mode`` is True and a last window would
          start in the padded region, it is skipped, reducing the
          corresponding output extent by one.

    Examples::

        >>> # pool of non-square window
        >>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2))
        >>> output = m(torch.randn(20, 16, 50, 44, 31))
    """

    __constants__ = [
        "kernel_size",
        "stride",
        "padding",
        "ceil_mode",
        "count_include_pad",
        "divisor_override",
    ]

    kernel_size: _size_3_t
    stride: _size_3_t
    padding: _size_3_t
    ceil_mode: bool
    count_include_pad: bool

    def __init__(
        self,
        kernel_size: _size_3_t,
        stride: Optional[_size_3_t] = None,
        padding: _size_3_t = 0,
        ceil_mode: bool = False,
        count_include_pad: bool = True,
        divisor_override: Optional[int] = None,
    ) -> None:
        super().__init__()
        # Values are stored exactly as given (no int -> triple expansion here);
        # F.avg_pool3d accepts both forms.
        self.kernel_size = kernel_size
        self.stride = kernel_size if stride is None else stride
        self.padding = padding
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad
        self.divisor_override = divisor_override

    def forward(self, input: Tensor) -> Tensor:
        return F.avg_pool3d(
            input,
            self.kernel_size,
            self.stride,
            self.padding,
            self.ceil_mode,
            self.count_include_pad,
            self.divisor_override,
        )

    def __setstate__(self, d):
        super().__setstate__(d)
        # Modules serialized before these attributes existed are backfilled
        # with the constructor defaults on load.
        for name, default in (
            ("padding", 0),
            ("ceil_mode", False),
            ("count_include_pad", True),
        ):
            self.__dict__.setdefault(name, default)
884
+
885
+
886
class FractionalMaxPool2d(Module):
    r"""Applies a 2D fractional max pooling over an input signal composed of several input planes.

    Fractional MaxPooling is described in detail in the paper
    `Fractional MaxPooling`_ by Ben Graham.

    Max pooling is applied in :math:`kH \times kW` regions placed by a
    stochastic step size chosen so the result matches the target output size.
    The number of output features equals the number of input planes.

    .. note:: Exactly one of ``output_size`` or ``output_ratio`` must be defined.

    Args:
        kernel_size: the size of the window to take a max over.
            Can be a single number k (for a square kernel of k x k) or a tuple `(kh, kw)`
        output_size: the target output size of the image of the form `oH x oW`.
            Can be a tuple `(oH, oW)` or a single number oH for a square image `oH x oH`.
            Note that we must have :math:`kH + oH - 1 <= H_{in}` and :math:`kW + oW - 1 <= W_{in}`
        output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
            This has to be a number or tuple in the range (0, 1).
            Note that we must have :math:`kH + (output\_ratio\_H * H_{in}) - 1 <= H_{in}`
            and :math:`kW + (output\_ratio\_W * W_{in}) - 1 <= W_{in}`
        return_indices: if ``True``, will return the indices along with the outputs.
            Useful to pass to :meth:`nn.MaxUnpool2d`. Default: ``False``

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
          :math:`(H_{out}, W_{out})=\text{output\_size}` or
          :math:`(H_{out}, W_{out})=\text{output\_ratio} \times (H_{in}, W_{in})`.

    Examples:
        >>> # pool of square window of size=3, and target output size 13x12
        >>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12))
        >>> # pool of square window and target output size being half of input image size
        >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
        >>> output = m(torch.randn(20, 16, 50, 32))

    .. _Fractional MaxPooling:
        https://arxiv.org/abs/1412.6071
    """

    __constants__ = ["kernel_size", "return_indices", "output_size", "output_ratio"]

    kernel_size: _size_2_t
    return_indices: bool
    output_size: _size_2_t
    output_ratio: _ratio_2_t

    def __init__(
        self,
        kernel_size: _size_2_t,
        output_size: Optional[_size_2_t] = None,
        output_ratio: Optional[_ratio_2_t] = None,
        return_indices: bool = False,
        _random_samples=None,
    ) -> None:
        super().__init__()
        self.kernel_size = _pair(kernel_size)
        self.return_indices = return_indices
        # Registered as a buffer so user-supplied samples move with the module.
        self.register_buffer("_random_samples", _random_samples)
        self.output_size = None if output_size is None else _pair(output_size)
        self.output_ratio = None if output_ratio is None else _pair(output_ratio)
        # Exactly one of the two sizing modes must be chosen.
        if output_size is None and output_ratio is None:
            raise ValueError(
                "FractionalMaxPool2d requires specifying either "
                "an output size, or a pooling ratio"
            )
        if output_size is not None and output_ratio is not None:
            raise ValueError(
                "only one of output_size and output_ratio may be specified"
            )
        if self.output_ratio is not None:
            ratio_h, ratio_w = self.output_ratio
            if not (0 < ratio_h < 1 and 0 < ratio_w < 1):
                raise ValueError(
                    f"output_ratio must be between 0 and 1 (got {output_ratio})"
                )

    def forward(self, input: Tensor):
        return F.fractional_max_pool2d(
            input,
            self.kernel_size,
            self.output_size,
            self.output_ratio,
            self.return_indices,
            _random_samples=self._random_samples,
        )
973
+
974
+
975
class FractionalMaxPool3d(Module):
    r"""Applies 3D fractional max pooling over an input made of several planes.

    Fractional max pooling, introduced in `Fractional MaxPooling`_ by Ben
    Graham, takes the maximum over :math:`kT \times kH \times kW` regions whose
    placement uses a stochastic step size determined by the target output size.
    The number of output channels equals the number of input channels.

    .. note:: Exactly one of ``output_size`` or ``output_ratio`` must be given.

    Args:
        kernel_size: pooling window size; an int ``k`` (for ``k x k x k``) or a
            tuple ``(kt, kh, kw)``.
        output_size: target output size ``oT x oH x oW``; a tuple
            ``(oT, oH, oW)`` or a single int for a cubic output.
        output_ratio: alternatively, the output size as a fraction of the input
            size; a number or tuple with every component in (0, 1).
        return_indices: if ``True``, also return the argmax indices, usable
            with :meth:`nn.MaxUnpool3d`. Default: ``False``.

    Shape:
        - Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or
          :math:`(C, T_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or
          :math:`(C, T_{out}, H_{out}, W_{out})`, where the output size is
          ``output_size`` or ``output_ratio`` times the input size.

    Examples:
        >>> # pool of cubic window of size=3, and target output size 13x12x11
        >>> m = nn.FractionalMaxPool3d(3, output_size=(13, 12, 11))
        >>> # pool of cubic window and target output size being half of input size
        >>> m = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5))
        >>> output = m(torch.randn(20, 16, 50, 32, 16))

    .. _Fractional MaxPooling:
        https://arxiv.org/abs/1412.6071
    """

    __constants__ = ["kernel_size", "return_indices", "output_size", "output_ratio"]
    kernel_size: _size_3_t
    return_indices: bool
    output_size: _size_3_t
    output_ratio: _ratio_3_t

    def __init__(
        self,
        kernel_size: _size_3_t,
        output_size: Optional[_size_3_t] = None,
        output_ratio: Optional[_ratio_3_t] = None,
        return_indices: bool = False,
        _random_samples=None,
    ) -> None:
        super().__init__()
        self.kernel_size = _triple(kernel_size)
        self.return_indices = return_indices
        # Buffer so user-supplied sampling tensors follow the module's device.
        self.register_buffer("_random_samples", _random_samples)
        self.output_size = _triple(output_size) if output_size is not None else None
        self.output_ratio = (
            _triple(output_ratio) if output_ratio is not None else None
        )
        if output_size is None and output_ratio is None:
            raise ValueError(
                "FractionalMaxPool3d requires specifying either "
                "an output size, or a pooling ratio"
            )
        if output_size is not None and output_ratio is not None:
            raise ValueError(
                "only one of output_size and output_ratio may be specified"
            )
        if self.output_ratio is not None:
            # Every ratio component must lie strictly inside (0, 1).
            if not all(0 < ratio < 1 for ratio in self.output_ratio):
                raise ValueError(
                    f"output_ratio must be between 0 and 1 (got {output_ratio})"
                )

    def forward(self, input: Tensor):
        """Apply 3D fractional max pooling; also returns indices if configured."""
        return F.fractional_max_pool3d(
            input,
            self.kernel_size,
            self.output_size,
            self.output_ratio,
            self.return_indices,
            _random_samples=self._random_samples,
        )
1062
+
1063
+
1064
+ class _LPPoolNd(Module):
1065
+ __constants__ = ["norm_type", "kernel_size", "stride", "ceil_mode"]
1066
+
1067
+ norm_type: float
1068
+ ceil_mode: bool
1069
+
1070
+ def __init__(
1071
+ self,
1072
+ norm_type: float,
1073
+ kernel_size: _size_any_t,
1074
+ stride: Optional[_size_any_t] = None,
1075
+ ceil_mode: bool = False,
1076
+ ) -> None:
1077
+ super().__init__()
1078
+ self.norm_type = norm_type
1079
+ self.kernel_size = kernel_size
1080
+ self.stride = stride
1081
+ self.ceil_mode = ceil_mode
1082
+
1083
+ def extra_repr(self) -> str:
1084
+ return (
1085
+ "norm_type={norm_type}, kernel_size={kernel_size}, stride={stride}, "
1086
+ "ceil_mode={ceil_mode}".format(**self.__dict__)
1087
+ )
1088
+
1089
+
1090
class LPPool1d(_LPPoolNd):
    r"""Applies 1D power-average pooling over an input of several planes.

    Every window is reduced to :math:`f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}`;
    :math:`p = \infty` recovers max pooling and :math:`p = 1` gives sum pooling
    (proportional to average pooling).

    .. note:: If the sum raised to the power ``p`` is zero, the gradient of
        this function is undefined; this implementation sets it to zero.

    Args:
        kernel_size: a single int, the length of the window
        stride: a single int, the stride of the window. Default: :attr:`kernel_size`
        ceil_mode: when True, use `ceil` instead of `floor` for the output shape

    Shape:
        - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
        - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where

          .. math::
              L_{out} = \left\lfloor\frac{L_{in} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor

    Examples::
        >>> # power-2 pool of window of length 3, with stride 2.
        >>> m = nn.LPPool1d(2, 3, stride=2)
        >>> output = m(torch.randn(20, 16, 50))
    """

    kernel_size: _size_1_t
    stride: _size_1_t

    def forward(self, input: Tensor) -> Tensor:
        """Run 1D LP pooling with this module's settings."""
        p = float(self.norm_type)
        return F.lp_pool1d(input, p, self.kernel_size, self.stride, self.ceil_mode)
1130
+
1131
+
1132
class LPPool2d(_LPPoolNd):
    r"""Applies 2D power-average pooling over an input of several planes.

    Every window is reduced to :math:`f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}`;
    :math:`p = \infty` recovers max pooling and :math:`p = 1` gives sum pooling
    (proportional to average pooling).

    ``kernel_size`` and ``stride`` may each be a single ``int`` (used for both
    height and width) or a tuple of two ints ``(height, width)``.

    .. note:: If the sum raised to the power ``p`` is zero, the gradient of
        this function is undefined; this implementation sets it to zero.

    Args:
        kernel_size: the size of the window
        stride: the stride of the window. Default: :attr:`kernel_size`
        ceil_mode: when True, use `ceil` instead of `floor` for the output shape

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor

    Examples::

        >>> # power-2 pool of square window of size=3, stride=2
        >>> m = nn.LPPool2d(2, 3, stride=2)
        >>> # pool of non-square window of power 1.2
        >>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1))
        >>> output = m(torch.randn(20, 16, 50, 32))
    """

    kernel_size: _size_2_t
    stride: _size_2_t

    def forward(self, input: Tensor) -> Tensor:
        """Run 2D LP pooling with this module's settings."""
        p = float(self.norm_type)
        return F.lp_pool2d(input, p, self.kernel_size, self.stride, self.ceil_mode)
1185
+
1186
+
1187
+ class LPPool3d(_LPPoolNd):
1188
+ r"""Applies a 3D power-average pooling over an input signal composed of several input planes.
1189
+
1190
+ On each window, the function computed is:
1191
+
1192
+ .. math::
1193
+ f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
1194
+
1195
+ - At p = :math:`\infty`, one gets Max Pooling
1196
+ - At p = 1, one gets Sum Pooling (which is proportional to average pooling)
1197
+
1198
+ The parameters :attr:`kernel_size`, :attr:`stride` can either be:
1199
+
1200
+ - a single ``int`` -- in which case the same value is used for the height, width and depth dimension
1201
+ - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
1202
+ the second `int` for the height dimension and the third `int` for the width dimension
1203
+
1204
+ .. note:: If the sum to the power of `p` is zero, the gradient of this function is
1205
+ not defined. This implementation will set the gradient to zero in this case.
1206
+
1207
+ Args:
1208
+ kernel_size: the size of the window
1209
+ stride: the stride of the window. Default value is :attr:`kernel_size`
1210
+ ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
1211
+
1212
+ Shape:
1213
+ - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
1214
+ - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
1215
+ :math:`(C, D_{out}, H_{out}, W_{out})`, where
1216
+
1217
+ .. math::
1218
+ D_{out} = \left\lfloor\frac{D_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
1219
+
1220
+ .. math::
1221
+ H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
1222
+
1223
+ .. math::
1224
+ W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor
1225
+
1226
+ Examples::
1227
+
1228
+ >>> # power-2 pool of square window of size=3, stride=2
1229
+ >>> m = nn.LPPool3d(2, 3, stride=2)
1230
+ >>> # pool of non-square window of power 1.2
1231
+ >>> m = nn.LPPool3d(1.2, (3, 2, 2), stride=(2, 1, 2))
1232
+ >>> input = torch.randn(20, 16, 50, 44, 31)
1233
+ >>> output = m(input)
1234
+
1235
+ """
1236
+
1237
+ kernel_size: _size_3_t
1238
+ stride: _size_3_t
1239
+
1240
+ def forward(self, input: Tensor) -> Tensor:
1241
+ return F.lp_pool3d(
1242
+ input, float(self.norm_type), self.kernel_size, self.stride, self.ceil_mode
1243
+ )
1244
+
1245
+
1246
class _AdaptiveMaxPoolNd(Module):
    """Common constructor and repr for the AdaptiveMaxPool1d/2d/3d modules.

    Subclasses only add a ``forward`` that dispatches to the matching
    ``F.adaptive_max_poolNd`` functional.
    """

    __constants__ = ["output_size", "return_indices"]
    return_indices: bool

    def __init__(
        self, output_size: _size_any_opt_t, return_indices: bool = False
    ) -> None:
        super().__init__()
        # Target spatial output size; components may be None (keep input size).
        self.output_size = output_size
        # Whether forward() also returns the argmax index tensor.
        self.return_indices = return_indices

    def extra_repr(self) -> str:
        """Return the configuration string shown by ``repr(module)``."""
        return f"output_size={self.output_size}"
1259
+
1260
+
1261
+ # FIXME (by @ssnl): Improve adaptive pooling docs: specify what the input and
1262
+ # output shapes are, and how the operation computes output.
1263
+
1264
+
1265
class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd):
    r"""Applies 1D adaptive max pooling over an input of several input planes.

    Whatever the input length, the output has length :math:`L_{out}` and the
    same number of channels as the input.

    Args:
        output_size: the target output size :math:`L_{out}`.
        return_indices: if ``True``, also return the argmax indices, usable
            with nn.MaxUnpool1d. Default: ``False``

    Shape:
        - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
        - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
          :math:`L_{out}=\text{output\_size}`.

    Examples:
        >>> # target output size of 5
        >>> m = nn.AdaptiveMaxPool1d(5)
        >>> output = m(torch.randn(1, 64, 8))
    """

    output_size: _size_1_t

    def forward(self, input: Tensor):
        """Pool ``input`` down to ``self.output_size`` along the last dim."""
        size = self.output_size
        return F.adaptive_max_pool1d(input, size, self.return_indices)
1293
+
1294
+
1295
class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
    r"""Applies 2D adaptive max pooling over an input of several input planes.

    Whatever the input size, the output has size
    :math:`H_{out} \times W_{out}` and the same number of channels as the
    input.

    Args:
        output_size: target output size :math:`H_{out} \times W_{out}`; a
            tuple :math:`(H_{out}, W_{out})` or a single :math:`H_{out}` for a
            square output. Each component may be an ``int`` or ``None``
            (``None`` keeps the corresponding input size).
        return_indices: if ``True``, also return the argmax indices, usable
            with nn.MaxUnpool2d. Default: ``False``

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`,
          where :math:`(H_{out}, W_{out})=\text{output\_size}`.

    Examples:
        >>> # target output size of 5x7
        >>> m = nn.AdaptiveMaxPool2d((5, 7))
        >>> output = m(torch.randn(1, 64, 8, 9))
        >>> # target output size of 7x7 (square)
        >>> m = nn.AdaptiveMaxPool2d(7)
        >>> output = m(torch.randn(1, 64, 10, 9))
        >>> # target output size of 10x7
        >>> m = nn.AdaptiveMaxPool2d((None, 7))
        >>> output = m(torch.randn(1, 64, 10, 9))
    """

    output_size: _size_2_opt_t

    def forward(self, input: Tensor):
        """Pool ``input`` down to ``self.output_size`` spatially."""
        size = self.output_size
        return F.adaptive_max_pool2d(input, size, self.return_indices)
1335
+
1336
+
1337
class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd):
    r"""Applies 3D adaptive max pooling over an input of several input planes.

    Whatever the input size, the output has size
    :math:`D_{out} \times H_{out} \times W_{out}` and the same number of
    channels as the input.

    Args:
        output_size: target output size
            :math:`D_{out} \times H_{out} \times W_{out}`; a tuple
            :math:`(D_{out}, H_{out}, W_{out})` or a single :math:`D_{out}`
            for a cubic output. Each component may be an ``int`` or ``None``
            (``None`` keeps the corresponding input size).
        return_indices: if ``True``, also return the argmax indices, usable
            with nn.MaxUnpool3d. Default: ``False``

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`,
          where :math:`(D_{out}, H_{out}, W_{out})=\text{output\_size}`.

    Examples:
        >>> # target output size of 5x7x9
        >>> m = nn.AdaptiveMaxPool3d((5, 7, 9))
        >>> output = m(torch.randn(1, 64, 8, 9, 10))
        >>> # target output size of 7x7x7 (cube)
        >>> m = nn.AdaptiveMaxPool3d(7)
        >>> output = m(torch.randn(1, 64, 10, 9, 8))
        >>> # target output size of 7x9x8
        >>> m = nn.AdaptiveMaxPool3d((7, None, None))
        >>> output = m(torch.randn(1, 64, 10, 9, 8))
    """

    output_size: _size_3_opt_t

    def forward(self, input: Tensor):
        """Pool ``input`` down to ``self.output_size`` spatially."""
        size = self.output_size
        return F.adaptive_max_pool3d(input, size, self.return_indices)
1378
+
1379
+
1380
class _AdaptiveAvgPoolNd(Module):
    """Common constructor and repr for the AdaptiveAvgPool1d/2d/3d modules.

    Subclasses only add a ``forward`` that dispatches to the matching
    ``F.adaptive_avg_poolNd`` functional.
    """

    __constants__ = ["output_size"]

    def __init__(self, output_size: _size_any_opt_t) -> None:
        super().__init__()
        # Target spatial output size; components may be None (keep input size).
        self.output_size = output_size

    def extra_repr(self) -> str:
        """Return the configuration string shown by ``repr(module)``."""
        return f"output_size={self.output_size}"
1389
+
1390
+
1391
class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd):
    r"""Applies 1D adaptive average pooling over an input of several planes.

    Whatever the input length, the output has length :math:`L_{out}` and the
    same number of channels as the input.

    Args:
        output_size: the target output size :math:`L_{out}`.

    Shape:
        - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
        - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
          :math:`L_{out}=\text{output\_size}`.

    Examples:
        >>> # target output size of 5
        >>> m = nn.AdaptiveAvgPool1d(5)
        >>> output = m(torch.randn(1, 64, 8))
    """

    output_size: _size_1_t

    def forward(self, input: Tensor) -> Tensor:
        """Average-pool ``input`` down to ``self.output_size`` along the last dim."""
        return F.adaptive_avg_pool1d(input, self.output_size)
1417
+
1418
+
1419
class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
    r"""Applies 2D adaptive average pooling over an input of several planes.

    Whatever the input size, the output has size H x W and the same number of
    channels as the input.

    Args:
        output_size: target output size of the form H x W; a tuple ``(H, W)``
            or a single ``H`` for a square output. ``H`` and ``W`` may each be
            an ``int`` or ``None`` (``None`` keeps the corresponding input
            size).

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, S_{0}, S_{1})` or :math:`(C, S_{0}, S_{1})`, where
          :math:`S=\text{output\_size}`.

    Examples:
        >>> # target output size of 5x7
        >>> m = nn.AdaptiveAvgPool2d((5, 7))
        >>> output = m(torch.randn(1, 64, 8, 9))
        >>> # target output size of 7x7 (square)
        >>> m = nn.AdaptiveAvgPool2d(7)
        >>> output = m(torch.randn(1, 64, 10, 9))
        >>> # target output size of 10x7
        >>> m = nn.AdaptiveAvgPool2d((None, 7))
        >>> output = m(torch.randn(1, 64, 10, 9))
    """

    output_size: _size_2_opt_t

    def forward(self, input: Tensor) -> Tensor:
        """Average-pool ``input`` down to ``self.output_size`` spatially."""
        return F.adaptive_avg_pool2d(input, self.output_size)
1456
+
1457
+
1458
class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd):
    r"""Applies 3D adaptive average pooling over an input of several planes.

    Whatever the input size, the output has size D x H x W and the same number
    of channels as the input.

    Args:
        output_size: target output size of the form D x H x W; a tuple
            ``(D, H, W)`` or a single ``D`` for a cubic output. ``D``, ``H``
            and ``W`` may each be an ``int`` or ``None`` (``None`` keeps the
            corresponding input size).

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, S_{0}, S_{1}, S_{2})` or :math:`(C, S_{0}, S_{1}, S_{2})`,
          where :math:`S=\text{output\_size}`.

    Examples:
        >>> # target output size of 5x7x9
        >>> m = nn.AdaptiveAvgPool3d((5, 7, 9))
        >>> output = m(torch.randn(1, 64, 8, 9, 10))
        >>> # target output size of 7x7x7 (cube)
        >>> m = nn.AdaptiveAvgPool3d(7)
        >>> output = m(torch.randn(1, 64, 10, 9, 8))
        >>> # target output size of 7x9x8
        >>> m = nn.AdaptiveAvgPool3d((7, None, None))
        >>> output = m(torch.randn(1, 64, 10, 9, 8))
    """

    output_size: _size_3_opt_t

    def forward(self, input: Tensor) -> Tensor:
        """Average-pool ``input`` down to ``self.output_size`` spatially."""
        return F.adaptive_avg_pool3d(input, self.output_size)
janus/lib/python3.10/site-packages/torch/nn/modules/rnn.py ADDED
@@ -0,0 +1,1824 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-decorators
2
+ # mypy: allow-untyped-defs
3
+ import math
4
+ import numbers
5
+ import warnings
6
+ import weakref
7
+ from typing import List, Optional, overload, Tuple
8
+ from typing_extensions import deprecated
9
+
10
+ import torch
11
+ from torch import _VF, Tensor
12
+ from torch.nn import init
13
+ from torch.nn.parameter import Parameter
14
+ from torch.nn.utils.rnn import PackedSequence
15
+
16
+ from .module import Module
17
+
18
+
19
# Public API of this module, re-exported through `torch.nn`.
__all__ = [
    "RNNBase",
    "RNN",
    "LSTM",
    "GRU",
    "RNNCellBase",
    "RNNCell",
    "LSTMCell",
    "GRUCell",
]

# Maps an RNNBase ``mode`` string to the fused _VF kernel for the plain
# (non-gated) RNN flavors; LSTM/GRU dispatch through their own paths.
_rnn_impls = {
    "RNN_TANH": _VF.rnn_tanh,
    "RNN_RELU": _VF.rnn_relu,
}
34
+
35
+
36
+ def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
37
+ return tensor.index_select(dim, permutation)
38
+
39
+
40
@deprecated(
    "`apply_permutation` is deprecated, please use `tensor.index_select(dim, permutation)` instead",
    category=FutureWarning,
)
def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
    # Deprecated public alias kept for backward compatibility: emits a
    # FutureWarning on call, then forwards to the private implementation.
    return _apply_permutation(tensor, permutation, dim)
46
+
47
+
48
+ class RNNBase(Module):
49
+ r"""Base class for RNN modules (RNN, LSTM, GRU).
50
+
51
+ Implements aspects of RNNs shared by the RNN, LSTM, and GRU classes, such as module initialization
52
+ and utility methods for parameter storage management.
53
+
54
+ .. note::
55
+ The forward method is not implemented by the RNNBase class.
56
+
57
+ .. note::
58
+ LSTM and GRU classes override some methods implemented by RNNBase.
59
+ """
60
+
61
+ __constants__ = [
62
+ "mode",
63
+ "input_size",
64
+ "hidden_size",
65
+ "num_layers",
66
+ "bias",
67
+ "batch_first",
68
+ "dropout",
69
+ "bidirectional",
70
+ "proj_size",
71
+ ]
72
+ __jit_unused_properties__ = ["all_weights"]
73
+
74
+ mode: str
75
+ input_size: int
76
+ hidden_size: int
77
+ num_layers: int
78
+ bias: bool
79
+ batch_first: bool
80
+ dropout: float
81
+ bidirectional: bool
82
+ proj_size: int
83
+
84
    def __init__(
        self,
        mode: str,
        input_size: int,
        hidden_size: int,
        num_layers: int = 1,
        bias: bool = True,
        batch_first: bool = False,
        dropout: float = 0.0,
        bidirectional: bool = False,
        proj_size: int = 0,
        device=None,
        dtype=None,
    ) -> None:
        """Validate the configuration and allocate per-layer/direction parameters.

        ``mode`` selects the cell flavor ("LSTM", "GRU", "RNN_TANH" or
        "RNN_RELU") and determines the gate count; ``proj_size > 0`` enables
        LSTM output projections. Raises ``ValueError`` on any invalid
        combination of arguments.
        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.mode = mode
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = float(dropout)
        self.bidirectional = bidirectional
        self.proj_size = proj_size
        self._flat_weight_refs: List[Optional[weakref.ReferenceType[Parameter]]] = []
        num_directions = 2 if bidirectional else 1

        # bool is a Number subclass, so it is rejected explicitly.
        if (
            not isinstance(dropout, numbers.Number)
            or not 0 <= dropout <= 1
            or isinstance(dropout, bool)
        ):
            raise ValueError(
                "dropout should be a number in range [0, 1] "
                "representing the probability of an element being "
                "zeroed"
            )
        # Dropout is applied between layers, so it is a no-op for one layer.
        if dropout > 0 and num_layers == 1:
            warnings.warn(
                "dropout option adds dropout after all but last "
                "recurrent layer, so non-zero dropout expects "
                f"num_layers greater than 1, but got dropout={dropout} and "
                f"num_layers={num_layers}"
            )

        if not isinstance(hidden_size, int):
            raise TypeError(
                f"hidden_size should be of type int, got: {type(hidden_size).__name__}"
            )
        if hidden_size <= 0:
            raise ValueError("hidden_size must be greater than zero")
        if num_layers <= 0:
            raise ValueError("num_layers must be greater than zero")
        if proj_size < 0:
            raise ValueError(
                "proj_size should be a positive integer or zero to disable projections"
            )
        if proj_size >= hidden_size:
            raise ValueError("proj_size has to be smaller than hidden_size")

        # Number of stacked gates per step determines the leading weight dim.
        if mode == "LSTM":
            gate_size = 4 * hidden_size
        elif mode == "GRU":
            gate_size = 3 * hidden_size
        elif mode == "RNN_TANH":
            gate_size = hidden_size
        elif mode == "RNN_RELU":
            gate_size = hidden_size
        else:
            raise ValueError("Unrecognized RNN mode: " + mode)

        self._flat_weights_names = []
        self._all_weights = []
        for layer in range(num_layers):
            for direction in range(num_directions):
                # With projections the recurrent state (and hence the next
                # layer's input) has proj_size features instead of hidden_size.
                real_hidden_size = proj_size if proj_size > 0 else hidden_size
                layer_input_size = (
                    input_size if layer == 0 else real_hidden_size * num_directions
                )

                w_ih = Parameter(
                    torch.empty((gate_size, layer_input_size), **factory_kwargs)
                )
                w_hh = Parameter(
                    torch.empty((gate_size, real_hidden_size), **factory_kwargs)
                )
                b_ih = Parameter(torch.empty(gate_size, **factory_kwargs))
                # Second bias vector included for CuDNN compatibility. Only one
                # bias vector is needed in standard definition.
                b_hh = Parameter(torch.empty(gate_size, **factory_kwargs))
                layer_params: Tuple[Tensor, ...] = ()
                if self.proj_size == 0:
                    if bias:
                        layer_params = (w_ih, w_hh, b_ih, b_hh)
                    else:
                        layer_params = (w_ih, w_hh)
                else:
                    w_hr = Parameter(
                        torch.empty((proj_size, hidden_size), **factory_kwargs)
                    )
                    if bias:
                        layer_params = (w_ih, w_hh, b_ih, b_hh, w_hr)
                    else:
                        layer_params = (w_ih, w_hh, w_hr)

                suffix = "_reverse" if direction == 1 else ""
                param_names = ["weight_ih_l{}{}", "weight_hh_l{}{}"]
                if bias:
                    param_names += ["bias_ih_l{}{}", "bias_hh_l{}{}"]
                if self.proj_size > 0:
                    param_names += ["weight_hr_l{}{}"]
                param_names = [x.format(layer, suffix) for x in param_names]

                # Register each parameter under its cuDNN-style attribute name.
                for name, param in zip(param_names, layer_params):
                    setattr(self, name, param)
                self._flat_weights_names.extend(param_names)
                self._all_weights.append(param_names)

        self._init_flat_weights()

        self.reset_parameters()
206
+
207
    def _init_flat_weights(self):
        """Rebuild the flat-weight cache and try to re-flatten for cuDNN."""
        # One entry per registered name; None for names not (yet) set.
        self._flat_weights = [
            getattr(self, wn) if hasattr(self, wn) else None
            for wn in self._flat_weights_names
        ]
        # Weak refs allow later code to detect parameter replacement without
        # keeping the old tensors alive.
        self._flat_weight_refs = [
            weakref.ref(w) if w is not None else None for w in self._flat_weights
        ]
        self.flatten_parameters()
216
+
217
    def __setattr__(self, attr, value):
        # Intercept assignment to a flat-weight name (e.g. ``weight_ih_l0``)
        # so the cached ``_flat_weights`` list stays in sync with the module.
        if hasattr(self, "_flat_weights_names") and attr in self._flat_weights_names:
            # keep self._flat_weights up to date if you do self.weight = ...
            idx = self._flat_weights_names.index(attr)
            self._flat_weights[idx] = value
        super().__setattr__(attr, value)
223
+
224
    def flatten_parameters(self) -> None:
        """Reset parameter data pointer so that they can use faster code paths.

        Right now, this works only if the module is on the GPU and cuDNN is enabled.
        Otherwise, it's a no-op.
        """
        # Short-circuits if _flat_weights is only partially instantiated
        if len(self._flat_weights) != len(self._flat_weights_names):
            return

        for w in self._flat_weights:
            if not isinstance(w, Tensor):
                return
        # Short-circuits if any tensor in self._flat_weights is not acceptable to cuDNN
        # or the tensors in _flat_weights are of different dtypes

        first_fw = self._flat_weights[0]
        dtype = first_fw.dtype
        for fw in self._flat_weights:
            if (
                not isinstance(fw, Tensor)
                or not (fw.dtype == dtype)
                or not fw.is_cuda
                or not torch.backends.cudnn.is_acceptable(fw)
            ):
                return

        # If any parameters alias, we fall back to the slower, copying code path. This is
        # a sufficient check, because overlapping parameter buffers that don't completely
        # alias would break the assumptions of the uniqueness check in
        # Module.named_parameters().
        unique_data_ptrs = {p.data_ptr() for p in self._flat_weights}
        if len(unique_data_ptrs) != len(self._flat_weights):
            return

        with torch.cuda.device_of(first_fw):
            import torch.backends.cudnn.rnn as rnn

            # Note: no_grad() is necessary since _cudnn_rnn_flatten_weight is
            # an inplace operation on self._flat_weights
            with torch.no_grad():
                if torch._use_cudnn_rnn_flatten_weight():
                    # Per layer/direction: w_ih + w_hh (+ 2 biases) (+ w_hr).
                    num_weights = 4 if self.bias else 2
                    if self.proj_size > 0:
                        num_weights += 1
                    torch._cudnn_rnn_flatten_weight(
                        self._flat_weights,
                        num_weights,
                        self.input_size,
                        rnn.get_cudnn_mode(self.mode),
                        self.hidden_size,
                        self.proj_size,
                        self.num_layers,
                        self.batch_first,
                        bool(self.bidirectional),
                    )
280
+
281
def _apply(self, fn, recurse=True):
    # Drop the weak references first: fn may replace every parameter tensor
    # (e.g. .to()/.cuda()), which would leave the cached refs stale.
    self._flat_weight_refs = []
    ret = super()._apply(fn, recurse)

    # Resets _flat_weights
    # Note: be v. careful before removing this, as 3rd party device types
    # likely rely on this behavior to properly .to() modules like LSTM.
    self._init_flat_weights()

    return ret
291
+
292
def reset_parameters(self) -> None:
    """Re-initialize every parameter from U(-k, k) with k = 1/sqrt(hidden_size).

    With ``hidden_size == 0`` the bound degenerates to 0 and all parameters
    are zeroed.
    """
    bound = 1.0 / math.sqrt(self.hidden_size) if self.hidden_size > 0 else 0
    for param in self.parameters():
        init.uniform_(param, -bound, bound)
296
+
297
def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None:
    """Validate the dtype and shape of ``input`` against this module's config.

    Raises:
        ValueError: dtype of ``input`` differs from the weights' dtype
            (unless autocast is active).
        RuntimeError: wrong rank (2-D expected for packed input, 3-D
            otherwise) or wrong feature dimension.
    """
    # dtype validation is eager-only; TorchScript drops this branch.
    if not torch.jit.is_scripting():
        if (
            input.dtype != self._flat_weights[0].dtype
            and not torch._C._is_any_autocast_enabled()
        ):
            raise ValueError(
                f"input must have the type {self._flat_weights[0].dtype}, got type {input.dtype}"
            )
    # Packed sequences arrive pre-flattened, hence one dimension fewer.
    expected_input_dim = 3 if batch_sizes is None else 2
    if input.dim() != expected_input_dim:
        raise RuntimeError(
            f"input must have {expected_input_dim} dimensions, got {input.dim()}"
        )
    if input.size(-1) != self.input_size:
        raise RuntimeError(
            f"input.size(-1) must be equal to input_size. Expected {self.input_size}, got {input.size(-1)}"
        )
315
+
316
def get_expected_hidden_size(
    self, input: Tensor, batch_sizes: Optional[Tensor]
) -> Tuple[int, int, int]:
    """Return the shape ``h`` must have for ``input``.

    Shape is ``(num_layers * num_directions, batch, H)`` where ``H`` is
    ``proj_size`` when projections are enabled, else ``hidden_size``.
    """
    if batch_sizes is not None:
        # Packed input: the first entry is the largest batch in the sequence.
        mini_batch = int(batch_sizes[0])
    else:
        mini_batch = input.size(0) if self.batch_first else input.size(1)
    num_directions = 2 if self.bidirectional else 1
    if self.proj_size > 0:
        last_dim = self.proj_size
    else:
        last_dim = self.hidden_size
    return (self.num_layers * num_directions, mini_batch, last_dim)
337
+
338
def check_hidden_size(
    self,
    hx: Tensor,
    expected_hidden_size: Tuple[int, int, int],
    msg: str = "Expected hidden size {}, got {}",
) -> None:
    """Raise ``RuntimeError`` when ``hx``'s shape deviates from the expected one."""
    actual = hx.size()
    if actual == expected_hidden_size:
        return
    raise RuntimeError(msg.format(expected_hidden_size, list(actual)))
346
+
347
def _weights_have_changed(self):
    # Returns True when any weight tensor is no longer the object the cached
    # weak reference points at — happens e.g. under torch.func.functional_call().
    for ref, name in zip(self._flat_weight_refs, self._flat_weights_names):
        current = getattr(self, name) if hasattr(self, name) else None
        if current is None or ref is None:
            continue
        if ref() is not current:
            return True
    return False
357
+
358
def check_forward_args(
    self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]
):
    """Validate both the input tensor and the hidden state before dispatch."""
    self.check_input(input, batch_sizes)
    self.check_hidden_size(
        hidden, self.get_expected_hidden_size(input, batch_sizes)
    )
365
+
366
def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]):
    """Reorder ``hx`` along the batch dim; identity when ``permutation`` is None."""
    return hx if permutation is None else _apply_permutation(hx, permutation)
370
+
371
def extra_repr(self) -> str:
    """Build the repr suffix, listing only options that differ from defaults."""
    parts = ["{input_size}, {hidden_size}"]
    if self.proj_size != 0:
        parts.append("proj_size={proj_size}")
    if self.num_layers != 1:
        parts.append("num_layers={num_layers}")
    # Identity checks (not ==) mirror the defaults exactly, so e.g. bias=1
    # would still be printed.
    if self.bias is not True:
        parts.append("bias={bias}")
    if self.batch_first is not False:
        parts.append("batch_first={batch_first}")
    if self.dropout != 0:
        parts.append("dropout={dropout}")
    if self.bidirectional is not False:
        parts.append("bidirectional={bidirectional}")
    return ", ".join(parts).format(**self.__dict__)
386
+
387
def _update_flat_weights(self):
    # Rebuild the flattened-weight cache when the parameter tensors were
    # swapped out since the last forward (e.g. torch.func.functional_call).
    # The is_scripting() guard keeps _weights_have_changed (weakref-based,
    # not scriptable) out of TorchScript compilation.
    if not torch.jit.is_scripting():
        if self._weights_have_changed():
            self._init_flat_weights()
391
+
392
def __getstate__(self):
    """Return picklable state; weak references are dropped (rebuilt on load)."""
    # If weights have been changed, update the _flat_weights in __getstate__ here.
    self._update_flat_weights()
    # Weakrefs cannot be pickled; __setstate__ recreates them.
    state = dict(self.__dict__)
    state.pop("_flat_weight_refs")
    return state
399
+
400
def __setstate__(self, d):
    """Restore state, upgrading checkpoints saved by older PyTorch versions."""
    super().__setstate__(d)
    if "all_weights" in d:
        self._all_weights = d["all_weights"]
    # In PyTorch 1.8 we added a proj_size member variable to LSTM.
    # LSTMs that were serialized via torch.save(module) before PyTorch 1.8
    # don't have it, so to preserve compatibility we set proj_size here.
    if "proj_size" not in d:
        self.proj_size = 0

    # Very old checkpoints stored the tensors themselves in _all_weights;
    # rebuild the name lists from scratch in that case.
    if not isinstance(self._all_weights[0][0], str):
        num_layers = self.num_layers
        num_directions = 2 if self.bidirectional else 1
        self._flat_weights_names = []
        self._all_weights = []
        for layer in range(num_layers):
            for direction in range(num_directions):
                suffix = "_reverse" if direction == 1 else ""
                weights = [
                    "weight_ih_l{}{}",
                    "weight_hh_l{}{}",
                    "bias_ih_l{}{}",
                    "bias_hh_l{}{}",
                    "weight_hr_l{}{}",
                ]
                weights = [x.format(layer, suffix) for x in weights]
                # Which subset of the 5 names exists depends on bias/proj_size.
                if self.bias:
                    if self.proj_size > 0:
                        self._all_weights += [weights]
                        self._flat_weights_names.extend(weights)
                    else:
                        self._all_weights += [weights[:4]]
                        self._flat_weights_names.extend(weights[:4])
                else:
                    if self.proj_size > 0:
                        self._all_weights += [weights[:2]] + [weights[-1:]]
                        # NOTE(review): extends with weights[:2] plus a nested
                        # one-element list — looks inconsistent with the other
                        # branches; preserved as-is for checkpoint compatibility.
                        self._flat_weights_names.extend(
                            weights[:2] + [weights[-1:]]
                        )
                    else:
                        self._all_weights += [weights[:2]]
                        self._flat_weights_names.extend(weights[:2])
        self._flat_weights = [
            getattr(self, wn) if hasattr(self, wn) else None
            for wn in self._flat_weights_names
        ]

    # Recreate the weak references dropped by __getstate__.
    self._flat_weight_refs = [
        weakref.ref(w) if w is not None else None for w in self._flat_weights
    ]
450
+
451
@property
def all_weights(self) -> List[List[Parameter]]:
    """Nested list of parameter tensors, one inner list per layer/direction slot."""
    result: List[List[Parameter]] = []
    for weight_names in self._all_weights:
        result.append([getattr(self, name) for name in weight_names])
    return result
457
+
458
def _replicate_for_data_parallel(self):
    """Return a DataParallel replica with its own flat-weight caches."""
    replica = super()._replicate_for_data_parallel()
    # Need to copy these caches, otherwise the replica will share the same
    # flat weights list (shallow list copies are enough; the tensors are
    # replaced per-device by DataParallel).
    replica._flat_weights = replica._flat_weights[:]
    replica._flat_weights_names = replica._flat_weights_names[:]
    return replica
465
+
466
+
467
+ class RNN(RNNBase):
468
+ r"""__init__(input_size,hidden_size,num_layers=1,nonlinearity='tanh',bias=True,batch_first=False,dropout=0.0,bidirectional=False,device=None,dtype=None)
469
+
470
+ Apply a multi-layer Elman RNN with :math:`\tanh` or :math:`\text{ReLU}`
471
+ non-linearity to an input sequence. For each element in the input sequence,
472
+ each layer computes the following function:
473
+
474
+ .. math::
475
+ h_t = \tanh(x_t W_{ih}^T + b_{ih} + h_{t-1}W_{hh}^T + b_{hh})
476
+
477
+ where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is
478
+ the input at time `t`, and :math:`h_{(t-1)}` is the hidden state of the
479
+ previous layer at time `t-1` or the initial hidden state at time `0`.
480
+ If :attr:`nonlinearity` is ``'relu'``, then :math:`\text{ReLU}` is used instead of :math:`\tanh`.
481
+
482
+ .. code-block:: python
483
+
484
+ # Efficient implementation equivalent to the following with bidirectional=False
485
+ def forward(x, h_0=None):
486
+ if batch_first:
487
+ x = x.transpose(0, 1)
488
+ seq_len, batch_size, _ = x.size()
489
+ if h_0 is None:
490
+ h_0 = torch.zeros(num_layers, batch_size, hidden_size)
491
+ h_t_minus_1 = h_0
492
+ h_t = h_0
493
+ output = []
494
+ for t in range(seq_len):
495
+ for layer in range(num_layers):
496
+ h_t[layer] = torch.tanh(
497
+ x[t] @ weight_ih[layer].T
498
+ + bias_ih[layer]
499
+ + h_t_minus_1[layer] @ weight_hh[layer].T
500
+ + bias_hh[layer]
501
+ )
502
+ output.append(h_t[-1])
503
+ h_t_minus_1 = h_t
504
+ output = torch.stack(output)
505
+ if batch_first:
506
+ output = output.transpose(0, 1)
507
+ return output, h_t
508
+
509
+ Args:
510
+ input_size: The number of expected features in the input `x`
511
+ hidden_size: The number of features in the hidden state `h`
512
+ num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
513
+ would mean stacking two RNNs together to form a `stacked RNN`,
514
+ with the second RNN taking in outputs of the first RNN and
515
+ computing the final results. Default: 1
516
+ nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
517
+ bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
518
+ Default: ``True``
519
+ batch_first: If ``True``, then the input and output tensors are provided
520
+ as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
521
+ Note that this does not apply to hidden or cell states. See the
522
+ Inputs/Outputs sections below for details. Default: ``False``
523
+ dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
524
+ RNN layer except the last layer, with dropout probability equal to
525
+ :attr:`dropout`. Default: 0
526
+ bidirectional: If ``True``, becomes a bidirectional RNN. Default: ``False``
527
+
528
+ Inputs: input, h_0
529
+ * **input**: tensor of shape :math:`(L, H_{in})` for unbatched input,
530
+ :math:`(L, N, H_{in})` when ``batch_first=False`` or
531
+ :math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
532
+ the input sequence. The input can also be a packed variable length sequence.
533
+ See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
534
+ :func:`torch.nn.utils.rnn.pack_sequence` for details.
535
+ * **h_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
536
+ :math:`(D * \text{num\_layers}, N, H_{out})` containing the initial hidden
537
+ state for the input sequence batch. Defaults to zeros if not provided.
538
+
539
+ where:
540
+
541
+ .. math::
542
+ \begin{aligned}
543
+ N ={} & \text{batch size} \\
544
+ L ={} & \text{sequence length} \\
545
+ D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
546
+ H_{in} ={} & \text{input\_size} \\
547
+ H_{out} ={} & \text{hidden\_size}
548
+ \end{aligned}
549
+
550
+ Outputs: output, h_n
551
+ * **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input,
552
+ :math:`(L, N, D * H_{out})` when ``batch_first=False`` or
553
+ :math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
554
+ `(h_t)` from the last layer of the RNN, for each `t`. If a
555
+ :class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output
556
+ will also be a packed sequence.
557
+ * **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
558
+ :math:`(D * \text{num\_layers}, N, H_{out})` containing the final hidden state
559
+ for each element in the batch.
560
+
561
+ Attributes:
562
+ weight_ih_l[k]: the learnable input-hidden weights of the k-th layer,
563
+ of shape `(hidden_size, input_size)` for `k = 0`. Otherwise, the shape is
564
+ `(hidden_size, num_directions * hidden_size)`
565
+ weight_hh_l[k]: the learnable hidden-hidden weights of the k-th layer,
566
+ of shape `(hidden_size, hidden_size)`
567
+ bias_ih_l[k]: the learnable input-hidden bias of the k-th layer,
568
+ of shape `(hidden_size)`
569
+ bias_hh_l[k]: the learnable hidden-hidden bias of the k-th layer,
570
+ of shape `(hidden_size)`
571
+
572
+ .. note::
573
+ All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
574
+ where :math:`k = \frac{1}{\text{hidden\_size}}`
575
+
576
+ .. note::
577
+ For bidirectional RNNs, forward and backward are directions 0 and 1 respectively.
578
+ Example of splitting the output layers when ``batch_first=False``:
579
+ ``output.view(seq_len, batch, num_directions, hidden_size)``.
580
+
581
+ .. note::
582
+ ``batch_first`` argument is ignored for unbatched inputs.
583
+
584
+ .. include:: ../cudnn_rnn_determinism.rst
585
+
586
+ .. include:: ../cudnn_persistent_rnn.rst
587
+
588
+ Examples::
589
+
590
+ >>> rnn = nn.RNN(10, 20, 2)
591
+ >>> input = torch.randn(5, 3, 10)
592
+ >>> h0 = torch.randn(2, 3, 20)
593
+ >>> output, hn = rnn(input, h0)
594
+ """
595
+
596
@overload
def __init__(
    self,
    input_size: int,
    hidden_size: int,
    num_layers: int = 1,
    nonlinearity: str = "tanh",
    bias: bool = True,
    batch_first: bool = False,
    dropout: float = 0.0,
    bidirectional: bool = False,
    device=None,
    dtype=None,
) -> None:
    ...

@overload
def __init__(self, *args, **kwargs):
    ...

def __init__(self, *args, **kwargs):
    """Translate ``nonlinearity`` into an RNNBase mode and delegate the rest."""
    if "proj_size" in kwargs:
        raise ValueError(
            "proj_size argument is only supported for LSTM, not RNN or GRU"
        )
    # ``nonlinearity`` is positional argument #4 of this class but unknown
    # to RNNBase — pull it out before forwarding.
    if len(args) > 3:
        self.nonlinearity = args[3]
        args = args[:3] + args[4:]
    else:
        self.nonlinearity = kwargs.pop("nonlinearity", "tanh")
    mode_for = {"tanh": "RNN_TANH", "relu": "RNN_RELU"}
    if self.nonlinearity not in mode_for:
        raise ValueError(
            f"Unknown nonlinearity '{self.nonlinearity}'. Select from 'tanh' or 'relu'."
        )
    super().__init__(mode_for[self.nonlinearity], *args, **kwargs)
635
+
636
@overload
@torch._jit_internal._overload_method  # noqa: F811
def forward(
    self, input: Tensor, hx: Optional[Tensor] = None
) -> Tuple[Tensor, Tensor]:
    pass

@overload
@torch._jit_internal._overload_method  # noqa: F811
def forward(
    self, input: PackedSequence, hx: Optional[Tensor] = None
) -> Tuple[PackedSequence, Tensor]:
    pass

def forward(self, input, hx=None):  # noqa: F811
    """Run the RNN over ``input`` (Tensor or PackedSequence), returning (output, h_n)."""
    # Refresh _flat_weights in case parameters were swapped since last call.
    self._update_flat_weights()

    num_directions = 2 if self.bidirectional else 1
    orig_input = input

    if isinstance(orig_input, PackedSequence):
        input, batch_sizes, sorted_indices, unsorted_indices = input
        max_batch_size = batch_sizes[0]
        # script() is unhappy when max_batch_size is different type in cond branches, so we duplicate
        if hx is None:
            hx = torch.zeros(
                self.num_layers * num_directions,
                max_batch_size,
                self.hidden_size,
                dtype=input.dtype,
                device=input.device,
            )
        else:
            # Each batch of the hidden state should match the input sequence that
            # the user believes he/she is passing in.
            hx = self.permute_hidden(hx, sorted_indices)
    else:
        batch_sizes = None
        if input.dim() not in (2, 3):
            raise ValueError(
                f"RNN: Expected input to be 2D or 3D, got {input.dim()}D tensor instead"
            )
        is_batched = input.dim() == 3
        batch_dim = 0 if self.batch_first else 1
        # Unbatched inputs are promoted to batch size 1 and squeezed back
        # before returning.
        if not is_batched:
            input = input.unsqueeze(batch_dim)
            if hx is not None:
                if hx.dim() != 2:
                    raise RuntimeError(
                        f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor"
                    )
                hx = hx.unsqueeze(1)
        else:
            if hx is not None and hx.dim() != 3:
                raise RuntimeError(
                    f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor"
                )
        max_batch_size = input.size(0) if self.batch_first else input.size(1)
        sorted_indices = None
        unsorted_indices = None
        if hx is None:
            hx = torch.zeros(
                self.num_layers * num_directions,
                max_batch_size,
                self.hidden_size,
                dtype=input.dtype,
                device=input.device,
            )
        else:
            # Each batch of the hidden state should match the input sequence that
            # the user believes he/she is passing in.
            hx = self.permute_hidden(hx, sorted_indices)

    assert hx is not None
    self.check_forward_args(input, hx, batch_sizes)
    assert self.mode == "RNN_TANH" or self.mode == "RNN_RELU"
    # Four-way dispatch: packed vs padded input, tanh vs relu cell. The packed
    # variants take batch_sizes and have no batch_first argument.
    if batch_sizes is None:
        if self.mode == "RNN_TANH":
            result = _VF.rnn_tanh(
                input,
                hx,
                self._flat_weights,
                self.bias,
                self.num_layers,
                self.dropout,
                self.training,
                self.bidirectional,
                self.batch_first,
            )
        else:
            result = _VF.rnn_relu(
                input,
                hx,
                self._flat_weights,
                self.bias,
                self.num_layers,
                self.dropout,
                self.training,
                self.bidirectional,
                self.batch_first,
            )
    else:
        if self.mode == "RNN_TANH":
            result = _VF.rnn_tanh(
                input,
                batch_sizes,
                hx,
                self._flat_weights,
                self.bias,
                self.num_layers,
                self.dropout,
                self.training,
                self.bidirectional,
            )
        else:
            result = _VF.rnn_relu(
                input,
                batch_sizes,
                hx,
                self._flat_weights,
                self.bias,
                self.num_layers,
                self.dropout,
                self.training,
                self.bidirectional,
            )

    output = result[0]
    hidden = result[1]

    if isinstance(orig_input, PackedSequence):
        output_packed = PackedSequence(
            output, batch_sizes, sorted_indices, unsorted_indices
        )
        return output_packed, self.permute_hidden(hidden, unsorted_indices)

    # Undo the batch-dim promotion applied to unbatched inputs above.
    if not is_batched:  # type: ignore[possibly-undefined]
        output = output.squeeze(batch_dim)  # type: ignore[possibly-undefined]
        hidden = hidden.squeeze(1)

    return output, self.permute_hidden(hidden, unsorted_indices)
777
+
778
+
779
+ # XXX: LSTM and GRU implementation is different from RNNBase, this is because:
780
+ # 1. we want to support nn.LSTM and nn.GRU in TorchScript and TorchScript in
781
+ # its current state could not support the python Union Type or Any Type
782
+ # 2. TorchScript static typing does not allow a Function or Callable type in
783
+ # Dict values, so we have to separately call _VF instead of using _rnn_impls
784
+ # 3. This is temporary only and in the transition state that we want to make it
785
+ # on time for the release
786
+ #
787
+ # More discussion details in https://github.com/pytorch/pytorch/pull/23266
788
+ #
789
+ # TODO: remove the overriding implementations for LSTM and GRU when TorchScript
790
+ # support expressing these two modules generally.
791
+
792
+
793
+ class LSTM(RNNBase):
794
+ r"""__init__(input_size,hidden_size,num_layers=1,bias=True,batch_first=False,dropout=0.0,bidirectional=False,proj_size=0,device=None,dtype=None)
795
+
796
+ Apply a multi-layer long short-term memory (LSTM) RNN to an input sequence.
797
+ For each element in the input sequence, each layer computes the following
798
+ function:
799
+
800
+ .. math::
801
+ \begin{array}{ll} \\
802
+ i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{t-1} + b_{hi}) \\
803
+ f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{t-1} + b_{hf}) \\
804
+ g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{t-1} + b_{hg}) \\
805
+ o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{t-1} + b_{ho}) \\
806
+ c_t = f_t \odot c_{t-1} + i_t \odot g_t \\
807
+ h_t = o_t \odot \tanh(c_t) \\
808
+ \end{array}
809
+
810
+ where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the cell
811
+ state at time `t`, :math:`x_t` is the input at time `t`, :math:`h_{t-1}`
812
+ is the hidden state of the layer at time `t-1` or the initial hidden
813
+ state at time `0`, and :math:`i_t`, :math:`f_t`, :math:`g_t`,
814
+ :math:`o_t` are the input, forget, cell, and output gates, respectively.
815
+ :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.
816
+
817
+ In a multilayer LSTM, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
818
+ (:math:`l \ge 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
819
+ dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
820
+ variable which is :math:`0` with probability :attr:`dropout`.
821
+
822
+ If ``proj_size > 0`` is specified, LSTM with projections will be used. This changes
823
+ the LSTM cell in the following way. First, the dimension of :math:`h_t` will be changed from
824
+ ``hidden_size`` to ``proj_size`` (dimensions of :math:`W_{hi}` will be changed accordingly).
825
+ Second, the output hidden state of each layer will be multiplied by a learnable projection
826
+ matrix: :math:`h_t = W_{hr}h_t`. Note that as a consequence of this, the output
827
+ of LSTM network will be of different shape as well. See Inputs/Outputs sections below for exact
828
+ dimensions of all variables. You can find more details in https://arxiv.org/abs/1402.1128.
829
+
830
+ Args:
831
+ input_size: The number of expected features in the input `x`
832
+ hidden_size: The number of features in the hidden state `h`
833
+ num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
834
+ would mean stacking two LSTMs together to form a `stacked LSTM`,
835
+ with the second LSTM taking in outputs of the first LSTM and
836
+ computing the final results. Default: 1
837
+ bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
838
+ Default: ``True``
839
+ batch_first: If ``True``, then the input and output tensors are provided
840
+ as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
841
+ Note that this does not apply to hidden or cell states. See the
842
+ Inputs/Outputs sections below for details. Default: ``False``
843
+ dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
844
+ LSTM layer except the last layer, with dropout probability equal to
845
+ :attr:`dropout`. Default: 0
846
+ bidirectional: If ``True``, becomes a bidirectional LSTM. Default: ``False``
847
+ proj_size: If ``> 0``, will use LSTM with projections of corresponding size. Default: 0
848
+
849
+ Inputs: input, (h_0, c_0)
850
+ * **input**: tensor of shape :math:`(L, H_{in})` for unbatched input,
851
+ :math:`(L, N, H_{in})` when ``batch_first=False`` or
852
+ :math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
853
+ the input sequence. The input can also be a packed variable length sequence.
854
+ See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
855
+ :func:`torch.nn.utils.rnn.pack_sequence` for details.
856
+ * **h_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
857
+ :math:`(D * \text{num\_layers}, N, H_{out})` containing the
858
+ initial hidden state for each element in the input sequence.
859
+ Defaults to zeros if (h_0, c_0) is not provided.
860
+ * **c_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{cell})` for unbatched input or
861
+ :math:`(D * \text{num\_layers}, N, H_{cell})` containing the
862
+ initial cell state for each element in the input sequence.
863
+ Defaults to zeros if (h_0, c_0) is not provided.
864
+
865
+ where:
866
+
867
+ .. math::
868
+ \begin{aligned}
869
+ N ={} & \text{batch size} \\
870
+ L ={} & \text{sequence length} \\
871
+ D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
872
+ H_{in} ={} & \text{input\_size} \\
873
+ H_{cell} ={} & \text{hidden\_size} \\
874
+ H_{out} ={} & \text{proj\_size if } \text{proj\_size}>0 \text{ otherwise hidden\_size} \\
875
+ \end{aligned}
876
+
877
+ Outputs: output, (h_n, c_n)
878
+ * **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input,
879
+ :math:`(L, N, D * H_{out})` when ``batch_first=False`` or
880
+ :math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
881
+ `(h_t)` from the last layer of the LSTM, for each `t`. If a
882
+ :class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output
883
+ will also be a packed sequence. When ``bidirectional=True``, `output` will contain
884
+ a concatenation of the forward and reverse hidden states at each time step in the sequence.
885
+ * **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
886
+ :math:`(D * \text{num\_layers}, N, H_{out})` containing the
887
+ final hidden state for each element in the sequence. When ``bidirectional=True``,
888
+ `h_n` will contain a concatenation of the final forward and reverse hidden states, respectively.
889
+ * **c_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{cell})` for unbatched input or
890
+ :math:`(D * \text{num\_layers}, N, H_{cell})` containing the
891
+ final cell state for each element in the sequence. When ``bidirectional=True``,
892
+ `c_n` will contain a concatenation of the final forward and reverse cell states, respectively.
893
+
894
+ Attributes:
895
+ weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
896
+ `(W_ii|W_if|W_ig|W_io)`, of shape `(4*hidden_size, input_size)` for `k = 0`.
897
+ Otherwise, the shape is `(4*hidden_size, num_directions * hidden_size)`. If
898
+ ``proj_size > 0`` was specified, the shape will be
899
+ `(4*hidden_size, num_directions * proj_size)` for `k > 0`
900
+ weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
901
+ `(W_hi|W_hf|W_hg|W_ho)`, of shape `(4*hidden_size, hidden_size)`. If ``proj_size > 0``
902
+ was specified, the shape will be `(4*hidden_size, proj_size)`.
903
+ bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
904
+ `(b_ii|b_if|b_ig|b_io)`, of shape `(4*hidden_size)`
905
+ bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
906
+ `(b_hi|b_hf|b_hg|b_ho)`, of shape `(4*hidden_size)`
907
+ weight_hr_l[k] : the learnable projection weights of the :math:`\text{k}^{th}` layer
908
+ of shape `(proj_size, hidden_size)`. Only present when ``proj_size > 0`` was
909
+ specified.
910
+ weight_ih_l[k]_reverse: Analogous to `weight_ih_l[k]` for the reverse direction.
911
+ Only present when ``bidirectional=True``.
912
+ weight_hh_l[k]_reverse: Analogous to `weight_hh_l[k]` for the reverse direction.
913
+ Only present when ``bidirectional=True``.
914
+ bias_ih_l[k]_reverse: Analogous to `bias_ih_l[k]` for the reverse direction.
915
+ Only present when ``bidirectional=True``.
916
+ bias_hh_l[k]_reverse: Analogous to `bias_hh_l[k]` for the reverse direction.
917
+ Only present when ``bidirectional=True``.
918
+ weight_hr_l[k]_reverse: Analogous to `weight_hr_l[k]` for the reverse direction.
919
+ Only present when ``bidirectional=True`` and ``proj_size > 0`` was specified.
920
+
921
+ .. note::
922
+ All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
923
+ where :math:`k = \frac{1}{\text{hidden\_size}}`
924
+
925
+ .. note::
926
+ For bidirectional LSTMs, forward and backward are directions 0 and 1 respectively.
927
+ Example of splitting the output layers when ``batch_first=False``:
928
+ ``output.view(seq_len, batch, num_directions, hidden_size)``.
929
+
930
+ .. note::
931
+ For bidirectional LSTMs, `h_n` is not equivalent to the last element of `output`; the
932
+ former contains the final forward and reverse hidden states, while the latter contains the
933
+ final forward hidden state and the initial reverse hidden state.
934
+
935
+ .. note::
936
+ ``batch_first`` argument is ignored for unbatched inputs.
937
+
938
+ .. note::
939
+ ``proj_size`` should be smaller than ``hidden_size``.
940
+
941
+ .. include:: ../cudnn_rnn_determinism.rst
942
+
943
+ .. include:: ../cudnn_persistent_rnn.rst
944
+
945
+ Examples::
946
+
947
+ >>> rnn = nn.LSTM(10, 20, 2)
948
+ >>> input = torch.randn(5, 3, 10)
949
+ >>> h0 = torch.randn(2, 3, 20)
950
+ >>> c0 = torch.randn(2, 3, 20)
951
+ >>> output, (hn, cn) = rnn(input, (h0, c0))
952
+ """
953
+
954
@overload
def __init__(
    self,
    input_size: int,
    hidden_size: int,
    num_layers: int = 1,
    bias: bool = True,
    batch_first: bool = False,
    dropout: float = 0.0,
    bidirectional: bool = False,
    proj_size: int = 0,
    device=None,
    dtype=None,
) -> None:
    ...

@overload
def __init__(self, *args, **kwargs):
    ...

def __init__(self, *args, **kwargs):
    # All argument parsing, validation, and parameter registration happens
    # in RNNBase; the "LSTM" mode string selects the 4-gate weight layout.
    super().__init__("LSTM", *args, **kwargs)
976
+
977
def get_expected_cell_size(
    self, input: Tensor, batch_sizes: Optional[Tensor]
) -> Tuple[int, int, int]:
    """Return the shape the cell state ``c`` must have for ``input``.

    Unlike the hidden state, the cell state always uses ``hidden_size``
    as its last dimension, even when ``proj_size > 0``.
    """
    if batch_sizes is not None:
        # Packed input: the first entry is the largest batch in the sequence.
        mini_batch = int(batch_sizes[0])
    else:
        mini_batch = input.size(0) if self.batch_first else input.size(1)
    num_directions = 2 if self.bidirectional else 1
    return (self.num_layers * num_directions, mini_batch, self.hidden_size)
991
+
992
# In the future, we should prevent mypy from applying contravariance rules here.
# See torch/nn/modules/module.py::_forward_unimplemented
def check_forward_args(
    self,
    input: Tensor,
    hidden: Tuple[Tensor, Tensor],  # type: ignore[override]
    batch_sizes: Optional[Tensor],
):
    """Validate the input plus both halves (h, c) of the LSTM state tuple."""
    self.check_input(input, batch_sizes)
    self.check_hidden_size(
        hidden[0],
        self.get_expected_hidden_size(input, batch_sizes),
        "Expected hidden[0] size {}, got {}",
    )
    self.check_hidden_size(
        hidden[1],
        self.get_expected_cell_size(input, batch_sizes),
        "Expected hidden[1] size {}, got {}",
    )
1011
+
1012
# Same as above, see torch/nn/modules/module.py::_forward_unimplemented
def permute_hidden(  # type: ignore[override]
    self,
    hx: Tuple[Tensor, Tensor],
    permutation: Optional[Tensor],
) -> Tuple[Tensor, Tensor]:
    """Apply a batch ``permutation`` to both h and c; identity when None."""
    if permutation is None:
        return hx
    return (
        _apply_permutation(hx[0], permutation),
        _apply_permutation(hx[1], permutation),
    )
1023
+
1024
# Same as above, see torch/nn/modules/module.py::_forward_unimplemented
# These two stubs only declare the (Tensor | PackedSequence) input signatures
# for TorchScript's method-overload resolution; they carry no runtime logic.
@overload  # type: ignore[override]
@torch._jit_internal._overload_method  # noqa: F811
def forward(
    self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:  # noqa: F811
    pass

# Same as above, see torch/nn/modules/module.py::_forward_unimplemented
@overload
@torch._jit_internal._overload_method  # noqa: F811
def forward(
    self, input: PackedSequence, hx: Optional[Tuple[Tensor, Tensor]] = None
) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]:  # noqa: F811
    pass
1039
+
1040
+ def forward(self, input, hx=None): # noqa: F811
1041
+ self._update_flat_weights()
1042
+
1043
+ orig_input = input
1044
+ # xxx: isinstance check needs to be in conditional for TorchScript to compile
1045
+ batch_sizes = None
1046
+ do_permute = False
1047
+ num_directions = 2 if self.bidirectional else 1
1048
+ real_hidden_size = self.proj_size if self.proj_size > 0 else self.hidden_size
1049
+ if isinstance(orig_input, PackedSequence):
1050
+ input, batch_sizes, sorted_indices, unsorted_indices = input
1051
+ max_batch_size = batch_sizes[0]
1052
+ if hx is None:
1053
+ h_zeros = torch.zeros(
1054
+ self.num_layers * num_directions,
1055
+ max_batch_size,
1056
+ real_hidden_size,
1057
+ dtype=input.dtype,
1058
+ device=input.device,
1059
+ )
1060
+ c_zeros = torch.zeros(
1061
+ self.num_layers * num_directions,
1062
+ max_batch_size,
1063
+ self.hidden_size,
1064
+ dtype=input.dtype,
1065
+ device=input.device,
1066
+ )
1067
+ hx = (h_zeros, c_zeros)
1068
+ else:
1069
+ # Each batch of the hidden state should match the input sequence that
1070
+ # the user believes he/she is passing in.
1071
+ hx = self.permute_hidden(hx, sorted_indices)
1072
+ else:
1073
+ if input.dim() not in (2, 3):
1074
+ raise ValueError(
1075
+ f"LSTM: Expected input to be 2D or 3D, got {input.dim()}D instead"
1076
+ )
1077
+ is_batched = input.dim() == 3
1078
+ batch_dim = 0 if self.batch_first else 1
1079
+ if not is_batched:
1080
+ input = input.unsqueeze(batch_dim)
1081
+ max_batch_size = input.size(0) if self.batch_first else input.size(1)
1082
+ sorted_indices = None
1083
+ unsorted_indices = None
1084
+ if hx is None:
1085
+ h_zeros = torch.zeros(
1086
+ self.num_layers * num_directions,
1087
+ max_batch_size,
1088
+ real_hidden_size,
1089
+ dtype=input.dtype,
1090
+ device=input.device,
1091
+ )
1092
+ c_zeros = torch.zeros(
1093
+ self.num_layers * num_directions,
1094
+ max_batch_size,
1095
+ self.hidden_size,
1096
+ dtype=input.dtype,
1097
+ device=input.device,
1098
+ )
1099
+ hx = (h_zeros, c_zeros)
1100
+ self.check_forward_args(input, hx, batch_sizes)
1101
+ else:
1102
+ if is_batched:
1103
+ if hx[0].dim() != 3 or hx[1].dim() != 3:
1104
+ msg = (
1105
+ "For batched 3-D input, hx and cx should "
1106
+ f"also be 3-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors"
1107
+ )
1108
+ raise RuntimeError(msg)
1109
+ else:
1110
+ if hx[0].dim() != 2 or hx[1].dim() != 2:
1111
+ msg = (
1112
+ "For unbatched 2-D input, hx and cx should "
1113
+ f"also be 2-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors"
1114
+ )
1115
+ raise RuntimeError(msg)
1116
+ hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1))
1117
+ # Each batch of the hidden state should match the input sequence that
1118
+ # the user believes he/she is passing in.
1119
+ self.check_forward_args(input, hx, batch_sizes)
1120
+ hx = self.permute_hidden(hx, sorted_indices)
1121
+
1122
+ if batch_sizes is None:
1123
+ result = _VF.lstm(
1124
+ input,
1125
+ hx,
1126
+ self._flat_weights,
1127
+ self.bias,
1128
+ self.num_layers,
1129
+ self.dropout,
1130
+ self.training,
1131
+ self.bidirectional,
1132
+ self.batch_first,
1133
+ )
1134
+ else:
1135
+ result = _VF.lstm(
1136
+ input,
1137
+ batch_sizes,
1138
+ hx,
1139
+ self._flat_weights,
1140
+ self.bias,
1141
+ self.num_layers,
1142
+ self.dropout,
1143
+ self.training,
1144
+ self.bidirectional,
1145
+ )
1146
+ output = result[0]
1147
+ hidden = result[1:]
1148
+ # xxx: isinstance check needs to be in conditional for TorchScript to compile
1149
+ if isinstance(orig_input, PackedSequence):
1150
+ output_packed = PackedSequence(
1151
+ output, batch_sizes, sorted_indices, unsorted_indices
1152
+ )
1153
+ return output_packed, self.permute_hidden(hidden, unsorted_indices)
1154
+ else:
1155
+ if not is_batched: # type: ignore[possibly-undefined]
1156
+ output = output.squeeze(batch_dim) # type: ignore[possibly-undefined]
1157
+ hidden = (hidden[0].squeeze(1), hidden[1].squeeze(1))
1158
+ return output, self.permute_hidden(hidden, unsorted_indices)
1159
+
1160
+
1161
class GRU(RNNBase):
    r"""__init__(input_size,hidden_size,num_layers=1,bias=True,batch_first=False,dropout=0.0,bidirectional=False,device=None,dtype=None)

    Apply a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
    For each element in the input sequence, each layer computes the following
    function:

    .. math::
        \begin{array}{ll}
            r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
            z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
            n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)}+ b_{hn})) \\
            h_t = (1 - z_t) \odot n_t + z_t \odot h_{(t-1)}
        \end{array}

    where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input
    at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer
    at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`,
    :math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively.
    :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.

    In a multilayer GRU, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
    (:math:`l \ge 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
    dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
    variable which is :math:`0` with probability :attr:`dropout`.

    Args:
        input_size: The number of expected features in the input `x`
        hidden_size: The number of features in the hidden state `h`
        num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
            would mean stacking two GRUs together to form a `stacked GRU`,
            with the second GRU taking in outputs of the first GRU and
            computing the final results. Default: 1
        bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
            Default: ``True``
        batch_first: If ``True``, then the input and output tensors are provided
            as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
            Note that this does not apply to hidden or cell states. See the
            Inputs/Outputs sections below for details. Default: ``False``
        dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
            GRU layer except the last layer, with dropout probability equal to
            :attr:`dropout`. Default: 0
        bidirectional: If ``True``, becomes a bidirectional GRU. Default: ``False``

    Inputs: input, h_0
        * **input**: tensor of shape :math:`(L, H_{in})` for unbatched input,
          :math:`(L, N, H_{in})` when ``batch_first=False`` or
          :math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
          the input sequence. The input can also be a packed variable length sequence.
          See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
          :func:`torch.nn.utils.rnn.pack_sequence` for details.
        * **h_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` or
          :math:`(D * \text{num\_layers}, N, H_{out})`
          containing the initial hidden state for the input sequence. Defaults to zeros if not provided.

        where:

        .. math::
            \begin{aligned}
                N ={} & \text{batch size} \\
                L ={} & \text{sequence length} \\
                D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
                H_{in} ={} & \text{input\_size} \\
                H_{out} ={} & \text{hidden\_size}
            \end{aligned}

    Outputs: output, h_n
        * **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input,
          :math:`(L, N, D * H_{out})` when ``batch_first=False`` or
          :math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
          `(h_t)` from the last layer of the GRU, for each `t`. If a
          :class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output
          will also be a packed sequence.
        * **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` or
          :math:`(D * \text{num\_layers}, N, H_{out})` containing the final hidden state
          for the input sequence.

    Attributes:
        weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
            (W_ir|W_iz|W_in), of shape `(3*hidden_size, input_size)` for `k = 0`.
            Otherwise, the shape is `(3*hidden_size, num_directions * hidden_size)`
        weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
            (W_hr|W_hz|W_hn), of shape `(3*hidden_size, hidden_size)`
        bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
            (b_ir|b_iz|b_in), of shape `(3*hidden_size)`
        bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
            (b_hr|b_hz|b_hn), of shape `(3*hidden_size)`

    .. note::
        All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
        where :math:`k = \frac{1}{\text{hidden\_size}}`

    .. note::
        For bidirectional GRUs, forward and backward are directions 0 and 1 respectively.
        Example of splitting the output layers when ``batch_first=False``:
        ``output.view(seq_len, batch, num_directions, hidden_size)``.

    .. note::
        ``batch_first`` argument is ignored for unbatched inputs.

    .. note::
        The calculation of new gate :math:`n_t` subtly differs from the original paper and other frameworks.
        In the original implementation, the Hadamard product :math:`(\odot)` between :math:`r_t` and the
        previous hidden state :math:`h_{(t-1)}` is done before the multiplication with the weight matrix
        `W` and addition of bias:

        .. math::
            \begin{aligned}
                n_t = \tanh(W_{in} x_t + b_{in} + W_{hn} ( r_t \odot h_{(t-1)} ) + b_{hn})
            \end{aligned}

        This is in contrast to PyTorch implementation, which is done after :math:`W_{hn} h_{(t-1)}`

        .. math::
            \begin{aligned}
                n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)}+ b_{hn}))
            \end{aligned}

        This implementation differs on purpose for efficiency.

    .. include:: ../cudnn_persistent_rnn.rst

    Examples::

        >>> rnn = nn.GRU(10, 20, 2)
        >>> input = torch.randn(5, 3, 10)
        >>> h0 = torch.randn(2, 3, 20)
        >>> output, hn = rnn(input, h0)
    """

    @overload
    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        num_layers: int = 1,
        bias: bool = True,
        batch_first: bool = False,
        dropout: float = 0.0,
        bidirectional: bool = False,
        device=None,
        dtype=None,
    ) -> None:
        ...

    @overload
    def __init__(self, *args, **kwargs):
        ...

    def __init__(self, *args, **kwargs):
        """Construct the GRU; all real work happens in :class:`RNNBase`."""
        # proj_size is an LSTM-only feature; reject it early with a clear error.
        if "proj_size" in kwargs:
            raise ValueError(
                "proj_size argument is only supported for LSTM, not RNN or GRU"
            )
        super().__init__("GRU", *args, **kwargs)

    # Overload stubs for TorchScript; see
    # torch/nn/modules/module.py::_forward_unimplemented
    @overload  # type: ignore[override]
    @torch._jit_internal._overload_method  # noqa: F811
    def forward(
        self, input: Tensor, hx: Optional[Tensor] = None
    ) -> Tuple[Tensor, Tensor]:  # noqa: F811
        pass

    @overload
    @torch._jit_internal._overload_method  # noqa: F811
    def forward(
        self, input: PackedSequence, hx: Optional[Tensor] = None
    ) -> Tuple[PackedSequence, Tensor]:  # noqa: F811
        pass

    def forward(self, input, hx=None):  # noqa: F811
        """Run the GRU over ``input`` and return ``(output, h_n)``.

        ``input`` may be a padded ``Tensor`` (2D unbatched or 3D batched) or a
        :class:`PackedSequence`; the output mirrors the input kind. ``hx`` is
        the optional initial hidden state and defaults to zeros.
        """
        self._update_flat_weights()

        orig_input = input
        # xxx: isinstance check needs to be in conditional for TorchScript to compile
        if isinstance(orig_input, PackedSequence):
            input, batch_sizes, sorted_indices, unsorted_indices = input
            max_batch_size = batch_sizes[0]
            if hx is None:
                num_directions = 2 if self.bidirectional else 1
                hx = torch.zeros(
                    self.num_layers * num_directions,
                    max_batch_size,
                    self.hidden_size,
                    dtype=input.dtype,
                    device=input.device,
                )
            else:
                # Each batch of the hidden state should match the input sequence that
                # the user believes he/she is passing in.
                hx = self.permute_hidden(hx, sorted_indices)
        else:
            batch_sizes = None
            if input.dim() not in (2, 3):
                raise ValueError(
                    f"GRU: Expected input to be 2D or 3D, got {input.dim()}D instead"
                )
            is_batched = input.dim() == 3
            batch_dim = 0 if self.batch_first else 1
            if not is_batched:
                # Unbatched input: add a batch dim of size 1; it is squeezed out
                # again before returning.
                input = input.unsqueeze(batch_dim)
                if hx is not None:
                    if hx.dim() != 2:
                        raise RuntimeError(
                            f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor"
                        )
                    hx = hx.unsqueeze(1)
            else:
                if hx is not None and hx.dim() != 3:
                    raise RuntimeError(
                        f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor"
                    )
            max_batch_size = input.size(0) if self.batch_first else input.size(1)
            sorted_indices = None
            unsorted_indices = None
            if hx is None:
                num_directions = 2 if self.bidirectional else 1
                hx = torch.zeros(
                    self.num_layers * num_directions,
                    max_batch_size,
                    self.hidden_size,
                    dtype=input.dtype,
                    device=input.device,
                )
            else:
                # Each batch of the hidden state should match the input sequence that
                # the user believes he/she is passing in.
                hx = self.permute_hidden(hx, sorted_indices)

        self.check_forward_args(input, hx, batch_sizes)
        # Padded and packed inputs dispatch to different _VF.gru signatures.
        if batch_sizes is None:
            result = _VF.gru(
                input,
                hx,
                self._flat_weights,
                self.bias,
                self.num_layers,
                self.dropout,
                self.training,
                self.bidirectional,
                self.batch_first,
            )
        else:
            result = _VF.gru(
                input,
                batch_sizes,
                hx,
                self._flat_weights,
                self.bias,
                self.num_layers,
                self.dropout,
                self.training,
                self.bidirectional,
            )
        output = result[0]
        hidden = result[1]

        # xxx: isinstance check needs to be in conditional for TorchScript to compile
        if isinstance(orig_input, PackedSequence):
            output_packed = PackedSequence(
                output, batch_sizes, sorted_indices, unsorted_indices
            )
            return output_packed, self.permute_hidden(hidden, unsorted_indices)
        else:
            if not is_batched:  # type: ignore[possibly-undefined]
                output = output.squeeze(batch_dim)  # type: ignore[possibly-undefined]
                hidden = hidden.squeeze(1)

            return output, self.permute_hidden(hidden, unsorted_indices)
1430
+
1431
+
1432
class RNNCellBase(Module):
    """Shared machinery for RNNCell / LSTMCell / GRUCell.

    Owns the gate weight matrices (``num_chunks`` gate blocks stacked along
    dim 0) and, optionally, the matching bias vectors.
    """

    __constants__ = ["input_size", "hidden_size", "bias"]

    input_size: int
    hidden_size: int
    bias: bool
    weight_ih: Tensor
    weight_hh: Tensor
    # WARNING: bias_ih and bias_hh purposely not defined here.
    # See https://github.com/pytorch/pytorch/issues/39670

    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        bias: bool,
        num_chunks: int,
        device=None,
        dtype=None,
    ) -> None:
        opts = {"device": device, "dtype": dtype}
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # All gates are stacked into a single (num_chunks * hidden_size, *) matrix.
        gate_rows = num_chunks * hidden_size
        self.weight_ih = Parameter(torch.empty((gate_rows, input_size), **opts))
        self.weight_hh = Parameter(torch.empty((gate_rows, hidden_size), **opts))
        if not bias:
            # Register as None so state_dict / repr stay consistent.
            self.register_parameter("bias_ih", None)
            self.register_parameter("bias_hh", None)
        else:
            self.bias_ih = Parameter(torch.empty(gate_rows, **opts))
            self.bias_hh = Parameter(torch.empty(gate_rows, **opts))

        self.reset_parameters()

    def extra_repr(self) -> str:
        """Summarize sizes plus any non-default ``bias``/``nonlinearity``."""
        template = "{input_size}, {hidden_size}"
        if "bias" in self.__dict__ and self.bias is not True:
            template += ", bias={bias}"
        if "nonlinearity" in self.__dict__ and self.nonlinearity != "tanh":
            template += ", nonlinearity={nonlinearity}"
        return template.format(**self.__dict__)

    def reset_parameters(self) -> None:
        """Re-initialize every parameter from U(-1/sqrt(H), 1/sqrt(H))."""
        bound = 1.0 / math.sqrt(self.hidden_size) if self.hidden_size > 0 else 0
        for param in self.parameters():
            init.uniform_(param, -bound, bound)
1488
+
1489
+
1490
class RNNCell(RNNCellBase):
    r"""An Elman RNN cell with tanh or ReLU non-linearity.

    .. math::

        h' = \tanh(W_{ih} x + b_{ih} + W_{hh} h + b_{hh})

    If :attr:`nonlinearity` is `'relu'`, then ReLU is used in place of tanh.

    Args:
        input_size: The number of expected features in the input `x`
        hidden_size: The number of features in the hidden state `h`
        bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
            Default: ``True``
        nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``

    Inputs: input, hidden
        - **input**: tensor containing input features
        - **hidden**: tensor containing the initial hidden state
          Defaults to zero if not provided.

    Outputs: h'
        - **h'** of shape `(batch, hidden_size)`: tensor containing the next hidden state
          for each element in the batch

    Shape:
        - input: :math:`(N, H_{in})` or :math:`(H_{in})` tensor containing input features where
          :math:`H_{in}` = `input_size`.
        - hidden: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the initial hidden
          state where :math:`H_{out}` = `hidden_size`. Defaults to zero if not provided.
        - output: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the next hidden state.

    Attributes:
        weight_ih: the learnable input-hidden weights, of shape
            `(hidden_size, input_size)`
        weight_hh: the learnable hidden-hidden weights, of shape
            `(hidden_size, hidden_size)`
        bias_ih: the learnable input-hidden bias, of shape `(hidden_size)`
        bias_hh: the learnable hidden-hidden bias, of shape `(hidden_size)`

    .. note::
        All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
        where :math:`k = \frac{1}{\text{hidden\_size}}`

    Examples::

        >>> rnn = nn.RNNCell(10, 20)
        >>> input = torch.randn(6, 3, 10)
        >>> hx = torch.randn(3, 20)
        >>> output = []
        >>> for i in range(6):
        ...     hx = rnn(input[i], hx)
        ...     output.append(hx)
    """

    __constants__ = ["input_size", "hidden_size", "bias", "nonlinearity"]
    nonlinearity: str

    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        bias: bool = True,
        nonlinearity: str = "tanh",
        device=None,
        dtype=None,
    ) -> None:
        opts = {"device": device, "dtype": dtype}
        # A single gate block -> num_chunks=1.
        super().__init__(input_size, hidden_size, bias, num_chunks=1, **opts)
        self.nonlinearity = nonlinearity

    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        """Compute one step; accepts batched (2D) or unbatched (1D) input."""
        if input.dim() not in (1, 2):
            raise ValueError(
                f"RNNCell: Expected input to be 1D or 2D, got {input.dim()}D instead"
            )
        if hx is not None and hx.dim() not in (1, 2):
            raise ValueError(
                f"RNNCell: Expected hidden to be 1D or 2D, got {hx.dim()}D instead"
            )
        batched = input.dim() == 2
        if not batched:
            # Promote to a batch of one; undone just before returning.
            input = input.unsqueeze(0)

        if hx is None:
            hx = torch.zeros(
                input.size(0), self.hidden_size, dtype=input.dtype, device=input.device
            )
        elif not batched:
            hx = hx.unsqueeze(0)

        if self.nonlinearity == "tanh":
            hy = _VF.rnn_tanh_cell(
                input,
                hx,
                self.weight_ih,
                self.weight_hh,
                self.bias_ih,
                self.bias_hh,
            )
        elif self.nonlinearity == "relu":
            hy = _VF.rnn_relu_cell(
                input,
                hx,
                self.weight_ih,
                self.weight_hh,
                self.bias_ih,
                self.bias_hh,
            )
        else:
            hy = input  # TODO: remove when jit supports exception flow
            raise RuntimeError(f"Unknown nonlinearity: {self.nonlinearity}")

        return hy.squeeze(0) if not batched else hy
1607
+
1608
+
1609
class LSTMCell(RNNCellBase):
    r"""A long short-term memory (LSTM) cell.

    .. math::

        \begin{array}{ll}
        i = \sigma(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\
        f = \sigma(W_{if} x + b_{if} + W_{hf} h + b_{hf}) \\
        g = \tanh(W_{ig} x + b_{ig} + W_{hg} h + b_{hg}) \\
        o = \sigma(W_{io} x + b_{io} + W_{ho} h + b_{ho}) \\
        c' = f \odot c + i \odot g \\
        h' = o \odot \tanh(c') \\
        \end{array}

    where :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.

    Args:
        input_size: The number of expected features in the input `x`
        hidden_size: The number of features in the hidden state `h`
        bias: If ``False``, then the layer does not use bias weights `b_ih` and
            `b_hh`. Default: ``True``

    Inputs: input, (h_0, c_0)
        - **input** of shape `(batch, input_size)` or `(input_size)`: tensor containing input features
        - **h_0** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the initial hidden state
        - **c_0** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the initial cell state

          If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero.

    Outputs: (h_1, c_1)
        - **h_1** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the next hidden state
        - **c_1** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the next cell state

    Attributes:
        weight_ih: the learnable input-hidden weights, of shape
            `(4*hidden_size, input_size)`
        weight_hh: the learnable hidden-hidden weights, of shape
            `(4*hidden_size, hidden_size)`
        bias_ih: the learnable input-hidden bias, of shape `(4*hidden_size)`
        bias_hh: the learnable hidden-hidden bias, of shape `(4*hidden_size)`

    .. note::
        All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
        where :math:`k = \frac{1}{\text{hidden\_size}}`

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Examples::

        >>> rnn = nn.LSTMCell(10, 20)  # (input_size, hidden_size)
        >>> input = torch.randn(2, 3, 10)  # (time_steps, batch, input_size)
        >>> hx = torch.randn(3, 20)  # (batch, hidden_size)
        >>> cx = torch.randn(3, 20)
        >>> output = []
        >>> for i in range(input.size()[0]):
        ...     hx, cx = rnn(input[i], (hx, cx))
        ...     output.append(hx)
        >>> output = torch.stack(output, dim=0)
    """

    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        bias: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        opts = {"device": device, "dtype": dtype}
        # Four gate blocks (i, f, g, o) -> num_chunks=4.
        super().__init__(input_size, hidden_size, bias, num_chunks=4, **opts)

    def forward(
        self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None
    ) -> Tuple[Tensor, Tensor]:
        """Compute one step; accepts batched (2D) or unbatched (1D) input."""
        if input.dim() not in (1, 2):
            raise ValueError(
                f"LSTMCell: Expected input to be 1D or 2D, got {input.dim()}D instead"
            )
        if hx is not None:
            for idx, value in enumerate(hx):
                if value.dim() not in (1, 2):
                    raise ValueError(
                        f"LSTMCell: Expected hx[{idx}] to be 1D or 2D, got {value.dim()}D instead"
                    )
        batched = input.dim() == 2
        if not batched:
            # Promote to a batch of one; undone just before returning.
            input = input.unsqueeze(0)

        if hx is None:
            zero_state = torch.zeros(
                input.size(0), self.hidden_size, dtype=input.dtype, device=input.device
            )
            hx = (zero_state, zero_state)
        elif not batched:
            hx = (hx[0].unsqueeze(0), hx[1].unsqueeze(0))

        h_next, c_next = _VF.lstm_cell(
            input,
            hx,
            self.weight_ih,
            self.weight_hh,
            self.bias_ih,
            self.bias_hh,
        )

        if not batched:
            return (h_next.squeeze(0), c_next.squeeze(0))
        return (h_next, c_next)
1717
+
1718
+
1719
class GRUCell(RNNCellBase):
    r"""A gated recurrent unit (GRU) cell.

    .. math::

        \begin{array}{ll}
        r = \sigma(W_{ir} x + b_{ir} + W_{hr} h + b_{hr}) \\
        z = \sigma(W_{iz} x + b_{iz} + W_{hz} h + b_{hz}) \\
        n = \tanh(W_{in} x + b_{in} + r \odot (W_{hn} h + b_{hn})) \\
        h' = (1 - z) \odot n + z \odot h
        \end{array}

    where :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.

    Args:
        input_size: The number of expected features in the input `x`
        hidden_size: The number of features in the hidden state `h`
        bias: If ``False``, then the layer does not use bias weights `b_ih` and
            `b_hh`. Default: ``True``

    Inputs: input, hidden
        - **input** : tensor containing input features
        - **hidden** : tensor containing the initial hidden
          state for each element in the batch.
          Defaults to zero if not provided.

    Outputs: h'
        - **h'** : tensor containing the next hidden state
          for each element in the batch

    Shape:
        - input: :math:`(N, H_{in})` or :math:`(H_{in})` tensor containing input features where
          :math:`H_{in}` = `input_size`.
        - hidden: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the initial hidden
          state where :math:`H_{out}` = `hidden_size`. Defaults to zero if not provided.
        - output: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the next hidden state.

    Attributes:
        weight_ih: the learnable input-hidden weights, of shape
            `(3*hidden_size, input_size)`
        weight_hh: the learnable hidden-hidden weights, of shape
            `(3*hidden_size, hidden_size)`
        bias_ih: the learnable input-hidden bias, of shape `(3*hidden_size)`
        bias_hh: the learnable hidden-hidden bias, of shape `(3*hidden_size)`

    .. note::
        All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
        where :math:`k = \frac{1}{\text{hidden\_size}}`

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Examples::

        >>> rnn = nn.GRUCell(10, 20)
        >>> input = torch.randn(6, 3, 10)
        >>> hx = torch.randn(3, 20)
        >>> output = []
        >>> for i in range(6):
        ...     hx = rnn(input[i], hx)
        ...     output.append(hx)
    """

    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        bias: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        opts = {"device": device, "dtype": dtype}
        # Three gate blocks (r, z, n) -> num_chunks=3.
        super().__init__(input_size, hidden_size, bias, num_chunks=3, **opts)

    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        """Compute one step; accepts batched (2D) or unbatched (1D) input."""
        if input.dim() not in (1, 2):
            raise ValueError(
                f"GRUCell: Expected input to be 1D or 2D, got {input.dim()}D instead"
            )
        if hx is not None and hx.dim() not in (1, 2):
            raise ValueError(
                f"GRUCell: Expected hidden to be 1D or 2D, got {hx.dim()}D instead"
            )
        batched = input.dim() == 2
        if not batched:
            # Promote to a batch of one; undone just before returning.
            input = input.unsqueeze(0)

        if hx is None:
            hx = torch.zeros(
                input.size(0), self.hidden_size, dtype=input.dtype, device=input.device
            )
        elif not batched:
            hx = hx.unsqueeze(0)

        hy = _VF.gru_cell(
            input,
            hx,
            self.weight_ih,
            self.weight_hh,
            self.bias_ih,
            self.bias_hh,
        )

        return hy.squeeze(0) if not batched else hy
janus/lib/python3.10/site-packages/torch/nn/modules/sparse.py ADDED
@@ -0,0 +1,546 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import Optional
3
+
4
+ import torch
5
+ from torch import Tensor
6
+ from torch.nn import functional as F, init
7
+ from torch.nn.parameter import Parameter
8
+
9
+ from .module import Module
10
+
11
+
12
+ __all__ = ["Embedding", "EmbeddingBag"]
13
+
14
+
15
+ class Embedding(Module):
16
+ r"""A simple lookup table that stores embeddings of a fixed dictionary and size.
17
+
18
+ This module is often used to store word embeddings and retrieve them using indices.
19
+ The input to the module is a list of indices, and the output is the corresponding
20
+ word embeddings.
21
+
22
+ Args:
23
+ num_embeddings (int): size of the dictionary of embeddings
24
+ embedding_dim (int): the size of each embedding vector
25
+ padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
26
+ therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
27
+ i.e. it remains as a fixed "pad". For a newly constructed Embedding,
28
+ the embedding vector at :attr:`padding_idx` will default to all zeros,
29
+ but can be updated to another value to be used as the padding vector.
30
+ max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
31
+ is renormalized to have norm :attr:`max_norm`.
32
+ norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
33
+ scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of
34
+ the words in the mini-batch. Default ``False``.
35
+ sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor.
36
+ See Notes for more details regarding sparse gradients.
37
+
38
+ Attributes:
39
+ weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)
40
+ initialized from :math:`\mathcal{N}(0, 1)`
41
+
42
+ Shape:
43
+ - Input: :math:`(*)`, IntTensor or LongTensor of arbitrary shape containing the indices to extract
44
+ - Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}`
45
+
46
+ .. note::
47
+ Keep in mind that only a limited number of optimizers support
48
+ sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`),
49
+ :class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`)
50
+
51
+ .. note::
52
+ When :attr:`max_norm` is not ``None``, :class:`Embedding`'s forward method will modify the
53
+ :attr:`weight` tensor in-place. Since tensors needed for gradient computations cannot be
54
+ modified in-place, performing a differentiable operation on ``Embedding.weight`` before
55
+ calling :class:`Embedding`'s forward method requires cloning ``Embedding.weight`` when
56
+ :attr:`max_norm` is not ``None``. For example::
57
+
58
+ n, d, m = 3, 5, 7
59
+ embedding = nn.Embedding(n, d, max_norm=1.0)
60
+ W = torch.randn((m, d), requires_grad=True)
61
+ idx = torch.tensor([1, 2])
62
+ a = embedding.weight.clone() @ W.t() # weight must be cloned for this to be differentiable
63
+ b = embedding(idx) @ W.t() # modifies weight in-place
64
+ out = (a.unsqueeze(0) + b.unsqueeze(1))
65
+ loss = out.sigmoid().prod()
66
+ loss.backward()
67
+
68
+ Examples::
69
+
70
+ >>> # an Embedding module containing 10 tensors of size 3
71
+ >>> embedding = nn.Embedding(10, 3)
72
+ >>> # a batch of 2 samples of 4 indices each
73
+ >>> input = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
74
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
75
+ >>> embedding(input)
76
+ tensor([[[-0.0251, -1.6902, 0.7172],
77
+ [-0.6431, 0.0748, 0.6969],
78
+ [ 1.4970, 1.3448, -0.9685],
79
+ [-0.3677, -2.7265, -0.1685]],
80
+
81
+ [[ 1.4970, 1.3448, -0.9685],
82
+ [ 0.4362, -0.4004, 0.9400],
83
+ [-0.6431, 0.0748, 0.6969],
84
+ [ 0.9124, -2.3616, 1.1151]]])
85
+
86
+
87
+ >>> # example with padding_idx
88
+ >>> embedding = nn.Embedding(10, 3, padding_idx=0)
89
+ >>> input = torch.LongTensor([[0, 2, 0, 5]])
90
+ >>> embedding(input)
91
+ tensor([[[ 0.0000, 0.0000, 0.0000],
92
+ [ 0.1535, -2.0309, 0.9315],
93
+ [ 0.0000, 0.0000, 0.0000],
94
+ [-0.1655, 0.9897, 0.0635]]])
95
+
96
+ >>> # example of changing `pad` vector
97
+ >>> padding_idx = 0
98
+ >>> embedding = nn.Embedding(3, 3, padding_idx=padding_idx)
99
+ >>> embedding.weight
100
+ Parameter containing:
101
+ tensor([[ 0.0000, 0.0000, 0.0000],
102
+ [-0.7895, -0.7089, -0.0364],
103
+ [ 0.6778, 0.5803, 0.2678]], requires_grad=True)
104
+ >>> with torch.no_grad():
105
+ ... embedding.weight[padding_idx] = torch.ones(3)
106
+ >>> embedding.weight
107
+ Parameter containing:
108
+ tensor([[ 1.0000, 1.0000, 1.0000],
109
+ [-0.7895, -0.7089, -0.0364],
110
+ [ 0.6778, 0.5803, 0.2678]], requires_grad=True)
111
+ """
112
+
113
+ __constants__ = [
114
+ "num_embeddings",
115
+ "embedding_dim",
116
+ "padding_idx",
117
+ "max_norm",
118
+ "norm_type",
119
+ "scale_grad_by_freq",
120
+ "sparse",
121
+ ]
122
+
123
+ num_embeddings: int
124
+ embedding_dim: int
125
+ padding_idx: Optional[int]
126
+ max_norm: Optional[float]
127
+ norm_type: float
128
+ scale_grad_by_freq: bool
129
+ weight: Tensor
130
+ freeze: bool
131
+ sparse: bool
132
+
133
def __init__(
    self,
    num_embeddings: int,
    embedding_dim: int,
    padding_idx: Optional[int] = None,
    max_norm: Optional[float] = None,
    norm_type: float = 2.0,
    scale_grad_by_freq: bool = False,
    sparse: bool = False,
    _weight: Optional[Tensor] = None,
    _freeze: bool = False,
    device=None,
    dtype=None,
) -> None:
    # Construct the lookup table; `_weight`/`_freeze` are internal hooks used
    # by ``from_pretrained`` to install an existing tensor.
    factory = {"device": device, "dtype": dtype}
    super().__init__()
    self.num_embeddings = num_embeddings
    self.embedding_dim = embedding_dim
    if padding_idx is not None:
        # Validate the index and map a negative value onto its positive
        # equivalent, mirroring Python's negative-indexing convention.
        if padding_idx < 0:
            assert (
                padding_idx >= -self.num_embeddings
            ), "Padding_idx must be within num_embeddings"
            padding_idx += self.num_embeddings
        elif padding_idx > 0:
            assert (
                padding_idx < self.num_embeddings
            ), "Padding_idx must be within num_embeddings"
    self.padding_idx = padding_idx
    self.max_norm = max_norm
    self.norm_type = norm_type
    self.scale_grad_by_freq = scale_grad_by_freq
    self.sparse = sparse
    if _weight is not None:
        # Adopt the caller-supplied table after a shape sanity check.
        assert list(_weight.shape) == [
            num_embeddings,
            embedding_dim,
        ], "Shape of weight does not match num_embeddings and embedding_dim"
        self.weight = Parameter(_weight, requires_grad=not _freeze)
    else:
        self.weight = Parameter(
            torch.empty((num_embeddings, embedding_dim), **factory),
            requires_grad=not _freeze,
        )
        self.reset_parameters()
179
+
180
def reset_parameters(self) -> None:
    """Reinitialize :attr:`weight` from :math:`\\mathcal{N}(0, 1)` and re-zero the padding row, if any."""
    init.normal_(self.weight)
    # The padding row must stay all-zero after a fresh init.
    self._fill_padding_idx_with_zero()
183
+
184
+ def _fill_padding_idx_with_zero(self) -> None:
185
+ if self.padding_idx is not None:
186
+ with torch.no_grad():
187
+ self.weight[self.padding_idx].fill_(0)
188
+
189
def forward(self, input: Tensor) -> Tensor:
    """Look up the embedding rows selected by ``input`` via :func:`F.embedding`."""
    # Forward the module's configuration explicitly by keyword for clarity.
    return F.embedding(
        input,
        self.weight,
        padding_idx=self.padding_idx,
        max_norm=self.max_norm,
        norm_type=self.norm_type,
        scale_grad_by_freq=self.scale_grad_by_freq,
        sparse=self.sparse,
    )
199
+
200
def extra_repr(self) -> str:
    """Summarize constructor arguments, listing only those that differ from their defaults."""
    parts = [f"{self.num_embeddings}, {self.embedding_dim}"]
    if self.padding_idx is not None:
        parts.append(f"padding_idx={self.padding_idx}")
    if self.max_norm is not None:
        parts.append(f"max_norm={self.max_norm}")
    if self.norm_type != 2:
        parts.append(f"norm_type={self.norm_type}")
    if self.scale_grad_by_freq is not False:
        parts.append(f"scale_grad_by_freq={self.scale_grad_by_freq}")
    if self.sparse is not False:
        parts.append("sparse=True")
    return ", ".join(parts)
213
+
214
@classmethod
def from_pretrained(
    cls,
    embeddings,
    freeze=True,
    padding_idx=None,
    max_norm=None,
    norm_type=2.0,
    scale_grad_by_freq=False,
    sparse=False,
):
    r"""Build an Embedding whose weight table is the given 2-dimensional FloatTensor.

    Args:
        embeddings (Tensor): 2D FloatTensor holding the pretrained weights;
            dim 0 becomes ``num_embeddings`` and dim 1 becomes ``embedding_dim``.
        freeze (bool, optional): When ``True`` the table is excluded from
            gradient updates, i.e. ``embedding.weight.requires_grad = False``.
            Default: ``True``
        padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute
            to the gradient; the embedding vector at :attr:`padding_idx` stays fixed ("pad").
        max_norm (float, optional): See module initialization documentation.
        norm_type (float, optional): See module initialization documentation. Default ``2``.
        scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``.
        sparse (bool, optional): See module initialization documentation.

    Examples::

        >>> # FloatTensor containing pretrained weights
        >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
        >>> embedding = nn.Embedding.from_pretrained(weight)
        >>> # Get embeddings for index 1
        >>> input = torch.LongTensor([1])
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> embedding(input)
        tensor([[ 4.0000,  5.1000,  6.3000]])
    """
    assert (
        embeddings.dim() == 2
    ), "Embeddings parameter is expected to be 2-dimensional"
    num_rows, num_cols = embeddings.shape
    # Delegate to __init__ through the internal hooks so the supplied
    # tensor is installed as-is and optionally frozen.
    return cls(
        num_embeddings=num_rows,
        embedding_dim=num_cols,
        _weight=embeddings,
        _freeze=freeze,
        padding_idx=padding_idx,
        max_norm=max_norm,
        norm_type=norm_type,
        scale_grad_by_freq=scale_grad_by_freq,
        sparse=sparse,
    )
267
+
268
+
269
class EmbeddingBag(Module):
    r"""Compute sums or means of 'bags' of embeddings, without instantiating the intermediate embeddings.

    For bags of constant length, no :attr:`per_sample_weights`, no indices equal to :attr:`padding_idx`,
    and with 2D inputs, this class

    * with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=1)``,
    * with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=1)``,
    * with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=1)``.

    However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these
    operations.

    EmbeddingBag also supports per-sample weights as an argument to the forward
    pass. This scales the output of the Embedding before performing a weighted
    reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the
    only supported ``mode`` is ``"sum"``, which computes a weighted sum according to
    :attr:`per_sample_weights`.

    Args:
        num_embeddings (int): size of the dictionary of embeddings
        embedding_dim (int): the size of each embedding vector
        max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
                                    is renormalized to have norm :attr:`max_norm`.
        norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
        scale_grad_by_freq (bool, optional): if given, this will scale gradients by the inverse of frequency of
                                             the words in the mini-batch. Default ``False``.
                                             Note: this option is not supported when ``mode="max"``.
        mode (str, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
                              ``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights`
                              into consideration. ``"mean"`` computes the average of the values
                              in the bag, ``"max"`` computes the max value over each bag.
                              Default: ``"mean"``
        sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See
                                 Notes for more details regarding sparse gradients. Note: this option is not
                                 supported when ``mode="max"``.
        include_last_offset (bool, optional): if ``True``, :attr:`offsets` has one additional element, where the last element
                                              is equivalent to the size of `indices`. This matches the CSR format.
        padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the
                                     gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated
                                     during training, i.e. it remains as a fixed "pad". For a newly constructed
                                     EmbeddingBag, the embedding vector at :attr:`padding_idx` will default to all
                                     zeros, but can be updated to another value to be used as the padding vector.
                                     Note that the embedding vector at :attr:`padding_idx` is excluded from the
                                     reduction.

    Attributes:
        weight (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)`
                         initialized from :math:`\mathcal{N}(0, 1)`.

    Examples::

        >>> # an EmbeddingBag module containing 10 tensors of size 3
        >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum')
        >>> # a batch of 2 samples of 4 indices each
        >>> input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
        >>> offsets = torch.tensor([0, 4], dtype=torch.long)
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> embedding_sum(input, offsets)
        tensor([[-0.8861, -5.4350, -0.0523],
                [ 1.1306, -2.5798, -1.0044]])

        >>> # Example with padding_idx
        >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum', padding_idx=2)
        >>> input = torch.tensor([2, 2, 2, 2, 4, 3, 2, 9], dtype=torch.long)
        >>> offsets = torch.tensor([0, 4], dtype=torch.long)
        >>> embedding_sum(input, offsets)
        tensor([[ 0.0000,  0.0000,  0.0000],
                [-0.7082,  3.2145, -2.6251]])

        >>> # An EmbeddingBag can be loaded from an Embedding like so
        >>> embedding = nn.Embedding(10, 3, padding_idx=2)
        >>> embedding_sum = nn.EmbeddingBag.from_pretrained(
                embedding.weight,
                padding_idx=embedding.padding_idx,
                mode='sum')
    """

    __constants__ = [
        "num_embeddings",
        "embedding_dim",
        "max_norm",
        "norm_type",
        "scale_grad_by_freq",
        "mode",
        "sparse",
        "include_last_offset",
        "padding_idx",
    ]

    num_embeddings: int
    embedding_dim: int
    max_norm: Optional[float]
    norm_type: float
    scale_grad_by_freq: bool
    weight: Tensor
    mode: str
    sparse: bool
    include_last_offset: bool
    padding_idx: Optional[int]

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        mode: str = "mean",
        sparse: bool = False,
        _weight: Optional[Tensor] = None,
        include_last_offset: bool = False,
        padding_idx: Optional[int] = None,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        if padding_idx is not None:
            # Validate padding_idx and normalize a negative index to its
            # positive equivalent (Python-style negative indexing).
            if padding_idx > 0:
                assert (
                    padding_idx < self.num_embeddings
                ), "padding_idx must be within num_embeddings"
            elif padding_idx < 0:
                assert (
                    padding_idx >= -self.num_embeddings
                ), "padding_idx must be within num_embeddings"
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        if _weight is None:
            # Fresh table: allocate and initialize via reset_parameters().
            self.weight = Parameter(
                torch.empty((num_embeddings, embedding_dim), **factory_kwargs)
            )
            self.reset_parameters()
        else:
            # _weight is the internal hook used by from_pretrained().
            assert list(_weight.shape) == [
                num_embeddings,
                embedding_dim,
            ], "Shape of weight does not match num_embeddings and embedding_dim"
            self.weight = Parameter(_weight)
        self.mode = mode
        self.sparse = sparse
        self.include_last_offset = include_last_offset

    def reset_parameters(self) -> None:
        """Reinitialize :attr:`weight` from N(0, 1) and re-zero the padding row."""
        init.normal_(self.weight)
        self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        # Keep the padding row all-zero; done outside autograd tracking.
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(
        self,
        input: Tensor,
        offsets: Optional[Tensor] = None,
        per_sample_weights: Optional[Tensor] = None,
    ) -> Tensor:
        """Forward pass of EmbeddingBag.

        Args:
            input (Tensor): Tensor containing bags of indices into the embedding matrix.
            offsets (Tensor, optional): Only used when :attr:`input` is 1D. :attr:`offsets` determines
                the starting index position of each bag (sequence) in :attr:`input`.
            per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
                to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights`
                must have exactly the same shape as input and is treated as having the same
                :attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.

        Returns:
            Tensor output shape of `(B, embedding_dim)`.

        .. note::

            A few notes about ``input`` and ``offsets``:

            - :attr:`input` and :attr:`offsets` have to be of the same type, either int or long

            - If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B`` bags (sequences)
              each of fixed length ``N``, and this will return ``B`` values aggregated in a way
              depending on the :attr:`mode`. :attr:`offsets` is ignored and required to be ``None`` in this case.

            - If :attr:`input` is 1D of shape `(N)`, it will be treated as a concatenation of
              multiple bags (sequences). :attr:`offsets` is required to be a 1D tensor containing the
              starting index positions of each bag in :attr:`input`. Therefore, for :attr:`offsets` of shape `(B)`,
              :attr:`input` will be viewed as having ``B`` bags. Empty bags (i.e., having 0-length) will have
              returned vectors filled by zeros.
        """
        # All reduction logic lives in the fused functional kernel.
        return F.embedding_bag(
            input,
            self.weight,
            offsets,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.mode,
            self.sparse,
            per_sample_weights,
            self.include_last_offset,
            self.padding_idx,
        )

    def extra_repr(self) -> str:
        """Return a repr fragment listing non-default constructor arguments."""
        s = "{num_embeddings}, {embedding_dim}"
        if self.max_norm is not None:
            s += ", max_norm={max_norm}"
        if self.norm_type != 2:
            s += ", norm_type={norm_type}"
        if self.scale_grad_by_freq is not False:
            s += ", scale_grad_by_freq={scale_grad_by_freq}"
        s += ", mode={mode}"
        if self.padding_idx is not None:
            s += ", padding_idx={padding_idx}"
        # repr() is used so that e.g. `mode` renders with quotes.
        return s.format(**{k: repr(v) for k, v in self.__dict__.items()})

    @classmethod
    def from_pretrained(
        cls,
        embeddings: Tensor,
        freeze: bool = True,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        mode: str = "mean",
        sparse: bool = False,
        include_last_offset: bool = False,
        padding_idx: Optional[int] = None,
    ) -> "EmbeddingBag":
        r"""Create EmbeddingBag instance from given 2-dimensional FloatTensor.

        Args:
            embeddings (Tensor): FloatTensor containing weights for the EmbeddingBag.
                First dimension is being passed to EmbeddingBag as 'num_embeddings', second as 'embedding_dim'.
            freeze (bool, optional): If ``True``, the tensor does not get updated in the learning process.
                Equivalent to ``embeddingbag.weight.requires_grad = False``. Default: ``True``
            max_norm (float, optional): See module initialization documentation. Default: ``None``
            norm_type (float, optional): See module initialization documentation. Default ``2``.
            scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``.
            mode (str, optional): See module initialization documentation. Default: ``"mean"``
            sparse (bool, optional): See module initialization documentation. Default: ``False``.
            include_last_offset (bool, optional): See module initialization documentation. Default: ``False``.
            padding_idx (int, optional): See module initialization documentation. Default: ``None``.

        Examples::

            >>> # FloatTensor containing pretrained weights
            >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
            >>> embeddingbag = nn.EmbeddingBag.from_pretrained(weight)
            >>> # Get embeddings for index 1
            >>> input = torch.LongTensor([[1, 0]])
            >>> # xdoctest: +IGNORE_WANT("non-deterministic")
            >>> embeddingbag(input)
            tensor([[ 2.5000,  3.7000,  4.6500]])
        """
        assert (
            embeddings.dim() == 2
        ), "Embeddings parameter is expected to be 2-dimensional"
        rows, cols = embeddings.shape
        embeddingbag = cls(
            num_embeddings=rows,
            embedding_dim=cols,
            _weight=embeddings,
            max_norm=max_norm,
            norm_type=norm_type,
            scale_grad_by_freq=scale_grad_by_freq,
            mode=mode,
            sparse=sparse,
            include_last_offset=include_last_offset,
            padding_idx=padding_idx,
        )
        # Unlike Embedding, __init__ has no _freeze hook here, so the
        # requires_grad flag is applied after construction.
        embeddingbag.weight.requires_grad = not freeze
        return embeddingbag
janus/lib/python3.10/site-packages/torch/nn/modules/transformer.py ADDED
@@ -0,0 +1,1198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import copy
3
+ import warnings
4
+ from typing import Any, Callable, Optional, Union
5
+
6
+ import torch
7
+ import torch.nn.functional as F
8
+ from torch import Tensor
9
+ from torch.nn.init import xavier_uniform_
10
+
11
+ from .activation import MultiheadAttention
12
+ from .container import ModuleList
13
+ from .dropout import Dropout
14
+ from .linear import Linear
15
+ from .module import Module
16
+ from .normalization import LayerNorm
17
+
18
+
19
+ __all__ = [
20
+ "Transformer",
21
+ "TransformerEncoder",
22
+ "TransformerDecoder",
23
+ "TransformerEncoderLayer",
24
+ "TransformerDecoderLayer",
25
+ ]
26
+
27
+
28
def _generate_square_subsequent_mask(
    sz: int,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
) -> Tensor:
    r"""Build a square additive causal mask of shape ``(sz, sz)``.

    Positions strictly above the diagonal (i.e. "future" positions) hold
    float('-inf'); all remaining positions hold float(0.0).
    """
    if dtype is None:
        dtype = torch.float32
    if device is None:
        device = torch.device("cpu")
    blocked = torch.full((sz, sz), float("-inf"), dtype=dtype, device=device)
    # Keeping only the strict upper triangle zeroes the diagonal and below.
    return blocked.triu(diagonal=1)
45
+
46
+
47
def _get_seq_len(src: Tensor, batch_first: bool) -> Optional[int]:
    """Return the sequence length of ``src``, or ``None`` when it is nested.

    Nested tensors carry ragged per-sample lengths, so no single sequence
    length exists for them.
    """
    if src.is_nested:
        return None
    shape = src.size()
    if len(shape) == 2:
        # Unbatched layout: (S, E).
        return shape[0]
    # Batched layout: (B, S, E) when batch_first, otherwise (S, B, E).
    return shape[1] if batch_first else shape[0]
59
+
60
+
61
class Transformer(Module):
    r"""A transformer model.

    User is able to modify the attributes as needed. The architecture
    is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
    Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
    Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information
    Processing Systems, pages 6000-6010.

    Args:
        d_model: the number of expected features in the encoder/decoder inputs (default=512).
        nhead: the number of heads in the multiheadattention models (default=8).
        num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
        num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6).
        dim_feedforward: the dimension of the feedforward network model (default=2048).
        dropout: the dropout value (default=0.1).
        activation: the activation function of encoder/decoder intermediate layer, can be a string
            ("relu" or "gelu") or a unary callable. Default: relu
        custom_encoder: custom encoder (default=None).
        custom_decoder: custom decoder (default=None).
        layer_norm_eps: the eps value in layer normalization components (default=1e-5).
        batch_first: If ``True``, then the input and output tensors are provided
            as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
        norm_first: if ``True``, encoder and decoder layers will perform LayerNorms before
            other attention and feedforward operations, otherwise after. Default: ``False`` (after).
        bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
            bias. Default: ``True``.

    Examples::
        >>> transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12)
        >>> src = torch.rand((10, 32, 512))
        >>> tgt = torch.rand((20, 32, 512))
        >>> out = transformer_model(src, tgt)

    Note: A full example to apply nn.Transformer module for the word language model is available in
    https://github.com/pytorch/examples/tree/master/word_language_model
    """

    def __init__(
        self,
        d_model: int = 512,
        nhead: int = 8,
        num_encoder_layers: int = 6,
        num_decoder_layers: int = 6,
        dim_feedforward: int = 2048,
        dropout: float = 0.1,
        activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
        custom_encoder: Optional[Any] = None,
        custom_decoder: Optional[Any] = None,
        layer_norm_eps: float = 1e-5,
        batch_first: bool = False,
        norm_first: bool = False,
        bias: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}")

        # Build the default encoder stack only when no custom encoder is given;
        # a custom encoder is adopted as-is (its configuration is the caller's
        # responsibility).
        if custom_encoder is not None:
            self.encoder = custom_encoder
        else:
            encoder_layer = TransformerEncoderLayer(
                d_model,
                nhead,
                dim_feedforward,
                dropout,
                activation,
                layer_norm_eps,
                batch_first,
                norm_first,
                bias,
                **factory_kwargs,
            )
            encoder_norm = LayerNorm(
                d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs
            )
            self.encoder = TransformerEncoder(
                encoder_layer, num_encoder_layers, encoder_norm
            )

        # Same pattern for the decoder stack.
        if custom_decoder is not None:
            self.decoder = custom_decoder
        else:
            decoder_layer = TransformerDecoderLayer(
                d_model,
                nhead,
                dim_feedforward,
                dropout,
                activation,
                layer_norm_eps,
                batch_first,
                norm_first,
                bias,
                **factory_kwargs,
            )
            decoder_norm = LayerNorm(
                d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs
            )
            self.decoder = TransformerDecoder(
                decoder_layer, num_decoder_layers, decoder_norm
            )

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

        self.batch_first = batch_first

    def forward(
        self,
        src: Tensor,
        tgt: Tensor,
        src_mask: Optional[Tensor] = None,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        src_is_causal: Optional[bool] = None,
        tgt_is_causal: Optional[bool] = None,
        memory_is_causal: bool = False,
    ) -> Tensor:
        r"""Take in and process masked source/target sequences.

        .. note::

            If a boolean tensor is provided for any of the [src/tgt/memory]_mask arguments, positions with a ``True`` value are
            not allowed to participate in the attention,
            which is the opposite of the definition for :attr:`attn_mask`
            in :func:`torch.nn.functional.scaled_dot_product_attention`.

        Args:
            src: the sequence to the encoder (required).
            tgt: the sequence to the decoder (required).
            src_mask: the additive mask for the src sequence (optional).
            tgt_mask: the additive mask for the tgt sequence (optional).
            memory_mask: the additive mask for the encoder output (optional).
            src_key_padding_mask: the Tensor mask for src keys per batch (optional).
            tgt_key_padding_mask: the Tensor mask for tgt keys per batch (optional).
            memory_key_padding_mask: the Tensor mask for memory keys per batch (optional).
            src_is_causal: If specified, applies a causal mask as ``src_mask``.
                Default: ``None``; try to detect a causal mask.
                Warning:
                ``src_is_causal`` provides a hint that ``src_mask`` is
                the causal mask. Providing incorrect hints can result in
                incorrect execution, including forward and backward
                compatibility.
            tgt_is_causal: If specified, applies a causal mask as ``tgt_mask``.
                Default: ``None``; try to detect a causal mask.
                Warning:
                ``tgt_is_causal`` provides a hint that ``tgt_mask`` is
                the causal mask. Providing incorrect hints can result in
                incorrect execution, including forward and backward
                compatibility.
            memory_is_causal: If specified, applies a causal mask as
                ``memory_mask``.
                Default: ``False``.
                Warning:
                ``memory_is_causal`` provides a hint that
                ``memory_mask`` is the causal mask. Providing incorrect
                hints can result in incorrect execution, including
                forward and backward compatibility.

        Shape:
            - src: :math:`(S, E)` for unbatched input, :math:`(S, N, E)` if `batch_first=False` or
              `(N, S, E)` if `batch_first=True`.
            - tgt: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or
              `(N, T, E)` if `batch_first=True`.
            - src_mask: :math:`(S, S)` or :math:`(N\cdot\text{num\_heads}, S, S)`.
            - tgt_mask: :math:`(T, T)` or :math:`(N\cdot\text{num\_heads}, T, T)`.
            - memory_mask: :math:`(T, S)`.
            - src_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`.
            - tgt_key_padding_mask: :math:`(T)` for unbatched input otherwise :math:`(N, T)`.
            - memory_key_padding_mask: :math:`(S)` for unbatched input otherwise :math:`(N, S)`.

            Note: [src/tgt/memory]_mask ensures that position :math:`i` is allowed to attend the unmasked
            positions. If a BoolTensor is provided, positions with ``True``
            are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
            is provided, it will be added to the attention weight.
            [src/tgt/memory]_key_padding_mask provides specified elements in the key to be ignored by
            the attention. If a BoolTensor is provided, the positions with the
            value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.

            - output: :math:`(T, E)` for unbatched input, :math:`(T, N, E)` if `batch_first=False` or
              `(N, T, E)` if `batch_first=True`.

            Note: Due to the multi-head attention architecture in the transformer model,
            the output sequence length of a transformer is same as the input sequence
            (i.e. target) length of the decoder.

            where :math:`S` is the source sequence length, :math:`T` is the target sequence length, :math:`N` is the
            batch size, :math:`E` is the feature number

        Examples:
            >>> # xdoctest: +SKIP
            >>> output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
        """
        # Batched inputs must share the batch dimension; its position depends
        # on batch_first (dim 0 when batch_first, dim 1 otherwise).
        is_batched = src.dim() == 3
        if not self.batch_first and src.size(1) != tgt.size(1) and is_batched:
            raise RuntimeError("the batch number of src and tgt must be equal")
        elif self.batch_first and src.size(0) != tgt.size(0) and is_batched:
            raise RuntimeError("the batch number of src and tgt must be equal")

        if src.size(-1) != self.d_model or tgt.size(-1) != self.d_model:
            raise RuntimeError(
                "the feature number of src and tgt must be equal to d_model"
            )

        # Encode the source, then decode the target against the encoder memory.
        memory = self.encoder(
            src,
            mask=src_mask,
            src_key_padding_mask=src_key_padding_mask,
            is_causal=src_is_causal,
        )
        output = self.decoder(
            tgt,
            memory,
            tgt_mask=tgt_mask,
            memory_mask=memory_mask,
            tgt_key_padding_mask=tgt_key_padding_mask,
            memory_key_padding_mask=memory_key_padding_mask,
            tgt_is_causal=tgt_is_causal,
            memory_is_causal=memory_is_causal,
        )
        return output

    @staticmethod
    def generate_square_subsequent_mask(
        sz: int,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ) -> Tensor:
        r"""Generate a square causal mask for the sequence.

        The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0).
        """
        # Thin public wrapper over the module-level helper.
        return _generate_square_subsequent_mask(sz, dtype=dtype, device=device)

    def _reset_parameters(self):
        r"""Initiate parameters in the transformer model."""
        # Xavier-uniform init for every weight matrix; 1-D parameters
        # (biases, LayerNorm weights) keep their submodule defaults.
        for p in self.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)
307
+
308
+
309
+ class TransformerEncoder(Module):
310
+ r"""TransformerEncoder is a stack of N encoder layers.
311
+
312
+ Users can build the BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.
313
+
314
+ Args:
315
+ encoder_layer: an instance of the TransformerEncoderLayer() class (required).
316
+ num_layers: the number of sub-encoder-layers in the encoder (required).
317
+ norm: the layer normalization component (optional).
318
+ enable_nested_tensor: if True, input will automatically convert to nested tensor
319
+ (and convert back on output). This will improve the overall performance of
320
+ TransformerEncoder when padding rate is high. Default: ``True`` (enabled).
321
+
322
+ Examples::
323
+ >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
324
+ >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
325
+ >>> src = torch.rand(10, 32, 512)
326
+ >>> out = transformer_encoder(src)
327
+ """
328
+
329
+ __constants__ = ["norm"]
330
+
331
+ def __init__(
332
+ self,
333
+ encoder_layer: "TransformerEncoderLayer",
334
+ num_layers: int,
335
+ norm: Optional[Module] = None,
336
+ enable_nested_tensor: bool = True,
337
+ mask_check: bool = True,
338
+ ) -> None:
339
+ super().__init__()
340
+ torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}")
341
+ self.layers = _get_clones(encoder_layer, num_layers)
342
+ self.num_layers = num_layers
343
+ self.norm = norm
344
+ # this attribute saves the value providedat object construction
345
+ self.enable_nested_tensor = enable_nested_tensor
346
+ # this attribute controls whether nested tensors are used
347
+ self.use_nested_tensor = enable_nested_tensor
348
+ self.mask_check = mask_check
349
+
350
+ enc_layer = "encoder_layer"
351
+ why_not_sparsity_fast_path = ""
352
+ if not isinstance(encoder_layer, torch.nn.TransformerEncoderLayer):
353
+ why_not_sparsity_fast_path = f"{enc_layer} was not TransformerEncoderLayer"
354
+ elif encoder_layer.norm_first:
355
+ why_not_sparsity_fast_path = f"{enc_layer}.norm_first was True"
356
+ elif not encoder_layer.self_attn.batch_first:
357
+ why_not_sparsity_fast_path = (
358
+ f"{enc_layer}.self_attn.batch_first was not True"
359
+ + "(use batch_first for better inference performance)"
360
+ )
361
+ elif not encoder_layer.self_attn._qkv_same_embed_dim:
362
+ why_not_sparsity_fast_path = (
363
+ f"{enc_layer}.self_attn._qkv_same_embed_dim was not True"
364
+ )
365
+ elif encoder_layer.self_attn.in_proj_bias is None:
366
+ why_not_sparsity_fast_path = f"{enc_layer}.self_attn was passed bias=False"
367
+ elif not encoder_layer.activation_relu_or_gelu:
368
+ why_not_sparsity_fast_path = (
369
+ f"{enc_layer}.activation_relu_or_gelu was not True"
370
+ )
371
+ elif not (encoder_layer.norm1.eps == encoder_layer.norm2.eps):
372
+ why_not_sparsity_fast_path = (
373
+ f"{enc_layer}.norm1.eps was not equal to {enc_layer}.norm2.eps"
374
+ )
375
+ elif encoder_layer.self_attn.num_heads % 2 == 1:
376
+ why_not_sparsity_fast_path = f"{enc_layer}.self_attn.num_heads is odd"
377
+
378
+ if enable_nested_tensor and why_not_sparsity_fast_path:
379
+ warnings.warn(
380
+ f"enable_nested_tensor is True, but self.use_nested_tensor is False because {why_not_sparsity_fast_path}"
381
+ )
382
+ self.use_nested_tensor = False
383
+
384
+ def forward(
385
+ self,
386
+ src: Tensor,
387
+ mask: Optional[Tensor] = None,
388
+ src_key_padding_mask: Optional[Tensor] = None,
389
+ is_causal: Optional[bool] = None,
390
+ ) -> Tensor:
391
+ r"""Pass the input through the encoder layers in turn.
392
+
393
+ Args:
394
+ src: the sequence to the encoder (required).
395
+ mask: the mask for the src sequence (optional).
396
+ src_key_padding_mask: the mask for the src keys per batch (optional).
397
+ is_causal: If specified, applies a causal mask as ``mask``.
398
+ Default: ``None``; try to detect a causal mask.
399
+ Warning:
400
+ ``is_causal`` provides a hint that ``mask`` is the
401
+ causal mask. Providing incorrect hints can result in
402
+ incorrect execution, including forward and backward
403
+ compatibility.
404
+
405
+ Shape:
406
+ see the docs in :class:`~torch.nn.Transformer`.
407
+ """
408
+ src_key_padding_mask = F._canonical_mask(
409
+ mask=src_key_padding_mask,
410
+ mask_name="src_key_padding_mask",
411
+ other_type=F._none_or_dtype(mask),
412
+ other_name="mask",
413
+ target_type=src.dtype,
414
+ )
415
+
416
+ mask = F._canonical_mask(
417
+ mask=mask,
418
+ mask_name="mask",
419
+ other_type=None,
420
+ other_name="",
421
+ target_type=src.dtype,
422
+ check_other=False,
423
+ )
424
+
425
+ output = src
426
+ convert_to_nested = False
427
+ first_layer = self.layers[0]
428
+ src_key_padding_mask_for_layers = src_key_padding_mask
429
+ why_not_sparsity_fast_path = ""
430
+ str_first_layer = "self.layers[0]"
431
+ batch_first = first_layer.self_attn.batch_first
432
+ is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled()
433
+
434
+ if not is_fastpath_enabled:
435
+ why_not_sparsity_fast_path = (
436
+ "torch.backends.mha.get_fastpath_enabled() was not True"
437
+ )
438
+ elif not hasattr(self, "use_nested_tensor"):
439
+ why_not_sparsity_fast_path = "use_nested_tensor attribute not present"
440
+ elif not self.use_nested_tensor:
441
+ why_not_sparsity_fast_path = (
442
+ "self.use_nested_tensor (set in init) was not True"
443
+ )
444
+ elif first_layer.training:
445
+ why_not_sparsity_fast_path = f"{str_first_layer} was in training mode"
446
+ elif not src.dim() == 3:
447
+ why_not_sparsity_fast_path = (
448
+ f"input not batched; expected src.dim() of 3 but got {src.dim()}"
449
+ )
450
+ elif src_key_padding_mask is None:
451
+ why_not_sparsity_fast_path = "src_key_padding_mask was None"
452
+ elif (
453
+ (not hasattr(self, "mask_check")) or self.mask_check
454
+ ) and not torch._nested_tensor_from_mask_left_aligned(
455
+ src, src_key_padding_mask.logical_not()
456
+ ):
457
+ why_not_sparsity_fast_path = "mask_check enabled, and src and src_key_padding_mask was not left aligned"
458
+ elif output.is_nested:
459
+ why_not_sparsity_fast_path = "NestedTensor input is not supported"
460
+ elif mask is not None:
461
+ why_not_sparsity_fast_path = (
462
+ "src_key_padding_mask and mask were both supplied"
463
+ )
464
+ elif torch.is_autocast_enabled():
465
+ why_not_sparsity_fast_path = "autocast is enabled"
466
+
467
+ if not why_not_sparsity_fast_path:
468
+ tensor_args = (
469
+ src,
470
+ first_layer.self_attn.in_proj_weight,
471
+ first_layer.self_attn.in_proj_bias,
472
+ first_layer.self_attn.out_proj.weight,
473
+ first_layer.self_attn.out_proj.bias,
474
+ first_layer.norm1.weight,
475
+ first_layer.norm1.bias,
476
+ first_layer.norm2.weight,
477
+ first_layer.norm2.bias,
478
+ first_layer.linear1.weight,
479
+ first_layer.linear1.bias,
480
+ first_layer.linear2.weight,
481
+ first_layer.linear2.bias,
482
+ )
483
+ _supported_device_type = [
484
+ "cpu",
485
+ "cuda",
486
+ torch.utils.backend_registration._privateuse1_backend_name,
487
+ ]
488
+ if torch.overrides.has_torch_function(tensor_args):
489
+ why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
490
+ elif src.device.type not in _supported_device_type:
491
+ why_not_sparsity_fast_path = (
492
+ f"src device is neither one of {_supported_device_type}"
493
+ )
494
+ elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args):
495
+ why_not_sparsity_fast_path = (
496
+ "grad is enabled and at least one of query or the "
497
+ "input/output projection weights or biases requires_grad"
498
+ )
499
+
500
+ if (not why_not_sparsity_fast_path) and (src_key_padding_mask is not None):
501
+ convert_to_nested = True
502
+ output = torch._nested_tensor_from_mask(
503
+ output, src_key_padding_mask.logical_not(), mask_check=False
504
+ )
505
+ src_key_padding_mask_for_layers = None
506
+
507
+ seq_len = _get_seq_len(src, batch_first)
508
+ is_causal = _detect_is_causal_mask(mask, is_causal, seq_len)
509
+
510
+ for mod in self.layers:
511
+ output = mod(
512
+ output,
513
+ src_mask=mask,
514
+ is_causal=is_causal,
515
+ src_key_padding_mask=src_key_padding_mask_for_layers,
516
+ )
517
+
518
+ if convert_to_nested:
519
+ output = output.to_padded_tensor(0.0, src.size())
520
+
521
+ if self.norm is not None:
522
+ output = self.norm(output)
523
+
524
+ return output
525
+
526
+
527
+ class TransformerDecoder(Module):
528
+ r"""TransformerDecoder is a stack of N decoder layers.
529
+
530
+ Args:
531
+ decoder_layer: an instance of the TransformerDecoderLayer() class (required).
532
+ num_layers: the number of sub-decoder-layers in the decoder (required).
533
+ norm: the layer normalization component (optional).
534
+
535
+ Examples::
536
+ >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
537
+ >>> transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
538
+ >>> memory = torch.rand(10, 32, 512)
539
+ >>> tgt = torch.rand(20, 32, 512)
540
+ >>> out = transformer_decoder(tgt, memory)
541
+ """
542
+
543
+ __constants__ = ["norm"]
544
+
545
+ def __init__(
546
+ self,
547
+ decoder_layer: "TransformerDecoderLayer",
548
+ num_layers: int,
549
+ norm: Optional[Module] = None,
550
+ ) -> None:
551
+ super().__init__()
552
+ torch._C._log_api_usage_once(f"torch.nn.modules.{self.__class__.__name__}")
553
+ self.layers = _get_clones(decoder_layer, num_layers)
554
+ self.num_layers = num_layers
555
+ self.norm = norm
556
+
557
+ def forward(
558
+ self,
559
+ tgt: Tensor,
560
+ memory: Tensor,
561
+ tgt_mask: Optional[Tensor] = None,
562
+ memory_mask: Optional[Tensor] = None,
563
+ tgt_key_padding_mask: Optional[Tensor] = None,
564
+ memory_key_padding_mask: Optional[Tensor] = None,
565
+ tgt_is_causal: Optional[bool] = None,
566
+ memory_is_causal: bool = False,
567
+ ) -> Tensor:
568
+ r"""Pass the inputs (and mask) through the decoder layer in turn.
569
+
570
+ Args:
571
+ tgt: the sequence to the decoder (required).
572
+ memory: the sequence from the last layer of the encoder (required).
573
+ tgt_mask: the mask for the tgt sequence (optional).
574
+ memory_mask: the mask for the memory sequence (optional).
575
+ tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
576
+ memory_key_padding_mask: the mask for the memory keys per batch (optional).
577
+ tgt_is_causal: If specified, applies a causal mask as ``tgt mask``.
578
+ Default: ``None``; try to detect a causal mask.
579
+ Warning:
580
+ ``tgt_is_causal`` provides a hint that ``tgt_mask`` is
581
+ the causal mask. Providing incorrect hints can result in
582
+ incorrect execution, including forward and backward
583
+ compatibility.
584
+ memory_is_causal: If specified, applies a causal mask as
585
+ ``memory mask``.
586
+ Default: ``False``.
587
+ Warning:
588
+ ``memory_is_causal`` provides a hint that
589
+ ``memory_mask`` is the causal mask. Providing incorrect
590
+ hints can result in incorrect execution, including
591
+ forward and backward compatibility.
592
+
593
+ Shape:
594
+ see the docs in :class:`~torch.nn.Transformer`.
595
+ """
596
+ output = tgt
597
+
598
+ seq_len = _get_seq_len(tgt, self.layers[0].self_attn.batch_first)
599
+ tgt_is_causal = _detect_is_causal_mask(tgt_mask, tgt_is_causal, seq_len)
600
+
601
+ for mod in self.layers:
602
+ output = mod(
603
+ output,
604
+ memory,
605
+ tgt_mask=tgt_mask,
606
+ memory_mask=memory_mask,
607
+ tgt_key_padding_mask=tgt_key_padding_mask,
608
+ memory_key_padding_mask=memory_key_padding_mask,
609
+ tgt_is_causal=tgt_is_causal,
610
+ memory_is_causal=memory_is_causal,
611
+ )
612
+
613
+ if self.norm is not None:
614
+ output = self.norm(output)
615
+
616
+ return output
617
+
618
+
619
+ class TransformerEncoderLayer(Module):
620
+ r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
621
+
622
+ This standard encoder layer is based on the paper "Attention Is All You Need".
623
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
624
+ Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
625
+ Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
626
+ in a different way during application.
627
+
628
+ TransformerEncoderLayer can handle either traditional torch.tensor inputs,
629
+ or Nested Tensor inputs. Derived classes are expected to similarly accept
630
+ both input formats. (Not all combinations of inputs are currently
631
+ supported by TransformerEncoderLayer while Nested Tensor is in prototype
632
+ state.)
633
+
634
+ If you are implementing a custom layer, you may derive it either from
635
+ the Module or TransformerEncoderLayer class. If your custom layer
636
+ supports both torch.Tensors and Nested Tensors inputs, make its
637
+ implementation a derived class of TransformerEncoderLayer. If your custom
638
+ Layer supports only torch.Tensor inputs, derive its implementation from
639
+ Module.
640
+
641
+ Args:
642
+ d_model: the number of expected features in the input (required).
643
+ nhead: the number of heads in the multiheadattention models (required).
644
+ dim_feedforward: the dimension of the feedforward network model (default=2048).
645
+ dropout: the dropout value (default=0.1).
646
+ activation: the activation function of the intermediate layer, can be a string
647
+ ("relu" or "gelu") or a unary callable. Default: relu
648
+ layer_norm_eps: the eps value in layer normalization components (default=1e-5).
649
+ batch_first: If ``True``, then the input and output tensors are provided
650
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
651
+ norm_first: if ``True``, layer norm is done prior to attention and feedforward
652
+ operations, respectively. Otherwise it's done after. Default: ``False`` (after).
653
+ bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
654
+ bias. Default: ``True``.
655
+
656
+ Examples::
657
+ >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
658
+ >>> src = torch.rand(10, 32, 512)
659
+ >>> out = encoder_layer(src)
660
+
661
+ Alternatively, when ``batch_first`` is ``True``:
662
+ >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
663
+ >>> src = torch.rand(32, 10, 512)
664
+ >>> out = encoder_layer(src)
665
+
666
+ Fast path:
667
+ forward() will use a special optimized implementation described in
668
+ `FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`_ if all of the following
669
+ conditions are met:
670
+
671
+ - Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor
672
+ argument ``requires_grad``
673
+ - training is disabled (using ``.eval()``)
674
+ - batch_first is ``True`` and the input is batched (i.e., ``src.dim() == 3``)
675
+ - activation is one of: ``"relu"``, ``"gelu"``, ``torch.functional.relu``, or ``torch.functional.gelu``
676
+ - at most one of ``src_mask`` and ``src_key_padding_mask`` is passed
677
+ - if src is a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_, neither ``src_mask``
678
+ nor ``src_key_padding_mask`` is passed
679
+ - the two ``LayerNorm`` instances have a consistent ``eps`` value (this will naturally be the case
680
+ unless the caller has manually modified one without modifying the other)
681
+
682
+ If the optimized implementation is in use, a
683
+ `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be
684
+ passed for ``src`` to represent padding more efficiently than using a padding
685
+ mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ will be
686
+ returned, and an additional speedup proportional to the fraction of the input that
687
+ is padding can be expected.
688
+
689
+ .. _`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`:
690
+ https://arxiv.org/abs/2205.14135
691
+
692
+ """
693
+
694
+ __constants__ = ["norm_first"]
695
+
696
+ def __init__(
697
+ self,
698
+ d_model: int,
699
+ nhead: int,
700
+ dim_feedforward: int = 2048,
701
+ dropout: float = 0.1,
702
+ activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
703
+ layer_norm_eps: float = 1e-5,
704
+ batch_first: bool = False,
705
+ norm_first: bool = False,
706
+ bias: bool = True,
707
+ device=None,
708
+ dtype=None,
709
+ ) -> None:
710
+ factory_kwargs = {"device": device, "dtype": dtype}
711
+ super().__init__()
712
+ self.self_attn = MultiheadAttention(
713
+ d_model,
714
+ nhead,
715
+ dropout=dropout,
716
+ bias=bias,
717
+ batch_first=batch_first,
718
+ **factory_kwargs,
719
+ )
720
+ # Implementation of Feedforward model
721
+ self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs)
722
+ self.dropout = Dropout(dropout)
723
+ self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs)
724
+
725
+ self.norm_first = norm_first
726
+ self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
727
+ self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
728
+ self.dropout1 = Dropout(dropout)
729
+ self.dropout2 = Dropout(dropout)
730
+
731
+ # Legacy string support for activation function.
732
+ if isinstance(activation, str):
733
+ activation = _get_activation_fn(activation)
734
+
735
+ # We can't test self.activation in forward() in TorchScript,
736
+ # so stash some information about it instead.
737
+ if activation is F.relu or isinstance(activation, torch.nn.ReLU):
738
+ self.activation_relu_or_gelu = 1
739
+ elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
740
+ self.activation_relu_or_gelu = 2
741
+ else:
742
+ self.activation_relu_or_gelu = 0
743
+ self.activation = activation
744
+
745
+ def __setstate__(self, state):
746
+ super().__setstate__(state)
747
+ if not hasattr(self, "activation"):
748
+ self.activation = F.relu
749
+
750
+ def forward(
751
+ self,
752
+ src: Tensor,
753
+ src_mask: Optional[Tensor] = None,
754
+ src_key_padding_mask: Optional[Tensor] = None,
755
+ is_causal: bool = False,
756
+ ) -> Tensor:
757
+ r"""Pass the input through the encoder layer.
758
+
759
+ Args:
760
+ src: the sequence to the encoder layer (required).
761
+ src_mask: the mask for the src sequence (optional).
762
+ src_key_padding_mask: the mask for the src keys per batch (optional).
763
+ is_causal: If specified, applies a causal mask as ``src mask``.
764
+ Default: ``False``.
765
+ Warning:
766
+ ``is_causal`` provides a hint that ``src_mask`` is the
767
+ causal mask. Providing incorrect hints can result in
768
+ incorrect execution, including forward and backward
769
+ compatibility.
770
+
771
+ Shape:
772
+ see the docs in :class:`~torch.nn.Transformer`.
773
+ """
774
+ src_key_padding_mask = F._canonical_mask(
775
+ mask=src_key_padding_mask,
776
+ mask_name="src_key_padding_mask",
777
+ other_type=F._none_or_dtype(src_mask),
778
+ other_name="src_mask",
779
+ target_type=src.dtype,
780
+ )
781
+
782
+ src_mask = F._canonical_mask(
783
+ mask=src_mask,
784
+ mask_name="src_mask",
785
+ other_type=None,
786
+ other_name="",
787
+ target_type=src.dtype,
788
+ check_other=False,
789
+ )
790
+
791
+ is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled()
792
+
793
+ why_not_sparsity_fast_path = ""
794
+ if not is_fastpath_enabled:
795
+ why_not_sparsity_fast_path = (
796
+ "torch.backends.mha.get_fastpath_enabled() was not True"
797
+ )
798
+ elif not src.dim() == 3:
799
+ why_not_sparsity_fast_path = (
800
+ f"input not batched; expected src.dim() of 3 but got {src.dim()}"
801
+ )
802
+ elif self.training:
803
+ why_not_sparsity_fast_path = "training is enabled"
804
+ elif not self.self_attn.batch_first:
805
+ why_not_sparsity_fast_path = "self_attn.batch_first was not True"
806
+ elif self.self_attn.in_proj_bias is None:
807
+ why_not_sparsity_fast_path = "self_attn was passed bias=False"
808
+ elif not self.self_attn._qkv_same_embed_dim:
809
+ why_not_sparsity_fast_path = "self_attn._qkv_same_embed_dim was not True"
810
+ elif not self.activation_relu_or_gelu:
811
+ why_not_sparsity_fast_path = "activation_relu_or_gelu was not True"
812
+ elif not (self.norm1.eps == self.norm2.eps):
813
+ why_not_sparsity_fast_path = "norm1.eps is not equal to norm2.eps"
814
+ elif src.is_nested and (
815
+ src_key_padding_mask is not None or src_mask is not None
816
+ ):
817
+ why_not_sparsity_fast_path = "neither src_key_padding_mask nor src_mask are not supported with NestedTensor input"
818
+ elif self.self_attn.num_heads % 2 == 1:
819
+ why_not_sparsity_fast_path = "num_head is odd"
820
+ elif torch.is_autocast_enabled():
821
+ why_not_sparsity_fast_path = "autocast is enabled"
822
+ elif any(
823
+ len(getattr(m, "_forward_hooks", {}))
824
+ + len(getattr(m, "_forward_pre_hooks", {}))
825
+ for m in self.modules()
826
+ ):
827
+ why_not_sparsity_fast_path = "forward pre-/hooks are attached to the module"
828
+ if not why_not_sparsity_fast_path:
829
+ tensor_args = (
830
+ src,
831
+ self.self_attn.in_proj_weight,
832
+ self.self_attn.in_proj_bias,
833
+ self.self_attn.out_proj.weight,
834
+ self.self_attn.out_proj.bias,
835
+ self.norm1.weight,
836
+ self.norm1.bias,
837
+ self.norm2.weight,
838
+ self.norm2.bias,
839
+ self.linear1.weight,
840
+ self.linear1.bias,
841
+ self.linear2.weight,
842
+ self.linear2.bias,
843
+ )
844
+
845
+ # We have to use list comprehensions below because TorchScript does not support
846
+ # generator expressions.
847
+ _supported_device_type = [
848
+ "cpu",
849
+ "cuda",
850
+ torch.utils.backend_registration._privateuse1_backend_name,
851
+ ]
852
+ if torch.overrides.has_torch_function(tensor_args):
853
+ why_not_sparsity_fast_path = "some Tensor argument has_torch_function"
854
+ elif not all(
855
+ (x.device.type in _supported_device_type) for x in tensor_args
856
+ ):
857
+ why_not_sparsity_fast_path = (
858
+ "some Tensor argument's device is neither one of "
859
+ f"{_supported_device_type}"
860
+ )
861
+ elif torch.is_grad_enabled() and any(x.requires_grad for x in tensor_args):
862
+ why_not_sparsity_fast_path = (
863
+ "grad is enabled and at least one of query or the "
864
+ "input/output projection weights or biases requires_grad"
865
+ )
866
+
867
+ if not why_not_sparsity_fast_path:
868
+ merged_mask, mask_type = self.self_attn.merge_masks(
869
+ src_mask, src_key_padding_mask, src
870
+ )
871
+ return torch._transformer_encoder_layer_fwd(
872
+ src,
873
+ self.self_attn.embed_dim,
874
+ self.self_attn.num_heads,
875
+ self.self_attn.in_proj_weight,
876
+ self.self_attn.in_proj_bias,
877
+ self.self_attn.out_proj.weight,
878
+ self.self_attn.out_proj.bias,
879
+ self.activation_relu_or_gelu == 2,
880
+ self.norm_first,
881
+ self.norm1.eps,
882
+ self.norm1.weight,
883
+ self.norm1.bias,
884
+ self.norm2.weight,
885
+ self.norm2.bias,
886
+ self.linear1.weight,
887
+ self.linear1.bias,
888
+ self.linear2.weight,
889
+ self.linear2.bias,
890
+ merged_mask,
891
+ mask_type,
892
+ )
893
+
894
+ # see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
895
+ x = src
896
+ if self.norm_first:
897
+ x = x + self._sa_block(
898
+ self.norm1(x), src_mask, src_key_padding_mask, is_causal=is_causal
899
+ )
900
+ x = x + self._ff_block(self.norm2(x))
901
+ else:
902
+ x = self.norm1(
903
+ x
904
+ + self._sa_block(x, src_mask, src_key_padding_mask, is_causal=is_causal)
905
+ )
906
+ x = self.norm2(x + self._ff_block(x))
907
+
908
+ return x
909
+
910
+ # self-attention block
911
+ def _sa_block(
912
+ self,
913
+ x: Tensor,
914
+ attn_mask: Optional[Tensor],
915
+ key_padding_mask: Optional[Tensor],
916
+ is_causal: bool = False,
917
+ ) -> Tensor:
918
+ x = self.self_attn(
919
+ x,
920
+ x,
921
+ x,
922
+ attn_mask=attn_mask,
923
+ key_padding_mask=key_padding_mask,
924
+ need_weights=False,
925
+ is_causal=is_causal,
926
+ )[0]
927
+ return self.dropout1(x)
928
+
929
+ # feed forward block
930
+ def _ff_block(self, x: Tensor) -> Tensor:
931
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
932
+ return self.dropout2(x)
933
+
934
+
935
+ class TransformerDecoderLayer(Module):
936
+ r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
937
+
938
+ This standard decoder layer is based on the paper "Attention Is All You Need".
939
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
940
+ Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
941
+ Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
942
+ in a different way during application.
943
+
944
+ Args:
945
+ d_model: the number of expected features in the input (required).
946
+ nhead: the number of heads in the multiheadattention models (required).
947
+ dim_feedforward: the dimension of the feedforward network model (default=2048).
948
+ dropout: the dropout value (default=0.1).
949
+ activation: the activation function of the intermediate layer, can be a string
950
+ ("relu" or "gelu") or a unary callable. Default: relu
951
+ layer_norm_eps: the eps value in layer normalization components (default=1e-5).
952
+ batch_first: If ``True``, then the input and output tensors are provided
953
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
954
+ norm_first: if ``True``, layer norm is done prior to self attention, multihead
955
+ attention and feedforward operations, respectively. Otherwise it's done after.
956
+ Default: ``False`` (after).
957
+ bias: If set to ``False``, ``Linear`` and ``LayerNorm`` layers will not learn an additive
958
+ bias. Default: ``True``.
959
+
960
+ Examples::
961
+ >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
962
+ >>> memory = torch.rand(10, 32, 512)
963
+ >>> tgt = torch.rand(20, 32, 512)
964
+ >>> out = decoder_layer(tgt, memory)
965
+
966
+ Alternatively, when ``batch_first`` is ``True``:
967
+ >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=True)
968
+ >>> memory = torch.rand(32, 10, 512)
969
+ >>> tgt = torch.rand(32, 20, 512)
970
+ >>> out = decoder_layer(tgt, memory)
971
+ """
972
+
973
+ __constants__ = ["norm_first"]
974
+
975
+ def __init__(
976
+ self,
977
+ d_model: int,
978
+ nhead: int,
979
+ dim_feedforward: int = 2048,
980
+ dropout: float = 0.1,
981
+ activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
982
+ layer_norm_eps: float = 1e-5,
983
+ batch_first: bool = False,
984
+ norm_first: bool = False,
985
+ bias: bool = True,
986
+ device=None,
987
+ dtype=None,
988
+ ) -> None:
989
+ factory_kwargs = {"device": device, "dtype": dtype}
990
+ super().__init__()
991
+ self.self_attn = MultiheadAttention(
992
+ d_model,
993
+ nhead,
994
+ dropout=dropout,
995
+ batch_first=batch_first,
996
+ bias=bias,
997
+ **factory_kwargs,
998
+ )
999
+ self.multihead_attn = MultiheadAttention(
1000
+ d_model,
1001
+ nhead,
1002
+ dropout=dropout,
1003
+ batch_first=batch_first,
1004
+ bias=bias,
1005
+ **factory_kwargs,
1006
+ )
1007
+ # Implementation of Feedforward model
1008
+ self.linear1 = Linear(d_model, dim_feedforward, bias=bias, **factory_kwargs)
1009
+ self.dropout = Dropout(dropout)
1010
+ self.linear2 = Linear(dim_feedforward, d_model, bias=bias, **factory_kwargs)
1011
+
1012
+ self.norm_first = norm_first
1013
+ self.norm1 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
1014
+ self.norm2 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
1015
+ self.norm3 = LayerNorm(d_model, eps=layer_norm_eps, bias=bias, **factory_kwargs)
1016
+ self.dropout1 = Dropout(dropout)
1017
+ self.dropout2 = Dropout(dropout)
1018
+ self.dropout3 = Dropout(dropout)
1019
+
1020
+ # Legacy string support for activation function.
1021
+ if isinstance(activation, str):
1022
+ self.activation = _get_activation_fn(activation)
1023
+ else:
1024
+ self.activation = activation
1025
+
1026
+ def __setstate__(self, state):
1027
+ if "activation" not in state:
1028
+ state["activation"] = F.relu
1029
+ super().__setstate__(state)
1030
+
1031
+ def forward(
1032
+ self,
1033
+ tgt: Tensor,
1034
+ memory: Tensor,
1035
+ tgt_mask: Optional[Tensor] = None,
1036
+ memory_mask: Optional[Tensor] = None,
1037
+ tgt_key_padding_mask: Optional[Tensor] = None,
1038
+ memory_key_padding_mask: Optional[Tensor] = None,
1039
+ tgt_is_causal: bool = False,
1040
+ memory_is_causal: bool = False,
1041
+ ) -> Tensor:
1042
+ r"""Pass the inputs (and mask) through the decoder layer.
1043
+
1044
+ Args:
1045
+ tgt: the sequence to the decoder layer (required).
1046
+ memory: the sequence from the last layer of the encoder (required).
1047
+ tgt_mask: the mask for the tgt sequence (optional).
1048
+ memory_mask: the mask for the memory sequence (optional).
1049
+ tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
1050
+ memory_key_padding_mask: the mask for the memory keys per batch (optional).
1051
+ tgt_is_causal: If specified, applies a causal mask as ``tgt mask``.
1052
+ Default: ``False``.
1053
+ Warning:
1054
+ ``tgt_is_causal`` provides a hint that ``tgt_mask`` is
1055
+ the causal mask. Providing incorrect hints can result in
1056
+ incorrect execution, including forward and backward
1057
+ compatibility.
1058
+ memory_is_causal: If specified, applies a causal mask as
1059
+ ``memory mask``.
1060
+ Default: ``False``.
1061
+ Warning:
1062
+ ``memory_is_causal`` provides a hint that
1063
+ ``memory_mask`` is the causal mask. Providing incorrect
1064
+ hints can result in incorrect execution, including
1065
+ forward and backward compatibility.
1066
+
1067
+ Shape:
1068
+ see the docs in :class:`~torch.nn.Transformer`.
1069
+ """
1070
+ # see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
1071
+
1072
+ x = tgt
1073
+ if self.norm_first:
1074
+ x = x + self._sa_block(
1075
+ self.norm1(x), tgt_mask, tgt_key_padding_mask, tgt_is_causal
1076
+ )
1077
+ x = x + self._mha_block(
1078
+ self.norm2(x),
1079
+ memory,
1080
+ memory_mask,
1081
+ memory_key_padding_mask,
1082
+ memory_is_causal,
1083
+ )
1084
+ x = x + self._ff_block(self.norm3(x))
1085
+ else:
1086
+ x = self.norm1(
1087
+ x + self._sa_block(x, tgt_mask, tgt_key_padding_mask, tgt_is_causal)
1088
+ )
1089
+ x = self.norm2(
1090
+ x
1091
+ + self._mha_block(
1092
+ x, memory, memory_mask, memory_key_padding_mask, memory_is_causal
1093
+ )
1094
+ )
1095
+ x = self.norm3(x + self._ff_block(x))
1096
+
1097
+ return x
1098
+
1099
+ # self-attention block
1100
+ def _sa_block(
1101
+ self,
1102
+ x: Tensor,
1103
+ attn_mask: Optional[Tensor],
1104
+ key_padding_mask: Optional[Tensor],
1105
+ is_causal: bool = False,
1106
+ ) -> Tensor:
1107
+ x = self.self_attn(
1108
+ x,
1109
+ x,
1110
+ x,
1111
+ attn_mask=attn_mask,
1112
+ key_padding_mask=key_padding_mask,
1113
+ is_causal=is_causal,
1114
+ need_weights=False,
1115
+ )[0]
1116
+ return self.dropout1(x)
1117
+
1118
+ # multihead attention block
1119
+ def _mha_block(
1120
+ self,
1121
+ x: Tensor,
1122
+ mem: Tensor,
1123
+ attn_mask: Optional[Tensor],
1124
+ key_padding_mask: Optional[Tensor],
1125
+ is_causal: bool = False,
1126
+ ) -> Tensor:
1127
+ x = self.multihead_attn(
1128
+ x,
1129
+ mem,
1130
+ mem,
1131
+ attn_mask=attn_mask,
1132
+ key_padding_mask=key_padding_mask,
1133
+ is_causal=is_causal,
1134
+ need_weights=False,
1135
+ )[0]
1136
+ return self.dropout2(x)
1137
+
1138
+ # feed forward block
1139
+ def _ff_block(self, x: Tensor) -> Tensor:
1140
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
1141
+ return self.dropout3(x)
1142
+
1143
+
1144
+ def _get_clones(module, N):
1145
+ # FIXME: copy.deepcopy() is not defined on nn.module
1146
+ return ModuleList([copy.deepcopy(module) for i in range(N)])
1147
+
1148
+
1149
+ def _get_activation_fn(activation: str) -> Callable[[Tensor], Tensor]:
1150
+ if activation == "relu":
1151
+ return F.relu
1152
+ elif activation == "gelu":
1153
+ return F.gelu
1154
+
1155
+ raise RuntimeError(f"activation should be relu/gelu, not {activation}")
1156
+
1157
+
1158
+ def _detect_is_causal_mask(
1159
+ mask: Optional[Tensor],
1160
+ is_causal: Optional[bool] = None,
1161
+ size: Optional[int] = None,
1162
+ ) -> bool:
1163
+ """Return whether the given attention mask is causal.
1164
+
1165
+ Warning:
1166
+ If ``is_causal`` is not ``None``, its value will be returned as is. If a
1167
+ user supplies an incorrect ``is_causal`` hint,
1168
+
1169
+ ``is_causal=False`` when the mask is in fact a causal attention.mask
1170
+ may lead to reduced performance relative to what would be achievable
1171
+ with ``is_causal=True``;
1172
+ ``is_causal=True`` when the mask is in fact not a causal attention.mask
1173
+ may lead to incorrect and unpredictable execution - in some scenarios,
1174
+ a causal mask may be applied based on the hint, in other execution
1175
+ scenarios the specified mask may be used. The choice may not appear
1176
+ to be deterministic, in that a number of factors like alignment,
1177
+ hardware SKU, etc influence the decision whether to use a mask or
1178
+ rely on the hint.
1179
+ ``size`` if not None, check whether the mask is a causal mask of the provided size
1180
+ Otherwise, checks for any causal mask.
1181
+ """
1182
+ # Prevent type refinement
1183
+ make_causal = is_causal is True
1184
+
1185
+ if is_causal is None and mask is not None:
1186
+ sz = size if size is not None else mask.size(-2)
1187
+ causal_comparison = _generate_square_subsequent_mask(
1188
+ sz, device=mask.device, dtype=mask.dtype
1189
+ )
1190
+
1191
+ # Do not use `torch.equal` so we handle batched masks by
1192
+ # broadcasting the comparison.
1193
+ if mask.size() == causal_comparison.size():
1194
+ make_causal = bool((mask == causal_comparison).all())
1195
+ else:
1196
+ make_causal = False
1197
+
1198
+ return make_causal
janus/lib/python3.10/site-packages/torch/nn/quantizable/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (216 Bytes). View file
 
janus/lib/python3.10/site-packages/torch/nn/quantizable/modules/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from torch.ao.nn.quantizable.modules.activation import MultiheadAttention
2
+ from torch.ao.nn.quantizable.modules.rnn import LSTM, LSTMCell
3
+
4
+
5
+ __all__ = [
6
+ "LSTM",
7
+ "LSTMCell",
8
+ "MultiheadAttention",
9
+ ]
janus/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (383 Bytes). View file
 
janus/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc ADDED
Binary file (622 Bytes). View file
 
janus/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (613 Bytes). View file
 
janus/lib/python3.10/site-packages/torch/nn/quantizable/modules/activation.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""Quantizable Modules.
3
+
4
+ This file is in the process of migration to `torch/ao/nn/quantizable`, and
5
+ is kept here for compatibility while the migration process is ongoing.
6
+ If you are adding a new entry/functionality, please, add it to the
7
+ appropriate file under the `torch/ao/nn/quantizable/modules`,
8
+ while adding an import statement here.
9
+ """
10
+ from torch.ao.nn.quantizable.modules.activation import MultiheadAttention
janus/lib/python3.10/site-packages/torch/nn/quantizable/modules/rnn.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""Quantizable Modules.
3
+
4
+ This file is in the process of migration to `torch/ao/nn/quantizable`, and
5
+ is kept here for compatibility while the migration process is ongoing.
6
+ If you are adding a new entry/functionality, please, add it to the
7
+ appropriate file under the `torch/ao/nn/quantizable/modules`,
8
+ while adding an import statement here.
9
+ """
10
+
11
+ from torch.ao.nn.quantizable.modules.rnn import LSTM, LSTMCell
janus/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (223 Bytes). View file
 
janus/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.13 kB). View file
 
janus/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc ADDED
Binary file (759 Bytes). View file