ZTWHHH committed
Commit a9939d3 · verified · 1 Parent(s): 324a4e3

Add files using upload-large-folder tool

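The commit message above references the Hub's upload-large-folder tool. For context, a minimal sketch of how such an upload is typically produced, assuming the huggingface_hub client and its HfApi.upload_large_folder method; the repo id and local folder path below are placeholders, not values taken from this commit:

# Minimal sketch (assumptions: huggingface_hub is installed and exposes
# HfApi.upload_large_folder; repo id and folder path are placeholders).
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="ZTWHHH/example-repo",   # placeholder repo id
    repo_type="model",               # upload_large_folder requires an explicit repo type
    folder_path="./envs",            # placeholder local folder whose files get committed
)

The tool splits the folder into many LFS/regular uploads and creates commits like this one with the default message "Add files using upload-large-folder tool".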
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +2 -1
  2. parrot/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so +3 -0
  3. parrot/lib/python3.10/site-packages/opencv_python.libs/libavcodec-9aae324f.so.59.37.100 +3 -0
  4. wemm/lib/python3.10/site-packages/torch/futures/__init__.py +318 -0
  5. wemm/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc +0 -0
  6. wemm/lib/python3.10/site-packages/torch/nn/intrinsic/__pycache__/__init__.cpython-310.pyc +0 -0
  7. wemm/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__init__.py +31 -0
  8. wemm/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  9. wemm/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc +0 -0
  10. wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__init__.py +1 -0
  11. wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__init__.py +31 -0
  12. wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  13. wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc +0 -0
  14. wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/conv_fused.py +37 -0
  15. wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_fused.py +15 -0
  16. wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_relu.py +15 -0
  17. wemm/lib/python3.10/site-packages/torch/nn/parallel/__init__.py +14 -0
  18. wemm/lib/python3.10/site-packages/torch/nn/parallel/__init__.pyi +5 -0
  19. wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc +0 -0
  20. wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc +0 -0
  21. wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_replicated_tensor_ddp_interop.cpython-310.pyc +0 -0
  22. wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_replicated_tensor_ddp_utils.cpython-310.pyc +0 -0
  23. wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc +0 -0
  24. wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc +0 -0
  25. wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc +0 -0
  26. wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc +0 -0
  27. wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc +0 -0
  28. wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc +0 -0
  29. wemm/lib/python3.10/site-packages/torch/nn/parallel/_functions.py +124 -0
  30. wemm/lib/python3.10/site-packages/torch/nn/parallel/_replicated_tensor_ddp_interop.py +46 -0
  31. wemm/lib/python3.10/site-packages/torch/nn/parallel/_replicated_tensor_ddp_utils.py +31 -0
  32. wemm/lib/python3.10/site-packages/torch/nn/parallel/comm.py +241 -0
  33. wemm/lib/python3.10/site-packages/torch/nn/parallel/common_types.pyi +5 -0
  34. wemm/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py +235 -0
  35. wemm/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.pyi +19 -0
  36. wemm/lib/python3.10/site-packages/torch/nn/parallel/distributed.py +1921 -0
  37. wemm/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py +91 -0
  38. wemm/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.pyi +7 -0
  39. wemm/lib/python3.10/site-packages/torch/nn/parallel/replicate.py +167 -0
  40. wemm/lib/python3.10/site-packages/torch/nn/parallel/replicate.pyi +7 -0
  41. wemm/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.py +89 -0
  42. wemm/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.pyi +24 -0
  43. wemm/lib/python3.10/site-packages/torch/nn/qat/__pycache__/__init__.cpython-310.pyc +0 -0
  44. wemm/lib/python3.10/site-packages/torch/nn/qat/dynamic/__init__.py +7 -0
  45. wemm/lib/python3.10/site-packages/torch/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
  46. wemm/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__init__.py +3 -0
  47. wemm/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  48. wemm/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc +0 -0
  49. wemm/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/linear.py +10 -0
  50. wemm/lib/python3.10/site-packages/torch/nn/qat/modules/__init__.py +24 -0
.gitattributes CHANGED
@@ -214,4 +214,5 @@ wemm/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310
  wemm/lib/python3.10/site-packages/sympy/logic/__pycache__/boolalg.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
  wemm/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
  wemm/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
- wemm/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ parrot/lib/python3.10/site-packages/opencv_python.libs/libavcodec-9aae324f.so.59.37.100 filter=lfs diff=lfs merge=lfs -text
+ parrot/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abfd599f616509de8b6976308e40eb13faead2516e3700e318126558cfcdb9f8
+ size 11110784
parrot/lib/python3.10/site-packages/opencv_python.libs/libavcodec-9aae324f.so.59.37.100 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82a8362a2fba0bb6d6b8f565c472f2ae30fafa0f6836c7701a0ea4641dad0616
+ size 13448513
wemm/lib/python3.10/site-packages/torch/futures/__init__.py ADDED
@@ -0,0 +1,318 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import cast, Callable, Generic, List, Optional, Type, TypeVar, Union
4
+
5
+ import torch
6
+
7
+ __all__ = ['Future', 'collect_all', 'wait_all']
8
+
9
+ T = TypeVar("T")
10
+ S = TypeVar("S")
11
+
12
+
13
+ class _PyFutureMeta(type(torch._C.Future), type(Generic)): # type: ignore[misc, no-redef]
14
+ pass
15
+
16
+
17
+ class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta):
18
+ r"""
19
+ Wrapper around a ``torch._C.Future`` which encapsulates an asynchronous
20
+ execution of a callable, e.g. :meth:`~torch.distributed.rpc.rpc_async`. It
21
+ also exposes a set of APIs to add callback functions and set results.
22
+
23
+ .. warning:: GPU support is a beta feature, subject to changes.
24
+ """
25
+
26
+ def __init__(self, *, devices: Optional[List[Union[int, str, torch.device]]] = None):
27
+ r"""
28
+ Create an empty unset ``Future``. If the future is intended to hold
29
+ values containing CUDA tensors, (a superset of) their CUDA devices must
30
+ be specified at construction. (This is only supported if
31
+ ``torch.cuda.is_available()`` returns ``True``). This is needed to
32
+ ensure proper CUDA stream synchronization. The child futures, returned
33
+ by the ``then`` method, will inherit these devices.
34
+
35
+ Args:
36
+ devices(``List[Union[int, str, torch.device]]``, optional): the set
37
+ of devices on which tensors contained in this future's value are
38
+ allowed to reside and on which callbacks are allowed to operate.
39
+ """
40
+ if devices is None:
41
+ devices = []
42
+ super().__init__([torch.device(d) for d in devices])
43
+
44
+ def done(self) -> bool:
45
+ r"""
46
+ Return ``True`` if this ``Future`` is done. A ``Future`` is done if it
47
+ has a result or an exception.
48
+
49
+ If the value contains tensors that reside on GPUs, ``Future.done()``
50
+ will return ``True`` even if the asynchronous kernels that are
51
+ populating those tensors haven't yet completed running on the device,
52
+ because at such stage the result is already usable, provided one
53
+ performs the appropriate synchronizations (see :meth:`wait`).
54
+ """
55
+ return super().done()
56
+
57
+ def wait(self) -> T:
58
+ r"""
59
+ Block until the value of this ``Future`` is ready.
60
+
61
+ If the value contains tensors that reside on GPUs, then an additional
62
+ synchronization is performed with the kernels (executing on the device)
63
+ which may be asynchronously populating those tensors. Such sync is
64
+ non-blocking, which means that ``wait()`` will insert the necessary
65
+ instructions in the current streams to ensure that further operations
66
+ enqueued on those streams will be properly scheduled after the async
67
+ kernels but, once that is done, ``wait()`` will return, even if those
68
+ kernels are still running. No further synchronization is required when
69
+ accessing and using the values, as long as one doesn't change streams.
70
+
71
+ Returns:
72
+ The value held by this ``Future``. If the function (callback or RPC)
73
+ creating the value has thrown an error, this ``wait`` method will
74
+ also throw an error.
75
+ """
76
+ return super().wait()
77
+
78
+ def value(self) -> T:
79
+ r"""
80
+ Obtain the value of an already-completed future.
81
+
82
+ This method should only be called after a call to :meth:`wait` has
83
+ completed, or inside a callback function passed to :meth:`then`. In
84
+ other cases this ``Future`` may not yet hold a value and calling
85
+ ``value()`` could fail.
86
+
87
+ If the value contains tensors that reside on GPUs, then this method will
88
+ *not* perform any additional synchronization. This should be done
89
+ beforehand, separately, through a call to :meth:`wait` (except within
90
+ callbacks, for which it's already being taken care of by :meth:`then`).
91
+
92
+ Returns:
93
+ The value held by this ``Future``. If the function (callback or RPC)
94
+ creating the value has thrown an error, this ``value()`` method will
95
+ also throw an error.
96
+ """
97
+ return super().value()
98
+
99
+ def then(self, callback: Callable[[Future[T]], S]) -> Future[S]:
100
+ r"""
101
+ Append the given callback function to this ``Future``, which will be run
102
+ when the ``Future`` is completed. Multiple callbacks can be added to
103
+ the same ``Future``, but the order in which they will be executed cannot
104
+ be guaranteed (to enforce a certain order consider chaining:
105
+ ``fut.then(cb1).then(cb2)``). The callback must take one argument, which
106
+ is the reference to this ``Future``. The callback function can use the
107
+ :meth:`value` method to get the value. Note that if this ``Future`` is
108
+ already completed, the given callback will be run immediately inline.
109
+
110
+ If the ``Future``'s value contains tensors that reside on GPUs, the
111
+ callback might be invoked while the async kernels that are populating
112
+ those tensors haven't yet finished executing on the device. However, the
113
+ callback will be invoked with some dedicated streams set as current
114
+ (fetched from a global pool) which will be synchronized with those
115
+ kernels. Hence any operation performed by the callback on these tensors
116
+ will be scheduled on the device after the kernels complete. In other
117
+ words, as long as the callback doesn't switch streams, it can safely
118
+ manipulate the result without any additional synchronization. This is
119
+ similar to the non-blocking behavior of :meth:`wait`.
120
+
121
+ Similarly, if the callback returns a value that contains tensors that
122
+ reside on a GPU, it can do so even if the kernels that are producing
123
+ these tensors are still running on the device, as long as the callback
124
+ didn't change streams during its execution. If one wants to change
125
+ streams, one must be careful to re-synchronize them with the original
126
+ streams, that is, those that were current when the callback was invoked.
127
+
128
+ Args:
129
+ callback(``Callable``): a ``Callable`` that takes this ``Future`` as
130
+ the only argument.
131
+
132
+ Returns:
133
+ A new ``Future`` object that holds the return value of the
134
+ ``callback`` and will be marked as completed when the given
135
+ ``callback`` finishes.
136
+
137
+ .. note:: Note that if the callback function throws, either
138
+ through the original future being completed with an exception and
139
+ calling ``fut.wait()``, or through other code in the callback, the
140
+ future returned by ``then`` will be marked appropriately with the
141
+ encountered error. However, if this callback later completes
142
+ additional futures, those futures are not marked as completed with
143
+ an error and the user is responsible for handling completion/waiting
144
+ on those futures independently.
145
+
146
+ Example::
147
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
148
+ >>> def callback(fut):
149
+ ... print(f"RPC return value is {fut.wait()}.")
150
+ >>> fut = torch.futures.Future()
151
+ >>> # The inserted callback will print the return value when
152
+ >>> # receiving the response from "worker1"
153
+ >>> cb_fut = fut.then(callback)
154
+ >>> chain_cb_fut = cb_fut.then(
155
+ ... lambda x : print(f"Chained cb done. {x.wait()}")
156
+ ... )
157
+ >>> fut.set_result(5)
158
+ RPC return value is 5.
159
+ Chained cb done. None
160
+ """
161
+ return cast(Future[S], super().then(callback))
162
+
163
+ def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None:
164
+ r"""
165
+ Append the given callback function to this ``Future``, which will be run
166
+ when the ``Future`` is completed. Multiple callbacks can be added to
167
+ the same ``Future``, but the order in which they will be executed cannot
168
+ be guaranteed. The callback must take one argument, which is the
169
+ reference to this ``Future``. The callback function can use the
170
+ :meth:`value` method to get the value. Note that if this ``Future`` is
171
+ already completed, the given callback will be run inline.
172
+
173
+ We recommend that you use the :meth:`then` method as it provides a way
174
+ to synchronize after your callback has completed. ``add_done_callback``
175
+ can be cheaper if your callback does not return anything. But both
176
+ :meth:`then` and ``add_done_callback`` use the same callback
177
+ registration API under the hood.
178
+
179
+ With respect to GPU tensors, this method behaves in the same way as
180
+ :meth:`then`.
181
+
182
+ Args:
183
+ callback(``Future``): a ``Callable`` that takes in one argument,
184
+ which is the reference to this ``Future``.
185
+
186
+ .. note:: Note that if the callback function throws, either
187
+ through the original future being completed with an exception and
188
+ calling ``fut.wait()``, or through other code in the callback,
189
+ error handling must be carefully taken care of. For example, if
190
+ this callback later completes additional futures, those futures are
191
+ not marked as completed with an error and the user is responsible
192
+ for handling completion/waiting on those futures independently.
193
+
194
+ Example::
195
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
196
+ >>> def callback(fut):
197
+ ... print("This will run after the future has finished.")
198
+ ... print(fut.wait())
199
+ >>> fut = torch.futures.Future()
200
+ >>> fut.add_done_callback(callback)
201
+ >>> fut.set_result(5)
202
+ This will run after the future has finished.
203
+ 5
204
+ """
205
+ super().add_done_callback(callback)
206
+
207
+ def set_result(self, result: T) -> None:
208
+ r"""
209
+ Set the result for this ``Future``, which will mark this ``Future`` as
210
+ completed and trigger all attached callbacks. Note that a ``Future``
211
+ cannot be marked completed twice.
212
+
213
+ If the result contains tensors that reside on GPUs, this method can be
214
+ called even if the asynchronous kernels that are populating those
215
+ tensors haven't yet completed running on the device, provided that the
216
+ streams on which those kernels were enqueued are set as the current ones
217
+ when this method is called. Put simply, it's safe to call this method
218
+ immediately after launching those kernels, without any additional
219
+ synchronization, as long as one doesn't change streams in between. This
220
+ method will record events on all the relevant current streams and will
221
+ use them to ensure proper scheduling for all the consumers of this
222
+ ``Future``.
223
+
224
+ Args:
225
+ result (object): the result object of this ``Future``.
226
+
227
+ Example::
228
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
229
+ >>> import threading
230
+ >>> import time
231
+ >>> def slow_set_future(fut, value):
232
+ ... time.sleep(0.5)
233
+ ... fut.set_result(value)
234
+ >>> fut = torch.futures.Future()
235
+ >>> t = threading.Thread(
236
+ ... target=slow_set_future,
237
+ ... args=(fut, torch.ones(2) * 3)
238
+ ... )
239
+ >>> t.start()
240
+ >>> print(fut.wait())
241
+ tensor([3., 3.])
242
+ >>> t.join()
243
+ """
244
+ super().set_result(result)
245
+
246
+ def set_exception(self, result: T) -> None:
247
+ r"""
248
+ Set an exception for this ``Future``, which will mark this ``Future`` as
249
+ completed with an error and trigger all attached callbacks. Note that
250
+ when calling wait()/value() on this ``Future``, the exception set here
251
+ will be raised inline.
252
+
253
+ Args:
254
+ result (BaseException): the exception for this ``Future``.
255
+
256
+ Example::
257
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
258
+ >>> fut = torch.futures.Future()
259
+ >>> fut.set_exception(ValueError("foo"))
260
+ >>> fut.wait()
261
+ Traceback (most recent call last):
262
+ ...
263
+ ValueError: foo
264
+ """
265
+ assert isinstance(result, Exception), f"{result} is of type {type(result)}, not an Exception."
266
+
267
+ def raise_error(fut_result):
268
+ raise fut_result
269
+
270
+ super()._set_unwrap_func(raise_error)
271
+ self.set_result(result) # type: ignore[arg-type]
272
+
273
+
274
+ def collect_all(futures: List[Future]) -> Future[List[Future]]:
275
+ r"""
276
+ Collects the provided :class:`~torch.futures.Future` objects into a single
277
+ combined :class:`~torch.futures.Future` that is completed when all of the
278
+ sub-futures are completed.
279
+
280
+ Args:
281
+ futures (list): a list of :class:`~torch.futures.Future` objects.
282
+
283
+ Returns:
284
+ Returns a :class:`~torch.futures.Future` object to a list of the passed
285
+ in Futures.
286
+
287
+ Example::
288
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
289
+ >>> fut0 = torch.futures.Future()
290
+ >>> fut1 = torch.futures.Future()
291
+ >>> fut = torch.futures.collect_all([fut0, fut1])
292
+ >>> fut0.set_result(0)
293
+ >>> fut1.set_result(1)
294
+ >>> fut_list = fut.wait()
295
+ >>> print(f"fut0 result = {fut_list[0].wait()}")
296
+ fut0 result = 0
297
+ >>> print(f"fut1 result = {fut_list[1].wait()}")
298
+ fut1 result = 1
299
+ """
300
+ return cast(Future[List[Future]], torch._C._collect_all(cast(List[torch._C.Future], futures)))
301
+
302
+
303
+ def wait_all(futures: List[Future]) -> List:
304
+ r"""
305
+ Waits for all provided futures to be complete, and returns
306
+ the list of completed values. If any of the futures encounters an error,
307
+ the method will exit early and report the error not waiting for other
308
+ futures to complete.
309
+
310
+ Args:
311
+ futures (list): a list of :class:`~torch.futures.Future` object.
312
+
313
+ Returns:
314
+ A list of the completed :class:`~torch.futures.Future` results. This
315
+ method will throw an error if ``wait`` on any
316
+ :class:`~torch.futures.Future` throws.
317
+ """
318
+ return [fut.wait() for fut in torch._C._collect_all(cast(List[torch._C.Future], futures)).wait()]
wemm/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (15.8 kB).
 
wemm/lib/python3.10/site-packages/torch/nn/intrinsic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (869 Bytes).
 
wemm/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__init__.py ADDED
@@ -0,0 +1,31 @@
+ from .fused import _FusedModule # noqa: F401
+ from .fused import BNReLU2d
+ from .fused import BNReLU3d
+ from .fused import ConvBn1d
+ from .fused import ConvBn2d
+ from .fused import ConvBn3d
+ from .fused import ConvBnReLU1d
+ from .fused import ConvBnReLU2d
+ from .fused import ConvBnReLU3d
+ from .fused import ConvReLU1d
+ from .fused import ConvReLU2d
+ from .fused import ConvReLU3d
+ from .fused import LinearBn1d
+ from .fused import LinearReLU
+
+
+ __all__ = [
+     'BNReLU2d',
+     'BNReLU3d',
+     'ConvBn1d',
+     'ConvBn2d',
+     'ConvBn3d',
+     'ConvBnReLU1d',
+     'ConvBnReLU2d',
+     'ConvBnReLU3d',
+     'ConvReLU1d',
+     'ConvReLU2d',
+     'ConvReLU3d',
+     'LinearBn1d',
+     'LinearReLU',
+ ]
wemm/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (729 Bytes).
 
wemm/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc ADDED
Binary file (779 Bytes).
 
wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__init__.py ADDED
@@ -0,0 +1 @@
+ from .modules import * # noqa: F403
wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__init__.py ADDED
@@ -0,0 +1,31 @@
+ from .linear_relu import LinearReLU
+ from .linear_fused import LinearBn1d
+ from .conv_fused import (
+     ConvBn1d,
+     ConvBn2d,
+     ConvBn3d,
+     ConvBnReLU1d,
+     ConvBnReLU2d,
+     ConvBnReLU3d,
+     ConvReLU1d,
+     ConvReLU2d,
+     ConvReLU3d,
+     update_bn_stats,
+     freeze_bn_stats,
+ )
+
+ __all__ = [
+     "LinearReLU",
+     "LinearBn1d",
+     "ConvReLU1d",
+     "ConvReLU2d",
+     "ConvReLU3d",
+     "ConvBn1d",
+     "ConvBn2d",
+     "ConvBn3d",
+     "ConvBnReLU1d",
+     "ConvBnReLU2d",
+     "ConvBnReLU3d",
+     "update_bn_stats",
+     "freeze_bn_stats",
+ ]
wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (624 Bytes).
 
wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/linear_fused.cpython-310.pyc ADDED
Binary file (629 Bytes).
 
wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/conv_fused.py ADDED
@@ -0,0 +1,37 @@
+ # flake8: noqa: F401
+ r"""Intrinsic QAT Modules
+
+ This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
+ is kept here for compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
+ while adding an import statement here.
+ """
+
+ __all__ = [
+     # Modules
+     'ConvBn1d',
+     'ConvBnReLU1d',
+     'ConvReLU1d',
+     'ConvBn2d',
+     'ConvBnReLU2d',
+     'ConvReLU2d',
+     'ConvBn3d',
+     'ConvBnReLU3d',
+     'ConvReLU3d',
+     # Utilities
+     'freeze_bn_stats',
+     'update_bn_stats',
+ ]
+
+ from torch.ao.nn.intrinsic.qat import ConvBn1d
+ from torch.ao.nn.intrinsic.qat import ConvBnReLU1d
+ from torch.ao.nn.intrinsic.qat import ConvReLU1d
+ from torch.ao.nn.intrinsic.qat import ConvBn2d
+ from torch.ao.nn.intrinsic.qat import ConvBnReLU2d
+ from torch.ao.nn.intrinsic.qat import ConvReLU2d
+ from torch.ao.nn.intrinsic.qat import ConvBn3d
+ from torch.ao.nn.intrinsic.qat import ConvBnReLU3d
+ from torch.ao.nn.intrinsic.qat import ConvReLU3d
+ from torch.ao.nn.intrinsic.qat import freeze_bn_stats
+ from torch.ao.nn.intrinsic.qat import update_bn_stats
wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_fused.py ADDED
@@ -0,0 +1,15 @@
+ # flake8: noqa: F401
+ r"""Intrinsic QAT Modules
+
+ This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
+ is kept here for compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
+ while adding an import statement here.
+ """
+
+ __all__ = [
+     'LinearBn1d',
+ ]
+
+ from torch.ao.nn.intrinsic.qat import LinearBn1d
wemm/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_relu.py ADDED
@@ -0,0 +1,15 @@
+ # flake8: noqa: F401
+ r"""Intrinsic QAT Modules
+
+ This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
+ is kept here for compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
+ while adding an import statement here.
+ """
+
+ __all__ = [
+     'LinearReLU',
+ ]
+
+ from torch.ao.nn.intrinsic.qat import LinearReLU
wemm/lib/python3.10/site-packages/torch/nn/parallel/__init__.py ADDED
@@ -0,0 +1,14 @@
+ from .parallel_apply import parallel_apply
+ from .replicate import replicate
+ from .data_parallel import DataParallel, data_parallel
+ from .scatter_gather import scatter, gather
+ from .distributed import DistributedDataParallel
+
+ __all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
+            'DataParallel', 'DistributedDataParallel']
+
+ def DistributedDataParallelCPU(*args, **kwargs):
+     import warnings
+     warnings.warn("torch.nn.parallel.DistributedDataParallelCPU is deprecated, "
+                   "please use torch.nn.parallel.DistributedDataParallel instead.")
+     return DistributedDataParallel(*args, **kwargs)
wemm/lib/python3.10/site-packages/torch/nn/parallel/__init__.pyi ADDED
@@ -0,0 +1,5 @@
+ from .data_parallel import DataParallel as DataParallel, data_parallel as data_parallel
+ from .distributed import DistributedDataParallel as DistributedDataParallel
+ from .parallel_apply import parallel_apply as parallel_apply
+ from .replicate import replicate as replicate
+ from .scatter_gather import gather as gather, scatter as scatter
wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (788 Bytes).
 
wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc ADDED
Binary file (5.65 kB).
 
wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_replicated_tensor_ddp_interop.cpython-310.pyc ADDED
Binary file (1.96 kB).
 
wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_replicated_tensor_ddp_utils.cpython-310.pyc ADDED
Binary file (1.17 kB).
 
wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc ADDED
Binary file (10.3 kB).
 
wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc ADDED
Binary file (9.7 kB).
 
wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc ADDED
Binary file (67.5 kB).
 
wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc ADDED
Binary file (3.31 kB).
 
wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc ADDED
Binary file (4.42 kB).
 
wemm/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc ADDED
Binary file (3.74 kB).
 
wemm/lib/python3.10/site-packages/torch/nn/parallel/_functions.py ADDED
@@ -0,0 +1,124 @@
1
+ import warnings
2
+
3
+ import torch
4
+ from . import comm
5
+ from torch.autograd import Function
6
+ from torch._utils import _get_device_index
7
+ from typing import List, Optional
8
+
9
+
10
+ class Broadcast(Function):
11
+
12
+ @staticmethod
13
+ def forward(ctx, target_gpus, *inputs):
14
+ assert all(i.device.type != 'cpu' for i in inputs), (
15
+ 'Broadcast function not implemented for CPU tensors'
16
+ )
17
+ target_gpus = [_get_device_index(x, True) for x in target_gpus]
18
+ ctx.target_gpus = target_gpus
19
+ if len(inputs) == 0:
20
+ return tuple()
21
+ ctx.num_inputs = len(inputs)
22
+ ctx.input_device = inputs[0].get_device()
23
+ outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus)
24
+ non_differentiables = []
25
+ for idx, input_requires_grad in enumerate(ctx.needs_input_grad[1:]):
26
+ if not input_requires_grad:
27
+ for output in outputs:
28
+ non_differentiables.append(output[idx])
29
+ ctx.mark_non_differentiable(*non_differentiables)
30
+ return tuple([t for tensors in outputs for t in tensors])
31
+
32
+ @staticmethod
33
+ def backward(ctx, *grad_outputs):
34
+ return (None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs)
35
+
36
+
37
+ class ReduceAddCoalesced(Function):
38
+
39
+ @staticmethod
40
+ def forward(ctx, destination, num_inputs, *grads):
41
+ ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)]
42
+
43
+ grads_ = [grads[i:i + num_inputs]
44
+ for i in range(0, len(grads), num_inputs)]
45
+ return comm.reduce_add_coalesced(grads_, destination)
46
+
47
+ @staticmethod
48
+ def backward(ctx, *grad_outputs):
49
+ return (None, None,) + Broadcast.apply(ctx.target_gpus, *grad_outputs)
50
+
51
+
52
+ class Gather(Function):
53
+
54
+ @staticmethod
55
+ def forward(ctx, target_device, dim, *inputs):
56
+ assert all(i.device.type != 'cpu' for i in inputs), (
57
+ 'Gather function not implemented for CPU tensors'
58
+ )
59
+ if (target_device == 'cpu'):
60
+ ctx.target_device = 'cpu'
61
+ else:
62
+ target_device = _get_device_index(target_device, True)
63
+ ctx.target_device = target_device
64
+ ctx.dim = dim
65
+ ctx.input_gpus = tuple(i.get_device() for i in inputs)
66
+ if all(t.dim() == 0 for t in inputs) and dim == 0:
67
+ inputs = tuple(t.view(1) for t in inputs)
68
+ warnings.warn('Was asked to gather along dimension 0, but all '
69
+ 'input tensors were scalars; will instead unsqueeze '
70
+ 'and return a vector.')
71
+ ctx.unsqueezed_scalar = True
72
+ else:
73
+ ctx.unsqueezed_scalar = False
74
+ ctx.input_sizes = tuple(i.size(ctx.dim) for i in inputs)
75
+ return comm.gather(inputs, ctx.dim, ctx.target_device)
76
+
77
+ @staticmethod
78
+ def backward(ctx, grad_output):
79
+ scattered_grads = Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output)
80
+ if ctx.unsqueezed_scalar:
81
+ scattered_grads = tuple(g[0] for g in scattered_grads)
82
+ return (None, None) + scattered_grads
83
+
84
+
85
+ class Scatter(Function):
86
+
87
+ @staticmethod
88
+ def forward(ctx, target_gpus, chunk_sizes, dim, input):
89
+ target_gpus = [_get_device_index(x, True) for x in target_gpus]
90
+ ctx.dim = dim
91
+ ctx.input_device = input.get_device() if input.device.type != "cpu" else -1
92
+ streams = None
93
+ if torch.cuda.is_available() and ctx.input_device == -1:
94
+ # Perform CPU to GPU copies in a background stream
95
+ streams = [_get_stream(device) for device in target_gpus]
96
+ outputs = comm.scatter(input, target_gpus, chunk_sizes, ctx.dim, streams)
97
+ # Synchronize with the copy stream
98
+ if streams is not None:
99
+ for i, output in enumerate(outputs):
100
+ with torch.cuda.device(target_gpus[i]):
101
+ main_stream = torch.cuda.current_stream()
102
+ main_stream.wait_stream(streams[i])
103
+ output.record_stream(main_stream)
104
+ return outputs
105
+
106
+ @staticmethod
107
+ def backward(ctx, *grad_output):
108
+ return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output)
109
+
110
+
111
+ # background streams used for copying
112
+ _streams: Optional[List[Optional[torch.cuda.Stream]]] = None
113
+
114
+
115
+ def _get_stream(device: int):
116
+ """Gets a background stream for copying between CPU and GPU"""
117
+ global _streams
118
+ if device == -1:
119
+ return None
120
+ if _streams is None:
121
+ _streams = [None] * torch.cuda.device_count()
122
+ if _streams[device] is None:
123
+ _streams[device] = torch.cuda.Stream(device)
124
+ return _streams[device]
wemm/lib/python3.10/site-packages/torch/nn/parallel/_replicated_tensor_ddp_interop.py ADDED
@@ -0,0 +1,46 @@
+ import torch
+ from torch.distributed._shard.replicated_tensor import ReplicatedTensor
+
+ class ReplicatedTensorFunction(torch.autograd.Function):
+     """
+     Autograd function to ensure gradients are replicated between the
+     replicated tensor and the original one.
+     """
+     @staticmethod
+     def forward(ctx, inp, process_group=None):
+         # set_materialize_grads(False) will ensure that None gradients stay as
+         # None and are not filled with zeros.
+         ctx.set_materialize_grads(False)
+         return ReplicatedTensor(inp, process_group)
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         return grad_output, None
+
+ def _make_replicated_tensor(tensor, process_group):
+     replicated_tensor = ReplicatedTensorFunction.apply(tensor, process_group)
+     replicated_tensor.grad = tensor.grad
+     return replicated_tensor
+
+ def _replicate_module_recurse(module, process_group):
+     replica = module._replicate_for_data_parallel()
+     for param_name, param in module._parameters.items():
+         if param is not None:
+             setattr(replica, param_name, _make_replicated_tensor(param, process_group))
+         else:
+             setattr(replica, param_name, param)
+
+     for buffer_name, buffer in module._buffers.items():
+         setattr(replica, buffer_name, buffer)
+
+     for module_name, child in module._modules.items():
+         setattr(replica, module_name, _replicate_module_recurse(child, process_group))
+     return replica
+
+ def _replicate_module(network, process_group):
+     from torch.nn.parallel.replicate import _replicatable_module # type: ignore[attr-defined]
+     if not _replicatable_module(network):
+         raise RuntimeError("Cannot replicate network where python modules are "
+                            "childrens of ScriptModule")
+
+     return _replicate_module_recurse(network, process_group)
wemm/lib/python3.10/site-packages/torch/nn/parallel/_replicated_tensor_ddp_utils.py ADDED
@@ -0,0 +1,31 @@
+ from contextlib import contextmanager
+
+ _DDP_WITH_REPLICATED_TENSOR = False
+
+ @contextmanager
+ def _ddp_replicated_tensor(val):
+     """
+     A context manager to tag tensors in the forward pass of DDP to be
+     ``ReplicatedTensor``. This can be used by ReplicatedTensor inter-op
+     during the forward pass to perform appropriate optimizations.
+
+     This context manager needs to wrap DDP creation; modifying the underlying
+     module passed into DDP after leaving this context manager would cause
+     inconsistencies, and the changes will not be picked up during the forward
+     pass.
+     """
+     global _DDP_WITH_REPLICATED_TENSOR
+     old_val = _DDP_WITH_REPLICATED_TENSOR
+     _DDP_WITH_REPLICATED_TENSOR = val
+     try:
+         yield
+     finally:
+         _DDP_WITH_REPLICATED_TENSOR = old_val
+
+ def _ddp_with_replicated_tensor_enabled():
+     global _DDP_WITH_REPLICATED_TENSOR
+     return _DDP_WITH_REPLICATED_TENSOR
+
+ def _set_ddp_with_replicated_tensor(value):
+     global _DDP_WITH_REPLICATED_TENSOR
+     _DDP_WITH_REPLICATED_TENSOR = value
wemm/lib/python3.10/site-packages/torch/nn/parallel/comm.py ADDED
@@ -0,0 +1,241 @@
1
+ import warnings
2
+ import torch
3
+ from torch.cuda import nccl
4
+ from torch._utils import _take_tensors, _flatten_dense_tensors, \
5
+ _unflatten_dense_tensors, _reorder_tensors_as, _get_device_index, _handle_complex
6
+ from typing import List
7
+
8
+ def broadcast(tensor, devices=None, *, out=None):
9
+ r"""Broadcasts a tensor to specified GPU devices.
10
+
11
+ Args:
12
+ tensor (Tensor): tensor to broadcast. Can be on CPU or GPU.
13
+ devices (Iterable[torch.device, str or int], optional): an iterable of
14
+ GPU devices, among which to broadcast.
15
+ out (Sequence[Tensor], optional, keyword-only): the GPU tensors to
16
+ store output results.
17
+
18
+ .. note::
19
+ Exactly one of :attr:`devices` and :attr:`out` must be specified.
20
+
21
+ Returns:
22
+ - If :attr:`devices` is specified,
23
+ a tuple containing copies of :attr:`tensor`, placed on
24
+ :attr:`devices`.
25
+ - If :attr:`out` is specified,
26
+ a tuple containing :attr:`out` tensors, each containing a copy of
27
+ :attr:`tensor`.
28
+ """
29
+ tensor = _handle_complex(tensor)
30
+ if not ((devices is None) ^ (out is None)):
31
+ raise RuntimeError(
32
+ "Exactly one of 'devices' and 'out' must be specified, but got "
33
+ "devices={} and out={}".format(devices, out))
34
+ if devices is not None:
35
+ devices = [_get_device_index(d) for d in devices]
36
+ return torch._C._broadcast(tensor, devices)
37
+ else:
38
+ return torch._C._broadcast_out(tensor, out)
39
+
40
+
41
+ def broadcast_coalesced(tensors, devices, buffer_size=10485760):
42
+ """Broadcasts a sequence tensors to the specified GPUs.
43
+ Small tensors are first coalesced into a buffer to reduce the number
44
+ of synchronizations.
45
+
46
+ Args:
47
+ tensors (sequence): tensors to broadcast. Must be on the same device,
48
+ either CPU or GPU.
49
+ devices (Iterable[torch.device, str or int]): an iterable of GPU
50
+ devices, among which to broadcast.
51
+ buffer_size (int): maximum size of the buffer used for coalescing
52
+
53
+ Returns:
54
+ A tuple containing copies of :attr:`tensor`, placed on :attr:`devices`.
55
+ """
56
+ devices = [_get_device_index(d) for d in devices]
57
+ tensors = [_handle_complex(t) for t in tensors]
58
+ return torch._C._broadcast_coalesced(tensors, devices, buffer_size)
59
+
60
+
61
+ def reduce_add(inputs, destination=None):
62
+ """Sums tensors from multiple GPUs.
63
+
64
+ All inputs should have matching shapes, dtype, and layout. The output tensor
65
+ will be of the same shape, dtype, and layout.
66
+
67
+ Args:
68
+ inputs (Iterable[Tensor]): an iterable of tensors to add.
69
+ destination (int, optional): a device on which the output will be
70
+ placed (default: current device).
71
+
72
+ Returns:
73
+ A tensor containing an elementwise sum of all inputs, placed on the
74
+ :attr:`destination` device.
75
+ """
76
+ destination = _get_device_index(destination, optional=True)
77
+ input_size = inputs[0].size()
78
+ root_index = None # index of input tensor that already is on the correct device
79
+ for i, inp in enumerate(inputs):
80
+ assert inp.device.type != "cpu", "reduce_add expects all inputs to be on GPUs"
81
+ if inp.get_device() == destination:
82
+ root_index = i
83
+ if inp.size() != input_size:
84
+ got = 'x'.join(str(x) for x in inp.size())
85
+ expected = 'x'.join(str(x) for x in input_size)
86
+ raise ValueError("input {} has invalid size: got {}, but expected "
87
+ "{}".format(i, got, expected))
88
+ if root_index is None:
89
+ raise RuntimeError("reduce_add expects destination to be on the same GPU with one of the tensors")
90
+
91
+ if len(inputs) == 1:
92
+ return inputs[0]
93
+
94
+ if nccl.is_available(inputs):
95
+ result = torch.empty_like(inputs[root_index])
96
+ nccl.reduce(inputs, output=result, root=root_index)
97
+ else:
98
+ destination_device = torch.device(inputs[root_index].device.type, destination)
99
+ nonroot = [t for i, t in enumerate(inputs) if i != root_index]
100
+ # make a new tensor w/o clone
101
+ result = inputs[root_index] + nonroot[0].to(device=destination_device, non_blocking=True)
102
+ for other in nonroot[1:]:
103
+ result.add_(other.to(device=destination_device, non_blocking=True))
104
+ return result
105
+
106
+
107
+ def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760):
108
+ """Sums tensors from multiple GPUs.
109
+
110
+ Small tensors are first coalesced into a buffer to reduce the number
111
+ of synchronizations.
112
+
113
+ Args:
114
+ inputs (Iterable[Iterable[Tensor]]): iterable of iterables that
115
+ contain tensors from a single device.
116
+ destination (int, optional): a device on which the output will be
117
+ placed (default: current device).
118
+ buffer_size (int): maximum size of the buffer used for coalescing
119
+
120
+ Returns:
121
+ A tuple of tensors containing an elementwise sum of each group of
122
+ inputs, placed on the ``destination`` device.
123
+ """
124
+ # TODO: When `len(inputs) == 1` and all inputs are on `destination`, just
125
+ # return `inputs`.
126
+ dense_tensors: List[List] = [[] for _ in inputs] # shape (num_gpus, num_tensors)
127
+ output = []
128
+ ref_order = []
129
+ # process sparse ones first since they may have different sizes on different gpus
130
+ for tensor_at_gpus in zip(*inputs):
131
+ if all(t.is_sparse for t in tensor_at_gpus):
132
+ result = reduce_add(tensor_at_gpus, destination) # this will be sparse too
133
+ output.append(result)
134
+ ref_order.append(tensor_at_gpus[0])
135
+ else:
136
+ for coll, t in zip(dense_tensors, tensor_at_gpus):
137
+ coll.append(t.to_dense() if t.is_sparse else t)
138
+ ref_order.append(dense_tensors[0][-1])
139
+ itrs = [_take_tensors(tensors, buffer_size) for tensors in dense_tensors]
140
+ # now the dense ones, which have consistent sizes
141
+ for chunks in zip(*itrs):
142
+ flat_tensors = [_flatten_dense_tensors(chunk) for chunk in chunks] # (num_gpus,)
143
+ flat_result = reduce_add(flat_tensors, destination)
144
+ for t in _unflatten_dense_tensors(flat_result, chunks[0]):
145
+ # The unflattened tensors do not share storage, and we don't expose
146
+ # base flat tensor anyways, so give them different version counters.
147
+ # See NOTE [ Version Counter in comm.*_coalesced ]
148
+ output.append(t.data)
149
+ return tuple(_reorder_tensors_as(output, ref_order))
150
+
151
+
152
+ def scatter(tensor, devices=None, chunk_sizes=None, dim=0, streams=None, *, out=None):
153
+ """Scatters tensor across multiple GPUs.
154
+
155
+ Args:
156
+ tensor (Tensor): tensor to scatter. Can be on CPU or GPU.
157
+ devices (Iterable[torch.device, str or int], optional): an iterable of
158
+ GPU devices, among which to scatter.
159
+ chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on
160
+ each device. It should match :attr:`devices` in length and sums to
161
+ ``tensor.size(dim)``. If not specified, :attr:`tensor` will be divided
162
+ into equal chunks.
163
+ dim (int, optional): A dimension along which to chunk :attr:`tensor`.
164
+ Default: ``0``.
165
+ streams (Iterable[Stream], optional): an iterable of Streams, among
166
+ which to execute the scatter. If not specified, the default stream will
167
+ be utilized.
168
+ out (Sequence[Tensor], optional, keyword-only): the GPU tensors to
169
+ store output results. Sizes of these tensors must match that of
170
+ :attr:`tensor`, except for :attr:`dim`, where the total size must
171
+ sum to ``tensor.size(dim)``.
172
+
173
+ .. note::
174
+ Exactly one of :attr:`devices` and :attr:`out` must be specified. When
175
+ :attr:`out` is specified, :attr:`chunk_sizes` must not be specified and
176
+ will be inferred from sizes of :attr:`out`.
177
+
178
+ Returns:
179
+ - If :attr:`devices` is specified,
180
+ a tuple containing chunks of :attr:`tensor`, placed on
181
+ :attr:`devices`.
182
+ - If :attr:`out` is specified,
183
+ a tuple containing :attr:`out` tensors, each containing a chunk of
184
+ :attr:`tensor`.
185
+ """
186
+ tensor = _handle_complex(tensor)
187
+ if out is None:
188
+ devices = [_get_device_index(d) for d in devices]
189
+ return tuple(torch._C._scatter(tensor, devices, chunk_sizes, dim, streams))
190
+ else:
191
+ if devices is not None:
192
+ raise RuntimeError(
193
+ "'devices' must not be specified when 'out' is specified, but "
194
+ "got devices={}".format(devices))
195
+ if chunk_sizes is not None:
196
+ raise RuntimeError(
197
+ "'chunk_sizes' must not be specified when 'out' is specified, "
198
+ "but got chunk_sizes={}".format(chunk_sizes))
199
+ return tuple(torch._C._scatter_out(tensor, out, dim, streams))
200
+
201
+
202
+ def gather(tensors, dim=0, destination=None, *, out=None):
203
+ r"""Gathers tensors from multiple GPU devices.
204
+
205
+ Args:
206
+ tensors (Iterable[Tensor]): an iterable of tensors to gather.
207
+ Tensor sizes in all dimensions other than :attr:`dim` have to match.
208
+ dim (int, optional): a dimension along which the tensors will be
209
+ concatenated. Default: ``0``.
210
+ destination (torch.device, str, or int, optional): the output device.
211
+ Can be CPU or CUDA. Default: the current CUDA device.
212
+ out (Tensor, optional, keyword-only): the tensor to store gather result.
213
+ Its sizes must match those of :attr:`tensors`, except for :attr:`dim`,
214
+ where the size must equal ``sum(tensor.size(dim) for tensor in tensors)``.
215
+ Can be on CPU or CUDA.
216
+
217
+ .. note::
218
+ :attr:`destination` must not be specified when :attr:`out` is specified.
219
+
220
+ Returns:
221
+ - If :attr:`destination` is specified,
222
+ a tensor located on :attr:`destination` device, that is a result of
223
+ concatenating :attr:`tensors` along :attr:`dim`.
224
+ - If :attr:`out` is specified,
225
+ the :attr:`out` tensor, now containing results of concatenating
226
+ :attr:`tensors` along :attr:`dim`.
227
+ """
228
+ tensors = [_handle_complex(t) for t in tensors]
229
+ if out is None:
230
+ if destination == -1:
231
+ warnings.warn(
232
+ 'Using -1 to represent CPU tensor is deprecated. Please use a '
233
+ 'device object or string instead, e.g., "cpu".')
234
+ destination = _get_device_index(destination, allow_cpu=True, optional=True)
235
+ return torch._C._gather(tensors, dim, destination)
236
+ else:
237
+ if destination is not None:
238
+ raise RuntimeError(
239
+ "'destination' must not be specified when 'out' is specified, but "
240
+ "got destination={}".format(destination))
241
+ return torch._C._gather_out(tensors, out, dim)
wemm/lib/python3.10/site-packages/torch/nn/parallel/common_types.pyi ADDED
@@ -0,0 +1,5 @@
+ from typing import Union, Sequence
+ from ... import device
+
+ _device_t = Union[int, device]
+ _devices_t = Sequence[_device_t]
wemm/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py ADDED
@@ -0,0 +1,235 @@
1
+ import operator
2
+ import torch
3
+ import warnings
4
+ from itertools import chain
5
+ from ..modules import Module
6
+ from .scatter_gather import scatter_kwargs, gather
7
+ from .replicate import replicate
8
+ from .parallel_apply import parallel_apply
9
+ from torch._utils import (
10
+ _get_all_device_indices,
11
+ _get_available_device_type,
12
+ _get_device_index,
13
+ _get_devices_properties
14
+ )
15
+
16
+ __all__ = ['DataParallel', 'data_parallel']
17
+
18
+ def _check_balance(device_ids):
19
+ imbalance_warn = """
20
+ There is an imbalance between your GPUs. You may want to exclude GPU {} which
21
+ has less than 75% of the memory or cores of GPU {}. You can do so by setting
22
+ the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES
23
+ environment variable."""
24
+ device_ids = [_get_device_index(x, True) for x in device_ids]
25
+ dev_props = _get_devices_properties(device_ids)
26
+
27
+ def warn_imbalance(get_prop):
28
+ values = [get_prop(props) for props in dev_props]
29
+ min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1))
30
+ max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1))
31
+ if min_val / max_val < 0.75:
32
+ warnings.warn(imbalance_warn.format(device_ids[min_pos], device_ids[max_pos]))
33
+ return True
34
+ return False
35
+
36
+ if warn_imbalance(lambda props: props.total_memory):
37
+ return
38
+ if warn_imbalance(lambda props: props.multi_processor_count):
39
+ return
40
+
41
+
42
+ class DataParallel(Module):
43
+ r"""Implements data parallelism at the module level.
44
+
45
+ This container parallelizes the application of the given :attr:`module` by
46
+ splitting the input across the specified devices by chunking in the batch
47
+ dimension (other objects will be copied once per device). In the forward
48
+ pass, the module is replicated on each device, and each replica handles a
49
+ portion of the input. During the backwards pass, gradients from each replica
50
+ are summed into the original module.
51
+
52
+ The batch size should be larger than the number of GPUs used.
53
+
54
+ .. warning::
55
+ It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`,
56
+ instead of this class, to do multi-GPU training, even if there is only a single
57
+ node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`.
58
+
59
+ Arbitrary positional and keyword inputs are allowed to be passed into
60
+ DataParallel but some types are specially handled. tensors will be
61
+ **scattered** on dim specified (default 0). tuple, list and dict types will
62
+ be shallow copied. The other types will be shared among different threads
63
+ and can be corrupted if written to in the model's forward pass.
64
+
65
+ The parallelized :attr:`module` must have its parameters and buffers on
66
+ ``device_ids[0]`` before running this :class:`~torch.nn.DataParallel`
67
+ module.
68
+
69
+ .. warning::
70
+ In each forward, :attr:`module` is **replicated** on each device, so any
71
+ updates to the running module in ``forward`` will be lost. For example,
72
+ if :attr:`module` has a counter attribute that is incremented in each
73
+ ``forward``, it will always stay at the initial value because the update
74
+ is done on the replicas which are destroyed after ``forward``. However,
75
+ :class:`~torch.nn.DataParallel` guarantees that the replica on
76
+ ``device[0]`` will have its parameters and buffers sharing storage with
77
+ the base parallelized :attr:`module`. So **in-place** updates to the
78
+ parameters or buffers on ``device[0]`` will be recorded. E.g.,
79
+ :class:`~torch.nn.BatchNorm2d` and :func:`~torch.nn.utils.spectral_norm`
80
+ rely on this behavior to update the buffers.
81
+
82
+ .. warning::
83
+ Forward and backward hooks defined on :attr:`module` and its submodules
84
+ will be invoked ``len(device_ids)`` times, each with inputs located on
85
+ a particular device. Particularly, the hooks are only guaranteed to be
86
+ executed in correct order with respect to operations on corresponding
87
+ devices. For example, it is not guaranteed that hooks set via
88
+ :meth:`~torch.nn.Module.register_forward_pre_hook` be executed before
89
+ `all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but
90
+ that each such hook be executed before the corresponding
91
+ :meth:`~torch.nn.Module.forward` call of that device.
92
+
93
+ .. warning::
94
+ When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in
95
+ :func:`forward`, this wrapper will return a vector of length equal to
96
+ number of devices used in data parallelism, containing the result from
97
+ each device.
98
+
99
+ .. note::
100
+ There is a subtlety in using the
101
+ ``pack sequence -> recurrent network -> unpack sequence`` pattern in a
102
+ :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.
103
+ See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for
104
+ details.
105
+
106
+
107
+ Args:
108
+ module (Module): module to be parallelized
109
+ device_ids (list of int or torch.device): CUDA devices (default: all devices)
110
+ output_device (int or torch.device): device location of output (default: device_ids[0])
111
+
112
+ Attributes:
113
+ module (Module): the module to be parallelized
114
+
115
+ Example::
116
+
117
+ >>> # xdoctest: +SKIP
118
+ >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
119
+ >>> output = net(input_var) # input_var can be on any device, including CPU
120
+ """
121
+
122
+ # TODO: update notes/cuda.rst when this class handles 8+ GPUs well
123
+
124
+ def __init__(self, module, device_ids=None, output_device=None, dim=0):
125
+ super().__init__()
126
+ torch._C._log_api_usage_once("torch.nn.parallel.DataParallel")
127
+ device_type = _get_available_device_type()
128
+ if device_type is None:
129
+ self.module = module
130
+ self.device_ids = []
131
+ return
132
+
133
+ if device_ids is None:
134
+ device_ids = _get_all_device_indices()
135
+
136
+ if output_device is None:
137
+ output_device = device_ids[0]
138
+
139
+ self.dim = dim
140
+ self.module = module
141
+ self.device_ids = [_get_device_index(x, True) for x in device_ids]
142
+ self.output_device = _get_device_index(output_device, True)
143
+ self.src_device_obj = torch.device(device_type, self.device_ids[0])
144
+
145
+ _check_balance(self.device_ids)
146
+
147
+ if len(self.device_ids) == 1:
148
+ self.module.to(self.src_device_obj)
149
+
150
+ def forward(self, *inputs, **kwargs):
151
+ with torch.autograd.profiler.record_function("DataParallel.forward"):
152
+ if not self.device_ids:
153
+ return self.module(*inputs, **kwargs)
154
+
155
+ for t in chain(self.module.parameters(), self.module.buffers()):
156
+ if t.device != self.src_device_obj:
157
+ raise RuntimeError("module must have its parameters and buffers "
158
+ "on device {} (device_ids[0]) but found one of "
159
+ "them on device: {}".format(self.src_device_obj, t.device))
160
+
161
+ inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
162
+ # for forward function without any inputs, empty list and dict will be created
163
+ # so the module can be executed on one device which is the first one in device_ids
164
+ if not inputs and not kwargs:
165
+ inputs = ((),)
166
+ kwargs = ({},)
167
+
168
+ if len(self.device_ids) == 1:
169
+ return self.module(*inputs[0], **kwargs[0])
170
+ replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
171
+ outputs = self.parallel_apply(replicas, inputs, kwargs)
172
+ return self.gather(outputs, self.output_device)
173
+
174
+ def replicate(self, module, device_ids):
175
+ return replicate(module, device_ids, not torch.is_grad_enabled())
176
+
177
+ def scatter(self, inputs, kwargs, device_ids):
178
+ return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
179
+
180
+ def parallel_apply(self, replicas, inputs, kwargs):
181
+ return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
182
+
183
+ def gather(self, outputs, output_device):
184
+ return gather(outputs, output_device, dim=self.dim)
185
+
186
+
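The ``replicate``/``scatter``/``parallel_apply``/``gather`` methods above are the override points of this wrapper. As a hedged sketch (the ``KeepPerDeviceOutputs`` name is hypothetical, not part of this file), one of them can be customized in a subclass:

import torch

class KeepPerDeviceOutputs(torch.nn.DataParallel):
    def gather(self, outputs, output_device):
        # Skip the final gather step and hand back the list of per-device outputs.
        return outputs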
187
+ def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
188
+ r"""Evaluates module(input) in parallel across the GPUs given in device_ids.
189
+
190
+ This is the functional version of the DataParallel module.
191
+
192
+ Args:
193
+ module (Module): the module to evaluate in parallel
194
+ inputs (Tensor): inputs to the module
195
+ device_ids (list of int or torch.device): GPU ids on which to replicate module
196
+ output_device (int or torch.device): device location of the output. Use -1 to indicate the CPU.
197
+ (default: device_ids[0])
198
+ Returns:
199
+ a Tensor containing the result of module(input) located on
200
+ output_device
201
+ """
202
+ if not isinstance(inputs, tuple):
203
+ inputs = (inputs,) if inputs is not None else ()
204
+
205
+ device_type = _get_available_device_type()
206
+
207
+ if device_ids is None:
208
+ device_ids = _get_all_device_indices()
209
+
210
+ if output_device is None:
211
+ output_device = device_ids[0]
212
+
213
+ device_ids = [_get_device_index(x, True) for x in device_ids]
214
+ output_device = _get_device_index(output_device, True)
215
+ src_device_obj = torch.device(device_type, device_ids[0])
216
+
217
+ for t in chain(module.parameters(), module.buffers()):
218
+ if t.device != src_device_obj:
219
+ raise RuntimeError("module must have its parameters and buffers "
220
+ "on device {} (device_ids[0]) but found one of "
221
+ "them on device: {}".format(src_device_obj, t.device))
222
+
223
+ inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
224
+ # for a module called without any inputs, an empty tuple and dict will be created
225
+ # so the module can be executed on a single device (the first one in device_ids)
226
+ if not inputs and not module_kwargs:
227
+ inputs = ((),)
228
+ module_kwargs = ({},)
229
+
230
+ if len(device_ids) == 1:
231
+ return module(*inputs[0], **module_kwargs[0])
232
+ used_device_ids = device_ids[:len(inputs)]
233
+ replicas = replicate(module, used_device_ids)
234
+ outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
235
+ return gather(outputs, output_device, dim)
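Unlike the class above, this functional form ships without a usage example; a minimal sketch, assuming a host with at least two CUDA devices, might be:

import torch
from torch.nn.parallel import data_parallel

model = torch.nn.Linear(16, 4).cuda(0)      # parameters must live on device_ids[0]
x = torch.randn(8, 16, device="cuda:0")     # the batch is split along dim=0
out = data_parallel(model, x, device_ids=[0, 1], output_device=0)
print(out.shape)                            # torch.Size([8, 4]), gathered on output_device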
wemm/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.pyi ADDED
@@ -0,0 +1,19 @@
1
+ from typing import Any, Optional
2
+ from .common_types import _devices_t, _device_t
3
+ from ..modules import Module
4
+ from ... import device, Tensor
5
+
6
+ class DataParallel(Module):
7
+ module: Module = ...
8
+ device_ids: _devices_t = ...
9
+ dim: int = ...
10
+ output_device: _device_t = ...
11
+ src_device_obj: device = ...
12
+
13
+ def __init__(self, module: Module, device_ids: Optional[_devices_t] = ..., output_device: Optional[_device_t] = ...,
14
+ dim: int = ...) -> None: ...
15
+
16
+
17
+ def data_parallel(module: Module, inputs: Any, device_ids: Optional[_devices_t] = ...,
18
+ output_device: Optional[_device_t] = ..., dim: int = ...,
19
+ module_kwargs: Optional[Any] = ...) -> Tensor: ...
wemm/lib/python3.10/site-packages/torch/nn/parallel/distributed.py ADDED
@@ -0,0 +1,1921 @@
1
+ import copy
2
+ import inspect
3
+ import itertools
4
+ import logging
5
+ import os
6
+ import sys
7
+ import warnings
8
+ import weakref
9
+ from contextlib import contextmanager
10
+ from dataclasses import dataclass, fields, is_dataclass
11
+ from enum import Enum, auto
12
+ from typing import Callable, Any, Type
13
+
14
+ import torch
15
+ import torch.distributed as dist
16
+ from torch.autograd import Function, Variable
17
+ from torch.distributed.algorithms.join import (
18
+ Join,
19
+ Joinable,
20
+ JoinHook,
21
+ )
22
+
23
+ from torch.utils._pytree import tree_flatten, tree_unflatten
24
+
25
+ RPC_AVAILABLE = False
26
+ if dist.is_available():
27
+ from torch.distributed.utils import (
28
+ _verify_param_shape_across_processes,
29
+ _sync_module_states,
30
+ _to_kwargs,
31
+ )
32
+ from torch.distributed.distributed_c10d import ReduceOp, _get_default_group
33
+ if torch.distributed.rpc.is_available():
34
+ RPC_AVAILABLE = True
35
+ from torch.distributed.rpc import RRef
36
+
37
+ from torch._utils import _get_device_index
38
+
39
+ from ..modules import Module
40
+ from ._replicated_tensor_ddp_utils import _ddp_with_replicated_tensor_enabled
41
+ from .scatter_gather import gather, scatter_kwargs # noqa: F401
42
+
43
+ __all__ = ["DistributedDataParallel"]
44
+
45
+ logger = logging.getLogger(__name__)
46
+
47
+
48
+ def _tree_flatten_with_rref(output):
49
+ output_is_rref = RPC_AVAILABLE and isinstance(output, RRef)
50
+ if output_is_rref:
51
+ output_tensor_list, treespec = tree_flatten(output.local_value())
52
+ else:
53
+ output_tensor_list, treespec = tree_flatten(output)
54
+ # Need to return the flattened tensors, the spec used to re-pack them, and
55
+ # whether the return type was actually an RRef so it can be reconstructed.
56
+ return output_tensor_list, treespec, output_is_rref
57
+
58
+
59
+ def _tree_unflatten_with_rref(output, treespec, output_is_rref):
60
+ output = tree_unflatten(output, treespec)
61
+ if output_is_rref:
62
+ output = RRef(output)
63
+ return output
64
+
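The two RRef-aware helpers above are thin wrappers over the pytree utilities imported earlier; a small illustrative round trip (not part of the upstream file) looks like:

import torch
from torch.utils._pytree import tree_flatten, tree_unflatten

nested = {"loss": torch.ones(1), "aux": (torch.zeros(2),)}
leaves, spec = tree_flatten(nested)          # flat list of tensor leaves plus a spec
restored = tree_unflatten(leaves, spec)      # same nested structure, same leaves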
65
+
66
+ def _find_tensors(obj):
67
+ r"""
68
+ Recursively find all tensors contained in the specified object.
69
+ """
70
+ if RPC_AVAILABLE and isinstance(obj, RRef):
71
+ # If the current node is the owner of the RRef, unwrap it and try to
72
+ # find Tensors.
73
+ # TODO: Expand to remote RRefs.
74
+ if obj.is_owner():
75
+ return _find_tensors(obj.local_value())
76
+ if isinstance(obj, torch.Tensor):
77
+ return [obj]
78
+ if isinstance(obj, (list, tuple)):
79
+ return itertools.chain(*map(_find_tensors, obj))
80
+ if isinstance(obj, dict):
81
+ return itertools.chain(*map(_find_tensors, obj.values()))
82
+ if is_dataclass(obj):
83
+ return itertools.chain(
84
+ *map(_find_tensors, (getattr(obj, f.name) for f in fields(obj)))
85
+ )
86
+
87
+ return []
88
+
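As a hedged illustration of the traversal performed by ``_find_tensors`` above (the values are made up):

import torch
from torch.nn.parallel.distributed import _find_tensors  # private helper defined above

out = {"loss": torch.ones(1), "aux": [torch.zeros(2), "not a tensor"]}
tensors = list(_find_tensors(out))   # yields the two tensors, in traversal order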
89
+
90
+ def _dump_DDP_relevant_env_vars():
91
+ relevant_env_vars = [
92
+ "RANK",
93
+ "LOCAL_RANK",
94
+ "WORLD_SIZE",
95
+ "MASTER_PORT",
96
+ "MASTER_ADDR",
97
+ "CUDA_VISIBLE_DEVICES",
98
+ "GLOO_SOCKET_IFNAME",
99
+ "GLOO_DEVICE_TRANSPORT",
100
+ "NCCL_SOCKET_IFNAME",
101
+ "NCCL_BLOCKING_WAIT",
102
+ "NCCL_DEBUG",
103
+ "NCCL_DEBUG_SUBSYS",
104
+ "NCCL_IB_DISABLE",
105
+ # More NCCL env vars:
106
+ "NCCL_P2P_DISABLE",
107
+ "NCCL_P2P_LEVEL",
108
+ "NCCL_SHM_DISABLE",
109
+ "NCCL_SOCKET_NTHREADS",
110
+ "NCCL_NSOCKS_PERTHREAD",
111
+ "NCCL_BUFFSIZE",
112
+ "NCCL_NTHREADS",
113
+ "NCCL_RINGS",
114
+ "NCCL_MAX_NCHANNELS",
115
+ "NCCL_MIN_NCHANNELS",
116
+ "NCCL_CHECKS_DISABLE",
117
+ "NCCL_CHECK_POINTERS",
118
+ "NCCL_LAUNCH_MODE",
119
+ "NCCL_IB_HCA",
120
+ "NCCL_IB_TIMEOUT",
121
+ "NCCL_IB_RETRY_CNT",
122
+ "NCCL_IB_GID_INDEX",
123
+ "NCCL_IB_SL",
124
+ "NCCL_IB_TC",
125
+ "NCCL_IB_AR_THRESHOLD",
126
+ "NCCL_IB_CUDA_SUPPORT",
127
+ "NCCL_NET_GDR_LEVEL",
128
+ "NCCL_NET_GDR_READ",
129
+ "NCCL_SINGLE_RING_THRESHOLD",
130
+ "NCCL_LL_THRESHOLD",
131
+ "NCCL_TREE_THRESHOLD",
132
+ "NCCL_ALGO",
133
+ "NCCL_PROTO",
134
+ "NCCL_IGNORE_CPU_AFFINITY",
135
+ "NCCL_DEBUG_FILE",
136
+ "NCCL_COLLNET_ENABLE",
137
+ "NCCL_TOPO_FILE",
138
+ "NCCL_TOPO_DUMP_FILE",
139
+ "NCCL_ASYNC_ERROR_HANDLING",
140
+ ]
141
+ formatted_output = ""
142
+ for var in relevant_env_vars:
143
+ value = os.environ[var] if var in os.environ else "N/A"
144
+ formatted_output += "env:%s=%s\n" % (var, value)
145
+ print(formatted_output)
146
+
147
+
148
+ class _BufferCommHookLocation(Enum):
149
+ PRE_FORWARD = auto()
150
+ POST_FORWARD = auto()
151
+
152
+
153
+ @dataclass
154
+ class _BufferCommHook:
155
+ buffer_comm_hook: Callable
156
+ buffer_comm_hook_state: Any
157
+ buffer_comm_hook_location: _BufferCommHookLocation
158
+
159
+
160
+ # Add a DDPSink to run various functions when the backward pass starts, such as
161
+ # queueing a callback for the outermost backward/graph task;
162
+ # this ensures the callback is fired only after all gradients' computation
163
+ # is completed.
164
+ class _DDPSink(Function):
165
+ @staticmethod
166
+ def forward(ctx, reducer, state_dict, *inputs):
167
+ # set_materialize_grads(False) will ensure that None gradients stay as
168
+ # None and are not filled with zeros.
169
+ ctx.set_materialize_grads(False)
170
+ ctx.reducer = reducer
171
+ ctx.state_dict = state_dict
172
+ ret = tuple(
173
+ inp.clone() if isinstance(inp, torch.Tensor) else inp
174
+ for inp in inputs
175
+ )
176
+ return ret
177
+
178
+ @staticmethod
179
+ def backward(ctx, *grad_outputs):
180
+ state_dict = ctx.state_dict
181
+ # Enqueue delay allreduce for static graph training on the first
182
+ # iteration.
183
+ if (
184
+ ctx.state_dict["static_graph"]
185
+ and ctx.state_dict["num_iterations"] == 1
186
+ ):
187
+ Variable._execution_engine.queue_callback( # type: ignore[call-arg,misc]
188
+ ctx.reducer._delay_all_reduce
189
+ )
190
+
191
+ return (None, None, *grad_outputs)
192
+
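``_DDPSink`` above follows the standard ``torch.autograd.Function`` pattern of a cloning forward and a pass-through backward; a stripped-down sketch of that pattern (the ``_Passthrough`` name is hypothetical, not part of this file) is:

import torch

class _Passthrough(torch.autograd.Function):
    @staticmethod
    def forward(ctx, *inputs):
        # Clone the inputs so autograd records a node here, mirroring _DDPSink.forward.
        return tuple(inp.clone() for inp in inputs)

    @staticmethod
    def backward(ctx, *grad_outputs):
        # Gradients flow through unchanged: one gradient per forward input.
        return grad_outputs

# usage: y1, y2 = _Passthrough.apply(x1, x2)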
193
+
194
+ class _DDPJoinHook(JoinHook):
195
+ def __init__(self, ddp, divide_by_initial_world_size):
196
+ """
197
+ Sets config variables for internal usage.
198
+ """
199
+ assert isinstance(ddp, DistributedDataParallel), (
200
+ "DDP join hook requires passing in a DistributedDataParallel "
201
+ "instance as the state"
202
+ )
203
+ assert ddp.logger is not None
204
+ ddp.logger._set_uneven_input_join()
205
+ self.ddp = ddp
206
+ self.ddp._divide_by_initial_world_size = divide_by_initial_world_size
207
+ super().__init__()
208
+
209
+ def main_hook(self):
210
+ """
211
+ Shadows the DDP collective communication operations in the forward and
212
+ backward passes.
213
+ """
214
+ ddp = self.ddp
215
+ # Buckets are rebuilt only once during a training period
216
+ ddp.reducer._rebuild_buckets()
217
+
218
+ # Schedule a broadcast if we are syncing module buffers in the
219
+ # forward pass
220
+ # TODO: make DDP uneven inputs context manager support buffer
221
+ # comm hook (https://github.com/pytorch/pytorch/issues/65436)
222
+ ddp._check_and_sync_module_buffers()
223
+
224
+ # Check if need to sync in the backward pass
225
+ work = ddp._check_global_requires_backward_grad_sync(
226
+ is_joined_rank=True
227
+ )
228
+ work.wait()
229
+ should_sync_backwards = work.result()[0].item() != 0
230
+ # Forward parameter sync is disabled in the next iteration if we
231
+ # are skipping gradient sync this iteration, so set
232
+ # `require_forward_param_sync` accordingly
233
+ ddp.require_forward_param_sync = should_sync_backwards
234
+ if not should_sync_backwards:
235
+ return
236
+
237
+ # Schedule one allreduce per gradient bucket to match the backward
238
+ # pass allreduce
239
+ ddp._match_all_reduce_for_bwd_pass()
240
+
241
+ # Check if we need to allreduce locally unused parameters
242
+ if ddp.find_unused_parameters:
243
+ ddp._match_unused_params_allreduce()
244
+
245
+ # Rebuilt parameters are pushed only once during a training period
246
+ ddp.reducer._push_all_rebuilt_params()
247
+
248
+ def post_hook(self, is_last_joiner: bool):
249
+ """
250
+ Syncs the final model to ensure that the model is the same across all
251
+ processes.
252
+ """
253
+ self.ddp._sync_final_model(is_last_joiner)
254
+
255
+
256
+ class DistributedDataParallel(Module, Joinable):
257
+ r"""Implements distributed data parallelism that is based on
258
+ the ``torch.distributed`` package at the module level.
259
+
260
+ This container provides data parallelism by synchronizing gradients
261
+ across each model replica. The devices to synchronize across are
262
+ specified by the input ``process_group``, which is the entire world
263
+ by default. Note that ``DistributedDataParallel`` does not chunk or
264
+ otherwise shard the input across participating GPUs; the user is
265
+ responsible for defining how to do so, for example through the use
266
+ of a :class:`DistributedSampler`.
267
+
268
+ See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`.
269
+ The same constraints on input as in :class:`torch.nn.DataParallel` apply.
270
+
271
+ Creation of this class requires that ``torch.distributed`` be already
272
+ initialized, by calling :func:`torch.distributed.init_process_group`.
273
+
274
+ ``DistributedDataParallel`` is proven to be significantly faster than
275
+ :class:`torch.nn.DataParallel` for single-node multi-GPU data
276
+ parallel training.
277
+
278
+ To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn
279
+ up ``N`` processes, ensuring that each process exclusively works on a single
280
+ GPU from 0 to N-1. This can be done by either setting
281
+ ``CUDA_VISIBLE_DEVICES`` for every process or by calling:
282
+
283
+ >>> # xdoctest: +SKIP("undefined variables")
284
+ >>> torch.cuda.set_device(i)
285
+
286
+ where i is from 0 to N-1. In each process, you should refer to the following
287
+ to construct this module:
288
+
289
+ >>> # xdoctest: +SKIP("undefined variables")
290
+ >>> torch.distributed.init_process_group(
291
+ >>> backend='nccl', world_size=N, init_method='...'
292
+ >>> )
293
+ >>> model = DistributedDataParallel(model, device_ids=[i], output_device=i)
294
+
295
+ In order to spawn up multiple processes per node, you can use either
296
+ ``torch.distributed.launch`` or ``torch.multiprocessing.spawn``.
297
+
298
+ .. note::
299
+ Please refer to `PyTorch Distributed Overview <https://pytorch.org/tutorials/beginner/dist_overview.html>`__
300
+ for a brief introduction to all features related to distributed training.
301
+
302
+ .. note::
303
+ ``DistributedDataParallel`` can be used in conjunction with
304
+ :class:`torch.distributed.optim.ZeroRedundancyOptimizer` to reduce
305
+ per-rank optimizer states memory footprint. Please refer to
306
+ `ZeroRedundancyOptimizer recipe <https://pytorch.org/tutorials/recipes/zero_redundancy_optimizer.html>`__
307
+ for more details.
308
+
309
+ .. note:: ``nccl`` backend is currently the fastest and highly recommended
310
+ backend when using GPUs. This applies to both single-node and
311
+ multi-node distributed training.
312
+
313
+ .. note:: This module also supports mixed-precision distributed training.
314
+ This means that your model can have different types of parameters such
315
+ as mixed types of ``fp16`` and ``fp32``; the gradient reduction on these
316
+ mixed types of parameters will just work fine.
317
+
318
+ .. note:: If you use ``torch.save`` on one process to checkpoint the module,
319
+ and ``torch.load`` on some other processes to recover it, make sure that
320
+ ``map_location`` is configured properly for every process. Without
321
+ ``map_location``, ``torch.load`` would recover the module to devices
322
+ where the module was saved from.
323
+
324
+ .. note:: When a model is trained on ``M`` nodes with ``batch=N``, the
325
+ gradient will be ``M`` times smaller when compared to the same model
326
+ trained on a single node with ``batch=M*N`` if the loss is summed (NOT
327
+ averaged as usual) across instances in a batch (because the gradients
328
+ between different nodes are averaged). You should take this into
329
+ consideration when you want to obtain a mathematically equivalent
330
+ training process compared to the local training counterpart. But in most
331
+ cases, you can just treat a DistributedDataParallel wrapped model, a
332
+ DataParallel wrapped model and an ordinary model on a single GPU as the
333
+ same (E.g. using the same learning rate for equivalent batch size).
334
+
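A small numeric check of the scaling described in the note above (illustrative values only):

# Summed loss: gradient magnitude scales with the local batch size.
M, N = 2, 4                         # M nodes, per-node batch N
g_single = float(M * N)             # one node, batch M*N -> gradient ~ 8
g_ddp = (float(N) + float(N)) / M   # two nodes, batch N each, averaged -> 4
assert g_single / g_ddp == M        # DDP gradient is M times smaller, as stated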
335
+ .. note::
336
+ Parameters are never broadcast between processes. The module performs
337
+ an all-reduce step on gradients and assumes that they will be modified
338
+ by the optimizer in all processes in the same way. Buffers
339
+ (e.g. BatchNorm stats) are broadcast from the module in process of rank
340
+ 0, to all other replicas in the system in every iteration.
341
+
342
+ .. note::
343
+ If you are using DistributedDataParallel in conjunction with the
344
+ :ref:`distributed-rpc-framework`, you should always use
345
+ :meth:`torch.distributed.autograd.backward` to compute gradients and
346
+ :class:`torch.distributed.optim.DistributedOptimizer` for optimizing
347
+ parameters.
348
+
349
+ Example::
350
+
351
+ >>> # xdoctest: +SKIP("undefined variables")
352
+ >>> import torch.distributed.autograd as dist_autograd
353
+ >>> from torch.nn.parallel import DistributedDataParallel as DDP
354
+ >>> import torch
355
+ >>> from torch import optim
356
+ >>> from torch.distributed.optim import DistributedOptimizer
357
+ >>> import torch.distributed.rpc as rpc
358
+ >>> from torch.distributed.rpc import RRef
359
+ >>>
360
+ >>> t1 = torch.rand((3, 3), requires_grad=True)
361
+ >>> t2 = torch.rand((3, 3), requires_grad=True)
362
+ >>> rref = rpc.remote("worker1", torch.add, args=(t1, t2))
363
+ >>> ddp_model = DDP(my_model)
364
+ >>>
365
+ >>> # Setup optimizer
366
+ >>> optimizer_params = [rref]
367
+ >>> for param in ddp_model.parameters():
368
+ >>> optimizer_params.append(RRef(param))
369
+ >>>
370
+ >>> dist_optim = DistributedOptimizer(
371
+ >>> optim.SGD,
372
+ >>> optimizer_params,
373
+ >>> lr=0.05,
374
+ >>> )
375
+ >>>
376
+ >>> with dist_autograd.context() as context_id:
377
+ >>> pred = ddp_model(rref.to_here())
378
+ >>> loss = loss_func(pred, target)
379
+ >>> dist_autograd.backward(context_id, [loss])
380
+ >>> dist_optim.step(context_id)
381
+
382
+ .. note::
383
+ DistributedDataParallel currently offers limited support for gradient
384
+ checkpointing with :meth:`torch.utils.checkpoint`. DDP will work as
385
+ expected when there are no unused parameters in the model and each layer
386
+ is checkpointed at most once (make sure you are not passing
387
+ `find_unused_parameters=True` to DDP). We currently do not support the
388
+ case where a layer is checkpointed multiple times, or when there are unused
389
+ parameters in the checkpointed model.
390
+
391
+ .. note::
392
+ To let a non-DDP model load a state dict from a DDP model,
393
+ :meth:`~torch.nn.modules.utils.consume_prefix_in_state_dict_if_present`
394
+ needs to be applied to strip the prefix "module." in the DDP state dict before loading.
395
+
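A minimal sketch of the prefix handling described in the note above (``ddp_model`` and ``plain_model`` are assumed to be defined elsewhere):

from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present

state_dict = ddp_model.state_dict()                              # keys look like "module.weight"
consume_prefix_in_state_dict_if_present(state_dict, "module.")
plain_model.load_state_dict(state_dict)                          # non-DDP model, same architecture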
396
+ .. warning::
397
+ Constructor, forward method, and differentiation of the output (or a
398
+ function of the output of this module) are distributed synchronization
399
+ points. Take that into account in case different processes might be
400
+ executing different code.
401
+
402
+ .. warning::
403
+ This module assumes all parameters are registered in the model by the
404
+ time it is created. No parameters should be added nor removed later.
405
+ Same applies to buffers.
406
+
407
+ .. warning::
408
+ This module assumes that the parameters registered in the model of each
409
+ distributed process are in the same order. The module itself will
410
+ conduct gradient ``allreduce`` following the reverse order of the
411
+ registered parameters of the model. In other words, it is users'
412
+ responsibility to ensure that each distributed process has the exact
413
+ same model and thus the exact same parameter registration order.
414
+
415
+ .. warning::
416
+ This module allows parameters with non-rowmajor-contiguous strides.
417
+ For example, your model may contain some parameters whose
418
+ :class:`torch.memory_format` is ``torch.contiguous_format``
419
+ and others whose format is ``torch.channels_last``. However,
420
+ corresponding parameters in different processes must have the
421
+ same strides.
422
+
423
+ .. warning::
424
+ This module doesn't work with :func:`torch.autograd.grad` (i.e. it will
425
+ only work if gradients are to be accumulated in ``.grad`` attributes of
426
+ parameters).
427
+
428
+ .. warning::
429
+ If you plan on using this module with a ``nccl`` backend or a ``gloo``
430
+ backend (that uses Infiniband), together with a DataLoader that uses
431
+ multiple workers, please change the multiprocessing start method to
432
+ ``forkserver`` (Python 3 only) or ``spawn``. Unfortunately
433
+ Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will
434
+ likely experience deadlocks if you don't change this setting.
435
+
436
+ .. warning::
437
+ You should never try to change your model's parameters after wrapping
438
+ up your model with ``DistributedDataParallel``. This is because, when
439
+ wrapping up your model with ``DistributedDataParallel``, the constructor
440
+ of ``DistributedDataParallel`` will register the additional gradient
441
+ reduction functions on all the parameters of the model itself at the
442
+ time of construction. If you change the model's parameters afterwards,
443
+ gradient reduction functions no longer match the correct set of
444
+ parameters.
445
+
446
+ .. warning::
447
+ Using ``DistributedDataParallel`` in conjunction with the
448
+ :ref:`distributed-rpc-framework` is experimental and subject to change.
449
+
450
+ Args:
451
+ module (Module): module to be parallelized
452
+ device_ids (list of int or torch.device): CUDA devices.
453
+ 1) For single-device modules, ``device_ids`` can
454
+ contain exactly one device id, which represents the only
455
+ CUDA device where the input module corresponding to this process resides.
456
+ Alternatively, ``device_ids`` can also be ``None``.
457
+ 2) For multi-device modules and CPU modules,
458
+ ``device_ids`` must be ``None``.
459
+
460
+ When ``device_ids`` is ``None`` for both cases,
461
+ both the input data for the forward pass and the actual module
462
+ must be placed on the correct device.
463
+ (default: ``None``)
464
+ output_device (int or torch.device): Device location of output for
465
+ single-device CUDA modules. For multi-device modules and
466
+ CPU modules, it must be ``None``, and the module itself
467
+ dictates the output location. (default: ``device_ids[0]``
468
+ for single-device modules)
469
+ broadcast_buffers (bool): Flag that enables syncing (broadcasting)
470
+ buffers of the module at beginning of the ``forward``
471
+ function. (default: ``True``)
472
+ process_group: The process group to be used for distributed data
473
+ all-reduction. If ``None``, the default process group, which
474
+ is created by :func:`torch.distributed.init_process_group`,
475
+ will be used. (default: ``None``)
476
+ bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into
477
+ multiple buckets so that gradient reduction of each
478
+ bucket can potentially overlap with backward computation.
479
+ :attr:`bucket_cap_mb` controls the bucket size in
480
+ MegaBytes (MB). (default: 25)
481
+ find_unused_parameters (bool): Traverse the autograd graph from all
482
+ tensors contained in the return value of the
483
+ wrapped module's ``forward`` function. Parameters
484
+ that don't receive gradients as part of this
485
+ graph are preemptively marked as being ready to
486
+ be reduced. In addition, parameters that may have
487
+ been used in the wrapped module's ``forward``
488
+ function but were not part of loss computation and
489
+ thus would also not receive gradients are
490
+ preemptively marked as ready to be reduced.
491
+ (default: ``False``)
492
+ check_reduction: This argument is deprecated.
493
+ gradient_as_bucket_view (bool): When set to ``True``, gradients will be views
494
+ pointing to different offsets of ``allreduce`` communication
495
+ buckets. This can reduce peak memory usage, where the
496
+ saved memory size will be equal to the total gradients
497
+ size. Moreover, it avoids the overhead of copying between
498
+ gradients and ``allreduce`` communication buckets. When
499
+ gradients are views, ``detach_()`` cannot be called on the
500
+ gradients. If hitting such errors, please fix it by
501
+ referring to the :meth:`~torch.optim.Optimizer.zero_grad`
502
+ function in ``torch/optim/optimizer.py`` as a solution.
503
+ Note that gradients will be views after first iteration, so
504
+ the peak memory saving should be checked after first iteration.
505
+ static_graph (bool): When set to ``True``, DDP knows the trained graph is
506
+ static. Static graph means 1) The set of used and unused
507
+ parameters will not change during the whole training loop; in
508
+ this case, it does not matter whether users set
509
+ ``find_unused_parameters = True`` or not. 2) How the graph is trained
510
+ will not change during the whole training loop (meaning there is
511
+ no control flow depending on iterations).
512
+ When static_graph is set to be ``True``, DDP will support cases that
513
+ can not be supported in the past:
514
+ 1) Reentrant backwards.
515
+ 2) Activation checkpointing multiple times.
516
+ 3) Activation checkpointing when model has unused parameters.
517
+ 4) There are model parameters that are outside of forward function.
518
+ 5) Potentially improve performance when there are unused parameters,
519
+ as DDP will not search graph in each iteration to detect unused
520
+ parameters when static_graph is set to be ``True``.
521
+ To check whether you can set static_graph to be ``True``, one way is to
522
+ check ddp logging data at the end of your previous model training,
523
+ if ``ddp_logging_data.get("can_set_static_graph") == True``, mostly you
524
+ can set ``static_graph = True`` as well.
525
+
526
+ Example::
527
+ >>> # xdoctest: +SKIP("undefined variables")
528
+ >>> model_DDP = torch.nn.parallel.DistributedDataParallel(model)
529
+ >>> # Training loop
530
+ >>> ...
531
+ >>> ddp_logging_data = model_DDP._get_ddp_logging_data()
532
+ >>> static_graph = ddp_logging_data.get("can_set_static_graph")
533
+
534
+
535
+ Attributes:
536
+ module (Module): the module to be parallelized.
537
+
538
+ Example::
539
+
540
+ >>> # xdoctest: +SKIP("undefined variables")
541
+ >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
542
+ >>> net = torch.nn.parallel.DistributedDataParallel(model)
543
+ """
544
+
545
+ # used to track whether the given thread is inside ddp forward for torchdynamo purposes
546
+ _active_ddp_module = None
547
+
548
+ def __init__(
549
+ self,
550
+ module,
551
+ device_ids=None,
552
+ output_device=None,
553
+ dim=0,
554
+ broadcast_buffers=True,
555
+ process_group=None,
556
+ bucket_cap_mb=25,
557
+ find_unused_parameters=False,
558
+ check_reduction=False,
559
+ gradient_as_bucket_view=False,
560
+ static_graph=False,
561
+ ):
562
+ super().__init__()
563
+ Joinable.__init__(self)
564
+ self.logger = None
565
+ if hasattr(module, "_ddp_params_and_buffers_to_ignore"):
566
+ self.parameters_to_ignore = set(module._ddp_params_and_buffers_to_ignore)
567
+ else:
568
+ self.parameters_to_ignore = set()
569
+ self._module_parameters = [p for n, p in module.named_parameters() if n not in self.parameters_to_ignore]
570
+ if not any((p.requires_grad for p in self._module_parameters)):
571
+ self._log_and_throw(
572
+ RuntimeError,
573
+ "DistributedDataParallel is not needed when a module "
574
+ "doesn't have any parameter that requires a gradient.",
575
+ )
576
+
577
+ if device_ids is not None and len(device_ids) > 1:
578
+ self._log_and_throw(
579
+ ValueError,
580
+ "device_ids can only be None or contain a single element.",
581
+ )
582
+
583
+ self.is_multi_device_module = len({p.device for p in self._module_parameters}) > 1
584
+ distinct_device_types = {p.device.type for p in self._module_parameters if p.device is not None}
585
+ if len(distinct_device_types) != 1:
586
+ self._log_and_throw(
587
+ ValueError,
588
+ "DistributedDataParallel's input module must be on "
589
+ "the same type of devices, but input module parameters are located on {}.".format(
590
+ distinct_device_types
591
+ ),
592
+ )
593
+
594
+ self.device_type = list(distinct_device_types)[0]
595
+
596
+ if (
597
+ device_ids is None
598
+ or len(device_ids) == 0 # For backward compatibility.
599
+ or self.device_type == "cpu"
600
+ or self.is_multi_device_module
601
+ ):
602
+ if device_ids or output_device:
603
+ self._log_and_throw(
604
+ ValueError,
605
+ "DistributedDataParallel device_ids and output_device arguments "
606
+ "only work with single-device/multiple-device GPU modules or CPU modules, "
607
+ "but got device_ids {}, output_device {}, and module parameters {}.".format(
608
+ device_ids,
609
+ output_device,
610
+ {p.device for p in self._module_parameters},
611
+ ),
612
+ )
613
+
614
+ self.device_ids = None
615
+ self.output_device = None
616
+ else:
617
+ self.device_ids = [_get_device_index(x, True) for x in device_ids]
618
+
619
+ if output_device is None:
620
+ output_device = device_ids[0]
621
+
622
+ self.output_device = _get_device_index(output_device, True)
623
+
624
+ if process_group is None:
625
+ self.process_group = _get_default_group()
626
+ else:
627
+ self.process_group = process_group
628
+
629
+ self.static_graph = False
630
+ self.dim = dim
631
+ self.module = module
632
+ self.device = list(self._module_parameters)[0].device
633
+ self.broadcast_buffers = broadcast_buffers
634
+ self.find_unused_parameters = find_unused_parameters
635
+ self.require_backward_grad_sync = True
636
+ self.require_forward_param_sync = True
637
+ self.gradient_as_bucket_view = gradient_as_bucket_view
638
+
639
+ self._use_replicated_tensor_module = (
640
+ _ddp_with_replicated_tensor_enabled()
641
+ )
642
+ self._build_replicated_tensor_module()
643
+
644
+ if check_reduction:
645
+ # This argument is no longer used since the reducer
646
+ # will ensure reduction completes even if some parameters
647
+ # do not receive gradients.
648
+ warnings.warn(
649
+ "The `check_reduction` argument in `DistributedDataParallel` "
650
+ "module is deprecated. Please avoid using it."
651
+ )
652
+
653
+ # Check that a module does not have Uninitialized parameters
654
+ for param in self._module_parameters:
655
+ if isinstance(param, torch.nn.parameter.UninitializedParameter):
656
+ self._log_and_throw(
657
+ RuntimeError,
658
+ "Modules with uninitialized parameters can't be used with `DistributedDataParallel`. "
659
+ "Run a dummy forward pass to correctly initialize the modules",
660
+ )
661
+ # used for intra-node param sync and inter-node sync as well
662
+ self.broadcast_bucket_size = int(250 * 1024 * 1024)
663
+
664
+ # reduction bucket size
665
+ self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024)
666
+ # Whether to perform input tensor CPU to GPU copies on a side-stream
667
+ self.use_side_stream_for_tensor_copies = (
668
+ os.environ.get("PYTORCH_DDP_USE_SIDE_STREAM", "1") == "1"
669
+ )
670
+
671
+ # Build parameters for reducer.
672
+ parameters, expect_sparse_gradient = self._build_params_for_reducer()
673
+ # Verify model equivalence.
674
+ _verify_param_shape_across_processes(self.process_group, parameters)
675
+ # Sync params and buffers. Ensures all DDP models start off at the same value.
676
+ _sync_module_states(
677
+ module=self.module,
678
+ process_group=self.process_group,
679
+ broadcast_bucket_size=self.broadcast_bucket_size,
680
+ src=0,
681
+ params_and_buffers_to_ignore=self.parameters_to_ignore,
682
+ )
683
+ # In debug mode, build a mapping of parameter index -> parameter.
684
+ param_to_name_mapping = self._build_debug_param_to_name_mapping(
685
+ parameters
686
+ )
687
+ # Builds reducer.
688
+ self._ddp_init_helper(
689
+ parameters,
690
+ expect_sparse_gradient,
691
+ param_to_name_mapping,
692
+ static_graph,
693
+ )
694
+ self._has_rebuilt_buckets = False
695
+
696
+ if static_graph:
697
+ self._set_static_graph()
698
+
699
+ self._setup_in_backward_optimizers()
700
+
701
+ def _setup_in_backward_optimizers(self):
702
+ # Check if user has used apply_optim_in_backward to overlap optimizer
703
+ # step + DDP backward. Current constraints:
704
+ # 1. Only allreduce is supported at the moment, no custom communication.
705
+ # 2. The reducer by default sets all grads for parameters DDP manages to
706
+ # None after they have been applied by the optimizer. There is no support
707
+ # for setting only some parameter grads to None, this must be done manually
708
+ # by user (and DDP_OVERLAPPED_OPTIM_SET_GRADS_TO_NONE=0 needs to be set.)
709
+ # If your use case requires some DDP managed parameters to run with
710
+ # an in-backward optimizer and some with a traditional optimizer, please
711
+ # ping https://github.com/pytorch/pytorch/issues/90052.
712
+ # NOTE: we use self._module_parameters instead of .parameters() since
713
+ # the former excludes ignored (non-DDP managed) parameters.
714
+ if any(
715
+ hasattr(p, '_in_backward_optimizers') for p in self._module_parameters
716
+ ):
717
+ # Remove hooks that apply_optim_in_backward had registered because
718
+ # DDP customizes how optimizer is overlapped with backward due to
719
+ # the allreduce.
720
+ param_to_handle_map = dist.optim.apply_optimizer_in_backward.param_to_optim_hook_handle_map
721
+ for p in self._module_parameters:
722
+ for handle in param_to_handle_map.get(p, []):
723
+ handle.remove()
724
+
725
+ # Need a weakref to the reducer in order to run all_reduce.
726
+ reducer_weakref = weakref.ref(self.reducer)
727
+ # Note: importing in function, otherwise this will cause a circular
728
+ # import.
729
+ from torch.distributed.algorithms.ddp_comm_hooks.optimizer_overlap_hooks import (
730
+ _apply_optim_in_backward_hook
731
+ )
732
+ self.register_comm_hook(
733
+ (reducer_weakref, self.process_group),
734
+ _apply_optim_in_backward_hook(
735
+ gradient_is_bucket_view=self.gradient_as_bucket_view
736
+ ),
737
+ )
738
+
739
+ # TODO (rohan-varma): this is a workaround that allows users to
740
+ # disable the default behavior of DDP managed parameters with
741
+ # optimizer running in backward having their gradients all set to None.
742
+ # Currently, it is an "all or nothing behavior" where DDP will set
743
+ # no grads to None or all of them, relaxing this behavior will be
744
+ # done dependent on use cases.
745
+ if os.getenv("DDP_OVERLAPPED_OPTIM_SET_GRADS_TO_NONE", "1") != "0":
746
+ warnings.warn(
747
+ "DDP + apply_optim_in_backward will currently set all "
748
+ "parameter gradients to None. If this is not the desired "
749
+ "behavior, please set env variable "
750
+ "DDP_OVERLAPPED_OPTIM_SET_GRADS_TO_NONE=0, and manually set "
751
+ "gradients to None/zero as desired."
752
+ )
753
+ self.reducer._set_grads_to_none() # type: ignore[attr-defined]
754
+
755
+ def _build_replicated_tensor_module(self):
756
+ if self._use_replicated_tensor_module:
757
+ # Create a module with ReplicatedTensor without copying tensors. Avoid
758
+ # registering '_replicated_tensor_module' as a submodule by directly
759
+ # adding to self.__dict__.
760
+ from ._replicated_tensor_ddp_interop import _replicate_module
761
+
762
+ self.__dict__["_replicated_tensor_module"] = _replicate_module(
763
+ self.module, self.process_group
764
+ )
765
+
766
+ def _log_and_throw(self, err_type, err_msg):
767
+ if self.logger is not None:
768
+ self.logger.set_error_and_log(f"{str(err_type)}: {err_msg}")
769
+ raise err_type(err_msg)
770
+
771
+ def _ddp_init_helper(
772
+ self,
773
+ parameters,
774
+ expect_sparse_gradient,
775
+ param_to_name_mapping,
776
+ static_graph,
777
+ ):
778
+ """
779
+ Initialization helper function that does the following:
780
+ (1) bucketing the parameters for reductions
781
+ (2) resetting the bucketing states
782
+ (3) registering the grad hooks
783
+ (4) Logging construction-time DDP logging data
784
+ (5) passing a handle of DDP to SyncBatchNorm Layer
785
+ """
786
+ self.num_iterations = 0
787
+ # Note that the parameter order here is not the order in which they are used,
788
+ # especially in models with control flow.
789
+ #
790
+ # In addition, since parameters are not presented in the real execution order,
791
+ # if a certain model happens to also
792
+ # 1) have other collectives comm ops in its backward graph.
793
+ # 2) have unused parameter in subset ranks of the whole world.
794
+ # bucketing could insert ALL-REDUCE comm op too early on the rank with unused parameter,
795
+ # matching up with other collectives comm ops on other ranks unexpectedly.
796
+ #
797
+ # In order to handle this corner case, when the parameters are not in the real execution order,
798
+ # we don't do bucketing, thus only one ALL-REDUCE is inserted after all the gradients
799
+ # of the whole graph are computed.
800
+ #
801
+ # Notice, here we only disable bucketing for the first iteration.
802
+ # After the first iteration, it's OK to rebuild buckets,
803
+ # because "bucket rebuild" bucketizes parameters based on its real execution order in backward graph.
804
+
805
+ # Can remove this branching once #73732 is landed.
806
+ if static_graph is True or self.find_unused_parameters is False:
807
+ bucket_size_limits = [sys.maxsize]
808
+ else:
809
+ bucket_size_limits = [
810
+ dist._DEFAULT_FIRST_BUCKET_BYTES,
811
+ self.bucket_bytes_cap,
812
+ ]
813
+ (
814
+ bucket_indices,
815
+ per_bucket_size_limits,
816
+ ) = dist._compute_bucket_assignment_by_size(
817
+ parameters,
818
+ bucket_size_limits,
819
+ expect_sparse_gradient,
820
+ )
821
+
822
+ # Note: reverse list of buckets because we want to approximate the
823
+ # order in which their gradients are produced, and assume they
824
+ # are used in the forward pass in the order they are defined.
825
+ self.reducer = dist.Reducer(
826
+ parameters,
827
+ list(reversed(bucket_indices)),
828
+ list(reversed(per_bucket_size_limits)),
829
+ self.process_group,
830
+ expect_sparse_gradient,
831
+ # The bucket size limit is specified in the constructor.
832
+ # Additionally, we allow for a single small bucket for parameters
833
+ # that are defined first, such that their gradients don't spill into
834
+ # a much larger bucket, adding unnecessary latency after gradient
835
+ # computation finishes. Experiments showed 1MB is a reasonable value.
836
+ self.bucket_bytes_cap,
837
+ self.find_unused_parameters,
838
+ self.gradient_as_bucket_view,
839
+ param_to_name_mapping,
840
+ # User can set dist._DEFAULT_FIRST_BUCKET_BYTES to tune DDP first
841
+ # bucket.
842
+ dist._DEFAULT_FIRST_BUCKET_BYTES,
843
+ )
844
+
845
+ self.logger = dist.Logger(self.reducer)
846
+ # Set as a weak reference to avoid reference cycle between
847
+ # logger and reducer.
848
+ self.reducer.set_logger(self.logger)
849
+
850
+ has_sync_bn = False
851
+ for submodule in self.module.modules():
852
+ if isinstance(submodule, torch.nn.SyncBatchNorm):
853
+ has_sync_bn = True
854
+ break
855
+
856
+ # Set logging data that can be got during construction time.
857
+ self.logger.set_construction_data_and_log(
858
+ self.module.__class__.__name__,
859
+ [] if self.device_ids is None else self.device_ids,
860
+ -1 if self.output_device is None else self.output_device,
861
+ self.broadcast_buffers,
862
+ has_sync_bn,
863
+ static_graph,
864
+ )
865
+
866
+ # passing a handle to torch.nn.SyncBatchNorm layer
867
+ self._passing_sync_batchnorm_handle(self.module)
868
+
869
+ def __getstate__(self):
870
+ self._check_default_group()
871
+ attrs = copy.copy(self.__dict__)
872
+ del attrs["process_group"]
873
+ del attrs["reducer"]
874
+ del attrs["logger"]
875
+ if self._use_replicated_tensor_module:
876
+ del attrs["_replicated_tensor_module"]
877
+ return attrs
878
+
879
+ def __setstate__(self, state):
880
+ # If serializable, then the process group should be the default one
881
+ self.process_group = _get_default_group()
882
+ super().__setstate__(state)
883
+ self._build_replicated_tensor_module()
884
+ self.__dict__.setdefault("require_forward_param_sync", True)
885
+ self.__dict__.setdefault("require_backward_grad_sync", True)
886
+ parameters, expect_sparse_gradient = self._build_params_for_reducer()
887
+ # In debug mode, build a mapping of parameter index -> parameter.
888
+ param_to_name_mapping = self._build_debug_param_to_name_mapping(
889
+ parameters
890
+ )
891
+ # Builds reducer.
892
+ self._ddp_init_helper(
893
+ parameters,
894
+ expect_sparse_gradient,
895
+ param_to_name_mapping,
896
+ self.static_graph,
897
+ )
898
+ if self.static_graph:
899
+ self.reducer._set_static_graph()
900
+ assert self.logger is not None
901
+ self.logger._set_static_graph()
902
+
903
+ def _build_params_for_reducer(self):
904
+ # Build tuple of (module, parameter) for all parameters that require grads.
905
+ modules_and_parameters = [
906
+ (module, parameter)
907
+ for module_name, module in self.module.named_modules()
908
+ for parameter in [
909
+ param
910
+ # Note that we access module.named_parameters instead of
911
+ # parameters(module). parameters(module) is only needed in the
912
+ # single-process multi device case, where it accesses replicated
913
+ # parameters through _former_parameters.
914
+ for param_name, param in module.named_parameters(recurse=False)
915
+ if param.requires_grad
916
+ and f"{module_name}.{param_name}"
917
+ not in self.parameters_to_ignore
918
+ ]
919
+ ]
920
+
921
+ # Deduplicate any parameters that might be shared across child modules.
922
+ memo = set()
923
+ modules_and_parameters = [
924
+ # "p not in memo" is the deduplication check.
925
+ # "not memo.add(p)" is always True, and it's only there to cause "add(p)" if needed.
926
+ (m, p)
927
+ for m, p in modules_and_parameters
928
+ if p not in memo and not memo.add(p) # type: ignore[func-returns-value]
929
+ ]
930
+
931
+ # Build list of parameters.
932
+ parameters = [parameter for _, parameter in modules_and_parameters]
933
+
934
+ # Checks if a module will produce a sparse gradient.
935
+ def produces_sparse_gradient(module):
936
+ if isinstance(module, (torch.nn.Embedding, torch.nn.EmbeddingBag)):
937
+ return module.sparse
938
+ return False
939
+
940
+ # Build list of booleans indicating whether or not to expect sparse
941
+ # gradients for the corresponding parameters.
942
+ expect_sparse_gradient = [
943
+ produces_sparse_gradient(module)
944
+ for module, _ in modules_and_parameters
945
+ ]
946
+
947
+ self._assign_modules_buffers()
948
+
949
+ return parameters, expect_sparse_gradient
950
+
951
+ def _assign_modules_buffers(self):
952
+ """
953
+ Assigns module buffers to self.modules_buffers which are then used to
954
+ broadcast across ranks when broadcast_buffers=True. Note that this
955
+ must be called every time buffers need to be synced because buffers can
956
+ be reassigned by user module,
957
+ see https://github.com/pytorch/pytorch/issues/63916.
958
+ """
959
+ # Collect buffers for modules, filtering out buffers that should be ignored.
960
+ named_module_buffers = [
961
+ (buffer, buffer_name)
962
+ for buffer_name, buffer in self.module.named_buffers()
963
+ if buffer_name not in self.parameters_to_ignore
964
+ ]
965
+ self.modules_buffers = [
966
+ buffer for (buffer, buffer_name) in named_module_buffers
967
+ ]
968
+ # Dict[str, tensor] representing module buffers not ignored by DDP.
969
+ self.named_module_buffers = {
970
+ buffer_name: buffer
971
+ for (buffer, buffer_name) in named_module_buffers
972
+ }
973
+
974
+ def _build_debug_param_to_name_mapping(self, parameters):
975
+ if dist.get_debug_level() == dist.DebugLevel.OFF:
976
+ return {}
977
+
978
+ param_to_param_index = {
979
+ parameters[i]: i for i in range(len(parameters))
980
+ }
981
+ param_set = set(parameters)
982
+ param_index_to_param_fqn = {}
983
+ for module_name, module in self.module.named_modules():
984
+ for param_name, param in module.named_parameters(recurse=False):
985
+ fqn = f"{module_name}.{param_name}"
986
+ # Bypass ignored parameters since those are not reduced by DDP
987
+ # to begin with.
988
+ if fqn not in self.parameters_to_ignore and param.requires_grad:
989
+ if param not in param_set:
990
+ self._log_and_throw(
991
+ ValueError,
992
+ f"Param with name {fqn} found in module parameters, but not DDP parameters."
993
+ " This indicates a bug in DDP, please report an issue to PyTorch.",
994
+ )
995
+ param_index = param_to_param_index[param]
996
+ param_index_to_param_fqn[param_index] = fqn
997
+
998
+ # Ensure we covered all parameters
999
+ if len(param_set) != len(param_index_to_param_fqn):
1000
+ self._log_and_throw(
1001
+ ValueError,
1002
+ (
1003
+ "Expected param to name mapping to cover all parameters, but"
1004
+ f" got conflicting lengths: {len(param_set)} vs "
1005
+ f"{len(param_index_to_param_fqn)}. This indicates a bug in DDP"
1006
+ ", please report an issue to PyTorch."
1007
+ ),
1008
+ )
1009
+
1010
+ return param_index_to_param_fqn
1011
+
1012
+ def _get_parameters(self, m, recurse=True):
1013
+ """
1014
+ Returns a generator of module parameters
1015
+ """
1016
+
1017
+ def model_parameters(m):
1018
+ ps = (
1019
+ m._former_parameters.values()
1020
+ if hasattr(m, "_former_parameters")
1021
+ else m.parameters(recurse=False)
1022
+ )
1023
+ yield from ps
1024
+
1025
+ for m in m.modules() if recurse else [m]:
1026
+ for p in model_parameters(m):
1027
+ yield p
1028
+
1029
+ def _check_default_group(self):
1030
+ pickle_not_supported = False
1031
+ try:
1032
+ if self.process_group != _get_default_group():
1033
+ pickle_not_supported = True
1034
+ except RuntimeError:
1035
+ pickle_not_supported = True
1036
+
1037
+ if pickle_not_supported:
1038
+ self._log_and_throw(
1039
+ RuntimeError,
1040
+ "DDP Pickling/Unpickling are only supported "
1041
+ "when using DDP with the default process "
1042
+ "group. That is, when you have called "
1043
+ "init_process_group and have not passed "
1044
+ "process_group argument to DDP constructor",
1045
+ )
1046
+
1047
+ @contextmanager
1048
+ def no_sync(self):
1049
+ r"""
1050
+ A context manager to disable gradient synchronizations across DDP
1051
+ processes. Within this context, gradients will be accumulated on module
1052
+ variables, which will later be synchronized in the first
1053
+ forward-backward pass exiting the context.
1054
+
1055
+ Example::
1056
+
1057
+ >>> # xdoctest: +SKIP("undefined variables")
1058
+ >>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg)
1059
+ >>> with ddp.no_sync():
1060
+ >>> for input in inputs:
1061
+ >>> ddp(input).backward() # no synchronization, accumulate grads
1062
+ >>> ddp(another_input).backward() # synchronize grads
1063
+
1064
+ .. warning::
1065
+ The forward pass should be included inside the context manager, or
1066
+ else gradients will still be synchronized.
1067
+ """
1068
+ old_require_backward_grad_sync = self.require_backward_grad_sync
1069
+ self.require_backward_grad_sync = False
1070
+ try:
1071
+ yield
1072
+ finally:
1073
+ self.require_backward_grad_sync = old_require_backward_grad_sync
1074
+
1075
+ @classmethod
1076
+ def _get_active_ddp_module(cls):
1077
+ """
1078
+ TorchDynamo needs to know whether DDP is currently active, and access the DDP module in order to cooperatively optimize it.
1079
+ """
1080
+ return cls._active_ddp_module
1081
+
1082
+ # note, this ctxmgr function is marked 'skip' in torchdynamo, so dynamo only kicks in
1083
+ # for the 'module_to_run' underneath
1084
+ # see torch._dynamo/eval_frame.py TorchPatcher.patch for more details
1085
+ @contextmanager
1086
+ def _inside_ddp_forward(self):
1087
+ DistributedDataParallel._active_ddp_module = self
1088
+ try:
1089
+ yield
1090
+ except Exception:
1091
+ raise
1092
+ finally:
1093
+ DistributedDataParallel._active_ddp_module = None
1094
+
1095
+ def _run_ddp_forward(self, *inputs, **kwargs):
1096
+ module_to_run = (
1097
+ self._replicated_tensor_module
1098
+ if self._use_replicated_tensor_module
1099
+ else self.module
1100
+ )
1101
+
1102
+ if self.device_ids:
1103
+ inputs, kwargs = _to_kwargs(
1104
+ inputs,
1105
+ kwargs,
1106
+ self.device_ids[0],
1107
+ self.use_side_stream_for_tensor_copies,
1108
+ )
1109
+ with self._inside_ddp_forward():
1110
+ return module_to_run(*inputs[0], **kwargs[0]) # type: ignore[index]
1111
+ else:
1112
+ with self._inside_ddp_forward():
1113
+ return module_to_run(*inputs, **kwargs)
1114
+
1115
+ def forward(self, *inputs, **kwargs):
1116
+ with torch.autograd.profiler.record_function(
1117
+ "DistributedDataParallel.forward"
1118
+ ):
1119
+ if torch.is_grad_enabled() and self.require_backward_grad_sync:
1120
+ assert self.logger is not None
1121
+ self.logger.set_runtime_stats_and_log()
1122
+ self.num_iterations += 1
1123
+ self.reducer.prepare_for_forward()
1124
+
1125
+ # Notify the join context that this process has not joined, if
1126
+ # needed
1127
+ work = Join.notify_join_context(self)
1128
+ if work:
1129
+ self.reducer._set_forward_pass_work_handle(
1130
+ work, self._divide_by_initial_world_size # type: ignore[arg-type]
1131
+ )
1132
+
1133
+ # Calling _rebuild_buckets before forward computation,
1134
+ # It may allocate new buckets before deallocating old buckets
1135
+ # inside _rebuild_buckets. To save peak memory usage,
1136
+ # call _rebuild_buckets before the peak memory usage increases
1137
+ # during forward computation.
1138
+ # This should be called only once during whole training period.
1139
+ if torch.is_grad_enabled() and self.reducer._rebuild_buckets():
1140
+ logger.info(
1141
+ "Reducer buckets have been rebuilt in this iteration."
1142
+ )
1143
+ self._has_rebuilt_buckets = True
1144
+
1145
+ # sync params according to location (before/after forward) user
1146
+ # specified as part of hook, if hook was specified.
1147
+ if self._check_sync_bufs_pre_fwd():
1148
+ self._sync_buffers()
1149
+
1150
+ if self._join_config.enable:
1151
+ # Notify joined ranks whether they should sync in backwards pass or not.
1152
+ self._check_global_requires_backward_grad_sync(
1153
+ is_joined_rank=False
1154
+ )
1155
+
1156
+ output = self._run_ddp_forward(*inputs, **kwargs)
1157
+
1158
+ # sync params according to location (before/after forward) user
1159
+ # specified as part of hook, if hook was specified.
1160
+ if self._check_sync_bufs_post_fwd():
1161
+ self._sync_buffers()
1162
+
1163
+ if torch.is_grad_enabled() and self.require_backward_grad_sync:
1164
+ self.require_forward_param_sync = True
1165
+ # We'll return the output object verbatim since it is a freeform
1166
+ # object. We need to find any tensors in this object, though,
1167
+ # because we need to figure out which parameters were used during
1168
+ # this forward pass, to ensure we short circuit reduction for any
1169
+ # unused parameters. Only if `find_unused_parameters` is set.
1170
+ if self.find_unused_parameters and not self.static_graph:
1171
+ # Do not need to populate this for static graph.
1172
+ self.reducer.prepare_for_backward(
1173
+ list(_find_tensors(output))
1174
+ )
1175
+ else:
1176
+ self.reducer.prepare_for_backward([])
1177
+ else:
1178
+ self.require_forward_param_sync = False
1179
+
1180
+ # TODO: DDPSink is currently enabled for unused parameter detection and
1181
+ # static graph training for first iteration.
1182
+ if (self.find_unused_parameters and not self.static_graph) or (
1183
+ self.static_graph and self.num_iterations == 1
1184
+ ):
1185
+ state_dict = {
1186
+ "static_graph": self.static_graph,
1187
+ "num_iterations": self.num_iterations,
1188
+ }
1189
+
1190
+ (
1191
+ output_tensor_list,
1192
+ treespec,
1193
+ output_is_rref,
1194
+ ) = _tree_flatten_with_rref(output)
1195
+ output_placeholders = [None for _ in range(len(output_tensor_list))]
1196
+ # Do not touch tensors that have no grad_fn, which can cause issues
1197
+ # such as https://github.com/pytorch/pytorch/issues/60733
1198
+ for i, output in enumerate(output_tensor_list):
1199
+ if torch.is_tensor(output) and output.grad_fn is None:
1200
+ output_placeholders[i] = output
1201
+
1202
+ # When find_unused_parameters=True, makes tensors which require grad
1203
+ # run through the DDPSink backward pass. When not all outputs are
1204
+ # used in loss, this makes those corresponding tensors receive
1205
+ # undefined gradient which the reducer then handles to ensure
1206
+ # param.grad field is not touched and we don't error out.
1207
+ passthrough_tensor_list = _DDPSink.apply(
1208
+ self.reducer,
1209
+ state_dict,
1210
+ *output_tensor_list,
1211
+ )
1212
+ for i in range(len(output_placeholders)):
1213
+ if output_placeholders[i] is None:
1214
+ output_placeholders[i] = passthrough_tensor_list[i]
1215
+
1216
+ # Reconstruct output data structure.
1217
+ output = _tree_unflatten_with_rref(
1218
+ output_placeholders, treespec, output_is_rref
1219
+ )
1220
+ return output
1221
+
1222
+ def scatter(self, inputs, kwargs, device_ids):
1223
+ return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
1224
+
1225
+ def to_kwargs(self, inputs, kwargs, device_id):
1226
+ # Kept for BC
1227
+ return _to_kwargs(
1228
+ inputs, kwargs, device_id, self.use_side_stream_for_tensor_copies
1229
+ )
1230
+
1231
+ def gather(self, outputs, output_device):
1232
+ return gather(outputs, output_device, dim=self.dim)
1233
+
1234
+ def train(self, mode=True):
1235
+ super().train(mode)
1236
+ if self._use_replicated_tensor_module:
1237
+ self._replicated_tensor_module.train(mode) # type: ignore[union-attr]
1238
+ return self
1239
+
1240
+ # When running in join mode, schedules an allreduce to notify joined ranks
1241
+ # of whether backwards pass synchronization will run this iteration or not.
1242
+ def _check_global_requires_backward_grad_sync(self, is_joined_rank):
1243
+ if not is_joined_rank and self.require_backward_grad_sync:
1244
+ requires_sync_tensor = torch.ones(1, device=self.device)
1245
+ else:
1246
+ requires_sync_tensor = torch.zeros(1, device=self.device)
1247
+
1248
+ work = dist.all_reduce(
1249
+ requires_sync_tensor, group=self.process_group, async_op=True
1250
+ )
1251
+ return work
1252
+
1253
+ # When running in join mode, checks and performs sync of module buffers if
1254
+ # the models have buffers that should be synchronized in the forward pass.
1255
+ def _check_and_sync_module_buffers(self):
1256
+ if self._check_sync_bufs_pre_fwd():
1257
+ authoritative_rank = self._find_common_rank(
1258
+ self._distributed_rank, False
1259
+ )
1260
+ self._sync_module_buffers(authoritative_rank)
1261
+
1262
+ # When running in join model, agrees upon a common rank and broadcast model
1263
+ # parameters to all other ranks.
1264
+ def _sync_final_model(self, is_last_joiner):
1265
+ # Agree upon the process that will be the authoritative model copy.
1266
+ # The current rank is a candidate for being the authoritative copy if
1267
+ # is_last_joiner=True. We break ties via picking the larger rank.
1268
+ self._authoritative_rank = self._find_common_rank(
1269
+ self._distributed_rank, is_last_joiner
1270
+ )
1271
+ _sync_module_states(
1272
+ module=self.module,
1273
+ process_group=self.process_group,
1274
+ broadcast_bucket_size=self.broadcast_bucket_size,
1275
+ src=self._authoritative_rank,
1276
+ params_and_buffers_to_ignore=self.parameters_to_ignore,
1277
+ )
1278
+
1279
+ # Schedule comm ops to match those scheduled in the reducer's backward
1280
+ # pass.
1281
+ def _match_all_reduce_for_bwd_pass(self):
1282
+ comm_work = []
1283
+ # Schedule comm in the same order as Reducer schedules them, i.e.
1284
+ # the order of the buckets. Retrieving the bucket order from the reducer
1285
+ # ensures that we keep the same order in join mode, such as when bucket
1286
+ # order is rebuilt dynamically.
1287
+
1288
+ # Returns grad_buckets in order, but real tensors are substituted with
1289
+ # zero tensors of the same shape.
1290
+ grad_buckets = self.reducer._get_zeros_like_grad_buckets()
1291
+ for grad_bucket in grad_buckets:
1292
+ # Joined processes contribute zero gradient. In the case that
1293
+ # divide_by_initial_world_size=True, we divide grads by the static
1294
+ # world size, if not, the dividing factor is reduced by the number
1295
+ # of joined processes.
1296
+ work = self.reducer._run_comm_hook(grad_bucket)
1297
+ comm_work.append(work)
1298
+ for work in comm_work:
1299
+ work.wait()
1300
+
1301
+ # Allreduces the used parameter mapping across ranks.
1302
+ def _match_unused_params_allreduce(self):
1303
+ locally_used_param_map = self.reducer._get_local_used_map()
1304
+ self.process_group.allreduce(locally_used_param_map)
1305
+
1306
+ def join(
1307
+ self,
1308
+ divide_by_initial_world_size: bool = True,
1309
+ enable: bool = True,
1310
+ throw_on_early_termination: bool = False,
1311
+ ):
1312
+ r"""
1313
+ A context manager to be used in conjunction with an instance of
1314
+ :class:`torch.nn.parallel.DistributedDataParallel` to be
1315
+ able to train with uneven inputs across participating processes.
1316
+
1317
+ This context manager will keep track of already-joined DDP processes,
1318
+ and "shadow" the forward and backward passes by inserting collective
1319
+ communication operations to match with the ones created by non-joined
1320
+ DDP processes. This will ensure each collective call has a corresponding
1321
+ call by already-joined DDP processes, preventing hangs or errors that
1322
+ would otherwise happen when training with uneven inputs across
1323
+ processes. Alternatively, if the flag ``throw_on_early_termination`` is
1324
+ specified to be ``True``, all trainers will throw an error once one rank
1325
+ runs out of inputs, allowing these errors to be caught and handled
1326
+ according to application logic.
1327
+
1328
+ Once all DDP processes have joined, the context manager will broadcast
1329
+ the model corresponding to the last joined process to all processes to
1330
+ ensure the model is the same across all processes
1331
+ (which is guaranteed by DDP).
1332
+
1333
+ To use this to enable training with uneven inputs across processes,
1334
+ simply wrap this context manager around your training loop. No further
1335
+ modifications to the model or data loading is required.
1336
+
1337
+ .. warning::
1338
+ If the model or training loop this context manager is wrapped around
1339
+ has additional distributed collective operations, such as
1340
+ ``SyncBatchNorm`` in the model's forward pass, then the flag
1341
+ ``throw_on_early_termination`` must be enabled. This is because this
1342
+ context manager is not aware of non-DDP collective communication.
1343
+ This flag will cause all ranks to throw when any one rank
1344
+ exhausts inputs, allowing these errors to be caught and recovered
1345
+ from across all ranks.
1346
+
1347
+ Args:
1348
+ divide_by_initial_world_size (bool): If ``True``, will divide
1349
+ gradients by the initial ``world_size`` DDP training was launched
1350
+ with. If ``False``, will compute the effective world size
1351
+ (number of ranks that have not depleted their inputs yet) and
1352
+ divide gradients by that during allreduce. Set
1353
+ ``divide_by_initial_world_size=True`` to ensure every input
1354
+ sample including the uneven inputs have equal weight in terms of
1355
+ how much they contribute to the global gradient. This is
1356
+ achieved by always dividing the gradient by the initial
1357
+ ``world_size`` even when we encounter uneven inputs. If you set
1358
+ this to ``False``, we divide the gradient by the remaining
1359
+ number of nodes. This ensures parity with training on a smaller
1360
+ ``world_size`` although it also means the uneven inputs would
1361
+ contribute more towards the global gradient. Typically, you
1362
+ would want to set this to ``True`` for cases where the last few
1363
+ inputs of your training job are uneven. In extreme cases, where
1364
+ there is a large discrepancy in the number of inputs, setting
1365
+ this to ``False`` might provide better results.
1366
+ enable (bool): Whether to enable uneven input detection or not. Pass
1367
+ in ``enable=False`` to disable in cases where you know that
1368
+ inputs are even across participating processes. Default is
1369
+ ``True``.
1370
+ throw_on_early_termination (bool): Whether to throw an error
1371
+ or continue training when at least one rank has exhausted
1372
+ inputs. If ``True``, will throw upon the first rank reaching end
1373
+ of data. If ``False``, will continue training with a smaller
1374
+ effective world size until all ranks are joined. Note that if
1375
+ this flag is specified, then the flag
1376
+ ``divide_by_initial_world_size`` would be ignored. Default
1377
+ is ``False``.
1378
+
1379
+
1380
+ Example::
1381
+
1382
+ >>> # xdoctest: +SKIP("Distributed")
1383
+ >>> import torch
1384
+ >>> import torch.distributed as dist
1385
+ >>> import os
1386
+ >>> import torch.multiprocessing as mp
1387
+ >>> import torch.nn as nn
1388
+ >>> # On each spawned worker
1389
+ >>> def worker(rank):
1390
+ >>> dist.init_process_group("nccl", rank=rank, world_size=2)
1391
+ >>> torch.cuda.set_device(rank)
1392
+ >>> model = nn.Linear(1, 1, bias=False).to(rank)
1393
+ >>> model = torch.nn.parallel.DistributedDataParallel(
1394
+ >>> model, device_ids=[rank], output_device=rank
1395
+ >>> )
1396
+ >>> # Rank 1 gets one more input than rank 0.
1397
+ >>> inputs = [torch.tensor([1]).float() for _ in range(10 + rank)]
1398
+ >>> with model.join():
1399
+ >>> for _ in range(5):
1400
+ >>> for inp in inputs:
1401
+ >>> loss = model(inp).sum()
1402
+ >>> loss.backward()
1403
+ >>> # Without the join() API, the below synchronization will hang
1404
+ >>> # blocking for rank 1's allreduce to complete.
1405
+ >>> torch.cuda.synchronize(device=rank)
1406
+ """
1407
+ return Join(
1408
+ [self],
1409
+ enable,
1410
+ throw_on_early_termination,
1411
+ divide_by_initial_world_size=divide_by_initial_world_size,
1412
+ )
1413
+
1414
+ def join_hook(
1415
+ self,
1416
+ **kwargs,
1417
+ ):
1418
+ r"""
1419
+ Returns the DDP join hook, which enables training on uneven inputs by
1420
+ shadowing the collective communications in the forward and backward
1421
+ passes.
1422
+
1423
+ Arguments:
1424
+ kwargs (dict): a :class:`dict` containing any keyword arguments
1425
+ to modify the behavior of the join hook at run time; all
1426
+ :class:`Joinable` instances sharing the same join context
1427
+ manager are forwarded the same value for ``kwargs``.
1428
+
1429
+ The hook supports the following keyword arguments:
1430
+ divide_by_initial_world_size (bool, optional):
1431
+ If ``True``, then gradients are divided by the initial world
1432
+ size that DDP was launched with.
1433
+ If ``False``, then gradients are divided by the effective world
1434
+ size (i.e. the number of non-joined processes), meaning that
1435
+ the uneven inputs contribute more toward the global gradient.
1436
+ Typically, this should be set to ``True`` if the degree of
1437
+ unevenness is small but can be set to ``False`` in extreme
1438
+ cases for possibly better results.
1439
+ Default is ``True``.
1440
+ """
1441
+ divide_by_initial_world_size = kwargs.get(
1442
+ "divide_by_initial_world_size", True
1443
+ )
1444
+ return _DDPJoinHook(
1445
+ self, divide_by_initial_world_size=divide_by_initial_world_size
1446
+ )
1447
+
1448
+ @property
1449
+ def join_device(self):
1450
+ return self.device
1451
+
1452
+ @property
1453
+ def join_process_group(self):
1454
+ return self.process_group
1455
+
1456
+ def _register_buffer_comm_hook(
1457
+ self,
1458
+ state,
1459
+ hook: Callable,
1460
+ comm_hook_location=_BufferCommHookLocation.POST_FORWARD,
1461
+ ):
1462
+ r"""
1463
+ Allows custom registration of hooks that define how buffer are
1464
+ synchronized across ranks. The hook takes in an optional state
1465
+ and is passed in a Dict[str, Tensor] corresponding to buffer names
1466
+ and the buffers, and can run arbitrary reductions on buffers as
1467
+ opposed to DDP's default broadcast from rank 0. This is useful for
1468
+ example if a counter needs to be summed or averaged across ranks
1469
+ every iteration.
1470
+
1471
+ Args:
1472
+ state (Any): Optional state that is passed to the hook.
1473
+ hook (Callable): Callable with the following signature:
1474
+ ``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]``
1475
+ comm_hook_location (_BufferCommHookLocation): Enum value indicating
1476
+ where to run the hook.
1477
+ _BufferCommHookLocation.PRE_FORWARD means that the
1478
+ hook will run _before_ the forward pass, and
1479
+ _BufferCommHookLocation.POST_FORWARD means that the
1480
+ hook will run _after_ the forward pass.
1481
+
1482
+ NOTE: To maximize performance, users can return a
1483
+ List[torch.futures.Future] from their hook, and DDP will
1484
+ install and await these hooks appropriately at the end of
1485
+ the backward pass. This will ensure all buffers are
1486
+ synchronized by the end of the backward pass. If this
1487
+ setting is used, it is recommended to pass
1488
+ comm_hook_location=_BufferCommHookLocation.POST_FORWARD,
1489
+ which will trigger the hook after the forward pass.
1490
+ If _BufferCommHookLocation.PRE_FORWARD is used, users must
1491
+ ensure appropriate synchronization when manipulating GPU
1492
+ buffers in the forward pass.
1493
+ """
1494
+ assert callable(hook)
1495
+ self.buffer_hook = _BufferCommHook(
1496
+ buffer_comm_hook=hook,
1497
+ buffer_comm_hook_state=state,
1498
+ buffer_comm_hook_location=comm_hook_location,
1499
+ )
1500
+
1501
+ def register_comm_hook(self, state: object, hook: Callable):
1502
+ r"""
1503
+ Registers a communication hook which is an enhancement that provides a
1504
+ flexible hook to users where they can specify how DDP aggregates gradients
1505
+ across multiple workers.
1506
+
1507
+ This hook would be very useful for researchers to try out new ideas. For
1508
+ example, this hook can be used to implement several algorithms like GossipGrad
1509
+ and gradient compression which involve different communication strategies for
1510
+ parameter syncs while running Distributed DataParallel training.
1511
+
1512
+ Args:
1513
+ state (object): Passed to the hook to maintain any state information during the training process.
1514
+ Examples include error feedback in gradient compression,
1515
+ peers to communicate with next in GossipGrad, etc.
1516
+
1517
+ It is locally stored by each worker
1518
+ and shared by all the gradient tensors on the worker.
1519
+ hook (Callable): Callable with the following signature:
1520
+ ``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]``:
1521
+
1522
+ This function is called once the bucket is ready. The
1523
+ hook can perform whatever processing is needed and return
1524
+ a Future indicating completion of any async work (ex: allreduce).
1525
+ If the hook doesn't perform any communication, it still
1526
+ must return a completed Future. The Future should hold the
1527
+ new value of grad bucket's tensors. Once a bucket is ready,
1528
+ c10d reducer would call this hook and use the tensors returned
1529
+ by the Future and copy grads to individual parameters.
1530
+ Note that the future's return type must be a single tensor.
1531
+
1532
+ We also provide an API called ``get_future`` to retrieve a
1533
+ Future associated with the completion of ``c10d.ProcessGroup.Work``.
1534
+ ``get_future`` is currently supported for NCCL and also supported for most
1535
+ operations on GLOO and MPI, except for peer to peer operations (send/recv).
1536
+
1537
+ .. warning ::
1538
+ Grad bucket's tensors will not be predivided by world_size. User is responsible
1539
+ to divide by the world_size in case of operations like allreduce.
1540
+
1541
+ .. warning ::
1542
+ DDP communication hook can only be registered once and should be registered
1543
+ before calling backward.
1544
+
1545
+ .. warning ::
1546
+ The Future object that hook returns should contain a single tensor
1547
+ that has the same shape with the tensors inside grad bucket.
1548
+
1549
+ .. warning ::
1550
+ ``get_future`` API supports NCCL, and partially GLOO and MPI backends (no support
1551
+ for peer-to-peer operations like send/recv) and will return a ``torch.futures.Future``.
1552
+
1553
+ Example::
1554
+ Below is an example of a noop hook that returns the same tensor.
1555
+
1556
+ >>> # xdoctest: +SKIP('undefined name')
1557
+ >>> def noop(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:
1558
+ >>> fut = torch.futures.Future()
1559
+ >>> fut.set_result(bucket.buffer())
1560
+ >>> return fut
1561
+ >>> ddp.register_comm_hook(state=None, hook=noop)
1562
+
1563
+ Example::
1564
+ Below is an example of a Parallel SGD algorithm where gradients are encoded before
1565
+ allreduce, and then decoded after allreduce.
1566
+
1567
+ >>> # xdoctest: +SKIP('undefined name')
1568
+ >>> def encode_and_decode(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:
1569
+ >>> encoded_tensor = encode(bucket.buffer()) # encode gradients
1570
+ >>> fut = torch.distributed.all_reduce(encoded_tensor).get_future()
1571
+ >>> # Define the then callback to decode.
1572
+ >>> def decode(fut):
1573
+ >>> decoded_tensor = decode(fut.value()[0]) # decode gradients
1574
+ >>> return decoded_tensor
1575
+ >>> return fut.then(decode)
1576
+ >>> ddp.register_comm_hook(state=None, hook=encode_and_decode)
1577
+ """
1578
+ self._check_comm_hook(hook)
1579
+ assert self.logger is not None
1580
+ self.logger._set_comm_hook_name(hook.__qualname__)
1581
+ dist._register_comm_hook(self.reducer, state, hook)
1582
+
1583
+ def _register_builtin_comm_hook(self, comm_hook_type):
1584
+ r"""
1585
+ Registers a built-in communication hook that specifies how DDP
1586
+ aggregates gradients across multiple workers.
1587
+ The built-in hooks aim to provide efficient C++ implementations for certain hooks,
1588
+ which might not be as efficient if implemented in Python using a Python communication hook.
1589
+
1590
+ Args:
1591
+ comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as ALLREDUCE, FP16_COMPRESS, etc.
1592
+
1593
+ .. warning ::
1594
+ DDP communication hook can only be registered once and should be registered
1595
+ before calling backward.
1596
+
1597
+ Example::
1598
+ Below is an example of a FP16 compression where gradients are
1599
+ compressed into 16-bit floating-point numbers before allreduce, and
1600
+ then decompressed after allreduce.
1601
+
1602
+ >>> # xdoctest: +SKIP('undefined name')
1603
+ >>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS)
1604
+
1605
+ """
1606
+ assert self.logger is not None
1607
+ self.logger._set_comm_hook_name(str(comm_hook_type))
1608
+ dist._register_builtin_comm_hook(self.reducer, comm_hook_type)
1609
+
1610
+ def _register_fused_optim(
1611
+ self, optim: Type, *args, optim_params=None, **kwargs
1612
+ ):
1613
+ r"""
1614
+ Registers an optimizer with DDP such that the optimization for a
1615
+ parameter will run immediately when that parameter's gradient is
1616
+ finished with reduction, instead of waiting for all parameters'
1617
+ gradients to finish reduction. This can result in a training speedup
1618
+ depending on your workload since the optimizer can run while gradient
1619
+ reduction for other parameters are still ongoing. In addition, this has
1620
+ the potential to reduce peak memory consumption during training, as it
1621
+ only needs to load the per-parameter optimizer states of a single
1622
+ parameter at a time, instead of loading all per-parameter optimizer
1623
+ states at once.
1624
+
1625
+ Args:
1626
+ optim (Type): a ``torch.optim.Optimizer`` class to be registered
1627
+ as a fused optimizer.
1628
+ *args (Sequence[Any]): Arguments to forward to `optim`.
1629
+ optim_params (Optional[Iterable[torch.Tensor]]): Set of parameters
1630
+ to optimize, similar to `params` argument of traditional `torch.optim`
1631
+ Optimizers. If this is omitted, all DDP model parameters will be
1632
+ optimized.
1633
+ **kwargs: (Dict[str, Any]): Keyword arguments to forward to `optim`.
1634
+
1635
+ .. warning ::
1636
+ _register_fused_optim should only be called once on a DDP instance,
1637
+ and registering multiple fused optimizers for the same DDP model
1638
+ is not currently supported. Please ping
1639
+ https://github.com/pytorch/pytorch/issues/71595 if this is necessary
1640
+ for your use case.
1641
+
1642
+ .. warning ::
1643
+ _register_fused_optim and register_comm_hook currently do not
1644
+ compose together, meaning that custom DDP communication hooks are
1645
+ not supported with overlapped optimizers. Please ping
1646
+ https://github.com/pytorch/pytorch/issues/71595 if this is necessary
1647
+ for your use case.
1648
+
1649
+ .. warning ::
1650
+ Gradient accumulation and DDP `no_sync` are currently not supported
1651
+ with overlapped optimizer. Please ping
1652
+ https://github.com/pytorch/pytorch/issues/71595 if this is necessary
1653
+ for your use case.
1654
+
1655
+ Example::
1656
+
1657
+ >>> # xdoctest: +SKIP("No rendezvous handler")
1658
+ >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
1659
+ >>> net = torch.nn.parallel.DistributedDataParallel(model, pg)
1660
+ >>> lr = 1e-2
1661
+ >>> betas = (0.9, 0.99)
1662
+ >>> eps = 1e-6
1663
+ >>> net._register_fused_optim(torch.optim.Adam, lr, betas=betas, eps=eps)
1664
+ >>> # Example with subset of parameters
1665
+ >>> params_to_opt = [list(net.parameters())[0]]
1666
+ >>> net._register_fused_optim(
1667
+ ... torch.optim.Adam, lr, optim_params=params_to_opt, betas=betas, eps=eps
1668
+ ... )
1669
+ """
1670
+ # Note: importing in function, otherwise this will cause a circular
1671
+ # import as optimizer_overlap module needs to import DistributedDataParallel.
1672
+ from torch.distributed.algorithms._optimizer_overlap import (
1673
+ _as_overlapped_optim,
1674
+ )
1675
+
1676
+ overlapped_optim = _as_overlapped_optim(
1677
+ optim, optim_params, *args, **kwargs
1678
+ )
1679
+ try:
1680
+ overlapped_optim.register_ddp(self)
1681
+ except NotImplementedError as e:
1682
+ raise RuntimeError(
1683
+ f"{optim} does not support overlapped DDP. Please file an issue to PyTorch or the respective owner of {optim}."
1684
+ ) from e
1685
+
1686
+ def _distributed_broadcast_coalesced(
1687
+ self, tensors, buffer_size, authoritative_rank=0
1688
+ ):
1689
+ dist._broadcast_coalesced(
1690
+ self.process_group, tensors, buffer_size, authoritative_rank
1691
+ )
1692
+
1693
+ def _check_sync_bufs_post_fwd(self):
1694
+ return (
1695
+ self.will_sync_module_buffers()
1696
+ and hasattr(self, "buffer_hook")
1697
+ and self.buffer_hook.buffer_comm_hook_location
1698
+ == _BufferCommHookLocation.POST_FORWARD
1699
+ )
1700
+
1701
+ def _check_sync_bufs_pre_fwd(self):
1702
+ return self.will_sync_module_buffers() and (
1703
+ not hasattr(self, "buffer_hook")
1704
+ or self.buffer_hook.buffer_comm_hook_location
1705
+ == _BufferCommHookLocation.PRE_FORWARD
1706
+ )
1707
+
1708
+ def will_sync_module_buffers(self):
1709
+ return (
1710
+ self.require_forward_param_sync
1711
+ and self.broadcast_buffers
1712
+ and len(self.modules_buffers) > 0
1713
+ )
1714
+
1715
+ def _find_common_rank(self, input_rank, rank_cond):
1716
+ # -1 indicates that this rank is not under consideration to be the
1717
+ # common_rank
1718
+ rank_to_use = torch.tensor(
1719
+ [input_rank if rank_cond else -1],
1720
+ device=self.device,
1721
+ )
1722
+ dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group)
1723
+ if rank_to_use.item() == -1:
1724
+ self._log_and_throw(
1725
+ ValueError,
1726
+ "BUG! Expected rank_cond to be true for at least one process."
1727
+ " This indicates a bug in PyTorch, please report an issue.",
1728
+ )
1729
+ return rank_to_use.item()
1730
+
1731
+ def _sync_buffers(self):
1732
+ with torch.no_grad():
1733
+ # module buffer sync
1734
+ # Synchronize buffers across processes.
1735
+ # If we are running DDP with the join manager, we have to agree
1736
+ # upon a rank to sync module buffers from, since rank 0 may
1737
+ # already have been joined and have stale module buffers.
1738
+ if self._join_config.enable:
1739
+ authoritative_rank = self._find_common_rank(
1740
+ self._distributed_rank, True
1741
+ )
1742
+ else:
1743
+ # The process with rank 0 is considered the authoritative copy.
1744
+ authoritative_rank = 0
1745
+ # Update self.modules_buffers incase any buffers were
1746
+ # reassigned.
1747
+ self._assign_modules_buffers()
1748
+ self._sync_module_buffers(authoritative_rank)
1749
+
1750
+ def _sync_module_buffers(self, authoritative_rank):
1751
+ if not hasattr(self, "buffer_hook"):
1752
+ self._default_broadcast_coalesced(
1753
+ authoritative_rank=authoritative_rank
1754
+ )
1755
+ else:
1756
+ hook = self.buffer_hook.buffer_comm_hook
1757
+ state = self.buffer_hook.buffer_comm_hook_state
1758
+ futs = hook(state, self.named_module_buffers)
1759
+ if futs is not None:
1760
+ self.reducer._install_post_backward_futures(futs)
1761
+
1762
+ def _default_broadcast_coalesced(
1763
+ self, bufs=None, bucket_size=None, authoritative_rank=0
1764
+ ):
1765
+ """
1766
+ Broadcasts buffers from rank 0 to rest of workers. If bufs, bucket_size
1767
+ are None, default values self.modules_buffers and
1768
+ self.broadcast_bucket_size are used instead.
1769
+ """
1770
+ if bufs is None:
1771
+ bufs = self.modules_buffers
1772
+ if bucket_size is None:
1773
+ bucket_size = self.broadcast_bucket_size
1774
+
1775
+ self._distributed_broadcast_coalesced(
1776
+ bufs, bucket_size, authoritative_rank
1777
+ )
1778
+
1779
+ def _passing_sync_batchnorm_handle(self, module):
1780
+ for layer in module.modules():
1781
+ if isinstance(layer, torch.nn.modules.SyncBatchNorm):
1782
+ if self.device_type == "cpu":
1783
+ self._log_and_throw(
1784
+ ValueError,
1785
+ "SyncBatchNorm layers only work with GPU modules",
1786
+ )
1787
+
1788
+ def _check_comm_hook(self, hook):
1789
+ if not callable(hook):
1790
+ self._log_and_throw(
1791
+ TypeError, "Communication hook must be callable."
1792
+ )
1793
+
1794
+ sig = inspect.signature(hook)
1795
+ if (
1796
+ sig.parameters["bucket"].annotation != inspect._empty
1797
+ and sig.parameters["bucket"].annotation != dist.GradBucket
1798
+ ):
1799
+ self._log_and_throw(
1800
+ ValueError,
1801
+ "Communication hook: bucket annotation should be dist.GradBucket.",
1802
+ )
1803
+
1804
+ if (
1805
+ sig.return_annotation != inspect._empty
1806
+ and sig.return_annotation != torch.futures.Future[torch.Tensor]
1807
+ ):
1808
+ self._log_and_throw(
1809
+ ValueError,
1810
+ "Communication hook: return annotation should be torch.futures.Future[torch.Tensor].",
1811
+ )
1812
+
1813
+ if hook.__name__ in [
1814
+ "bf16_compress_hook",
1815
+ "bf16_compress_wrapper_hook",
1816
+ ] and (
1817
+ (torch.version.cuda is None and torch.version.hip is None)
1818
+ or (
1819
+ torch.version.cuda is not None
1820
+ and int(torch.version.cuda.split(".")[0]) < 11
1821
+ )
1822
+ or not dist.is_available()
1823
+ or not dist.is_nccl_available()
1824
+ or torch.cuda.nccl.version() < (2, 10)
1825
+ ):
1826
+ self._log_and_throw(
1827
+ TypeError,
1828
+ "BF16 all reduce communication hook required CUDA 11+ and NCCL 2.10+.",
1829
+ )
1830
+
1831
+ @property
1832
+ def _distributed_rank(self):
1833
+ return dist.get_rank(self.process_group)
1834
+
1835
+ @staticmethod
1836
+ def _set_params_and_buffers_to_ignore_for_model(
1837
+ module, params_and_buffers_to_ignore
1838
+ ):
1839
+ """
1840
+ Sets parameters and buffers to be ignored by DDP. Expected format for
1841
+ parameters is the fully qualified name: {module_name}.{param_name}, and
1842
+ similarly, {module_name}.{buffer_name} for buffers. For example:
1843
+ params_to_ignore = []
1844
+ # NB: model here is vanilla PyTorch module, not yet wrapped with DDP.
1845
+ for module_name, module in model.named_modules():
1846
+ for param_name, param in module.named_parameters(recurse=False):
1847
+ if should_ignore(param):
1848
+ # Create expected format
1849
+ fqn = f"{module_name}.{param_name}"
1850
+ params_to_ignore.append(fqn)
1851
+ torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
1852
+ model,
1853
+ params_to_ignore
1854
+ )
1855
+ """
1856
+ # This is a workaround to set parameters and buffers DDP should ignore
1857
+ # during synchronization. It will be removed when the API is finalized
1858
+ # as part of addressing https://github.com/pytorch/pytorch/issues/43690.
1859
+ module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore
1860
+ for name, param in module.named_parameters():
1861
+ if name in params_and_buffers_to_ignore:
1862
+ param._ddp_ignored = True
1863
+ for name, buffer in module.named_buffers():
1864
+ if name in params_and_buffers_to_ignore:
1865
+ buffer._ddp_ignored = True
1866
+
1867
+ def _get_ddp_logging_data(self):
1868
+ r"""
1869
+ This interface can be called after DistributedDataParallel() is
1870
+ constructed. It returns a dictionary of logging data. It could help
1871
+ for debugging and analysis. The loggind data includes DistributedDataParallel
1872
+ constructor input parameters, some internal states of DistributedDataParallel
1873
+ and performance metrics. Simply print the dictorinary and see what
1874
+ these metrics are.
1875
+ This is a prototype interface and subject to change in the future.
1876
+ """
1877
+ assert self.logger is not None
1878
+ ddp_logging_data = self.logger._get_ddp_logging_data()
1879
+ return {**ddp_logging_data.strs_map, **ddp_logging_data.ints_map}
1880
+
1881
+ def _set_ddp_runtime_logging_sample_rate(self, sample_rate):
1882
+ r"""
1883
+ This interface allows users to set sample_rate of collecting
1884
+ runtime stats. The runtime stats will be recorded for the
1885
+ first 10 iterations, after 10 iterations runtime stats will be
1886
+ recorded once every "sample_rate" training iterations. In
1887
+ default, runtime stats are recorded for the first 10 iterations,
1888
+ after 10 iterations runtime stats are recorded once every
1889
+ "kDDPRuntimeLoggingSampleRate=100" training iterations.
1890
+ This is a prototype interface and subject to change in the future.
1891
+ """
1892
+ if sample_rate < 1:
1893
+ self._log_and_throw(
1894
+ ValueError,
1895
+ "DDP runtime logging sample rate should be equal or greater than 1",
1896
+ )
1897
+ self.reducer._set_ddp_runtime_logging_sample_rate(sample_rate)
1898
+
1899
+ def _set_static_graph(self):
1900
+ """
1901
+ It is recommended to set static graph in the DDP constructor, which will
1902
+ call this private API internally.
1903
+ """
1904
+ # If self.static_graph has been set, no need to set it again
1905
+ if self.static_graph:
1906
+ warnings.warn(
1907
+ "You've set static_graph to be True, no need to set it again."
1908
+ )
1909
+ return
1910
+ self.static_graph = True
1911
+ self.reducer._set_static_graph()
1912
+ assert self.logger is not None
1913
+ self.logger._set_static_graph()
1914
+ if self.find_unused_parameters:
1915
+ warnings.warn(
1916
+ "You passed find_unused_parameters=true to DistributedDataParallel, "
1917
+ "`_set_static_graph` will detect unused parameters automatically, so "
1918
+ "you do not need to set find_unused_parameters=true, just be sure these "
1919
+ "unused parameters will not change during training loop while calling "
1920
+ "`_set_static_graph`."
1921
+ )
wemm/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import threading
2
+ import torch
3
+ from torch.cuda._utils import _get_device_index
4
+ from torch.cuda.amp import autocast
5
+ from torch._utils import ExceptionWrapper
6
+
7
+
8
+ def get_a_var(obj):
9
+ if isinstance(obj, torch.Tensor):
10
+ return obj
11
+
12
+ if isinstance(obj, (list, tuple)):
13
+ for result in map(get_a_var, obj):
14
+ if isinstance(result, torch.Tensor):
15
+ return result
16
+ if isinstance(obj, dict):
17
+ for result in map(get_a_var, obj.items()):
18
+ if isinstance(result, torch.Tensor):
19
+ return result
20
+ return None
21
+
22
+
23
+ def parallel_apply(modules, inputs, kwargs_tup=None, devices=None):
24
+ r"""Applies each `module` in :attr:`modules` in parallel on arguments
25
+ contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
26
+ on each of :attr:`devices`.
27
+
28
+ Args:
29
+ modules (Module): modules to be parallelized
30
+ inputs (tensor): inputs to the modules
31
+ devices (list of int or torch.device): CUDA devices
32
+
33
+ :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
34
+ :attr:`devices` (if given) should all have same length. Moreover, each
35
+ element of :attr:`inputs` can either be a single object as the only argument
36
+ to a module, or a collection of positional arguments.
37
+ """
38
+ assert len(modules) == len(inputs)
39
+ if kwargs_tup is not None:
40
+ assert len(modules) == len(kwargs_tup)
41
+ else:
42
+ kwargs_tup = ({},) * len(modules)
43
+ if devices is not None:
44
+ assert len(modules) == len(devices)
45
+ else:
46
+ devices = [None] * len(modules)
47
+ devices = [_get_device_index(x, True) for x in devices]
48
+ streams = [torch.cuda.current_stream(x) for x in devices]
49
+ lock = threading.Lock()
50
+ results = {}
51
+ grad_enabled, autocast_enabled = torch.is_grad_enabled(), torch.is_autocast_enabled()
52
+
53
+ def _worker(i, module, input, kwargs, device=None, stream=None):
54
+ torch.set_grad_enabled(grad_enabled)
55
+ if device is None:
56
+ device = get_a_var(input).get_device()
57
+ if stream is None:
58
+ stream = torch.cuda.current_stream(device)
59
+ try:
60
+ with torch.cuda.device(device), torch.cuda.stream(stream), autocast(enabled=autocast_enabled):
61
+ # this also avoids accidental slicing of `input` if it is a Tensor
62
+ if not isinstance(input, (list, tuple)):
63
+ input = (input,)
64
+ output = module(*input, **kwargs)
65
+ with lock:
66
+ results[i] = output
67
+ except Exception:
68
+ with lock:
69
+ results[i] = ExceptionWrapper(
70
+ where="in replica {} on device {}".format(i, device))
71
+
72
+ if len(modules) > 1:
73
+ threads = [threading.Thread(target=_worker,
74
+ args=(i, module, input, kwargs, device, stream))
75
+ for i, (module, input, kwargs, device, stream) in
76
+ enumerate(zip(modules, inputs, kwargs_tup, devices, streams))]
77
+
78
+ for thread in threads:
79
+ thread.start()
80
+ for thread in threads:
81
+ thread.join()
82
+ else:
83
+ _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0], streams[0])
84
+
85
+ outputs = []
86
+ for i in range(len(inputs)):
87
+ output = results[i]
88
+ if isinstance(output, ExceptionWrapper):
89
+ output.reraise()
90
+ outputs.append(output)
91
+ return outputs
wemm/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.pyi ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ from typing import Any, Optional, Sequence, List
2
+ from .common_types import _devices_t
3
+ from ..modules import Module
4
+
5
+
6
+ def parallel_apply(modules: Sequence[Module], inputs: Sequence[Any], kwargs_tup: Optional[Any] = ...,
7
+ devices: Optional[_devices_t] = ...) -> List[Any]: ...
wemm/lib/python3.10/site-packages/torch/nn/parallel/replicate.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from . import comm
2
+ from torch._utils import _get_device_index
3
+
4
+ from collections import OrderedDict
5
+
6
+
7
+ def _is_script_module(module):
8
+ import torch.jit
9
+ return isinstance(module, torch.jit.ScriptModule)
10
+
11
+
12
+ def _is_script_method(module):
13
+ import torch.jit
14
+ return isinstance(module, torch._C.ScriptMethod)
15
+
16
+
17
+ def _init_script_module():
18
+ import torch.jit
19
+ return torch.jit.ScriptModule()
20
+
21
+
22
+ def _is_jit_enabled():
23
+ import torch.jit
24
+ return torch.jit._state._enabled
25
+
26
+
27
+ # Check if we can safely replicate the module.
28
+ # there are two types of module:
29
+ # 1. python modules
30
+ # 2. ScriptModule
31
+ #
32
+ # currently a module cannot be replicated properly if the descendants of
33
+ # any ScriptModule contains python module (type 1 above)
34
+ def _replicatable_module(module, memo=None):
35
+
36
+ # module.modules() contains module itself as the first element
37
+ def descendant_modules(module):
38
+ gen = module.modules()
39
+ next(gen)
40
+ return gen
41
+
42
+ if not _is_jit_enabled():
43
+ return True
44
+ if memo is None:
45
+ memo = set()
46
+
47
+ # memoize visited modules
48
+ memo.add(module)
49
+ if _is_script_module(module):
50
+ memo.update(descendant_modules(module))
51
+ return all(_is_script_module(descendant) for
52
+ descendant in descendant_modules(module))
53
+
54
+ for child in module.children():
55
+ # since any unreplicatable module will cause the check to return
56
+ # False early, visited modules here can be safely ignored.
57
+ if child in memo:
58
+ continue
59
+ if not _replicatable_module(child, memo):
60
+ return False
61
+
62
+ return True
63
+
64
+ def _broadcast_coalesced_reshape(tensors, devices, detach=False):
65
+ from ._functions import Broadcast
66
+ if detach:
67
+ return comm.broadcast_coalesced(tensors, devices)
68
+ else:
69
+ # Use the autograd function to broadcast if not detach
70
+ if len(tensors) > 0:
71
+ tensor_copies = Broadcast.apply(devices, *tensors)
72
+ return [tensor_copies[i:i + len(tensors)]
73
+ for i in range(0, len(tensor_copies), len(tensors))]
74
+ else:
75
+ return []
76
+
77
+
78
+ def replicate(network, devices, detach=False):
79
+ if not _replicatable_module(network):
80
+ raise RuntimeError("Cannot replicate network where python modules are "
81
+ "childrens of ScriptModule")
82
+
83
+ if not devices:
84
+ return []
85
+
86
+ devices = [_get_device_index(x, True) for x in devices]
87
+ num_replicas = len(devices)
88
+
89
+ params = list(network.parameters())
90
+ param_indices = {param: idx for idx, param in enumerate(params)}
91
+ param_copies = _broadcast_coalesced_reshape(params, devices, detach)
92
+
93
+ buffers = list(network.buffers())
94
+ buffers_rg = []
95
+ buffers_not_rg = []
96
+ for buf in buffers:
97
+ if buf.requires_grad and not detach:
98
+ buffers_rg.append(buf)
99
+ else:
100
+ buffers_not_rg.append(buf)
101
+
102
+ buffer_indices_rg = {buf: idx for idx, buf in enumerate(buffers_rg)}
103
+ buffer_indices_not_rg = {buf: idx for idx, buf in enumerate(buffers_not_rg)}
104
+
105
+ buffer_copies_rg = _broadcast_coalesced_reshape(buffers_rg, devices, detach=detach)
106
+ buffer_copies_not_rg = _broadcast_coalesced_reshape(buffers_not_rg, devices, detach=True)
107
+
108
+ modules = list(network.modules())
109
+ module_copies = [[] for device in devices]
110
+ module_indices = {}
111
+
112
+ for i, module in enumerate(modules):
113
+ module_indices[module] = i
114
+ for j in range(num_replicas):
115
+ replica = module._replicate_for_data_parallel()
116
+ # This is a temporary fix for DDP. DDP needs to access the
117
+ # replicated model parameters. It used to do so through
118
+ # `mode.parameters()`. The fix added in #33907 for DP stops the
119
+ # `parameters()` API from exposing the replicated parameters.
120
+ # Hence, we add a `_former_parameters` dict here to support DDP.
121
+ replica._former_parameters = OrderedDict()
122
+
123
+ module_copies[j].append(replica)
124
+
125
+ for i, module in enumerate(modules):
126
+ for key, child in module._modules.items():
127
+ if child is None:
128
+ for j in range(num_replicas):
129
+ replica = module_copies[j][i]
130
+ replica._modules[key] = None
131
+ else:
132
+ module_idx = module_indices[child]
133
+ for j in range(num_replicas):
134
+ replica = module_copies[j][i]
135
+ setattr(replica, key, module_copies[j][module_idx])
136
+ for key, param in module._parameters.items():
137
+ if param is None:
138
+ for j in range(num_replicas):
139
+ replica = module_copies[j][i]
140
+ replica._parameters[key] = None
141
+ else:
142
+ param_idx = param_indices[param]
143
+ for j in range(num_replicas):
144
+ replica = module_copies[j][i]
145
+ param = param_copies[j][param_idx]
146
+ # parameters in replicas are no longer leaves,
147
+ # so setattr them as non-parameter attributes
148
+ setattr(replica, key, param)
149
+ # expose the parameter for DDP
150
+ replica._former_parameters[key] = param
151
+ for key, buf in module._buffers.items():
152
+ if buf is None:
153
+ for j in range(num_replicas):
154
+ replica = module_copies[j][i]
155
+ replica._buffers[key] = None
156
+ else:
157
+ if buf.requires_grad and not detach:
158
+ buffer_copies = buffer_copies_rg
159
+ buffer_idx = buffer_indices_rg[buf]
160
+ else:
161
+ buffer_copies = buffer_copies_not_rg
162
+ buffer_idx = buffer_indices_not_rg[buf]
163
+ for j in range(num_replicas):
164
+ replica = module_copies[j][i]
165
+ setattr(replica, key, buffer_copies[j][buffer_idx])
166
+
167
+ return [module_copies[j][0] for j in range(num_replicas)]
wemm/lib/python3.10/site-packages/torch/nn/parallel/replicate.pyi ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ from typing import List, Union, Sequence, TypeVar
2
+ from ..modules import Module
3
+ from .common_types import _devices_t
4
+
5
+
6
+ def replicate(network: Module, devices: Union[_devices_t, Sequence[_devices_t]], detach: bool = ...) -> List[
7
+ Module]: ...
wemm/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from ._functions import Scatter, Gather
3
+ import warnings
4
+
5
+ __all__ = ['scatter', 'scatter_kwargs', 'gather']
6
+
7
+ def is_namedtuple(obj):
8
+ # Check if type was created from collections.namedtuple or a typing.NamedTuple.
9
+ warnings.warn("is_namedtuple is deprecated, please use the python checks instead")
10
+ return _is_namedtuple(obj)
11
+
12
+ def _is_namedtuple(obj):
13
+ # Check if type was created from collections.namedtuple or a typing.NamedTuple.
14
+ return (
15
+ isinstance(obj, tuple) and hasattr(obj, "_asdict") and hasattr(obj, "_fields")
16
+ )
17
+
18
+
19
+ def scatter(inputs, target_gpus, dim=0):
20
+ r"""
21
+ Slices tensors into approximately equal chunks and
22
+ distributes them across given GPUs. Duplicates
23
+ references to objects that are not tensors.
24
+ """
25
+ def scatter_map(obj):
26
+ if isinstance(obj, torch.Tensor):
27
+ return Scatter.apply(target_gpus, None, dim, obj)
28
+ if _is_namedtuple(obj):
29
+ return [type(obj)(*args) for args in zip(*map(scatter_map, obj))]
30
+ if isinstance(obj, tuple) and len(obj) > 0:
31
+ return list(zip(*map(scatter_map, obj)))
32
+ if isinstance(obj, list) and len(obj) > 0:
33
+ return [list(i) for i in zip(*map(scatter_map, obj))]
34
+ if isinstance(obj, dict) and len(obj) > 0:
35
+ return [type(obj)(i) for i in zip(*map(scatter_map, obj.items()))]
36
+ return [obj for targets in target_gpus]
37
+
38
+ # After scatter_map is called, a scatter_map cell will exist. This cell
39
+ # has a reference to the actual function scatter_map, which has references
40
+ # to a closure that has a reference to the scatter_map cell (because the
41
+ # fn is recursive). To avoid this reference cycle, we set the function to
42
+ # None, clearing the cell
43
+ try:
44
+ res = scatter_map(inputs)
45
+ finally:
46
+ scatter_map = None
47
+ return res
48
+
49
+
50
+ def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
51
+ r"""Scatter with support for kwargs dictionary"""
52
+ inputs = scatter(inputs, target_gpus, dim) if inputs else []
53
+ kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
54
+ if len(inputs) < len(kwargs):
55
+ inputs.extend(() for _ in range(len(kwargs) - len(inputs)))
56
+ elif len(kwargs) < len(inputs):
57
+ kwargs.extend({} for _ in range(len(inputs) - len(kwargs)))
58
+ inputs = tuple(inputs)
59
+ kwargs = tuple(kwargs)
60
+ return inputs, kwargs
61
+
62
+
63
+ def gather(outputs, target_device, dim=0):
64
+ r"""
65
+ Gathers tensors from different GPUs on a specified device.
66
+ Use 'cpu' for CPU to avoid a deprecation warning.
67
+ """
68
+ def gather_map(outputs):
69
+ out = outputs[0]
70
+ if isinstance(out, torch.Tensor):
71
+ return Gather.apply(target_device, dim, *outputs)
72
+ if out is None:
73
+ return None
74
+ if isinstance(out, dict):
75
+ if not all(len(out) == len(d) for d in outputs):
76
+ raise ValueError('All dicts must have the same number of keys')
77
+ return type(out)((k, gather_map([d[k] for d in outputs]))
78
+ for k in out)
79
+ if _is_namedtuple(out):
80
+ return type(out)._make(map(gather_map, zip(*outputs)))
81
+ return type(out)(map(gather_map, zip(*outputs)))
82
+
83
+ # Recursive function calls like this create reference cycles.
84
+ # Setting the function to None clears the refcycle.
85
+ try:
86
+ res = gather_map(outputs)
87
+ finally:
88
+ gather_map = None
89
+ return res
wemm/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.pyi ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, List, Tuple, overload, TypeVar
2
+ from ... import Tensor
3
+ from .common_types import _device_t, _devices_t
4
+
5
+
6
+ T = TypeVar('T', Dict, List, Tuple)
7
+
8
+ # For some reason, 'scatter' returns a tuple when given a single Tensor input but a list otherwise.
9
+ @overload
10
+ def scatter(inputs: Tensor, target_gpus: _devices_t, dim: int = ...) -> Tuple[Tensor, ...]: ...
11
+
12
+ # flake8 will raise a spurious error here since `torch/__init__.pyi` has not been generated yet
13
+ # so mypy will interpret `Tensor` as `Any` since it is an import from what it believes to be an
14
+ # untyped module. Thus to mypy, the first definition of `scatter` looks strictly more general
15
+ # than this overload.
16
+ @overload
17
+ def scatter(inputs: T, target_gpus: _devices_t, dim: int = ...) -> List[T]: ...
18
+
19
+
20
+ # TODO More precise types here.
21
+ def scatter_kwargs(inputs: Any, kwargs: Any, target_gpus: _devices_t, dim: int = ...) -> Any: ...
22
+
23
+
24
+ def gather(outputs: Any, target_device: _device_t, dim: int = ...) -> Any: ...
wemm/lib/python3.10/site-packages/torch/nn/qat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (454 Bytes). View file
 
wemm/lib/python3.10/site-packages/torch/nn/qat/dynamic/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""QAT Dynamic Modules
3
+
4
+ This package is in the process of being deprecated.
5
+ Please, use `torch.ao.nn.qat.dynamic` instead.
6
+ """
7
+ from .modules import * # noqa: F403
wemm/lib/python3.10/site-packages/torch/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (331 Bytes). View file
 
wemm/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .linear import Linear
2
+
3
+ __all__ = ["Linear"]
wemm/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (237 Bytes). View file
 
wemm/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (592 Bytes). View file
 
wemm/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/linear.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""QAT Modules
3
+
4
+ This file is in the process of migration to `torch/ao/nn/qat/dynamic`, and
5
+ is kept here for compatibility while the migration process is ongoing.
6
+ If you are adding a new entry/functionality, please, add it to the
7
+ appropriate file under the `torch/ao/nn/qat/dynamic/modules`,
8
+ while adding an import statement here.
9
+ """
10
+ from torch.ao.nn.qat.dynamic.modules.linear import Linear
wemm/lib/python3.10/site-packages/torch/nn/qat/modules/__init__.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""QAT Modules
3
+
4
+ This package is in the process of being deprecated.
5
+ Please, use `torch.ao.nn.qat.modules` instead.
6
+ """
7
+ from torch.ao.nn.qat.modules.linear import Linear
8
+ from torch.ao.nn.qat.modules.conv import Conv1d
9
+ from torch.ao.nn.qat.modules.conv import Conv2d
10
+ from torch.ao.nn.qat.modules.conv import Conv3d
11
+ from torch.ao.nn.qat.modules.embedding_ops import EmbeddingBag, Embedding
12
+
13
+ from . import conv
14
+ from . import embedding_ops
15
+ from . import linear
16
+
17
+ __all__ = [
18
+ "Linear",
19
+ "Conv1d",
20
+ "Conv2d",
21
+ "Conv3d",
22
+ "Embedding",
23
+ "EmbeddingBag",
24
+ ]