ZTWHHH committed on
Commit
f1ccc51
·
verified ·
1 Parent(s): 88e9af2

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. evalkit_tf437/lib/python3.10/site-packages/torch/_higher_order_ops/wrap.py +158 -0
  3. evalkit_tf437/lib/python3.10/site-packages/torch/nn/__pycache__/__init__.cpython-310.pyc +0 -0
  4. evalkit_tf437/lib/python3.10/site-packages/torch/nn/__pycache__/_reduction.cpython-310.pyc +0 -0
  5. evalkit_tf437/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc +0 -0
  6. evalkit_tf437/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc +0 -0
  7. evalkit_tf437/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc +0 -0
  8. evalkit_tf437/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc +0 -0
  9. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/__init__.py +35 -0
  10. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__init__.py +31 -0
  11. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc +0 -0
  12. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/modules/fused.py +30 -0
  13. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc +0 -0
  14. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  15. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/conv_fused.py +37 -0
  16. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_relu.py +15 -0
  17. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__init__.py +13 -0
  18. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
  19. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
  20. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__init__.py +5 -0
  21. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
  22. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/linear_relu.py +5 -0
  23. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__init__.py +12 -0
  24. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc +0 -0
  25. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc +0 -0
  26. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
  27. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/bn_relu.py +7 -0
  28. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/conv_relu.py +9 -0
  29. evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/linear_relu.py +5 -0
  30. evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/__init__.py +14 -0
  31. evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc +0 -0
  32. evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc +0 -0
  33. evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc +0 -0
  34. evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc +0 -0
  35. evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/comm.py +236 -0
  36. evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py +269 -0
  37. evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/distributed.py +0 -0
  38. evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/replicate.py +186 -0
  39. evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.py +107 -0
  40. evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/__init__.py +18 -0
  41. evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
  42. evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__init__.py +3 -0
  43. evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  44. evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc +0 -0
  45. evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/modules/__init__.py +24 -0
  46. evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  47. evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/embedding_ops.cpython-310.pyc +0 -0
  48. evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/modules/embedding_ops.py +14 -0
  49. evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/modules/linear.py +10 -0
  50. evalkit_tf437/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -1283,3 +1283,6 @@ falcon/lib/python3.10/site-packages/setuptools/_vendor/__pycache__/typing_extens
1283
  evalkit_tf437/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1284
  evalkit_tf437/lib/python3.10/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1285
  evalkit_tf437/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
1283
  evalkit_tf437/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1284
  evalkit_tf437/lib/python3.10/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1285
  evalkit_tf437/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1286
+ falcon/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1287
+ falcon/lib/python3.10/site-packages/pandas/_libs/algos.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1288
+ falcon/lib/python3.10/site-packages/pandas/_libs/sas.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
evalkit_tf437/lib/python3.10/site-packages/torch/_higher_order_ops/wrap.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import logging
3
+
4
+ import torch
5
+ from torch._ops import HigherOrderOperator
6
+ from torch.utils.checkpoint import checkpoint, uid
7
+ import torch._dynamo.config
8
+
9
+ log = logging.getLogger(__name__)
10
+
11
+
12
+
13
# Used for testing the HigherOrderOperator mechanism
class Wrap(HigherOrderOperator):
    """Pass-through higher-order op: invokes ``func(*args, **kwargs)``.

    Exists primarily to exercise the HigherOrderOperator machinery.
    """

    def __init__(self):
        super().__init__("wrap")

    def __call__(self, func, *args, **kwargs):
        # Dynamo traces the body of a HigherOrderOp ahead of time, so there
        # is no need to trace into it again here — run the callable with
        # Dynamo disabled.
        import torch._dynamo  # noqa: F401
        from torch._dynamo import disable

        @disable
        def inner():
            return func(*args, **kwargs)

        return inner()

wrap = Wrap()
32
+
33
class WrapActivationCheckpoint(HigherOrderOperator):
    """
    This operator is used to wrap torch.utils.checkpoint. This avoids
    TorchDynamo to look into saved tensor hooks and directly passes the control
    to AOT Autograd, which is ok with tracing saved tensor hooks. As a result of
    AOT tracing torch.utils.checkpoint code, we have a backward graph with
    recomputed forward nodes.

    However, we might deprecate this operator soon. The difficulty arises in the
    functionalization of rng ops. Today, there are two different
    functionalization of rng ops - one at AOT autograd and other at Inductor.
    And they are difficult to map to each other. The rng states also complicate
    pattern matching in Inductor. Due to the ease of implementation, we are
    currently inclined towards functionalization at Inductor level, which means
    that duplication/recomputation is done as a compiler pass in the
    partitioners. See TagActivationCheckpoint for more information.
    """

    def __init__(self):
        super().__init__("wrap_activation_checkpoint")

    def __call__(self, function, *args, **kwargs):
        import torch.fx.traceback as fx_traceback
        from torch.fx import Interpreter

        # This op is going to be traced, so force the non-reentrant variant of
        # checkpointing: AOT Autograd traces through that path.
        kwargs["use_reentrant"] = False
        kwargs["preserve_rng_state"] = False
        # Running through an fx Interpreter preserves node metadata across the
        # torch.compile stack.
        with fx_traceback.preserve_node_meta():
            return checkpoint(Interpreter(function).run, *args, **kwargs)

wrap_activation_checkpoint = WrapActivationCheckpoint()
66
+
67
class TagActivationCheckpoint(HigherOrderOperator):
    """
    This operator is supposed to be used only with torch.compile stack. This
    accepts a Fx graph module which needs to be checkpointed. This operator adds
    "recomputable" tag to the nodes of the Fx graph that should be recomputed.

    The goal is to:
    1. Avoid using Dynamo to trace through saved tensor hooks.
    2. For selective checkpointing case, let AOTAutograd trace through
       saved tensor hooks but has special logic with TorchDispatchMode to override
       the usual saved_tensor_hooks fn logic in order to tag the nodes.
    3. Rely on the partitioners to actually duplicate the nodes.
    This sits well in the torch.compile stack, because by the time graph
    reaches partitioner, inductor has already run its functionalization of rng
    ops. Therefore, the duplication of nodes, by design, respects the rng states
    in the forward and recomputed forward in backward.
    """

    def __init__(self):
        super().__init__("tag_activation_checkpoint")

    @staticmethod
    def divide_kwargs(kwargs):
        """
        checkpoint fn can have mixed kwargs between checkpointed fn and
        checkpoint fn itself. For example
        >> def gn(x, y, z=None):
        >>     a = torch.matmul(x, y)
        >>     if z is not None:
        >>         return torch.matmul(a, z)
        >>     return a
        >> def fn(x, y, z):
        >>     return torch.cos(checkpoint(gn, x, y, use_reentrant=False, z=z))
        In the above case, z belongs to checkpointed function gn, but
        use_reentrant belongs to the checkpoint function. This function splits
        the kwargs into checkpoint_kwargs and gmod_kwargs (or
        checkpointed_fn_kwargs).
        We do sorting to ensure same graph from run to run for better
        debuggability. It is not required for correctness.
        """
        ckpt_signature = inspect.signature(checkpoint)
        # Every named parameter of torch.utils.checkpoint except the
        # pass-through ones belongs to the checkpoint call itself.
        checkpoint_keys = {
            name
            for name in ckpt_signature.parameters
            if name not in ("function", "args", "kwargs")
        }
        # `preserve_rng_state` is not a regular kwarg
        checkpoint_keys.add("preserve_rng_state")

        checkpoint_kwargs = {k: v for k, v in kwargs.items() if k in checkpoint_keys}
        gmod_kwargs = {k: v for k, v in kwargs.items() if k not in checkpoint_keys}
        return checkpoint_kwargs, gmod_kwargs

    def tag_nodes(self, gmod):
        # Mark every compute node with a graph-unique id; the partitioner
        # later uses this tag to duplicate (recompute) the nodes.
        graph_id = next(uid)
        for node in gmod.graph.nodes:
            if node.op in ("call_function", "call_method", "call_module"):
                node.meta["recompute"] = graph_id
        return gmod

    def __call__(self, gmod, *args, **kwargs):
        import torch.fx.traceback as fx_traceback
        from torch.fx import Interpreter

        if "_checkpoint_context_fn" not in gmod.meta:
            gmod = self.tag_nodes(gmod)
            # Using interpreter allows preservation of metadata through torch.compile stack.
            # TODO: We want to use the same `checkpoint(Interpreter(gmod).run, *args, **kwargs)` here
            # as the `context_fn != None` case, but that depends on in-place op support in TorchDispatchMode + torch.compile.
            # (for details on in-place op issue, run `test_compile_selective_checkpoint_inplace_op` unit test)
            with fx_traceback.preserve_node_meta():
                return Interpreter(gmod).run(*args)

        # Selective checkpointing: a user-supplied context_fn is present.
        assert torch._dynamo.config._experimental_support_context_fn_in_torch_utils_checkpoint, \
            "Passing context_fn to torch.utils.checkpoint is currently not supported under torch.compile"
        log.warning("""
Detected that context_fn is passed to torch.utils.checkpoint under torch.compile.
Please make sure the checkpointed region does not contain in-place ops (e.g. torch.relu_).
""")
        # use_reentrant is set to False because this op is going to be traced,
        # and AOT Autograd traces through the non-reentrant checkpoint path.
        kwargs["use_reentrant"] = False
        kwargs["context_fn"] = gmod.meta["_checkpoint_context_fn"]
        # First tag all nodes as "recompute"; _CachingTorchDispatchMode in
        # torch/utils/checkpoint.py then undoes the tag for specific nodes.
        gmod = self.tag_nodes(gmod)
        # Using interpreter allows preservation of metadata through torch.compile stack.
        with fx_traceback.preserve_node_meta():
            return checkpoint(Interpreter(gmod).run, *args, **kwargs)

tag_activation_checkpoint = TagActivationCheckpoint()
evalkit_tf437/lib/python3.10/site-packages/torch/nn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.11 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/__pycache__/_reduction.cpython-310.pyc ADDED
Binary file (1.28 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc ADDED
Binary file (3.44 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc ADDED
Binary file (8.45 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc ADDED
Binary file (19.3 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc ADDED
Binary file (8.98 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/__init__.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from torch.ao.nn.intrinsic import (
    ConvBn1d,
    ConvBn2d,
    ConvBn3d,
    ConvBnReLU1d,
    ConvBnReLU2d,
    ConvBnReLU3d,
    ConvReLU1d,
    ConvReLU2d,
    ConvReLU3d,
    LinearReLU,
    BNReLU2d,
    BNReLU3d,
    LinearBn1d,
)
from torch.ao.nn.intrinsic.modules.fused import _FusedModule  # noqa: F401

# Include the subpackages in case user imports from it directly
from . import modules  # noqa: F401
from . import qat  # noqa: F401
from . import quantized  # noqa: F401

__all__ = [
    'ConvBn1d',
    'ConvBn2d',
    'ConvBn3d',
    'ConvBnReLU1d',
    'ConvBnReLU2d',
    'ConvBnReLU3d',
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
    'LinearReLU',
    'BNReLU2d',
    'BNReLU3d',
    'LinearBn1d',
]
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__init__.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from .fused import _FusedModule  # noqa: F401
from .fused import (
    BNReLU2d,
    BNReLU3d,
    ConvBn1d,
    ConvBn2d,
    ConvBn3d,
    ConvBnReLU1d,
    ConvBnReLU2d,
    ConvBnReLU3d,
    ConvReLU1d,
    ConvReLU2d,
    ConvReLU3d,
    LinearBn1d,
    LinearReLU,
)


__all__ = [
    'BNReLU2d',
    'BNReLU3d',
    'ConvBn1d',
    'ConvBn2d',
    'ConvBn3d',
    'ConvBnReLU1d',
    'ConvBnReLU2d',
    'ConvBnReLU3d',
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
    'LinearBn1d',
    'LinearReLU',
]
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/modules/__pycache__/fused.cpython-310.pyc ADDED
Binary file (788 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/modules/fused.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from torch.ao.nn.intrinsic import (
    BNReLU2d,
    BNReLU3d,
    ConvBn1d,
    ConvBn2d,
    ConvBn3d,
    ConvBnReLU1d,
    ConvBnReLU2d,
    ConvBnReLU3d,
    ConvReLU1d,
    ConvReLU2d,
    ConvReLU3d,
    LinearBn1d,
    LinearReLU,
)
from torch.ao.nn.intrinsic.modules.fused import _FusedModule  # noqa: F401

__all__ = [
    'BNReLU2d',
    'BNReLU3d',
    'ConvBn1d',
    'ConvBn2d',
    'ConvBn3d',
    'ConvBnReLU1d',
    'ConvBnReLU2d',
    'ConvBnReLU3d',
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
    'LinearBn1d',
    'LinearReLU',
]
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/qat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (205 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (633 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/conv_fused.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# flake8: noqa: F401
r"""Intrinsic QAT Modules.

This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
while adding an import statement here.
"""

__all__ = [
    # Modules
    'ConvBn1d',
    'ConvBnReLU1d',
    'ConvReLU1d',
    'ConvBn2d',
    'ConvBnReLU2d',
    'ConvReLU2d',
    'ConvBn3d',
    'ConvBnReLU3d',
    'ConvReLU3d',
    # Utilities
    'freeze_bn_stats',
    'update_bn_stats',
]

from torch.ao.nn.intrinsic.qat import (
    ConvBn1d,
    ConvBnReLU1d,
    ConvReLU1d,
    ConvBn2d,
    ConvBnReLU2d,
    ConvReLU2d,
    ConvBn3d,
    ConvBnReLU3d,
    ConvReLU3d,
    freeze_bn_stats,
    update_bn_stats,
)
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_relu.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# flake8: noqa: F401
r"""Intrinsic QAT Modules.

This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate file under the `torch/ao/nn/intrinsic/qat/modules`,
while adding an import statement here.
"""

__all__ = ['LinearReLU']

from torch.ao.nn.intrinsic.qat import LinearReLU
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from .modules import *  # noqa: F403

# Imported here so users can reach the dynamic subpackage through this
# namespace without importing it directly.
import torch.nn.intrinsic.quantized.dynamic

__all__ = [
    'BNReLU2d',
    'BNReLU3d',
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
    'LinearReLU',
]
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (360 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (219 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
from .linear_relu import LinearReLU

__all__ = ['LinearReLU']
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__pycache__/linear_relu.cpython-310.pyc ADDED
Binary file (304 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/linear_relu.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
from torch.ao.nn.intrinsic.quantized.dynamic import LinearReLU

__all__ = ['LinearReLU']
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__init__.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from .linear_relu import LinearReLU
from .conv_relu import ConvReLU1d, ConvReLU2d, ConvReLU3d
from .bn_relu import BNReLU2d, BNReLU3d

__all__ = [
    'LinearReLU',
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
    'BNReLU2d',
    'BNReLU3d',
]
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc ADDED
Binary file (322 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc ADDED
Binary file (364 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc ADDED
Binary file (288 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/bn_relu.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
from torch.ao.nn.intrinsic.quantized import BNReLU2d, BNReLU3d

__all__ = [
    'BNReLU2d',
    'BNReLU3d',
]
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/conv_relu.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
from torch.ao.nn.intrinsic.quantized import ConvReLU1d, ConvReLU2d, ConvReLU3d

__all__ = [
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
]
evalkit_tf437/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/linear_relu.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
from torch.ao.nn.intrinsic.quantized import LinearReLU

__all__ = ['LinearReLU']
evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from .parallel_apply import parallel_apply
from .replicate import replicate
from .data_parallel import DataParallel, data_parallel
from .scatter_gather import gather, scatter
from .distributed import DistributedDataParallel

__all__ = [
    'replicate',
    'scatter',
    'parallel_apply',
    'gather',
    'data_parallel',
    'DataParallel',
    'DistributedDataParallel',
]


def DistributedDataParallelCPU(*args, **kwargs):
    """Deprecated alias kept for backward compatibility.

    Emits a deprecation warning and forwards to
    :class:`DistributedDataParallel`.
    """
    import warnings
    warnings.warn("torch.nn.parallel.DistributedDataParallelCPU is deprecated, "
                  "please use torch.nn.parallel.DistributedDataParallel instead.")
    return DistributedDataParallel(*args, **kwargs)
evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (797 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc ADDED
Binary file (4.1 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc ADDED
Binary file (5.14 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/comm.py ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ import torch
3
+ from torch.cuda import nccl
4
+ from torch._utils import _take_tensors, _flatten_dense_tensors, \
5
+ _unflatten_dense_tensors, _reorder_tensors_as, _get_device_index, _handle_complex
6
+ from typing import List
7
+
8
def broadcast(tensor, devices=None, *, out=None):
    r"""Broadcasts a tensor to specified GPU devices.

    Args:
        tensor (Tensor): tensor to broadcast. Can be on CPU or GPU.
        devices (Iterable[torch.device, str or int], optional): an iterable of
          GPU devices, among which to broadcast.
        out (Sequence[Tensor], optional, keyword-only): the GPU tensors to
          store output results.

    .. note::
        Exactly one of :attr:`devices` and :attr:`out` must be specified.

    Returns:
        - If :attr:`devices` is specified,
            a tuple containing copies of :attr:`tensor`, placed on
            :attr:`devices`.
        - If :attr:`out` is specified,
            a tuple containing :attr:`out` tensors, each containing a copy of
            :attr:`tensor`.
    """
    tensor = _handle_complex(tensor)
    # The two targets are mutually exclusive: exactly one must be given.
    if (devices is None) == (out is None):
        raise RuntimeError(
            f"Exactly one of 'devices' and 'out' must be specified, but got devices={devices} and out={out}")
    if out is not None:
        return torch._C._broadcast_out(tensor, out)
    device_indices = [_get_device_index(d) for d in devices]
    return torch._C._broadcast(tensor, device_indices)
+
39
+
40
def broadcast_coalesced(tensors, devices, buffer_size=10485760):
    """Broadcast a sequence of tensors to the specified GPUs.

    Small tensors are first coalesced into a buffer to reduce the number of
    synchronizations.

    Args:
        tensors (sequence): tensors to broadcast. Must be on the same device,
          either CPU or GPU.
        devices (Iterable[torch.device, str or int]): an iterable of GPU
          devices, among which to broadcast.
        buffer_size (int): maximum size of the buffer used for coalescing

    Returns:
        A tuple containing copies of :attr:`tensor`, placed on :attr:`devices`.
    """
    device_indices = [_get_device_index(d) for d in devices]
    prepared = [_handle_complex(t) for t in tensors]
    return torch._C._broadcast_coalesced(prepared, device_indices, buffer_size)
+
59
+
60
def reduce_add(inputs, destination=None):
    """Sum tensors from multiple GPUs.

    All inputs should have matching shapes, dtype, and layout. The output tensor
    will be of the same shape, dtype, and layout.

    Args:
        inputs (Iterable[Tensor]): an iterable of tensors to add.
        destination (int, optional): a device on which the output will be
            placed (default: current device).

    Returns:
        A tensor containing an elementwise sum of all inputs, placed on the
        :attr:`destination` device.
    """
    destination = _get_device_index(destination, optional=True)
    expected_size = inputs[0].size()
    root_index = None  # index of an input tensor that already lives on `destination`
    for idx, inp in enumerate(inputs):
        assert inp.device.type != "cpu", "reduce_add expects all inputs to be on GPUs"
        if inp.get_device() == destination:
            root_index = idx
        if inp.size() != expected_size:
            got = 'x'.join(str(x) for x in inp.size())
            expected = 'x'.join(str(x) for x in expected_size)
            raise ValueError(f"input {idx} has invalid size: got {got}, but expected {expected}")
    if root_index is None:
        raise RuntimeError("reduce_add expects destination to be on the same GPU with one of the tensors")

    # Nothing to reduce.
    if len(inputs) == 1:
        return inputs[0]

    if nccl.is_available(inputs):
        # Fast path: NCCL reduces directly into a fresh tensor on the root.
        result = torch.empty_like(inputs[root_index])
        nccl.reduce(inputs, output=result, root=root_index)
    else:
        # Fallback: copy each non-root tensor to the destination and accumulate.
        destination_device = torch.device(inputs[root_index].device.type, destination)
        others = [t for i, t in enumerate(inputs) if i != root_index]
        # make a new tensor w/o clone
        result = inputs[root_index] + others[0].to(device=destination_device, non_blocking=True)
        for extra in others[1:]:
            result.add_(extra.to(device=destination_device, non_blocking=True))
    return result
+
104
+
105
def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760):
    """Sum tensors from multiple GPUs.

    Small tensors are first coalesced into a buffer to reduce the number
    of synchronizations.

    Args:
        inputs (Iterable[Iterable[Tensor]]): iterable of iterables that
            contain tensors from a single device.
        destination (int, optional): a device on which the output will be
            placed (default: current device).
        buffer_size (int): maximum size of the buffer used for coalescing

    Returns:
        A tuple of tensors containing an elementwise sum of each group of
        inputs, placed on the ``destination`` device.
    """
    # TODO: When `len(inputs) == 1` and all inputs are on `destination`, just
    # return `inputs`.
    dense_tensors: List[List] = [[] for _ in inputs]  # shape (num_gpus, num_tensors)
    output = []
    ref_order = []
    # Process sparse tensors first since they may have different sizes on
    # different gpus.
    for tensor_at_gpus in zip(*inputs):
        if all(t.is_sparse for t in tensor_at_gpus):
            result = reduce_add(tensor_at_gpus, destination)  # this will be sparse too
            output.append(result)
            ref_order.append(tensor_at_gpus[0])
        else:
            for per_gpu_list, t in zip(dense_tensors, tensor_at_gpus):
                per_gpu_list.append(t.to_dense() if t.is_sparse else t)
            ref_order.append(dense_tensors[0][-1])
    chunk_iters = [_take_tensors(tensors, buffer_size) for tensors in dense_tensors]
    # Now the dense ones, which have consistent sizes.
    for chunks in zip(*chunk_iters):
        flat_tensors = [_flatten_dense_tensors(chunk) for chunk in chunks]  # (num_gpus,)
        flat_result = reduce_add(flat_tensors, destination)
        for t in _unflatten_dense_tensors(flat_result, chunks[0]):
            # The unflattened tensors do not share storage, and we don't expose
            # base flat tensor anyways, so give them different version counters.
            # See NOTE [ Version Counter in comm.*_coalesced ]
            output.append(t.data)
    return tuple(_reorder_tensors_as(output, ref_order))
+
149
+
150
+ def scatter(tensor, devices=None, chunk_sizes=None, dim=0, streams=None, *, out=None):
151
+ """Scatters tensor across multiple GPUs.
152
+
153
+ Args:
154
+ tensor (Tensor): tensor to scatter. Can be on CPU or GPU.
155
+ devices (Iterable[torch.device, str or int], optional): an iterable of
156
+ GPU devices, among which to scatter.
157
+ chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on
158
+ each device. It should match :attr:`devices` in length and sums to
159
+ ``tensor.size(dim)``. If not specified, :attr:`tensor` will be divided
160
+ into equal chunks.
161
+ dim (int, optional): A dimension along which to chunk :attr:`tensor`.
162
+ Default: ``0``.
163
+ streams (Iterable[torch.cuda.Stream], optional): an iterable of Streams, among
164
+ which to execute the scatter. If not specified, the default stream will
165
+ be utilized.
166
+ out (Sequence[Tensor], optional, keyword-only): the GPU tensors to
167
+ store output results. Sizes of these tensors must match that of
168
+ :attr:`tensor`, except for :attr:`dim`, where the total size must
169
+ sum to ``tensor.size(dim)``.
170
+
171
+ .. note::
172
+ Exactly one of :attr:`devices` and :attr:`out` must be specified. When
173
+ :attr:`out` is specified, :attr:`chunk_sizes` must not be specified and
174
+ will be inferred from sizes of :attr:`out`.
175
+
176
+ Returns:
177
+ - If :attr:`devices` is specified,
178
+ a tuple containing chunks of :attr:`tensor`, placed on
179
+ :attr:`devices`.
180
+ - If :attr:`out` is specified,
181
+ a tuple containing :attr:`out` tensors, each containing a chunk of
182
+ :attr:`tensor`.
183
+ """
184
+ tensor = _handle_complex(tensor)
185
+ if out is None:
186
+ devices = [_get_device_index(d) for d in devices]
187
+ return tuple(torch._C._scatter(tensor, devices, chunk_sizes, dim, streams))
188
+ else:
189
+ if devices is not None:
190
+ raise RuntimeError(
191
+ f"'devices' must not be specified when 'out' is specified, but got devices={devices}")
192
+ if chunk_sizes is not None:
193
+ raise RuntimeError(
194
+ f"'chunk_sizes' must not be specified when 'out' is specified, but got chunk_sizes={chunk_sizes}")
195
+ return tuple(torch._C._scatter_out(tensor, out, dim, streams))
196
+
197
+
198
+ def gather(tensors, dim=0, destination=None, *, out=None):
199
+ r"""Gathers tensors from multiple GPU devices.
200
+
201
+ Args:
202
+ tensors (Iterable[Tensor]): an iterable of tensors to gather.
203
+ Tensor sizes in all dimensions other than :attr:`dim` have to match.
204
+ dim (int, optional): a dimension along which the tensors will be
205
+ concatenated. Default: ``0``.
206
+ destination (torch.device, str, or int, optional): the output device.
207
+ Can be CPU or CUDA. Default: the current CUDA device.
208
+ out (Tensor, optional, keyword-only): the tensor to store gather result.
209
+ Its sizes must match those of :attr:`tensors`, except for :attr:`dim`,
210
+ where the size must equal ``sum(tensor.size(dim) for tensor in tensors)``.
211
+ Can be on CPU or CUDA.
212
+
213
+ .. note::
214
+ :attr:`destination` must not be specified when :attr:`out` is specified.
215
+
216
+ Returns:
217
+ - If :attr:`destination` is specified,
218
+ a tensor located on :attr:`destination` device, that is a result of
219
+ concatenating :attr:`tensors` along :attr:`dim`.
220
+ - If :attr:`out` is specified,
221
+ the :attr:`out` tensor, now containing results of concatenating
222
+ :attr:`tensors` along :attr:`dim`.
223
+ """
224
+ tensors = [_handle_complex(t) for t in tensors]
225
+ if out is None:
226
+ if destination == -1:
227
+ warnings.warn(
228
+ 'Using -1 to represent CPU tensor is deprecated. Please use a '
229
+ 'device object or string instead, e.g., "cpu".')
230
+ destination = _get_device_index(destination, allow_cpu=True, optional=True)
231
+ return torch._C._gather(tensors, dim, destination)
232
+ else:
233
+ if destination is not None:
234
+ raise RuntimeError(
235
+ f"'destination' must not be specified when 'out' is specified, but got destination={destination}")
236
+ return torch._C._gather_out(tensors, out, dim)
evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import operator
2
+ import torch
3
+ import warnings
4
+ from itertools import chain
5
+ from typing import Any, Dict, Generic, List, Optional, Sequence, Tuple, TypeVar, Union
6
+ from ..modules import Module
7
+ from .scatter_gather import scatter_kwargs, gather
8
+ from .replicate import replicate
9
+ from .parallel_apply import parallel_apply
10
+ from torch._utils import (
11
+ _get_all_device_indices,
12
+ _get_available_device_type,
13
+ _get_device_index,
14
+ _get_devices_properties
15
+ )
16
+
17
+ __all__ = ['DataParallel', 'data_parallel']
18
+
19
+ def _check_balance(device_ids: Sequence[Union[int, torch.device]]) -> None:
20
+ imbalance_warn = """
21
+ There is an imbalance between your GPUs. You may want to exclude GPU {} which
22
+ has less than 75% of the memory or cores of GPU {}. You can do so by setting
23
+ the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES
24
+ environment variable."""
25
+ device_ids = [_get_device_index(x, True) for x in device_ids]
26
+ dev_props = _get_devices_properties(device_ids)
27
+
28
+ def warn_imbalance(get_prop):
29
+ values = [get_prop(props) for props in dev_props]
30
+ min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1))
31
+ max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1))
32
+ if min_val / max_val < 0.75:
33
+ warnings.warn(imbalance_warn.format(device_ids[min_pos], device_ids[max_pos]))
34
+ return True
35
+ return False
36
+
37
+ if warn_imbalance(lambda props: props.total_memory):
38
+ return
39
+ if warn_imbalance(lambda props: props.multi_processor_count):
40
+ return
41
+
42
+
43
+ T = TypeVar("T", bound=Module)
44
+
45
+
46
+ class DataParallel(Module, Generic[T]):
47
+ r"""Implements data parallelism at the module level.
48
+
49
+ This container parallelizes the application of the given :attr:`module` by
50
+ splitting the input across the specified devices by chunking in the batch
51
+ dimension (other objects will be copied once per device). In the forward
52
+ pass, the module is replicated on each device, and each replica handles a
53
+ portion of the input. During the backwards pass, gradients from each replica
54
+ are summed into the original module.
55
+
56
+ The batch size should be larger than the number of GPUs used.
57
+
58
+ .. warning::
59
+ It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`,
60
+ instead of this class, to do multi-GPU training, even if there is only a single
61
+ node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`.
62
+
63
+ Arbitrary positional and keyword inputs are allowed to be passed into
64
+ DataParallel but some types are specially handled. tensors will be
65
+ **scattered** on dim specified (default 0). tuple, list and dict types will
66
+ be shallow copied. The other types will be shared among different threads
67
+ and can be corrupted if written to in the model's forward pass.
68
+
69
+ The parallelized :attr:`module` must have its parameters and buffers on
70
+ ``device_ids[0]`` before running this :class:`~torch.nn.DataParallel`
71
+ module.
72
+
73
+ .. warning::
74
+ In each forward, :attr:`module` is **replicated** on each device, so any
75
+ updates to the running module in ``forward`` will be lost. For example,
76
+ if :attr:`module` has a counter attribute that is incremented in each
77
+ ``forward``, it will always stay at the initial value because the update
78
+ is done on the replicas which are destroyed after ``forward``. However,
79
+ :class:`~torch.nn.DataParallel` guarantees that the replica on
80
+ ``device[0]`` will have its parameters and buffers sharing storage with
81
+ the base parallelized :attr:`module`. So **in-place** updates to the
82
+ parameters or buffers on ``device[0]`` will be recorded. E.g.,
83
+ :class:`~torch.nn.BatchNorm2d` and :func:`~torch.nn.utils.spectral_norm`
84
+ rely on this behavior to update the buffers.
85
+
86
+ .. warning::
87
+ Forward and backward hooks defined on :attr:`module` and its submodules
88
+ will be invoked ``len(device_ids)`` times, each with inputs located on
89
+ a particular device. Particularly, the hooks are only guaranteed to be
90
+ executed in correct order with respect to operations on corresponding
91
+ devices. For example, it is not guaranteed that hooks set via
92
+ :meth:`~torch.nn.Module.register_forward_pre_hook` be executed before
93
+ `all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but
94
+ that each such hook be executed before the corresponding
95
+ :meth:`~torch.nn.Module.forward` call of that device.
96
+
97
+ .. warning::
98
+ When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in
99
+ :func:`forward`, this wrapper will return a vector of length equal to
100
+ number of devices used in data parallelism, containing the result from
101
+ each device.
102
+
103
+ .. note::
104
+ There is a subtlety in using the
105
+ ``pack sequence -> recurrent network -> unpack sequence`` pattern in a
106
+ :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.
107
+ See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for
108
+ details.
109
+
110
+
111
+ Args:
112
+ module (Module): module to be parallelized
113
+ device_ids (list of int or torch.device): CUDA devices (default: all devices)
114
+ output_device (int or torch.device): device location of output (default: device_ids[0])
115
+
116
+ Attributes:
117
+ module (Module): the module to be parallelized
118
+
119
+ Example::
120
+
121
+ >>> # xdoctest: +SKIP
122
+ >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
123
+ >>> output = net(input_var) # input_var can be on any device, including CPU
124
+ """
125
+
126
+ # TODO: update notes/cuda.rst when this class handles 8+ GPUs well
127
+
128
+ def __init__(
129
+ self,
130
+ module: T,
131
+ device_ids: Optional[Sequence[Union[int, torch.device]]] = None,
132
+ output_device: Optional[Union[int, torch.device]] = None,
133
+ dim: int = 0,
134
+ ) -> None:
135
+ super().__init__()
136
+ torch._C._log_api_usage_once("torch.nn.parallel.DataParallel")
137
+ device_type = _get_available_device_type()
138
+ if device_type is None:
139
+ self.module = module
140
+ self.device_ids = []
141
+ return
142
+
143
+ if device_ids is None:
144
+ device_ids = _get_all_device_indices()
145
+
146
+ if device_ids is None:
147
+ raise RuntimeError("no available devices were found")
148
+
149
+ if output_device is None:
150
+ output_device = device_ids[0]
151
+
152
+ self.dim = dim
153
+ self.module = module
154
+ self.device_ids = [_get_device_index(x, True) for x in device_ids]
155
+ self.output_device = _get_device_index(output_device, True)
156
+ self.src_device_obj = torch.device(device_type, self.device_ids[0])
157
+
158
+ if device_type == "cuda":
159
+ _check_balance(self.device_ids)
160
+
161
+ if len(self.device_ids) == 1:
162
+ self.module.to(self.src_device_obj)
163
+
164
+ def forward(self, *inputs: Any, **kwargs: Any) -> Any:
165
+ with torch.autograd.profiler.record_function("DataParallel.forward"):
166
+ if not self.device_ids:
167
+ return self.module(*inputs, **kwargs)
168
+
169
+ for t in chain(self.module.parameters(), self.module.buffers()):
170
+ if t.device != self.src_device_obj:
171
+ raise RuntimeError("module must have its parameters and buffers "
172
+ f"on device {self.src_device_obj} (device_ids[0]) but found one of "
173
+ f"them on device: {t.device}")
174
+
175
+ inputs, module_kwargs = self.scatter(inputs, kwargs, self.device_ids)
176
+ # for forward function without any inputs, empty list and dict will be created
177
+ # so the module can be executed on one device which is the first one in device_ids
178
+ if not inputs and not module_kwargs:
179
+ inputs = ((),)
180
+ module_kwargs = ({},)
181
+
182
+ if len(self.device_ids) == 1:
183
+ return self.module(*inputs[0], **module_kwargs[0])
184
+ replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
185
+ outputs = self.parallel_apply(replicas, inputs, module_kwargs)
186
+ return self.gather(outputs, self.output_device)
187
+
188
+ def replicate(self, module: T, device_ids: Sequence[Union[int, torch.device]]) -> List[T]:
189
+ return replicate(module, device_ids, not torch.is_grad_enabled())
190
+
191
+ def scatter(
192
+ self,
193
+ inputs: Tuple[Any, ...],
194
+ kwargs: Optional[Dict[str, Any]],
195
+ device_ids: Sequence[Union[int, torch.device]],
196
+ ) -> Any:
197
+ return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
198
+
199
+ def parallel_apply(self, replicas: Sequence[T], inputs: Sequence[Any], kwargs: Any) -> List[Any]:
200
+ return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
201
+
202
+ def gather(self, outputs: Any, output_device: Union[int, torch.device]) -> Any:
203
+ return gather(outputs, output_device, dim=self.dim)
204
+
205
+
206
+ def data_parallel(
207
+ module: Module,
208
+ inputs: Any,
209
+ device_ids: Optional[Sequence[Union[int, torch.device]]] = None,
210
+ output_device: Optional[Union[int, torch.device]] = None,
211
+ dim: int = 0,
212
+ module_kwargs: Optional[Any] = None,
213
+ ) -> torch.Tensor:
214
+ r"""Evaluate module(input) in parallel across the GPUs given in device_ids.
215
+
216
+ This is the functional version of the DataParallel module.
217
+
218
+ Args:
219
+ module (Module): the module to evaluate in parallel
220
+ inputs (Tensor): inputs to the module
221
+ device_ids (list of int or torch.device): GPU ids on which to replicate module
222
+ output_device (list of int or torch.device): GPU location of the output Use -1 to indicate the CPU.
223
+ (default: device_ids[0])
224
+ Returns:
225
+ a Tensor containing the result of module(input) located on
226
+ output_device
227
+ """
228
+ if not isinstance(inputs, tuple):
229
+ inputs = (inputs,) if inputs is not None else ()
230
+
231
+ device_type = _get_available_device_type()
232
+
233
+ if device_type is None:
234
+ raise RuntimeError("device type could not be determined")
235
+
236
+ if device_ids is None:
237
+ device_ids = _get_all_device_indices()
238
+
239
+ if device_ids is None:
240
+ raise RuntimeError("no available devices were found")
241
+
242
+ if output_device is None:
243
+ output_device = device_ids[0]
244
+
245
+ device_ids = [_get_device_index(x, True) for x in device_ids]
246
+ output_device = _get_device_index(output_device, True)
247
+ src_device_obj = torch.device(device_type, device_ids[0])
248
+
249
+ for t in chain(module.parameters(), module.buffers()):
250
+ if t.device != src_device_obj:
251
+ raise RuntimeError("module must have its parameters and buffers "
252
+ f"on device {src_device_obj} (device_ids[0]) but found one of "
253
+ f"them on device: {t.device}")
254
+
255
+ inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
256
+ # for module without any inputs, empty list and dict will be created
257
+ # so the module can be executed on one device which is the first one in device_ids
258
+ if not inputs and not module_kwargs:
259
+ inputs = ((),)
260
+ module_kwargs = ({},)
261
+
262
+ assert module_kwargs is not None
263
+
264
+ if len(device_ids) == 1:
265
+ return module(*inputs[0], **module_kwargs[0])
266
+ used_device_ids = device_ids[:len(inputs)]
267
+ replicas = replicate(module, used_device_ids)
268
+ outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
269
+ return gather(outputs, output_device, dim)
evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/distributed.py ADDED
The diff for this file is too large to render. See raw diff
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/replicate.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from ..modules import Module
3
+ from . import comm
4
+ from typing import TYPE_CHECKING, Dict, Iterator, List, Optional, Sequence, Set, TypeVar, Union, cast
5
+ from torch._utils import _get_device_index
6
+
7
+ from collections import OrderedDict
8
+
9
+ if TYPE_CHECKING:
10
+ import torch.jit
11
+ import torch.jit._state
12
+
13
+ __all__ = ['replicate']
14
+
15
+ def _is_script_module(module: Module) -> bool:
16
+ import torch.jit
17
+ return isinstance(module, torch.jit.ScriptModule)
18
+
19
+
20
+ def _is_script_method(module: Module) -> bool:
21
+ import torch.jit
22
+ return isinstance(module, torch._C.ScriptMethod)
23
+
24
+
25
+ def _init_script_module() -> "torch.jit.ScriptModule":
26
+ import torch.jit
27
+ return torch.jit.ScriptModule()
28
+
29
+
30
+ def _is_jit_enabled() -> "torch.jit._state.EnabledProxy":
31
+ import torch.jit._state
32
+ return torch.jit._state._enabled
33
+
34
+
35
+ # Check if we can safely replicate the module.
36
+ # there are two types of module:
37
+ # 1. python modules
38
+ # 2. ScriptModule
39
+ #
40
+ # currently a module cannot be replicated properly if the descendants of
41
+ # any ScriptModule contains python module (type 1 above)
42
+ def _replicatable_module(module: Module, memo: Optional[Set[Module]] = None) -> bool:
43
+
44
+ # module.modules() contains module itself as the first element
45
+ def descendant_modules(module: Module) -> Iterator[Module]:
46
+ gen = module.modules()
47
+ next(gen)
48
+ return gen
49
+
50
+ if not _is_jit_enabled():
51
+ return True
52
+ if memo is None:
53
+ memo = set()
54
+
55
+ # memoize visited modules
56
+ memo.add(module)
57
+ if _is_script_module(module):
58
+ memo.update(descendant_modules(module))
59
+ return all(_is_script_module(descendant) for
60
+ descendant in descendant_modules(module))
61
+
62
+ for child in module.children():
63
+ # since any unreplicatable module will cause the check to return
64
+ # False early, visited modules here can be safely ignored.
65
+ if child in memo:
66
+ continue
67
+ if not _replicatable_module(child, memo):
68
+ return False
69
+
70
+ return True
71
+
72
+ def _broadcast_coalesced_reshape(
73
+ tensors: Sequence[torch.Tensor],
74
+ devices: Sequence[Union[int, torch.device]],
75
+ detach: bool = False,
76
+ ) -> List[List[torch.Tensor]]:
77
+ from ._functions import Broadcast
78
+ if detach:
79
+ return comm.broadcast_coalesced(tensors, devices)
80
+ else:
81
+ # Use the autograd function to broadcast if not detach
82
+ if len(tensors) > 0:
83
+ tensor_copies = Broadcast.apply(devices, *tensors)
84
+ return [tensor_copies[i:i + len(tensors)]
85
+ for i in range(0, len(tensor_copies), len(tensors))]
86
+ else:
87
+ return []
88
+
89
+
90
+ T = TypeVar("T", bound=Module)
91
+
92
+
93
+ def replicate(
94
+ network: T,
95
+ devices: Sequence[Union[int, torch.device]],
96
+ detach: bool = False,
97
+ ) -> List[T]:
98
+ if not _replicatable_module(network):
99
+ raise RuntimeError("Cannot replicate network where python modules are "
100
+ "childrens of ScriptModule")
101
+
102
+ if not devices:
103
+ return []
104
+
105
+ devices = [_get_device_index(x, True) for x in devices]
106
+ num_replicas = len(devices)
107
+
108
+ params = list(network.parameters())
109
+ param_indices = {param: idx for idx, param in enumerate(params)}
110
+ param_copies = _broadcast_coalesced_reshape(params, devices, detach)
111
+
112
+ buffers = list(network.buffers())
113
+ buffers_rg: List[torch.Tensor] = []
114
+ buffers_not_rg: List[torch.Tensor] = []
115
+ for buf in buffers:
116
+ if buf.requires_grad and not detach:
117
+ buffers_rg.append(buf)
118
+ else:
119
+ buffers_not_rg.append(buf)
120
+
121
+ buffer_indices_rg = {buf: idx for idx, buf in enumerate(buffers_rg)}
122
+ buffer_indices_not_rg = {buf: idx for idx, buf in enumerate(buffers_not_rg)}
123
+
124
+ buffer_copies_rg = _broadcast_coalesced_reshape(buffers_rg, devices, detach=detach)
125
+ buffer_copies_not_rg = _broadcast_coalesced_reshape(buffers_not_rg, devices, detach=True)
126
+
127
+ modules = list(network.modules())
128
+ module_copies: List[List[Module]] = [[] for _ in devices]
129
+ module_indices: Dict[Module, int] = {}
130
+
131
+ for i, module in enumerate(modules):
132
+ module_indices[module] = i
133
+ for j in range(num_replicas):
134
+ replica = module._replicate_for_data_parallel()
135
+ # This is a temporary fix for DDP. DDP needs to access the
136
+ # replicated model parameters. It used to do so through
137
+ # `mode.parameters()`. The fix added in #33907 for DP stops the
138
+ # `parameters()` API from exposing the replicated parameters.
139
+ # Hence, we add a `_former_parameters` dict here to support DDP.
140
+ replica._former_parameters = OrderedDict()
141
+
142
+ module_copies[j].append(replica)
143
+
144
+ for i, module in enumerate(modules):
145
+ for key, child in module._modules.items():
146
+ if child is None:
147
+ for j in range(num_replicas):
148
+ replica = module_copies[j][i]
149
+ replica._modules[key] = None
150
+ else:
151
+ module_idx = module_indices[child]
152
+ for j in range(num_replicas):
153
+ replica = module_copies[j][i]
154
+ setattr(replica, key, module_copies[j][module_idx])
155
+ for key, param in module._parameters.items():
156
+ if param is None:
157
+ for j in range(num_replicas):
158
+ replica = module_copies[j][i]
159
+ replica._parameters[key] = None
160
+ else:
161
+ param_idx = param_indices[param]
162
+ for j in range(num_replicas):
163
+ replica = module_copies[j][i]
164
+ param_copy = param_copies[j][param_idx]
165
+ # parameters in replicas are no longer leaves,
166
+ # so setattr them as non-parameter attributes
167
+ setattr(replica, key, param_copy)
168
+ # expose the parameter for DDP
169
+ replica._former_parameters[key] = param_copy
170
+ for key, buf in module._buffers.items(): # type: ignore[assignment]
171
+ if buf is None:
172
+ for j in range(num_replicas):
173
+ replica = module_copies[j][i]
174
+ replica._buffers[key] = None
175
+ else:
176
+ if buf.requires_grad and not detach:
177
+ buffer_copies = buffer_copies_rg
178
+ buffer_idx = buffer_indices_rg[buf]
179
+ else:
180
+ buffer_copies = buffer_copies_not_rg
181
+ buffer_idx = buffer_indices_not_rg[buf]
182
+ for j in range(num_replicas):
183
+ replica = module_copies[j][i]
184
+ setattr(replica, key, buffer_copies[j][buffer_idx])
185
+
186
+ return [cast(T, module_copies[j][0]) for j in range(num_replicas)]
evalkit_tf437/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar, Union, overload
3
+ from ._functions import Scatter, Gather
4
+ import warnings
5
+
6
+ __all__ = ['scatter', 'scatter_kwargs', 'gather']
7
+
8
+ def is_namedtuple(obj: Any) -> bool:
9
+ # Check if type was created from collections.namedtuple or a typing.NamedTuple.
10
+ warnings.warn("is_namedtuple is deprecated, please use the python checks instead")
11
+ return _is_namedtuple(obj)
12
+
13
+ def _is_namedtuple(obj: Any) -> bool:
14
+ # Check if type was created from collections.namedtuple or a typing.NamedTuple.
15
+ return (
16
+ isinstance(obj, tuple) and hasattr(obj, "_asdict") and hasattr(obj, "_fields")
17
+ )
18
+
19
+
20
+ T = TypeVar("T", dict, list, tuple)
21
+
22
+ # For some reason, 'scatter' returns a tuple when given a single Tensor input but a list otherwise.
23
+ @overload
24
+ def scatter(
25
+ inputs: torch.Tensor,
26
+ target_gpus: Sequence[Union[int, torch.device]],
27
+ dim: int = ...,
28
+ ) -> Tuple[torch.Tensor, ...]:
29
+ ...
30
+
31
+ @overload
32
+ def scatter(inputs: T, target_gpus: Sequence[Union[int, torch.device]], dim: int = ...) -> List[T]:
33
+ ...
34
+
35
+ def scatter(inputs, target_gpus, dim=0):
36
+ r"""Slice tensors into approximately equal chunks and distributes them across given GPUs.
37
+
38
+ Duplicates references to objects that are not tensors.
39
+ """
40
+ def scatter_map(obj):
41
+ if isinstance(obj, torch.Tensor):
42
+ return Scatter.apply(target_gpus, None, dim, obj)
43
+ if _is_namedtuple(obj):
44
+ return [type(obj)(*args) for args in zip(*map(scatter_map, obj))]
45
+ if isinstance(obj, tuple) and len(obj) > 0:
46
+ return list(zip(*map(scatter_map, obj)))
47
+ if isinstance(obj, list) and len(obj) > 0:
48
+ return [list(i) for i in zip(*map(scatter_map, obj))]
49
+ if isinstance(obj, dict) and len(obj) > 0:
50
+ return [type(obj)(i) for i in zip(*map(scatter_map, obj.items()))]
51
+ return [obj for _ in target_gpus]
52
+
53
+ # After scatter_map is called, a scatter_map cell will exist. This cell
54
+ # has a reference to the actual function scatter_map, which has references
55
+ # to a closure that has a reference to the scatter_map cell (because the
56
+ # fn is recursive). To avoid this reference cycle, we set the function to
57
+ # None, clearing the cell
58
+ try:
59
+ res = scatter_map(inputs)
60
+ finally:
61
+ scatter_map = None # type: ignore[assignment]
62
+ return res
63
+
64
+
65
+ def scatter_kwargs(
66
+ inputs: Tuple[Any, ...],
67
+ kwargs: Optional[Dict[str, Any]],
68
+ target_gpus: Sequence[Union[int, torch.device]],
69
+ dim: int = 0,
70
+ ) -> Tuple[Tuple[Any, ...], Tuple[Dict[str, Any], ...]]:
71
+ r"""Scatter with support for kwargs dictionary."""
72
+ scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []
73
+ scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
74
+ if len(scattered_inputs) < len(scattered_kwargs):
75
+ scattered_inputs.extend(() for _ in range(len(scattered_kwargs) - len(scattered_inputs)))
76
+ elif len(scattered_kwargs) < len(inputs):
77
+ scattered_kwargs.extend({} for _ in range(len(scattered_inputs) - len(scattered_kwargs)))
78
+ return tuple(scattered_inputs), tuple(scattered_kwargs)
79
+
80
+
81
+ def gather(outputs: Any, target_device: Union[int, torch.device], dim: int = 0) -> Any:
82
+ r"""Gather tensors from different GPUs on a specified device.
83
+
84
+ Use 'cpu' for CPU to avoid a deprecation warning.
85
+ """
86
+ def gather_map(outputs):
87
+ out = outputs[0]
88
+ if isinstance(out, torch.Tensor):
89
+ return Gather.apply(target_device, dim, *outputs)
90
+ if out is None:
91
+ return None
92
+ if isinstance(out, dict):
93
+ if not all(len(out) == len(d) for d in outputs):
94
+ raise ValueError('All dicts must have the same number of keys')
95
+ return type(out)((k, gather_map([d[k] for d in outputs]))
96
+ for k in out)
97
+ if _is_namedtuple(out):
98
+ return type(out)._make(map(gather_map, zip(*outputs)))
99
+ return type(out)(map(gather_map, zip(*outputs)))
100
+
101
+ # Recursive function calls like this create reference cycles.
102
+ # Setting the function to None clears the refcycle.
103
+ try:
104
+ res = gather_map(outputs)
105
+ finally:
106
+ gather_map = None # type: ignore[assignment]
107
+ return res
evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""QAT Dynamic Modules.
3
+
4
+ This package is in the process of being deprecated.
5
+ Please, use `torch.ao.nn.qat.dynamic` instead.
6
+ """
7
+ from . import dynamic # noqa: F403
8
+ from . import modules # noqa: F403
9
+ from .modules import * # noqa: F403
10
+
11
+ __all__ = [
12
+ "Linear",
13
+ "Conv1d",
14
+ "Conv2d",
15
+ "Conv3d",
16
+ "Embedding",
17
+ "EmbeddingBag",
18
+ ]
evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/dynamic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (341 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .linear import Linear
2
+
3
+ __all__ = ["Linear"]
evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (246 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/dynamic/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (602 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/modules/__init__.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""QAT Modules.
3
+
4
+ This package is in the process of being deprecated.
5
+ Please, use `torch.ao.nn.qat.modules` instead.
6
+ """
7
+ from torch.ao.nn.qat.modules.linear import Linear
8
+ from torch.ao.nn.qat.modules.conv import Conv1d
9
+ from torch.ao.nn.qat.modules.conv import Conv2d
10
+ from torch.ao.nn.qat.modules.conv import Conv3d
11
+ from torch.ao.nn.qat.modules.embedding_ops import EmbeddingBag, Embedding
12
+
13
+ from . import conv
14
+ from . import embedding_ops
15
+ from . import linear
16
+
17
+ __all__ = [
18
+ "Linear",
19
+ "Conv1d",
20
+ "Conv2d",
21
+ "Conv3d",
22
+ "Embedding",
23
+ "EmbeddingBag",
24
+ ]
evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (731 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/modules/__pycache__/embedding_ops.cpython-310.pyc ADDED
Binary file (651 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/modules/embedding_ops.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""QAT Modules.
3
+
4
+ This file is in the process of migration to `torch/ao/nn/qat`, and
5
+ is kept here for compatibility while the migration process is ongoing.
6
+ If you are adding a new entry/functionality, please, add it to the
7
+ appropriate file under the `torch/ao/nn/qat/modules`,
8
+ while adding an import statement here.
9
+ """
10
+
11
+ __all__ = ['Embedding', 'EmbeddingBag']
12
+
13
+ from torch.ao.nn.qat.modules.embedding_ops import Embedding
14
+ from torch.ao.nn.qat.modules.embedding_ops import EmbeddingBag
evalkit_tf437/lib/python3.10/site-packages/torch/nn/qat/modules/linear.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""QAT Modules.
3
+
4
+ This file is in the process of migration to `torch/ao/nn/qat`, and
5
+ is kept here for compatibility while the migration process is ongoing.
6
+ If you are adding a new entry/functionality, please, add it to the
7
+ appropriate file under the `torch/ao/nn/qat/modules`,
8
+ while adding an import statement here.
9
+ """
10
+ from torch.ao.nn.qat.modules.linear import Linear
evalkit_tf437/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (961 Bytes). View file