nnilayy commited on
Commit
bb4520f
·
verified ·
1 Parent(s): d0f30f1

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. lib/python3.10/site-packages/babel/locale-data/ca.dat +3 -0
  3. lib/python3.10/site-packages/babel/locale-data/sq.dat +3 -0
  4. lib/python3.10/site-packages/multiprocess-0.70.12.2.dist-info/LICENSE +38 -0
  5. lib/python3.10/site-packages/opt_einsum-3.4.0.dist-info/INSTALLER +1 -0
  6. lib/python3.10/site-packages/opt_einsum-3.4.0.dist-info/METADATA +117 -0
  7. lib/python3.10/site-packages/opt_einsum-3.4.0.dist-info/RECORD +34 -0
  8. lib/python3.10/site-packages/opt_einsum-3.4.0.dist-info/REQUESTED +0 -0
  9. lib/python3.10/site-packages/opt_einsum-3.4.0.dist-info/WHEEL +4 -0
  10. lib/python3.10/site-packages/opt_einsum-3.4.0.dist-info/licenses/LICENSE +22 -0
  11. lib/python3.10/site-packages/torch/testing/_internal/__init__.py +0 -0
  12. lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py +472 -0
  13. lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py +633 -0
  14. lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py +165 -0
  15. lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py +1 -0
  16. lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py +1976 -0
  17. lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py +1541 -0
  18. lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py +213 -0
  19. lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py +1582 -0
  20. lib/python3.10/site-packages/torch/testing/_internal/common_jit.py +323 -0
  21. lib/python3.10/site-packages/torch/testing/_internal/common_mkldnn.py +78 -0
  22. lib/python3.10/site-packages/torch/testing/_internal/common_nn.py +0 -0
  23. lib/python3.10/site-packages/torch/testing/_internal/common_optimizers.py +2332 -0
  24. lib/python3.10/site-packages/torch/testing/_internal/common_pruning.py +385 -0
  25. lib/python3.10/site-packages/torch/testing/_internal/common_quantization.py +0 -0
  26. lib/python3.10/site-packages/torch/testing/_internal/common_subclass.py +346 -0
  27. lib/python3.10/site-packages/torch/testing/_internal/common_utils.py +0 -0
  28. lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py +592 -0
  29. lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py +586 -0
  30. lib/python3.10/site-packages/torch/testing/_internal/custom_tensor.py +67 -0
  31. lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py +1 -0
  32. lib/python3.10/site-packages/torch/testing/_internal/data/network1.py +10 -0
  33. lib/python3.10/site-packages/torch/testing/_internal/data/network2.py +11 -0
  34. lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py +200 -0
  35. lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py +98 -0
  36. lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py +66 -0
  37. lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py +0 -0
  38. lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py +66 -0
  39. lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__init__.py +0 -0
  40. lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py +0 -0
  41. lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py +279 -0
  42. lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py +326 -0
  43. lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py +62 -0
  44. lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py +63 -0
  45. lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py +0 -0
  46. lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py +34 -0
  47. lib/python3.10/site-packages/torch/testing/_internal/dynamo_test_failures.py +126 -0
  48. lib/python3.10/site-packages/torch/testing/_internal/hop_db.py +346 -0
  49. lib/python3.10/site-packages/torch/testing/_internal/hypothesis_utils.py +367 -0
  50. lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py +722 -0
.gitattributes CHANGED
@@ -146,3 +146,5 @@ lib/python3.10/site-packages/babel/locale-data/tg.dat filter=lfs diff=lfs merge=
146
  lib/python3.10/site-packages/babel/locale-data/mn.dat filter=lfs diff=lfs merge=lfs -text
147
  lib/python3.10/site-packages/babel/locale-data/no.dat filter=lfs diff=lfs merge=lfs -text
148
  lib/python3.10/site-packages/babel/locale-data/gl.dat filter=lfs diff=lfs merge=lfs -text
 
 
 
146
  lib/python3.10/site-packages/babel/locale-data/mn.dat filter=lfs diff=lfs merge=lfs -text
147
  lib/python3.10/site-packages/babel/locale-data/no.dat filter=lfs diff=lfs merge=lfs -text
148
  lib/python3.10/site-packages/babel/locale-data/gl.dat filter=lfs diff=lfs merge=lfs -text
149
+ lib/python3.10/site-packages/babel/locale-data/sq.dat filter=lfs diff=lfs merge=lfs -text
150
+ lib/python3.10/site-packages/babel/locale-data/ca.dat filter=lfs diff=lfs merge=lfs -text
lib/python3.10/site-packages/babel/locale-data/ca.dat ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0884f4fbb5fbfdef1129646b84b4a61be6810b60f9d7e86f46b4d5fbb8816aff
3
+ size 184038
lib/python3.10/site-packages/babel/locale-data/sq.dat ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f254de8f2026fbde22c694ba0e5cbb35048736c6a67eda8b21d3b83b13d6aa89
3
+ size 160229
lib/python3.10/site-packages/multiprocess-0.70.12.2.dist-info/LICENSE ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2004-2016 California Institute of Technology.
2
+ Copyright (c) 2016-2021 The Uncertainty Quantification Foundation.
3
+ All rights reserved.
4
+
5
+ This software forks the python package "multiprocessing". Licence and
6
+ copyright information for multiprocessing can be found in "COPYING.txt".
7
+
8
+ This software is available subject to the conditions and terms laid
9
+ out below. By downloading and using this software you are agreeing
10
+ to the following conditions.
11
+
12
+ Redistribution and use in source and binary forms, with or without
13
+ modification, are permitted provided that the following conditions
14
+ are met::
15
+
16
+ - Redistribution of source code must retain the above copyright
17
+ notice, this list of conditions and the following disclaimer.
18
+
19
+ - Redistribution in binary form must reproduce the above copyright
20
+ notice, this list of conditions and the following disclaimer in the
21
+ documentations and/or other materials provided with the distribution.
22
+
23
+ - Neither the names of the copyright holders nor the names of any of
24
+ the contributors may be used to endorse or promote products derived
25
+ from this software without specific prior written permission.
26
+
27
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29
+ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
34
+ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
35
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
36
+ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
37
+ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38
+
lib/python3.10/site-packages/opt_einsum-3.4.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ uv
lib/python3.10/site-packages/opt_einsum-3.4.0.dist-info/METADATA ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.3
2
+ Name: opt_einsum
3
+ Version: 3.4.0
4
+ Summary: Path optimization of einsum functions.
5
+ Author-email: Daniel Smith <dgasmith@icloud.com>
6
+ License-Expression: MIT
7
+ License-File: LICENSE
8
+ Classifier: Development Status :: 5 - Production/Stable
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: Intended Audience :: Science/Research
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Programming Language :: Python
13
+ Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3 :: Only
15
+ Classifier: Programming Language :: Python :: 3.9
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Programming Language :: Python :: 3.13
20
+ Classifier: Programming Language :: Python :: Implementation :: CPython
21
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
22
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
23
+ Requires-Python: >=3.8
24
+ Description-Content-Type: text/markdown
25
+
26
+ # Optimized Einsum
27
+
28
+ [![Tests](https://github.com/dgasmith/opt_einsum/actions/workflows/Tests.yml/badge.svg)](https://github.com/dgasmith/opt_einsum/actions/workflows/Tests.yml)
29
+ [![codecov](https://codecov.io/gh/dgasmith/opt_einsum/branch/master/graph/badge.svg)](https://codecov.io/gh/dgasmith/opt_einsum)
30
+ [![Anaconda-Server Badge](https://anaconda.org/conda-forge/opt_einsum/badges/version.svg)](https://anaconda.org/conda-forge/opt_einsum)
31
+ [![PyPI](https://img.shields.io/pypi/v/opt_einsum.svg)](https://pypi.org/project/opt-einsum/#description)
32
+ [![PyPIStats](https://img.shields.io/pypi/dm/opt_einsum)](https://pypistats.org/packages/opt-einsum)
33
+ [![Documentation Status](https://github.com/dgasmith/opt_einsum/actions/workflows/Docs.yaml/badge.svg)](https://dgasmith.github.io/opt_einsum/)
34
+ [![DOI](https://joss.theoj.org/papers/10.21105/joss.00753/status.svg)](https://doi.org/10.21105/joss.00753)
35
+
36
+ ## Optimized Einsum: A tensor contraction order optimizer
37
+
38
+ Optimized einsum can significantly reduce the overall execution time of einsum-like expressions (e.g.,
39
+ [`np.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html),
40
+ [`dask.array.einsum`](https://docs.dask.org/en/latest/array-api.html#dask.array.einsum),
41
+ [`pytorch.einsum`](https://pytorch.org/docs/stable/torch.html#torch.einsum),
42
+ [`tensorflow.einsum`](https://www.tensorflow.org/api_docs/python/tf/einsum),
43
+ )
44
+ by optimizing the expression's contraction order and dispatching many
45
+ operations to canonical BLAS, cuBLAS, or other specialized routines.
46
+
47
+ Optimized
48
+ einsum is agnostic to the backend and can handle NumPy, Dask, PyTorch,
49
+ Tensorflow, CuPy, Sparse, Theano, JAX, and Autograd arrays as well as potentially
50
+ any library which conforms to a standard API. See the
51
+ [**documentation**](https://dgasmith.github.io/opt_einsum/) for more
52
+ information.
53
+
54
+ ## Example usage
55
+
56
+ The [`opt_einsum.contract`](https://dgasmith.github.io/opt_einsum/api_reference#opt_einsumcontract)
57
+ function can often act as a drop-in replacement for `einsum`
58
+ functions without further changes to the code while providing superior performance.
59
+ Here, a tensor contraction is performed with and without optimization:
60
+
61
+ ```python
62
+ import numpy as np
63
+ from opt_einsum import contract
64
+
65
+ N = 10
66
+ C = np.random.rand(N, N)
67
+ I = np.random.rand(N, N, N, N)
68
+
69
+ %timeit np.einsum('pi,qj,ijkl,rk,sl->pqrs', C, C, I, C, C)
70
+ 1 loops, best of 3: 934 ms per loop
71
+
72
+ %timeit contract('pi,qj,ijkl,rk,sl->pqrs', C, C, I, C, C)
73
+ 1000 loops, best of 3: 324 us per loop
74
+ ```
75
+
76
+ In this particular example, we see a ~3000x performance improvement which is
77
+ not uncommon when compared against unoptimized contractions. See the [backend
78
+ examples](https://dgasmith.github.io/opt_einsum/getting_started/backends)
79
+ for more information on using other backends.
80
+
81
+ ## Features
82
+
83
+ The algorithms found in this repository often power the `einsum` optimizations
84
+ in many of the above projects. For example, the optimization of `np.einsum`
85
+ has been passed upstream and most of the same features that can be found in
86
+ this repository can be enabled with `np.einsum(..., optimize=True)`. However,
87
+ this repository often has more up to date algorithms for complex contractions.
88
+
89
+ The following capabilities are enabled by `opt_einsum`:
90
+
91
+ * Inspect [detailed information](https://dgasmith.github.io/opt_einsum/paths/introduction) about the path chosen.
92
+ * Perform contractions with [numerous backends](https://dgasmith.github.io/opt_einsum/getting_started/backends), including on the GPU and with libraries such as [TensorFlow](https://www.tensorflow.org) and [PyTorch](https://pytorch.org).
93
+ * Generate [reusable expressions](https://dgasmith.github.io/opt_einsum/getting_started/reusing_paths), potentially with [constant tensors](https://dgasmith.github.io/opt_einsum/getting_started/reusing_paths#specifying-constants), that can be compiled for greater performance.
94
+ * Use an arbitrary number of indices to find contractions for [hundreds or even thousands of tensors](https://dgasmith.github.io/opt_einsum/examples/large_expr_with_greedy).
95
+ * Share [intermediate computations](https://dgasmith.github.io/opt_einsum/getting_started/sharing_intermediates) among multiple contractions.
96
+ * Compute gradients of tensor contractions using [autograd](https://github.com/HIPS/autograd) or [jax](https://github.com/google/jax)
97
+
98
+ Please see the [documentation](https://dgasmith.github.io/opt_einsum/index) for more features!
99
+
100
+ ## Installation
101
+
102
+ `opt_einsum` can either be installed via `pip install opt_einsum` or from conda `conda install opt_einsum -c conda-forge`.
103
+ See the installation [documentation](https://dgasmith.github.io/opt_einsum/getting_started/install) for further methods.
104
+
105
+ ## Citation
106
+
107
+ If this code has benefited your research, please support us by citing:
108
+
109
+ Daniel G. A. Smith and Johnnie Gray, opt_einsum - A Python package for optimizing contraction order for einsum-like expressions. *Journal of Open Source Software*, **2018**, 3(26), 753
110
+
111
+ DOI: <https://doi.org/10.21105/joss.00753>
112
+
113
+ ## Contributing
114
+
115
+ All contributions, bug reports, bug fixes, documentation improvements, enhancements, and ideas are welcome.
116
+
117
+ A detailed overview on how to contribute can be found in the [contributing guide](https://github.com/dgasmith/opt_einsum/blob/master/.github/CONTRIBUTING.md).
lib/python3.10/site-packages/opt_einsum-3.4.0.dist-info/RECORD ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ opt_einsum-3.4.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
2
+ opt_einsum-3.4.0.dist-info/METADATA,sha256=n2eRFAxeG-TxsYk2XmMS1xGh6q6zmbXdZ0H3pL2Z0Ds,6345
3
+ opt_einsum-3.4.0.dist-info/RECORD,,
4
+ opt_einsum-3.4.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
+ opt_einsum-3.4.0.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
6
+ opt_einsum-3.4.0.dist-info/licenses/LICENSE,sha256=eyZpGnyO5Ir0JaU8uuqryao6hOouk_5HoezFUMHedts,1080
7
+ opt_einsum/__init__.py,sha256=LPIPWo9YglJpU7soEk8We_APdsvZX4p5DfDzXmbC3f8,834
8
+ opt_einsum/_version.py,sha256=YWGqQYvejjlymmjzg4jncyBgDC760jlRmyon_Rd-2uQ,411
9
+ opt_einsum/backends/__init__.py,sha256=bEAsIERI4AgTwEKxwzGQFDabNnIqiqGpJkenGm4mSQw,614
10
+ opt_einsum/backends/cupy.py,sha256=ha9l1kZxL84zOtAAgv4idI5wQv9a1HOlnadMVcy855g,950
11
+ opt_einsum/backends/dispatch.py,sha256=MR_8pzT0i9HdwKn-eklIk43seLUQbnvWM9otF4JTuto,4887
12
+ opt_einsum/backends/jax.py,sha256=4YFucG53tR-QrzbKO_N47yn2Kc1xPp_9jLjZIU_lRUc,1085
13
+ opt_einsum/backends/object_arrays.py,sha256=W91g6HfwDAHFAG8nIsLsjJAy75UuBno9AMeSb8wYF-0,2016
14
+ opt_einsum/backends/tensorflow.py,sha256=OgbLpjGyDxeFUcYjJUoVFb8bSyihDjPM0kUV6uEniJA,3891
15
+ opt_einsum/backends/theano.py,sha256=fBJaL2FOnx9_B7js4o0gXy3g6dzOdr_fT9kqXkSCCJM,1698
16
+ opt_einsum/backends/torch.py,sha256=3v1033W8XyjI4OIqCMvJKZsSP1xKHR-8-_RJRXVUAvI,3583
17
+ opt_einsum/blas.py,sha256=sn3kevdTgN6xYUxLo0QBQqXZ47tUYAQgMEeHJt0GN_c,3500
18
+ opt_einsum/contract.py,sha256=pCrV7YPgkD7pRY0KiXOYbGuywSCY-cYMyyj57Hr58PI,40341
19
+ opt_einsum/helpers.py,sha256=Y1PHxBEu49BEStlym3u8ak8xU3ycwBVu4CUFZ7qiwAA,4270
20
+ opt_einsum/parser.py,sha256=PoUJF5YtYlS-C53CH74ewXJ6pla7KWinnSqjOnBQtYU,13269
21
+ opt_einsum/path_random.py,sha256=2vkE2oshUXHlW6gtq8bcnflfVfHVaxBSYvkA9wTDvUI,14356
22
+ opt_einsum/paths.py,sha256=xqvuBrXX7Xwe22D7L1OmhUt8WUmgh4ZFdvSRPwXMzMw,51711
23
+ opt_einsum/sharing.py,sha256=PvWhN59XlvtKbQI6ziYK5lha2HTIvPzypzUxkxi5kHw,6914
24
+ opt_einsum/testing.py,sha256=MYJcR1qrEyqIkXRSdIalpu5B53cSAR59Ue2-JMnsfJg,6352
25
+ opt_einsum/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
26
+ opt_einsum/tests/test_backends.py,sha256=XcL6ifV4mxUzVAcqg0Yaxi3fY-LhvDXH2caiPVTmkKQ,15562
27
+ opt_einsum/tests/test_blas.py,sha256=r598QtkCxqXIk93eZQvvoWDMM2qgsWY4hT9_iAryU7o,3590
28
+ opt_einsum/tests/test_contract.py,sha256=Gti0GrEWdUg-DSCr2BTZNgYMHDcg5FdXmpjps95IYmE,8589
29
+ opt_einsum/tests/test_edge_cases.py,sha256=2zMNMK3YqOhEYxQu937qk_csKxUCZEzEXeW_fdcieZQ,5283
30
+ opt_einsum/tests/test_input.py,sha256=KlWo7SSfFsqymeTVpUQ2PsPqbIudccmkFsOSELNNqmI,8275
31
+ opt_einsum/tests/test_parser.py,sha256=t0oqBfPEHuVEiQHcjJJDYeTwiwmUVF-Vs3pgi9Mu8T8,2085
32
+ opt_einsum/tests/test_paths.py,sha256=aSKPJgABCjc0bX76dGwVoIwPHKMr-_iJD3by3wiFdEE,19461
33
+ opt_einsum/tests/test_sharing.py,sha256=hj3PBxbqW8TYzuxNgodpgbPvj3mwB9uCycTyxQbdef4,13125
34
+ opt_einsum/typing.py,sha256=Bsc-mCCegpgRSRuMARt4F3fPlhCRhvgg5RGaINYuVR0,937
lib/python3.10/site-packages/opt_einsum-3.4.0.dist-info/REQUESTED ADDED
File without changes
lib/python3.10/site-packages/opt_einsum-3.4.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.25.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
lib/python3.10/site-packages/opt_einsum-3.4.0.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ The MIT License (MIT)
2
+
3
+ Copyright (c) 2014 Daniel Smith
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
22
+
lib/python3.10/site-packages/torch/testing/_internal/__init__.py ADDED
File without changes
lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py ADDED
@@ -0,0 +1,472 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import collections
4
+
5
+ import torch
6
+ from torch.testing._internal.common_utils import TEST_WITH_ROCM
7
+ from torch.testing._internal.common_utils import TestCase
8
+
9
+
10
+ class AutocastTestLists:
11
+ def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype):
12
+ input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
13
+
14
+ hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
15
+ torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
16
+ torch.randn((n, n), device=dev, dtype=torch.float32),)
17
+
18
+ weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
19
+ torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
20
+ torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih
21
+ torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh
22
+
23
+ # returns args as a tuple
24
+ return input + hx + weights
25
+
26
+ # Supplies ops and arguments for test_autocast_* in test/test_cuda.py
27
+ def __init__(self, dev):
28
+ super().__init__()
29
+ n = 8
30
+ # Utility arguments, created as one-element tuples
31
+ pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
32
+ pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
33
+ pointwise2_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
34
+ mat0_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
35
+ mat1_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
36
+ mat2_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
37
+
38
+ dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
39
+ conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),
40
+ torch.randn(dimset, dtype=torch.float32, device=dev))
41
+ for dimset in dimsets]
42
+ bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),)
43
+ element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),)
44
+ pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
45
+ pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
46
+ mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
47
+ mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
48
+ mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
49
+ mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
50
+
51
+ # The lists below organize ops that autocast needs to test.
52
+ # self.list_name corresponds to test_autocast_list_name in test/test_cuda.py.
53
+ # Each op is associated with a tuple of valid arguments.
54
+ # In addition, cudnn conv ops are not supported on ROCm and hence will
55
+ # be skipped by passing TEST_WITH_ROCM flag to those ops in self.torch_fp16 list.
56
+
57
+ # Some ops implement built-in type promotion. These don't need autocasting,
58
+ # but autocasting relies on their promotion, so we include tests to double-check.
59
+ self.torch_expect_builtin_promote = [
60
+ ("eq", pointwise0_fp32 + pointwise1_fp16, torch.bool),
61
+ ("ge", pointwise0_fp32 + pointwise1_fp16, torch.bool),
62
+ ("gt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
63
+ ("le", pointwise0_fp32 + pointwise1_fp16, torch.bool),
64
+ ("lt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
65
+ ("ne", pointwise0_fp32 + pointwise1_fp16, torch.bool),
66
+ ("add", pointwise0_fp32 + pointwise1_fp16, torch.float32),
67
+ ("div", pointwise0_fp32 + pointwise1_fp16, torch.float32),
68
+ ("mul", pointwise0_fp32 + pointwise1_fp16, torch.float32),
69
+ ("cat", (pointwise0_fp16 + pointwise1_fp32,), torch.float32),
70
+ ("equal", pointwise0_fp32 + pointwise1_fp16, torch.float32),
71
+ ("stack", (pointwise0_fp16 + pointwise1_fp32,), torch.float32),
72
+ ]
73
+ self.methods_expect_builtin_promote = [
74
+ ("__eq__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
75
+ ("__ge__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
76
+ ("__gt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
77
+ ("__le__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
78
+ ("__lt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
79
+ ("__ne__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
80
+ ("__add__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
81
+ ("__div__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
82
+ ("__mul__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
83
+ ]
84
+
85
+ # The remaining lists organize ops that autocast treats explicitly.
86
+ self.torch_fp16 = [
87
+ # deprecated _convolution
88
+ ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
89
+ (0, 0), 1, False, True, True)),
90
+ # the current _convolution
91
+ ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
92
+ (0, 0), 1, False, True, True, True)),
93
+ ("conv1d", conv_args_fp32[0]),
94
+ ("conv2d", conv_args_fp32[1]),
95
+ ("conv3d", conv_args_fp32[2]),
96
+ ("conv_tbc", conv_args_fp32[0] + bias_fp32),
97
+ ("conv_transpose1d", conv_args_fp32[0]),
98
+ ("conv_transpose2d", conv_args_fp32[1]),
99
+ ("conv_transpose3d", conv_args_fp32[2]),
100
+ ("convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1)),
101
+ ("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM),
102
+ ("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1),
103
+ (1, 1), 1, False, True, True), TEST_WITH_ROCM),
104
+ ("prelu", pointwise0_fp32 + element0_fp32),
105
+ ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
106
+ ("addmv", pointwise0_fp32 + mat2_fp32 + pointwise1_fp32),
107
+ ("addr", mat0_fp32 + pointwise0_fp32 + pointwise1_fp32),
108
+ ("matmul", mat0_fp32 + mat1_fp32),
109
+ ("einsum", "bkhd,bqhd->bqkh", mat0_fp32 + mat1_fp32),
110
+ ("mm", mat0_fp32 + mat1_fp32),
111
+ ("mv", mat0_fp32 + pointwise0_fp32),
112
+ ("chain_matmul", mat0_fp32 + mat1_fp32 + mat2_fp32),
113
+ ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32),
114
+ torch.randn((n, n, n), device=dev, dtype=torch.float32))),
115
+ ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
116
+ torch.randn((n, n, n), device=dev, dtype=torch.float32),
117
+ torch.randn((n, n, n), device=dev, dtype=torch.float32))),
118
+ ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
119
+ torch.randn((n, n, n), device=dev, dtype=torch.float32))),
120
+ # _thnn_fused_lstm_cell and _thnn_fused_gru_cell are not Python-exposed as far as I can tell.
121
+ # ("_thnn_fused_lstm_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32),
122
+ # ("_thnn_fused_gru_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32),
123
+ ("lstm_cell", self._rnn_cell_args(n, num_chunks=4, is_lstm=True, dev=dev, dtype=torch.float32)),
124
+ ("gru_cell", self._rnn_cell_args(n, num_chunks=3, is_lstm=False, dev=dev, dtype=torch.float32)),
125
+ ("rnn_tanh_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)),
126
+ ("rnn_relu_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)),
127
+ ]
128
+ self.torch_fp32 = [
129
+ ("acos", (pointwise0_fp16[0].clamp(-.9, 0.9),)),
130
+ ("asin", (pointwise0_fp16[0].clamp(-.9, 0.9),)),
131
+ ("cosh", pointwise0_fp16),
132
+ ("erfinv", (pointwise0_fp16[0].clamp(-.9, .9),)),
133
+ ("exp", pointwise0_fp16),
134
+ ("expm1", pointwise0_fp16),
135
+ ("log", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
136
+ ("log10", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
137
+ ("log2", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
138
+ ("log1p", (pointwise0_fp16[0].clamp(-0.9, 100.0),)),
139
+ ("reciprocal", pointwise0_fp16),
140
+ ("rsqrt", (pointwise0_fp16[0].clamp(0.0, 100.0),)),
141
+ ("sinh", pointwise0_fp16),
142
+ ("tan", (pointwise0_fp16[0].clamp(-3.1 / 2, 3.1 / 2),)),
143
+ ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + pointwise1_fp16),
144
+ ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + (1.7,)),
145
+ # ("pow", (1.7,) + pointwise0_fp16), # This variant has a backend, but is not documented in the API.
146
+ ("softmax", pointwise0_fp16 + (0,)),
147
+ ("log_softmax", pointwise0_fp16 + (0,)),
148
+ ("layer_norm", pointwise0_fp16 + ((pointwise0_fp16[0].numel(),),)),
149
+ ("group_norm", mat0_fp16 + (1,)),
150
+ ("norm", pointwise0_fp16),
151
+ ("norm", pointwise0_fp16, {"dim": 0}),
152
+ # these need magma
153
+ # ("norm", mat0_fp16, {"p": "nuc"}),
154
+ # ("norm", mat0_fp16, {"p": "nuc", "dim": 0}),
155
+ ("norm", pointwise0_fp16, {"p": 1}),
156
+ ("norm", pointwise0_fp16, {"p": 1, "dim": 0}),
157
+ ("cosine_similarity", mat0_fp16 + mat1_fp16),
158
+ ("poisson_nll_loss", mat0_fp16 + mat1_fp16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))),
159
+ ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.float16),
160
+ torch.tensor([[1, 3, 4]], device=dev, dtype=torch.float16),
161
+ torch.tensor([1], device=dev, dtype=torch.int))),
162
+ ("hinge_embedding_loss", mat0_fp16 + (torch.ones(n, device=dev, dtype=torch.int),)),
163
+ ("kl_div", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)),
164
+ ("margin_ranking_loss", mat0_fp16 + mat1_fp16 + (torch.ones((n,), device=dev, dtype=torch.float16),)),
165
+ ("triplet_margin_loss", mat0_fp16 + mat1_fp16 + mat2_fp16),
166
+ ("binary_cross_entropy_with_logits", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)),
167
+ ("cumprod", pointwise0_fp16 + (0,)),
168
+ ("cumsum", pointwise0_fp16 + (0,)),
169
+ ("dist", pointwise0_fp16 + pointwise1_fp16),
170
+ ("pdist", mat0_fp16),
171
+ ("cdist", mat0_fp16 + mat1_fp16),
172
+ ("prod", pointwise0_fp16),
173
+ ("prod", pointwise0_fp16 + (0,)),
174
+ ("renorm", mat0_fp16 + (2, 0, 1.0)),
175
+ ("sum", pointwise0_fp16),
176
+ ("sum", mat0_fp16 + (1,)),
177
+ ("logsumexp", mat0_fp16 + (1,)),
178
+ ]
179
+ self.torch_need_autocast_promote = [
180
+ ("addcdiv", pointwise0_fp32 + pointwise1_fp16 + (pointwise2_fp16[0].clamp(0.1, 100),)),
181
+ ("addcmul", pointwise0_fp32 + pointwise1_fp16 + pointwise2_fp16),
182
+ ("atan2", pointwise0_fp32 + (pointwise1_fp16[0].clamp(0.1, 100),)),
183
+ ("bilinear", (torch.randn((1, 2), dtype=torch.float16, device=dev),
184
+ torch.randn((1, 2), dtype=torch.float32, device=dev),
185
+ torch.randn((1, 2, 2), dtype=torch.float16, device=dev),
186
+ torch.randn((1,), dtype=torch.float32, device=dev))),
187
+ ("cross", (torch.randn(3, dtype=torch.float32, device=dev),
188
+ torch.randn(3, dtype=torch.float16, device=dev))),
189
+ ("dot", pointwise0_fp16 + pointwise1_fp32),
190
+ ("vdot", pointwise0_fp16 + pointwise1_fp32),
191
+ ("grid_sampler", (torch.randn((2, 3, 33, 22), dtype=torch.float16, device=dev),
192
+ torch.randn((2, 22, 11, 2), dtype=torch.float32, device=dev),
193
+ 0, 0, False)),
194
+ ("index_put", pointwise0_fp32 + ((torch.tensor([1], device=dev, dtype=torch.long),),
195
+ torch.randn(1, device=dev, dtype=torch.float16))),
196
+ ("index_put", pointwise0_fp16 + ((torch.tensor([1], device=dev, dtype=torch.long),),
197
+ torch.randn(1, device=dev, dtype=torch.float32))),
198
+ ("tensordot", (torch.randn((2, 2, 2), dtype=torch.float32, device=dev),
199
+ torch.randn((2, 2, 2), dtype=torch.float16, device=dev))),
200
+ ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float32, device=dev),
201
+ 0,
202
+ torch.randint(0, 2, (2, 2, 2), device=dev),
203
+ torch.randn((2, 2, 2), dtype=torch.float16, device=dev))),
204
+ ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float16, device=dev),
205
+ 0,
206
+ torch.randint(0, 2, (2, 2, 2), device=dev),
207
+ torch.randn((2, 2, 2), dtype=torch.float32, device=dev))),
208
+ ]
209
+ self.nn_fp16 = [
210
+ ("linear", mat0_fp32 + mat1_fp32 + mat2_fp32),
211
+ ]
212
+ self.nn_fp32 = [
213
+ ("softplus", pointwise0_fp16),
214
+ ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.float),
215
+ torch.zeros((n,), device=dev, dtype=torch.long))),
216
+ ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.half),
217
+ torch.zeros((n, n, n), device=dev, dtype=torch.long))),
218
+ ("l1_loss", mat0_fp16 + mat1_fp16),
219
+ ("smooth_l1_loss", mat0_fp16 + mat1_fp16),
220
+ ("mse_loss", mat0_fp16 + mat1_fp16),
221
+ ("multilabel_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
222
+ ("soft_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
223
+ ("multi_margin_loss", mat0_fp16 + (torch.ones((n,), device=dev, dtype=torch.long),)),
224
+ ]
225
+ self.linalg_fp16 = [
226
+ ("linalg_vecdot", mat0_fp32 + mat0_fp32),
227
+ ("linalg_multi_dot", (mat0_fp32 + mat1_fp32 + mat2_fp32,)),
228
+ ]
229
+ self.methods_fp16 = [
230
+ ("__matmul__", mat0_fp32 + mat1_fp32)
231
+ ]
232
+ self.methods_fp32 = [
233
+ ("__pow__", (torch.rand(n, device=dev, dtype=torch.float16), 1.5)),
234
+ ]
235
+ self.banned = [
236
+ ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.float32),
237
+ torch.rand((n, n), device=dev, dtype=torch.float32)), torch._C._nn),
238
+ ]
239
+
240
+
241
class AutocastCPUTestLists:
    """Op/argument fixtures consumed by the CPU autocast tests.

    Each ``self.<list_name>`` attribute corresponds to a
    ``test_autocast_<list_name>`` test in test/test_cpu.py.  Entries are
    ``(op_name, args)`` or ``(op_name, args, kwargs)`` tuples; argument
    bundles are built as tuples so they can be concatenated with ``+``.
    """
    # Supplies ops and arguments for test_autocast_* in test/test_cpu.py
    def __init__(self, dev):
        super().__init__()
        n = 8
        # Utility arguments, created as one-element tuples
        pointwise0_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
        pointwise1_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
        mat0_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
        mat1_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
        mat2_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)

        pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
        pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)

        # Dim sets used to generate dummy tensors of ranks 1 through 5.
        dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n))

        dummy_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),)
                      for dimset in dummy_dimsets]

        # (input, weight) pairs for conv1d/2d/3d respectively (ranks 3, 4, 5).
        dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
        conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),
                           torch.randn(dimset, dtype=torch.float32, device=dev))
                          for dimset in dimsets]

        element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),)
        pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
        pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
        mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
        mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
        mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
        mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)

        dummy_fp32 = [  # noqa: F841
            (torch.randn(dimset, dtype=torch.float32, device=dev),)
            for dimset in dummy_dimsets
        ]
        # The lists below organize ops that autocast needs to test.
        # self.list_name corresponds to test_autocast_list_name in test/test_cpu.py.
        # Each op is associated with a tuple of valid arguments.

        # Some ops implement built-in type promotion. These don't need autocasting,
        # but autocasting relies on their promotion, so we include tests to double-check.
        # Entry layout: (op, bf16-mixed args, fp16-mixed args, expected out dtype).
        self.torch_expect_builtin_promote = [
            ("eq", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("ge", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("gt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("le", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("lt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("ne", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("add", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
            ("div", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
            ("mul", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
        ]

        self.methods_expect_builtin_promote = [
            ("__eq__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__ge__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__gt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__le__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__lt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__ne__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__add__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
            ("__div__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
            ("__mul__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
        ]
        # The remaining lists organize ops that autocast treats explicitly.
        # Ops expected to run in the low-precision (bf16/fp16) autocast dtype.
        self.torch_16 = [
            ("conv1d", conv_args_fp32[0]),
            ("conv2d", conv_args_fp32[1]),
            ("conv3d", conv_args_fp32[2]),
            ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
                     torch.randn((n, n, n), device=dev, dtype=torch.float32))),
            ("mm", mat0_fp32 + mat1_fp32),
            ("matmul", mat0_fp32 + mat1_fp32),
            ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
                         torch.randn((n, n, n), device=dev, dtype=torch.float32),
                         torch.randn((n, n, n), device=dev, dtype=torch.float32))),
            ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
            ("_addmm_activation", mat1_fp32 + mat2_fp32 + mat3_fp32, {"beta": 1, "alpha": 1, "use_gelu": True}),
            ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32),
                                    torch.randn((n, n, n), device=dev, dtype=torch.float32))),
            ("conv_tbc", (torch.randn((10, 7, 3), device=dev, dtype=torch.float32),
                          torch.randn((5, 3, 5), device=dev, dtype=torch.float32),
                          torch.randn(5, device=dev, dtype=torch.float32),
                          0)),
            ("conv_transpose1d", conv_args_fp32[0]),
            ("conv_transpose2d", conv_args_fp32[1]),
            ("conv_transpose3d", conv_args_fp32[2]),
            ("prelu", pointwise0_fp32 + element0_fp32),
            ("_native_multi_head_attention", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
                                              torch.randn((n, n, n), device=dev, dtype=torch.float32),
                                              torch.randn((n, n, n), device=dev, dtype=torch.float32),
                                              n, 4, torch.randn((3 * n, n), device=dev, dtype=torch.float32),
                                              torch.randn((3 * n), device=dev, dtype=torch.float32),
                                              torch.randn((n, n), device=dev, dtype=torch.float32),
                                              torch.randn((n), device=dev, dtype=torch.float32))),
        ]
        # Ops expected to run in fp32 even under autocast (numerically sensitive).
        self.torch_fp32 = [
            ("poisson_nll_loss", mat0_bf16 + mat1_bf16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))),
            ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.bfloat16),
                                       torch.tensor([[1, 3, 4]], device=dev, dtype=torch.bfloat16),
                                       torch.tensor([1], device=dev, dtype=torch.int))),
            ("hinge_embedding_loss", mat0_bf16 + (torch.ones(n, device=dev, dtype=torch.int),)),
            ("margin_ranking_loss", mat0_bf16 + mat1_bf16 + (torch.ones((n,), device=dev, dtype=torch.bfloat16),)),
            ("triplet_margin_loss", mat0_bf16 + mat1_bf16 + mat2_bf16),
            ("binary_cross_entropy_with_logits", mat0_bf16 + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)),
        ]
        self.nn_16 = [
            ("linear", mat0_fp32 + mat1_fp32, {}),
        ]
        # torch.nn.functional ops expected to run in fp32 under autocast.
        self.nn_fp32 = [
            ("avg_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}),
            ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),) +
                                     (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)),
            ("reflection_pad1d", dummy_bf16[2], {"padding": (3, 3)}),
            ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),
                          torch.zeros((n,), device=dev, dtype=torch.long))),
            ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.bfloat16),
                            torch.zeros((n, n, n), device=dev, dtype=torch.long))),
            ("l1_loss", mat0_bf16 + mat1_bf16),
            ("smooth_l1_loss", mat0_bf16 + mat1_bf16),
            ("mse_loss", mat0_bf16 + mat1_bf16),
            ("multilabel_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
            ("soft_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
            ("multi_margin_loss", mat0_bf16 + (torch.ones((n,), device=dev, dtype=torch.long),)),
            ("huber_loss", mat0_bf16 + mat1_bf16),
        ]
        # Ops whose mixed-dtype inputs autocast must promote to a common dtype.
        # Entry layout: (op, bf16-mode args, fp16-mode args).
        self.torch_need_autocast_promote = [
            ("cat", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)),
            ("stack", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)),
        ]
373
+
374
+
375
class TestAutocast(TestCase):
    """Shared driver logic for autocast op tests."""

    def args_maybe_kwargs(self, op_with_args):
        """Unpack an op-list entry into ``(op, args, kwargs)``.

        Entries are either ``(op, args)`` (kwargs default to ``{}``) or
        ``(op, args, kwargs)``.
        """
        if len(op_with_args) == 2:
            return op_with_args[0], op_with_args[1], {}
        else:
            return op_with_args[0], op_with_args[1], op_with_args[2]

    def _run_autocast_outofplace(
        self,
        op,
        args,
        run_as_type,
        device,
        out_type=None,
        module=torch,
        add_kwargs=None,
        amp_dtype=torch.bfloat16,
    ):
        """Run ``op`` under autocast and verify dtype and numerics.

        Checks, in order: the autocast output dtype matches ``out_type``
        (defaults to ``run_as_type``); the ``module.op`` and ``Tensor.op``
        variants agree; and the autocast result is bitwise equal to running
        the op outside autocast with inputs manually cast to ``run_as_type``.
        """
        # helper to cast args
        def cast(val, to_type):
            # Only floating-point tensors are cast; integer/bool tensors and
            # non-tensor values pass through unchanged (mirrors autocast).
            if isinstance(val, torch.Tensor):
                return val.to(to_type) if val.is_floating_point() else val
            elif isinstance(val, collections.abc.Iterable):
                return type(val)(cast(v, to_type) for v in val)
            else:
                return val

        if add_kwargs is None:
            add_kwargs = {}

        self.assertFalse(torch.is_autocast_enabled(device_type=device))
        with torch.amp.autocast(device_type=device, dtype=amp_dtype):
            self.assertTrue(torch.is_autocast_enabled(device_type=device))

            out_type = out_type if out_type is not None else run_as_type
            output = output_method = None

            # Try module.* variant, if requested:
            if module is not None and hasattr(module, op):
                output = getattr(module, op)(*args, **add_kwargs)
                if isinstance(output, torch.Tensor):
                    self.assertTrue(
                        out_type == output.dtype,
                        f"autocast for torch.{op} produced {output.dtype}, should produce {out_type}",
                    )
            # Try Tensor.* variant:
            if hasattr(torch.Tensor, op):
                output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
                if isinstance(output_method, torch.Tensor):
                    self.assertTrue(
                        out_type == output_method.dtype,
                        f"autocast for torch.{op} produced {output_method.dtype}, should produce torch.{out_type}",
                    )

            self.assertTrue(
                (output is not None) or (output_method is not None),
                f"{op} not found as an attribute on either Tensor or the requested module {module}",
            )

            # Accounts for ops that return Tensors, iterables, and other non-Tensors.
            # For example, lstm_cell returns a tuple and equal returns bool.
            def compare(first, second):
                if isinstance(first, torch.Tensor):
                    return torch.equal(first, second)
                elif isinstance(first, collections.abc.Iterable):
                    return all(compare(f, s) for f, s in zip(first, second))
                else:
                    return first == second

            # If both torch.* and Tensor.* variants were found, check outputs are identical
            if (output is not None) and (output_method is not None):
                self.assertTrue(type(output) == type(output_method))
                comparison = compare(output, output_method)
                self.assertTrue(
                    comparison, f"torch.{op} result did not match Tensor.{op} result"
                )

            # Compare numerics to Python-side "autocasting" that (we expect) does the same thing
            # as the C++-side autocasting, and should be bitwise accurate.
            output_to_compare = output if output is not None else output_method
            # Nested disabled region: the control run must see autocast off.
            with torch.amp.autocast(device_type=device, enabled=False):
                self.assertFalse(
                    torch.is_autocast_enabled(device_type=device)
                )

                if module is not None and hasattr(module, op):
                    control = getattr(module, op)(
                        *cast(args, run_as_type), **add_kwargs
                    )
                else:
                    control = getattr(args[0].to(run_as_type), op)(
                        *cast(args[1:], run_as_type), **add_kwargs
                    )
                self.assertTrue(type(output_to_compare) == type(control))
                comparison = compare(output_to_compare, control)
                self.assertTrue(comparison, f"torch.{op} result did not match control")
            # Autocast state must be restored on exiting each nested region.
            self.assertTrue(torch.is_autocast_enabled(device_type=device))
        self.assertFalse(torch.is_autocast_enabled(device_type=device))
lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py ADDED
@@ -0,0 +1,633 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ from functools import partial
5
+ from torch.testing import make_tensor
6
+ from torch.testing._internal.opinfo.core import (
7
+ OpInfo,
8
+ SampleInput,
9
+ )
10
+ from torch.testing._internal.common_dtype import all_types_and
11
+ import numpy as np
12
+
13
+ # Note: [autograd.Function db]
14
+ #
15
+ # This is a collection of autograd.Function test cases written as OpInfos
16
+ # so they can easily be consumed by OpInfo-based tests to check if a subsystem
17
+ # supports autograd.Function.
18
+ #
19
+ # Axes:
20
+ # - saves {output, input, intermediate, non-tensor}
21
+ # - {inputs, output} x {single tensor, tensors, arbitrary objects}
22
+ # - Uses {mark_dirty, mark_non_differentiable, once_differentiable}
23
+
24
+
25
def to_numpy(tensor):
    """Return *tensor* as a NumPy array, moving it to host memory first."""
    host_tensor = tensor.cpu()
    return host_tensor.numpy()
27
+
28
+
29
class NumpyCube(torch.autograd.Function):
    """Computes ``(x**3, 3*x**2)`` by round-tripping through NumPy.

    Saves the input and the derivative tensor for both backward and
    forward-mode AD, and supplies a manual ``vmap`` rule.
    """
    @staticmethod
    def forward(input):
        input_np = to_numpy(input)
        # Second output is d(x**3)/dx, reused by backward/jvp.
        dinput = torch.tensor(3 * input_np ** 2, device=input.device)
        return torch.tensor(input_np ** 3, device=input.device), dinput

    @staticmethod
    def setup_context(ctx, inputs, output):
        ctx.save_for_backward(inputs[0], output[1])
        ctx.save_for_forward(inputs[0], output[1])

    @staticmethod
    def backward(ctx, grad_output, grad_saved):
        input, dinput = ctx.saved_tensors
        # VJP of (x**3, 3x**2): g0 * 3x**2 + g1 * 6x, built from NumpyMul
        # so the backward itself is composable with this db's machinery.
        return NumpyMul.apply(grad_output, dinput) + 6 * NumpyMul.apply(grad_saved, input)

    @staticmethod
    def vmap(info, in_dims, input):
        # The op is elementwise, so the batch dim passes straight through
        # to both outputs.
        result = NumpyCube.apply(input)
        return result, (in_dims[0], in_dims[0])

    @staticmethod
    def jvp(ctx, input_tangent):
        input, dinput = ctx.saved_tensors
        return NumpyMul.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input)
55
+
56
+
57
class CubeGenVmap(torch.autograd.Function):
    """Computes ``(x**3, 3*x**2)`` with an auto-generated vmap rule.

    Pure-torch analogue of :class:`NumpyCube`; ``generate_vmap_rule = True``
    lets functorch derive the batching rule automatically.
    """
    generate_vmap_rule = True

    @staticmethod
    def forward(x):
        return x ** 3, 3 * x ** 2

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        ctx.save_for_backward(inputs[0], outputs[1])
        ctx.save_for_forward(inputs[0], outputs[1])

    @staticmethod
    def backward(ctx, grad_output, grad_saved):
        input, dinput = ctx.saved_tensors
        # VJP of (x**3, 3x**2) w.r.t. x: g0 * 3x**2 + g1 * 6x.
        # (Fix: previously this ignored grad_saved and used `6 * dinput`,
        # which disagrees with the mathematically correct NumpyCube.backward.)
        result = grad_output * dinput + 6 * grad_saved * input
        return result

    @staticmethod
    def jvp(ctx, input_tangent):
        input, dinput = ctx.saved_tensors
        return MulGenVmap.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input)
79
+
80
+
81
+ def sample_inputs_numpy_cube(opinfo, device, dtype, requires_grad, **kwargs):
82
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
83
+ yield SampleInput(make_arg(1, low=0.8, high=2), args=())
84
+
85
+
86
class NumpyCubeNotComposable(torch.autograd.Function):
    """Cube op that stashes a raw ndarray on ``ctx``.

    Saving non-tensor state directly on ``ctx`` (instead of
    ``save_for_backward``) makes the backward non-composable with
    higher-order AD, hence the ``once_differentiable`` decorator.
    """
    @staticmethod
    def forward(input):
        input_np = to_numpy(input)
        # Second output is the raw ndarray (a non-tensor output).
        return torch.tensor(input_np ** 3, device=input.device), input_np

    @staticmethod
    def setup_context(ctx, inputs, output):
        _, input_np = output
        ctx.input_np = input_np
        ctx.device = inputs[0].device

    @staticmethod
    @torch.autograd.function.once_differentiable
    def backward(ctx, grad_output, grad_saved):
        # NOTE(review): grad_output is unused here; the returned value is the
        # local derivative 3x**2 only — presumably what the consuming tests
        # expect from this deliberately non-composable fixture; confirm.
        result_np = 3 * (ctx.input_np ** 2)
        return torch.tensor(result_np, device=ctx.device)
103
+
104
+
105
class NumpyMul(torch.autograd.Function):
    """Elementwise multiply computed through NumPy, with manual vmap/jvp."""
    @staticmethod
    def forward(x, y):
        return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)

    @staticmethod
    def setup_context(ctx, inputs, output):
        ctx.save_for_backward(*inputs)
        ctx.save_for_forward(*inputs)

    @staticmethod
    def backward(ctx, grad_output):
        x, y = ctx.saved_tensors
        # Only materialize grads actually requested by autograd.
        gx = None
        if ctx.needs_input_grad[0]:
            gx = NumpyMul.apply(grad_output, y)
        gy = None
        if ctx.needs_input_grad[1]:
            gy = NumpyMul.apply(grad_output, x)
        return gx, gy

    @staticmethod
    def vmap(info, in_dims, x, y):
        # Move the batch dim to the trailing position (unsqueezing inputs
        # without one) so broadcasting still lines up, then move it to 0.
        x_bdim, y_bdim = in_dims
        x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
        y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1)
        result = NumpyMul.apply(x, y)
        result = result.movedim(-1, 0)
        return result, 0

    @staticmethod
    def jvp(ctx, x_tangent, y_tangent):
        # Product rule.
        x, y = ctx.saved_tensors
        return x_tangent * y + y_tangent * x
139
+
140
+ def sample_inputs_numpy_mul(opinfo, device, dtype, requires_grad, **kwargs):
141
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
142
+ # Broadcasting
143
+ yield SampleInput(make_arg(4, low=0.9, high=2), args=(make_arg(3, 4, low=0.9, high=2),))
144
+
145
+ def sample_inputs_numpy_mul_scalar(opinfo, device, dtype, requires_grad, **kwargs):
146
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
147
+ yield SampleInput(make_arg(4, low=0.9, high=2), args=(), kwargs={"scalar": 3.14})
148
+
149
class MulGenVmap(torch.autograd.Function):
    """Elementwise multiply with an auto-generated vmap rule.

    Pure-torch analogue of :class:`NumpyMul`.
    """
    generate_vmap_rule = True

    @staticmethod
    def forward(x, y):
        return x * y

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        ctx.save_for_backward(*inputs)
        ctx.save_for_forward(*inputs)

    @staticmethod
    def backward(ctx, grad_output):
        x, y = ctx.saved_tensors
        # Only materialize grads actually requested by autograd.
        gx = None
        if ctx.needs_input_grad[0]:
            gx = MulGenVmap.apply(grad_output, y)
        gy = None
        if ctx.needs_input_grad[1]:
            gy = MulGenVmap.apply(grad_output, x)
        return gx, gy

    @staticmethod
    def jvp(ctx, x_tangent, y_tangent):
        # Product rule.
        x, y = ctx.saved_tensors
        return x_tangent * y + y_tangent * x
176
+
177
+
178
class NumpyExp_(torch.autograd.Function):
    """In-place exp via NumPy; exercises ``ctx.mark_dirty``."""
    @staticmethod
    def forward(x):
        x_np = to_numpy(x)
        # np.exp(src, out) writes into x's storage in place.
        np.exp(x_np, x_np)
        return x

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, = inputs
        # Input was mutated in place — autograd must be told.
        ctx.mark_dirty(x)
        ctx.save_for_backward(output)
        ctx.save_for_forward(output)

    @staticmethod
    def backward(ctx, grad_output):
        # d/dx exp(x) = exp(x), which is the (saved) output itself.
        output, = ctx.saved_tensors
        return NumpyMul.apply(grad_output, output)

    @staticmethod
    def vmap(info, in_dims, x):
        # Elementwise in-place op: mutate, then pass the batch dim through.
        NumpyExp_.apply(x)
        return x, in_dims[0]

    @staticmethod
    def jvp(ctx, x_tangent):
        # Doesn't call numpy operations because I didn't want to write NumpyMul_
        output, = ctx.saved_tensors
        x_tangent.mul_(output)
        return x_tangent
208
+
209
class NumpySort(torch.autograd.Function):
    """Sort along ``dim`` via NumPy.

    Returns ``(sorted_x, ind, ind_inv)`` where ``ind`` is the sorting
    permutation and ``ind_inv`` its inverse; the index outputs are marked
    non-differentiable.
    """
    @staticmethod
    def forward(x, dim):
        device = x.device
        x = to_numpy(x)
        ind = np.argsort(x, axis=dim)
        ind_inv = np.argsort(ind, axis=dim)
        # Fix: actually return the *sorted* values. Previously the unsorted
        # input was returned, which contradicts backward/jvp (they permute
        # gradients through ind/ind_inv) and the sibling SortGenVmap, which
        # returns take_along_dim(x, ind, dim).
        result = np.take_along_axis(x, ind, dim)
        return (
            torch.tensor(result, device=device),
            torch.tensor(ind, device=device),
            torch.tensor(ind_inv, device=device),
        )

    @staticmethod
    def setup_context(ctx, inputs, output):
        _x, dim = inputs
        _, ind, ind_inv = output
        ctx.mark_non_differentiable(ind, ind_inv)
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output, _0, _1):
        # Un-permute the gradient of the sorted output back to input order.
        ind, ind_inv = ctx.saved_tensors
        return NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim), None

    @staticmethod
    def vmap(info, in_dims, x, dim):
        x_bdim, _ = in_dims
        x = x.movedim(x_bdim, 0)
        # wrap dim (x now has one extra leading batch dim)
        dim = dim if dim >= 0 else dim + x.dim() - 1
        return NumpySort.apply(x, dim + 1), (0, 0, 0)

    @staticmethod
    def jvp(ctx, x_tangent, _):
        # Tangents are permuted the same way as the primal values.
        ind, ind_inv = ctx.saved_tensors
        return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim), None, None
248
+
249
class SortGenVmap(torch.autograd.Function):
    """Sort along ``dim`` with an auto-generated vmap rule.

    Pure-torch analogue of :class:`NumpySort`; returns
    ``(sorted_x, ind, ind_inv)`` with the index outputs marked
    non-differentiable.
    """
    generate_vmap_rule = True

    @staticmethod
    def forward(x, dim):
        ind = torch.argsort(x, dim=dim)
        # ind_inv is the inverse permutation of ind.
        ind_inv = torch.argsort(ind, axis=dim)
        result = torch.take_along_dim(x, ind, dim=dim)
        return result, ind, ind_inv

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        x, dim = inputs
        _, ind, ind_inv = outputs
        ctx.mark_non_differentiable(ind, ind_inv)
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output, _0, _1):
        # Un-permute the gradient of the sorted output back to input order.
        ind, ind_inv = ctx.saved_tensors
        return TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim), None

    @staticmethod
    def jvp(ctx, x_tangent, _):
        # Tangents are permuted the same way as the primal values.
        ind, ind_inv = ctx.saved_tensors
        return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim), None, None
277
+
278
+
279
+ def sample_inputs_numpy_sort(opinfo, device, dtype, requires_grad, **kwargs):
280
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
281
+ yield SampleInput(make_arg(3, 5), args=(1,))
282
+
283
+
284
def sample_inputs_numpy_take(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield a ``(x, ind, ind_inv, dim)`` sample using indices from NumpySort."""
    tensor = make_tensor(3, 5, device=device, dtype=dtype, requires_grad=requires_grad)
    dim = 1
    # Reuse NumpySort to produce a valid permutation / inverse pair.
    _, ind, ind_inv = NumpySort.apply(tensor, 1)
    yield SampleInput(tensor, args=(ind, ind_inv, dim))
290
+
291
+
292
class NumpyTake(torch.autograd.Function):
    """``take_along_axis`` via NumPy.

    ``ind_inv`` (the inverse permutation) is not used in forward; it is
    saved so backward can apply the inverse gather without recomputing it.
    """
    @staticmethod
    def forward(x, ind, ind_inv, dim):
        device = x.device
        x = to_numpy(x)
        ind = to_numpy(ind)
        return torch.tensor(np.take_along_axis(x, ind, dim), device=device)

    @staticmethod
    def setup_context(ctx, inputs, output):
        _x, ind, ind_inv, dim = inputs
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output):
        # Gather with the inverse permutation undoes the forward gather.
        ind, ind_inv = ctx.saved_tensors
        result = NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim)
        return result, None, None, None

    @staticmethod
    def vmap(info, in_dims, x, ind, ind_inv, dim):
        x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims

        # wrap dim
        # NOTE(review): `x_bdim - 1` as the logical rank looks suspicious
        # (one would expect `x.dim() - 1`); confirm against upstream — it
        # only matters for negative `dim` values.
        logical_dim = x.dim() if x_bdim is None else x_bdim - 1
        dim = dim if dim >= 0 else dim + logical_dim

        def expand_bdim(x, x_bdim):
            # Broadcast unbatched args across the batch, otherwise move the
            # batch dim to the front.
            if x_bdim is None:
                return x.expand(info.batch_size, *x.shape)
            return x.movedim(x_bdim, 0)

        x = expand_bdim(x, x_bdim)
        ind = expand_bdim(ind, ind_bdim)
        ind_inv = expand_bdim(ind_inv, ind_inv_bdim)

        # Shift dim by one to account for the new leading batch dim.
        return NumpyTake.apply(x, ind, ind_inv, dim + 1), 0

    @staticmethod
    def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
        # Index inputs are integer-valued and carry no tangents.
        assert ind_tangent is None
        assert ind_inv_tangent is None
        ind, ind_inv = ctx.saved_tensors
        return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim)
338
+
339
class TakeGenVmap(torch.autograd.Function):
    """``take_along_dim`` with an auto-generated vmap rule.

    Pure-torch analogue of :class:`NumpyTake`; ``ind_inv`` is saved only
    so backward can apply the inverse gather.
    """
    generate_vmap_rule = True

    @staticmethod
    def forward(x, ind, ind_inv, dim):
        return torch.take_along_dim(x, ind, dim)

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        _x, ind, ind_inv, dim = inputs
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output):
        # Gather with the inverse permutation undoes the forward gather.
        ind, ind_inv = ctx.saved_tensors
        result = TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim)
        return result, None, None, None

    @staticmethod
    def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
        ind, ind_inv = ctx.saved_tensors
        return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim)
363
+
364
class Select(torch.autograd.Function):
    """Indexes ``x[idx]``; saves only shape/index metadata on ``ctx``."""
    @staticmethod
    def forward(x, idx):
        return x[idx]

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, idx = inputs
        ctx.x_shape = x.shape
        ctx.idx = idx

    @staticmethod
    def backward(ctx, grad_output):
        # Scatter the output grad back into a zero tensor of the input shape.
        result = grad_output.new_zeros(ctx.x_shape)
        result[ctx.idx] = grad_output
        return result, None

    @staticmethod
    def vmap(info, in_dims, x, idx):
        x_bdim, _ = in_dims
        # Park the batch dim at position 1 so indexing dim 0 leaves it as
        # dim 0 of the result.
        x = x.movedim(x_bdim, 1)
        return Select.apply(x, idx), 0

    @staticmethod
    def jvp(ctx, x_tangent, _):
        return Select.apply(x_tangent, ctx.idx)
390
+
391
class SelectGenVmap(torch.autograd.Function):
    """Indexes ``x[idx]`` with an auto-generated vmap rule.

    Analogue of :class:`Select` using ``generate_vmap_rule``.
    """
    generate_vmap_rule = True

    @staticmethod
    def forward(x, idx):
        return x[idx]

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        x, idx = inputs
        ctx.x_shape = x.shape
        ctx.idx = idx

    @staticmethod
    def backward(ctx, grad_output):
        # Scatter the output grad back into a zero tensor of the input shape.
        result = grad_output.new_zeros(ctx.x_shape)
        result[ctx.idx] = grad_output
        return result, None

    @staticmethod
    def jvp(ctx, x_tangent, _):
        return SelectGenVmap.apply(x_tangent, ctx.idx)
413
+
414
+
415
+ def sample_inputs_select(opinfo, device, dtype, requires_grad, **kwargs):
416
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
417
+ yield SampleInput(make_arg(3, 5), args=(2,))
418
+
419
class ScaleGradGenVmap(torch.autograd.Function):
    """Identity in forward; scales gradients/tangents by a fixed constant."""
    generate_vmap_rule = True
    # Fixed scale applied to both backward grads and forward tangents.
    scale = 3.14

    @staticmethod
    def forward(x):
        return x.clone()

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        # Nothing to save: backward/jvp only need the class constant.
        pass

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output * ScaleGradGenVmap.scale

    @staticmethod
    def jvp(ctx, x_tangent):
        return x_tangent * ScaleGradGenVmap.scale
438
+
439
class ZeroGradientsGenVmap(torch.autograd.Function):
    """Identity pair op that returns freshly-allocated zero grads/tangents.

    Exists to exercise subsystems' handling of grads that are new zero
    tensors (rather than ``None`` or ``zeros_like``), including a
    deliberately over-sized gradient in backward.
    """
    generate_vmap_rule = True

    @staticmethod
    def forward(x, y):
        return x.clone(), y.clone()

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        pass

    @staticmethod
    def backward(ctx, gx, gy):
        # Intentionally returning torch.zeros instead of zeros_like or new_zeros.
        # Also intentionally not None.
        return (
            # Intentionally too-large gradient
            torch.zeros(3, 4, *gx.shape, dtype=gx.dtype, device=gx.device),
            torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
        )

    @staticmethod
    def jvp(ctx, gx, gy):
        # Intentionally returning torch.zeros instead of zeros_like or new_zeros.
        # Also intentionally not None.
        return (
            torch.zeros(gx.shape, dtype=gx.dtype, device=gx.device),
            torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
        )
468
+
469
+
470
+ def sample_inputs_forward_default_args(opinfo, device, dtype, requires_grad, **kwargs):
471
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
472
+ yield SampleInput(make_arg(3, 5))
473
+
474
+
475
class ForwardHasDefaultArgs(torch.autograd.Function):
    """Like :class:`Select`, but ``forward`` has a defaulted ``idx`` argument.

    Exercises autograd.Function machinery with default forward arguments.
    """
    @staticmethod
    def forward(x, idx=(2,)):
        return x[idx]

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, idx = inputs
        ctx.x_shape = x.shape
        ctx.idx = idx

    @staticmethod
    def backward(ctx, grad_output):
        # Scatter the output grad back into a zero tensor of the input shape.
        result = grad_output.new_zeros(ctx.x_shape)
        result[ctx.idx] = grad_output
        return result, None

    @staticmethod
    def vmap(info, in_dims, x, idx):
        x_bdim, _ = in_dims
        # Park the batch dim at position 1 so indexing dim 0 leaves it as
        # dim 0 of the result.
        x = x.movedim(x_bdim, 1)
        return ForwardHasDefaultArgs.apply(x, idx), 0

    @staticmethod
    def jvp(ctx, x_tangent, _):
        return ForwardHasDefaultArgs.apply(x_tangent, ctx.idx)
501
+
502
+
503
# Registry of the autograd.Function fixtures above, exposed as OpInfos so
# generic OpInfo-based test suites can exercise autograd.Function support.
autograd_function_db = [
    OpInfo(
        'NumpyCubeAutogradFunction',
        op=NumpyCube.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyExpMarkDirtyAutogradFunction',
        # Out-of-place entry point clones first since NumpyExp_ mutates input.
        op=lambda x: NumpyExp_.apply(x.clone()),
        inplace_variant=NumpyExp_.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyMulAutogradFunction',
        op=NumpyMul.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyCubeNotComposableAutogradFunction',
        # Only the tensor output is exposed; the ndarray output stays internal.
        op=lambda x: NumpyCubeNotComposable.apply(x)[0],
        supports_forward_ad=False,
        supports_fwgrad_bwgrad=False,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpySortAutogradFunction',
        op=NumpySort.apply,
        supports_forward_ad=False,
        supports_fwgrad_bwgrad=False,
        sample_inputs_func=sample_inputs_numpy_sort,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        # gradcheck only the values output; the index outputs are non-diff.
        gradcheck_wrapper=lambda y, ind: y,
    ),
    OpInfo(
        'NumpyTakeAutogradFunction',
        op=NumpyTake.apply,
        supports_forward_ad=False,
        supports_fwgrad_bwgrad=False,
        sample_inputs_func=sample_inputs_numpy_take,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'SelectAutogradFunction',
        op=Select.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_select,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'CubeGenVmapAutogradFunction',
        op=CubeGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'MulGenVmapAutogradFunction',
        op=MulGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'SortGenVmapAutogradFunction',
        op=SortGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_sort,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        # gradcheck only the values output; the index outputs are non-diff.
        gradcheck_wrapper=lambda y, ind: y,
    ),
    OpInfo(
        'SelectGenVmapAutogradFunction',
        op=SelectGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_select,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'ScaleGradGenVmapAutogradFunction',
        op=ScaleGradGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'ZeroGradientsGenVmapAutogradFunction',
        op=ZeroGradientsGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'ForwardHasDefaultArgsAutogradFunction',
        op=ForwardHasDefaultArgs.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_forward_default_args,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
]
lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import os
4
+ import re
5
+ import sys
6
+ from typing import List
7
+
8
+ __all__ = [
9
+ "check_code_for_cuda_kernel_launches",
10
+ "check_cuda_kernel_launches",
11
+ ]
12
+
13
# FILES TO EXCLUDE (match is done with suffix using `endswith`)
# You wouldn't drive without a seatbelt, though, so why would you
# launch a kernel without some safety? Use this as a quick workaround
# for a problem with the checker, fix the checker, then de-exclude
# the files in question.
exclude_files: List[str] = []

# Without using a C++ AST we can't 100% detect kernel launches, so we
# model them as having the pattern "<<<parameters>>>(arguments);"
# We then require that `C10_CUDA_KERNEL_LAUNCH_CHECK` be
# the next statement.
#
# We model the next statement as ending at the next `}` or `;`.
# If we see `}` then a clause ended (bad) if we see a semi-colon then
# we expect the launch check just before it.
#
# Since the kernel launch can include lambda statements, it's important
# to find the correct end-paren of the kernel launch. Doing this with
# pure regex requires recursive regex, which aren't part of the Python
# standard library. To avoid an additional dependency, we build a prefix
# regex that finds the start of a kernel launch, use a paren-matching
# algorithm to find the end of the launch, and then another regex to
# determine if a launch check is present.

# Finds potential starts of kernel launches. The match consumes the whole
# line up to and including the opening paren of the argument list, so the
# match's last character is that '(' — see find_matching_paren() usage.
kernel_launch_start = re.compile(
    r"^.*<<<[^>]+>>>\s*\(", flags=re.MULTILINE
)

# This pattern should start at the character after the final paren of the
# kernel launch. Despite the name, it returns a match if the launch check
# is NOT the next statement (negative lookahead up to the next ';' or '}').
has_check = re.compile(
    r"\s*;(?![^;}]*C10_CUDA_KERNEL_LAUNCH_CHECK\(\);)", flags=re.MULTILINE
)
47
+
48
def find_matching_paren(s: str, startpos: int) -> int:
    """Given a string "prefix (unknown number of characters) suffix"
    and the position of the first `(` returns the index of the character
    1 past the `)`, accounting for paren nesting.
    """
    depth = 0
    for idx in range(startpos, len(s)):
        ch = s[idx]
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            if depth == 0:
                # Balanced: report the position just past the closing paren.
                return idx + 1

    raise IndexError("Closing parens not found!")
63
+
64
+
65
def should_exclude_file(filename) -> bool:
    """Return True when `filename` ends with any suffix in `exclude_files`."""
    return any(filename.endswith(suffix) for suffix in exclude_files)
70
+
71
+
72
def check_code_for_cuda_kernel_launches(code, filename=None):
    """Checks code for CUDA kernel launches without cuda error checks.

    Args:
        code - The code to check
        filename - Filename of file containing the code. Used only for display
            purposes, so you can put anything here.

    Returns:
        The number of unsafe kernel launches in the code
    """
    if filename is None:
        filename = "##Python Function Call##"

    # We break the code apart and put it back together to add
    # helpful line numberings for identifying problem areas
    code = enumerate(code.split("\n"))  # Split by line breaks
    code = [f"{lineno}: {linecode}" for lineno, linecode in code]  # Number the lines
    code = '\n'.join(code)  # Put it back together

    num_launches_without_checks = 0
    for m in kernel_launch_start.finditer(code):
        end_paren = find_matching_paren(code, m.end() - 1)
        # `has_check` matches when the launch check is ABSENT (see its regex).
        if has_check.match(code, end_paren):
            num_launches_without_checks += 1
            context = code[m.start():end_paren + 1]
            # Bug fix: the message previously hard-coded '(unknown)' instead of
            # interpolating `filename`, which made the `filename` parameter
            # (and its "##Python Function Call##" default) dead code.
            print(
                f"Missing C10_CUDA_KERNEL_LAUNCH_CHECK in '{filename}'. Context:\n{context}",
                file=sys.stderr,
            )

    return num_launches_without_checks
101
+
102
+
103
def check_file(filename):
    """Checks a file for CUDA kernel launches without cuda error checks.

    Args:
        filename - File to check

    Returns:
        The number of unsafe kernel launches in the file
    """
    # Only CUDA sources can launch kernels; everything else is trivially safe.
    if not filename.endswith((".cu", ".cuh")):
        return 0
    if should_exclude_file(filename):
        return 0
    with open(filename) as source:
        contents = source.read()
    return check_code_for_cuda_kernel_launches(contents, filename)
120
+
121
+
122
def check_cuda_kernel_launches():
    """Checks all pytorch code for CUDA kernel launches without cuda error checks.

    Returns:
        The number of unsafe kernel launches in the codebase
    """
    # Walk up two directories from this file to reach the repository base.
    base_dir = os.path.dirname(os.path.realpath(__file__))
    base_dir = os.path.dirname(base_dir)  # Go up to parent torch
    base_dir = os.path.dirname(base_dir)  # Go up to parent caffe2

    # `$BASE/build` and `$BASE/torch/include` are generated,
    # so their contents are never flagged.
    generated_roots = (
        os.path.join(base_dir, "build"),
        os.path.join(base_dir, "torch/include"),
    )

    total_unsafe = 0
    flagged_files = []
    for root, dirnames, filenames in os.walk(base_dir):
        if root in generated_roots:
            # Curtail the walk by clearing `dirnames` in place —
            # this is the documented pruning mechanism, see `help(os.walk)`.
            dirnames[:] = []
            continue

        for fname in filenames:
            path = os.path.join(root, fname)
            unsafe_in_file = check_file(path)
            if unsafe_in_file > 0:
                total_unsafe += unsafe_in_file
                flagged_files.append(path)

    if total_unsafe > 0:
        count_str = (
            f"Found {total_unsafe} instances in "
            f"{len(flagged_files)} files where kernel "
            "launches didn't have checks."
        )
        print(count_str, file=sys.stderr)
        print("Files without checks:", file=sys.stderr)
        for path in flagged_files:
            print(f"\t{path}", file=sys.stderr)
        # Repeat the summary so it is visible after a long file list.
        print(count_str, file=sys.stderr)

    return total_unsafe
161
+
162
+
163
# CLI entry point: exit status 0 iff no unchecked kernel launches were found,
# so this script can be used directly as a CI gate.
if __name__ == "__main__":
    unsafe_launches = check_cuda_kernel_launches()
    sys.exit(0 if unsafe_launches == 0 else 1)
lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # mypy: ignore-errors
lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py ADDED
@@ -0,0 +1,1976 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import copy
4
+ import gc
5
+ import inspect
6
+ import os
7
+ import runpy
8
+ import sys
9
+ import threading
10
+ import unittest
11
+ from collections import namedtuple
12
+ from enum import Enum
13
+ from functools import partial, wraps
14
+ from typing import (
15
+ Any,
16
+ ClassVar,
17
+ Dict,
18
+ Iterable,
19
+ List,
20
+ Optional,
21
+ Sequence,
22
+ Set,
23
+ Tuple,
24
+ Union,
25
+ )
26
+
27
+ import torch
28
+ from torch._inductor.utils import GPU_TYPES
29
+ from torch.testing._internal.common_cuda import (
30
+ _get_torch_cuda_version,
31
+ _get_torch_rocm_version,
32
+ TEST_CUSPARSE_GENERIC,
33
+ TEST_HIPSPARSE_GENERIC,
34
+ )
35
+ from torch.testing._internal.common_dtype import get_all_dtypes
36
+ from torch.testing._internal.common_utils import (
37
+ _TestParametrizer,
38
+ clear_tracked_input,
39
+ compose_parametrize_fns,
40
+ dtype_name,
41
+ get_tracked_input,
42
+ IS_FBCODE,
43
+ is_privateuse1_backend_available,
44
+ IS_REMOTE_GPU,
45
+ IS_SANDCASTLE,
46
+ IS_WINDOWS,
47
+ NATIVE_DEVICES,
48
+ PRINT_REPRO_ON_FAILURE,
49
+ skipCUDANonDefaultStreamIf,
50
+ skipIfTorchDynamo,
51
+ TEST_HPU,
52
+ TEST_MKL,
53
+ TEST_MPS,
54
+ TEST_WITH_ASAN,
55
+ TEST_WITH_MIOPEN_SUGGEST_NHWC,
56
+ TEST_WITH_ROCM,
57
+ TEST_WITH_TORCHINDUCTOR,
58
+ TEST_WITH_TSAN,
59
+ TEST_WITH_UBSAN,
60
+ TEST_XPU,
61
+ TestCase,
62
+ )
63
+
64
+
65
+ try:
66
+ import psutil # type: ignore[import]
67
+
68
+ HAS_PSUTIL = True
69
+ except ModuleNotFoundError:
70
+ HAS_PSUTIL = False
71
+ psutil = None
72
+
73
+ # Note [Writing Test Templates]
74
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
75
+ #
76
+ # This note was written shortly after the PyTorch 1.9 release.
77
+ # If you notice it's out-of-date or think it could be improved then please
78
+ # file an issue.
79
+ #
80
+ # PyTorch has its own framework for instantiating test templates. That is, for
81
+ # taking test classes that look similar to unittest or pytest
82
+ # compatible test classes and optionally doing the following:
83
+ #
84
+ # - instantiating a version of the test class for each available device type
85
+ # (often the CPU, CUDA, and META device types)
86
+ # - further instantiating a version of each test that's always specialized
87
+ # on the test class's device type, and optionally specialized further
88
+ # on datatypes or operators
89
+ #
90
+ # This functionality is similar to pytest's parametrize functionality
91
+ # (see https://docs.pytest.org/en/6.2.x/parametrize.html), but with considerable
92
+ # additional logic that specializes the instantiated test classes for their
93
+ # device types (see CPUTestBase and CUDATestBase below), supports a variety
94
+ # of composable decorators that allow for test filtering and setting
95
+ # tolerances, and allows tests parametrized by operators to instantiate
96
+ # only the subset of device type x dtype that operator supports.
97
+ #
98
+ # This framework was built to make it easier to write tests that run on
99
+ # multiple device types, multiple datatypes (dtypes), and for multiple
100
+ # operators. It's also useful for controlling which tests are run. For example,
101
+ # only tests that use a CUDA device can be run on platforms with CUDA.
102
+ # Let's dive in with an example to get an idea for how it works:
103
+ #
104
+ # --------------------------------------------------------
105
+ # A template class (looks like a regular unittest TestCase)
106
+ # class TestClassFoo(TestCase):
107
+ #
108
+ # # A template test that can be specialized with a device
109
+ # # NOTE: this test case is not runnable by unittest or pytest because it
110
+ # # accepts an extra positional argument, "device", that they do not understand
111
+ # def test_bar(self, device):
112
+ # pass
113
+ #
114
+ # # Function that instantiates a template class and its tests
115
+ # instantiate_device_type_tests(TestCommon, globals())
116
+ # --------------------------------------------------------
117
+ #
118
+ # In the above code example we see a template class and a single test template
119
+ # that can be instantiated with a device. The function
120
+ # instantiate_device_type_tests(), called at file scope, instantiates
121
+ # new test classes, one per available device type, and new tests in those
122
+ # classes from these templates. It actually does this by removing
123
+ # the class TestClassFoo and replacing it with classes like TestClassFooCPU
124
+ # and TestClassFooCUDA, instantiated test classes that inherit from CPUTestBase
125
+ # and CUDATestBase respectively. Additional device types, like XLA,
126
+ # (see https://github.com/pytorch/xla) can further extend the set of
127
+ # instantiated test classes to create classes like TestClassFooXLA.
128
+ #
129
+ # The test template, test_bar(), is also instantiated. In this case the template
130
+ # is only specialized on a device, so (depending on the available device
131
+ # types) it might become test_bar_cpu() in TestClassFooCPU and test_bar_cuda()
132
+ # in TestClassFooCUDA. We can think of the instantiated test classes as
133
+ # looking like this:
134
+ #
135
+ # --------------------------------------------------------
136
+ # # An instantiated test class for the CPU device type
137
+ # class TestClassFooCPU(CPUTestBase):
138
+ #
139
+ # # An instantiated test that calls the template with the string representation
140
+ # # of a device from the test class's device type
141
+ # def test_bar_cpu(self):
142
+ # test_bar(self, 'cpu')
143
+ #
144
+ # # An instantiated test class for the CUDA device type
145
+ # class TestClassFooCUDA(CUDATestBase):
146
+ #
147
+ # # An instantiated test that calls the template with the string representation
148
+ # # of a device from the test class's device type
149
+ # def test_bar_cuda(self):
150
+ # test_bar(self, 'cuda:0')
151
+ # --------------------------------------------------------
152
+ #
153
+ # These instantiated test classes ARE discoverable and runnable by both
154
+ # unittest and pytest. One thing that may be confusing, however, is that
155
+ # attempting to run "test_bar" will not work, despite it appearing in the
156
+ # original template code. This is because "test_bar" is no longer discoverable
157
+ # after instantiate_device_type_tests() runs, as the above snippet shows.
158
+ # Instead "test_bar_cpu" and "test_bar_cuda" may be run directly, or both
159
+ # can be run with the option "-k test_bar".
160
+ #
161
+ # Removing the template class and adding the instantiated classes requires
162
+ # passing "globals()" to instantiate_device_type_tests(), because it
163
+ # edits the file's Python objects.
164
+ #
165
+ # As mentioned, tests can be additionally parametrized on dtypes or
166
+ # operators. Datatype parametrization uses the @dtypes decorator and
167
+ # require a test template like this:
168
+ #
169
+ # --------------------------------------------------------
170
+ # # A template test that can be specialized with a device and a datatype (dtype)
171
+ # @dtypes(torch.float32, torch.int64)
172
+ # def test_car(self, device, dtype)
173
+ # pass
174
+ # --------------------------------------------------------
175
+ #
176
+ # If the CPU and CUDA device types are available this test would be
177
+ # instantiated as 4 tests that cover the cross-product of the two dtypes
178
+ # and two device types:
179
+ #
180
+ # - test_car_cpu_float32
181
+ # - test_car_cpu_int64
182
+ # - test_car_cuda_float32
183
+ # - test_car_cuda_int64
184
+ #
185
+ # The dtype is passed as a torch.dtype object.
186
+ #
187
+ # Tests parametrized on operators (actually on OpInfos, more on that in a
188
+ # moment...) use the @ops decorator and require a test template like this:
189
+ # --------------------------------------------------------
190
+ # # A template test that can be specialized with a device, dtype, and OpInfo
191
+ # @ops(op_db)
192
+ # def test_car(self, device, dtype, op)
193
+ # pass
194
+ # --------------------------------------------------------
195
+ #
196
+ # See the documentation for the @ops decorator below for additional details
197
+ # on how to use it and see the note [OpInfos] in
198
+ # common_methods_invocations.py for more details on OpInfos.
199
+ #
200
+ # A test parametrized over the entire "op_db", which contains hundreds of
201
+ # OpInfos, will likely have hundreds or thousands of instantiations. The
202
+ # test will be instantiated on the cross-product of device types, operators,
203
+ # and the dtypes the operator supports on that device type. The instantiated
204
+ # tests will have names like:
205
+ #
206
+ # - test_car_add_cpu_float32
207
+ # - test_car_sub_cuda_int64
208
+ #
209
+ # The first instantiated test calls the original test_car() with the OpInfo
210
+ # for torch.add as its "op" argument, the string 'cpu' for its "device" argument,
211
+ # and the dtype torch.float32 for is "dtype" argument. The second instantiated
212
+ # test calls the test_car() with the OpInfo for torch.sub, a CUDA device string
213
+ # like 'cuda:0' or 'cuda:1' for its "device" argument, and the dtype
214
+ # torch.int64 for its "dtype argument."
215
+ #
216
+ # In addition to parametrizing over device, dtype, and ops via OpInfos, the
217
+ # @parametrize decorator is supported for arbitrary parametrizations:
218
+ # --------------------------------------------------------
219
+ # # A template test that can be specialized with a device, dtype, and value for x
220
+ # @parametrize("x", range(5))
221
+ # def test_car(self, device, dtype, x)
222
+ # pass
223
+ # --------------------------------------------------------
224
+ #
225
+ # See the documentation for @parametrize in common_utils.py for additional details
226
+ # on this. Note that the instantiate_device_type_tests() function will handle
227
+ # such parametrizations; there is no need to additionally call
228
+ # instantiate_parametrized_tests().
229
+ #
230
+ # Clever test filtering can be very useful when working with parametrized
231
+ # tests. "-k test_car" would run every instantiated variant of the test_car()
232
+ # test template, and "-k test_car_add" runs every variant instantiated with
233
+ # torch.add.
234
+ #
235
+ # It is important to use the passed device and dtype as appropriate. Use
236
+ # helper functions like make_tensor() that require explicitly specifying
237
+ # the device and dtype so they're not forgotten.
238
+ #
239
+ # Test templates can use a variety of composable decorators to specify
240
+ # additional options and requirements, some are listed here:
241
+ #
242
+ # - @deviceCountAtLeast(<minimum number of devices to run test with>)
243
+ # Passes a list of strings representing all available devices of
244
+ # the test class's device type as the test template's "device" argument.
245
+ # If there are fewer devices than the value passed to the decorator
246
+ # the test is skipped.
247
+ # - @dtypes(<list of tuples of dtypes>)
248
+ # In addition to accepting multiple dtypes, the @dtypes decorator
249
+ # can accept a sequence of tuple pairs of dtypes. The test template
250
+ # will be called with each tuple for its "dtype" argument.
251
+ # - @onlyNativeDeviceTypes
252
+ # Skips the test if the device is not a native device type (currently CPU, CUDA, Meta)
253
+ # - @onlyCPU
254
+ # Skips the test if the device is not a CPU device
255
+ # - @onlyCUDA
256
+ # Skips the test if the device is not a CUDA device
257
+ # - @onlyMPS
258
+ # Skips the test if the device is not a MPS device
259
+ # - @skipCPUIfNoLapack
260
+ # Skips the test if the device is a CPU device and LAPACK is not installed
261
+ # - @skipCPUIfNoMkl
262
+ # Skips the test if the device is a CPU device and MKL is not installed
263
+ # - @skipCUDAIfNoMagma
264
+ # Skips the test if the device is a CUDA device and MAGMA is not installed
265
+ # - @skipCUDAIfRocm
266
+ # Skips the test if the device is a CUDA device and ROCm is being used
267
+
268
+
269
+ # Note [Adding a Device Type]
270
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
271
+ #
272
+ # To add a device type:
273
+ #
274
+ # (1) Create a new "TestBase" extending DeviceTypeTestBase.
275
+ # See CPUTestBase and CUDATestBase below.
276
+ # (2) Define the "device_type" attribute of the base to be the
277
+ # appropriate string.
278
+ # (3) Add logic to this file that appends your base class to
279
+ # device_type_test_bases when your device type is available.
280
+ # (4) (Optional) Write setUpClass/tearDownClass class methods that
281
+ # instantiate dependencies (see MAGMA in CUDATestBase).
282
+ # (5) (Optional) Override the "instantiate_test" method for total
283
+ # control over how your class creates tests.
284
+ #
285
+ # setUpClass is called AFTER tests have been created and BEFORE and ONLY IF
286
+ # they are run. This makes it useful for initializing devices and dependencies.
287
+
288
+
289
+ # Note [Overriding methods in generic tests]
290
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
291
+ #
292
+ # Device generic tests look a lot like normal test classes, but they differ
293
+ # from ordinary classes in some important ways. In particular, overriding
294
+ # methods in generic tests doesn't work quite the way you expect.
295
+ #
296
+ # class TestFooDeviceType(TestCase):
297
+ # # Intention is to override
298
+ # def assertEqual(self, x, y):
299
+ # # This DOESN'T WORK!
300
+ # super().assertEqual(x, y)
301
+ #
302
+ # If you try to run this code, you'll get an error saying that TestFooDeviceType
303
+ # is not in scope. This is because after instantiating our classes, we delete
304
+ # it from the parent scope. Instead, you need to hardcode a direct invocation
305
+ # of the desired subclass call, e.g.,
306
+ #
307
+ # class TestFooDeviceType(TestCase):
308
+ # # Intention is to override
309
+ # def assertEqual(self, x, y):
310
+ # TestCase.assertEqual(x, y)
311
+ #
312
+ # However, a less error-prone way of customizing the behavior of TestCase
313
+ # is to either (1) add your functionality to TestCase and make it toggled
314
+ # by a class attribute, or (2) create your own subclass of TestCase, and
315
+ # then inherit from it for your generic test.
316
+
317
+
318
+ def _dtype_test_suffix(dtypes):
319
+ """Returns the test suffix for a dtype, sequence of dtypes, or None."""
320
+ if isinstance(dtypes, (list, tuple)):
321
+ if len(dtypes) == 0:
322
+ return ""
323
+ return "_" + "_".join(dtype_name(d) for d in dtypes)
324
+ elif dtypes:
325
+ return f"_{dtype_name(dtypes)}"
326
+ else:
327
+ return ""
328
+
329
+
330
+ def _update_param_kwargs(param_kwargs, name, value):
331
+ """Adds a kwarg with the specified name and value to the param_kwargs dict."""
332
+ # Make name plural (e.g. devices / dtypes) if the value is composite.
333
+ plural_name = f"{name}s"
334
+
335
+ # Clear out old entries of the arg if any.
336
+ if name in param_kwargs:
337
+ del param_kwargs[name]
338
+ if plural_name in param_kwargs:
339
+ del param_kwargs[plural_name]
340
+
341
+ if isinstance(value, (list, tuple)):
342
+ param_kwargs[plural_name] = value
343
+ elif value is not None:
344
+ param_kwargs[name] = value
345
+
346
+ # Leave param_kwargs as-is when value is None.
347
+
348
+
349
class DeviceTypeTestBase(TestCase):
    """Base class for the per-device test classes produced by
    instantiate_device_type_tests().

    Subclasses (CPUTestBase, CUDATestBase, ...) set `device_type` and may
    override hooks such as get_primary_device() or instantiate_test() —
    see Note [Adding a Device Type] above.
    """

    # Backend identifier ("cpu", "cuda", ...); overridden by each subclass.
    device_type: str = "generic_device_type"

    # Flag to disable test suite early due to unrecoverable error such as CUDA error.
    _stop_test_suite = False

    # Precision is a thread-local setting since it may be overridden per test
    _tls = threading.local()
    _tls.precision = TestCase._precision
    _tls.rel_tol = TestCase._rel_tol

    @property
    def precision(self):
        """Absolute tolerance currently in effect (thread-local)."""
        return self._tls.precision

    @precision.setter
    def precision(self, prec):
        self._tls.precision = prec

    @property
    def rel_tol(self):
        """Relative tolerance currently in effect (thread-local)."""
        return self._tls.rel_tol

    @rel_tol.setter
    def rel_tol(self, prec):
        self._tls.rel_tol = prec

    # Returns a string representing the device that single device tests should use.
    # Note: single device tests use this device exclusively.
    @classmethod
    def get_primary_device(cls):
        return cls.device_type

    @classmethod
    def _init_and_get_primary_device(cls):
        """Return the primary device, running setUpClass() first when the
        device string is only populated there (e.g. backends that pick a
        specific device index during class setup)."""
        try:
            return cls.get_primary_device()
        except Exception:
            # For CUDATestBase, XPUTestBase, XLATestBase, and possibly others, the primary device won't be available
            # until setUpClass() sets it. Call that manually here if needed.
            if hasattr(cls, "setUpClass"):
                cls.setUpClass()
            return cls.get_primary_device()

    # Returns a list of strings representing all available devices of this
    # device type. The primary device must be the first string in the list
    # and the list must contain no duplicates.
    # Note: UNSTABLE API. Will be replaced once PyTorch has a device generic
    # mechanism of acquiring all available devices.
    @classmethod
    def get_all_devices(cls):
        return [cls.get_primary_device()]

    # Returns the dtypes the test has requested.
    # Prefers device-specific dtype specifications over generic ones.
    @classmethod
    def _get_dtypes(cls, test):
        if not hasattr(test, "dtypes"):
            return None

        default_dtypes = test.dtypes.get("all")
        msg = f"@dtypes is mandatory when using @dtypesIf however '{test.__name__}' didn't specify it"
        assert default_dtypes is not None, msg

        return test.dtypes.get(cls.device_type, default_dtypes)

    def _get_precision_override(self, test, dtype):
        """Return the @precisionOverride value for `dtype`, or the current
        precision when the test declares no override."""
        if not hasattr(test, "precision_overrides"):
            return self.precision
        return test.precision_overrides.get(dtype, self.precision)

    def _get_tolerance_override(self, test, dtype):
        """Return an (atol, rtol) pair from @toleranceOverride for `dtype`,
        falling back to the current precision/rel_tol."""
        if not hasattr(test, "tolerance_overrides"):
            return self.precision, self.rel_tol
        # NOTE(review): relies on `tol` being in scope at module level; its
        # import is not visible in this chunk — confirm it is imported.
        return test.tolerance_overrides.get(dtype, tol(self.precision, self.rel_tol))

    def _apply_precision_override_for_test(self, test, param_kwargs):
        """Install any per-dtype precision/tolerance overrides for `test`
        before it runs. Caller is responsible for restoring the old values."""
        dtype = param_kwargs["dtype"] if "dtype" in param_kwargs else None
        dtype = param_kwargs["dtypes"] if "dtypes" in param_kwargs else dtype
        if dtype:
            self.precision = self._get_precision_override(test, dtype)
            self.precision, self.rel_tol = self._get_tolerance_override(test, dtype)

    # Creates device-specific tests.
    @classmethod
    def instantiate_test(cls, name, test, *, generic_cls=None):
        """Instantiate the template `test` onto this device class.

        Expands the template across its parametrize_fn (set by @ops,
        @parametrize, ...) and any @dtypes* decorators, then attaches one
        concrete test method per combination, named
        ``<name><param suffix>_<device>[_<dtype>]``.
        """

        def instantiate_test_helper(
            cls, name, *, test, param_kwargs=None, decorator_fn=lambda _: []
        ):
            # Attaches a single concrete test method `name` to `cls`.
            # Add the device param kwarg if the test needs device or devices.
            param_kwargs = {} if param_kwargs is None else param_kwargs
            test_sig_params = inspect.signature(test).parameters
            if "device" in test_sig_params or "devices" in test_sig_params:
                device_arg: str = cls._init_and_get_primary_device()
                if hasattr(test, "num_required_devices"):
                    # Multi-device tests receive the full device list instead.
                    device_arg = cls.get_all_devices()
                _update_param_kwargs(param_kwargs, "device", device_arg)

            # Apply decorators based on param kwargs.
            for decorator in decorator_fn(param_kwargs):
                test = decorator(test)

            # Constructs the test
            @wraps(test)
            def instantiated_test(self, param_kwargs=param_kwargs):
                # Sets precision and runs test
                # Note: precision is reset after the test is run
                guard_precision = self.precision
                guard_rel_tol = self.rel_tol
                try:
                    self._apply_precision_override_for_test(test, param_kwargs)
                    result = test(self, **param_kwargs)
                except RuntimeError as rte:
                    # check if rte should stop entire test suite.
                    self._stop_test_suite = self._should_stop_test_suite()
                    # Check if test has been decorated with `@expectedFailure`
                    # Using `__unittest_expecting_failure__` attribute, see
                    # https://github.com/python/cpython/blob/ffa505b580464/Lib/unittest/case.py#L164
                    # In that case, make it fail with "unexpected success" by suppressing exception
                    if (
                        getattr(test, "__unittest_expecting_failure__", False)
                        and self._stop_test_suite
                    ):
                        import sys

                        print(
                            "Suppressing fatal exception to trigger unexpected success",
                            file=sys.stderr,
                        )
                        return
                    # raise the runtime error as is for the test suite to record.
                    raise rte
                finally:
                    self.precision = guard_precision
                    self.rel_tol = guard_rel_tol

                return result

            assert not hasattr(cls, name), f"Redefinition of test {name}"
            setattr(cls, name, instantiated_test)

        def default_parametrize_fn(test, generic_cls, device_cls):
            # By default, no parametrization is needed.
            yield (test, "", {}, lambda _: [])

        # Parametrization decorators set the parametrize_fn attribute on the test.
        parametrize_fn = getattr(test, "parametrize_fn", default_parametrize_fn)

        # If one of the @dtypes* decorators is present, also parametrize over the dtypes set by it.
        dtypes = cls._get_dtypes(test)
        if dtypes is not None:

            def dtype_parametrize_fn(test, generic_cls, device_cls, dtypes=dtypes):
                for dtype in dtypes:
                    param_kwargs: Dict[str, Any] = {}
                    _update_param_kwargs(param_kwargs, "dtype", dtype)

                    # Note that an empty test suffix is set here so that the dtype can be appended
                    # later after the device.
                    yield (test, "", param_kwargs, lambda _: [])

            parametrize_fn = compose_parametrize_fns(
                dtype_parametrize_fn, parametrize_fn
            )

        # Instantiate the parametrized tests.
        for (
            test,  # noqa: B020
            test_suffix,
            param_kwargs,
            decorator_fn,
        ) in parametrize_fn(test, generic_cls, cls):
            test_suffix = "" if test_suffix == "" else "_" + test_suffix
            # privateuse1 tests are named after the registered backend name.
            cls_device_type = (
                cls.device_type
                if cls.device_type != "privateuse1"
                else torch._C._get_privateuse1_backend_name()
            )
            device_suffix = "_" + cls_device_type

            # Note: device and dtype suffix placement
            # Special handling here to place dtype(s) after device according to test name convention.
            dtype_kwarg = None
            if "dtype" in param_kwargs or "dtypes" in param_kwargs:
                dtype_kwarg = (
                    param_kwargs["dtypes"]
                    if "dtypes" in param_kwargs
                    else param_kwargs["dtype"]
                )
            test_name = (
                f"{name}{test_suffix}{device_suffix}{_dtype_test_suffix(dtype_kwarg)}"
            )

            instantiate_test_helper(
                cls=cls,
                name=test_name,
                test=test,
                param_kwargs=param_kwargs,
                decorator_fn=decorator_fn,
            )

    def run(self, result=None):
        """Run the test, then halt the whole result stream if the test marked
        the suite as unrecoverable (see `_stop_test_suite`)."""
        super().run(result=result)
        # Early terminate test if _stop_test_suite is set.
        if self._stop_test_suite:
            result.stop()
555
+
556
+
557
class CPUTestBase(DeviceTypeTestBase):
    """Device test base for the CPU backend."""

    device_type = "cpu"

    def _should_stop_test_suite(self):
        # CPU errors are never considered fatal to the whole suite.
        return False
563
+
564
+
565
class CUDATestBase(DeviceTypeTestBase):
    """Device test base for CUDA; probes MAGMA/cuDNN availability at setup."""

    device_type = "cuda"
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True
    primary_device: ClassVar[str]
    cudnn_version: ClassVar[Any]
    no_magma: ClassVar[bool]
    no_cudnn: ClassVar[bool]

    def has_cudnn(self):
        return not self.no_cudnn

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # Primary device first, followed by every other visible CUDA device.
        primary_idx = int(cls.get_primary_device().split(":")[1])
        others = [
            f"cuda:{idx}"
            for idx in range(torch.cuda.device_count())
            if idx != primary_idx
        ]
        return [cls.get_primary_device()] + others

    @classmethod
    def setUpClass(cls):
        # torch.cuda.has_magma only shows up after CUDA is initialized, so
        # force initialization by creating a CUDA tensor first.
        probe = torch.ones(1).cuda()
        cls.no_magma = not torch.cuda.has_magma

        # Determine whether cuDNN is available and, if so, its version.
        cls.no_cudnn = not torch.backends.cudnn.is_acceptable(probe)
        cls.cudnn_version = None if cls.no_cudnn else torch.backends.cudnn.version()

        # The current device is used as the primary (test) device.
        cls.primary_device = f"cuda:{torch.cuda.current_device()}"
607
+
608
+
609
# See Note [Lazy Tensor tests in device agnostic testing]
lazy_ts_backend_init = False


class LazyTestBase(DeviceTypeTestBase):
    """Device test base for the lazy tensor backend (TorchScript-backed)."""

    device_type = "lazy"

    def _should_stop_test_suite(self):
        return False

    @classmethod
    def setUpClass(cls):
        import torch._lazy
        import torch._lazy.metrics
        import torch._lazy.ts_backend

        global lazy_ts_backend_init
        if not lazy_ts_backend_init:
            # Connect the TS backend to the lazy key exactly once, before any
            # lazy test runs.
            torch._lazy.ts_backend.init()
            lazy_ts_backend_init = True
630
+
631
+
632
class MPSTestBase(DeviceTypeTestBase):
    """Device test base for Apple MPS."""

    device_type = "mps"
    primary_device: ClassVar[str]

    @classmethod
    def setUpClass(cls):
        cls.primary_device = "mps:0"

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # Only a single device is currently supported on the MPS backend.
        return [cls.get_primary_device()]

    def _should_stop_test_suite(self):
        # MPS errors are not fatal to the rest of the suite.
        return False
652
+
653
+
654
class XPUTestBase(DeviceTypeTestBase):
    """Device test base for Intel XPU."""

    device_type = "xpu"
    primary_device: ClassVar[str]

    @classmethod
    def setUpClass(cls):
        cls.primary_device = f"xpu:{torch.xpu.current_device()}"

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # Only a single device is currently supported on the XPU backend
        # (the original comment said "MPS" — copy-paste artifact).
        return [cls.get_primary_device()]

    def _should_stop_test_suite(self):
        return False
674
+
675
+
676
class HPUTestBase(DeviceTypeTestBase):
    """Device test base for Habana HPU."""

    device_type = "hpu"
    primary_device: ClassVar[str]

    @classmethod
    def setUpClass(cls):
        cls.primary_device = "hpu:0"

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device
687
+
688
+
689
class PrivateUse1TestBase(DeviceTypeTestBase):
    """Device test base for a registered PrivateUse1 (custom) backend.

    The concrete backend name and its torch module are resolved in
    setUpClass from torch's PrivateUse1 registration.
    """

    primary_device: ClassVar[str]
    device_mod = None
    device_type = "privateuse1"

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # Primary device first, then every other index reported by the backend.
        primary_idx = int(cls.get_primary_device().split(":")[1])
        others = [
            f"{cls.device_type}:{idx}"
            for idx in range(cls.device_mod.device_count())
            if idx != primary_idx
        ]
        return [cls.get_primary_device()] + others

    @classmethod
    def setUpClass(cls):
        cls.device_type = torch._C._get_privateuse1_backend_name()
        cls.device_mod = getattr(torch, cls.device_type, None)
        assert (
            cls.device_mod is not None
        ), f"""torch has no module of `{cls.device_type}`, you should register
a module by `torch._register_device_module`."""
        cls.primary_device = f"{cls.device_type}:{cls.device_mod.current_device()}"
720
+
721
+
722
+ # Adds available device-type-specific test base classes
723
# Adds available device-type-specific test base classes
def get_device_type_test_bases():
    """Return the device-type test base classes usable in this environment."""
    # Typed List[Any] due to mypy's list-of-union issue:
    # https://github.com/python/mypy/issues/3351
    test_bases: List[Any] = []

    if IS_SANDCASTLE or IS_FBCODE:
        if IS_REMOTE_GPU:
            # Skip CUDA when a sanitizer is enabled.
            if not (TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN):
                test_bases.append(CUDATestBase)
        else:
            test_bases.append(CPUTestBase)
        return test_bases

    test_bases.append(CPUTestBase)
    if torch.cuda.is_available():
        test_bases.append(CUDATestBase)
    if is_privateuse1_backend_available():
        test_bases.append(PrivateUse1TestBase)
    # Disable MPS testing in generic device testing temporarily while we're
    # ramping up support.
    # elif torch.backends.mps.is_available():
    #     test_bases.append(MPSTestBase)

    return test_bases


device_type_test_bases = get_device_type_test_bases()
751
+
752
+
753
def filter_desired_device_types(device_type_test_bases, except_for=None, only_for=None):
    """Filter test base classes by device type.

    `except_for` removes matching device types; `only_for` keeps only matching
    ones. A device type may not appear in both. The registered PrivateUse1
    backend name is normalized to the literal string "privateuse1".
    """
    # A device type cannot appear in both except_for and only_for.
    overlap = set(except_for or []) & set(only_for or [])
    assert (
        not overlap
    ), f"device ({overlap}) appeared in both except_for and only_for"

    # Replace the registered privateuse1 backend name with 'privateuse1'.
    if is_privateuse1_backend_available():
        pu1_name = torch._C._get_privateuse1_backend_name()

        def _canonicalize(names):
            if names is None:
                return None
            return ["privateuse1" if n == pu1_name else n for n in names]

        except_for = _canonicalize(except_for)
        only_for = _canonicalize(only_for)

    if except_for:
        device_type_test_bases = [
            base for base in device_type_test_bases if base.device_type not in except_for
        ]
    if only_for:
        device_type_test_bases = [
            base for base in device_type_test_bases if base.device_type in only_for
        ]

    return list(device_type_test_bases)
786
+
787
+
788
+ # Note [How to extend DeviceTypeTestBase to add new test device]
789
+ # The following logic optionally allows downstream projects like pytorch/xla to
790
+ # add more test devices.
791
+ # Instructions:
792
+ # - Add a python file (e.g. pytorch/xla/test/pytorch_test_base.py) in downstream project.
793
+ # - Inside the file, one should inherit from `DeviceTypeTestBase` class and define
794
+ # a new DeviceTypeTest class (e.g. `XLATestBase`) with proper implementation of
795
+ # `instantiate_test` method.
796
+ # - DO NOT import common_device_type inside the file.
797
+ # `runpy.run_path` with `globals()` already properly setup the context so that
798
+ # `DeviceTypeTestBase` is already available.
799
+ # - Set a top-level variable `TEST_CLASS` equal to your new class.
800
+ # E.g. TEST_CLASS = XLATestBase
801
+ # - To run tests with new device type, set `TORCH_TEST_DEVICE` env variable to path
802
+ # to this file. Multiple paths can be separated by `:`.
803
+ # See pytorch/xla/test/pytorch_test_base.py for a more detailed example.
804
_TORCH_TEST_DEVICES = os.getenv("TORCH_TEST_DEVICES")
if _TORCH_TEST_DEVICES:
    # Each entry is a path to a file that defines TEST_CLASS; see
    # Note [How to extend DeviceTypeTestBase to add new test device].
    for device_file in _TORCH_TEST_DEVICES.split(":"):
        # runpy (a stdlib module) lacks annotations
        mod = runpy.run_path(device_file, init_globals=globals())  # type: ignore[func-returns-value]
        device_type_test_bases.append(mod["TEST_CLASS"])
810
+
811
+
812
+ PYTORCH_CUDA_MEMCHECK = os.getenv("PYTORCH_CUDA_MEMCHECK", "0") == "1"
813
+
814
+ PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY = "PYTORCH_TESTING_DEVICE_ONLY_FOR"
815
+ PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY = "PYTORCH_TESTING_DEVICE_EXCEPT_FOR"
816
+ PYTORCH_TESTING_DEVICE_FOR_CUSTOM_KEY = "PYTORCH_TESTING_DEVICE_FOR_CUSTOM"
817
+
818
+
819
def get_desired_device_type_test_bases(
    except_for=None, only_for=None, include_lazy=False, allow_mps=False, allow_xpu=False
):
    """Return the device test base classes tests should be instantiated for.

    Starts from the globally-discovered bases, optionally adds MPS/XPU/HPU and
    lazy bases, then filters via `except_for`/`only_for` arguments, the
    PYTORCH_TESTING_DEVICE_FOR_CUSTOM env var, and finally the
    PYTORCH_TESTING_DEVICE_ONLY_FOR / PYTORCH_TESTING_DEVICE_EXCEPT_FOR
    env vars.
    """
    # allow callers to specifically opt tests into being tested on MPS, similar to `include_lazy`
    test_bases = device_type_test_bases.copy()
    if allow_mps and TEST_MPS and MPSTestBase not in test_bases:
        test_bases.append(MPSTestBase)
    if allow_xpu and TEST_XPU and XPUTestBase not in test_bases:
        test_bases.append(XPUTestBase)
    if TEST_HPU and HPUTestBase not in test_bases:
        test_bases.append(HPUTestBase)
    # Filter out the device types based on user inputs
    desired_device_type_test_bases = filter_desired_device_types(
        test_bases, except_for, only_for
    )
    if include_lazy:
        # Note [Lazy Tensor tests in device agnostic testing]
        # Right now, test_view_ops.py runs with LazyTensor.
        # We don't want to opt every device-agnostic test into using the lazy device,
        # because many of them will fail.
        # So instead, the only way to opt a specific device-agnostic test file into
        # lazy tensor testing is with include_lazy=True
        if IS_FBCODE:
            print(
                "TorchScript backend not yet supported in FBCODE/OVRSOURCE builds",
                file=sys.stderr,
            )
        else:
            desired_device_type_test_bases.append(LazyTestBase)

    def split_if_not_empty(x: str):
        # "" -> [] ("".split(",") would otherwise yield [""])
        return x.split(",") if x else []

    # run some cuda testcases on other devices if available
    # Usage:
    # export PYTORCH_TESTING_DEVICE_FOR_CUSTOM=privateuse1
    env_custom_only_for = split_if_not_empty(
        os.getenv(PYTORCH_TESTING_DEVICE_FOR_CUSTOM_KEY, "")
    )
    if env_custom_only_for:
        desired_device_type_test_bases += filter(
            lambda x: x.device_type in env_custom_only_for, test_bases
        )
        # Deduplicate; NOTE(review): list(set(...)) does not preserve order.
        desired_device_type_test_bases = list(set(desired_device_type_test_bases))

    # Filter out the device types based on environment variables if available
    # Usage:
    # export PYTORCH_TESTING_DEVICE_ONLY_FOR=cuda,cpu
    # export PYTORCH_TESTING_DEVICE_EXCEPT_FOR=xla
    env_only_for = split_if_not_empty(
        os.getenv(PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, "")
    )
    env_except_for = split_if_not_empty(
        os.getenv(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, "")
    )

    return filter_desired_device_types(
        desired_device_type_test_bases, env_except_for, env_only_for
    )
878
+
879
+
880
+ # Adds 'instantiated' device-specific test cases to the given scope.
881
+ # The tests in these test cases are derived from the generic tests in
882
+ # generic_test_class. This function should be used instead of
883
+ # instantiate_parametrized_tests() if the test class contains
884
+ # device-specific tests (NB: this supports additional @parametrize usage).
885
+ #
886
+ # See note "Writing Test Templates"
887
+ # TODO: remove the "allow_xpu" option after Intel GPU supports all test cases instantiated by this function.
888
def instantiate_device_type_tests(
    generic_test_class,
    scope,
    except_for=None,
    only_for=None,
    include_lazy=False,
    allow_mps=False,
    allow_xpu=False,
):
    """Instantiate device-specific test classes from a generic test class.

    For each desired device base (CPU, CUDA, ...) creates a class named
    `<GenericName><DEVICETYPE>` in `scope` holding device/dtype-specialized
    versions of the generic tests, and removes the generic class from `scope`
    so its non-runnable templates are not discovered.
    """
    # Removes the generic test class from its enclosing scope so its tests
    # are not discoverable.
    del scope[generic_test_class.__name__]

    # Creates an 'empty' version of the generic_test_class
    # Note: we don't inherit from the generic_test_class directly because
    # that would add its tests to our test classes and they would be
    # discovered (despite not being runnable). Inherited methods also
    # can't be removed later, and we can't rely on load_tests because
    # pytest doesn't support it (as of this writing).
    empty_name = generic_test_class.__name__ + "_base"
    empty_class = type(empty_name, generic_test_class.__bases__, {})

    # Acquires members names
    # See Note [Overriding methods in generic tests]
    generic_members = set(generic_test_class.__dict__.keys()) - set(
        empty_class.__dict__.keys()
    )
    generic_tests = [x for x in generic_members if x.startswith("test")]

    # Creates device-specific test cases
    for base in get_desired_device_type_test_bases(
        except_for, only_for, include_lazy, allow_mps, allow_xpu
    ):
        class_name = generic_test_class.__name__ + base.device_type.upper()

        # type set to Any and suppressed due to unsupported runtime class:
        # https://github.com/python/mypy/wiki/Unsupported-Python-Features
        device_type_test_class: Any = type(class_name, (base, empty_class), {})

        for name in generic_members:
            if name in generic_tests:  # Instantiates test member
                test = getattr(generic_test_class, name)
                # XLA-compat shim (XLA's instantiate_test doesn't take generic_cls)
                sig = inspect.signature(device_type_test_class.instantiate_test)
                if len(sig.parameters) == 3:
                    # Instantiates the device-specific tests
                    device_type_test_class.instantiate_test(
                        name, copy.deepcopy(test), generic_cls=generic_test_class
                    )
                else:
                    device_type_test_class.instantiate_test(name, copy.deepcopy(test))
            else:  # Ports non-test member
                assert (
                    name not in device_type_test_class.__dict__
                ), f"Redefinition of directly defined member {name}"
                nontest = getattr(generic_test_class, name)
                setattr(device_type_test_class, name, nontest)

        # The dynamically-created test class derives from the test template class
        # and the empty class. Arrange for both setUpClass and tearDownClass methods
        # to be called. This allows the parameterized test classes to support setup
        # and teardown.
        @classmethod
        def _setUpClass(cls):
            base.setUpClass()
            empty_class.setUpClass()

        @classmethod
        def _tearDownClass(cls):
            empty_class.tearDownClass()
            base.tearDownClass()

        device_type_test_class.setUpClass = _setUpClass
        device_type_test_class.tearDownClass = _tearDownClass

        # Mimics defining the instantiated class in the caller's file
        # by setting its module to the given class's and adding
        # the module to the given scope.
        # This lets the instantiated class be discovered by unittest.
        device_type_test_class.__module__ = generic_test_class.__module__
        scope[class_name] = device_type_test_class
969
+
970
+
971
+ # Category of dtypes to run an OpInfo-based test for
972
+ # Example use: @ops(dtype=OpDTypes.supported)
973
+ #
974
+ # The categories are:
975
+ # - supported: Every dtype supported by the operator. Use for exhaustive
976
+ # testing of all dtypes.
977
+ # - unsupported: Run tests on dtypes not supported by the operator. e.g. for
978
+ # testing the operator raises an error and doesn't crash.
979
+ # - supported_backward: Every dtype supported by the operator's backward pass.
980
+ # - unsupported_backward: Run tests on dtypes not supported by the operator's backward pass.
981
+ # - any_one: Runs a test for one dtype the operator supports. Prioritizes dtypes the
982
+ # operator supports in both forward and backward.
983
+ # - none: Useful for tests that are not dtype-specific. No dtype will be passed to the test
984
+ # when this is selected.
985
class OpDTypes(Enum):
    """Dtype-selection categories for OpInfo-based tests (see the comment above)."""

    supported = 0  # Test all supported dtypes (default)
    unsupported = 1  # Test only unsupported dtypes
    supported_backward = 2  # Test all supported backward dtypes
    unsupported_backward = 3  # Test only unsupported backward dtypes
    any_one = 4  # Test precisely one supported dtype
    none = 5  # Instantiate no dtype variants (no dtype kwarg needed)
    any_common_cpu_cuda_one = (
        6  # Test precisely one supported dtype that is common to both cuda and cpu
    )
995
+
996
+
997
# Arbitrary order
# Preference order used when a single representative dtype must be picked
# (e.g. OpDTypes.any_one / any_common_cpu_cuda_one): the first entry found in
# the op's supported set wins.
ANY_DTYPE_ORDER = (
    torch.float32,
    torch.float64,
    torch.complex64,
    torch.complex128,
    torch.float16,
    torch.bfloat16,
    torch.long,
    torch.int32,
    torch.int16,
    torch.int8,
    torch.uint8,
    torch.bool,
)
1012
+
1013
+
1014
+ def _serialize_sample(sample_input):
1015
+ # NB: For OpInfos, SampleInput.summary() prints in a cleaner way.
1016
+ if getattr(sample_input, "summary", None) is not None:
1017
+ return sample_input.summary()
1018
+ return str(sample_input)
1019
+
1020
+
1021
+ # Decorator that defines the OpInfos a test template should be instantiated for.
1022
+ #
1023
+ # Example usage:
1024
+ #
1025
+ # @ops(unary_ufuncs)
1026
+ # def test_numerics(self, device, dtype, op):
1027
+ # <test_code>
1028
+ #
1029
+ # This will instantiate variants of test_numerics for each given OpInfo,
1030
+ # on each device the OpInfo's operator supports, and for every dtype supported by
1031
+ # that operator. There are a few caveats to the dtype rule, explained below.
1032
+ #
1033
+ # The @ops decorator can accept two
1034
+ # additional arguments, "dtypes" and "allowed_dtypes". If "dtypes" is specified
1035
+ # then the test variants are instantiated for those dtypes, regardless of
1036
+ # what the operator supports. If given "allowed_dtypes" then test variants
1037
+ # are instantiated only for the intersection of allowed_dtypes and the dtypes
1038
+ # they would otherwise be instantiated with. That is, allowed_dtypes composes
1039
+ # with the options listed above and below.
1040
+ #
1041
+ # The "dtypes" argument can also accept additional values (see OpDTypes above):
1042
+ # OpDTypes.supported - the test is instantiated for all dtypes the operator
1043
+ # supports
1044
+ # OpDTypes.unsupported - the test is instantiated for all dtypes the operator
1045
+ # doesn't support
1046
+ # OpDTypes.supported_backward - the test is instantiated for all dtypes the
1047
+ # operator's gradient formula supports
1048
+ # OpDTypes.unsupported_backward - the test is instantiated for all dtypes the
1049
+ # operator's gradient formula doesn't support
1050
+ # OpDTypes.any_one - the test is instantiated for one dtype the
1051
+ # operator supports. The dtype supports forward and backward if possible.
1052
+ # OpDTypes.none - the test is instantiated without any dtype. The test signature
1053
+ # should not include a dtype kwarg in this case.
1054
+ #
1055
+ # These options allow tests to have considerable control over the dtypes
1056
+ # they're instantiated for.
1057
+
1058
+
1059
class ops(_TestParametrizer):
    """Decorator defining the OpInfos a test template is instantiated for.

    The test is instantiated for each op in `op_list`, on each device, for
    every dtype selected by `dtypes` (an OpDTypes category or an explicit
    sequence of dtypes), optionally intersected with `allowed_dtypes`.

    Bug fix vs. the original: the `any_one` / `any_common_cpu_cuda_one`
    fallback previously assigned `dtypes = {}` (an empty *dict*), which made
    the later `dtypes.intersection(self.allowed_dtypes)` raise
    AttributeError; it now assigns `set()`.
    """

    def __init__(
        self,
        op_list,
        *,
        dtypes: Union[OpDTypes, Sequence[torch.dtype]] = OpDTypes.supported,
        allowed_dtypes: Optional[Sequence[torch.dtype]] = None,
        skip_if_dynamo=True,
    ):
        self.op_list = list(op_list)
        self.opinfo_dtypes = dtypes
        self.allowed_dtypes = (
            set(allowed_dtypes) if allowed_dtypes is not None else None
        )
        self.skip_if_dynamo = skip_if_dynamo

    def _parametrize_test(self, test, generic_cls, device_cls):
        """Parameterizes the given test function across each op and its associated dtypes."""
        if device_cls is None:
            raise RuntimeError(
                "The @ops decorator is only intended to be used in a device-specific "
                "context; use it with instantiate_device_type_tests() instead of "
                "instantiate_parametrized_tests()"
            )

        # Sentinel to detect an empty (or exhausted) op_list after the loop.
        op = check_exhausted_iterator = object()
        for op in self.op_list:
            # Determine the set of dtypes to use.
            dtypes: Union[Set[torch.dtype], Set[None]]
            if isinstance(self.opinfo_dtypes, Sequence):
                dtypes = set(self.opinfo_dtypes)
            elif self.opinfo_dtypes == OpDTypes.unsupported_backward:
                dtypes = set(get_all_dtypes()).difference(
                    op.supported_backward_dtypes(device_cls.device_type)
                )
            elif self.opinfo_dtypes == OpDTypes.supported_backward:
                dtypes = op.supported_backward_dtypes(device_cls.device_type)
            elif self.opinfo_dtypes == OpDTypes.unsupported:
                dtypes = set(get_all_dtypes()).difference(
                    op.supported_dtypes(device_cls.device_type)
                )
            elif self.opinfo_dtypes == OpDTypes.supported:
                dtypes = set(op.supported_dtypes(device_cls.device_type))
            elif self.opinfo_dtypes == OpDTypes.any_one:
                # Tries to pick a dtype that supports both forward and backward
                supported = op.supported_dtypes(device_cls.device_type)
                supported_backward = op.supported_backward_dtypes(
                    device_cls.device_type
                )
                supported_both = supported.intersection(supported_backward)
                dtype_set = supported_both if len(supported_both) > 0 else supported
                for dtype in ANY_DTYPE_ORDER:
                    if dtype in dtype_set:
                        dtypes = {dtype}
                        break
                else:
                    # Must be an empty set (not {}; an empty dict would break
                    # the allowed_dtypes intersection below).
                    dtypes = set()
            elif self.opinfo_dtypes == OpDTypes.any_common_cpu_cuda_one:
                # Tries to pick a dtype that supports both CPU and CUDA
                supported = set(op.dtypes).intersection(op.dtypesIfCUDA)
                if supported:
                    dtypes = {
                        next(dtype for dtype in ANY_DTYPE_ORDER if dtype in supported)
                    }
                else:
                    dtypes = set()  # empty set, not {}, as above

            elif self.opinfo_dtypes == OpDTypes.none:
                dtypes = {None}
            else:
                raise RuntimeError(f"Unknown OpDType: {self.opinfo_dtypes}")

            if self.allowed_dtypes is not None:
                dtypes = dtypes.intersection(self.allowed_dtypes)

            # Construct the test name; device / dtype parts are handled outside.
            # See [Note: device and dtype suffix placement]
            test_name = op.formatted_name

            # Filter sample skips / xfails to only those that apply to the OpInfo.
            # These are defined on the test function via decorators.
            sample_skips_and_xfails = getattr(test, "sample_skips_and_xfails", None)
            if sample_skips_and_xfails is not None:
                sample_skips_and_xfails = [
                    rule
                    for rule in sample_skips_and_xfails
                    if rule.op_match_fn(device_cls.device_type, op)
                ]

            for dtype in dtypes:
                # Construct parameter kwargs to pass to the test.
                param_kwargs = {"op": op}
                _update_param_kwargs(param_kwargs, "dtype", dtype)

                # NOTE: test_wrapper exists because we don't want to apply
                # op-specific decorators to the original test.
                # Test-specific decorators are applied to the original test,
                # however.
                try:

                    @wraps(test)
                    def test_wrapper(*args, **kwargs):
                        try:
                            return test(*args, **kwargs)
                        except unittest.SkipTest as e:
                            raise e
                        except Exception as e:
                            # Annotate the failure with the sample input that
                            # triggered it, when tracking is enabled.
                            tracked_input = get_tracked_input()
                            if PRINT_REPRO_ON_FAILURE and tracked_input is not None:
                                e_tracked = Exception(  # noqa: TRY002
                                    f"Caused by {tracked_input.type_desc} "
                                    f"at index {tracked_input.index}: "
                                    f"{_serialize_sample(tracked_input.val)}"
                                )
                                e_tracked._tracked_input = tracked_input  # type: ignore[attr]
                                raise e_tracked from e
                            raise e
                        finally:
                            clear_tracked_input()

                    if self.skip_if_dynamo and not TEST_WITH_TORCHINDUCTOR:
                        test_wrapper = skipIfTorchDynamo(
                            "Policy: we don't run OpInfo tests w/ Dynamo"
                        )(test_wrapper)

                    # Initialize info for the last input seen. This is useful for tracking
                    # down which inputs caused a test failure. Note that TrackedInputIter is
                    # responsible for managing this.
                    test.tracked_input = None

                    decorator_fn = partial(
                        op.get_decorators,
                        generic_cls.__name__,
                        test.__name__,
                        device_cls.device_type,
                        dtype,
                    )

                    if sample_skips_and_xfails is not None:
                        test_wrapper.sample_skips_and_xfails = sample_skips_and_xfails

                    yield (test_wrapper, test_name, param_kwargs, decorator_fn)
                except Exception as ex:
                    # Provides an error message for debugging before rethrowing the exception
                    print(f"Failed to instantiate {test_name} for op {op.name}!")
                    raise ex
        if op is check_exhausted_iterator:
            raise ValueError(
                "An empty op_list was passed to @ops. "
                "Note that this may result from reuse of a generator."
            )
1210
+
1211
+
1212
+ # Decorator that skips a test if the given condition is true.
1213
+ # Notes:
1214
+ # (1) Skip conditions stack.
1215
+ # (2) Skip conditions can be bools or strings. If a string the
1216
+ # test base must have defined the corresponding attribute to be False
1217
+ # for the test to run. If you want to use a string argument you should
1218
+ # probably define a new decorator instead (see below).
1219
+ # (3) Prefer the existing decorators to defining the 'device_type' kwarg.
1220
class skipIf:
    """Decorator that skips a test when a dependency condition holds.

    Notes:
      (1) Skip conditions stack.
      (2) `dep` may be a bool, or a string naming an attribute on the test
          base: a string dep skips unless the attribute exists and is False.
      (3) Prefer the device-specific subclasses over passing `device_type`.
    """

    def __init__(self, dep, reason, device_type=None):
        self.dep = dep
        self.reason = reason
        self.device_type = device_type

    def __call__(self, fn):
        @wraps(fn)
        def dep_fn(slf, *args, **kwargs):
            # Apply only when no device_type was given, or when the test's
            # device matches it (a single type or an iterable of types).
            device_matches = (
                self.device_type is None
                or self.device_type == slf.device_type
                or (
                    isinstance(self.device_type, Iterable)
                    and slf.device_type in self.device_type
                )
            )
            if device_matches:
                dep = self.dep
                if isinstance(dep, str):
                    should_skip = getattr(slf, dep, True)
                else:
                    should_skip = isinstance(dep, bool) and dep
                if should_skip:
                    raise unittest.SkipTest(self.reason)

            return fn(slf, *args, **kwargs)

        return dep_fn
1245
+
1246
+
1247
# Skips a test on CPU if the condition is true.
class skipCPUIf(skipIf):
    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type="cpu")


# Skips a test on CUDA if the condition is true.
class skipCUDAIf(skipIf):
    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type="cuda")


# Skips a test on XPU if the condition is true.
class skipXPUIf(skipIf):
    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type="xpu")


# Skips a test on any GPU backend (the types listed in GPU_TYPES, i.e. XPU or
# CUDA) if the condition is true.
class skipGPUIf(skipIf):
    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type=GPU_TYPES)


# Skips a test on Lazy if the condition is true.
class skipLazyIf(skipIf):
    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type="lazy")


# Skips a test on Meta if the condition is true.
class skipMetaIf(skipIf):
    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type="meta")


# Skips a test on MPS if the condition is true.
class skipMPSIf(skipIf):
    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type="mps")


# Skips a test on HPU if the condition is true.
class skipHPUIf(skipIf):
    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type="hpu")


# Skips a test on XLA if the condition is true.
class skipXLAIf(skipIf):
    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type="xla")


# Skips a test on the PrivateUse1 (custom) backend if the condition is true.
# Note: the backend name is resolved at decoration time.
class skipPRIVATEUSE1If(skipIf):
    def __init__(self, dep, reason):
        device_type = torch._C._get_privateuse1_backend_name()
        super().__init__(dep, reason, device_type=device_type)
1304
+
1305
+
1306
+ def _has_sufficient_memory(device, size):
1307
+ if torch.device(device).type == "cuda":
1308
+ if not torch.cuda.is_available():
1309
+ return False
1310
+ gc.collect()
1311
+ torch.cuda.empty_cache()
1312
+ # torch.cuda.mem_get_info, aka cudaMemGetInfo, returns a tuple of (free memory, total memory) of a GPU
1313
+ if device == "cuda":
1314
+ device = "cuda:0"
1315
+ return (
1316
+ torch.cuda.memory.mem_get_info(device)[0]
1317
+ * torch.cuda.memory.get_per_process_memory_fraction(device)
1318
+ ) >= size
1319
+
1320
+ if device == "xla":
1321
+ raise unittest.SkipTest("TODO: Memory availability checks for XLA?")
1322
+
1323
+ if device == "xpu":
1324
+ raise unittest.SkipTest("TODO: Memory availability checks for Intel GPU?")
1325
+
1326
+ if device != "cpu":
1327
+ raise unittest.SkipTest("Unknown device type")
1328
+
1329
+ # CPU
1330
+ if not HAS_PSUTIL:
1331
+ raise unittest.SkipTest("Need psutil to determine if memory is sufficient")
1332
+
1333
+ # The sanitizers have significant memory overheads
1334
+ if TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN:
1335
+ effective_size = size * 10
1336
+ else:
1337
+ effective_size = size
1338
+
1339
+ if psutil.virtual_memory().available < effective_size:
1340
+ gc.collect()
1341
+ return psutil.virtual_memory().available >= effective_size
1342
+
1343
+
1344
def largeTensorTest(size, device=None):
    """Skip test if the device has insufficient memory to run the test

    size may be a number of bytes, a string of the form "N GB" (N may be
    fractional, e.g. "1.5GB"), or a callable computing the size from the
    test's arguments

    If the test is a device generic test, available memory on the primary
    device will be checked. It can also be overridden by the optional
    `device=` argument. In other tests, the `device=` argument needs to be
    specified.
    """
    if isinstance(size, str):
        assert size.endswith(("GB", "gb")), "only bytes or GB supported"
        # float() (rather than int()) also accepts fractional sizes like "1.5GB"
        size = int(1024**3 * float(size[:-2]))

    def inner(fn):
        @wraps(fn)
        def dep_fn(self, *args, **kwargs):
            # A callable size is evaluated against the test's own arguments.
            size_bytes = size(self, *args, **kwargs) if callable(size) else size
            # Default to the primary device of device-generic tests.
            _device = device if device is not None else self.get_primary_device()
            if not _has_sufficient_memory(_device, size_bytes):
                raise unittest.SkipTest(f"Insufficient {_device} memory")

            return fn(self, *args, **kwargs)

        return dep_fn

    return inner
1370
+
1371
+
1372
class expectedFailure:
    """Marks a device-generic test as expected to fail on `device_type`
    (or on every device type when `device_type` is None). The wrapped test
    passes when the original raises and fails when the original passes."""

    def __init__(self, device_type):
        self.device_type = device_type

    def __call__(self, fn):
        @wraps(fn)
        def efail_fn(slf, *args, **kwargs):
            # Device-generic tests carry `device_type`; some test instances
            # only expose a `device` string, so fall back to that.
            use_device_str = (
                not hasattr(slf, "device_type")
                and hasattr(slf, "device")
                and isinstance(slf.device, str)
            )
            target_device_type = slf.device if use_device_str else slf.device_type

            if self.device_type is None or self.device_type == target_device_type:
                try:
                    fn(slf, *args, **kwargs)
                except Exception:
                    return
                else:
                    slf.fail("expected test to fail, but it passed")

            return fn(slf, *args, **kwargs)

        return efail_fn
1399
+
1400
+
1401
class onlyOn:
    """Restricts a device-generic test to run only on the given device type;
    other device types raise unittest.SkipTest."""

    def __init__(self, device_type):
        self.device_type = device_type

    def __call__(self, fn):
        @wraps(fn)
        def only_fn(slf, *args, **kwargs):
            if slf.device_type != self.device_type:
                raise unittest.SkipTest(f"Only runs on {self.device_type}")
            return fn(slf, *args, **kwargs)

        return only_fn
1415
+
1416
+
1417
+ # Decorator that provides all available devices of the device type to the test
1418
+ # as a list of strings instead of providing a single device string.
1419
+ # Skips the test if the number of available devices of the variant's device
1420
+ # type is less than the 'num_required_devices' arg.
1421
class deviceCountAtLeast:
    """Provides all available devices of the variant's device type to the test
    as a list of strings (instead of a single device string), skipping when
    fewer than `num_required_devices` are available."""

    def __init__(self, num_required_devices):
        self.num_required_devices = num_required_devices

    def __call__(self, fn):
        assert not hasattr(
            fn, "num_required_devices"
        ), f"deviceCountAtLeast redefinition for {fn.__name__}"
        # Recorded on the function so the test instantiation machinery can see it.
        fn.num_required_devices = self.num_required_devices

        @wraps(fn)
        def multi_fn(slf, devices, *args, **kwargs):
            if len(devices) < self.num_required_devices:
                raise unittest.SkipTest(
                    f"fewer than {self.num_required_devices} devices detected"
                )
            return fn(slf, devices, *args, **kwargs)

        return multi_fn
1440
+
1441
+
1442
+ # Only runs the test on the native device type (currently CPU, CUDA, Meta and PRIVATEUSE1)
1443
def onlyNativeDeviceTypes(fn):
    """Skips the test unless it runs on a native device type
    (membership is defined by the module-level NATIVE_DEVICES set)."""

    @wraps(fn)
    def only_fn(self, *args, **kwargs):
        if self.device_type in NATIVE_DEVICES:
            return fn(self, *args, **kwargs)
        raise unittest.SkipTest(
            f"onlyNativeDeviceTypes: doesn't run on {self.device_type}"
        )

    return only_fn
1453
+
1454
+
1455
+ # Only runs the test on the native device types and devices specified in the devices list
1456
# Only runs the test on the native device types and devices specified in the devices list
def onlyNativeDeviceTypesAnd(devices=None):
    """Skips the test unless its device type is native (in NATIVE_DEVICES)
    or listed in `devices`.

    Args:
        devices: optional iterable of extra allowed device type strings.
    """

    def decorator(fn):
        @wraps(fn)
        def only_fn(self, *args, **kwargs):
            # Treat `devices=None` (the default) as "no extra devices";
            # previously `self.device_type not in None` raised TypeError.
            extra = devices if devices is not None else ()
            if (
                self.device_type not in NATIVE_DEVICES
                and self.device_type not in extra
            ):
                reason = f"onlyNativeDeviceTypesAnd {devices} : doesn't run on {self.device_type}"
                raise unittest.SkipTest(reason)

            return fn(self, *args, **kwargs)

        return only_fn

    return decorator
1472
+
1473
+
1474
+ # Specifies per-dtype precision overrides.
1475
+ # Ex.
1476
+ #
1477
+ # @precisionOverride({torch.half : 1e-2, torch.float : 1e-4})
1478
+ # @dtypes(torch.half, torch.float, torch.double)
1479
+ # def test_X(self, device, dtype):
1480
+ # ...
1481
+ #
1482
+ # When the test is instantiated its class's precision will be set to the
1483
+ # corresponding override, if it exists.
1484
+ # self.precision can be accessed directly, and it also controls the behavior of
1485
+ # functions like self.assertEqual().
1486
+ #
1487
+ # Note that self.precision is a scalar value, so if you require multiple
1488
+ # precisions (or are working with multiple dtypes) they should be specified
1489
+ # explicitly and computed using self.precision (e.g.
1490
+ # self.precision *2, max(1, self.precision)).
1491
class precisionOverride:
    """Attaches per-dtype precision overrides to a test function.

    Example:
        @precisionOverride({torch.half: 1e-2, torch.float: 1e-4})
        @dtypes(torch.half, torch.float, torch.double)
        def test_X(self, device, dtype): ...

    When the test is instantiated, its class's precision is set to the
    matching override; self.precision is a scalar and also controls helpers
    such as self.assertEqual().
    """

    def __init__(self, d):
        assert isinstance(
            d, dict
        ), "precisionOverride not given a dtype : precision dict!"
        for dtype in d:
            assert isinstance(
                dtype, torch.dtype
            ), f"precisionOverride given unknown dtype {dtype}"
        self.d = d

    def __call__(self, fn):
        # Stored on the function; consumed when the test class is instantiated.
        fn.precision_overrides = self.d
        return fn
1506
+
1507
+
1508
+ # Specifies per-dtype tolerance overrides tol(atol, rtol). It has priority over
1509
+ # precisionOverride.
1510
+ # Ex.
1511
+ #
1512
+ # @toleranceOverride({torch.float : tol(atol=1e-2, rtol=1e-3},
1513
+ # torch.double : tol{atol=1e-4, rtol = 0})
1514
+ # @dtypes(torch.half, torch.float, torch.double)
1515
+ # def test_X(self, device, dtype):
1516
+ # ...
1517
+ #
1518
+ # When the test is instantiated its class's tolerance will be set to the
1519
+ # corresponding override, if it exists.
1520
+ # self.rtol and self.precision can be accessed directly, and they also control
1521
+ # the behavior of functions like self.assertEqual().
1522
+ #
1523
+ # The above example sets atol = 1e-2 and rtol = 1e-3 for torch.float and
1524
+ # atol = 1e-4 and rtol = 0 for torch.double.
1525
# Named tuple pairing an absolute tolerance (atol) with a relative tolerance (rtol);
# used as the value type for toleranceOverride.
tol = namedtuple("tol", ["atol", "rtol"])
1526
+
1527
+
1528
class toleranceOverride:
    """Attaches per-dtype tol(atol, rtol) overrides to a test function.
    Has priority over precisionOverride; consumed at test instantiation."""

    def __init__(self, d):
        assert isinstance(d, dict), "toleranceOverride not given a dtype : tol dict!"
        for dtype, prec in d.items():
            assert isinstance(dtype, torch.dtype), f"toleranceOverride given unknown dtype {dtype}"
            assert isinstance(prec, tol), "toleranceOverride not given a dtype : tol dict!"
        self.d = d

    def __call__(self, fn):
        fn.tolerance_overrides = self.d
        return fn
1544
+
1545
+
1546
+ # Decorator that instantiates a variant of the test for each given dtype.
1547
+ # Notes:
1548
+ # (1) Tests that accept the dtype argument MUST use this decorator.
1549
+ # (2) Can be overridden for CPU or CUDA, respectively, using dtypesIfCPU
1550
+ # or dtypesIfCUDA.
1551
+ # (3) Can accept an iterable of dtypes or an iterable of tuples
1552
+ # of dtypes.
1553
+ # Examples:
1554
+ # @dtypes(torch.float32, torch.float64)
1555
+ # @dtypes((torch.long, torch.float32), (torch.int, torch.float64))
1556
class dtypes:
    """Instantiates a variant of the test for each given dtype.

    Notes:
        (1) Tests that accept the dtype argument MUST use this decorator.
        (2) Can be specialized per device via dtypesIfCPU / dtypesIfCUDA / etc.
        (3) Accepts either an iterable of dtypes or an iterable of tuples of dtypes.

    Examples:
        @dtypes(torch.float32, torch.float64)
        @dtypes((torch.long, torch.float32), (torch.int, torch.float64))
    """

    def __init__(self, *args, device_type="all"):
        if args and isinstance(args[0], (list, tuple)):
            # Tuple-of-dtypes mode: every variant must itself be a list/tuple.
            for arg in args:
                assert isinstance(arg, (list, tuple)), (
                    "When one dtype variant is a tuple or list, "
                    "all dtype variants must be. "
                    f"Received non-list non-tuple dtype {str(arg)}"
                )
                assert all(
                    isinstance(dtype, torch.dtype) for dtype in arg
                ), f"Unknown dtype in {str(arg)}"
        else:
            assert all(
                isinstance(arg, torch.dtype) for arg in args
            ), f"Unknown dtype in {str(args)}"

        self.args = args
        self.device_type = device_type

    def __call__(self, fn):
        # Accumulate per-device-type dtype lists on the function object.
        registry = getattr(fn, "dtypes", {})
        assert (
            self.device_type not in registry
        ), f"dtypes redefinition for {self.device_type}"
        registry[self.device_type] = self.args
        fn.dtypes = registry
        return fn
1582
+
1583
+
1584
# Overrides specified dtypes on the CPU.
class dtypesIfCPU(dtypes):
    def __init__(self, *args):
        super().__init__(*args, device_type="cpu")


# Overrides specified dtypes on CUDA.
class dtypesIfCUDA(dtypes):
    def __init__(self, *args):
        super().__init__(*args, device_type="cuda")


# Overrides specified dtypes on MPS.
class dtypesIfMPS(dtypes):
    def __init__(self, *args):
        super().__init__(*args, device_type="mps")


# Overrides specified dtypes on HPU.
class dtypesIfHPU(dtypes):
    def __init__(self, *args):
        super().__init__(*args, device_type="hpu")


# Overrides specified dtypes on the registered PrivateUse1 backend, whatever
# name that backend was registered under.
class dtypesIfPRIVATEUSE1(dtypes):
    def __init__(self, *args):
        super().__init__(*args, device_type=torch._C._get_privateuse1_backend_name())
1609
+
1610
+
1611
# Convenience wrappers around onlyOn for common device types.
def onlyCPU(fn):
    return onlyOn("cpu")(fn)


def onlyCUDA(fn):
    return onlyOn("cuda")(fn)


def onlyMPS(fn):
    return onlyOn("mps")(fn)


def onlyXPU(fn):
    return onlyOn("xpu")(fn)


def onlyHPU(fn):
    return onlyOn("hpu")(fn)
1629
+
1630
+
1631
def onlyPRIVATEUSE1(fn):
    # Restrict the test to the registered PrivateUse1 backend; skip the test
    # entirely when torch exposes no module under that backend's name.
    device_type = torch._C._get_privateuse1_backend_name()
    device_mod = getattr(torch, device_type, None)
    if device_mod is None:
        reason = f"Skip as torch has no module of {device_type}"
        return unittest.skip(reason)(fn)
    return onlyOn(device_type)(fn)
1638
+
1639
+
1640
+ def onlyCUDAAndPRIVATEUSE1(fn):
1641
+ @wraps(fn)
1642
+ def only_fn(self, *args, **kwargs):
1643
+ if self.device_type not in ("cuda", torch._C._get_privateuse1_backend_name()):
1644
+ reason = f"onlyCUDAAndPRIVATEUSE1: doesn't run on {self.device_type}"
1645
+ raise unittest.SkipTest(reason)
1646
+
1647
+ return fn(self, *args, **kwargs)
1648
+
1649
+ return only_fn
1650
+
1651
+
1652
def disablecuDNN(fn):
    """Runs the test with cuDNN disabled (when on CUDA and cuDNN is present);
    otherwise runs the test unchanged."""

    @wraps(fn)
    def disable_cudnn(self, *args, **kwargs):
        # Short-circuit: has_cudnn() is only consulted on CUDA.
        if self.device_type != "cuda" or not self.has_cudnn():
            return fn(self, *args, **kwargs)
        with torch.backends.cudnn.flags(enabled=False):
            return fn(self, *args, **kwargs)

    return disable_cudnn
1661
+
1662
+
1663
def disableMkldnn(fn):
    """Runs the test with mkldnn disabled when it is available;
    otherwise runs the test unchanged."""

    @wraps(fn)
    def disable_mkldnn(self, *args, **kwargs):
        if not torch.backends.mkldnn.is_available():
            return fn(self, *args, **kwargs)
        with torch.backends.mkldnn.flags(enabled=False):
            return fn(self, *args, **kwargs)

    return disable_mkldnn
1672
+
1673
+
1674
# Convenience wrappers around expectedFailure for common device types.
def expectedFailureCPU(fn):
    return expectedFailure("cpu")(fn)


def expectedFailureCUDA(fn):
    return expectedFailure("cuda")(fn)


def expectedFailureXPU(fn):
    return expectedFailure("xpu")(fn)


def expectedFailureMeta(fn):
    # Additionally wrapped in skipIfTorchDynamo() — the test is skipped under
    # Dynamo rather than treated as an expected failure there.
    return skipIfTorchDynamo()(expectedFailure("meta")(fn))


def expectedFailureXLA(fn):
    return expectedFailure("xla")(fn)


def expectedFailureHPU(fn):
    return expectedFailure("hpu")(fn)


def expectedFailureMPS(fn):
    return expectedFailure("mps")(fn)
1700
+
1701
+
1702
def expectedFailureMPSPre15(fn):
    """Marks the test as an expected MPS failure on macOS versions below 15;
    on non-macOS platforms (mac_ver() is empty) the test is returned unchanged."""
    import platform

    major_minor = ".".join(platform.mac_ver()[0].split(".")[:2])
    # Off macOS, mac_ver() yields ""; `or -1` maps that to a -1.0 sentinel.
    version = float(major_minor or -1)
    if not version or version < 1.0:  # cpu or other unsupported device
        return fn
    return expectedFailure("mps")(fn) if version < 15.0 else fn
1711
+
1712
+
1713
def expectedFailureMPSPre14(fn):
    # Marks the test as an expected MPS failure on macOS versions below 14.
    import platform

    # mac_ver() returns "" off macOS; `or -1` maps that to a -1.0 sentinel,
    # which falls into the "unsupported device" early return below.
    version = float(".".join(platform.mac_ver()[0].split(".")[:2]) or -1)
    if not version or version < 1.0:  # cpu or other unsupported device
        return fn
    if version < 14.0:
        return expectedFailure("mps")(fn)
    return fn
1722
+
1723
+
1724
# Skips a test on CPU if LAPACK is not available.
def skipCPUIfNoLapack(fn):
    return skipCPUIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")(fn)


# Skips a test on CPU if FFT is not available.
def skipCPUIfNoFFT(fn):
    return skipCPUIf(not torch._C.has_spectral, "PyTorch is built without FFT support")(
        fn
    )


# Skips a test on CPU if MKL is not available.
def skipCPUIfNoMkl(fn):
    return skipCPUIf(not TEST_MKL, "PyTorch is built without MKL support")(fn)


# Skips a test on CPU if MKL Sparse is not available (it's not linked on Windows).
def skipCPUIfNoMklSparse(fn):
    return skipCPUIf(
        IS_WINDOWS or not TEST_MKL, "PyTorch is built without MKL support"
    )(fn)


# Skips a test on CPU if mkldnn is not available.
# NOTE: availability is evaluated once, at decoration (import) time.
def skipCPUIfNoMkldnn(fn):
    return skipCPUIf(
        not torch.backends.mkldnn.is_available(),
        "PyTorch is built without mkldnn support",
    )(fn)
1754
+
1755
+
1756
# Skips a test on CUDA if MAGMA is not available.
def skipCUDAIfNoMagma(fn):
    # "no_magma" is passed as a sentinel condition — presumably resolved by
    # skipCUDAIf against the device test class at run time; confirm in skipCUDAIf.
    # Also wraps with skipCUDANonDefaultStreamIf(True), forcing the default stream.
    return skipCUDAIf("no_magma", "no MAGMA library detected")(
        skipCUDANonDefaultStreamIf(True)(fn)
    )
1761
+
1762
+
1763
def has_cusolver():
    # cuSOLVER is considered available on every non-ROCm build;
    # ROCm builds use hipSOLVER instead (see has_hipsolver).
    return not TEST_WITH_ROCM
1765
+
1766
+
1767
def has_hipsolver():
    # Returns True when the detected ROCm version supports hipSOLVER.
    rocm_version = _get_torch_rocm_version()
    # hipSOLVER is disabled on ROCM < 5.3
    return rocm_version >= (5, 3)
1771
+
1772
+
1773
# Skips a test on CUDA/ROCM if cuSOLVER/hipSOLVER is not available
def skipCUDAIfNoCusolver(fn):
    return skipCUDAIf(
        not has_cusolver() and not has_hipsolver(), "cuSOLVER not available"
    )(fn)


# Skips a test if both cuSOLVER and MAGMA are not available
# NOTE: the availability decision is made at decoration (import) time.
def skipCUDAIfNoMagmaAndNoCusolver(fn):
    if has_cusolver():
        return fn
    else:
        # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA
        return skipCUDAIfNoMagma(fn)


# Skips a test if both cuSOLVER/hipSOLVER and MAGMA are not available
def skipCUDAIfNoMagmaAndNoLinalgsolver(fn):
    if has_cusolver() or has_hipsolver():
        return fn
    else:
        # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA
        return skipCUDAIfNoMagma(fn)
1796
+
1797
+
1798
# Skips a test on CUDA when using ROCm.
def skipCUDAIfRocm(func=None, *, msg="test doesn't currently work on the ROCm stack"):
    """Usable both bare (@skipCUDAIfRocm) and parameterized (@skipCUDAIfRocm(msg=...))."""

    def dec_fn(fn):
        return skipCUDAIf(TEST_WITH_ROCM, reason=f"skipCUDAIfRocm: {msg}")(fn)

    return dec_fn(func) if func else dec_fn
1807
+
1808
+
1809
# Skips a test on CUDA when not using ROCm (i.e. the test is ROCm-only).
def skipCUDAIfNotRocm(fn):
    return skipCUDAIf(
        not TEST_WITH_ROCM, "test doesn't currently work on the CUDA stack"
    )(fn)
1814
+
1815
+
1816
# Skips a test on CUDA if ROCm is unavailable or its version is lower than requested.
def skipCUDAIfRocmVersionLessThan(version=None):
    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            if self.device_type == "cuda":
                if not TEST_WITH_ROCM:
                    reason = "ROCm not available"
                    raise unittest.SkipTest(reason)
                rocm_version_tuple = _get_torch_rocm_version()
                # NOTE: `version is None` also triggers the skip, so decorating
                # without a version argument skips on every ROCm build.
                if (
                    rocm_version_tuple is None
                    or version is None
                    or rocm_version_tuple < tuple(version)
                ):
                    reason = (
                        f"ROCm {rocm_version_tuple} is available but {version} required"
                    )
                    raise unittest.SkipTest(reason)

            return fn(self, *args, **kwargs)

        return wrap_fn

    return dec_fn
1841
+
1842
+
1843
# Skips a test on CUDA unless MIOpen's "suggest NHWC" activation mode is enabled
# (the original comment here incorrectly said "when using ROCm").
def skipCUDAIfNotMiopenSuggestNHWC(fn):
    return skipCUDAIf(
        not TEST_WITH_MIOPEN_SUGGEST_NHWC,
        "test doesn't currently work without MIOpen NHWC activation",
    )(fn)
1849
+
1850
+
1851
# Skips a test for specified CUDA versions, given in the form of a list of [major, minor]s.
def skipCUDAVersionIn(versions: Optional[List[Tuple[int, int]]] = None):
    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            current = _get_torch_cuda_version()
            # (0, 0) signals a CPU or ROCm build — nothing to skip there.
            if current != (0, 0) and current in (versions or []):
                raise unittest.SkipTest(f"test skipped for CUDA version {current}")
            return fn(self, *args, **kwargs)

        return wrap_fn

    return dec_fn
1867
+
1868
+
1869
# Skips a test for CUDA versions less than specified, given in the form of [major, minor].
def skipCUDAIfVersionLessThan(versions: Optional[Tuple[int, int]] = None):
    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            version = _get_torch_cuda_version()
            if version == (0, 0):  # cpu or rocm
                return fn(self, *args, **kwargs)
            # Guard against `versions=None` (no bound): comparing a tuple with
            # None raises TypeError.
            if versions is not None and version < versions:
                # Bug fix: report the required threshold (`versions`), not the
                # detected `version`, in the skip reason.
                reason = f"test skipped for CUDA versions < {versions}"
                raise unittest.SkipTest(reason)
            return fn(self, *args, **kwargs)

        return wrap_fn

    return dec_fn
1885
+
1886
+
1887
# Skips a test on CUDA if cuDNN is unavailable or its version is lower than requested.
def skipCUDAIfCudnnVersionLessThan(version=0):
    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            # Non-CUDA device types run the test unchanged.
            if self.device_type == "cuda":
                if self.no_cudnn:
                    raise unittest.SkipTest("cuDNN not available")
                if self.cudnn_version is None or self.cudnn_version < version:
                    raise unittest.SkipTest(
                        f"cuDNN version {self.cudnn_version} is available but {version} required"
                    )

            return fn(self, *args, **kwargs)

        return wrap_fn

    return dec_fn
1905
+
1906
+
1907
# Skips a test on CUDA if cuSparse generic API is not available
def skipCUDAIfNoCusparseGeneric(fn):
    return skipCUDAIf(not TEST_CUSPARSE_GENERIC, "cuSparse Generic API not available")(
        fn
    )


# Skips a test on ROCm if the hipSparse generic API is not available
def skipCUDAIfNoHipsparseGeneric(fn):
    return skipCUDAIf(
        not TEST_HIPSPARSE_GENERIC, "hipSparse Generic API not available"
    )(fn)


# Skips unless either the cuSparse or hipSparse generic API is available
def skipCUDAIfNoSparseGeneric(fn):
    return skipCUDAIf(
        not (TEST_CUSPARSE_GENERIC or TEST_HIPSPARSE_GENERIC),
        "Sparse Generic API not available",
    )(fn)


# Skips on CUDA when cuDNN is unavailable (version threshold of 0 means
# "any cuDNN at all").
def skipCUDAIfNoCudnn(fn):
    return skipCUDAIfCudnnVersionLessThan(0)(fn)


# Skips on ROCm builds (torch.version.hip is set), i.e. when MIOpen is in use.
def skipCUDAIfMiopen(fn):
    return skipCUDAIf(torch.version.hip is not None, "Marked as skipped for MIOpen")(fn)


# Skips unless this is a ROCm build with MIOpen available (also requires the
# cuDNN-level availability check to pass).
def skipCUDAIfNoMiopen(fn):
    return skipCUDAIf(torch.version.hip is None, "MIOpen is not available")(
        skipCUDAIfNoCudnn(fn)
    )
1939
+
1940
+
1941
# Unconditional per-backend skip helpers; each delegates to the corresponding
# conditional skip decorator with the condition hard-wired to True.
def skipLazy(fn):
    return skipLazyIf(True, "test doesn't work with lazy tensors")(fn)


def skipMeta(fn):
    return skipMetaIf(True, "test doesn't work with meta tensors")(fn)


def skipXLA(fn):
    return skipXLAIf(True, "Marked as skipped for XLA")(fn)


def skipMPS(fn):
    return skipMPSIf(True, "test doesn't work on MPS backend")(fn)


def skipHPU(fn):
    return skipHPUIf(True, "test doesn't work on HPU backend")(fn)


def skipPRIVATEUSE1(fn):
    return skipPRIVATEUSE1If(True, "test doesn't work on privateuse1 backend")(fn)
1963
+
1964
+
1965
# TODO: the "all" in the name isn't true anymore for quite some time as we have also have for example XLA and MPS now.
# This should probably enumerate all available device type test base classes.
def get_all_device_types() -> List[str]:
    """Returns the device types to test: always "cpu", plus "cuda" when available."""
    device_types = ["cpu"]
    if torch.cuda.is_available():
        device_types.append("cuda")
    return device_types
1969
+
1970
+
1971
# unittest.skipUnless decorator requiring CUDA, Triton, and a device compute
# capability of at least (8, 0); evaluated once at import time.
flex_attention_supported_platform = unittest.skipUnless(
    torch.cuda.is_available()
    and torch.utils._triton.has_triton()
    and torch.cuda.get_device_capability() >= (8, 0),
    "Requires CUDA and Triton",
)
lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py ADDED
@@ -0,0 +1,1541 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import abc
4
+ import faulthandler
5
+ import itertools
6
+ import logging
7
+ import multiprocessing
8
+ import os
9
+ import queue
10
+ import subprocess
11
+ import sys
12
+ import tempfile
13
+ import threading
14
+ import time
15
+ import traceback
16
+ import types
17
+ import unittest
18
+ from contextlib import contextmanager
19
+ from dataclasses import dataclass
20
+ from datetime import timedelta
21
+ from enum import Enum
22
+ from functools import partial, reduce, wraps
23
+ from io import StringIO
24
+ from typing import Dict, NamedTuple, Optional, Union, List, Any, Callable, Tuple
25
+ from unittest.mock import patch
26
+
27
+ from torch._logging._internal import trace_log
28
+ import torch
29
+ import torch._dynamo.test_case
30
+ import torch.cuda.nccl
31
+ import torch.distributed as c10d
32
+ from torch._C._autograd import DeviceType
33
+ from torch._C._distributed_c10d import _SymmetricMemory
34
+ import torch.nn as nn
35
+ from torch.testing._internal.common_utils import (
36
+ FILE_SCHEMA,
37
+ find_free_port,
38
+ IS_SANDCASTLE,
39
+ retry_on_connect_failures,
40
+ skip_but_pass_in_sandcastle,
41
+ skip_but_pass_in_sandcastle_if,
42
+ TEST_WITH_ROCM,
43
+ TEST_WITH_TSAN,
44
+ TestCase,
45
+ run_tests,
46
+ TEST_HPU,
47
+ )
48
+ from torch.testing._internal.distributed.multi_threaded_pg import (
49
+ _install_threaded_pg,
50
+ _uninstall_threaded_pg,
51
+ ProcessLocalGroup,
52
+ )
53
+ import operator
54
+
55
# Module-level logger; basicConfig is a no-op if the root logger was already configured.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
57
+
58
+
59
class TestSkip(NamedTuple):
    """A skip reason paired with the process exit code used to signal it."""

    exit_code: int  # exit code a subprocess uses to signal this skip
    message: str  # human-readable skip reason
62
+
63
+
64
# Registry of well-known skip conditions for multiprocess distributed tests.
# Worker processes exit with the listed code; presumably the parent harness
# maps the code back to a test skip — confirm against the multiprocess runner.
TEST_SKIPS = {
    "backend_unavailable": TestSkip(
        72, "Skipped because distributed backend is not available."
    ),
    "small_worldsize": TestSkip(73, "Skipped due to small world size."),
    "odd_worldsize": TestSkip(87, "Skipped due to odd world size."),
    "no_cuda": TestSkip(74, "CUDA is not available."),
    "multi-gpu-1": TestSkip(75, "Need at least 1 CUDA device"),
    "multi-gpu-2": TestSkip(77, "Need at least 2 CUDA devices"),
    "multi-gpu-3": TestSkip(80, "Need at least 3 CUDA devices"),
    "multi-gpu-4": TestSkip(81, "Need at least 4 CUDA devices"),
    "multi-gpu-5": TestSkip(82, "Need at least 5 CUDA devices"),
    "multi-gpu-6": TestSkip(83, "Need at least 6 CUDA devices"),
    "multi-gpu-7": TestSkip(84, "Need at least 7 CUDA devices"),
    "multi-gpu-8": TestSkip(85, "Need at least 8 CUDA devices"),
    "nccl": TestSkip(76, "c10d not compiled with NCCL support"),
    "skipIfRocm": TestSkip(78, "Test skipped for ROCm"),
    "no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"),
    "generic": TestSkip(
        86, "Test skipped at subprocess level, look at subprocess log for skip reason"
    ),
    "importerror": TestSkip(88, "Test skipped due to missing import"),
    "no_accelerator": TestSkip(89, "accelerator is not available."),
}
88
+
89
+
90
@dataclass
class DistTestCases:
    # NOTE: these are plain (un-annotated) class-level attributes — shared
    # across all instances, not per-instance dataclass fields.

    # Backends that do not support a specific collective
    skip_collective = {}
    skip_collective["allgather_coalesced"] = {"nccl", "mpi", "ucc"}
    skip_collective["reduce"] = set()
    skip_collective["sendrecv anysource"] = {"nccl", "ucc"}
    skip_collective["cpu barrier"] = {"nccl", "ucc"}

    # Sets showing that something is implemented
    backend_feature = {}
    backend_feature["gpu"] = {"nccl", "gloo", "ucc"}
    backend_feature["cuda"] = {"nccl", "gloo", "ucc"}
    backend_feature["ddp"] = {"nccl", "gloo", "ucc"}
    backend_feature["subgroup"] = {"nccl", "gloo", "ucc"}
    backend_feature["plugin"] = set()
    if TEST_HPU:
        backend_feature["hpu"] = {"hccl"}
108
+
109
+
110
def skip_if_no_gpu(func):
    """Skips if the world size exceeds the number of GPUs, ensuring that if the
    test is run, each rank has its own GPU via ``torch.cuda.device(rank)``.

    "Skipping" here means exiting the worker process with the matching
    TEST_SKIPS exit code.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        if not torch.cuda.is_available():
            sys.exit(TEST_SKIPS["no_cuda"].exit_code)
        world_size = int(os.environ["WORLD_SIZE"])
        if torch.cuda.device_count() < world_size:
            sys.exit(TEST_SKIPS[f"multi-gpu-{world_size}"].exit_code)
        # Bug fix: device_count is a function and must be called — comparing
        # the function object itself with `<` raised TypeError on HPU runs
        # (cf. skip_if_lt_x_gpu, which calls it correctly).
        if TEST_HPU and torch.hpu.device_count() < world_size:
            sys.exit(TEST_SKIPS[f"multi-gpu-{world_size}"].exit_code)

        return func(*args, **kwargs)

    return wrapper
127
+
128
+
129
# TODO (kwen2501): what is the purpose of this decorator? Tests with this
# decorator were always skipped. So they may be outdated already.
# Oct 2024: bumping the small-world criteria to < 8, as we are increasing the
# number of GPUs in CI from 2 to 4, and we need to continue skipping those tests
# to keep CI green. But this is just a temporary solution. We should clean up
# those tests somehow.
def skip_if_small_worldsize(func):
    """Exits with the `small_worldsize` skip code for non-MPI backends
    whose WORLD_SIZE is below 8."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        backend = os.environ["BACKEND"]
        if backend != "mpi" and int(os.environ["WORLD_SIZE"]) < 8:
            sys.exit(TEST_SKIPS["small_worldsize"].exit_code)
        return func(*args, **kwargs)

    return wrapper
144
+
145
+
146
def skip_if_odd_worldsize(func):
    """Exits with the `odd_worldsize` skip code for non-MPI backends
    whose WORLD_SIZE is odd."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        backend = os.environ["BACKEND"]
        if backend != "mpi" and int(os.environ["WORLD_SIZE"]) % 2 == 1:
            sys.exit(TEST_SKIPS["odd_worldsize"].exit_code)
        return func(*args, **kwargs)

    return wrapper
155
+
156
+
157
def require_n_gpus_for_nccl_backend(n, backend):
    """Exits with the multi-gpu skip code when the NCCL backend is requested
    but fewer than `n` CUDA devices are present; other backends run unchanged
    (and CUDA is never queried for them)."""

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            insufficient = backend == "nccl" and torch.cuda.device_count() < n
            if insufficient:
                sys.exit(TEST_SKIPS[f"multi-gpu-{n}"].exit_code)
            return func(*args, **kwargs)

        return wrapper

    return decorator
169
+
170
+
171
def import_transformers_or_skip():
    # Decorator factory: runs the test only if HuggingFace `transformers`
    # imports cleanly; otherwise exits with the `importerror` skip code.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                from transformers import (  # noqa: F401
                    AutoModelForMaskedLM,
                    BertConfig,
                )

                return func(*args, **kwargs)
            except ImportError:
                sys.exit(TEST_SKIPS["importerror"].exit_code)

        return wrapper

    return decorator
188
+
189
+
190
def at_least_x_gpu(x):
    """Returns True when CUDA is available with at least `x` devices."""
    if not torch.cuda.is_available():
        return False
    return torch.cuda.device_count() >= x
192
+
193
+
194
def skip_if_lt_x_gpu(x):
    """Exits with the multi-gpu skip code unless at least `x` CUDA devices
    (or, when TEST_HPU, `x` HPU devices) are available."""

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            enough_cuda = torch.cuda.is_available() and torch.cuda.device_count() >= x
            if enough_cuda:
                return func(*args, **kwargs)
            if TEST_HPU and torch.hpu.device_count() >= x:
                return func(*args, **kwargs)
            sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)

        return wrapper

    return decorator
207
+
208
+
209
# This decorator helps avoiding initializing cuda while testing other backends
def nccl_skip_if_lt_x_gpu(backend, x):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Non-NCCL backends run unchanged; CUDA is never touched for them.
            if backend == "nccl" and not (
                torch.cuda.is_available() and torch.cuda.device_count() >= x
            ):
                sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)
            return func(*args, **kwargs)

        return wrapper

    return decorator
223
+
224
+
225
def verify_ddp_error_logged(model_DDP, err_substr):
    """Asserts that `err_substr` was recorded in the DDP logging data of
    `model_DDP` (any C++ stacktrace suffix in `err_substr` is ignored)."""
    ddp_logging_data = model_DDP._get_ddp_logging_data()
    for key in ("iteration", "has_error", "error"):
        assert key in ddp_logging_data
    logging_err = ddp_logging_data["error"]
    # Remove C++ stacktrace if needed.
    marker = "\nException raised from "
    actual = err_substr.split(marker)[0] if marker in err_substr else err_substr
    assert (
        actual in logging_err
    ), f"Did not find expected {actual} in ddp logging data error: {logging_err}"
241
+
242
+
243
def with_nccl_blocking_wait(func):
    """
    Convenience decorator to set/unset TORCH_NCCL_BLOCKING_WAIT flag. Note that use of
    this decorator will override the setting of TORCH_NCCL_ASYNC_ERROR_HANDLING for
    the particular test. After the test, both TORCH_NCCL_BLOCKING_WAIT and
    TORCH_NCCL_ASYNC_ERROR_HANDLING will be restored to their original values.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Save and unset TORCH_NCCL_ASYNC_ERROR_HANDLING
        try:
            cached_nccl_async_error_handling: Union[str, None] = os.environ[
                "TORCH_NCCL_ASYNC_ERROR_HANDLING"
            ]
            del os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"]
        except KeyError:
            # TORCH_NCCL_ASYNC_ERROR_HANDLING was unset
            cached_nccl_async_error_handling = None

        # Save val of TORCH_NCCL_BLOCKING_WAIT and set it.
        try:
            cached_nccl_blocking_wait: Union[str, None] = os.environ[
                "TORCH_NCCL_BLOCKING_WAIT"
            ]
        except KeyError:
            cached_nccl_blocking_wait = None
        finally:
            # `finally` runs whether or not the lookup raised, so BLOCKING_WAIT
            # is always enabled for the wrapped test.
            os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"

        try:
            ret = func(*args, **kwargs)
            return ret
        finally:
            # restore old values.
            # NOTE: restoration only happens for variables that were previously
            # set; values introduced during the test are left in place.
            if cached_nccl_async_error_handling is not None:
                os.environ[
                    "TORCH_NCCL_ASYNC_ERROR_HANDLING"
                ] = cached_nccl_async_error_handling

            if cached_nccl_blocking_wait is not None:
                os.environ["TORCH_NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait

    return wrapper
287
+
288
+
289
def with_dist_debug_levels(levels):
    """Run the wrapped test once per TORCH_DISTRIBUTED_DEBUG level in
    ``levels``, re-reading the debug level from the environment each time and
    barriering between runs."""

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            old_level = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None)
            for level in levels:
                os.environ["TORCH_DISTRIBUTED_DEBUG"] = level
                c10d.set_debug_level_from_env()
                ret = func(*args, **kwargs)
                c10d.barrier()
                if old_level is not None:
                    os.environ["TORCH_DISTRIBUTED_DEBUG"] = old_level
            # Only returns test return for last test, but since these are
            # unittests the return value is not really used and earlier tests
            # would've raised had they failed.
            return ret

        return wrapper

    return decorator
313
+
314
+
315
def requires_gloo():
    """Skip (or sandcastle-pass) the test unless c10d was built with Gloo."""
    gloo_missing = not c10d.is_gloo_available()
    return skip_but_pass_in_sandcastle_if(
        gloo_missing, "c10d was not compiled with the Gloo backend"
    )
320
+
321
+
322
def requires_nccl_version(version, msg):
    """Skip unless NCCL is available and at least ``version``; ``msg`` states
    why the version floor is needed."""
    if not c10d.is_nccl_available():
        return skip_but_pass_in_sandcastle(
            "c10d was not compiled with the NCCL backend",
        )
    # Query the installed NCCL version once and reuse it for both the check
    # and the skip message.
    found = torch.cuda.nccl.version()
    return skip_but_pass_in_sandcastle_if(
        found < version,
        f"Requires NCCL version greater than or equal to: {version}, found: {found}, reason: {msg}",
    )
332
+
333
+
334
def requires_nccl():
    """Skip (or sandcastle-pass) the test unless c10d was built with NCCL."""
    nccl_missing = not c10d.is_nccl_available()
    return skip_but_pass_in_sandcastle_if(
        nccl_missing, "c10d was not compiled with the NCCL backend"
    )
339
+
340
def requires_ucc():
    """Skip (or sandcastle-pass) the test unless c10d was built with UCC."""
    ucc_missing = not c10d.is_ucc_available()
    return skip_but_pass_in_sandcastle_if(
        ucc_missing, "c10d was not compiled with the UCC backend"
    )
345
+
346
def requires_mpi():
    """Skip (or sandcastle-pass) the test unless c10d was built with MPI."""
    mpi_missing = not c10d.is_mpi_available()
    return skip_but_pass_in_sandcastle_if(
        mpi_missing, "c10d was not compiled with the MPI backend"
    )
351
+
352
+
353
def requires_multicast_support():
    """Skip unless CUDA is present and device 0 reports multicast support."""
    if torch.cuda.is_available():
        has_multicast_support = _SymmetricMemory.has_multicast_support(
            DeviceType.CUDA, 0
        )
    else:
        has_multicast_support = False
    return skip_but_pass_in_sandcastle_if(
        not has_multicast_support,
        "multicast support is not available",
    )
362
+
363
+
364
def skip_if_rocm_multiprocess(func):
    """Skips a test for ROCm"""
    # Marker attribute read by the multiprocess test infrastructure.
    func.skip_if_rocm_multiprocess = True

    @wraps(func)
    def wrapper(*args, **kwargs):
        if TEST_WITH_ROCM:
            sys.exit(TEST_SKIPS["skipIfRocm"].exit_code)
        return func(*args, **kwargs)

    return wrapper
375
+
376
+
377
def skip_if_win32():
    """Skip (or sandcastle-pass) the test when running on Windows."""
    on_windows = sys.platform == "win32"
    return skip_but_pass_in_sandcastle_if(
        on_windows,
        "This unit test case is not supported on Windows platform",
    )
382
+
383
+
384
def sm_is_or_higher_than(device: torch.device, major: int, minor: int) -> bool:
    """
    Returns True if the device's compute capability is (major, minor) or higher.
    Error out if the device is not a CUDA device.
    Returns False if device is a RoCM device.
    """
    if device.type != "cuda":
        raise ValueError("sm_is_or_later() is only supported for CUDA devices")

    # ROCm devices may have different compute capability codes
    if torch.version.hip is not None:
        return False

    capability = torch.cuda.get_device_capability(device)
    return capability >= (major, minor)
398
+
399
+
400
@retry_on_connect_failures
def create_tcp_store(
    addr="localhost",
    world_size=1,
    is_master=True,
    timeout=timedelta(minutes=5),
    wait_for_workers=True,
    jit_class=False,
    use_libuv=True,
):
    """
    Creates a TCP store. Retries if the chosen port is already in use.
    """
    port = find_free_port()
    if not jit_class:
        return c10d.TCPStore(
            addr,
            port,
            world_size,
            is_master,
            wait_for_workers=wait_for_workers,
            use_libuv=use_libuv,
        )
    # The TorchScript binding takes the timeout as integral milliseconds.
    timeout_millisecond = int(timeout / timedelta(milliseconds=1))
    return torch.classes.dist_c10d.TCPStore(
        addr, port, world_size, is_master, timeout_millisecond
    )
423
+
424
+
425
# Default per-test timeout (seconds) for distributed tests.
if TEST_WITH_TSAN:
    # TSAN runs much slower.
    TIMEOUT_DEFAULT = 500
else:
    TIMEOUT_DEFAULT = int(os.getenv('DISTRIBUTED_TESTS_DEFAULT_TIMEOUT', '300'))
# Per-test timeout overrides, keyed by bare test method name.
TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400}


# https://github.com/pytorch/pytorch/issues/75665
if TEST_WITH_ROCM:
    TIMEOUT_OVERRIDE["test_join_kwargs"] = 200
436
+
437
+
438
def create_device(interface=None):
    """Create a Gloo device, binding to ``interface`` when one is given on a
    non-Windows platform and falling back to loopback otherwise."""
    if sys.platform != "win32" and interface is not None:
        return c10d.ProcessGroupGloo.create_device(interface=interface)
    return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1")
443
+
444
+
445
def get_timeout(test_id) -> int:
    """Return the timeout (seconds) for a test id such as
    ``__main__.TestFoo.test_bar``, honoring per-test overrides."""
    test_name = test_id.split(".")[-1]
    return TIMEOUT_OVERRIDE.get(test_name, TIMEOUT_DEFAULT)
447
+
448
+
449
@contextmanager
def captured_output():
    """Temporarily swap sys.stdout/sys.stderr for StringIO buffers and yield
    the pair; the originals are restored on exit even if the body raises."""
    saved_streams = (sys.stdout, sys.stderr)
    try:
        sys.stdout, sys.stderr = StringIO(), StringIO()
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = saved_streams
458
+
459
+
460
def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1):
    """
    Generate a number of basic test cases for sparse reduction.
    These cover tensors with a varying number of sparse dimensions and a varying
    number of dense dimensions. The only reduction operation we support is sum.
    """

    def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0):
        # First sparse dimension is [0..rank].
        # Subsequent dimensions are always 0, so we know there is
        # a non-empty intersection between any two sparse tensors.
        indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1))
        shape = [world_size] + [2] * dense_dims
        for _ in range(sparse_dims - 1):
            indices = torch.cat((indices, torch.zeros(1, rank + 1)))
            shape.append(world_size)
        values = torch.ones([rank + 1] + [2] * dense_dims)
        return torch.sparse_coo_tensor(indices, values, shape)

    def compute_sum(fn, world_size: int):
        # Sum the per-rank tensors left-to-right.
        total = fn(0, world_size)
        for r in range(1, world_size):
            total = total + fn(r, world_size)
        return total

    variants = [
        partial(generate, sparse_dims=1),
        partial(generate, sparse_dims=2),
        partial(generate, sparse_dims=3),
        partial(generate, dense_dims=1),
        partial(generate, dense_dims=2),
        partial(generate, dense_dims=3),
    ]

    cases = []
    for fn in variants:
        inputs = [
            fn(num_inputs * rank + i, num_inputs * world_size)
            for i in range(num_inputs)
        ]
        expected = [
            compute_sum(fn, num_inputs * world_size) for _ in range(num_inputs)
        ]
        cases.append((inputs, expected))
    return cases
501
+
502
+
503
# HELPER FOR MULTIGPU TESTS
def init_multigpu_helper(world_size: int, backend: str):
    """Multigpu tests are designed to simulate the multi nodes with multi
    GPUs on each node. Nccl backend requires equal #GPUs in each process.
    On a single node, all visible GPUs are evenly
    divided to subsets, each process only uses a subset.
    """
    # NOTE(review): the `backend` parameter is accepted but never used in this
    # body — confirm whether it is kept only for call-site compatibility.
    nGPUs = torch.cuda.device_count()
    if TEST_HPU:
        # On Intel Gaudi machines, count HPU devices instead of CUDA devices.
        nGPUs = torch.hpu.device_count()

    visible_devices = range(nGPUs)

    # If rank is less than or equal to number of available GPU's
    # then each rank can be mapped to corresponding GPU.
    nGPUs_per_process = 1
    if world_size > nGPUs:
        # NOTE(review): when world_size exceeds the device count this floor
        # division yields 0, which makes every rank's device list below empty.
        # Confirm this is intended for the oversubscribed case.
        nGPUs_per_process = nGPUs // world_size
    # Map each rank to its contiguous slice of visible device indices.
    rank_to_GPU = {
        i: list(visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process])
        for i in range(world_size)
    }
    return rank_to_GPU
526
+
527
+
528
# Module-level handle to the scratch directory created by
# initialize_temp_directories(); released by cleanup_temp_dir().
tmp_dir: Optional[tempfile.TemporaryDirectory] = None
529
+
530
+
531
def initialize_temp_directories(init_method: Optional[str] = None) -> None:
    """Create the scratch directories used by distributed tests and export
    TEMP_DIR and INIT_METHOD into the environment.

    When ``init_method`` is None, a file:// rendezvous path inside the scratch
    directory is used instead.
    """
    global tmp_dir
    tmp_dir = tempfile.TemporaryDirectory()
    base = tmp_dir.name
    os.environ["TEMP_DIR"] = base
    for subdir in ("barrier", "test_dir", "init_dir"):
        os.mkdir(os.path.join(base, subdir))
    init_dir_path = os.path.join(base, "init_dir")
    # Set init method if specified.
    if init_method is None:
        os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join(
            init_dir_path, "shared_init_file"
        )
    else:
        os.environ["INIT_METHOD"] = init_method
546
+
547
+
548
def cleanup_temp_dir() -> None:
    """Remove the scratch directory made by initialize_temp_directories, if any."""
    if tmp_dir is None:
        return
    tmp_dir.cleanup()
551
+
552
+
553
# Most tests operate with this worldsize
DEFAULT_WORLD_SIZE = 4

# [How does MultiProcessTestCase work?]
# Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by
# default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an
# example which inherits from this class. Its `setUp()` method calls into
# `MultiProcessTestCase._spawn_processes()` which spawns `world_size()`
# subprocesses. During the spawn, the main process passes the test name to
# subprocesses, and the name is acquired from self.id(). The subprocesses
# then use the provided test function name to retrieve the function attribute
# from the test instance and run it. The main process simply waits for all
# subprocesses to join.
566
+
567
+
568
class MultiProcessTestCase(TestCase):
    """TestCase that runs each test method in `world_size` spawned
    subprocesses while the main process joins them and validates exit codes.
    See the [How does MultiProcessTestCase work?] note above."""

    # Sentinel rank identifying the parent process, which never runs the test
    # body itself.
    MAIN_PROCESS_RANK = -1
    # This exit code is used to indicate that the test code had an error and
    # exited abnormally. There are certain tests that might use sys.exit() to
    # simulate failures and in those cases, we can't have an exit code of 0,
    # but we still want to ensure we didn't run into any other errors.
    TEST_ERROR_EXIT_CODE = 10

    # do not early terminate for distributed tests.
    def _should_stop_test_suite(self) -> bool:
        return False

    # Many test cases init a process group but do not destroy it. This property
    # determines whether this base test class should call
    # `destroy_process_group` on behalf of the test. Its value is customizable
    # by derived TestCase's but it is a pan-TestCase value (cannot be customized
    # for each test).
    @property
    def destroy_pg_upon_exit(self) -> bool:
        return True

    @property
    def world_size(self) -> int:
        # Number of worker subprocesses spawned per test.
        return DEFAULT_WORLD_SIZE

    def join_or_run(self, fn):
        """Return a bound method that joins subprocesses when running as the
        main process, or executes the actual test function in a worker."""
        @wraps(fn)
        def wrapper(self):
            if self.rank == self.MAIN_PROCESS_RANK:
                self._join_processes(fn)
            else:
                fn()

        return types.MethodType(wrapper, self)

    # The main process spawns N subprocesses that run the test.
    # Constructor patches current instance test method to
    # assume the role of the main process and join its subprocesses,
    # or run the underlying test function.
    def __init__(self, method_name: str = "runTest", methodName: str = "runTest") -> None:
        # methodName is the correct naming in unittest and testslide uses keyword arguments.
        # So we need to use both to 1) not break BC and, 2) support testslide.
        if methodName != "runTest":
            method_name = methodName
        super().__init__(method_name)
        try:
            fn = getattr(self, method_name)
            setattr(self, method_name, self.join_or_run(fn))
        except AttributeError as e:
            if methodName != 'runTest':
                # we allow instantiation with no explicit method name
                # but not an *incorrect* or missing method name
                raise ValueError(f"no such test method in {self.__class__}: {methodName}") from e

    def setUp(self) -> None:
        """Prepare per-test state: no subprocesses yet, main-process rank, and
        a fresh rendezvous file."""
        super().setUp()
        self.skip_return_code_checks = []  # type: ignore[var-annotated]
        self.processes = []  # type: ignore[var-annotated]
        self.rank = self.MAIN_PROCESS_RANK
        self.file_name = tempfile.NamedTemporaryFile(delete=False).name
        # pid to pipe consisting of error message from process.
        self.pid_to_pipe = {}  # type: ignore[var-annotated]

    def tearDown(self) -> None:
        """Terminate any remaining subprocesses and drop references to them."""
        super().tearDown()
        for p in self.processes:
            p.terminate()
        # Each Process instance holds a few open file descriptors. The unittest
        # runner creates a new TestCase instance for each test method and keeps
        # it alive until the end of the entire suite. We must thus reset the
        # processes to prevent an effective file descriptor leak.
        self.processes = []

    def _current_test_name(self) -> str:
        # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
        return self.id().split(".")[-1]

    def _start_processes(self, proc) -> None:
        """Spawn ``world_size`` processes with the given process constructor,
        wiring one pipe per child for error/traceback reporting."""
        self.processes = []
        for rank in range(int(self.world_size)):
            parent_conn, child_conn = torch.multiprocessing.Pipe()
            process = proc(
                target=self.__class__._run,
                name="process " + str(rank),
                args=(rank, self._current_test_name(), self.file_name, child_conn),
                kwargs={
                    "fake_pg": getattr(self, "fake_pg", False),
                }
            )
            process.start()
            logger.info("Started process %s with pid %s", rank, process.pid)
            self.pid_to_pipe[process.pid] = parent_conn
            self.processes.append(process)

    def _spawn_processes(self) -> None:
        """Spawn the worker processes using the 'spawn' start method."""
        proc = torch.multiprocessing.get_context("spawn").Process
        self._start_processes(proc)

    class Event(Enum):
        # Request sent over the pipe asking a child to dump its traceback.
        GET_TRACEBACK = 1

    @staticmethod
    def _event_listener(parent_pipe, signal_pipe, rank: int):
        """Background thread in each child: answer traceback requests from the
        parent until either pipe signals shutdown."""
        logger.info("Starting event listener thread for rank %s", rank)
        while True:
            ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe])

            if parent_pipe in ready_pipes:

                if parent_pipe.closed:
                    logger.info(
                        "Pipe closed for process %s, stopping event listener thread", rank
                    )
                    return

                event = parent_pipe.recv()
                logger.info("Received event %s on process %s", event, rank)

                if event == MultiProcessTestCase.Event.GET_TRACEBACK:
                    # Return traceback to the parent process.
                    with tempfile.NamedTemporaryFile(mode="r+") as tmp_file:
                        faulthandler.dump_traceback(tmp_file)
                        # Flush buffers and seek to read from the beginning
                        tmp_file.flush()
                        tmp_file.seek(0)
                        parent_pipe.send(tmp_file.read())

                    logger.info("Process %s sent traceback", rank)

            if signal_pipe in ready_pipes:
                return

    @classmethod
    def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe, **kwargs) -> None:
        """Child-process entry point: build a fresh TestCase instance for this
        rank and execute the named test."""
        self = cls(test_name)
        self.rank = rank
        self.file_name = file_name
        self.run_test(test_name, parent_pipe)

    def run_test(self, test_name: str, parent_pipe) -> None:
        """Run ``test_name`` in this child process. Skips are reported via the
        skip exit code; failures are sent back over the pipe and reported via
        TEST_ERROR_EXIT_CODE."""
        # Start event listener thread.
        signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False)
        event_listener_thread = threading.Thread(
            target=MultiProcessTestCase._event_listener,
            args=(parent_pipe, signal_recv_pipe, self.rank),
            daemon=True,
        )
        event_listener_thread.start()
        if sys.platform != "win32" and sys.platform != "darwin":
            # Register signal handler to dump stack traces on FATALs.
            # Windows and MacOS do not support the signal handlers.
            torch._C._set_print_stack_traces_on_fatal_signal(True)
        # Show full C++ stacktraces when a Python error originating from C++ is raised.
        os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"

        # self.id() == e.g. '__main__.TestDistributed.test_get_rank'
        # We're retrieving a corresponding test and executing it.
        try:
            getattr(self, test_name)()
        except unittest.SkipTest as se:
            logger.info(
                "Process %s skipping test %s for following reason: %s", self.rank, test_name, str(se)
            )
            sys.exit(TEST_SKIPS["generic"].exit_code)
        except Exception:
            logger.error(
                "Caught exception: \n%s exiting "
                "process %s with exit code: %s",
                traceback.format_exc(), self.rank, MultiProcessTestCase.TEST_ERROR_EXIT_CODE
            )
            # Send error to parent process.
            parent_pipe.send(traceback.format_exc())
            sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE)
        finally:
            if signal_send_pipe is not None:
                signal_send_pipe.send(None)

            assert event_listener_thread is not None
            event_listener_thread.join()
            # Close pipe after done with test.
            parent_pipe.close()

        if self.destroy_pg_upon_exit:
            try:
                # Some tests do destroy the pgs, and destroy can't be called twice.
                # This avoids spewing warnings about improperly shutting down.
                c10d.destroy_process_group()
            except (AssertionError, ValueError):
                pass

    def _get_timedout_process_traceback(self) -> None:
        """Ask every still-running child for its traceback and log whatever
        comes back within a short poll window."""
        pipes = []
        for i, process in enumerate(self.processes):
            if process.exitcode is None:
                pipe = self.pid_to_pipe[process.pid]
                try:
                    pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK)
                    pipes.append((i, pipe))
                except ConnectionError as e:
                    logger.error(
                        "Encountered error while trying to get traceback for process %s: %s", i, e
                    )

        # Wait for results.
        for rank, pipe in pipes:
            try:
                # Wait for traceback
                if pipe.poll(5):
                    if pipe.closed:
                        logger.info(
                            "Pipe closed for process %s, cannot retrieve traceback", rank
                        )
                        continue

                    traceback = pipe.recv()
                    logger.error(
                        "Process %s timed out with traceback: \n\n%s", rank, traceback
                    )
                else:
                    logger.error(
                        "Could not retrieve traceback for timed out process: %s", rank
                    )
            except ConnectionError as e:
                logger.error(
                    "Encountered error while trying to get traceback for process %s: %s", rank, e
                )

    def _join_processes(self, fn) -> None:
        """Main-process loop: poll children until all exit, one errors, or the
        test times out; then validate the collected exit codes."""
        timeout = get_timeout(self.id())
        start_time = time.time()
        subprocess_error = False
        try:
            while True:
                # check to see if any subprocess exited with an error early.
                for (i, p) in enumerate(self.processes):
                    # This is the exit code processes exit with if they
                    # encountered an exception.
                    if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE:
                        print(
                            f"Process {i} terminated with exit code {p.exitcode}, terminating remaining processes."
                        )
                        active_children = torch.multiprocessing.active_children()
                        for ac in active_children:
                            ac.terminate()
                        subprocess_error = True
                        break
                if subprocess_error:
                    break
                # All processes have joined cleanly if they all a valid exitcode
                if all(p.exitcode is not None for p in self.processes):
                    break
                # Check if we should time out the test. If so, we terminate each process.
                elapsed = time.time() - start_time
                if elapsed > timeout:
                    self._get_timedout_process_traceback()
                    print(
                        f"Timing out after {timeout} seconds and killing subprocesses."
                    )
                    for p in self.processes:
                        p.terminate()
                    break
                # Sleep to avoid excessive busy polling.
                time.sleep(0.1)

            elapsed_time = time.time() - start_time

            if fn in self.skip_return_code_checks:
                self._check_no_test_errors(elapsed_time)
            else:
                self._check_return_codes(elapsed_time)
        finally:
            # Close all pipes
            for pipe in self.pid_to_pipe.values():
                pipe.close()

    def _check_no_test_errors(self, elapsed_time) -> None:
        """
        Checks that we didn't have any errors thrown in the child processes.
        """
        for i, p in enumerate(self.processes):
            if p.exitcode is None:
                raise RuntimeError(
                    f"Process {i} timed out after {elapsed_time} seconds"
                )
            self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode)

    def _check_return_codes(self, elapsed_time) -> None:
        """
        Checks that the return codes of all spawned processes match, and skips
        tests if they returned a return code indicating a skipping condition.
        """
        # If no processes are spawned, there is nothing to check.
        if not self.processes:
            logger.warning("Note: no subprocesses were spawned, test was likely skipped.")
            return

        first_process = self.processes[0]
        # first, we check if there are errors in actual processes
        # (via TEST_ERROR_EXIT CODE), and raise an exception for those.
        # the reason we do this is to attempt to raise a more helpful error
        # message than "Process x terminated/timed out"
        # TODO: we should pipe the exception of the failed subprocess here.
        # Currently, the actual exception is displayed as a logging output.
        errored_processes = [
            (i, p)
            for i, p in enumerate(self.processes)
            if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE
        ]
        if errored_processes:
            error = ""
            for i, process in errored_processes:
                # Get error from pipe.
                error_message = self.pid_to_pipe[process.pid].recv()
                error += (
                    f"Process {i} exited with error code {MultiProcessTestCase.TEST_ERROR_EXIT_CODE} "
                    f"and exception:\n{error_message}\n"
                )

            raise RuntimeError(error)
        # If no process exited uncleanly, we check for timeouts, and then ensure
        # each process exited cleanly.
        for i, p in enumerate(self.processes):
            if p.exitcode is None:
                raise RuntimeError(
                    f"Process {i} terminated or timed out after {elapsed_time} seconds"
                )
            self.assertEqual(
                p.exitcode,
                first_process.exitcode,
                msg=f"Expect process {i} exit code to match Process 0 exit code of {first_process.exitcode}, but got {p.exitcode}",
            )
        for skip in TEST_SKIPS.values():
            if first_process.exitcode == skip.exit_code:
                if IS_SANDCASTLE:
                    # Don't use unittest.skip to skip the test on sandcastle
                    # since it creates tasks for skipped tests assuming there
                    # is some follow-up needed. Instead just "pass" the test
                    # with an appropriate message.
                    logger.info(
                        "Skipping %s on sandcastle for the following reason: %s", self.id(), skip.message
                    )
                    return
                else:
                    raise unittest.SkipTest(skip.message)
        self.assertEqual(
            first_process.exitcode,
            0,
            msg=f"Expected zero exit code but got {first_process.exitcode} for pid: {first_process.pid}",
        )

    @property
    def is_master(self) -> bool:
        # Rank 0 acts as the "master" by convention in these tests.
        return self.rank == 0
921
+
922
# Utility base class for distributed Multi Process Test cases
# This abstracts the PG creation and deletion, the backends are selected based
# on device type. The tests functions can be instantiated per device type using
# common_device_type.instantiate_device_type_tests
# other backends can add entry in backend() function
class DistributedTestBase(MultiProcessTestCase):

    def setUp(self):
        """Spawn the worker processes immediately for every test."""
        super().setUp()
        self._spawn_processes()

    def tearDown(self):
        """Remove the rendezvous file and run base-class cleanup.

        BUG FIX: previously ``super().tearDown()`` was never called, so the
        subprocess termination and file-descriptor cleanup performed by
        ``MultiProcessTestCase.tearDown`` was skipped, leaking processes/fds
        across tests.
        """
        super().tearDown()
        try:
            os.remove(self.file_name)
        except OSError:
            pass

    def backend(self, device) -> str:
        """Choose a c10d backend string for the given device string."""
        if "cuda" in device:
            return "nccl"
        elif "hpu" in device:  # intel gaudi
            return "hccl"
        else:
            return "gloo"

    def create_pg(self, device):
        """Initialize the default process group for ``device`` (FileStore
        rendezvous) and return it."""
        num_visible_devices = torch.get_device_module(device).device_count()
        store = torch.distributed.FileStore(self.file_name, num_visible_devices)
        torch.distributed.init_process_group(
            backend=self.backend(device),
            world_size=self.world_size,
            rank=self.rank,
            store=store,
        )
        if "nccl" in self.backend(device):
            # NCCL requires each rank to bind to its own CUDA device.
            torch.cuda.set_device(self.rank)
        return torch.distributed.distributed_c10d._get_default_group()

    def rank_to_device(self, device):
        """Map each rank to a single device index, round-robin over the
        visible devices."""
        num_visible_devices = torch.get_device_module(device).device_count()
        return {i: [i % num_visible_devices] for i in range(self.world_size)}
963
+
964
def run_subtests(
    cls_inst,
    subtest_config: Dict[str, List[Any]],
    test_fn: Callable,
    *test_args,
    **test_kwargs: Any,
):
    """
    Runs a test function given by ``test_fn`` as a subtest according to the
    configurations specified by ``subtest_config``. This amortizes the
    costly setup overhead (including process spawn and initializing the
    process group) over the subtests.

    Args:
        subtest_config (Dict[str, List[Any]]): A mapping from subtest
            keyword argument name to a list of its possible values.
        test_fn (Callable): A callable that runs the actual test.
        test_args: Positional arguments to pass to ``test_fn``.
        test_kwargs: Keyword arguments to pass to ``test_fn``.
    """
    # Fix an ordering over the config keys so the cartesian product below is
    # deterministic.
    keys = list(subtest_config.keys())
    value_lists = [subtest_config[k] for k in keys]
    for combination in itertools.product(*value_lists):
        # Map each keyword to its chosen value for this subtest.
        subtest_kwargs = dict(zip(keys, combination))
        with cls_inst.subTest(**subtest_kwargs):
            # Reset dynamo state around each subtest so compiled artifacts do
            # not leak between configurations.
            torch._dynamo.reset()
            test_fn(*test_args, **test_kwargs, **subtest_kwargs)
            torch._dynamo.reset()
        c10d.barrier()
996
+
997
+
998
# Cannot use functools.cache as it requires python 3.9
EFA_PROBE_RESULT = None


def has_efa() -> bool:
    """
    If shell command `fi_info -p efa -t FI_EP_RDM` returns exit code 0 then we assume that the machine has
    Libfabric EFA interfaces and EFA software components installed,
    see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html.
    """
    global EFA_PROBE_RESULT
    if EFA_PROBE_RESULT is None:
        try:
            probe = subprocess.run(
                ["fi_info", "-p", "efa", "-t", "FI_EP_RDM"], check=False
            )
            EFA_PROBE_RESULT = probe.returncode == 0
        except FileNotFoundError:
            # fi_info is not installed at all.
            EFA_PROBE_RESULT = False
    return EFA_PROBE_RESULT
1019
+
1020
+
1021
def tp_transports():
    """
    If the machine has Libfabric EFA interfaces and EFA software components installed it may cause
    'RuntimeError: In operator() at tensorpipe/common/ibv.h:172 "": Operation not supported' if tensorpipe
    uses InfiniBand transport, so we exclude it from tensorpipe transports,
    see https://github.com/pytorch/pytorch/issues/73885 and https://github.com/pytorch/pytorch/issues/65022
    """
    if has_efa():
        # Restrict tensorpipe to shared-memory and libuv transports.
        return ["shm", "uv"]
    return None
1029
+
1030
+
1031
def spawn_threads_and_init_comms(
    func=None, timeout=TIMEOUT_DEFAULT, world_size=DEFAULT_WORLD_SIZE
):
    """
    Wrapper to use with a test method

    Runs the decorated test once per rank in ``world_size`` threads, each with
    its own "threaded" process group initialized against a shared HashStore.
    """
    # NOTE(review): `timeout` is accepted but never referenced below — confirm
    # whether a thread-join timeout was intended here.
    if func is None:
        # Called with arguments, e.g. @spawn_threads_and_init_comms(world_size=2):
        # return a decorator that re-enters with the function bound.
        return partial(
            spawn_threads_and_init_comms, timeout=timeout, world_size=world_size
        )


    def _run_test_method_with_multi_threads(world_size, callback):
        # Install the in-process ("threaded") process-group backend and a
        # store shared by all ranks.
        world = _install_threaded_pg()
        global_store = c10d.HashStore()

        def world_is_valid():
            # Guard against another test having swapped out the global world.
            return world == c10d.distributed_c10d._world

        def worker(rank, world_pg, store):
            c10d.init_process_group(
                backend="threaded", rank=rank, world_size=world_size, store=store
            )
            try:
                callback()
            except BaseException as ex:
                # Exceptions are handled in MultiThreadedTestCase
                MultiThreadedTestCase.exception_queue.put((rank, sys.exc_info()))
                ProcessLocalGroup.exception_handle(ex)  # trigger _terminate event and awaken worker threads
            finally:
                if world_is_valid():
                    c10d.destroy_process_group()

        threads = []
        for rank in range(world_size):
            t = threading.Thread(target=worker, args=(rank, world, global_store))
            t.start()
            threads.append(t)

        return threads


    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # TODO: get test name from kwargs
        # Thread isolation keeps each thread's c10d state separate for the
        # duration of the test.
        torch._C._distributed_c10d._set_thread_isolation_mode(True)
        try:
            threads = _run_test_method_with_multi_threads(world_size, lambda: func(self, *args, **kwargs))
            # join and error handling
            MultiThreadedTestCase._join_threads(threads, func)
        finally:
            torch._C._distributed_c10d._set_thread_isolation_mode(False)

    return wrapper
1085
+
1086
+
1087
+ class MultiThreadedTestCase(TestCase):
1088
+ """
1089
+ Test runner that runs all tests with the in-proc process group using
1090
+ multiple threads with the threaded process group.
1091
+
1092
+ Each test spawns world_size threads and run the test method in each thread.
1093
+
1094
+ Difference from regular MultiProcess test runner:
1095
+ Must explicitly defines SetUp and call self._spawn_threads() to run the tests.
1096
+ Cannot use setUp / tearDown (must use perThreadSetup / perThreadShutdown)
1097
+ to set up / tear down each thread when running each test.
1098
+ No global state possible
1099
+ How bad of a limitation is this?
1100
+ """
1101
+ exception_queue = queue.Queue()
1102
+
1103
+ MAIN_THREAD_RANK = -1
1104
+
1105
+ def join_or_run(self, fn):
1106
+ @wraps(fn)
1107
+ def wrapper(self):
1108
+ if self.rank == self.MAIN_THREAD_RANK:
1109
+ self._join_threads(self.threads, fn)
1110
+ else:
1111
+ fn()
1112
+
1113
+ return types.MethodType(wrapper, self)
1114
+
1115
+ def __init__(self, method_name: str = "runTest", methodName: str = "runTest") -> None:
1116
+ # methodName is the correct naming in unittest and testslide uses keyword arguments.
1117
+ # So we need to use both to 1) not break BC and, 2) support testslide.
1118
+ if methodName != "runTest":
1119
+ method_name = methodName
1120
+ super().__init__(method_name)
1121
+ try:
1122
+ fn = getattr(self, method_name)
1123
+ setattr(self, method_name, self.join_or_run(fn))
1124
+ except AttributeError as e:
1125
+ if methodName != 'runTest':
1126
+ # we allow instantiation with no explicit method name
1127
+ # but not an *incorrect* or missing method name
1128
+ raise ValueError(f"no such test method in {self.__class__}: {methodName}") from e
1129
+
1130
+ def perThreadSetUp(self):
1131
+ # super().setUp() # TestCase.setUp() calls torch.manual_seed()
1132
+ pass
1133
+
1134
+ def perThreadTearDown(self):
1135
+ pass
1136
+
1137
    def setUp(self) -> None:
        """
        Main-thread setup only. setUp only sets up things in the main thread;
        if you want to configure things in the spawned threads, use
        perThreadSetUp instead.
        """
        super().setUp()
        # Sentinel rank marks this instance as the main (joining) thread.
        self.rank = self.MAIN_THREAD_RANK
        self.threads = []
        # Show full C++ stacktraces when a Python error originating from C++ is raised.
        os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"

    def tearDown(self):
        """
        Main-thread teardown only. tearDown only cleans up things in the main
        thread; if you want to clean up in the spawned threads, use
        perThreadTearDown instead.
        """
        super().tearDown()
        self.threads = []
1155
+
1156
+ def _spawn_threads(self):
1157
+ """
1158
+ class method to spawn threads and run test, use this method in the SetUp of your TestCase
1159
+ """
1160
+ torch._C._distributed_c10d._set_thread_isolation_mode(True)
1161
+ test_name = self._current_test_name
1162
+ # for each test case, we need to create thread local world, and a global store
1163
+ world = _install_threaded_pg()
1164
+ self.__class__.global_store = c10d.HashStore()
1165
+
1166
+ def world_is_valid():
1167
+ return world == c10d.distributed_c10d._world
1168
+
1169
+ if not world_is_valid():
1170
+ raise RuntimeError("Invalid world")
1171
+
1172
+ for rank in range(self.world_size):
1173
+ t = threading.Thread(target=self.__class__._run, args=(test_name, rank, self.world_size))
1174
+ t.start()
1175
+ self.threads.append(t)
1176
+
1177
    @classmethod
    def _run(cls, test_name, rank, world_size, **kwargs):
        # Worker-thread entry point: build a per-thread test instance and run
        # the named test against the threaded process group.
        self = cls(test_name)
        self.rank = rank

        # precision/rel_tol is a thread-local setting since it may be overridden per test, need to make
        # every thread have the same value. This would be relevant when we use op db tests, where it
        # needs those states to be set i.e. using instantiate_device_type_tests()
        # TODO: figure out a better way to do this
        if hasattr(self, "_tls"):
            self._tls = threading.local()
            self._tls.precision = TestCase._precision
            self._tls.rel_tol = TestCase._rel_tol

        self.run_test_with_threaded_pg(test_name, rank, world_size)
1192
+
1193
+ def run_test_with_threaded_pg(self, test_name, rank, world_size):
1194
+ """
1195
+ Run the current test associated with `test_name` using the threaded process group.
1196
+ """
1197
+ c10d.init_process_group(
1198
+ backend="threaded", rank=rank, world_size=world_size, store=self.__class__.global_store
1199
+ )
1200
+ self.perThreadSetUp()
1201
+
1202
+ try:
1203
+ getattr(self, test_name)()
1204
+ except BaseException as ex:
1205
+ self.exception_queue.put((rank, sys.exc_info()))
1206
+ ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads
1207
+ finally:
1208
+ c10d.destroy_process_group()
1209
+ self.perThreadTearDown()
1210
+
1211
+
1212
+ @classmethod
1213
+ def _join_threads(cls, threads, fn):
1214
+ timeout = TIMEOUT_DEFAULT
1215
+ try:
1216
+ for idx, thread in enumerate(threads):
1217
+ thread.join(max(0, timeout))
1218
+ if thread.is_alive():
1219
+ MultiThreadedTestCase.exception_queue.put(
1220
+ (
1221
+ idx,
1222
+ (
1223
+ TimeoutError,
1224
+ TimeoutError(
1225
+ f"Rank failed to join in under {timeout} seconds"
1226
+ ),
1227
+ None,
1228
+ ),
1229
+ )
1230
+ )
1231
+ ProcessLocalGroup.reset()
1232
+ failed_ranks = []
1233
+ while not cls.exception_queue.empty():
1234
+ failure = cls.exception_queue.get()
1235
+ failed_ranks.append(failure)
1236
+ finally:
1237
+ _uninstall_threaded_pg()
1238
+ torch._C._distributed_c10d._set_thread_isolation_mode(False)
1239
+
1240
+ cls._check_return_codes(failed_ranks, timeout, fn)
1241
+
1242
+ @classmethod
1243
+ def _check_return_codes(cls, failed_ranks, timeout, fn):
1244
+ # Print based on exceptions raised from threads
1245
+ # SkipTest: print info for each thread
1246
+ # TimeoutError: raise RuntimeError for any timed out thread
1247
+ # Normal Exception: print error for each thread that raises exception
1248
+ # and raise a RuntimeError
1249
+ error_msg = ""
1250
+ skip_code = -1
1251
+ for rank, exc_info in failed_ranks:
1252
+ exc = exc_info[1]
1253
+ if isinstance(exc, unittest.SkipTest):
1254
+ logger.info(
1255
+ "Thread %s skipping test %s for following reason: %s", rank, fn, str(exc)
1256
+ )
1257
+ if skip_code < 0:
1258
+ skip_code = TEST_SKIPS["generic"].exit_code
1259
+ elif isinstance(exc, TimeoutError):
1260
+ msg = f"Thread {rank} terminated or timed out after {timeout} seconds\n"
1261
+ logger.error(msg)
1262
+ raise RuntimeError(msg)
1263
+ elif isinstance(exc, Exception):
1264
+ msg = "".join(traceback.format_exception(*exc_info))
1265
+ logger.error(
1266
+ "Caught exception: \n%s exiting thread %s", msg, rank
1267
+ )
1268
+ error_msg += (
1269
+ f"Thread {rank} exited with exception:\n{msg}\n"
1270
+ )
1271
+ elif isinstance(exc, SystemExit):
1272
+ if type(exc.code) == int and skip_code < 0:
1273
+ skip_code = exc.code
1274
+
1275
+ # check exceptions
1276
+ if len(error_msg) > 0:
1277
+ raise RuntimeError(error_msg)
1278
+ # check skip
1279
+ if skip_code > 0:
1280
+ for skip in TEST_SKIPS.values():
1281
+ if skip_code == skip.exit_code:
1282
+ if IS_SANDCASTLE:
1283
+ # "pass" the test with an appropriate message.
1284
+ logger.info(
1285
+ "Skipping %s on sandcastle for the following reason: %s", fn, skip.message
1286
+ )
1287
+ return
1288
+ else:
1289
+ raise unittest.SkipTest(skip.message)
1290
+
1291
    @property
    def world_size(self) -> int:
        # Number of worker threads to spawn; override in subclasses as needed.
        return DEFAULT_WORLD_SIZE

    @property
    def _current_test_name(self) -> str:
        # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
        return self.id().split(".")[-1]
1299
+
1300
+ def assertEqualOnRank(self, x, y, msg=None, *, rank=0):
1301
+ """
1302
+ The reason why we have this util function instead of
1303
+ self.assertEqual is all threads are sharing one CPU RNG
1304
+ so the assertion result is only reliable on rank 0
1305
+ """
1306
+ if self.rank == rank:
1307
+ self.assertEqual(x, y, msg)
1308
+
1309
+ def assertNotEqualOnRank(self, x, y, msg=None, *, rank=0):
1310
+ if self.rank == rank:
1311
+ self.assertNotEqual(x, y)
1312
+
1313
+
1314
class SaveForwardInputsModule(nn.Module):
    """Linear layer that records each forward input into a shared dict.

    Used by mixed-precision tests to inspect what dtype a submodule saw.
    """

    def __init__(
        self,
        forward_inputs: Dict[nn.Module, torch.Tensor],
        cast_forward_inputs: bool,
    ) -> None:
        super().__init__()
        self.l = nn.Linear(100, 100)
        self.forward_inputs = forward_inputs
        self.cast_forward_inputs = cast_forward_inputs

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Record the raw (pre-cast) input keyed by this module.
        self.forward_inputs[self] = x
        if self.cast_forward_inputs:
            x = x.to(self.l.weight.dtype)
        return self.l(x)
1328
+
1329
+
1330
class SaveForwardInputsModel(nn.Module):
    """Two chained ``SaveForwardInputsModule``s; also records the model-level input."""

    def __init__(
        self,
        forward_inputs: Dict[nn.Module, torch.Tensor],
        cast_forward_inputs: bool,
    ) -> None:
        super().__init__()
        self.c1 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs)
        self.c2 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs)
        self.forward_inputs = forward_inputs

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        self.forward_inputs[self] = x
        hidden = self.c1(x)
        return self.c2(hidden)
1344
+
1345
@contextmanager
def _dynamo_dist_per_rank_init(rank, world_size, init_pg=True, fake_pg=False):
    """Per-rank init/teardown for dynamo distributed tests.

    To avoid multiple inheritance from _dynamo.test_case.TestCase and
    MultiProcessTestCase, manually replicate the essential dynamo
    reset/clear behavior around the test body.
    """
    if not fake_pg:
        torch.cuda.set_device(rank)
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '6789'
    if init_pg:
        if fake_pg:
            fake_store = torch.testing._internal.distributed.fake_pg.FakeStore()
            c10d.init_process_group(
                backend="fake",
                world_size=world_size,
                rank=rank,
                store=fake_store,
            )
        else:
            c10d.init_process_group("nccl", rank=rank, world_size=world_size)
    torch._dynamo.reset()
    torch._dynamo.utils.counters.clear()
    try:
        yield
    finally:
        # Mirror the pre-test reset so state never leaks across tests.
        torch._dynamo.reset()
        torch._dynamo.utils.counters.clear()
        if init_pg:
            c10d.destroy_process_group()
1373
+
1374
+
1375
class DynamoDistributedSingleProcTestCase(torch._dynamo.test_case.TestCase):
    """
    Test harness for single-process dynamo distributed tests,
    initializes dist process group.

    Prefer this for simple tests, as it's easier to debug.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # _exit_stack is set up in TestCase
        env_patch = patch.dict(
            os.environ,
            {"MASTER_ADDR": "localhost", "MASTER_PORT": "12355"},
        )
        cls._exit_stack.enter_context(env_patch)
        cls.rank = 0
        cls.device = f"cuda:{cls.rank}"
        # NOTE(review): with a "cuda:N" device string this is always None;
        # preserved as-is to avoid any behavior change.
        cls.device_ids = None if "cuda" in cls.device else [cls.rank]
        c10d.init_process_group("nccl", rank=cls.rank, world_size=1)

    @classmethod
    def tearDownClass(cls):
        c10d.destroy_process_group()
        super().tearDownClass()
1405
+
1406
+
1407
class DynamoDistributedMultiProcTestCase(MultiProcessTestCase):
    """
    Use this for tests that actually run on multiple GPUs.

    Decorate tests with @skip_if_lt_x_gpu(ngpu)

    Note: MultiProcTestCase spawns processes per test and is slow.
    Prefer MultiThreadedTestCase for most tests. Perhaps use this one
    sparingly for integration tests.
    """

    def setUp(self):
        super().setUp()
        self._spawn_processes()

    def tearDown(self):
        super().tearDown()
        # The rendezvous file may already be gone; that's fine.
        try:
            os.remove(self.file_name)
        except OSError:
            pass

    @property
    def world_size(self) -> int:
        # One process per visible GPU.
        return torch.cuda.device_count()

    @classmethod
    def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe, **kwargs) -> None:
        # Silence dynamo's structured-trace logging in child processes.
        trace_log.addHandler(logging.NullHandler())

        # The rest is copypasta from MultiProcessTestCase._run
        self = cls(test_name)
        self.rank = rank
        self.file_name = file_name
        self.run_test(test_name, parent_pipe)
1441
+
1442
+
1443
class MultiProcContinousTest(TestCase):
    """Test case that shares one long-lived process group across all tests in
    the class: the PG is created once in ``setUpClass`` and destroyed once in
    ``tearDownClass``, instead of per test. Each rank process runs the whole
    suite via ``run_rank``."""

    # Class variables:
    # number of test processes
    world_size: int = 2
    # rank of the current process
    rank: int = -1  # unset state
    # Rendezvous file
    rdvz_file: Optional[str] = None

    @classmethod
    @abc.abstractmethod
    def backend_str(cls) -> str:
        """
        ProcessGroup backend str.
        To be customized by sub test classes, e.g. "nccl".
        Here we raise error.
        """
        raise NotImplementedError("Please implement backend_str in your test class")

    @classmethod
    def opts(cls, high_priority_stream=False):
        """
        ProcessGroup init options.
        To be customized by sub test classes, e.g. ProcessGroupNCCLOpTest
        Here we return None.
        """
        return None

    @classmethod
    def setUpClass(cls):
        """
        Class-scope test fixture. Run once for entire test class, before any test starts.
        Set up the process group.
        """
        super().setUpClass()
        # `rank` must have been set by run_rank() (or torchrun) beforehand.
        if not 0 <= cls.rank < cls.world_size:
            raise RuntimeError(
                "Rank must be set and in the range of 0 to world_size. "
                f"World size: {cls.world_size} Rank: {cls.rank}"
            )
        if cls.rdvz_file:
            store = c10d.FileStore(cls.rdvz_file, cls.world_size)
        else:
            # torchrun takes care of rendezvous
            store = None
        opts = cls.opts()
        backend = cls.backend_str()
        print(f"Testing {backend=}")
        # create nccl processgroup with opts
        c10d.init_process_group(
            backend=backend,
            world_size=cls.world_size,
            rank=cls.rank,
            store=store,
            pg_options=opts,
        )
        cls.pg = c10d.distributed_c10d._get_default_group()
        print(f"Rank {cls.rank} setup complete")

    @classmethod
    def tearDownClass(cls):
        """
        Class-scope test fixture. Run once for entire test class, after all tests finish.
        Tear down the process group.
        """
        c10d.destroy_process_group()
        super().tearDownClass()
        # Clear up the rendezvous file; it may already be gone.
        if cls.rdvz_file:
            try:
                os.remove(cls.rdvz_file)
            except OSError:
                pass
        print(f"Rank {cls.rank} teardown complete")

    @classmethod
    def run_rank(
        cls,
        rank: int,
        world_size: int,
        rdvz_file: Optional[str] = None,
    ):
        """
        This is an entry point for each rank to run the tests in `MultiProcContinousTest`.
        In this entry point, we set the class variables for the test class.
        Then we run all tests.

        Note:
            - This helper only works for a subclass of `MultiProcContinousTest`.

        Example:
            - See `test_c10d_ops_nccl.py`.
        """
        # set class variables for the test class
        cls.rank = rank
        cls.world_size = world_size
        cls.rdvz_file = rdvz_file
        # Launch tests via `common_utils` infra
        run_tests()
lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ from typing import List
4
+
5
+ import torch
6
+
7
+
8
+ # Functions and classes for describing the dtypes a function supports
9
+ # NOTE: these helpers should correspond to PyTorch's C++ dispatch macros
10
+
11
+
12
+ # Verifies each given dtype is a torch.dtype
13
+ def _validate_dtypes(*dtypes):
14
+ for dtype in dtypes:
15
+ assert isinstance(dtype, torch.dtype)
16
+ return dtypes
17
+
18
+
19
+ # class for tuples corresponding to a PyTorch dispatch macro
20
+ class _dispatch_dtypes(tuple):
21
+ def __add__(self, other):
22
+ assert isinstance(other, tuple)
23
+ return _dispatch_dtypes(tuple.__add__(self, other))
24
+
25
+
26
_empty_types = _dispatch_dtypes(())


def empty_types():
    """Dispatch list with no dtypes."""
    return _empty_types


_floating_types = _dispatch_dtypes((torch.float32, torch.float64))


def floating_types():
    """float32 and float64."""
    return _floating_types


_floating_types_and_half = _floating_types + (torch.half,)


def floating_types_and_half():
    """float32, float64, and float16."""
    return _floating_types_and_half


def floating_types_and(*dtypes):
    """Floating types plus the given (validated) extra dtypes."""
    return _floating_types + _validate_dtypes(*dtypes)


_floating_and_complex_types = _floating_types + (torch.cfloat, torch.cdouble)


def floating_and_complex_types():
    """Floating types plus complex64/complex128."""
    return _floating_and_complex_types


def floating_and_complex_types_and(*dtypes):
    """Floating-and-complex types plus the given (validated) extra dtypes."""
    return _floating_and_complex_types + _validate_dtypes(*dtypes)


_double_types = _dispatch_dtypes((torch.float64, torch.complex128))


def double_types():
    """Double-precision real and complex dtypes."""
    return _double_types
67
+
68
+
69
# NB: Does not contain uint16/uint32/uint64 for BC reasons
_integral_types = _dispatch_dtypes(
    (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
)


def integral_types():
    """Integer dtypes (signed plus uint8)."""
    return _integral_types


def integral_types_and(*dtypes):
    """Integral types plus the given (validated) extra dtypes."""
    return _integral_types + _validate_dtypes(*dtypes)


_all_types = _floating_types + _integral_types


def all_types():
    """All floating plus integral dtypes."""
    return _all_types


def all_types_and(*dtypes):
    """All types plus the given (validated) extra dtypes."""
    return _all_types + _validate_dtypes(*dtypes)


_complex_types = _dispatch_dtypes((torch.cfloat, torch.cdouble))


def complex_types():
    """complex64 and complex128."""
    return _complex_types


def complex_types_and(*dtypes):
    """Complex types plus the given (validated) extra dtypes."""
    return _complex_types + _validate_dtypes(*dtypes)


_all_types_and_complex = _all_types + _complex_types


def all_types_and_complex():
    """All real dtypes plus complex."""
    return _all_types_and_complex


def all_types_and_complex_and(*dtypes):
    """All-and-complex types plus the given (validated) extra dtypes."""
    return _all_types_and_complex + _validate_dtypes(*dtypes)


_all_types_and_half = _all_types + (torch.half,)


def all_types_and_half():
    """All real dtypes plus float16."""
    return _all_types_and_half
121
+
122
+
123
_float8_types = _dispatch_dtypes(
    (
        torch.float8_e4m3fn,
        torch.float8_e4m3fnuz,
        torch.float8_e5m2,
        torch.float8_e5m2fnuz,
    )
)


def float8_types():
    """The four float8 variants."""
    return _float8_types


def float8_types_and(*dtypes):
    """Float8 types plus the given (validated) extra dtypes."""
    return _float8_types + _validate_dtypes(*dtypes)


def all_types_complex_float8_and(*dtypes):
    """All real, complex, and float8 dtypes plus the given extras."""
    return _all_types + _complex_types + _float8_types + _validate_dtypes(*dtypes)


def custom_types(*dtypes):
    """Create a list of arbitrary dtypes"""
    return _empty_types + _validate_dtypes(*dtypes)
148
+
149
+
150
+ # The functions below are used for convenience in our test suite and thus have no corresponding C++ dispatch macro
151
+
152
+
153
+ # See AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS.
154
# See AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS.
def get_all_dtypes(
    include_half=True,
    include_bfloat16=True,
    include_bool=True,
    include_complex=True,
    include_complex32=False,
    include_qint=False,
) -> List[torch.dtype]:
    """Every dtype the test suite commonly iterates over, gated by flags."""
    result = get_all_int_dtypes()
    result += get_all_fp_dtypes(
        include_half=include_half, include_bfloat16=include_bfloat16
    )
    if include_bool:
        result.append(torch.bool)
    if include_complex:
        result += get_all_complex_dtypes(include_complex32)
    if include_qint:
        result += get_all_qint_dtypes()
    return result


def get_all_math_dtypes(device) -> List[torch.dtype]:
    """Dtypes math ops are tested with; half only on CUDA devices."""
    on_cuda = device.startswith("cuda")
    fp = get_all_fp_dtypes(include_half=on_cuda, include_bfloat16=False)
    return get_all_int_dtypes() + fp + get_all_complex_dtypes()
182
+
183
+
184
def get_all_complex_dtypes(include_complex32=False) -> List[torch.dtype]:
    """Complex dtypes; optionally also complex32 (chalf)."""
    dtypes = [torch.complex64, torch.complex128]
    if include_complex32:
        dtypes.insert(0, torch.complex32)
    return dtypes


def get_all_int_dtypes() -> List[torch.dtype]:
    """Integer dtypes (uint8 plus the signed ints)."""
    return [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]


def get_all_fp_dtypes(include_half=True, include_bfloat16=True) -> List[torch.dtype]:
    """Floating-point dtypes, with half/bfloat16 gated by flags."""
    dtypes = [torch.float32, torch.float64]
    if include_half:
        dtypes.append(torch.float16)
    if include_bfloat16:
        dtypes.append(torch.bfloat16)
    return dtypes


def get_all_qint_dtypes() -> List[torch.dtype]:
    """Quantized integer dtypes."""
    return [torch.qint8, torch.quint8, torch.qint32, torch.quint4x2, torch.quint2x4]


# Maps each real floating dtype to the complex dtype of matching precision.
float_to_corresponding_complex_type_map = {
    torch.float16: torch.complex32,
    torch.float32: torch.complex64,
    torch.float64: torch.complex128,
}
lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py ADDED
@@ -0,0 +1,1582 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ # Owner(s): ["oncall: distributed"]
3
+
4
+ import contextlib
5
+ import os
6
+ import re
7
+ import sys
8
+ import time
9
+ import warnings
10
+ from abc import ABC, abstractmethod
11
+ from contextlib import nullcontext
12
+ from copy import deepcopy
13
+ from enum import auto, Enum
14
+ from functools import wraps
15
+ from typing import (
16
+ Any,
17
+ Callable,
18
+ cast,
19
+ Dict,
20
+ List,
21
+ no_type_check,
22
+ Optional,
23
+ Tuple,
24
+ Type,
25
+ Union,
26
+ )
27
+ from unittest import mock
28
+
29
+ import torch
30
+ import torch.distributed as dist
31
+ import torch.nn as nn
32
+ import torch.nn.functional as F
33
+ from torch.distributed._composable import checkpoint
34
+ from torch.distributed.device_mesh import DeviceMesh
35
+ from torch.distributed.fsdp import (
36
+ CPUOffload,
37
+ fully_shard,
38
+ FullyShardedDataParallel as FSDP,
39
+ )
40
+ from torch.distributed.fsdp._common_utils import TrainingState
41
+ from torch.distributed.fsdp._fully_shard._fsdp_param_group import (
42
+ FSDPParamGroup,
43
+ RegisterPostBackwardFunction,
44
+ )
45
+ from torch.distributed.fsdp._init_utils import NO_RESHARD_AFTER_FORWARD_STRATEGIES
46
+ from torch.distributed.fsdp.fully_sharded_data_parallel import (
47
+ BackwardPrefetch,
48
+ MixedPrecision,
49
+ ShardingStrategy,
50
+ )
51
+ from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
52
+ from torch.distributed.fsdp.wrap import always_wrap_policy, ModuleWrapPolicy, wrap
53
+ from torch.distributed.tensor import distribute_tensor, DTensor, Shard
54
+ from torch.distributed.tensor.parallel import (
55
+ ColwiseParallel,
56
+ parallelize_module,
57
+ RowwiseParallel,
58
+ SequenceParallel,
59
+ )
60
+ from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer
61
+ from torch.nn.parallel.distributed import DistributedDataParallel as DDP
62
+ from torch.testing._internal.common_distributed import (
63
+ MultiProcessTestCase,
64
+ MultiThreadedTestCase,
65
+ run_subtests,
66
+ TEST_SKIPS,
67
+ )
68
+ from torch.testing._internal.common_utils import (
69
+ FILE_SCHEMA,
70
+ get_cycles_per_ms,
71
+ TEST_CUDA,
72
+ TEST_HPU,
73
+ )
74
+ from torch.utils._triton import has_triton
75
+
76
+
77
+ DEVICE_COUNT = 4 # default
78
+
79
+ if TEST_CUDA:
80
+ DEVICE_TYPE = "cuda"
81
+ DISTRIBUTED_BACKEND = "nccl"
82
+ DEVICE_COUNT = torch.cuda.device_count()
83
+ elif TEST_HPU:
84
+ DEVICE_TYPE = "hpu:0"
85
+ DISTRIBUTED_BACKEND = "hccl"
86
+ else:
87
+ DEVICE_TYPE = "cpu"
88
+ DISTRIBUTED_BACKEND = "gloo"
89
+ DEVICE_COUNT = 1
90
+
91
+
92
+ class FSDPInitMode(Enum):
93
+ # No FSDP wrapping
94
+ NO_FSDP = auto()
95
+ # FSDP recursive wrapping
96
+ RECURSIVE = auto()
97
+ # TODO: FSDP non-recursive wrapping
98
+ # NONRECURSIVE = auto()
99
+
100
+
101
+ class DEVICEInitMode(Enum):
102
+ # Move model to DEVICE before passing to the FSDP constructor
103
+ DEVICE_BEFORE = auto()
104
+ # Move model to DEVICE after passing to the FSDP constructor
105
+ DEVICE_AFTER = auto()
106
+ # Keep on CPU
107
+ DEVICE_NEVER = auto()
108
+
109
+
110
+ class FSDPTestModel(nn.Module, ABC):
111
+ """This defines the interface expected from all models used commonly for
112
+ FSDP unit tests."""
113
+
114
+ @abstractmethod
115
+ def get_input(self, device) -> Tuple[torch.Tensor, ...]:
116
+ """Returns an input for the model as as tuple."""
117
+ ...
118
+
119
+ @abstractmethod
120
+ def get_loss(self, input, output) -> torch.Tensor:
121
+ """Returns the loss given the input and output."""
122
+ ...
123
+
124
+ @abstractmethod
125
+ def run_backward(self, loss) -> None:
126
+ """Runs the backward pass (e.g. including ``loss.backward()``)."""
127
+ ...
128
+
129
+ @staticmethod
130
+ @abstractmethod
131
+ def init(*args: Any, **kwargs: Any) -> nn.Module:
132
+ """Initializes an instance of this model."""
133
+ ...
134
+
135
+
136
+ def _assert_module_states(
137
+ model: nn.Module,
138
+ process_group: dist.ProcessGroup,
139
+ assert_fn: Callable,
140
+ ):
141
+ """
142
+ All-gathers module states across ranks and calls ``assert_fn`` on each pair
143
+ of corresponding states from rank 0 and a nonzero rank. For example, if
144
+ ``assert_fn`` is ``self.assertEqual()``, then this checks that all module
145
+ states are equal across ranks.
146
+ """
147
+ # Include names for debugging convenience
148
+ named_module_states = [
149
+ (param_name, param.detach().cpu())
150
+ for param_name, param in model.named_parameters()
151
+ ]
152
+ named_module_states += [
153
+ (buffer_name, buffer.detach().cpu())
154
+ for buffer_name, buffer in model.named_buffers()
155
+ ]
156
+ world_size = dist.get_world_size(process_group)
157
+ olist = [None for _ in range(world_size)]
158
+ dist.all_gather_object(olist, named_module_states, group=process_group)
159
+ rank0_states = olist[0]
160
+ assert rank0_states is not None # mypy
161
+ for state in olist[1:]:
162
+ assert state is not None # mypy
163
+ for (_, p1), (_, p2) in zip(rank0_states, state):
164
+ assert_fn(p1, p2)
165
+
166
+
167
+ def get_devtype():
168
+ return torch.device(DEVICE_TYPE)
169
+
170
+
171
+ def _zero_model(
172
+ model: nn.Module,
173
+ zero_buffers: bool = False,
174
+ summon_full=True,
175
+ ):
176
+ """Zeros the parameters and optionally buffers of ``model`` in place."""
177
+ ctx = FSDP.summon_full_params(model) if summon_full else nullcontext()
178
+ with ctx:
179
+ for param in model.parameters():
180
+ with torch.no_grad():
181
+ param.zero_()
182
+ if zero_buffers:
183
+ for buffer in model.buffers():
184
+ with torch.no_grad():
185
+ buffer.zero_()
186
+
187
+
188
+ def _get_state_dict(model, cpu_offload=False, half=False):
189
+ if not cpu_offload:
190
+ model = model.to(DEVICE_TYPE)
191
+ if half:
192
+ model.half()
193
+
194
+ return model.state_dict()
195
+
196
+
197
+ def subtest_name(test_name_mapping, *args):
198
+ return "_".join(
199
+ [test_name_mapping[str(s)] if s is not None else "none" for s in args]
200
+ )
201
+
202
+
203
+ def _broadcast_state_dict(rank, state_dict):
204
+ # For non-FSDP roots, some parts of the model state on rank 0 may
205
+ # not be on CPU, so we move everything to CPU to avoid issues like:
206
+ # https://github.com/pytorch/pytorch/issues/77113.
207
+ for param_name, param in state_dict.items():
208
+ if param.device != torch.device("cpu"):
209
+ state_dict[param_name] = param.cpu()
210
+
211
+ olist = [state_dict if rank == 0 else None]
212
+ dist.broadcast_object_list(olist)
213
+ state_dict = cast(Dict[str, torch.Tensor], olist[0])
214
+ # Ensure that the state is on DEVICE
215
+ for param_name in state_dict.keys():
216
+ state_dict[param_name] = state_dict[param_name].to(DEVICE_TYPE)
217
+ return state_dict
218
+
219
+
220
+ def get_full_params(model: nn.Module, recurse: bool = True):
221
+ """
222
+ Returns the full unsharded parameters of ``model``. Any FSDP-managed
223
+ parameters offloaded to CPU are moved to GPU in the returned list.
224
+
225
+ Args:
226
+ recurse (bool): If ``False``, only unshards the parameters immediate to
227
+ ``model``; if ``True``, recurses through the module hierarchy
228
+ rooted at ``model``.
229
+ """
230
+ with FSDP.summon_full_params(model, recurse=recurse):
231
+ return deepcopy(list(model.parameters()))
232
+
233
+
234
+ def _move_to_device(model: nn.Module, move_to_device: bool):
235
+ return model.to(DEVICE_TYPE) if move_to_device else model
236
+
237
+
238
+ def _maybe_wrap_fsdp(model: nn.Module, wrap_fsdp: bool, *args, **kwargs):
239
+ return model if not wrap_fsdp else FSDP(model, *args, **kwargs)
240
+
241
+
242
+ class DummyProcessGroup:
243
+ def __init__(self, rank: int, size: int):
244
+ self._rank = rank
245
+ self._size = size
246
+
247
+ def rank(self) -> int:
248
+ return self._rank
249
+
250
+ def size(self) -> int:
251
+ return self._size
252
+
253
+ def allreduce(self, *args, **kwargs):
254
+ dist_wait = mock.Mock()
255
+
256
+ def get_future():
257
+ future: torch.futures.Future = torch.futures.Future()
258
+ future.set_result(1)
259
+ return future
260
+
261
+ dist_wait.get_future = get_future
262
+ return dist_wait
263
+
264
+
265
class TransformerWithSharedParams(FSDPTestModel):
    """Small transformer for FSDP tests whose embedding and output
    projection share a single weight tensor."""

    def __init__(
        self,
        group: dist.ProcessGroup,
        device_init_mode: DEVICEInitMode,
        add_bn: bool,
        deterministic: bool,
    ):
        super().__init__()
        self.rank = group.rank()
        self.world_size = group.size()
        if deterministic:
            torch.manual_seed(0)
        d_vocab, d_model = 23, 16

        self.embed_tokens = nn.Embedding(d_vocab, d_model)
        self.transformer = nn.Transformer(
            d_model=d_model,
            num_encoder_layers=2,
            num_decoder_layers=2,
            dim_feedforward=8,
            dropout=0.1,
        )
        self.output_proj = nn.Linear(d_model, d_vocab)

        # Tie the embedding and output-projection weights.
        self.output_proj.weight = self.embed_tokens.weight
        self.register_buffer(
            "vocab_bias", self.embed_tokens.weight.new_ones((d_model,))
        )
        self.register_buffer(
            "long_buffer",
            torch.zeros_like(self.vocab_bias, dtype=torch.long),  # type: ignore[arg-type]
        )

        self.bs = 2
        self.bn = torch.nn.BatchNorm1d(self.bs) if add_bn else torch.nn.Identity()
        if device_init_mode == DEVICEInitMode.DEVICE_BEFORE:
            self = self.to(DEVICE_TYPE)
        if deterministic:
            self.eval()

    def get_input(self, device):
        # Seed per rank so inputs are deterministic but rank-dependent.
        torch.manual_seed(1 + self.rank)
        src = torch.arange(12, device=device).view(6, self.bs)  # T x B
        tgt = torch.arange(self.bs * 4, device=device).view(4, self.bs)  # T x B
        return (src, tgt)

    def forward(self, src_ids, tgt_ids):
        src = self.embed_tokens(src_ids)
        src = src + self.vocab_bias + self.long_buffer.type_as(src)  # type: ignore[operator]
        tgt = self.bn(self.embed_tokens(tgt_ids))
        return self.output_proj(self.transformer(src, tgt))

    def get_loss(self, input, output):
        _, tgt = input
        return nn.functional.cross_entropy(
            output.view(-1, output.size(-1)), tgt.view(-1), reduction="sum"
        )

    def run_backward(self, loss):
        loss.backward()

    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        device_init_mode: DEVICEInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
        add_bn: bool = True,
    ) -> Union[nn.Module, FSDP]:
        """
        Initializes a :class:`TransformerWithSharedParams` instance.

        Args:
            fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
                any modules with FSDP. If ``RECURSIVE``, then wraps with
                top-level FSDP. By default, the top-level FSDP uses the
                ``ModuleWrapPolicy`` for encoder and decoder layers, but a
                different auto wrap policy may be specified via
                ``fsdp_kwargs``.
            device_init_mode (DEVICEInitMode): Determines model movement to DEVICE.
            fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
                forwarded to the FSDP constructor.
            deterministic (bool): Whether to make the model deterministic
                across constructions.
            add_bn (bool): Whether to include batch norm in the model.

        Raises:
            ValueError: If ``fsdp_init_mode`` is not a supported mode.
        """
        if fsdp_kwargs is None:
            fsdp_kwargs = {}
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            pg = group[0] if isinstance(group, tuple) else group
            return TransformerWithSharedParams(
                pg, device_init_mode, add_bn, deterministic
            )
        if fsdp_init_mode == FSDPInitMode.RECURSIVE:
            # Default to wrapping each encoder/decoder layer unless the
            # caller supplied an explicit auto wrap policy.
            auto_wrap_policy = fsdp_kwargs.pop(
                "auto_wrap_policy",
                ModuleWrapPolicy({TransformerEncoderLayer, TransformerDecoderLayer}),
            )
            hybrid_strategy = fsdp_kwargs.get("sharding_strategy") in (
                ShardingStrategy.HYBRID_SHARD,
                ShardingStrategy._HYBRID_SHARD_ZERO2,
            )
            # With a hybrid sharding strategy and a single (non-tuple) group,
            # do not pass a process group explicitly.
            fsdp_pg = (
                None if hybrid_strategy and not isinstance(group, tuple) else group
            )
            tformer_pg = group[0] if isinstance(group, tuple) else group
            fsdp_model = FSDP(
                TransformerWithSharedParams(
                    tformer_pg, device_init_mode, add_bn, deterministic
                ),
                fsdp_pg,
                auto_wrap_policy=auto_wrap_policy,
                **fsdp_kwargs,
            )
            if device_init_mode == DEVICEInitMode.DEVICE_AFTER:
                fsdp_model = fsdp_model.to(DEVICE_TYPE)
            return fsdp_model
        raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")

    def get_ignored_modules(self):
        return [self.transformer]
411
+
412
+
413
class NestedWrappedModule(FSDPTestModel):
    """Sequential model with nested submodules that can be individually
    FSDP-wrapped."""

    def __init__(
        self,
        group: dist.ProcessGroup,
        wrap_fsdp: bool,
        device_init_mode: DEVICEInitMode,
        deterministic: bool,
        **fsdp_kwargs,
    ):
        super().__init__()
        self.rank = group.rank()
        self.world_size = group.size()
        move_to_device = device_init_mode == DEVICEInitMode.DEVICE_BEFORE

        def _maybe_wrap(layer):
            # Wrap in FSDP only when requested; otherwise pass through.
            return FSDP(layer, group, **fsdp_kwargs) if wrap_fsdp else layer

        if deterministic:
            torch.manual_seed(0)
        self.module = nn.Sequential(
            _move_to_device(nn.Linear(8, 4), move_to_device),
            _maybe_wrap(
                nn.Sequential(
                    _maybe_wrap(_move_to_device(nn.Linear(4, 16), move_to_device)),
                    _move_to_device(nn.Linear(16, 16), move_to_device),
                ),
            ),
            _maybe_wrap(_move_to_device(nn.Linear(16, 4), move_to_device)),
            _move_to_device(nn.Linear(4, 8), move_to_device),
        )

    def get_input(self, device):
        # Seed per rank so inputs are deterministic but rank-dependent.
        torch.manual_seed(1 + self.rank)
        return (torch.rand(4, 8, device=device),)

    def forward(self, x):
        return self.module(x)

    def get_loss(self, input, output):
        return output.sum()

    def run_backward(self, loss):
        loss.backward()

    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        device_init_mode: DEVICEInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
    ) -> nn.Module:
        """
        Initializes a :class:`NestedWrappedModule` instance.

        Args:
            fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
                any modules with FSDP. If ``RECURSIVE``, then wraps some nested
                modules with FSDP but not the top-level module. The model may
                later be wrapped with a top-level FSDP external to this method
                if desired.
            device_init_mode (DEVICEInitMode): Determines model movement to DEVICE.
            fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
                forwarded to the FSDP constructor.
            deterministic (bool): Whether to make the model deterministic
                across constructions.

        Raises:
            ValueError: If ``fsdp_init_mode`` is not a supported mode.
        """
        fsdp_kwargs = fsdp_kwargs or {}
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            return NestedWrappedModule(
                group,
                wrap_fsdp=False,
                device_init_mode=device_init_mode,
                deterministic=deterministic,
            )
        if fsdp_init_mode == FSDPInitMode.RECURSIVE:
            # Wraps nested modules but not the top-level module.
            fsdp_model = NestedWrappedModule(
                group,
                wrap_fsdp=True,
                device_init_mode=device_init_mode,
                deterministic=deterministic,
                **fsdp_kwargs,
            )
            if device_init_mode == DEVICEInitMode.DEVICE_AFTER:
                fsdp_model = fsdp_model.to(DEVICE_TYPE)
            return fsdp_model
        raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
505
+
506
+
507
class AlwaysWrapNestedWrappedModule(NestedWrappedModule):
    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        device_init_mode: DEVICEInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
    ):
        """
        Initializes a :class:`NestedWrappedModule` instance, but unlike
        :meth:`NestedWrappedModule.init`, for the ``RECURSIVE`` init mode, this
        wraps with top-level FSDP and the ``always_wrap_policy()`` auto wrap
        policy.

        Raises:
            ValueError: If ``fsdp_init_mode`` is not a supported mode.
        """
        # Construct the unwrapped model first; top-level wrapping (if any)
        # is applied below.
        model = super(
            AlwaysWrapNestedWrappedModule, AlwaysWrapNestedWrappedModule
        ).init(
            group=group,
            fsdp_init_mode=FSDPInitMode.NO_FSDP,
            device_init_mode=device_init_mode,
            fsdp_kwargs=fsdp_kwargs,
            deterministic=deterministic,
        )
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            return model
        elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
            fsdp_kwargs = fsdp_kwargs or {}
            fsdp_model = FSDP(model, auto_wrap_policy=always_wrap_policy, **fsdp_kwargs)
            if device_init_mode == DEVICEInitMode.DEVICE_AFTER:
                fsdp_model = fsdp_model.to(DEVICE_TYPE)
            return fsdp_model
        # Fix: previously this fell through and silently returned `None` for
        # an unknown mode; raise explicitly, matching the sibling `init()`s.
        raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
539
+
540
+
541
class NonUniformReqGradNWM(NestedWrappedModule):
    """Variant of :class:`NestedWrappedModule` whose last two linear layers
    are FSDP-wrapped together inside one ``nn.Sequential``, so that freezing
    all other parameters leaves some ranks without (non-zero sized)
    trainable parameter shards."""

    def __init__(
        self,
        group: dist.ProcessGroup,
        wrap_fsdp: bool,
        device_init_mode: DEVICEInitMode,
        deterministic: bool,
        **fsdp_kwargs,
    ):
        # Deliberately skip `NestedWrappedModule.__init__`: the only
        # difference from it is that the last two `nn.Linear` layers live in
        # a single wrapped `nn.Sequential`, which places all elements of
        # those two parameters on a single rank. Freezing every other
        # parameter lets tests verify that `ShardedGradScaler` handles ranks
        # holding no (non-zero sized) parameter shards.
        super(NestedWrappedModule, self).__init__()
        self.rank = group.rank()
        self.world_size = group.size()
        move_to_device = device_init_mode == DEVICEInitMode.DEVICE_BEFORE

        def _maybe_wrap(layer):
            return FSDP(layer, group, **fsdp_kwargs) if wrap_fsdp else layer

        if deterministic:
            torch.manual_seed(0)
        self.module = nn.Sequential(
            _move_to_device(nn.Linear(8, 4), move_to_device),
            _maybe_wrap(
                nn.Sequential(
                    _maybe_wrap(_move_to_device(nn.Linear(4, 16), move_to_device)),
                    _move_to_device(nn.Linear(16, 16), move_to_device),
                ),
            ),
            _maybe_wrap(
                nn.Sequential(
                    _move_to_device(nn.Linear(16, 4), move_to_device),
                    _move_to_device(nn.Linear(4, 8), move_to_device),
                ),
            ),
        )

    @staticmethod
    def _set_nonuniform_req_grad(model, req_grad_mask) -> None:
        # Freeze every parameter whose name does not match the mask.
        for name, param in model.named_parameters():
            if not re.match(req_grad_mask, name):
                param.requires_grad_(False)

    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        device_init_mode: DEVICEInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
    ):
        """
        Initializes a :class:`NonUniformReqGradNWM` instance. Unlike
        :meth:`NestedWrappedModule.init`, it wraps a second
        :class:`torch.nn.Sequential` container to enable the desired
        non-uniform ``requires_grad`` ``use_orig_params=True`` tests. For both
        ``RECURSIVE`` and ``NO_FSDP`` init modes, freezes all parameters
        except the last two to validate ``ShardedGradScaler`` support for
        ranks with no (non-zero sized) local shards in FSDP
        ``use_orig_params=True`` mode.

        Raises:
            ValueError: If ``fsdp_init_mode`` is not a supported mode.
        """
        # The parameters that should remain unfrozen are in `module.2.1`; the
        # pattern matches their names with or without an interstitial FSDP
        # module indicator (`_fsdp_wrapped_module`) present.
        req_grad_pattern = re.compile(r"module\.2.*\.1.*")
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            model = NonUniformReqGradNWM(
                group,
                wrap_fsdp=False,
                device_init_mode=device_init_mode,
                deterministic=deterministic,
            )
            NonUniformReqGradNWM._set_nonuniform_req_grad(model, req_grad_pattern)
            return model
        if fsdp_init_mode == FSDPInitMode.RECURSIVE:
            fsdp_kwargs = fsdp_kwargs or {}
            fsdp_model = NonUniformReqGradNWM(
                group,
                wrap_fsdp=True,
                device_init_mode=device_init_mode,
                deterministic=deterministic,
                **fsdp_kwargs,
            )
            if device_init_mode == DEVICEInitMode.DEVICE_AFTER:
                fsdp_model = fsdp_model.to(DEVICE_TYPE)
            NonUniformReqGradNWM._set_nonuniform_req_grad(fsdp_model, req_grad_pattern)
            return fsdp_model
        raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
635
+
636
+
637
class ModuleWithDelay(FSDPTestModel):
    """Wraps a :class:`FSDPTestModel` to optionally inject a delay after
    computing the loss and/or before the gradient reduce-scatter."""

    def __init__(
        self,
        module: nn.Module,
        delay_after_loss_ms: int,
        delay_before_reduction_ms: int,
    ):
        super().__init__()
        self.delay_after_loss_ms = delay_after_loss_ms
        self.delay_before_reduction_ms = delay_before_reduction_ms
        self.module = module

    def get_input(self, device):
        return self.module.get_input(device)  # type: ignore[operator]

    def forward(self, x):
        return self.module(x)

    def get_loss(self, input, output):
        loss = self.module.get_loss(input, output)  # type: ignore[operator]
        if self.delay_after_loss_ms > 0:
            # Host-side sleep on HPU; device-side sleep on CUDA.
            if TEST_HPU:
                time.sleep(self.delay_after_loss_ms / 1000)
            elif TEST_CUDA:
                torch.cuda._sleep(int(self.delay_after_loss_ms * get_cycles_per_ms()))
        return loss

    def run_backward(self, loss):
        orig_reduce_scatter = torch.distributed.reduce_scatter_tensor

        def _delayed_reduce_scatter(*args, **kwargs):
            # Optionally stall before delegating to the real reduce-scatter.
            if self.delay_before_reduction_ms > 0:
                if TEST_CUDA:
                    torch.cuda._sleep(
                        int(self.delay_before_reduction_ms * get_cycles_per_ms())
                    )
                elif TEST_HPU:
                    time.sleep(self.delay_before_reduction_ms / 1000)
            return orig_reduce_scatter(*args, **kwargs)

        with mock.patch(
            "torch.distributed.reduce_scatter_tensor", _delayed_reduce_scatter
        ):
            self.module.run_backward(loss)  # type: ignore[operator]

    @staticmethod
    def init(
        module_class: Type[FSDPTestModel],
        *model_args: Any,
        delay_after_loss_ms: int,
        delay_before_reduction_ms: int,
        **model_kwargs: Any,
    ):
        """
        Args:
            module_class (Type[FSDPTestModel]): Wrapped module class to which
                to add delays.
            model_args: Positional arguments forwarded to the ``module_class``
                ``init()``.
            delay_after_loss_ms (int): Delay after computing the loss/before
                the optimizer step (in ms).
            delay_before_reduction_ms (int): Delay before reduce-scattering
                gradients (in ms).
            model_kwargs: Keyword arguments forwarded to the ``module_class``
                ``init()``.
        """
        return ModuleWithDelay(
            module_class.init(*model_args, **model_kwargs),
            delay_after_loss_ms,
            delay_before_reduction_ms,
        )
712
+
713
+
714
class NestedWrappedModuleWithDelay(ModuleWithDelay):
    """:class:`NestedWrappedModule` pre-wrapped in :class:`ModuleWithDelay`."""

    @staticmethod
    def init(  # type: ignore[override]
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        device_init_mode: DEVICEInitMode = DEVICEInitMode.DEVICE_AFTER,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
        delay_after_loss_ms: int = 0,
        delay_before_reduction_ms: int = 0,
    ):
        return ModuleWithDelay.init(
            NestedWrappedModule,
            group=group,
            fsdp_init_mode=fsdp_init_mode,
            device_init_mode=device_init_mode,
            fsdp_kwargs=fsdp_kwargs,
            deterministic=deterministic,
            delay_after_loss_ms=delay_after_loss_ms,
            delay_before_reduction_ms=delay_before_reduction_ms,
        )
735
+
736
+
737
class DummyDDP(nn.Module):
    """Transparent wrapper exposing a ``.module`` attribute like DDP does,
    but forwarding calls directly with no distributed logic."""

    def __init__(self, module):
        super().__init__()
        self.module = module

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)
744
+
745
+
746
class MixtureOfExperts(NestedWrappedModule):
    """Nested module in which each rank owns its own expert layer (wrapped in
    a world-size-1 process group so it is not sharded) while the remaining
    layers are shared across ranks."""

    def __init__(
        self,
        group: dist.ProcessGroup,
        wrap_fsdp: bool,
        device_init_mode: DEVICEInitMode,
        delay_before_free_ms: int,
        deterministic: bool,
        **fsdp_kwargs,
    ):
        super().__init__(
            group=group,
            wrap_fsdp=wrap_fsdp,
            device_init_mode=device_init_mode,
            deterministic=deterministic,
        )
        self.group = group
        self.delay_before_free_ms = delay_before_free_ms
        self.wrap_fsdp = wrap_fsdp
        self.move_to_device = device_init_mode == DEVICEInitMode.DEVICE_BEFORE
        if deterministic:
            # Give each rank different expert parameters
            torch.manual_seed(42 + self.rank)
        d_expert = 23
        d_shared = 12
        d_input = 8
        expert = _move_to_device(nn.Linear(d_expert, d_shared), self.move_to_device)

        self.num_expert_params = sum(p.numel() for p in expert.parameters())
        for p in expert.parameters():
            # Tag expert parameters so `run_backward` can skip reducing them.
            p.expert = True  # type: ignore[attr-defined]

        if deterministic:
            # Keep all other parameters the same across ranks
            torch.manual_seed(0)

        shared = _move_to_device(nn.Linear(d_shared, d_expert), self.move_to_device)

        if wrap_fsdp:
            # World-size-1 process group for the expert params means no shard.
            expert_group = torch.distributed.new_group([group.rank()])
            expert = FSDP(expert, expert_group, **fsdp_kwargs)  # type: ignore[assignment]
            shared = FSDP(shared, group, **fsdp_kwargs)  # type: ignore[assignment]

        self.module = nn.Sequential(
            _move_to_device(nn.Linear(d_input, d_shared), self.move_to_device),
            shared,
            expert,
            _move_to_device(nn.Linear(d_shared, d_input), self.move_to_device),
        )

    def forward(self, x):
        if self.delay_before_free_ms > 0:
            expert = self.module[2]
            if isinstance(expert, FSDP):
                orig_reshard = torch.distributed.fsdp._runtime_utils._reshard

                def _delayed_reshard(*args, **kwargs):
                    if TEST_CUDA:
                        torch.cuda._sleep(
                            int(self.delay_before_free_ms * get_cycles_per_ms())
                        )
                    elif TEST_HPU:
                        time.sleep(self.delay_before_free_ms / 1000)
                    return orig_reshard(*args, **kwargs)

                # This patch covers any `import torch..._reshard` uses.
                with mock.patch(
                    "torch.distributed.fsdp._runtime_utils._reshard", _delayed_reshard
                ):
                    return self.module(x)

        return self.module(x)

    def run_backward(self, loss):
        loss.backward()
        # Manually reduce gradients if not wrapped in FullyShardedDataParallel
        if not self.wrap_fsdp:
            with torch.no_grad():
                for p in self.parameters():
                    if hasattr(p, "expert"):
                        continue  # these params don't need grad reduction
                    if p.grad is not None:
                        p.grad.div_(self.world_size)
                        torch.distributed.all_reduce(p.grad, group=self.group)

    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        device_init_mode: DEVICEInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
        delay_before_free_ms: int = 0,
    ):
        """
        Initializes a :class:`MixtureOfExperts` instance.

        Args:
            fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
                any modules with FSDP. If ``RECURSIVE``, then wraps some nested
                modules with FSDP, including the expert and shared layers, but
                not the top-level module. The model may later be wrapped with a
                top-level FSDP external to this method if desired.
            device_init_mode (DEVICEInitMode): Determines model movement to DEVICE.
            fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
                forwarded to the FSDP constructor.
            deterministic (bool): Whether to make the model deterministic
                across constructions.
            delay_before_free_ms (int): Delay before resharding expert
                parameters in the forward pass (in ms).

        Raises:
            ValueError: If ``fsdp_init_mode`` is not a supported mode.
        """
        fsdp_kwargs = fsdp_kwargs or {}
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            return MixtureOfExperts(
                group,
                wrap_fsdp=False,
                device_init_mode=device_init_mode,
                delay_before_free_ms=delay_before_free_ms,
                deterministic=deterministic,
            )
        if fsdp_init_mode == FSDPInitMode.RECURSIVE:
            # Wraps nested modules but not the top-level module.
            fsdp_model = MixtureOfExperts(
                group,
                wrap_fsdp=True,
                device_init_mode=device_init_mode,
                delay_before_free_ms=delay_before_free_ms,
                deterministic=deterministic,
                **fsdp_kwargs,
            )
            if device_init_mode == DEVICEInitMode.DEVICE_AFTER:
                fsdp_model = fsdp_model.to(DEVICE_TYPE)
            return fsdp_model
        raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
885
+
886
+
887
class MLP(nn.Module):
    """Two-layer MLP with ReLU activations and an optional additive buffer."""

    def __init__(
        self,
        dim: int,
        device: Optional[torch.device] = None,
        *,
        bias: bool = True,
        with_buffer: bool = False,
        dim_multiplier: int = 4,
    ):
        super().__init__()
        hidden_dim = dim_multiplier * dim
        self.in_proj = nn.Linear(dim, hidden_dim, device=device, bias=bias)
        self.out_proj = nn.Linear(hidden_dim, dim, device=device, bias=bias)
        if with_buffer:
            self.register_buffer("buffer", torch.randn((dim,), device=device))
        else:
            self.buffer = None

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = F.relu(self.in_proj(x))
        out = F.relu(self.out_proj(out))
        return out if self.buffer is None else out + self.buffer

    def reset_parameters(self):
        # Only the buffer is re-randomized here; the linear layers keep
        # their existing parameters.
        if self.buffer is not None:
            torch.nn.init.normal_(self.buffer)
917
+
918
+
919
class MLPStack(nn.Sequential):
    """Stack of three MLPs (optionally followed by a bias-free LayerNorm when
    using sequence parallelism), with a helper applying TP then FSDP."""

    def __init__(self, mlp_dim: int, *, with_seq_parallel: bool = False):
        layers: List[nn.Module] = [
            # A multiplier of 3 exercises the uneven-sharding case
            MLP(mlp_dim, dim_multiplier=3),
            MLP(mlp_dim),
            MLP(mlp_dim, dim_multiplier=3),
        ]
        if with_seq_parallel:
            layers.append(nn.LayerNorm(mlp_dim, bias=False))
        super().__init__(*layers)
        self.with_seq_parallel = with_seq_parallel

    def parallelize(
        self,
        tp_mesh: DeviceMesh,
        dp_mesh: DeviceMesh,
        use_activation_checkpointing: bool,
        **fsdp_kwargs,
    ) -> "MLPStack":
        # `use_local_output=False` keeps outputs as DTensors to preserve
        # uneven activation dims.
        last_out_proj = (
            RowwiseParallel(output_layouts=Shard(1))
            if self.with_seq_parallel
            else RowwiseParallel()
        )
        plan = {
            "0.in_proj": ColwiseParallel(use_local_output=False),
            "0.out_proj": RowwiseParallel(use_local_output=False),
            "1.in_proj": ColwiseParallel(use_local_output=False),
            "1.out_proj": RowwiseParallel(use_local_output=False),
            "2.in_proj": ColwiseParallel(use_local_output=False),
            "2.out_proj": last_out_proj,
        }
        if self.with_seq_parallel:
            plan["3"] = SequenceParallel(sequence_dim=1)
        parallelize_module(self, device_mesh=tp_mesh, parallelize_plan=plan)
        for child in self:
            if isinstance(child, nn.LayerNorm):
                continue
            if use_activation_checkpointing:
                checkpoint(child)
            fully_shard(child, mesh=dp_mesh, **fsdp_kwargs)
        fully_shard(self, mesh=dp_mesh, **fsdp_kwargs)
        return self
962
+
963
+
964
class DoubleLinear(nn.Module):
    """
    This can be used for returning multiple outputs from a module
    (``use_second_linear=True``) or for having an unused module (``False``).
    """

    def __init__(self, dim: int, use_second_linear: bool = True):
        super().__init__()
        self.lin1 = nn.Linear(dim, dim)
        self.lin2 = nn.Linear(dim, dim)
        self.relu = nn.ReLU()
        self.use_second_linear = use_second_linear

    def forward(
        self, x: torch.Tensor
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
        first = self.relu(self.lin1(x))
        if not self.use_second_linear:
            return first
        return first, self.relu(self.lin2(x))
983
+
984
+
985
+ # NOTE: For these patch methods, if we want safety under multi-threading (e.g.
986
+ # when using multi-threaded process group), then we want:
987
+ # (1) a barrier immediately after reading the original value to ensure that all
988
+ # threads see the same original value
989
+ # (2) a barrier immediately before restoring the original value to ensure that
990
+ # all threads use the patched value inside the context
991
@contextlib.contextmanager
def patch_all_gather(new_all_gather_into_tensor: Callable):
    """Temporarily replace ``dist.all_gather_into_tensor``, with barriers so
    all threads agree on the original and patched values."""
    saved = dist.all_gather_into_tensor
    dist.barrier()  # every thread reads the original before anyone patches
    dist.all_gather_into_tensor = new_all_gather_into_tensor
    try:
        yield
    finally:
        dist.barrier()  # every thread stops using the patch before restore
        dist.all_gather_into_tensor = saved
1001
+
1002
+
1003
@contextlib.contextmanager
def patch_reduce_scatter(new_reduce_scatter_tensor: Callable):
    """Temporarily replace ``dist.reduce_scatter_tensor``, with barriers so
    all threads agree on the original and patched values."""
    saved = dist.reduce_scatter_tensor
    dist.barrier()  # every thread reads the original before anyone patches
    dist.reduce_scatter_tensor = new_reduce_scatter_tensor
    try:
        yield
    finally:
        dist.barrier()  # every thread stops using the patch before restore
        dist.reduce_scatter_tensor = saved
1013
+
1014
+
1015
@contextlib.contextmanager
def patch_all_reduce(new_all_reduce: Callable):
    """Temporarily replace ``dist.all_reduce``, with barriers so all threads
    agree on the original and patched values."""
    saved = dist.all_reduce
    dist.barrier()  # every thread reads the original before anyone patches
    dist.all_reduce = new_all_reduce
    try:
        yield
    finally:
        dist.barrier()  # every thread stops using the patch before restore
        dist.all_reduce = saved
1025
+
1026
+
1027
@no_type_check
@contextlib.contextmanager
def patch_unshard(new_unshard: Callable):
    """Temporarily replace ``FSDPParamGroup.unshard``, with barriers so all
    threads agree on the original and patched values."""
    saved = FSDPParamGroup.unshard
    dist.barrier()  # every thread reads the original before anyone patches
    FSDPParamGroup.unshard = new_unshard
    try:
        yield
    finally:
        dist.barrier()  # every thread stops using the patch before restore
        FSDPParamGroup.unshard = saved
1038
+
1039
+
1040
@no_type_check
@contextlib.contextmanager
def patch_reshard(new_reshard: Callable):
    """Temporarily replace ``FSDPParamGroup.reshard``, with barriers so all
    threads agree on the original and patched values."""
    saved = FSDPParamGroup.reshard
    dist.barrier()  # every thread reads the original before anyone patches
    FSDPParamGroup.reshard = new_reshard
    try:
        yield
    finally:
        dist.barrier()  # every thread stops using the patch before restore
        FSDPParamGroup.reshard = saved
1051
+
1052
+
1053
@no_type_check
@contextlib.contextmanager
def patch_post_backward(new_post_backward: Callable):
    """Temporarily replace ``FSDPParamGroup.post_backward``, with barriers so
    all threads agree on the original and patched values."""
    saved = FSDPParamGroup.post_backward
    dist.barrier()  # every thread reads the original before anyone patches
    FSDPParamGroup.post_backward = new_post_backward
    try:
        yield
    finally:
        dist.barrier()  # every thread stops using the patch before restore
        FSDPParamGroup.post_backward = saved
1064
+
1065
+
1066
@no_type_check
@contextlib.contextmanager
def patch_register_post_backward_hook_backward(new_backward: Callable):
    """Temporarily replace ``RegisterPostBackwardFunction.backward``, with
    barriers so all threads agree on the original and patched values."""
    saved = RegisterPostBackwardFunction.backward
    dist.barrier()  # every thread reads the original before anyone patches
    RegisterPostBackwardFunction.backward = new_backward
    try:
        yield
    finally:
        dist.barrier()  # every thread stops using the patch before restore
        RegisterPostBackwardFunction.backward = saved
1077
+
1078
+
1079
def reduce_scatter_with_assert(
    cls,
    orig_reduce_scatter: Callable,
    assert_fn: Callable,  # `assert_fn(output: Tensor)`
    *args: Any,
    **kwargs: Any,
):
    """Run ``assert_fn`` on the reduce-scatter output tensor, then delegate
    to ``orig_reduce_scatter`` with the unmodified arguments."""
    if args:
        output = args[0]
    elif "output" in kwargs:
        output = kwargs["output"]
    else:
        raise AssertionError(
            f"Cannot get reduce-scatter output from\nargs: {args}\nkwargs: {kwargs}"
        )
    assert_fn(output)
    return orig_reduce_scatter(*args, **kwargs)
1096
+
1097
+
1098
def check_sharded_parity(
    cls,  # unit test class
    replicated_module: nn.Module,
    sharded_module: nn.Module,
    prefixes_to_ignore: Tuple[str, ...] = (),
):
    """Assert that each sharded (DTensor) parameter and its gradient in
    ``sharded_module`` matches the corresponding replicated parameter after
    re-sharding the replicated values with ``distribute_tensor``."""
    param_pairs = zip(
        replicated_module.named_parameters(), sharded_module.named_parameters()
    )
    for (replicated_name, replicated_param), (sharded_name, sharded_param) in param_pairs:
        # Strip ignorable prefixes so names line up.
        clean_sharded_name = sharded_name
        for prefix in prefixes_to_ignore:
            clean_sharded_name = clean_sharded_name.replace(prefix, "")
        cls.assertEqual(replicated_name, clean_sharded_name)
        cls.assertIsInstance(sharded_param, DTensor)
        assert isinstance(sharded_param, DTensor)  # mypy
        mesh, placements = sharded_param.device_mesh, sharded_param.placements
        if tuple(placements) == (Shard(0), Shard(0)):
            raise AssertionError(
                "FSDP's (Shard(0), Shard(0)) layout differs from distribute_tensor(), "
                "so we cannot check for equality using it"
            )
        sharded_ref_param = distribute_tensor(replicated_param, mesh, placements)
        cls.assertEqual(sharded_param.to_local(), sharded_ref_param.to_local())
        if replicated_param.grad is None:
            cls.assertIsNone(sharded_param.grad)
            continue
        cls.assertIsNotNone(sharded_param.grad)
        sharded_ref_grad = distribute_tensor(replicated_param.grad, mesh, placements)
        cls.assertIsInstance(sharded_param.grad, DTensor)
        assert isinstance(sharded_param.grad, DTensor)  # mypy
        cls.assertEqual(sharded_param.grad.to_local(), sharded_ref_grad.to_local())
1129
+
1130
+
1131
class FSDPTestMultiThread(MultiThreadedTestCase):
    """FSDP test base that simulates ranks with threads rather than
    processes, resetting dynamo state around each per-thread test."""

    @property
    def world_size(self):
        return DEVICE_COUNT

    def setUp(self):
        super().setUp()
        self._spawn_threads()

    def run_subtests(self, *args, **kwargs):
        # Delegate to the module-level `run_subtests` helper.
        return run_subtests(self, *args, **kwargs)

    def perThreadSetUp(self):
        torch._dynamo.reset()

    def perThreadTearDown(self):
        torch._dynamo.reset()
1148
+
1149
+
1150
+ class FSDPTest(MultiProcessTestCase):
1151
+ def setUp(self):
1152
+ super().setUp()
1153
+ # Set TORCH_NCCL_DESYNC_DEBUG=0 to disable the NCCL `workCleanupLoop()`,
1154
+ # which can cause unit test flakiness:
1155
+ # https://github.com/pytorch/pytorch/issues/90848
1156
+ os.environ["TORCH_NCCL_DESYNC_DEBUG"] = "0"
1157
+ self._spawn_processes()
1158
+
1159
+ @property
1160
+ def world_size(self):
1161
+ return DEVICE_COUNT
1162
+
1163
+ @property
1164
+ def process_group(self):
1165
+ return dist.distributed_c10d._get_default_group()
1166
+
1167
+ @property
1168
+ def destroy_pg_upon_exit(self) -> bool:
1169
+ # Overriding base test class: do not auto destroy PG upon exit.
1170
+ return False
1171
+
1172
+ @property
1173
+ def init_method(self):
1174
+ return f"{FILE_SCHEMA}{self.file_name}"
1175
+
1176
+ def _check_cpu_offload(self, fsdp_model, cpu_offload):
1177
+ self.assertEqual(cpu_offload, fsdp_model.cpu_offload)
1178
+
1179
+ def _check_backward_prefetch(self, fsdp_model, backward_prefetch):
1180
+ self.assertEqual(backward_prefetch, fsdp_model.backward_prefetch)
1181
+
1182
+ def _check_forward_prefetch(self, fsdp_model, forward_prefetch):
1183
+ self.assertEqual(forward_prefetch, fsdp_model.forward_prefetch)
1184
+
1185
+ def run_subtests(self, *args, **kwargs):
1186
+ return run_subtests(self, *args, **kwargs)
1187
+
1188
+ @classmethod
1189
+ def _run(cls, rank, test_name, file_name, pipe, **kwargs):
1190
+ self = cls(test_name)
1191
+ self.rank = rank
1192
+ self.file_name = file_name
1193
+ fake_pg = kwargs.get("fake_pg", False)
1194
+
1195
+ print(f"dist init r={self.rank}, world={self.world_size}")
1196
+
1197
+ # Specify gloo backend to make 'init_process_group()' succeed,
1198
+ # Actual tests will be skipped if there is no enough GPUs.
1199
+ try:
1200
+ if fake_pg:
1201
+ store = torch.testing._internal.distributed.fake_pg.FakeStore()
1202
+ dist.init_process_group(
1203
+ backend="fake",
1204
+ world_size=self.world_size,
1205
+ rank=rank,
1206
+ store=store,
1207
+ )
1208
+ else:
1209
+ dist.init_process_group(
1210
+ init_method=self.init_method,
1211
+ backend=DISTRIBUTED_BACKEND,
1212
+ world_size=int(self.world_size),
1213
+ rank=self.rank,
1214
+ )
1215
+ except RuntimeError as e:
1216
+ if "recompile" in e.args[0]:
1217
+ sys.exit(TEST_SKIPS["backend_unavailable"].exit_code)
1218
+
1219
+ raise
1220
+
1221
+ device_ids = None
1222
+ device_id = self.rank % DEVICE_COUNT
1223
+ if TEST_CUDA:
1224
+ torch.cuda.set_device(device_id)
1225
+ device_ids = [device_id]
1226
+
1227
+ # Execute barrier prior to running test to ensure that every process
1228
+ # has finished initialization and that the following test
1229
+ # immediately exiting due to a skip doesn't cause flakiness.
1230
+ dist.barrier(device_ids=device_ids)
1231
+
1232
+ torch._dynamo.reset()
1233
+ self.run_test(test_name, pipe)
1234
+ torch._dynamo.reset()
1235
+
1236
+ dist.barrier(device_ids=device_ids)
1237
+
1238
+ dist.destroy_process_group()
1239
+
1240
    def _train_for_several_steps(
        self,
        model: nn.Module,
        num_steps: int,
        autocast: bool,
        lr: float = 0.01,
        fsdp_cpu_offload: Optional[CPUOffload] = None,
        save_model: bool = False,
        mixed_precision: Optional[MixedPrecision] = None,
        enable_sharded_grad_scaler: bool = False,
        use_pure_fp16: bool = False,
        sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """Train ``model`` with SGD for ``num_steps`` steps and return the final
        detached loss.

        While training, this also asserts invariants the FSDP tests rely on:
        parameters remain on CPU when parameter CPU offloading is enabled, and
        the loss dtype matches the configured precision mode.

        Args:
            model: Wrapped model whose ``model.module`` exposes ``get_input``,
                ``get_loss``, and ``run_backward`` (the FSDPTestModel interface).
            num_steps: Number of optimizer steps to run.
            autocast: Whether to run the forward pass under ``torch.amp.autocast``.
            lr: SGD learning rate.
            fsdp_cpu_offload: If offloading parameters, their devices are
                checked to be CPU after forward and backward.
            save_model: If True, round-trip ``state_dict`` save/load each step.
            mixed_precision: FSDP mixed-precision config; affects expected loss dtype.
            enable_sharded_grad_scaler: Whether the ``ShardedGradScaler`` is enabled.
            use_pure_fp16: If True, inputs (and presumably the model) are fp16.
            sharded_grad_scaler_kwargs: Extra kwargs for ``ShardedGradScaler``.
        """
        cpu_offload_params = fsdp_cpu_offload and fsdp_cpu_offload.offload_params

        model_device = next(model.parameters()).device
        if sharded_grad_scaler_kwargs is None:
            sharded_grad_scaler_kwargs = {}
        sharded_grad_scaler = ShardedGradScaler(
            enabled=enable_sharded_grad_scaler, **sharded_grad_scaler_kwargs
        )
        # use SGD with momentum instead of Adam, since Adam is scale invariant
        # and this makes it bad for tests
        optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
        for _ in range(num_steps):
            optim.zero_grad()
            with torch.amp.autocast(DEVICE_TYPE, enabled=autocast):
                # Inputs always cuda regardless of cpu offloading, or model.device
                input = model.module.get_input(torch.device(DEVICE_TYPE))  # type: ignore[operator, union-attr]
                if use_pure_fp16 or (mixed_precision and not isinstance(model, FSDP)):
                    if isinstance(input, torch.Tensor):
                        input = input.half()
                    else:
                        input = tuple(x.half() for x in input)
                output = model(*input)
                # Post-forward, if CPU offloading model param should be on CPU.
                if (
                    cpu_offload_params
                    and isinstance(model, FSDP)
                    # If not resharding after forward, the parameters are still
                    # exposed as unsharded views into the GPU flat parameter
                    and model.sharding_strategy
                    not in NO_RESHARD_AFTER_FORWARD_STRATEGIES
                ):
                    for p in model.parameters():
                        # Params should always be on CPU
                        self.assertEqual(p.device, torch.device("cpu"))

            loss = model.module.get_loss(input, output).to(model_device)  # type: ignore[operator, union-attr]
            loss = sharded_grad_scaler.scale(loss)

            if not mixed_precision and not use_pure_fp16:
                assert (
                    loss.dtype == torch.float32
                ), "loss data type should be float32, as the original \
                    parameter data type is float32."
            else:
                if use_pure_fp16:
                    self.assertEqual(loss.dtype, torch.float16)
                # FSDP loss is fp16, DDP AMP loss is fp32
                elif isinstance(model, FSDP):
                    assert mixed_precision is not None  # mypy
                    self.assertEqual(loss.dtype, mixed_precision.param_dtype)
                else:
                    self.assertEqual(loss.dtype, torch.float32)
            model.module.run_backward(loss)  # type: ignore[operator, union-attr]
            # Post-backward, if CPU offloading model params should be on CPU.
            if cpu_offload_params and isinstance(model, FSDP):
                for p in model.parameters():
                    # Params should always be on CPU
                    self.assertEqual(p.device, torch.device("cpu"))
            # Unscale the gradients and step
            sharded_grad_scaler.step(optim)
            # Update the scale factor
            sharded_grad_scaler.update()
            # if save_model, simulate save + load.
            if save_model:
                state_dict = {k: v.clone() for k, v in model.state_dict().items()}
                # Zero params, if save/load state_dict did not work properly, this
                # would break the parity test with DDP.
                _zero_model(model)
                model.load_state_dict(state_dict)

        if isinstance(model, FSDP):
            model._assert_state(TrainingState.IDLE)
        return loss.detach()  # type: ignore[possibly-undefined]
1326
+
1327
    def _test_fsdp_parity(
        self,
        model_class: Type[FSDPTestModel],
        fsdp_init_mode: FSDPInitMode,
        device_init_mode: DEVICEInitMode,
        ref_init_fn: Optional[Callable] = None,
        num_iters: int = 2,
        save_model: bool = True,
        cpu_offload: CPUOffload = CPUOffload(),
        backward_prefetch: Optional[BackwardPrefetch] = None,
        sharding_strategy: Optional[ShardingStrategy] = None,
        mixed_precision: Optional[MixedPrecision] = None,
        forward_prefetch: bool = False,
        use_orig_params: bool = False,
        enable_sharded_grad_scaler: bool = False,
        use_pure_fp16: bool = False,
        init_kwargs: Optional[Dict[str, Any]] = None,
        sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None,
        **fsdp_kwargs,
    ):
        """
        Tests FSDP training against a reference, which defaults to DDP but
        may be customized with ``ref_init_fn``.

        Both models are trained via ``_train_for_several_steps``; the final
        losses and (when precision allows) the unsharded parameters must match.

        Args:
            model_class (Type[FSDPTestModel]): A model class that inherits from
                ``FSDPTestModel``, which defines the expected interface.
            fsdp_init_mode (FSDPInitMode): The mode to initialize the
                FSDP-wrapped model. This should not be ``NO_FSDP``.
            ref_init_fn (Optional[Callable]): A callable to invoke that wraps a
                non-wrapped model to construct the reference model, where this
                wrapper should provide data parallel semantics. If ``None``,
                then the callable defaults to the DDP constructor.
        """
        assert (
            fsdp_init_mode != FSDPInitMode.NO_FSDP
        ), "Expects an FSDP init mode that wraps with FSDP"
        if init_kwargs is None:
            init_kwargs = {}
        lr = 1e-2
        rank = self.process_group.rank()
        # Establish reference behavior with DDP
        model = model_class.init(
            self.process_group,
            FSDPInitMode.NO_FSDP,
            DEVICEInitMode.DEVICE_BEFORE,
            deterministic=True,
            **init_kwargs,
        )
        if ref_init_fn is None:
            if TEST_HPU:
                ref_model = DDP(
                    model, device_ids=[DEVICE_TYPE], output_device=DEVICE_TYPE
                )
            else:
                ref_model = DDP(model, device_ids=[rank], output_device=rank)
        else:
            ref_model = ref_init_fn(model)
        if use_pure_fp16:
            ref_model = ref_model.half()
        ref_loss = self._train_for_several_steps(
            ref_model,
            num_iters,
            autocast=mixed_precision is not None,
            lr=lr,
            fsdp_cpu_offload=cpu_offload,
            mixed_precision=mixed_precision,
            enable_sharded_grad_scaler=enable_sharded_grad_scaler,
            use_pure_fp16=use_pure_fp16,
            sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs,
        )
        ddp_params = list(ref_model.parameters())
        # Check against FSDP behavior
        fsdp_kwargs.update(
            {
                "cpu_offload": cpu_offload,
                "backward_prefetch": backward_prefetch,
                "sharding_strategy": sharding_strategy,
                "mixed_precision": mixed_precision,
                "forward_prefetch": forward_prefetch,
                "use_orig_params": use_orig_params,
            }
        )
        try:
            fsdp_model = model_class.init(
                self.process_group,
                fsdp_init_mode,
                device_init_mode,
                fsdp_kwargs,
                deterministic=True,
                **init_kwargs,
            )
        except Exception as e:
            raise ValueError(f"Initializing {model_class} raised error {str(e)}") from e
        if not isinstance(fsdp_model, FSDP):
            # Enforce that we wrap with top-level FSDP since we are comparing
            # assuming a data parallel reference and some test models may not
            # do so in their `init()` method
            fsdp_model = FSDP(fsdp_model, self.process_group, **fsdp_kwargs)
        if use_pure_fp16:
            # Change the model parameter dtype after FSDP initialization
            fsdp_model = fsdp_model.half()
        if device_init_mode == DEVICEInitMode.DEVICE_AFTER:
            fsdp_model = fsdp_model.to(DEVICE_TYPE)
        offload_params = cpu_offload is not None and cpu_offload.offload_params
        # Offloading parameters with `DEVICE_AFTER` should raise an error during
        # lazy initialization due to the parameter devices not being CPU;
        # otherwise, all parameter devices should be CPU
        expects_device_error = (
            offload_params and device_init_mode == DEVICEInitMode.DEVICE_AFTER
        )
        expects_cpu_device = (
            offload_params and device_init_mode != DEVICEInitMode.DEVICE_AFTER
        )
        if expects_cpu_device:
            cpu_device = torch.device("cpu")
            for param in fsdp_model.parameters():
                self.assertEqual(param.device, cpu_device)
        context = (
            self.assertRaisesRegex(
                RuntimeError,
                "An FSDP-managed module with parameter CPU offloading enabled "
                "has parameters on cuda",
            )
            if expects_device_error
            else nullcontext()
        )
        with context:
            fsdp_loss = self._train_for_several_steps(
                fsdp_model,
                num_iters,
                autocast=False,
                lr=lr,
                fsdp_cpu_offload=cpu_offload,
                save_model=save_model,
                mixed_precision=mixed_precision,
                enable_sharded_grad_scaler=enable_sharded_grad_scaler,
                use_pure_fp16=use_pure_fp16,
                sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs,
            )
        # No need to check for parameter and loss parity if expecting an error
        if expects_device_error:
            return
        # Check parameter devices are CPU if offloading to CPU before calling
        # `get_full_params()`, which will cast the parameters to FP32
        if offload_params:
            cpu_device = torch.device("cpu")
            for param in fsdp_model.parameters():
                self.assertEqual(param.device, cpu_device)
            fsdp_loss = fsdp_loss.to(DEVICE_TYPE)
        fsdp_unsharded_params = get_full_params(fsdp_model)
        # Do not check dtype since the reference DDP loss may not be the same
        # dtype as the FSDP loss in the case of mixed precision
        torch.testing.assert_close(ref_loss, fsdp_loss, check_dtype=False)
        # Do not check for parameter parity if using mixed precision since (1)
        # the DDP parameters are in FP16 (from `half()`) while the FSDP
        # parameters are in FP32 (from `summon_full_params()`) and (2) DDP runs
        # the optimizer in FP16 while FSDP runs it in FP32
        # TODO: Disable checking the parameters for pure FP16 due to floating
        # point inaccuracy. Note that this means that the backward pass is not
        # checked: https://github.com/pytorch/pytorch/issues/90784
        if mixed_precision is None and not use_pure_fp16:
            self.assertEqual(
                ddp_params,
                fsdp_unsharded_params,
                exact_device=True,
                msg="FSDP did not match DDP",
            )
1495
+
1496
+
1497
def test_compiled_fsdp(compile_compute_on_module: Optional[type] = None):
    """Decorator factory: run the wrapped FSDP test once per ``FullyShardMode``
    (eager, then compiled compute) by patching the test's ``fully_shard`` global.

    Args:
        compile_compute_on_module: If given, only modules of this type get
            ``.compile()`` applied after sharding; otherwise every wrapped
            module's compute is compiled.
    """

    def _fully_shard_then_compile(*args, **kwargs):
        # Shard first, then compile the module's compute (FSDP hooks stay eager).
        torch.distributed.fsdp.fully_shard(*args, **kwargs)  # type: ignore[operator]
        module = args[0]
        if compile_compute_on_module is None or isinstance(
            module, compile_compute_on_module
        ):
            module.compile()

    class FullyShardMode(Enum):
        EAGER = auto()
        COMPILED_COMPUTE = auto()

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            original_fully_shard = torch.distributed.fsdp.fully_shard
            for mode in FullyShardMode:
                if mode is not FullyShardMode.EAGER and not has_triton():
                    warnings.warn("Inductor on GPU needs Triton and recent GPU arch")
                    continue
                # Snapshot configs, then barrier so every rank reads the same value.
                saved_skip_fsdp_hooks = torch._dynamo.config.skip_fsdp_hooks
                saved_compile_threads = torch._inductor.config.compile_threads
                torch.distributed.barrier()

                if mode is FullyShardMode.EAGER:
                    fully_shard_patch = original_fully_shard
                elif mode is FullyShardMode.COMPILED_COMPUTE:
                    torch._dynamo.config.skip_fsdp_hooks = True
                    torch._inductor.config.compile_threads = 1
                    fully_shard_patch = _fully_shard_then_compile  # type: ignore[assignment]
                else:
                    raise NotImplementedError(
                        f"Need to implement FullyShardMode={mode}"
                    )

                # `fully_shard` is referenced in the test as a module-level
                # global (via `from ... import fully_shard`), so patch it there.
                func.__globals__[original_fully_shard.__name__] = fully_shard_patch
                func(*args, **kwargs)
                # Barrier first: other ranks may still be using the patched name.
                torch.distributed.barrier()
                func.__globals__[original_fully_shard.__name__] = original_fully_shard
                torch._dynamo.config.skip_fsdp_hooks = saved_skip_fsdp_hooks
                torch._inductor.config.compile_threads = saved_compile_threads

        return wrapper

    return decorator
1546
+
1547
+
1548
class SkipModule(nn.Module):
    """A minimal module (one bias-free 10x10 linear) that is deliberately left
    un-wrapped by FSDP in the skip tests."""

    def __init__(self) -> None:
        super().__init__()
        self.lin = nn.Linear(10, 10, bias=False)

    def forward(self, x):
        out = self.lin(x)
        return out
1555
+
1556
+
1557
class NestedLinear(nn.Module):
    """A bias-free 10x10 linear on DEVICE_TYPE, optionally FSDP-wrapped via ``wrap``."""

    def __init__(self, fsdp_wrap):
        super().__init__()
        layer = nn.Linear(10, 10, bias=False).to(DEVICE_TYPE)
        # Only wrap with FSDP when requested; otherwise keep the raw module.
        self.nested_linear = wrap(layer) if fsdp_wrap else layer

    def forward(self, x):
        return self.nested_linear(x)
1567
+
1568
+
1569
class SkipModel(nn.Module):
    """Model combining a plain linear, an un-wrapped ``SkipModule``, and an
    FSDP-wrapped ``NestedLinear`` (optionally double-nested)."""

    def __init__(self, double_nest):
        super().__init__()
        self.linear = nn.Linear(10, 10, bias=False).to(DEVICE_TYPE)
        self.linear_skip = SkipModule().to(DEVICE_TYPE)
        self.nested_linear = wrap(
            NestedLinear(fsdp_wrap=double_nest), device_id=DEVICE_TYPE
        )

    def forward(self, x):
        hidden = self.linear(x)
        hidden = self.linear_skip(hidden)
        return self.nested_linear(hidden)
lib/python3.10/site-packages/torch/testing/_internal/common_jit.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ # Torch
4
+ import torch
5
+ import torch.cuda
6
+ import torch.jit
7
+ import torch.jit._logging
8
+ import torch.jit.frontend
9
+ import torch.jit.quantized
10
+
11
+ # Testing utils
12
+ from torch.testing._internal.common_dtype import floating_and_complex_types_and
13
+ from torch.testing._internal.common_utils import TestCase, \
14
+ freeze_rng_state, TemporaryFileName, enable_profiling_mode_for_profiling_tests, is_iterable_of_tensors
15
+ from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401
16
+
17
+ # Standard library
18
+ from itertools import chain
19
+ from typing import List, Union
20
+ from torch._C import TensorType
21
+
22
+ import io
23
+
24
def check_output_types(self, func, ref_outputs, args, kwargs):
    """Assert that ``func``'s last traced graph has exactly one output whose
    JIT type matches the eager reference outputs ``ref_outputs``.

    Args:
        self: The TestCase providing ``assertTrue``.
        func: A JIT-compiled callable exposing a ``last_graph`` attribute.
        ref_outputs: Eager-mode outputs to type-check the graph output against.
        args, kwargs: Unused here; kept for a uniform checker signature.
    """
    graph = getattr(func, 'last_graph', None)
    types = [o.type() for o in graph.outputs()]
    self.assertTrue(len(types) == 1)
    t = types[0]
    torch._C._jit_assert_is_instance(ref_outputs, t)
30
+
31
# Test names in this set are only checked for a single derivative
# (check_against_reference skips its grad-grad phase for them).
nn_functional_single_grad = frozenset(
    f"test_nn_{name}"
    for name in (
        'pdist',
        'multilabel_margin_loss',
        'max_unpool3d',
        'multi_margin_loss',
        'binary_cross_entropy',
        'binary_cross_entropy_size_average',
        'ctc_loss',
        'grid_sample',
    )
)
42
+
43
def check_against_reference(self, func, reference_func, output_func, args, kwargs=None,
                            allow_unused=True, check_types=True, no_grad=False, no_gradgrad=False):
    """Verifies a function performs identically to some reference implementation.

    Commonly, this is used to verify that a JIT implementation
    (output_func) matches the behavior of the eager implementation
    (reference_func).

    Three phases are checked under the same saved RNG state: no-grad outputs,
    first-order gradients, and (unless skipped) second-order gradients.

    Args:
        self: TestCase providing ``runAndSaveRNG``/``assertEqual``.
        func: The implementation under test (e.g. a scripted/traced callable).
        reference_func: The eager reference implementation.
        output_func: Post-processing applied to outputs before differentiation.
        args: Positional inputs; tensors (and tensor lists) may require grad.
        kwargs: Optional keyword inputs.
        allow_unused: Passed through to ``torch.autograd.grad``.
        check_types: Also verify the JIT output type via ``check_output_types``.
        no_grad: If True, only the forward outputs are compared.
        no_gradgrad: If True, skip the second-derivative comparison.
    """
    kwargs = kwargs if kwargs else {}

    # Weighted sum of all differentiable outputs; gives a scalar to backprop.
    def allSum(vs):
        if isinstance(vs, torch.Tensor):
            vs = (vs,)
        return sum((i + 1) * v.sum().abs() if v.dtype.is_complex else (i + 1) * v.sum()
                   for i, v in enumerate(vs)
                   if v is not None and v.dtype in floating_and_complex_types_and(torch.half, torch.bfloat16))

    def clone_tensor(t, preserve_requires_grad):
        require_grad = preserve_requires_grad and t.requires_grad
        return t.detach().clone().requires_grad_(require_grad)

    # Deep-copies args so each phase runs on fresh, independent leaves.
    def clone_inputs(preserve_requires_grad: bool):
        inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = []

        for arg in args:
            if isinstance(arg, torch.Tensor):
                inputs.append(clone_tensor(arg, preserve_requires_grad))
            elif is_iterable_of_tensors(arg):
                inputs.append([clone_tensor(t, preserve_requires_grad) for t in arg])
            else:
                inputs.append(arg)

        return inputs

    # Returns tensors in args that requires_grad, including tensors in TensorList args
    def get_recording_tensors(args):
        recording_tensors: List[torch.Tensor] = []

        for arg in args:
            if isinstance(arg, torch.Tensor) and arg.requires_grad:
                recording_tensors.append(arg)
            elif is_iterable_of_tensors(arg):
                recording_tensors.extend(filter(lambda t: t.requires_grad, arg))

        return recording_tensors

    # test no gradients case
    nograd_inputs = clone_inputs(preserve_requires_grad=False)
    outputs = self.runAndSaveRNG(reference_func, nograd_inputs, kwargs)
    with enable_profiling_mode_for_profiling_tests():
        outputs_test = self.runAndSaveRNG(func, nograd_inputs, kwargs)
    self.assertEqual(outputs, outputs_test)

    if check_types:
        check_output_types(self, func, outputs_test, nograd_inputs, kwargs)

    if no_grad:
        # skip grad tests
        return

    with enable_profiling_mode_for_profiling_tests():
        # test single grad case
        recording_inputs = clone_inputs(preserve_requires_grad=True)
        recording_tensors = get_recording_tensors(recording_inputs)
        outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs))
        grads = torch.autograd.grad(allSum(outputs), recording_tensors,
                                    allow_unused=allow_unused)
        outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs))
        grads_test = torch.autograd.grad(allSum(outputs_test), recording_tensors,
                                         allow_unused=allow_unused)
        self.assertEqual(outputs, outputs_test)
        self.assertEqual(grads, grads_test)
        # test the grad grad case
        if self._testMethodName in nn_functional_single_grad or no_gradgrad:
            return

        outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs))
        l1 = allSum(outputs)
        grads = torch.autograd.grad(l1, recording_tensors, create_graph=True,
                                    allow_unused=allow_unused)

        l2 = (allSum(grads) * l1)
        grads2 = torch.autograd.grad(l2, recording_tensors, allow_unused=allow_unused)
        recording_inputs = clone_inputs(preserve_requires_grad=True)
        recording_tensors = get_recording_tensors(recording_inputs)
        outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs))
        l1_test = allSum(outputs_test)
        grads_test = torch.autograd.grad(
            l1_test, recording_tensors, create_graph=True, allow_unused=allow_unused)

        l2_test = (allSum(grads_test) * l1_test)
        grads2_test = torch.autograd.grad(l2_test, recording_tensors, allow_unused=allow_unused)

        self.assertEqual(outputs, outputs_test)
        self.assertEqual(grads, grads_test)
        for g2, g2_test in zip(grads2, grads2_test):
            if g2 is None and g2_test is None:
                continue
            # Looser tolerances: second derivatives accumulate more float error.
            self.assertEqual(g2, g2_test, atol=5e-4, rtol=1e-4)
142
+
143
class JitCommonTestCase(TestCase):
    """Shared helpers for TorchScript tests: graph-to-function conversion,
    save/load round-trips under frozen RNG, autodiff-group checking, and
    symbolic shape analysis checking."""

    def createFunctionFromGraph(self, trace):
        """Wrap a graph (or traced module's graph) in a callable ScriptFunction."""
        graph = trace if isinstance(trace, torch._C.Graph) else trace.graph()
        return torch._C._create_function_from_graph("forward", graph)

    def assertExportImport(self, trace, inputs):
        """Round-trip a traced graph through save/load and compare outputs."""
        m = self.createFunctionFromGraph(trace)
        self.assertExportImportModule(m, inputs)

    def assertExportImportModule(self, m, inputs):
        """Assert a module produces identical results before and after export/import."""
        m_import = self.getExportImportCopy(m)
        a = self.runAndSaveRNG(m, inputs)
        b = self.runAndSaveRNG(m_import, inputs)
        self.assertEqual(a, b, "Results of original model and "
                               "exported/imported version of model differed")

    def runAndSaveRNG(self, func, inputs, kwargs=None):
        """Call ``func`` under a frozen RNG state so repeated runs are comparable."""
        kwargs = kwargs if kwargs else {}
        with freeze_rng_state():
            results = func(*inputs, **kwargs)
        return results

    def getExportImportCopy(self, m, also_test_file=True, map_location=None):
        """Return ``m`` after a save/load round-trip (in-memory, and optionally
        also through a temporary file)."""
        buffer = io.BytesIO()
        torch.jit.save(m, buffer)
        buffer.seek(0)
        imported = torch.jit.load(buffer, map_location=map_location)

        if not also_test_file:
            return imported

        with TemporaryFileName() as fname:
            torch.jit.save(imported, fname)
            return torch.jit.load(fname, map_location=map_location)

    def autoDiffErrorMessage(self, should_autodiff_node, nodes_not_in_diff_graph,
                             fusion_nodes_not_found, non_fusible_nodes_being_fused,
                             fusion_nodes_found, nodes_in_diff_graph):
        """Build a detailed failure message for ``assertAutodiffNode``,
        classifying each mismatched node by where it was expected vs. found."""
        err_msg = "\nFailure in testing nodes' autodifferentiation. "
        if should_autodiff_node:
            err_msg += "One or more nodes were expected to be autodiffed, " \
                       "but were not found in specified fusible/nonfusible " \
                       "DifferentiableGraph groups. \nSpecifically:"
            # The node is intended to appear in a differentiable graph but doesn't
            diff_nodes_missing = []
            # The node is intended to appear in a differentiable graph
            # outside of a fusion group but instead is in a fusion group
            diff_nodes_in_fusion = []
            # The node is intended to appear in a fusion group but doesn't
            fusion_nodes_missing = []
            # The node is intended to appear in a fusion group but instead
            # is just in an outer differentiable graph
            fusion_nodes_in_diff = []
            for node in nodes_not_in_diff_graph:
                if node in non_fusible_nodes_being_fused:
                    diff_nodes_in_fusion.append(node)
                else:
                    diff_nodes_missing.append(node)
            for node in fusion_nodes_not_found:
                if node in nodes_in_diff_graph:
                    fusion_nodes_in_diff.append(node)
                else:
                    fusion_nodes_missing.append(node)
            if len(diff_nodes_missing) > 0:
                err_msg += f"\n  {diff_nodes_missing} were not in one of the " \
                           "DifferentiableGraphs when they were expected to be. " \
                           "Did you intend for these nodes to be autodiffed? " \
                           "If not, remove them from the list of nonfusible nodes."
            if len(diff_nodes_in_fusion) > 0:
                err_msg += f"\n  {diff_nodes_in_fusion} were found in one of the FusionGroups " \
                           "when they were expected to be just in a DifferentiableGraph. If it was " \
                           "intended for these nodes to be in FusionGroups, reclassify these nodes as " \
                           "fusible nodes. If these nodes were not intended to be fused, your " \
                           "autodifferentiation logic might be wrong."
            if len(fusion_nodes_missing) > 0:
                err_msg += f"\n  {fusion_nodes_missing} were not in one of the FusionGroups " \
                           "of the DifferentiableGraphs when they were expected to be. " \
                           "They were also not found in an outer DifferentiableGraph. Did you " \
                           "intend for these nodes to be autodifferentiated? If not, you should " \
                           "remove these nodes from the test's fusible nodes. Otherwise your " \
                           "autodifferentiation logic might be wrong."
            if len(fusion_nodes_in_diff) > 0:
                err_msg += f"\n  {fusion_nodes_in_diff} were not in one of the FusionGroups " \
                           "of the DifferentiableGraphs when they were expected to be, " \
                           "instead they were found just in an outer DifferentiableGraph. " \
                           "Did you intend for these nodes to be fused? If not, you should " \
                           "move these nodes into the test's nonfusible nodes. Otherwise your " \
                           "autodifferentiation logic might be wrong."
        else:
            err_msg += "One or more nodes were not expected to be autodiffed " \
                       "but were found in a DifferentiableGraph or in a FusionGroup " \
                       "of a DifferentiableGraph. Did you intend for these nodes to be " \
                       "autodiffed? If so, change this test to expect autodifferentiation. " \
                       "\nSpecifically:"
            if len(fusion_nodes_found) > 0:
                err_msg += f"\n  {fusion_nodes_found} were not expected to be in " \
                           "one of the DifferentiableGraphs, but appeared in a FusionGroup " \
                           "of a DifferentiableGraph. "
            if len(nodes_in_diff_graph) > 0:
                err_msg += f"\n  {nodes_in_diff_graph} were not expected to " \
                           "be in one of the DifferentiableGraphs but were."
        return err_msg

    def assertAutodiffNode(self, graph, should_autodiff_node, nonfusible_nodes, fusible_nodes):
        """Assert that the expected nodes appear in (or stay out of) the graph's
        DifferentiableGraph subgraphs and their FusionGroups."""
        diff_nodes = graph.findAllNodes('prim::DifferentiableGraph')
        diff_subgraphs = [node.g('Subgraph') for node in diff_nodes]

        # Note: currently no tests have fusible_nodes
        fusion_nodes = list(chain.from_iterable([g.findAllNodes('prim::FusionGroup') for g in diff_subgraphs]))
        fusion_subgraphs = [node.g('Subgraph') for node in fusion_nodes]

        # For any non-fusible node, it must show up in one of the DifferentiableGraphs.
        nodes_in_diff_graph = []
        nodes_not_in_diff_graph = []
        non_fusible_nodes_being_fused = []
        for node in nonfusible_nodes:
            if any(g.findNode(node) is not None for g in diff_subgraphs):
                nodes_in_diff_graph.append(node)
            else:
                nodes_not_in_diff_graph.append(node)
            if any(g.findNode(node) is not None for g in fusion_subgraphs):
                non_fusible_nodes_being_fused.append(node)
        found_all_nonfusible_nodes = len(nodes_in_diff_graph) == len(nonfusible_nodes)

        # For any fusible node, it must show up in one of the FusionGroups in one of the DifferentiableGraphs.
        fusion_nodes_found = []
        fusion_nodes_not_found = []
        for node in fusible_nodes:
            if any(g.findNode(node) is not None for g in fusion_subgraphs):
                fusion_nodes_found.append(node)
            else:
                fusion_nodes_not_found.append(node)
        found_all_fusible_nodes = len(fusion_nodes_found) == len(fusible_nodes)

        if should_autodiff_node is not None:
            err_msg = self.autoDiffErrorMessage(should_autodiff_node,
                                                nodes_not_in_diff_graph,
                                                fusion_nodes_not_found,
                                                non_fusible_nodes_being_fused,
                                                fusion_nodes_found,
                                                nodes_in_diff_graph)
            self.assertEqual(should_autodiff_node,
                             found_all_nonfusible_nodes and found_all_fusible_nodes, err_msg)

    def checkShapeAnalysis(self, out_sizes: Union[List[int], List[List[int]]],
                           traced_graph, assert_propagation, constant_prop=True):
        """Re-propagate shapes on ``traced_graph`` (with symbolic-shape test
        mode both on and off) and check the output types against ``out_sizes``."""
        # re-propagate input shapes provided by tracing,
        prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled()
        for enable_test_mode in [True, False]:
            # here we are testing allowing/disallowing substituting in complete shapes as constants,
            # disallowing constants helps stress test partial eval and substitution pipeline
            torch._C._jit_set_symbolic_shapes_test_mode(enable_test_mode)
            torch._C._jit_erase_non_input_shape_information(traced_graph)
            if constant_prop:
                torch._C._jit_pass_constant_propagation(traced_graph)
            torch._C._jit_pass_propagate_shapes_on_graph(traced_graph)
            # Add sizes to default tensor type to avoid checking something out of scope
            # and difficulties with tracer leaving in other parts of tensor type
            output = next(traced_graph.outputs()).type()

            def test_type(type, actual_size):
                sizes = type.symbolic_sizes()
                out_type = TensorType.get().with_sizes(sizes)
                actual_type = TensorType.get().with_sizes(actual_size)

                # always check actual shape is a subtype of the output
                self.assertTrue(actual_type.isSubtypeOf(out_type))

                # and then if assertion flag is provided, check shape analysis
                # is successful
                if assert_propagation:
                    self.assertEqual(out_type.sizes(), actual_size)

            if output.isSubtypeOf(torch._C.TensorType.get()):
                test_type(output, out_sizes)
            else:
                tuple_elements = output.elements()
                for i in range(len(tuple_elements)):
                    test_type(tuple_elements[i], out_sizes[i])

        torch._C._jit_set_symbolic_shapes_test_mode(prev_symbolic_shapes_test_enabled)
lib/python3.10/site-packages/torch/testing/_internal/common_mkldnn.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import contextlib
4
+ import functools
5
+ import inspect
6
+
7
+ import torch
8
+
9
+
10
+ # Test whether hardware BF32 math mode enabled. It is enabled only on:
11
+ # - MKLDNN is available
12
+ # - BF16 is supported by MKLDNN
13
+ def bf32_is_not_fp32():
14
+ if not torch.backends.mkldnn.is_available():
15
+ return False
16
+ if not torch.ops.mkldnn._is_mkldnn_bf16_supported():
17
+ return False
18
+ return True
19
+
20
+
21
@contextlib.contextmanager
def bf32_off():
    """Context manager forcing full-precision ("highest") fp32 matmuls;
    the previous float32 matmul precision is restored on exit."""
    saved_precision = torch.get_float32_matmul_precision()
    try:
        torch.set_float32_matmul_precision("highest")
        yield
    finally:
        torch.set_float32_matmul_precision(saved_precision)
29
+
30
+
31
@contextlib.contextmanager
def bf32_on(self, bf32_precision=1e-5):
    """Context manager enabling reduced-precision ("medium") fp32 matmuls and
    loosening the test's comparison tolerance ``self.precision`` to
    ``bf32_precision``; both settings are restored on exit."""
    saved_matmul_precision = torch.get_float32_matmul_precision()
    saved_test_precision = self.precision
    try:
        torch.set_float32_matmul_precision("medium")
        self.precision = bf32_precision
        yield
    finally:
        torch.set_float32_matmul_precision(saved_matmul_precision)
        self.precision = saved_test_precision
42
+
43
+
44
# This is a wrapper that wraps a test to run this test twice, one with
# allow_bf32=True, another with allow_bf32=False. When running with
# allow_bf32=True, it will use reduced precision as specified by the
# argument
def bf32_on_and_off(bf32_precision=1e-5):
    """Decorator factory: run the test with BF32 disabled and then enabled
    (using ``bf32_precision`` as the loosened tolerance) whenever the backend
    supports BF32 and the test targets CPU float32; otherwise run it once."""

    def with_bf32_disabled(self, function_call):
        with bf32_off():
            function_call()

    def with_bf32_enabled(self, function_call):
        with bf32_on(self, bf32_precision):
            function_call()

    def wrapper(f):
        arg_names = tuple(inspect.signature(f).parameters.keys())

        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            # Fold positional args into kwargs so device/dtype can be inspected.
            kwargs.update(zip(arg_names, args))
            applicable = bf32_is_not_fp32()
            if "device" in kwargs:
                applicable = applicable and (torch.device(kwargs["device"]).type == "cpu")
            if "dtype" in kwargs:
                applicable = applicable and (kwargs["dtype"] == torch.float)
            if applicable:
                with_bf32_disabled(kwargs["self"], lambda: f(**kwargs))
                with_bf32_enabled(kwargs["self"], lambda: f(**kwargs))
            else:
                f(**kwargs)

        return wrapped

    return wrapper
lib/python3.10/site-packages/torch/testing/_internal/common_nn.py ADDED
The diff for this file is too large to render. See raw diff
 
lib/python3.10/site-packages/torch/testing/_internal/common_optimizers.py ADDED
@@ -0,0 +1,2332 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import functools
4
+ import itertools
5
+ import sys
6
+ import unittest
7
+ from copy import deepcopy
8
+ from enum import Enum
9
+ from typing import Any, Dict, List, Tuple, Union
10
+
11
+ import torch
12
+ from torch import Tensor
13
+ from torch.nn import Parameter
14
+ from torch.optim import (
15
+ Adadelta,
16
+ Adafactor,
17
+ Adagrad,
18
+ Adam,
19
+ Adamax,
20
+ AdamW,
21
+ ASGD,
22
+ LBFGS,
23
+ NAdam,
24
+ Optimizer,
25
+ RAdam,
26
+ RMSprop,
27
+ Rprop,
28
+ SGD,
29
+ SparseAdam,
30
+ )
31
+ from torch.optim.lr_scheduler import (
32
+ ConstantLR,
33
+ ExponentialLR,
34
+ LinearLR,
35
+ PolynomialLR,
36
+ ReduceLROnPlateau,
37
+ StepLR,
38
+ )
39
+ from torch.testing._internal.common_device_type import tol, toleranceOverride
40
+ from torch.testing._internal.common_methods_invocations import DecorateInfo
41
+ from torch.testing._internal.common_utils import (
42
+ _TestParametrizer,
43
+ skipIfMPS,
44
+ skipIfTorchDynamo,
45
+ skipIfXpu,
46
+ TEST_WITH_TORCHDYNAMO,
47
+ )
48
+ from torch.utils._foreach_utils import _get_foreach_kernels_supported_devices
49
+
50
+
51
class OptimizerInput:
    """Bundle of constructor arguments for an optimizer under test: the
    params argument, the keyword arguments, and a human-readable
    description used in test names and failure messages.
    """

    __slots__ = ["params", "kwargs", "desc"]

    def __init__(
        self,
        params: Union[
            List[Parameter], List[Tensor], Dict[Any, Any], List[Dict[str, Any]]
        ],
        kwargs: Dict[str, Any],
        desc: str = "",
    ):
        # params may be a list of Tensors, a param_groups structure, or None
        # (to be filled in later by the consuming test).
        self.params = params
        self.kwargs = kwargs
        self.desc = desc

    def __repr__(self):
        return "params={}, kwargs={}, desc={}".format(
            self.params, self.kwargs, self.desc
        )
71
+
72
+
73
class OptimizerErrorEnum(Enum):
    """Enumerates when an error is raised when testing optimizers."""

    # The error is expected while constructing the optimizer instance.
    CONSTRUCTION_ERROR = 0
    # The error is expected when optimizer.step() is called.
    STEP_ERROR = 1
78
+
79
+
80
class ErrorOptimizerInput:
    """
    An OptimizerInput that will cause the optimizer to throw an error when constructed.
    Includes the type and string of the resulting error.
    """

    __slots__ = ["optimizer_error_input", "error_on", "error_type", "error_regex"]

    def __init__(
        self,
        optimizer_error_input,
        *,
        error_on=OptimizerErrorEnum.CONSTRUCTION_ERROR,
        error_type=RuntimeError,
        error_regex="",
    ):
        # Store all fields; error_on says whether the failure is expected at
        # construction time or at step() time, error_type/error_regex pin
        # down the expected exception.
        for slot, value in (
            ("optimizer_error_input", optimizer_error_input),
            ("error_on", error_on),
            ("error_type", error_type),
            ("error_regex", error_regex),
        ):
            setattr(self, slot, value)
100
+
101
+
102
class OptimizerInfo:
    """Optimizer information to be used in testing.

    Bundles the optimizer class with functions that produce constructor
    inputs / error inputs, capability flags, and test skips/decorators.
    """

    def __init__(
        self,
        optim_cls: Optimizer,  # Class object for the Optimizer under test
        *,
        # Function to generate optimizer inputs EXCLUDING params. We delegate params responsibility
        # to the test using the OptimizerInfo. OptimizerInput.params is likely None.
        # Can optionally take in device to filter out certain unsupported configs
        optim_inputs_func,
        # Tuple of lambdas to generate LRScheduler instances to run with the optimizer for the
        # LRScheduler tests like test_forloop_goes_right_direction with_lrsched.
        # We DO NOT expect to thoroughly test LRSchedulers through the optimizers, so not every
        # LRScheduler configuration will be included. See test_lrscheduler.py for that instead.
        # A few optimizers like SGD and Adam will test more LRSchedulers.
        scheduler_inputs=(
            [
                lambda opt: StepLR(opt, gamma=0.9, step_size=10),
                lambda opt: ReduceLROnPlateau(opt),
            ],
        ),
        # A subset of the global-cliquey flags (fused, foreach, differentiable) the optimizer
        # supports. See NOTE: [optimizer kwarg categories] for what global-cliquey means.
        supported_impls: Tuple[str, ...] = ("foreach", "differentiable"),
        # A subset of all flags, signifying which ones were only supported after the
        # original optimizer had already been released. aka impls where we need to check BC.
        not_og_supported_flags: Tuple[str, ...] = (
            "foreach",
            "differentiable",
            "maximize",
            "capturable",
        ),
        # the optim supports passing in sparse gradients as well as dense grads
        supports_sparse: bool = False,
        # the optimizer constructor supports passing in capturable as a kwarg
        has_capturable_arg: bool = False,
        # the optim only supports one config: sparse grads w/ dense params, see SparseAdam
        only_supports_sparse_grads: bool = False,
        # Tuple of (optimizer kwargs, schedulers_constructors) specifically for sparse tests,
        # with especially tuned hyperparameters. These only apply if the optimizer supports
        # sparse parameters or grads.
        metadata_for_sparse=({}, []),
        # the optim supports complex parameters
        supports_complex: bool = True,
        # whether the optimizer.step() function requires a closure to be passed
        step_requires_closure: bool = False,
        # whether the optimizer supports per-param options with parameter groups
        supports_param_groups: bool = True,
        # whether the optimizer supports parameters on multiple devices
        supports_multiple_devices: bool = True,
        skips=(),  # Indicates which tests to skip
        decorators=None,  # Additional decorators to apply to generated tests
        optim_error_inputs_func=None,  # Function to generate optim inputs that error
        supports_fused_on: Tuple[str, ...] = (),
    ):
        self.optim_cls = optim_cls
        self.optim_inputs_func = optim_inputs_func
        self.scheduler_inputs = scheduler_inputs
        self.supported_impls = supported_impls
        self.not_og_supported_flags = not_og_supported_flags
        self.supports_sparse = supports_sparse
        self.has_capturable_arg = has_capturable_arg
        self.metadata_for_sparse = metadata_for_sparse
        self.only_supports_sparse_grads = only_supports_sparse_grads
        self.supports_complex = supports_complex
        self.step_requires_closure = step_requires_closure
        self.supports_param_groups = supports_param_groups
        self.supports_multiple_devices = supports_multiple_devices
        # skips are folded into decorators; both are consumed via get_decorators.
        self.decorators = (
            *(decorators if decorators else []),
            *(skips if skips else []),
        )
        self.optim_error_inputs_func = optim_error_inputs_func
        self.supports_fused_on = supports_fused_on

    def get_decorators(self, test_class, test_name, device, dtype, param_kwargs):
        """Return the decorators applying to one concrete test instance.

        DecorateInfo entries contribute their decorators only when active for
        (test_class, test_name, device, dtype, param_kwargs); plain decorator
        objects always apply.
        """
        result = []
        for decorator in self.decorators:
            if isinstance(decorator, DecorateInfo):
                if decorator.is_active(
                    test_class, test_name, device, dtype, param_kwargs
                ):
                    result.extend(decorator.decorators)
            else:
                result.append(decorator)
        return result

    @property
    def name(self):
        # Human-readable optimizer name, e.g. "Adam".
        return self.optim_cls.__name__
193
+
194
+
195
class optims(_TestParametrizer):
    """Decorator for specifying a list of optimizers over which to run a test."""

    def __init__(self, optim_info_iterable, dtypes=None):
        self.optim_info_list = list(optim_info_iterable)

        # optimizers aren't limited to be one dtype as parameters can have different dtypes
        # We default to torch.float32, but dtypes should be specified through passed in
        # parameters.
        self.dtypes = dtypes if dtypes is not None else [torch.float32]

    def _parametrize_test(self, test, generic_cls, device_cls):
        # Generator yielding one (test, name, param_kwargs, decorator_fn)
        # tuple per (OptimizerInfo, dtype) combination.
        if device_cls is None:
            raise RuntimeError(
                "The @optims decorator is only intended to be used in a device-specific "
                "context; use it with instantiate_device_type_tests() instead of "
                "instantiate_parametrized_tests()"
            )

        for optim_info, dtype in itertools.product(self.optim_info_list, self.dtypes):
            # Construct the test name; device / dtype parts are handled outside.
            # See [Note: device and dtype suffix placement]
            test_name = optim_info.name

            # Construct parameter kwargs to pass to the test.
            param_kwargs = {"optim_info": optim_info, "dtype": dtype}

            try:

                @functools.wraps(test)
                def test_wrapper(*args, **kwargs):
                    return test(*args, **kwargs)

                # Partially bind everything except the final param_kwargs, which
                # the instantiation machinery supplies when it applies decorators.
                decorator_fn = functools.partial(
                    optim_info.get_decorators,
                    generic_cls.__name__,
                    test.__name__,
                    device_cls.device_type,
                    dtype,
                )

                yield (test_wrapper, test_name, param_kwargs, decorator_fn)
            except Exception as ex:
                # Provides an error message for debugging before rethrowing the exception
                print(
                    f"Failed to instantiate {test_name} for module {optim_info.name}!"
                )
                raise ex
243
+
244
+
245
# Helper function for generating error inputs for all optimizers, used below.
def get_error_inputs_for_all_optims(device, dtype):
    """Return ErrorOptimizerInputs exercising validation shared by every
    optimizer: bad params container, duplicate params (within and across
    groups), multi-element Tensor lr, and mixed named/unnamed params.
    Only generated for CPU so the common checks aren't duplicated per device.
    """
    if _get_device_type(device) == "cpu":
        sample_param = Parameter(torch.randn(1, device=device, dtype=dtype))
        sample_param2 = Parameter(torch.randn(1, device=device, dtype=dtype))
        return [
            ErrorOptimizerInput(
                OptimizerInput(
                    params=sample_param,
                    kwargs={},
                    desc="invalid param type",
                ),
                error_type=TypeError,
                error_regex="params argument given to the optimizer should be an iterable of Tensors or dicts",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=[sample_param, sample_param],
                    kwargs={},
                    desc="a param group cannot have duplicate parameters",
                ),
                # NOTE: this one is a warning, not an exception.
                error_type=UserWarning,
                error_regex=".*a parameter group with duplicate parameters.*",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=[{"params": sample_param}, {"params": sample_param}],
                    kwargs={},
                    desc="duplicate parameters should not occur across param groups either",
                ),
                error_type=ValueError,
                error_regex="some parameters appear in more than one parameter group",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs=dict(lr=torch.tensor([0.001, 0.001])),
                    desc="Tensor lr must be 1-element",
                ),
                error_type=ValueError,
                error_regex="Tensor lr must be 1-element",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=[("weight", sample_param), sample_param2],
                    kwargs={},
                    desc="all optimizer params should be with/without names",
                ),
                error_type=ValueError,
                error_regex="all optimizer params should be with/without names. Some param names are missing",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=[
                        {"params": [sample_param], "lr": 1e-2},
                        {"params": [("weight", sample_param2)]},
                    ],
                    kwargs={},
                    desc="all optimizer param groups should be with/without names.",
                ),
                error_type=ValueError,
                error_regex="all optimizer param groups should be with/without names. "
                "cannot add param group with names to the optimizer",
            ),
        ]
    else:
        return []
312
+
313
+
314
+ # ------------------------------------------------------------------------------------------
315
+ # NOTE: [optimizer kwarg categories]
316
+ # We categorize optimizer kwargs as 3 types:
317
+ # 1. optimizer-specific flags are like amsgrad or rho or beta, flags that are specific to
318
+ # algorithms and thus only show up for certain optimizers. There are many of these, so I
319
+ # do not bother gathering them all and listing them here. The converse to these would be
320
+ # global flags that every optimizer ideally _should_ support. We break global flags into
321
+ # 2 further categories and list them all below.
322
+ # 2. global-friendly = ["lr", "weight_decay", "maximize", "capturable"]
323
+ # global-friendly flags are global flags who play nicely with all other global flags,
324
+ # i.e., are mutually exclusive in function. This means that any pair of the following
325
+ # flags can be toggled at once (e.g., maximize and weight_decay). Furthermore, any of the
326
+ # following flags theoretically can be enabled with ANY other global flag, including the
327
+ # cliquey ones (e.g, capturable and foreach).
328
+ # 3. global-cliquey = ["foreach", "fused", "differentiable"]
329
+ # global-cliquey flags are global flags that do NOT coexist with other cliquey flags,
330
+ # usually because they contradict each other in function. For example, one should not flip
331
+ # both foreach AND fused to True, because they are two differing performance optimizations
332
+ # in which you can only opt into one.
333
+ #
334
+ # The following optim_inputs_func_* sampling functions only return constructor combinations of
335
+ # optimizer-specific and global-friendly flags. This is because we are confident they would mesh
336
+ # well with additional kwargs. On the flip side of the same coin, we reserve setting the
337
+ # global-cliquey flags to individual tests and fully expect tests to edit OptimizerInput.kwargs.
338
+
339
+
340
def optim_inputs_func_adadelta(device, dtype=None):
    """Constructor configs for Adadelta: optimizer-specific and
    global-friendly flags only (see NOTE: [optimizer kwarg categories]).
    CUDA devices additionally get capturable variants.
    """
    configs = [
        OptimizerInput(params=None, kwargs={}, desc="default"),
        OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"),
        OptimizerInput(
            params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
        ),
        OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"),
        OptimizerInput(
            params=None,
            kwargs={"weight_decay": 0.1, "maximize": True},
            desc="maximize, weight_decay",
        ),
        OptimizerInput(
            params=None, kwargs={"rho": 0.95, "weight_decay": 0.9}, desc="rho"
        ),
    ]
    if _get_device_type(device) == "cuda":
        configs.extend(
            [
                OptimizerInput(
                    params=None, kwargs={"capturable": True}, desc="capturable"
                ),
                OptimizerInput(
                    params=None,
                    kwargs={"weight_decay": 0.1, "capturable": True},
                    desc="capturable with weight decay",
                ),
                OptimizerInput(
                    params=None,
                    kwargs={"lr": torch.tensor(0.001), "capturable": True},
                    desc="Tensor lr with capturable",
                ),
            ]
        )
    return configs
371
+
372
+
373
def optim_error_inputs_func_adadelta(device, dtype):
    """Error-producing constructor configs for Adadelta: the shared
    cross-optimizer errors plus an out-of-range rho check (CPU only)."""
    error_inputs = get_error_inputs_for_all_optims(device, dtype)
    if _get_device_type(device) == "cpu":
        error_inputs.append(
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs={"lr": 1e-2, "rho": 1.1},
                    desc="rho should be between 0 and 1",
                ),
                error_type=ValueError,
                error_regex="Invalid rho value: 1.1",
            )
        )
    return error_inputs
388
+
389
+
390
def optim_inputs_func_adafactor(device, dtype=None):
    """Constructor configs for Adafactor: default, weight decay, maximize,
    and the Adafactor-specific beta2_decay / clipping-threshold d knobs.
    Device-independent (no capturable variants)."""
    return [
        OptimizerInput(params=None, kwargs={}, desc="default"),
        OptimizerInput(
            params=None,
            kwargs={"weight_decay": 0.1, "lr": 0.01},
            desc="nonzero weight_decay",
        ),
        OptimizerInput(
            params=None,
            kwargs={"weight_decay": 0.1, "maximize": True},
            desc="maximize",
        ),
        OptimizerInput(
            params=None,
            kwargs={"beta2_decay": -1.0},
            desc="non-default beta2_decay",
        ),
        OptimizerInput(
            params=None,
            kwargs={"d": 1.5},
            desc="non-default clipping threshold d",
        ),
    ]
414
+
415
+
416
def optim_error_inputs_func_adafactor(device, dtype):
    """Error-producing constructor/step configs for Adafactor: shared
    errors plus hyperparameter-range checks and the complex-parameter
    step-time rejection (CPU only)."""
    error_inputs = get_error_inputs_for_all_optims(device, dtype)
    if _get_device_type(device) == "cpu":
        # Complex params are rejected at step() time, so give the param a grad.
        complex_param = torch.rand(2, 3, device=device, dtype=torch.complex64)
        complex_param.grad = torch.rand_like(complex_param)
        error_inputs += [
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs=dict(eps=(-1e-30, 1e-3)),
                    desc="epsilon1 should be >= 0",
                ),
                error_type=ValueError,
                error_regex="epsilon1 should be >= 0",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs=dict(d=0.0),
                    desc="invalid d",
                ),
                error_type=ValueError,
                error_regex="Clipping threshold d should be >= 1",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs=dict(beta2_decay=0.8),
                    desc="invalid beta2_decay",
                ),
                error_type=ValueError,
                error_regex="beta2_decay should be <= 0",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=[complex_param],
                    kwargs=dict(),
                    desc="does not support complex parameters",
                ),
                error_type=RuntimeError,
                error_regex="Adafactor does not support complex parameters",
                error_on=OptimizerErrorEnum.STEP_ERROR,
            ),
        ]
    return error_inputs
461
+
462
+
463
def optim_inputs_func_adagrad(device, dtype=None):
    """Constructor configs for Adagrad, including the Adagrad-specific
    initial_accumulator_value and lr_decay knobs and a Tensor lr.
    Device-independent (no capturable variants)."""
    return [
        OptimizerInput(params=None, kwargs={}, desc="default"),
        OptimizerInput(
            params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
        ),
        OptimizerInput(
            params=None,
            kwargs={"weight_decay": 0.1, "maximize": True},
            desc="maximize",
        ),
        OptimizerInput(params=None, kwargs={"lr": 0.1}, desc="non-default lr"),
        OptimizerInput(
            params=None,
            kwargs={"initial_accumulator_value": 0.1, "weight_decay": 0.1},
            desc="initial_accumulator_value",
        ),
        OptimizerInput(
            params=None,
            kwargs={"lr": 0.1, "lr_decay": 0.5, "weight_decay": 0.1},
            desc="lr_decay",
        ),  # TODO: Move out to testing in param_group?
        OptimizerInput(
            params=None,
            kwargs={"lr": torch.tensor(0.001)},
            desc="Tensor lr",
        ),
    ]
491
+
492
+
493
def optim_error_inputs_func_adagrad(device, dtype):
    """Error-producing constructor configs for Adagrad: the shared
    cross-optimizer errors plus a negative lr_decay check (CPU only)."""
    error_inputs = get_error_inputs_for_all_optims(device, dtype)
    if _get_device_type(device) == "cpu":
        error_inputs.append(
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs={"lr": 1e-2, "lr_decay": -0.5},
                    desc="lr_decay must be bigger than 0",
                ),
                error_type=ValueError,
                error_regex="Invalid lr_decay value: -0.5",
            )
        )
    return error_inputs
508
+
509
+
510
# TODO: consider tensor LR! See multi_tensor_optimizer_configs in test_optim.py --> tensor LR should work
# with all implementation code paths...
def optim_inputs_func_adam(device, dtype=None):
    """Constructor configs for Adam. CUDA adds capturable / Tensor-lr /
    Tensor-betas variants; MPS adds a Tensor lr variant. For float16, eps
    is bumped to 0.1 on every config to keep the denominator representable
    (see inline note below)."""
    cuda_supported_configs = [
        OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"),
        OptimizerInput(
            params=None,
            kwargs={"weight_decay": 0.1, "amsgrad": True, "capturable": True},
            desc="capturable, amsgrad",
        ),
        OptimizerInput(
            params=None,
            kwargs={"lr": torch.tensor(0.001), "amsgrad": True, "capturable": True},
            desc="Tensor lr with capturable and amsgrad",
        ),
        OptimizerInput(
            params=None,
            kwargs={
                "lr": torch.tensor(0.001),
                "betas": (torch.tensor(0.9), torch.tensor(0.99)),
                "amsgrad": True,
                "capturable": True,
            },
            desc="Tensor lr, Tensor betas, with capturable and amsgrad",
        ),
        OptimizerInput(
            params=None,
            kwargs={
                "lr": torch.tensor(0.001),
                "betas": (torch.tensor(0.9), torch.tensor(0.99)),
                "amsgrad": False,
                "capturable": True,
            },
            desc="Tensor lr, Tensor betas, with capturable",
        ),
    ]
    mps_supported_configs = [
        OptimizerInput(
            params=None, kwargs={"lr": torch.tensor(0.01)}, desc="Tensor lr"
        ),
    ]

    total = (
        [
            OptimizerInput(params=None, kwargs={}, desc="default"),
            OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"),
            OptimizerInput(
                params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
            ),
            OptimizerInput(
                params=None,
                kwargs={"weight_decay": 0.1, "maximize": True},
                desc="maximize",
            ),
            OptimizerInput(
                params=None,
                kwargs={"weight_decay": 0.1, "amsgrad": True},
                desc="amsgrad",
            ),
        ]
        + (cuda_supported_configs if _get_device_type(device) == "cuda" else [])
        + (mps_supported_configs if _get_device_type(device) == "mps" else [])
    )
    if dtype in (torch.float16,):
        for input in total:
            """
            Too small eps will make denom to be zero for low precision dtype
            denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)
            For example,
            >>> a
            tensor([0.], dtype=torch.float16)
            >>> a + 1e-8
            tensor([0.], dtype=torch.float16)
            """
            input.kwargs["eps"] = 0.1
    return total
586
+
587
+
588
def optim_error_inputs_func_adam(device, dtype):
    """Error-producing configs for Adam: shared errors, beta/weight_decay
    range checks and Tensor lr/betas restrictions (CPU), plus
    fused/foreach/differentiable mutual-exclusion checks (CUDA)."""
    error_inputs = get_error_inputs_for_all_optims(device, dtype)
    if _get_device_type(device) == "cpu":
        error_inputs += [
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs=dict(lr=1e-2, betas=(1.0, 0.0)),
                    desc="beta1 should be between 0 and 1",
                ),
                error_type=ValueError,
                error_regex="Invalid beta parameter at index 0: 1.0",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs=dict(lr=1e-2, weight_decay=-1),
                    desc="weight_decay should > 0",
                ),
                error_type=ValueError,
                error_regex="Invalid weight_decay value: -1",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs=dict(lr=torch.tensor(0.001), foreach=True),
                    desc="lr as Tensor doesn't work with foreach & not capturable",
                ),
                error_type=ValueError,
                error_regex="lr as a Tensor is not supported for capturable=False and foreach=True",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs=dict(lr=1e-2, betas=(0.9, torch.tensor(0.99))),
                    desc="betas must be either both floats or both Tensors",
                ),
                error_type=ValueError,
                error_regex="betas must be either both floats or both Tensors",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs=dict(lr=1e-2, betas=(torch.tensor(0.9), 0.99)),
                    desc="betas must be either both floats or both Tensors",
                ),
                error_type=ValueError,
                error_regex="betas must be either both floats or both Tensors",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs=dict(
                        lr=1e-2,
                        betas=(torch.tensor(0.9), torch.tensor(0.99)),
                        foreach=True,
                    ),
                    desc=r"betas\[0\] as a Tensor is not supported for capturable=False and foreach=True",
                ),
                error_type=ValueError,
                error_regex=r"betas\[0\] as a Tensor is not supported for capturable=False and foreach=True",
            ),
        ]
    if _get_device_type(device) == "cuda":
        sample_tensor = torch.empty((), device=device, dtype=dtype)
        error_inputs += [
            ErrorOptimizerInput(
                OptimizerInput(
                    params=[sample_tensor],
                    kwargs={"foreach": True, "fused": True},
                    desc="`fused` and `foreach` cannot be `True` together",
                ),
                error_type=RuntimeError,
                error_regex="`fused` and `foreach` cannot be `True` together",
            ),
            ErrorOptimizerInput(
                OptimizerInput(
                    params=[sample_tensor],
                    kwargs={"fused": True, "differentiable": True},
                    desc="`fused` does not support `differentiable`",
                ),
                error_type=RuntimeError,
                error_regex="`fused` does not support `differentiable`",
            ),
        ]
    return error_inputs
674
+
675
+
676
def optim_inputs_func_adamax(device, dtype=None):
    """Constructor configs for Adamax: the common flag combinations, with
    capturable variants appended on CUDA."""
    configs = [
        OptimizerInput(params=None, kwargs={}, desc="default"),
        OptimizerInput(params=None, kwargs={"lr": 0.1}, desc="non-default lr"),
        OptimizerInput(
            params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
        ),
        OptimizerInput(
            params=None,
            kwargs={"maximize": True},
            desc="maximize",
        ),
        OptimizerInput(
            params=None,
            kwargs={"weight_decay": 0.1, "maximize": True},
            desc="maximize, weight_decay",
        ),
    ]
    if _get_device_type(device) == "cuda":
        configs.extend(
            [
                OptimizerInput(
                    params=None, kwargs={"capturable": True}, desc="capturable"
                ),
                OptimizerInput(
                    params=None,
                    kwargs={"weight_decay": 0.9, "maximize": True, "capturable": True},
                    desc="capturable, maximize, weight_decay",
                ),
                OptimizerInput(
                    params=None,
                    kwargs={"weight_decay": 0, "maximize": True, "capturable": True},
                    desc="capturable, maximize",
                ),
                OptimizerInput(
                    params=None,
                    kwargs={"weight_decay": 0.9, "maximize": False, "capturable": True},
                    desc="capturable, weight_decay",
                ),
                OptimizerInput(
                    params=None,
                    kwargs={
                        "lr": torch.tensor(0.001),
                        "weight_decay": 0.9,
                        "maximize": False,
                        "capturable": True,
                    },
                    desc="capturable, weight_decay, tensor LR",
                ),
            ]
        )
    return configs
723
+
724
+
725
def optim_error_inputs_func_adamax(device, dtype):
    """Error-producing constructor configs for Adamax: the shared
    cross-optimizer errors plus an out-of-range beta2 check (CPU only)."""
    error_inputs = get_error_inputs_for_all_optims(device, dtype)
    if _get_device_type(device) == "cpu":
        error_inputs.append(
            ErrorOptimizerInput(
                OptimizerInput(
                    params=None,
                    kwargs={"lr": 1e-2, "betas": (0.0, 1.0)},
                    desc="beta2 should be between 0 and 1",
                ),
                error_type=ValueError,
                error_regex="Invalid beta parameter at index 1: 1.0",
            )
        )
    return error_inputs
740
+
741
+
742
def optim_inputs_func_adamw(device, dtype=None):
    """AdamW accepts exactly Adam's constructor configurations."""
    return optim_inputs_func_adam(device=device, dtype=dtype)
744
+
745
+
746
def optim_error_inputs_func_adamw(device, dtype):
    """AdamW shares Adam's error-producing configurations."""
    return optim_error_inputs_func_adam(device=device, dtype=dtype)
748
+
749
+
750
def optim_inputs_func_asgd(device, dtype=None):
    """Constructor configs for ASGD, including the ASGD-specific lambd and
    t0 knobs; capturable variants are appended on CUDA."""
    configs = [
        OptimizerInput(params=None, kwargs={}, desc="default"),
        OptimizerInput(params=None, kwargs={"lambd": 0.1}, desc="non-default lambd"),
        OptimizerInput(params=None, kwargs={"lr": 0.02}, desc="non-default lr"),
        OptimizerInput(params=None, kwargs={"t0": 100}, desc="t0"),
        OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"),
        OptimizerInput(
            params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
        ),
        OptimizerInput(
            params=None,
            kwargs={"weight_decay": 0.1, "maximize": True},
            desc="maximize, nonzero weight_decay",
        ),
    ]
    if _get_device_type(device) == "cuda":
        configs.extend(
            [
                OptimizerInput(
                    params=None, kwargs={"capturable": True}, desc="capturable"
                ),
                OptimizerInput(
                    params=None,
                    kwargs={"maximize": True, "capturable": True},
                    desc="maximize, capturable",
                ),
                OptimizerInput(
                    params=None,
                    kwargs={"weight_decay": 0.1, "capturable": True},
                    desc="weight_decay, capturable",
                ),
                OptimizerInput(
                    params=None,
                    kwargs={"weight_decay": 0.1, "maximize": True, "capturable": True},
                    desc="maximize, weight_decay, capturable",
                ),
                OptimizerInput(
                    params=None,
                    kwargs={
                        "lr": torch.tensor(0.001),
                        "weight_decay": 0.1,
                        "maximize": True,
                        "capturable": True,
                    },
                    desc="maximize, weight_decay, capturable, tensor LR",
                ),
            ]
        )
    return configs
794
+
795
+
796
+ def optim_error_inputs_func_asgd(device, dtype):
797
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
798
+ if _get_device_type(device) == "cpu":
799
+ error_inputs += [
800
+ ErrorOptimizerInput(
801
+ OptimizerInput(
802
+ params=None,
803
+ kwargs=dict(lr=1e-2, weight_decay=-0.5),
804
+ desc="weight_decay should > 0",
805
+ ),
806
+ error_type=ValueError,
807
+ error_regex="Invalid weight_decay value: -0.5",
808
+ ),
809
+ ]
810
+ return error_inputs
811
+
812
+
813
+ def optim_inputs_func_lbfgs(device, dtype=None):
814
+ return [
815
+ OptimizerInput(params=None, kwargs={}, desc="default"),
816
+ OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"),
817
+ OptimizerInput(
818
+ params=None, kwargs={"lr": torch.tensor(0.001)}, desc="Tensor lr"
819
+ ),
820
+ OptimizerInput(
821
+ params=None, kwargs={"tolerance_grad": 1e-6}, desc="tolerance_grad"
822
+ ),
823
+ OptimizerInput(
824
+ params=None,
825
+ kwargs={"line_search_fn": "strong_wolfe"},
826
+ desc="strong_wolfe",
827
+ ),
828
+ ]
829
+
830
+
831
+ def optim_error_inputs_func_lbfgs(device, dtype):
832
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
833
+ return error_inputs
834
+
835
+
836
+ def optim_inputs_func_nadam(device, dtype=None):
837
+ cuda_supported_configs = [
838
+ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"),
839
+ OptimizerInput(
840
+ params=None,
841
+ kwargs={"weight_decay": 0.9, "momentum_decay": 6e-3, "capturable": True},
842
+ desc="weight_decay, capturable",
843
+ ),
844
+ OptimizerInput(
845
+ params=None,
846
+ kwargs={
847
+ "weight_decay": 0.9,
848
+ "momentum_decay": 6e-3,
849
+ "decoupled_weight_decay": True,
850
+ "capturable": True,
851
+ },
852
+ desc="decoupled_weight_decay, capturable",
853
+ ),
854
+ OptimizerInput(
855
+ params=None,
856
+ kwargs={
857
+ "lr": torch.tensor(0.001),
858
+ "weight_decay": 0.9,
859
+ "momentum_decay": 6e-3,
860
+ "decoupled_weight_decay": True,
861
+ "capturable": True,
862
+ },
863
+ desc="decoupled_weight_decay, capturable",
864
+ ),
865
+ ]
866
+ return [
867
+ OptimizerInput(params=None, kwargs={}, desc="default"),
868
+ OptimizerInput(params=None, kwargs={"lr": 1e-3}, desc="non-default lr"),
869
+ OptimizerInput(
870
+ params=None,
871
+ kwargs={"momentum_decay": 6e-3},
872
+ desc="non-zero momentum_decay",
873
+ ),
874
+ OptimizerInput(
875
+ params=None,
876
+ kwargs={
877
+ "weight_decay": 0.1,
878
+ },
879
+ desc="weight_decay",
880
+ ),
881
+ OptimizerInput(
882
+ params=None,
883
+ kwargs={"weight_decay": 0.1, "momentum_decay": 6e-3},
884
+ desc="weight_decay, momentum_decay",
885
+ ),
886
+ OptimizerInput(
887
+ params=None,
888
+ kwargs={
889
+ "weight_decay": 0.1,
890
+ "momentum_decay": 6e-3,
891
+ "decoupled_weight_decay": True,
892
+ },
893
+ desc="decoupled_weight_decay",
894
+ ),
895
+ OptimizerInput(
896
+ params=None,
897
+ kwargs={"weight_decay": 0.1, "maximize": True},
898
+ desc="maximize",
899
+ ),
900
+ ] + (cuda_supported_configs if _get_device_type(device) == "cuda" else [])
901
+
902
+
903
+ def optim_error_inputs_func_nadam(device, dtype):
904
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
905
+ if _get_device_type(device) == "cpu":
906
+ error_inputs += [
907
+ ErrorOptimizerInput(
908
+ OptimizerInput(
909
+ params=None,
910
+ kwargs=dict(lr=1e-2, betas=(1.0, 0.0)),
911
+ desc="beta1 should be between 0 and 1",
912
+ ),
913
+ error_type=ValueError,
914
+ error_regex="Invalid beta parameter at index 0: 1.0",
915
+ ),
916
+ ErrorOptimizerInput(
917
+ OptimizerInput(
918
+ params=None,
919
+ kwargs=dict(lr=1e-2, momentum_decay=-0.2),
920
+ desc="momentum_decay should > 0",
921
+ ),
922
+ error_type=ValueError,
923
+ error_regex="Invalid momentum_decay value: -0.2",
924
+ ),
925
+ ]
926
+ return error_inputs
927
+
928
+
929
+ # Weird story bro, NAdam and RAdam do not have maximize.
930
+ def optim_inputs_func_radam(device=None, dtype=None):
931
+ cuda_supported_configs = [
932
+ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"),
933
+ OptimizerInput(
934
+ params=None,
935
+ kwargs={
936
+ "capturable": True,
937
+ "weight_decay": 0.1,
938
+ },
939
+ desc="capturable, weight_decay",
940
+ ),
941
+ OptimizerInput(
942
+ params=None,
943
+ kwargs={
944
+ "capturable": True,
945
+ "weight_decay": 0.1,
946
+ "decoupled_weight_decay": True,
947
+ },
948
+ desc="capturable, weight_decay, decoupled_weight_decay",
949
+ ),
950
+ OptimizerInput(
951
+ params=None,
952
+ kwargs={
953
+ "lr": torch.tensor(0.001),
954
+ "capturable": True,
955
+ "weight_decay": 0.1,
956
+ "decoupled_weight_decay": True,
957
+ },
958
+ desc="capturable, weight_decay, decoupled_weight_decay, tensor LR",
959
+ ),
960
+ ]
961
+ return [
962
+ OptimizerInput(params=None, kwargs={}, desc="default"),
963
+ OptimizerInput(params=None, kwargs={"lr": 2e-3}, desc="non-default lr"),
964
+ OptimizerInput(params=None, kwargs={"eps": 1e-6}, desc="non-default eps"),
965
+ OptimizerInput(
966
+ params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
967
+ ),
968
+ OptimizerInput(
969
+ params=None,
970
+ kwargs={"weight_decay": 0.1, "decoupled_weight_decay": True},
971
+ desc="decoupled_weight_decay",
972
+ ),
973
+ OptimizerInput(
974
+ params=None,
975
+ kwargs={"weight_decay": 0.1, "maximize": True},
976
+ desc="maximize",
977
+ ),
978
+ ] + (cuda_supported_configs if _get_device_type(device) == "cuda" else [])
979
+
980
+
981
+ def optim_error_inputs_func_radam(device, dtype):
982
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
983
+ if _get_device_type(device) == "cpu":
984
+ error_inputs += [
985
+ ErrorOptimizerInput(
986
+ OptimizerInput(
987
+ params=None,
988
+ kwargs=dict(lr=1e-2, betas=(1.0, 0.0)),
989
+ desc="beta1 should be between 0 and 1",
990
+ ),
991
+ error_type=ValueError,
992
+ error_regex="Invalid beta parameter at index 0: 1.0",
993
+ ),
994
+ ErrorOptimizerInput(
995
+ OptimizerInput(
996
+ params=None,
997
+ kwargs=dict(lr=1e-2, weight_decay=-1),
998
+ desc="weight_decay should > 0",
999
+ ),
1000
+ error_type=ValueError,
1001
+ error_regex="Invalid weight_decay value: -1",
1002
+ ),
1003
+ ]
1004
+ return error_inputs
1005
+
1006
+
1007
+ def optim_inputs_func_rmsprop(device, dtype=None):
1008
+ cuda_supported_configs = [
1009
+ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"),
1010
+ OptimizerInput(
1011
+ params=None,
1012
+ kwargs={"weight_decay": 0.1, "maximize": True, "capturable": True},
1013
+ desc="capturable, maximize",
1014
+ ),
1015
+ OptimizerInput(
1016
+ params=None,
1017
+ kwargs={"lr": torch.tensor(0.001), "capturable": True},
1018
+ desc="Tensor lr with capturable",
1019
+ ),
1020
+ ]
1021
+
1022
+ return [
1023
+ OptimizerInput(params=None, kwargs={}, desc="default"),
1024
+ OptimizerInput(params=None, kwargs={"lr": 1e-3}, desc="non-default lr"),
1025
+ OptimizerInput(
1026
+ params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay"
1027
+ ),
1028
+ OptimizerInput(
1029
+ params=None,
1030
+ kwargs={
1031
+ "maximize": True,
1032
+ },
1033
+ desc="maximize",
1034
+ ),
1035
+ OptimizerInput(
1036
+ params=None,
1037
+ kwargs={"weight_decay": 0.1, "centered": True},
1038
+ desc="centered",
1039
+ ),
1040
+ OptimizerInput(
1041
+ params=None,
1042
+ kwargs={
1043
+ "maximize": True,
1044
+ "weight_decay": 0.1,
1045
+ },
1046
+ desc="maximize, weight_decay",
1047
+ ),
1048
+ OptimizerInput(
1049
+ params=None,
1050
+ kwargs={"weight_decay": 0.1, "centered": True, "momentum": 0.1},
1051
+ desc="momentum",
1052
+ ),
1053
+ OptimizerInput(
1054
+ params=None,
1055
+ kwargs={
1056
+ "weight_decay": 0.1,
1057
+ "centered": True,
1058
+ "momentum": 0.1,
1059
+ "maximize": True,
1060
+ },
1061
+ desc="maximize, centered, weight_decay, w/ momentum",
1062
+ ),
1063
+ ] + (cuda_supported_configs if _get_device_type(device) == "cuda" else [])
1064
+
1065
+
1066
+ def optim_error_inputs_func_rmsprop(device, dtype):
1067
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
1068
+ if _get_device_type(device) == "cpu":
1069
+ error_inputs += [
1070
+ ErrorOptimizerInput(
1071
+ OptimizerInput(
1072
+ params=None,
1073
+ kwargs=dict(lr=1e-2, momentum=-1.0),
1074
+ desc="momentum should be between 0 and 1",
1075
+ ),
1076
+ error_type=ValueError,
1077
+ error_regex="Invalid momentum value: -1.0",
1078
+ ),
1079
+ ]
1080
+ return error_inputs
1081
+
1082
+
1083
+ def optim_inputs_func_rprop(device, dtype=None):
1084
+ cuda_supported_configs = [
1085
+ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"),
1086
+ OptimizerInput(
1087
+ params=None,
1088
+ kwargs={"lr": torch.tensor(0.001), "capturable": True},
1089
+ desc="Tensor lr with capturable",
1090
+ ),
1091
+ ]
1092
+
1093
+ return [
1094
+ OptimizerInput(params=None, kwargs={}, desc="default"),
1095
+ OptimizerInput(params=None, kwargs={"lr": 2e-4}, desc="non-default lr"),
1096
+ OptimizerInput(
1097
+ params=None, kwargs={"etas": (0.5, 1.5)}, desc="non-default etas"
1098
+ ),
1099
+ OptimizerInput(
1100
+ params=None,
1101
+ kwargs={"step_sizes": (2e-6, 100)},
1102
+ desc="non-default step_sizes",
1103
+ ),
1104
+ OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"),
1105
+ ] + (cuda_supported_configs if _get_device_type(device) == "cuda" else [])
1106
+
1107
+
1108
+ def optim_error_inputs_func_rprop(device, dtype):
1109
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
1110
+ if _get_device_type(device) == "cpu":
1111
+ error_inputs += [
1112
+ ErrorOptimizerInput(
1113
+ OptimizerInput(
1114
+ params=None,
1115
+ kwargs=dict(lr=1e-2, etas=(1.0, 0.5)),
1116
+ desc="0 < eta1 < 1 < eta2",
1117
+ ),
1118
+ error_type=ValueError,
1119
+ error_regex="Invalid eta values: 1.0, 0.5",
1120
+ ),
1121
+ ]
1122
+ return error_inputs
1123
+
1124
+
1125
+ def optim_inputs_func_sgd(device, dtype=None):
1126
+ return [
1127
+ OptimizerInput(params=None, kwargs={}, desc="default"),
1128
+ OptimizerInput(params=None, kwargs={"lr": 1e-2}, desc="non-default lr"),
1129
+ OptimizerInput(
1130
+ params=None, kwargs={"lr": torch.tensor(0.001)}, desc="tensor lr"
1131
+ ),
1132
+ OptimizerInput(
1133
+ params=None, kwargs={"weight_decay": 0.5}, desc="non-zero weight_decay"
1134
+ ),
1135
+ OptimizerInput(params=None, kwargs={"momentum": 0.9}, desc="momentum"),
1136
+ OptimizerInput(
1137
+ params=None,
1138
+ kwargs={"weight_decay": 0.1, "maximize": True},
1139
+ desc="maximize",
1140
+ ),
1141
+ OptimizerInput(
1142
+ params=None,
1143
+ kwargs={"momentum": 0.9, "dampening": 0.5},
1144
+ desc="dampening",
1145
+ ),
1146
+ OptimizerInput(
1147
+ params=None,
1148
+ kwargs={"momentum": 0.9, "weight_decay": 0.1},
1149
+ desc="weight_decay w/ momentum",
1150
+ ),
1151
+ OptimizerInput(
1152
+ params=None,
1153
+ kwargs={"momentum": 0.9, "nesterov": True, "weight_decay": 0.1},
1154
+ desc="nesterov",
1155
+ ),
1156
+ ]
1157
+
1158
+
1159
+ def optim_error_inputs_func_sgd(device, dtype):
1160
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
1161
+ if _get_device_type(device) == "cpu":
1162
+ error_inputs += [
1163
+ ErrorOptimizerInput(
1164
+ OptimizerInput(
1165
+ params=None,
1166
+ kwargs=dict(lr=1e-2, momentum=-0.5),
1167
+ desc="momentum should be between 0 and 1",
1168
+ ),
1169
+ error_type=ValueError,
1170
+ error_regex="Invalid momentum value: -0.5",
1171
+ ),
1172
+ ]
1173
+ return error_inputs
1174
+
1175
+
1176
+ def optim_inputs_func_sparseadam(device, dtype=None):
1177
+ return [
1178
+ OptimizerInput(params=None, kwargs={}, desc="default"),
1179
+ OptimizerInput(
1180
+ params=None, kwargs={"lr": 0.01}, desc="non-default lr"
1181
+ ), # TODO: Move out to testing in param_group?
1182
+ OptimizerInput(
1183
+ params=None, kwargs={"lr": torch.tensor(0.001)}, desc="Tensor lr"
1184
+ ),
1185
+ OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"),
1186
+ ]
1187
+
1188
+
1189
+ def optim_error_inputs_func_sparseadam(device, dtype):
1190
+ error_inputs = get_error_inputs_for_all_optims(device, dtype)
1191
+
1192
+ if _get_device_type(device) == "cpu":
1193
+ error_inputs += [
1194
+ ErrorOptimizerInput(
1195
+ OptimizerInput(
1196
+ params=None,
1197
+ kwargs=dict(lr=1e-2, betas=(1.0, 0.0)),
1198
+ desc="beta1 should be between 0 and 1",
1199
+ ),
1200
+ error_type=ValueError,
1201
+ error_regex="Invalid beta parameter at index 0: 1.0",
1202
+ ),
1203
+ ErrorOptimizerInput(
1204
+ OptimizerInput(
1205
+ params=[
1206
+ torch.zeros(
1207
+ 3, layout=torch.sparse_coo, device=device, dtype=dtype
1208
+ )
1209
+ ],
1210
+ kwargs={},
1211
+ desc="dense params required",
1212
+ ),
1213
+ error_type=ValueError,
1214
+ error_regex="SparseAdam requires dense parameter tensors",
1215
+ ),
1216
+ ErrorOptimizerInput(
1217
+ OptimizerInput(
1218
+ params=[
1219
+ {
1220
+ "params": [
1221
+ torch.zeros(
1222
+ 3,
1223
+ layout=torch.sparse_coo,
1224
+ device=device,
1225
+ dtype=dtype,
1226
+ )
1227
+ ]
1228
+ }
1229
+ ],
1230
+ kwargs={},
1231
+ desc="dense params required in param_groups",
1232
+ ),
1233
+ error_type=ValueError,
1234
+ error_regex="SparseAdam requires dense parameter tensors",
1235
+ ),
1236
+ ErrorOptimizerInput(
1237
+ OptimizerInput(
1238
+ params=[torch.rand(2, 3, device=device, dtype=torch.complex64)],
1239
+ kwargs={},
1240
+ desc="complex not supported",
1241
+ ),
1242
+ error_type=ValueError,
1243
+ error_regex="SparseAdam does not support complex parameters",
1244
+ ),
1245
+ ]
1246
+ return error_inputs
1247
+
1248
+
1249
+ def _get_device_type(device: Union[str, torch.device]) -> str:
1250
+ # Returns the device type as a string, e.g., "cpu" or "cuda"
1251
+ if isinstance(device, torch.device):
1252
+ device = str(device.type)
1253
+ assert isinstance(device, str)
1254
+ return device.split(":")[0]
1255
+
1256
+
1257
+ def _get_optim_inputs_including_global_cliquey_kwargs(
1258
+ device, dtype, optim_info, skip=()
1259
+ ) -> List[OptimizerInput]:
1260
+ """
1261
+ Return a list of all configs for a given optimizer as a list of OptimizerInputs,
1262
+ including configs that have supported global cliquey kwargs (foreach, fused,
1263
+ differentiable) based on optim_info.supported_impls.
1264
+
1265
+ The configs (optim_inputs) returned by optim_info.optim_inputs_func(...)
1266
+ intentionally do NOT include global cliquey kwargs to give flexibility to tests.
1267
+ For example, testing correctness between toggling foreach on and off is now
1268
+ trivial. That said, we sometimes want to test for all possible configs on an
1269
+ optimizer including all supported flags, so this helper returns all optim inputs.
1270
+ """
1271
+ assert all(
1272
+ x in ["foreach", "fused", "differentiable"] for x in skip
1273
+ ), "skip must be a subset of ['foreach', 'fused', 'differentiable']"
1274
+
1275
+ optim_inputs = optim_info.optim_inputs_func(device)
1276
+
1277
+ supported_impls = tuple(
1278
+ x
1279
+ for x in optim_info.supported_impls
1280
+ if x not in skip
1281
+ and (_get_device_type(device) in optim_info.supports_fused_on or x != "fused")
1282
+ and (
1283
+ _get_device_type(device) in _get_foreach_kernels_supported_devices()
1284
+ or x != "foreach"
1285
+ )
1286
+ )
1287
+
1288
+ all_optim_inputs = []
1289
+ for optim_input in optim_inputs:
1290
+ # Add the base config where all the flags are False
1291
+ base_kwargs = deepcopy(optim_input.kwargs)
1292
+ if len(supported_impls) != 0:
1293
+ for flag in supported_impls:
1294
+ base_kwargs[flag] = False
1295
+ all_optim_inputs.append(
1296
+ OptimizerInput(params=None, kwargs=base_kwargs, desc=optim_input.desc)
1297
+ )
1298
+ else:
1299
+ all_optim_inputs.append(optim_input)
1300
+ # Add a config for when each of the global cliquey kwargs is True
1301
+ # Note that in [optimizer kwarg categories], these kwargs are mutually
1302
+ # exclusive, so we do not need to product them together.
1303
+ for flag in supported_impls:
1304
+ new_kwargs = deepcopy(base_kwargs)
1305
+ new_kwargs[flag] = True
1306
+ all_optim_inputs.append(
1307
+ OptimizerInput(
1308
+ params=None, kwargs=new_kwargs, desc=f"{optim_input.desc} & {flag}"
1309
+ )
1310
+ )
1311
+ return all_optim_inputs
1312
+
1313
+
1314
+ # Database of OptimizerInfo entries in alphabetical order.
1315
+ optim_db: List[OptimizerInfo] = [
1316
+ OptimizerInfo(
1317
+ Adadelta,
1318
+ optim_inputs_func=optim_inputs_func_adadelta,
1319
+ optim_error_inputs_func=optim_error_inputs_func_adadelta,
1320
+ supported_impls=("foreach", "differentiable"),
1321
+ has_capturable_arg=True,
1322
+ skips=(
1323
+ DecorateInfo(
1324
+ skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"),
1325
+ "TestOptimRenewed",
1326
+ "test_tensor_lr",
1327
+ active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7),
1328
+ ),
1329
+ DecorateInfo(
1330
+ skipIfTorchDynamo("See #116028"),
1331
+ "TestOptimRenewed",
1332
+ "test_set_default_dtype_works_with_foreach",
1333
+ ),
1334
+ DecorateInfo(
1335
+ skipIfTorchDynamo(
1336
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1337
+ ),
1338
+ "TestOptimRenewed",
1339
+ "test_complex_2d",
1340
+ ),
1341
+ # Note on tolerances:
1342
+ # test_correctness_Adadelta_cuda_float32
1343
+ # Mismatched elements: 10 / 100 (10.0%)
1344
+ # Greatest absolute difference: 4.838220775127411e-05 at index (7, 4) (up to 1e-05 allowed)
1345
+ # Greatest relative difference: 0.007270356640219688 at index (7, 2) (up to 1e-05 allowed)
1346
+ # This is due to floating point ordering error + usage of sqrt
1347
+ DecorateInfo(
1348
+ toleranceOverride(
1349
+ {
1350
+ torch.float32: tol(
1351
+ rtol=5.5e-4,
1352
+ atol=5e-5,
1353
+ )
1354
+ }
1355
+ ),
1356
+ "CompiledOptimizerParityTests",
1357
+ "test_correctness",
1358
+ ),
1359
+ DecorateInfo(
1360
+ skipIfTorchDynamo(
1361
+ "This test uses mocks, which dynamo does not support"
1362
+ ),
1363
+ "TestOptimRenewed",
1364
+ "test_defaults_changed_to_foreach",
1365
+ ),
1366
+ ),
1367
+ ),
1368
+ OptimizerInfo(
1369
+ Adafactor,
1370
+ optim_inputs_func=optim_inputs_func_adafactor,
1371
+ optim_error_inputs_func=optim_error_inputs_func_adafactor,
1372
+ supported_impls=("foreach",),
1373
+ not_og_supported_flags=("foreach",),
1374
+ supports_complex=False,
1375
+ skips=(
1376
+ DecorateInfo(
1377
+ unittest.skip("See #133268 regarding dtype being None"),
1378
+ "CompiledOptimizerParityTests",
1379
+ "test_correctness",
1380
+ device_type="cuda",
1381
+ active_if=lambda kwargs: kwargs.get("use_closure", False),
1382
+ ),
1383
+ DecorateInfo(
1384
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1385
+ "TestOptimRenewed",
1386
+ "test_can_load_older_state_dict",
1387
+ device_type="cuda",
1388
+ ),
1389
+ DecorateInfo(
1390
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1391
+ "TestOptimRenewed",
1392
+ "test_deepcopy_copies_all_public_attrs",
1393
+ device_type="cuda",
1394
+ ),
1395
+ DecorateInfo(
1396
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1397
+ "TestOptimRenewed",
1398
+ "test_foreach_large_tensor",
1399
+ ),
1400
+ DecorateInfo(
1401
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1402
+ "TestOptimRenewed",
1403
+ "test_foreach_matches_forloop",
1404
+ ),
1405
+ DecorateInfo(
1406
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1407
+ "TestOptimRenewed",
1408
+ "test_load_nontensor_step",
1409
+ device_type="cuda",
1410
+ ),
1411
+ DecorateInfo(
1412
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1413
+ "TestOptimRenewed",
1414
+ "test_mixed_device_dtype",
1415
+ ),
1416
+ DecorateInfo(
1417
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1418
+ "TestOptimRenewed",
1419
+ "test_param_groups_lr",
1420
+ device_type="cuda",
1421
+ ),
1422
+ DecorateInfo(
1423
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1424
+ "TestOptimRenewed",
1425
+ "test_param_groups_weight_decay",
1426
+ device_type="cuda",
1427
+ ),
1428
+ DecorateInfo(
1429
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1430
+ "TestOptimRenewed",
1431
+ "test_peak_memory_foreach",
1432
+ ),
1433
+ DecorateInfo(
1434
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1435
+ "TestOptimRenewed",
1436
+ "test_save_load_equality_with_weights_only",
1437
+ device_type="cuda",
1438
+ ),
1439
+ DecorateInfo(
1440
+ skipIfTorchDynamo("See #116028 regarding copy not supported"),
1441
+ "TestOptimRenewed",
1442
+ "test_set_default_dtype_works_with_foreach",
1443
+ ),
1444
+ DecorateInfo(
1445
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1446
+ "TestOptimRenewed",
1447
+ "test_state_dict_deterministic",
1448
+ device_type="cuda",
1449
+ ),
1450
+ DecorateInfo(
1451
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1452
+ "TestOptimRenewed",
1453
+ "test_step_is_noop_for_zero_grads",
1454
+ device_type="cuda",
1455
+ ),
1456
+ DecorateInfo(
1457
+ unittest.skip("See #133268 regarding dtype being None"),
1458
+ "CompiledOptimizerParityTests",
1459
+ "test_correctness",
1460
+ device_type="xpu",
1461
+ ),
1462
+ DecorateInfo(
1463
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1464
+ "TestOptimRenewed",
1465
+ "test_can_load_older_state_dict",
1466
+ device_type="xpu",
1467
+ ),
1468
+ DecorateInfo(
1469
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1470
+ "TestOptimRenewed",
1471
+ "test_deepcopy_copies_all_public_attrs",
1472
+ device_type="xpu",
1473
+ ),
1474
+ DecorateInfo(
1475
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1476
+ "TestOptimRenewed",
1477
+ "test_load_nontensor_step",
1478
+ device_type="xpu",
1479
+ ),
1480
+ DecorateInfo(
1481
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1482
+ "TestOptimRenewed",
1483
+ "test_param_groups_lr",
1484
+ device_type="xpu",
1485
+ ),
1486
+ DecorateInfo(
1487
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1488
+ "TestOptimRenewed",
1489
+ "test_param_groups_weight_decay",
1490
+ device_type="xpu",
1491
+ ),
1492
+ DecorateInfo(
1493
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1494
+ "TestOptimRenewed",
1495
+ "test_save_load_equality_with_weights_only",
1496
+ device_type="xpu",
1497
+ ),
1498
+ DecorateInfo(
1499
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1500
+ "TestOptimRenewed",
1501
+ "test_state_dict_deterministic",
1502
+ device_type="xpu",
1503
+ ),
1504
+ DecorateInfo(
1505
+ skipIfTorchDynamo("See #133268 regarding dtype being None"),
1506
+ "TestOptimRenewed",
1507
+ "test_step_is_noop_for_zero_grads",
1508
+ device_type="xpu",
1509
+ ),
1510
+ ),
1511
+ ),
1512
+ OptimizerInfo(
1513
+ Adagrad,
1514
+ optim_inputs_func=optim_inputs_func_adagrad,
1515
+ optim_error_inputs_func=optim_error_inputs_func_adagrad,
1516
+ supported_impls=("foreach", "differentiable", "fused"),
1517
+ not_og_supported_flags=(
1518
+ "foreach",
1519
+ "differentiable",
1520
+ "fused",
1521
+ "maximize",
1522
+ "capturable",
1523
+ ),
1524
+ supports_fused_on=("cpu",),
1525
+ supports_sparse=True,
1526
+ metadata_for_sparse=(
1527
+ {"lr": 0.1, "weight_decay": 0, "lr_decay": 0},
1528
+ [
1529
+ lambda opt: StepLR(opt, gamma=1 - 1e-5, step_size=500),
1530
+ lambda opt: ReduceLROnPlateau(opt, threshold=1e-4),
1531
+ ],
1532
+ ),
1533
+ decorators=(
1534
+ DecorateInfo(
1535
+ # Note on tolerances:
1536
+ # difference comes from the fact that the non fused kernel have
1537
+ # more dtype cast operations. We have another test test_fused_cpu_matches_cuda
1538
+ # to make sure there is no discrepancies between cuda fused kernel
1539
+ # and cpu fused kernel
1540
+ toleranceOverride(
1541
+ {
1542
+ torch.bfloat16: tol(atol=5e-3, rtol=5e-3),
1543
+ torch.float16: tol(atol=5e-3, rtol=5e-3),
1544
+ }
1545
+ ),
1546
+ "TestOptimRenewed",
1547
+ "test_fused_matches_forloop",
1548
+ ),
1549
+ ),
1550
+ skips=(
1551
+ DecorateInfo(
1552
+ skipIfMPS, # addcdiv doesn't work for non-contiguous, see #118115
1553
+ "TestOptimRenewed",
1554
+ "test_forloop_goes_right_direction",
1555
+ active_if=lambda kwargs: not kwargs["contiguous"],
1556
+ device_type="mps",
1557
+ ),
1558
+ DecorateInfo(
1559
+ skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"),
1560
+ "TestOptimRenewed",
1561
+ "test_tensor_lr",
1562
+ active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7),
1563
+ ),
1564
+ DecorateInfo(
1565
+ skipIfTorchDynamo("See #116028"),
1566
+ "TestOptimRenewed",
1567
+ "test_set_default_dtype_works_with_foreach",
1568
+ ),
1569
+ DecorateInfo(
1570
+ skipIfTorchDynamo(
1571
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1572
+ ),
1573
+ "TestOptimRenewed",
1574
+ "test_complex_2d",
1575
+ ),
1576
+ DecorateInfo(
1577
+ skipIfTorchDynamo(
1578
+ "This test uses mocks, which dynamo does not support"
1579
+ ),
1580
+ "TestOptimRenewed",
1581
+ "test_defaults_changed_to_foreach",
1582
+ ),
1583
+ ),
1584
+ ),
1585
+ OptimizerInfo(
1586
+ Adam,
1587
+ optim_inputs_func=optim_inputs_func_adam,
1588
+ scheduler_inputs=(
1589
+ [lambda opt: ExponentialLR(opt, gamma=0.9)],
1590
+ [lambda opt: LinearLR(opt, start_factor=0.4, total_iters=4)],
1591
+ [
1592
+ lambda opt: ConstantLR(opt, factor=0.4, total_iters=4),
1593
+ lambda opt: ExponentialLR(opt, gamma=0.9),
1594
+ ],
1595
+ [
1596
+ lambda opt: ExponentialLR(opt, gamma=0.9),
1597
+ lambda opt: ReduceLROnPlateau(opt),
1598
+ ],
1599
+ [lambda opt: ConstantLR(opt, factor=0.4, total_iters=4)],
1600
+ [lambda opt: PolynomialLR(opt, power=0.9, total_iters=4)],
1601
+ [
1602
+ lambda opt: StepLR(opt, gamma=0.9, step_size=10),
1603
+ lambda opt: ReduceLROnPlateau(opt),
1604
+ ],
1605
+ ),
1606
+ optim_error_inputs_func=optim_error_inputs_func_adam,
1607
+ supported_impls=("foreach", "differentiable", "fused"),
1608
+ has_capturable_arg=True,
1609
+ not_og_supported_flags=(
1610
+ "foreach",
1611
+ "differentiable",
1612
+ "fused",
1613
+ "maximize",
1614
+ "capturable",
1615
+ ),
1616
+ supports_fused_on=("cpu", "cuda", "mps"),
1617
+ decorators=(
1618
+ # Expected floating point error between fused and compiled forloop
1619
+ DecorateInfo(
1620
+ toleranceOverride({torch.float64: tol(atol=4.5e-7, rtol=2.2e-6)}),
1621
+ "TestOptimRenewed",
1622
+ "test_fused_matches_forloop",
1623
+ active_if=lambda kwargs: TEST_WITH_TORCHDYNAMO
1624
+ and kwargs["dtype"] == torch.float64,
1625
+ ),
1626
+ DecorateInfo(
1627
+ # Note on tolerances:
1628
+ # difference comes from the fact that the non fused kernel have
1629
+ # more dtype cast operations. We have another test test_fused_cpu_matches_cuda
1630
+ # to make sure there is no discrepancies between cuda fused kernel
1631
+ # and cpu fused kernel
1632
+ toleranceOverride(
1633
+ {
1634
+ torch.bfloat16: tol(atol=5e-3, rtol=5e-3),
1635
+ torch.float16: tol(atol=5e-3, rtol=5e-3),
1636
+ }
1637
+ ),
1638
+ "TestOptimRenewed",
1639
+ "test_fused_matches_forloop",
1640
+ ),
1641
+ DecorateInfo(
1642
+ # Note on tolerances:
1643
+ # Tracking through #127000
1644
+ toleranceOverride(
1645
+ {
1646
+ torch.float32: tol(atol=3e-5, rtol=1.3e-06),
1647
+ }
1648
+ ),
1649
+ "TestCudaOptims",
1650
+ "test_grad_scaling_autocast_fused_optimizers",
1651
+ ),
1652
+ ),
1653
+ skips=(
1654
+ DecorateInfo(
1655
+ skipIfMPS, # addcdiv doesn't work for non-contiguous, see #118115
1656
+ "TestOptimRenewed",
1657
+ "test_forloop_goes_right_direction",
1658
+ active_if=lambda kwargs: not kwargs["contiguous"],
1659
+ device_type="mps",
1660
+ ),
1661
+ DecorateInfo(
1662
+ skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"),
1663
+ "TestOptimRenewed",
1664
+ "test_tensor_lr",
1665
+ active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7),
1666
+ ),
1667
+ DecorateInfo(
1668
+ skipIfTorchDynamo(
1669
+ "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028"
1670
+ ),
1671
+ "TestOptimRenewed",
1672
+ "test_set_default_dtype_works_with_foreach",
1673
+ ),
1674
+ DecorateInfo(
1675
+ skipIfTorchDynamo(
1676
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1677
+ ),
1678
+ "TestOptimRenewed",
1679
+ "test_complex_2d",
1680
+ ),
1681
+ DecorateInfo(
1682
+ skipIfTorchDynamo(
1683
+ "This test uses mocks, which dynamo does not support"
1684
+ ),
1685
+ "TestOptimRenewed",
1686
+ "test_defaults_changed_to_foreach",
1687
+ ),
1688
+ ),
1689
+ ),
1690
+ OptimizerInfo(
1691
+ Adamax,
1692
+ optim_inputs_func=optim_inputs_func_adamax,
1693
+ optim_error_inputs_func=optim_error_inputs_func_adamax,
1694
+ supported_impls=("foreach", "differentiable"),
1695
+ has_capturable_arg=True,
1696
+ skips=(
1697
+ DecorateInfo(
1698
+ skipIfMPS, # addcdiv doesn't work for non-contiguous, see #118115
1699
+ "TestOptimRenewed",
1700
+ "test_forloop_goes_right_direction",
1701
+ active_if=lambda kwargs: not kwargs["contiguous"],
1702
+ device_type="mps",
1703
+ ),
1704
+ DecorateInfo(
1705
+ skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"),
1706
+ "TestOptimRenewed",
1707
+ "test_tensor_lr",
1708
+ active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7),
1709
+ ),
1710
+ DecorateInfo(
1711
+ skipIfTorchDynamo("See #116028"),
1712
+ "TestOptimRenewed",
1713
+ "test_set_default_dtype_works_with_foreach",
1714
+ ),
1715
+ DecorateInfo(
1716
+ skipIfTorchDynamo(
1717
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1718
+ ),
1719
+ "TestOptimRenewed",
1720
+ "test_complex_2d",
1721
+ ),
1722
+ DecorateInfo(
1723
+ unittest.skip("Uses too much memory, even for H100, surprisingly."),
1724
+ "TestOptimRenewed",
1725
+ "test_foreach_large_tensor",
1726
+ ),
1727
+ DecorateInfo(
1728
+ skipIfTorchDynamo(
1729
+ "This test uses mocks, which dynamo does not support"
1730
+ ),
1731
+ "TestOptimRenewed",
1732
+ "test_defaults_changed_to_foreach",
1733
+ ),
1734
+ ),
1735
+ ),
1736
+ OptimizerInfo(
1737
+ AdamW,
1738
+ optim_inputs_func=optim_inputs_func_adamw,
1739
+ optim_error_inputs_func=optim_error_inputs_func_adamw,
1740
+ supported_impls=("foreach", "differentiable", "fused"),
1741
+ not_og_supported_flags=(
1742
+ "foreach",
1743
+ "differentiable",
1744
+ "fused",
1745
+ "maximize",
1746
+ "capturable",
1747
+ ),
1748
+ supports_fused_on=("cpu", "cuda", "mps"),
1749
+ has_capturable_arg=True,
1750
+ decorators=(
1751
+ # Expected error between compiled forloop and fused optimizers
1752
+ DecorateInfo(
1753
+ toleranceOverride({torch.float64: tol(atol=4.5e-7, rtol=2.2e-6)}),
1754
+ "TestOptimRenewed",
1755
+ "test_fused_matches_forloop",
1756
+ active_if=lambda kwargs: TEST_WITH_TORCHDYNAMO
1757
+ and kwargs["dtype"] == torch.float64,
1758
+ ),
1759
+ DecorateInfo(
1760
+ toleranceOverride(
1761
+ # Note on tolerances:
1762
+ # difference comes from the fact that the non fused kernel have
1763
+ # more dtype cast operations. We have another test test_fused_cpu_matches_cuda
1764
+ # to make sure there is no discrepancies between cuda fused kernel
1765
+ # and cpu fused kernel
1766
+ {
1767
+ torch.bfloat16: tol(atol=5e-3, rtol=5e-3),
1768
+ torch.float16: tol(atol=5e-3, rtol=5e-3),
1769
+ }
1770
+ ),
1771
+ "TestOptimRenewed",
1772
+ "test_fused_matches_forloop",
1773
+ ),
1774
+ # Note on tolerances:
1775
+ # Tracking through #127000
1776
+ DecorateInfo(
1777
+ toleranceOverride(
1778
+ {
1779
+ torch.float32: tol(
1780
+ atol=3e-5,
1781
+ rtol=1.3e-06,
1782
+ )
1783
+ }
1784
+ ),
1785
+ "TestCudaOptims",
1786
+ "test_grad_scaling_autocast_fused_optimizers",
1787
+ ),
1788
+ ),
1789
+ skips=(
1790
+ DecorateInfo(
1791
+ skipIfMPS, # addcdiv doesn't work for non-contiguous, see #118115
1792
+ "TestOptimRenewed",
1793
+ "test_forloop_goes_right_direction",
1794
+ active_if=lambda kwargs: not kwargs["contiguous"],
1795
+ device_type="mps",
1796
+ ),
1797
+ DecorateInfo(
1798
+ skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"),
1799
+ "TestOptimRenewed",
1800
+ "test_tensor_lr",
1801
+ active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7),
1802
+ ),
1803
+ DecorateInfo(
1804
+ skipIfTorchDynamo(
1805
+ "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028"
1806
+ ),
1807
+ "TestOptimRenewed",
1808
+ "test_set_default_dtype_works_with_foreach",
1809
+ ),
1810
+ DecorateInfo(
1811
+ skipIfTorchDynamo(
1812
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1813
+ ),
1814
+ "TestOptimRenewed",
1815
+ "test_complex_2d",
1816
+ ),
1817
+ DecorateInfo(
1818
+ skipIfTorchDynamo(
1819
+ "This test uses mocks, which dynamo does not support"
1820
+ ),
1821
+ "TestOptimRenewed",
1822
+ "test_defaults_changed_to_foreach",
1823
+ ),
1824
+ ),
1825
+ ),
1826
+ OptimizerInfo(
1827
+ ASGD,
1828
+ optim_inputs_func=optim_inputs_func_asgd,
1829
+ optim_error_inputs_func=optim_error_inputs_func_asgd,
1830
+ supported_impls=("foreach", "differentiable"),
1831
+ has_capturable_arg=True,
1832
+ skips=(
1833
+ DecorateInfo(
1834
+ skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"),
1835
+ "TestOptimRenewed",
1836
+ "test_tensor_lr",
1837
+ active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7),
1838
+ ),
1839
+ DecorateInfo(
1840
+ skipIfTorchDynamo(
1841
+ "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028"
1842
+ ),
1843
+ "TestOptimRenewed",
1844
+ "test_set_default_dtype_works_with_foreach",
1845
+ ),
1846
+ DecorateInfo(
1847
+ skipIfTorchDynamo(
1848
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1849
+ ),
1850
+ "TestOptimRenewed",
1851
+ "test_complex_2d",
1852
+ ),
1853
+ DecorateInfo(
1854
+ toleranceOverride(
1855
+ {
1856
+ torch.float32: tol(atol=1.5e-5, rtol=1e-5),
1857
+ }
1858
+ ),
1859
+ "TestOptimRenewed",
1860
+ "test_step_is_noop_for_zero_grads",
1861
+ ),
1862
+ DecorateInfo(
1863
+ skipIfTorchDynamo(
1864
+ "This test uses mocks, which dynamo does not support"
1865
+ ),
1866
+ "TestOptimRenewed",
1867
+ "test_defaults_changed_to_foreach",
1868
+ ),
1869
+ DecorateInfo(
1870
+ unittest.skip(
1871
+ "ASGD internally changes the weights even with zero grad"
1872
+ ),
1873
+ "TestOptimRenewed",
1874
+ "test_step_is_noop_for_zero_grads",
1875
+ ),
1876
+ ),
1877
+ ),
1878
+ OptimizerInfo(
1879
+ LBFGS,
1880
+ optim_inputs_func=optim_inputs_func_lbfgs,
1881
+ optim_error_inputs_func=optim_error_inputs_func_lbfgs,
1882
+ supported_impls=(),
1883
+ step_requires_closure=True,
1884
+ supports_param_groups=False,
1885
+ supports_multiple_devices=False,
1886
+ skips=(
1887
+ # Fails on MacOS 13.2.1 in CI https://github.com/pytorch/pytorch/issues/117094
1888
+ DecorateInfo(
1889
+ skipIfMPS,
1890
+ "TestOptimRenewed",
1891
+ "test_can_load_older_state_dict",
1892
+ device_type="mps",
1893
+ ),
1894
+ DecorateInfo(
1895
+ toleranceOverride(
1896
+ {
1897
+ torch.complex64: tol(
1898
+ rtol=4.5e-5,
1899
+ atol=5e-5,
1900
+ )
1901
+ }
1902
+ ),
1903
+ "TestOptimRenewed",
1904
+ "test_complex_2d",
1905
+ ),
1906
+ DecorateInfo(
1907
+ unittest.skip("Does not support param groups"),
1908
+ "TestOptimRenewed",
1909
+ "test_param_groups_lr",
1910
+ ),
1911
+ DecorateInfo(
1912
+ unittest.skip("Does not support param groups"),
1913
+ "TestOptimRenewed",
1914
+ "test_param_groups_weight_decay",
1915
+ ),
1916
+ DecorateInfo(
1917
+ unittest.skip("LBFGS doesn't support multidevice"),
1918
+ "TestOptimRenewed",
1919
+ "test_forloop_goes_right_direction_multigpu",
1920
+ ),
1921
+ DecorateInfo(
1922
+ unittest.skip("Does not support param groups"),
1923
+ "TestOptimRenewed",
1924
+ "test_param_group_with_lrscheduler_goes_right_direction",
1925
+ ),
1926
+ DecorateInfo(
1927
+ skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"),
1928
+ "TestOptimRenewed",
1929
+ "test_tensor_lr",
1930
+ active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7),
1931
+ ),
1932
+ # https://github.com/pytorch/pytorch/issues/131398
1933
+ DecorateInfo(
1934
+ unittest.expectedFailure,
1935
+ "CompiledOptimizerParityTests",
1936
+ "test_correctness",
1937
+ active_if=lambda kwargs: sys.platform == "darwin"
1938
+ and kwargs["use_closure"],
1939
+ ),
1940
+ ),
1941
+ ),
1942
+ OptimizerInfo(
1943
+ NAdam,
1944
+ optim_inputs_func=optim_inputs_func_nadam,
1945
+ optim_error_inputs_func=optim_error_inputs_func_nadam,
1946
+ supported_impls=("foreach", "differentiable"),
1947
+ has_capturable_arg=True,
1948
+ skips=(
1949
+ DecorateInfo(
1950
+ skipIfMPS, # addcdiv doesn't work for non-contiguous, see #118115
1951
+ "TestOptimRenewed",
1952
+ "test_forloop_goes_right_direction",
1953
+ active_if=lambda kwargs: not kwargs["contiguous"],
1954
+ device_type="mps",
1955
+ ),
1956
+ DecorateInfo(
1957
+ skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"),
1958
+ "TestOptimRenewed",
1959
+ "test_tensor_lr",
1960
+ active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7),
1961
+ ),
1962
+ DecorateInfo(
1963
+ skipIfTorchDynamo(
1964
+ "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028"
1965
+ ),
1966
+ "TestOptimRenewed",
1967
+ "test_set_default_dtype_works_with_foreach",
1968
+ ),
1969
+ DecorateInfo(
1970
+ skipIfTorchDynamo(
1971
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
1972
+ ),
1973
+ "TestOptimRenewed",
1974
+ "test_complex_2d",
1975
+ ),
1976
+ DecorateInfo(
1977
+ skipIfTorchDynamo(
1978
+ "Errors, https://github.com/pytorch/pytorch/issues/117150"
1979
+ ),
1980
+ "TestOptimRenewed",
1981
+ "test_load_nontensor_step",
1982
+ ),
1983
+ DecorateInfo(
1984
+ skipIfTorchDynamo(
1985
+ "This test uses mocks, which dynamo does not support"
1986
+ ),
1987
+ "TestOptimRenewed",
1988
+ "test_defaults_changed_to_foreach",
1989
+ ),
1990
+ ),
1991
+ ),
1992
+ OptimizerInfo(
1993
+ RAdam,
1994
+ optim_inputs_func=optim_inputs_func_radam,
1995
+ optim_error_inputs_func=optim_error_inputs_func_radam,
1996
+ supported_impls=("foreach", "differentiable"),
1997
+ has_capturable_arg=True,
1998
+ skips=(
1999
+ DecorateInfo(
2000
+ skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"),
2001
+ "TestOptimRenewed",
2002
+ "test_tensor_lr",
2003
+ active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7),
2004
+ ),
2005
+ DecorateInfo(
2006
+ skipIfTorchDynamo(
2007
+ "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028"
2008
+ ),
2009
+ "TestOptimRenewed",
2010
+ "test_set_default_dtype_works_with_foreach",
2011
+ ),
2012
+ DecorateInfo(
2013
+ skipIfTorchDynamo(
2014
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
2015
+ ),
2016
+ "TestOptimRenewed",
2017
+ "test_complex_2d",
2018
+ ),
2019
+ DecorateInfo(
2020
+ toleranceOverride(
2021
+ {
2022
+ # previously atol=1e-7, rtol=1e-7
2023
+ torch.float64: tol(atol=1.5e-7, rtol=1.1e-7)
2024
+ }
2025
+ ),
2026
+ "TestOptimRenewed",
2027
+ "test_foreach_matches_forloop",
2028
+ ),
2029
+ DecorateInfo(
2030
+ skipIfTorchDynamo(
2031
+ "This test uses mocks, which dynamo does not support"
2032
+ ),
2033
+ "TestOptimRenewed",
2034
+ "test_defaults_changed_to_foreach",
2035
+ ),
2036
+ ),
2037
+ ),
2038
+ OptimizerInfo(
2039
+ RMSprop,
2040
+ optim_inputs_func=optim_inputs_func_rmsprop,
2041
+ optim_error_inputs_func=optim_error_inputs_func_rmsprop,
2042
+ supported_impls=("foreach", "differentiable"),
2043
+ has_capturable_arg=True,
2044
+ skips=(
2045
+ DecorateInfo(
2046
+ skipIfMPS, # addcdiv doesn't work for non-contiguous, see #118115
2047
+ "TestOptimRenewed",
2048
+ "test_forloop_goes_right_direction",
2049
+ active_if=lambda kwargs: not kwargs["contiguous"],
2050
+ device_type="mps",
2051
+ ),
2052
+ DecorateInfo(
2053
+ skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"),
2054
+ "TestOptimRenewed",
2055
+ "test_tensor_lr",
2056
+ active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7),
2057
+ ),
2058
+ DecorateInfo(
2059
+ skipIfTorchDynamo("See #116028"),
2060
+ "TestOptimRenewed",
2061
+ "test_set_default_dtype_works_with_foreach",
2062
+ ),
2063
+ DecorateInfo(
2064
+ skipIfTorchDynamo(
2065
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
2066
+ ),
2067
+ "TestOptimRenewed",
2068
+ "test_complex_2d",
2069
+ ),
2070
+ DecorateInfo(
2071
+ toleranceOverride(
2072
+ { # previously atol=5-05, rtol=0.001, https://github.com/pytorch/pytorch/issues/116202
2073
+ torch.float32: tol(atol=5e-04, rtol=0.01),
2074
+ }
2075
+ ),
2076
+ "TestOptimRenewed",
2077
+ "test_mixed_device_dtype",
2078
+ active_if=TEST_WITH_TORCHDYNAMO,
2079
+ ),
2080
+ DecorateInfo(
2081
+ skipIfTorchDynamo(
2082
+ "This test uses mocks, which dynamo does not support"
2083
+ ),
2084
+ "TestOptimRenewed",
2085
+ "test_defaults_changed_to_foreach",
2086
+ ),
2087
+ ),
2088
+ ),
2089
+ OptimizerInfo(
2090
+ Rprop,
2091
+ optim_inputs_func=optim_inputs_func_rprop,
2092
+ optim_error_inputs_func=optim_error_inputs_func_rprop,
2093
+ supported_impls=("foreach", "differentiable"),
2094
+ has_capturable_arg=True,
2095
+ skips=(
2096
+ DecorateInfo(
2097
+ skipIfMPS, # Rprop doesn't update for non-contiguous, see #118117
2098
+ "TestOptimRenewed",
2099
+ "test_forloop_goes_right_direction",
2100
+ active_if=lambda kwargs: not kwargs["contiguous"],
2101
+ device_type="mps",
2102
+ ),
2103
+ DecorateInfo(
2104
+ skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"),
2105
+ "TestOptimRenewed",
2106
+ "test_tensor_lr",
2107
+ active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7),
2108
+ ),
2109
+ DecorateInfo(
2110
+ skipIfTorchDynamo("See #116028"),
2111
+ "TestOptimRenewed",
2112
+ "test_set_default_dtype_works_with_foreach",
2113
+ ),
2114
+ DecorateInfo(
2115
+ skipIfTorchDynamo(
2116
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
2117
+ ),
2118
+ "TestOptimRenewed",
2119
+ "test_complex_2d",
2120
+ ),
2121
+ DecorateInfo(
2122
+ skipIfTorchDynamo(
2123
+ "This test uses mocks, which dynamo does not support"
2124
+ ),
2125
+ "TestOptimRenewed",
2126
+ "test_defaults_changed_to_foreach",
2127
+ ),
2128
+ ),
2129
+ ),
2130
+ OptimizerInfo(
2131
+ SGD,
2132
+ optim_inputs_func=optim_inputs_func_sgd,
2133
+ scheduler_inputs=(
2134
+ [lambda opt: StepLR(opt, gamma=0.9, step_size=10)],
2135
+ [
2136
+ lambda opt: LinearLR(
2137
+ opt, start_factor=0.4, end_factor=0.8, total_iters=4
2138
+ )
2139
+ ],
2140
+ [
2141
+ lambda opt: StepLR(opt, gamma=0.9, step_size=10),
2142
+ lambda opt: LinearLR(
2143
+ opt, start_factor=0.4, end_factor=0.6, total_iters=4
2144
+ ),
2145
+ ],
2146
+ [
2147
+ lambda opt: StepLR(opt, gamma=0.99, step_size=10),
2148
+ lambda opt: ExponentialLR(opt, gamma=0.99),
2149
+ lambda opt: ReduceLROnPlateau(opt),
2150
+ ],
2151
+ [lambda opt: ConstantLR(opt, factor=0.4, total_iters=4)],
2152
+ [lambda opt: PolynomialLR(opt, power=0.9, total_iters=4)],
2153
+ [
2154
+ lambda opt: StepLR(opt, gamma=0.9, step_size=10),
2155
+ lambda opt: ReduceLROnPlateau(opt),
2156
+ ],
2157
+ ),
2158
+ optim_error_inputs_func=optim_error_inputs_func_sgd,
2159
+ supported_impls=("foreach", "differentiable", "fused"),
2160
+ not_og_supported_flags=(
2161
+ "foreach",
2162
+ "differentiable",
2163
+ "fused",
2164
+ "maximize",
2165
+ "capturable",
2166
+ ),
2167
+ supports_sparse=True,
2168
+ metadata_for_sparse=(
2169
+ {
2170
+ "lr": 4.8e-3,
2171
+ "maximize": False,
2172
+ "momentum": 0,
2173
+ "nesterov": False,
2174
+ "weight_decay": 0,
2175
+ },
2176
+ [lambda opt: StepLR(opt, gamma=0.99999, step_size=300)],
2177
+ ),
2178
+ supports_fused_on=(
2179
+ "cpu",
2180
+ "cuda",
2181
+ "mps",
2182
+ ),
2183
+ skips=(
2184
+ DecorateInfo(
2185
+ skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"),
2186
+ "TestOptimRenewed",
2187
+ "test_tensor_lr",
2188
+ active_if=sys.version_info < (3, 9) and sys.version_info > (3, 7),
2189
+ ),
2190
+ DecorateInfo(
2191
+ skipIfTorchDynamo(
2192
+ "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028"
2193
+ ),
2194
+ "TestOptimRenewed",
2195
+ "test_set_default_dtype_works_with_foreach",
2196
+ ),
2197
+ DecorateInfo(
2198
+ skipIfTorchDynamo(
2199
+ "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
2200
+ ),
2201
+ "TestOptimRenewed",
2202
+ "test_complex_2d",
2203
+ ),
2204
+ DecorateInfo(
2205
+ toleranceOverride(
2206
+ { # previously atol=5-05, rtol=0.001, https://github.com/pytorch/pytorch/issues/116202
2207
+ torch.float32: tol(atol=5e-04, rtol=0.007),
2208
+ }
2209
+ ),
2210
+ "TestOptimRenewed",
2211
+ "test_mixed_device_dtype",
2212
+ active_if=TEST_WITH_TORCHDYNAMO,
2213
+ ),
2214
+ DecorateInfo(
2215
+ skipIfTorchDynamo(
2216
+ "This test uses mocks, which dynamo does not support"
2217
+ ),
2218
+ "TestOptimRenewed",
2219
+ "test_defaults_changed_to_foreach",
2220
+ ),
2221
+ ),
2222
+ ),
2223
+ OptimizerInfo(
2224
+ SparseAdam,
2225
+ optim_inputs_func=optim_inputs_func_sparseadam,
2226
+ optim_error_inputs_func=optim_error_inputs_func_sparseadam,
2227
+ supported_impls=(),
2228
+ only_supports_sparse_grads=True,
2229
+ metadata_for_sparse=({"lr": 4e-2}, []),
2230
+ supports_complex=False, # Missing complex support, see #118153
2231
+ skips=(
2232
+ DecorateInfo(
2233
+ skipIfMPS, # SparseAdam does not support MPS
2234
+ "TestOptimRenewed",
2235
+ device_type="mps",
2236
+ ),
2237
+ DecorateInfo(
2238
+ skipIfXpu(msg="SparseAdam is not yet supported on the XPU stack"),
2239
+ ),
2240
+ DecorateInfo(
2241
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
2242
+ "TestOptimRenewed",
2243
+ "test_param_groups_lr",
2244
+ ),
2245
+ DecorateInfo(
2246
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
2247
+ "TestOptimRenewed",
2248
+ "test_tensor_lr",
2249
+ ),
2250
+ DecorateInfo(
2251
+ unittest.skip(
2252
+ "SparseAdam does not support dense gradients, see #116507"
2253
+ ),
2254
+ "TestOptimRenewed",
2255
+ "test_can_load_older_state_dict",
2256
+ ),
2257
+ DecorateInfo(
2258
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
2259
+ "TestOptimRenewed",
2260
+ "test_load_nontensor_step",
2261
+ ),
2262
+ DecorateInfo(
2263
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
2264
+ "TestOptimRenewed",
2265
+ "test_forloop_goes_right_direction",
2266
+ ),
2267
+ DecorateInfo(
2268
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
2269
+ "TestOptimRenewed",
2270
+ "test_forloop_goes_right_direction_multigpu",
2271
+ ),
2272
+ DecorateInfo(
2273
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
2274
+ "TestOptimRenewed",
2275
+ "test_param_group_with_lrscheduler_goes_right_direction",
2276
+ ),
2277
+ DecorateInfo(
2278
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
2279
+ "TestOptimRenewed",
2280
+ "test_state_dict_with_cuda_params",
2281
+ ),
2282
+ DecorateInfo(
2283
+ skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"),
2284
+ "TestOptimRenewed",
2285
+ "test_deepcopy_copies_all_public_attrs",
2286
+ ),
2287
+ ),
2288
+ ),
2289
+ ]
2290
+
2291
+
2292
class TensorTracker:
    """
    Queue-like recorder of tensor snapshots for aligning two multi-step
    computations (e.g. two supposedly equivalent optimizer implementations).

    Snapshots are appended with ``add`` and consumed in FIFO order with
    ``pop_check_set``, which asserts equality against a reference tensor and
    then re-synchronizes that tensor to the snapshot, so numerical drift
    cannot compound across steps. A failure therefore points at a real
    discrepancy rather than accumulated floating-point noise.
    """

    def __init__(self, assert_eq_kwargs=None):
        # Keyword arguments forwarded to every equality assertion
        # (e.g. atol/rtol overrides).
        self.assert_eq_kwargs = {} if assert_eq_kwargs is None else assert_eq_kwargs
        self.tensors = []

    def add(self, tensor):
        """
        Record a detached clone of ``tensor`` at the current point in time.
        """
        self.tensors.append(tensor.detach().clone())

    def pop_check_set(self, tensor_to_set, testcase):
        """
        Dequeue the oldest snapshot (FIFO, not LIFO!), assert that
        ``tensor_to_set`` equals it, then overwrite ``tensor_to_set``
        in-place (via copy_) with the snapshot's values so subsequent
        computation starts from identical numbers.
        """
        testcase.assertGreater(len(self.tensors), 0, "no tensors to pop")
        ref = self.tensors.pop(0)

        testcase.assertTrue(isinstance(ref, torch.Tensor), f"{type(ref)=}")
        testcase.assertEqual(tensor_to_set, ref, **self.assert_eq_kwargs)

        with torch.no_grad():
            tensor_to_set.copy_(ref)

    def all_popped(self):
        """Return True once every recorded snapshot has been consumed."""
        return not self.tensors
lib/python3.10/site-packages/torch/testing/_internal/common_pruning.py ADDED
@@ -0,0 +1,385 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Owner(s): ["module: unknown"]
2
+
3
+ from typing import Dict, Any, Tuple
4
+ from torch.ao.pruning import BaseSparsifier
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from torch import nn
8
+
9
class ImplementedSparsifier(BaseSparsifier):
    """Minimal concrete BaseSparsifier used by tests: zeroes the first mask
    row and counts update_mask invocations in the sparsifier state."""

    def __init__(self, **kwargs: Dict[str, Any]) -> None:
        # All keyword arguments become the sparsifier defaults.
        super().__init__(defaults=kwargs)

    def update_mask(self, module: nn.Module, tensor_name: str, **kwargs: Dict[str, Any]) -> None:
        # Zero out the first row of the weight parametrization's mask.
        module.parametrizations.weight[0].mask[0] = 0  # type: ignore[index, union-attr]
        # NOTE: the step counter is intentionally keyed on 'linear1.weight'
        # regardless of ``tensor_name``; tests rely on this fixed key.
        state = self.state['linear1.weight']
        state['step_count'] = state.get('step_count', 0) + 1
17
+
18
+
19
class MockSparseLinear(nn.Linear):
    """
    Stand-in "sparse" Linear used to exercise convert functionality.
    Behaves exactly like nn.Linear; it only differs by type and by providing
    a ``from_dense`` alternate constructor.
    """

    @classmethod
    def from_dense(cls, mod: nn.Linear) -> 'MockSparseLinear':
        """
        Build a MockSparseLinear mirroring the shape of a dense Linear.
        """
        # Only the dimensions are copied; weights are freshly initialized.
        return cls(mod.in_features, mod.out_features)
32
+
33
+
34
def rows_are_subset(subset_tensor: torch.Tensor, superset_tensor: torch.Tensor) -> bool:
    """
    Check whether every row of ``subset_tensor`` appears in ``superset_tensor``.

    The superset is scanned left-to-right without rewinding, so matches must
    occur in the same relative order as the subset rows.
    """
    total = len(superset_tensor)
    pos = 0
    for row in subset_tensor:
        # Advance ``pos`` until the current superset row equals ``row``.
        # On a match, ``pos`` is deliberately NOT advanced past it (matches
        # the original scan semantics).
        while pos < total and not torch.equal(row, superset_tensor[pos]):
            pos += 1
        if pos >= total:
            # Superset exhausted before this row was found.
            return False
    return True
48
+
49
+
50
class SimpleLinear(nn.Module):
    r"""Bias-free Linear stack: three layers wrapped in a Sequential followed
    by two standalone layers. Used to test basic pruned Linear-Linear fusion."""

    def __init__(self) -> None:
        super().__init__()
        self.seq = nn.Sequential(
            nn.Linear(7, 5, bias=False),
            nn.Linear(5, 6, bias=False),
            nn.Linear(6, 4, bias=False),
        )
        self.linear1 = nn.Linear(4, 4, bias=False)
        self.linear2 = nn.Linear(4, 10, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # seq -> linear1 -> linear2
        return self.linear2(self.linear1(self.seq(x)))
69
+
70
+
71
class LinearBias(nn.Module):
    r"""A single Sequential of Linear layers alternating bias on/off.
    Used to test pruned Linear-Bias-Linear fusion."""

    def __init__(self) -> None:
        super().__init__()
        self.seq = nn.Sequential(
            nn.Linear(7, 5, bias=True),
            nn.Linear(5, 6, bias=False),
            nn.Linear(6, 3, bias=True),
            nn.Linear(3, 3, bias=True),
            nn.Linear(3, 10, bias=False),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # The whole model is the Sequential.
        return self.seq(x)
88
+
89
+
90
class LinearActivation(nn.Module):
    r"""Linear layers (some with bias) inside and after a Sequential, with an
    activation module between every pair of layers.
    Used to test pruned Linear(Bias)-Activation-Linear fusion."""

    def __init__(self) -> None:
        super().__init__()
        self.seq = nn.Sequential(
            nn.Linear(7, 5, bias=True),
            nn.ReLU(),
            nn.Linear(5, 6, bias=False),
            nn.Tanh(),
            nn.Linear(6, 4, bias=True),
        )
        self.linear1 = nn.Linear(4, 3, bias=True)
        self.act1 = nn.ReLU()
        self.linear2 = nn.Linear(3, 10, bias=False)
        self.act2 = nn.Tanh()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.seq(x)
        # Each outside Linear is followed by its module activation.
        out = self.act1(self.linear1(out))
        out = self.act2(self.linear2(out))
        return out
116
+
117
+
118
class LinearActivationFunctional(nn.Module):
    r"""Linear layers (some with bias) inside and after a Sequential; module
    activations inside the Sequential, functional activations between the
    outside layers. Used to test pruned Linear(Bias)-Activation-Linear fusion."""

    def __init__(self) -> None:
        super().__init__()
        self.seq = nn.Sequential(
            nn.Linear(7, 5, bias=True),
            nn.ReLU(),
            nn.Linear(5, 6, bias=False),
            nn.ReLU(),
            nn.Linear(6, 4, bias=True),
        )
        self.linear1 = nn.Linear(4, 3, bias=True)
        self.linear2 = nn.Linear(3, 8, bias=False)
        self.linear3 = nn.Linear(8, 10, bias=False)
        # Unused in forward; kept so the module structure stays identical.
        self.act1 = nn.ReLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.seq(x)
        # Every outside Linear is followed by a functional ReLU.
        for layer in (self.linear1, self.linear2, self.linear3):
            out = F.relu(layer(out))
        return out
147
+
148
+
149
class SimpleConv2d(nn.Module):
    r"""Bias-free Conv2d stack: two layers in a Sequential and two following.
    Used to test pruned Conv2d-Conv2d fusion."""

    def __init__(self) -> None:
        super().__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(1, 32, 3, 1, bias=False),
            nn.Conv2d(32, 64, 3, 1, bias=False),
        )
        self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False)
        self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.seq(x)
        out = self.conv2d1(out)
        return self.conv2d2(out)
167
+
168
+
169
class Conv2dBias(nn.Module):
    r"""Conv2d layers, some with bias, inside and after a Sequential.
    Used to test pruned Conv2d-Bias-Conv2d fusion."""

    def __init__(self) -> None:
        super().__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(1, 32, 3, 1, bias=True),
            nn.Conv2d(32, 32, 3, 1, bias=True),
            nn.Conv2d(32, 64, 3, 1, bias=False),
        )
        self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=True)
        self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.seq(x)
        out = self.conv2d1(out)
        return self.conv2d2(out)
188
+
189
+
190
class Conv2dActivation(nn.Module):
    r"""Conv2d layers, some with bias, inside and after a Sequential; module
    activations inside the Sequential, functional activations between the
    outside layers. Used to test pruned Conv2d-Bias-Activation-Conv2d fusion."""

    def __init__(self) -> None:
        super().__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(1, 32, 3, 1, bias=True),
            nn.ReLU(),
            nn.Conv2d(32, 64, 3, 1, bias=True),
            nn.Tanh(),
            nn.Conv2d(64, 64, 3, 1, bias=False),
            nn.ReLU(),
        )
        self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False)
        self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.seq(x)
        out = F.relu(self.conv2d1(out))
        out = F.hardtanh(self.conv2d2(out))
        return out
216
+
217
+
218
class Conv2dPadBias(nn.Module):
    r"""Conv2d layers, all with bias and several with padding > 0, inside and
    after a Sequential, with activation modules between layers.
    Used to test that bias is propagated correctly in the special case of
    pruned Conv2d-Bias-(Activation)Conv2d fusion when the second Conv2d layer
    has padding > 0."""

    def __init__(self) -> None:
        super().__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(1, 32, 3, 1, padding=1, bias=True),
            nn.ReLU(),
            nn.Conv2d(32, 32, 3, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(32, 32, 3, 1, padding=1, bias=True),
            nn.ReLU(),
            nn.Conv2d(32, 32, 3, 1, padding=1, bias=True),
            nn.ReLU(),
            nn.Conv2d(32, 64, 3, 1, bias=True),
            nn.Tanh(),
        )
        self.conv2d1 = nn.Conv2d(64, 48, 3, 1, padding=1, bias=True)
        self.act1 = nn.ReLU()
        self.conv2d2 = nn.Conv2d(48, 52, 3, 1, padding=1, bias=True)
        self.act2 = nn.Tanh()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.seq(x)
        out = self.act1(self.conv2d1(out))
        out = self.act2(self.conv2d2(out))
        return out
250
+
251
+
252
class Conv2dPool(nn.Module):
    r"""Conv2d layers (all with bias) inside and after a Sequential, with
    activations and Pool2d layers (both module and functional) interleaved.
    Used to test pruned Conv2d-Pool2d-Conv2d fusion."""

    def __init__(self) -> None:
        super().__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, padding=1, bias=True),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2, stride=2, padding=1),
        )
        self.conv2d1 = nn.Conv2d(64, 48, kernel_size=3, padding=1, bias=True)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        self.af1 = nn.ReLU()
        self.conv2d2 = nn.Conv2d(48, 52, kernel_size=3, padding=1, bias=True)
        self.conv2d3 = nn.Conv2d(52, 52, kernel_size=3, padding=1, bias=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.seq(x)
        # Module pooling/activation after conv2d1 ...
        out = self.af1(self.maxpool(self.conv2d1(out)))
        # ... and functional pooling/activation after conv2d2.
        out = self.conv2d2(out)
        out = F.relu(F.avg_pool2d(out, kernel_size=2, stride=2, padding=1))
        return self.conv2d3(out)
283
+
284
+
285
class Conv2dPoolFlattenFunctional(nn.Module):
    r"""Conv2d layers (all with bias) inside and after a Sequential, pooling
    and activations interleaved, then an adaptive pool, a functional flatten,
    and a final Linear. Used to test pruned Conv2d-Pool2d-Flatten-Linear
    fusion."""

    def __init__(self) -> None:
        super().__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(1, 3, kernel_size=3, padding=1, bias=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(3, 5, kernel_size=3, padding=1, bias=True),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2, stride=2, padding=1),
        )
        self.conv2d1 = nn.Conv2d(5, 7, kernel_size=3, padding=1, bias=True)
        self.af1 = nn.ReLU()
        self.conv2d2 = nn.Conv2d(7, 11, kernel_size=3, padding=1, bias=True)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(11, 13, bias=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.seq(x)
        out = self.conv2d1(out)
        out = self.af1(F.max_pool2d(out, kernel_size=2, stride=2, padding=1))
        out = self.avg_pool(self.conv2d2(out))
        # Functional flatten (vs. the nn.Flatten module variant elsewhere).
        out = torch.flatten(out, 1)
        return self.fc(out)
317
+
318
+
319
class Conv2dPoolFlatten(nn.Module):
    r"""Conv2d layers (all with bias) inside and after a Sequential, pooling
    and activations interleaved, then an adaptive pool, an nn.Flatten module,
    and a final Linear. Used to test pruned Conv2d-Pool2d-Flatten-Linear
    fusion."""

    def __init__(self) -> None:
        super().__init__()
        self.seq = nn.Sequential(
            nn.Conv2d(1, 3, kernel_size=3, padding=1, bias=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(3, 5, kernel_size=3, padding=1, bias=True),
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2, stride=2, padding=1),
        )
        self.conv2d1 = nn.Conv2d(5, 7, kernel_size=3, padding=1, bias=True)
        self.af1 = nn.ReLU()
        self.conv2d2 = nn.Conv2d(7, 11, kernel_size=3, padding=1, bias=True)
        # (2, 2) adaptive output -> 11 * 2 * 2 = 44 features into the fc.
        self.avg_pool = nn.AdaptiveAvgPool2d((2, 2))
        self.flatten = nn.Flatten()
        self.fc = nn.Linear(44, 13, bias=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.seq(x)
        out = self.conv2d1(out)
        out = self.af1(F.max_pool2d(out, kernel_size=2, stride=2, padding=1))
        out = self.avg_pool(self.conv2d2(out))
        return self.fc(self.flatten(out))
352
+
353
+
354
class LSTMLinearModel(nn.Module):
    """LSTM encoder followed by a linear decoder.

    ``forward`` returns both the decoded output and the raw LSTM output."""

    def __init__(
        self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int
    ) -> None:
        super().__init__()
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers)
        self.linear = nn.Linear(hidden_dim, output_dim)

    def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # Hidden/cell states are discarded; only the per-step output is used.
        lstm_out, _ = self.lstm(input)
        return self.linear(lstm_out), lstm_out
368
+
369
+
370
class LSTMLayerNormLinearModel(nn.Module):
    """LSTM followed by a LayerNorm and a linear projection.

    ``forward`` returns both the projected output and the LSTM state."""

    def __init__(
        self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int
    ) -> None:
        super().__init__()
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers)
        self.norm = nn.LayerNorm(hidden_dim)
        self.linear = nn.Linear(hidden_dim, output_dim)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        out, state = self.lstm(x)
        # Normalize before projecting; the (h, c) state is passed through.
        out = self.linear(self.norm(out))
        return out, state
lib/python3.10/site-packages/torch/testing/_internal/common_quantization.py ADDED
The diff for this file is too large to render. See raw diff
 
lib/python3.10/site-packages/torch/testing/_internal/common_subclass.py ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ from copy import deepcopy
5
+ from torch.utils._pytree import tree_map
6
+ import torch.utils._pytree as pytree
7
+
8
+
9
+ # TODO: Move LoggingTensor here.
10
+ from torch.testing._internal.logging_tensor import LoggingTensor
11
+
12
+
13
+ # Base class for wrapper-style tensors.
14
+ class WrapperTensor(torch.Tensor):
15
+ @staticmethod
16
+ def __new__(cls, *args, **kwargs):
17
+ t, kwargs = cls.get_wrapper_properties(*args, **kwargs)
18
+ if "size" not in kwargs:
19
+ size = t.size()
20
+ else:
21
+ size = kwargs["size"]
22
+ del kwargs["size"]
23
+ if "dtype" not in kwargs:
24
+ kwargs["dtype"] = t.dtype
25
+ if "layout" not in kwargs:
26
+ kwargs["layout"] = t.layout
27
+ if "device" not in kwargs:
28
+ kwargs["device"] = t.device
29
+ if "requires_grad" not in kwargs:
30
+ kwargs["requires_grad"] = False
31
+ # Ignore memory_format and pin memory for now as I don't know how to
32
+ # safely access them on a Tensor (if possible??)
33
+
34
+ wrapper = torch.Tensor._make_wrapper_subclass(cls, size, **kwargs)
35
+ wrapper._validate_methods()
36
+ return wrapper
37
+
38
+ @classmethod
39
+ def get_wrapper_properties(cls, *args, **kwargs):
40
+ # Should return both an example Tensor and a dictionary of kwargs
41
+ # to override any of that example Tensor's properly.
42
+ # This is very similar to the `t.new_*(args)` API
43
+ raise NotImplementedError("You need to implement get_wrapper_properties")
44
+
45
+ def _validate_methods(self):
46
+ # Skip this if not in debug mode?
47
+ # Changing these on the python side is wrong as it would not be properly reflected
48
+ # on the c++ side
49
+ # This doesn't catch attributes set in the __init__
50
+ forbidden_overrides = ["size", "stride", "dtype", "layout", "device", "requires_grad"]
51
+ for el in forbidden_overrides:
52
+ if getattr(self.__class__, el) is not getattr(torch.Tensor, el):
53
+ raise RuntimeError(f"Subclass {self.__class__.__name__} is overwriting the "
54
+ f"property {el} but this is not allowed as such change would "
55
+ "not be reflected to c++ callers.")
56
+
57
+
58
+ class WrapperTensorWithCustomSizes(WrapperTensor):
59
+ @classmethod
60
+ def get_wrapper_properties(cls, t, requires_grad=False):
61
+ return t, {"requires_grad": requires_grad, "dispatch_sizes_strides_policy": "sizes"}
62
+
63
+ def __init__(self, t, requires_grad=False):
64
+ self.t = t
65
+
66
+ @classmethod
67
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
68
+ if not all(issubclass(cls, t) for t in types):
69
+ return NotImplemented
70
+
71
+ if kwargs is None:
72
+ kwargs = {}
73
+
74
+ def unwrap(e):
75
+ return e.t if isinstance(e, WrapperTensorWithCustomSizes) else e
76
+
77
+ def wrap(e):
78
+ return WrapperTensorWithCustomSizes(e) if isinstance(e, torch.Tensor) else e
79
+
80
+ rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {})))
81
+ return rs
82
+
83
+ def __repr__(self):
84
+ return super().__repr__(tensor_contents=f"t={self.t}")
85
+
86
+
87
+ class WrapperTensorWithCustomStrides(WrapperTensor):
88
+ @classmethod
89
+ def get_wrapper_properties(cls, t, requires_grad=False):
90
+ return t, {"requires_grad": requires_grad, "dispatch_sizes_strides_policy": "strides"}
91
+
92
+ def __init__(self, t, requires_grad=False):
93
+ self.t = t
94
+
95
+ @classmethod
96
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
97
+ if not all(issubclass(cls, t) for t in types):
98
+ return NotImplemented
99
+
100
+ if kwargs is None:
101
+ kwargs = {}
102
+
103
+ def unwrap(e):
104
+ return e.t if isinstance(e, WrapperTensorWithCustomStrides) else e
105
+
106
+ def wrap(e):
107
+ return WrapperTensorWithCustomStrides(e) if isinstance(e, torch.Tensor) else e
108
+
109
+ rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {})))
110
+ return rs
111
+
112
+ def __repr__(self):
113
+ return super().__repr__(tensor_contents=f"t={self.t}")
114
+
115
+
116
+ class DiagTensorBelow(WrapperTensor):
117
+ @classmethod
118
+ def get_wrapper_properties(cls, diag, requires_grad=False):
119
+ assert diag.ndim == 1
120
+ return diag, {"size": diag.size() + diag.size(), "requires_grad": requires_grad}
121
+
122
+ def __init__(self, diag, requires_grad=False):
123
+ self.diag = diag
124
+
125
+ handled_ops = {}
126
+
127
+ @classmethod
128
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
129
+ if not all(issubclass(cls, t) for t in types):
130
+ return NotImplemented
131
+
132
+ # For everything else, call the handler:
133
+ fn = cls.handled_ops.get(func.__name__, None)
134
+ if fn:
135
+ return fn(*args, **(kwargs or {}))
136
+ else:
137
+ # Note that here, because we don't need to provide the autograd formulas
138
+ # we can have a default "fallback" that creates a plain Tensor based
139
+ # on the diag elements and calls the func again.
140
+
141
+ def unwrap(e):
142
+ return e.diag.diag() if isinstance(e, DiagTensorBelow) else e
143
+
144
+ def wrap(e):
145
+ if isinstance(e, torch.Tensor) and e.ndim == 1:
146
+ return DiagTensorBelow(e)
147
+ if isinstance(e, torch.Tensor) and e.ndim == 2 and e.count_nonzero() == e.diag().count_nonzero():
148
+ return DiagTensorBelow(e.diag())
149
+ return e
150
+
151
+ rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {})))
152
+ return rs
153
+
154
+ def __repr__(self):
155
+ return super().__repr__(tensor_contents=f"diag={self.diag}")
156
+
157
+
158
+ class SparseTensor(WrapperTensor):
159
+ @classmethod
160
+ def get_wrapper_properties(cls, size, values, indices, requires_grad=False):
161
+ assert values.device == indices.device
162
+ return values, {"size": size, "requires_grad": requires_grad}
163
+
164
+ def __init__(self, size, values, indices, requires_grad=False):
165
+ self.values = values
166
+ self.indices = indices
167
+
168
+ def __repr__(self):
169
+ return super().__repr__(tensor_contents=f"values={self.values}, indices={self.indices}")
170
+
171
+ def sparse_to_dense(self):
172
+ res = torch.zeros(self.size(), dtype=self.values.dtype)
173
+ res[self.indices.unbind(1)] = self.values
174
+ return res
175
+
176
+ @staticmethod
177
+ def from_dense(t):
178
+ indices = t.nonzero()
179
+ values = t[indices.unbind(1)]
180
+ return SparseTensor(t.size(), values, indices)
181
+
182
+ @classmethod
183
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
184
+ func_name = f"{func.__module__}.{func.__name__}"
185
+
186
+ res = cls._try_call_special_impl(func_name, args, kwargs)
187
+ if res is not NotImplemented:
188
+ return res
189
+
190
+ # Otherwise, use a default implementation that construct dense
191
+ # tensors and use that to compute values
192
+ def unwrap(e):
193
+ return e.sparse_to_dense() if isinstance(e, SparseTensor) else e
194
+
195
+ # Wrap back all Tensors into our custom class
196
+ def wrap(e):
197
+ # Check for zeros and use that to get indices
198
+ return SparseTensor.from_dense(e) if isinstance(e, torch.Tensor) else e
199
+
200
+ rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {})))
201
+ return rs
202
+
203
+ # To show how things happen later
204
+ def __rmul__(self, other):
205
+ return super().__rmul__(other)
206
+
207
+ _SPECIAL_IMPLS = {}
208
+
209
+ @classmethod
210
+ def _try_call_special_impl(cls, func, args, kwargs):
211
+ if func not in cls._SPECIAL_IMPLS:
212
+ return NotImplemented
213
+ return cls._SPECIAL_IMPLS[func](args, kwargs)
214
+
215
+
216
+ # Example non-wrapper subclass that stores extra state.
217
+ class NonWrapperTensor(torch.Tensor):
218
+ def __new__(cls, data):
219
+ t = torch.Tensor._make_subclass(cls, data)
220
+ t.extra_state = {
221
+ 'last_func_called': None
222
+ }
223
+ return t
224
+
225
+ @classmethod
226
+ def __torch_function__(cls, func, types, args=(), kwargs=None):
227
+ result = super().__torch_function__(func, types, args, kwargs)
228
+
229
+ if isinstance(result, cls):
230
+ # Do something with the extra state. For the example here, just store the name of the
231
+ # last function called (skip for deepcopy so the copy has the same extra state).
232
+ if func is torch.Tensor.__deepcopy__:
233
+ result.extra_state = deepcopy(args[0].extra_state)
234
+ else:
235
+ result.extra_state = {
236
+ 'last_func_called': func.__name__,
237
+ }
238
+
239
+ return result
240
+
241
+ # new_empty() must be defined for deepcopy to work
242
+ def new_empty(self, shape):
243
+ return type(self)(torch.empty(shape))
244
+
245
+
246
+ # Class used to store info about subclass tensors used in testing.
247
+ class SubclassInfo:
248
+
249
+ __slots__ = ['name', 'create_fn', 'closed_under_ops']
250
+
251
+ def __init__(self, name, create_fn, closed_under_ops=True):
252
+ self.name = name
253
+ self.create_fn = create_fn # create_fn(shape) -> tensor instance
254
+ self.closed_under_ops = closed_under_ops
255
+
256
+
257
+ # Helper function to create a subclass of the given class and possibly cache sizes / strides.
258
+ def _create_and_access_shape(cls, shape):
259
+ sub = cls(torch.randn(shape))
260
+ # NB: Wrapper subclasses with custom dispatched sizes / strides cache this info
261
+ # on the first call via non-serializable PyCapsules. We purposefully trigger cache
262
+ # population here for serialization / deepcopy tests to verify that the presence of this
263
+ # cache info doesn't cause problems.
264
+ sub.size()
265
+ sub.stride()
266
+ return sub
267
+
268
+
269
+ subclass_db = {
270
+ torch.Tensor: SubclassInfo(
271
+ 'base_tensor', create_fn=torch.randn
272
+ ),
273
+ NonWrapperTensor: SubclassInfo(
274
+ 'non_wrapper_tensor',
275
+ create_fn=lambda shape: NonWrapperTensor(torch.randn(shape))
276
+ ),
277
+ LoggingTensor: SubclassInfo(
278
+ 'logging_tensor',
279
+ create_fn=lambda shape: LoggingTensor(torch.randn(shape))
280
+ ),
281
+ SparseTensor: SubclassInfo(
282
+ 'sparse_tensor',
283
+ create_fn=lambda shape: SparseTensor.from_dense(torch.randn(shape).relu())
284
+ ),
285
+ DiagTensorBelow: SubclassInfo(
286
+ 'diag_tensor_below',
287
+ create_fn=lambda shape: DiagTensorBelow(torch.randn(shape)),
288
+ closed_under_ops=False # sparse semantics
289
+ ),
290
+ WrapperTensorWithCustomSizes: SubclassInfo(
291
+ 'wrapper_with_custom_sizes',
292
+ create_fn=lambda shape: _create_and_access_shape(WrapperTensorWithCustomSizes, shape),
293
+ closed_under_ops=False,
294
+ ),
295
+ WrapperTensorWithCustomStrides: SubclassInfo(
296
+ 'wrapper_with_custom_strides',
297
+ create_fn=lambda shape: _create_and_access_shape(WrapperTensorWithCustomStrides, shape),
298
+ closed_under_ops=False,
299
+ ),
300
+ }
301
+
302
+ class SubclassWithTensorFactory(torch.Tensor):
303
+ @staticmethod
304
+ def __new__(cls, src):
305
+ shape = src.shape
306
+ kwargs = {}
307
+ kwargs["strides"] = src.stride()
308
+ kwargs["storage_offset"] = src.storage_offset()
309
+ kwargs["device"] = src.device
310
+ kwargs["layout"] = src.layout
311
+ kwargs["requires_grad"] = src.requires_grad
312
+ kwargs["dtype"] = src.dtype
313
+ out = torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
314
+ return out
315
+
316
+ def __init__(self, src):
317
+ self.src = src
318
+
319
+ def __repr__(self):
320
+ return f"{self.__class__.__name__}"
321
+
322
+ def __tensor_flatten__(self):
323
+ return ["src"], None
324
+
325
+ @classmethod
326
+ def __tensor_unflatten__(cls, inner_tensors, meta, outer_size, outer_stride):
327
+ src = inner_tensors["src"]
328
+ return cls(src)
329
+
330
+ @classmethod
331
+ def __torch_dispatch__(cls, func, types, args, kwargs):
332
+ if kwargs is None:
333
+ kwargs = {}
334
+
335
+ def _fn(x):
336
+ return x.src * torch.ones(x.src.shape) if x.src.dtype == torch.float32 else x.src
337
+
338
+ _args = pytree.tree_map_only(cls, _fn, args)
339
+ _kwargs = pytree.tree_map_only(cls, _fn, kwargs)
340
+
341
+ _out = func(*_args, **_kwargs)
342
+
343
+ _out_flat, _out_spec = pytree.tree_flatten(_out)
344
+
345
+ out_flat = [cls(o) if isinstance(o, torch.Tensor) else o for o in _out_flat]
346
+ return pytree.tree_unflatten(out_flat, _out_spec)
lib/python3.10/site-packages/torch/testing/_internal/common_utils.py ADDED
The diff for this file is too large to render. See raw diff
 
lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py ADDED
@@ -0,0 +1,592 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ from torch import Tensor
5
+ import itertools
6
+
7
+ from torch.utils._python_dispatch import TorchDispatchMode
8
+ from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
9
+ from torch.utils import _pytree as pytree
10
+ from functools import partial
11
+ from torch.utils._mode_utils import no_dispatch, all_same_mode
12
+ import torch.autograd.forward_ad as fwAD
13
+ from typing import Callable
14
+ import re
15
+
16
+
17
+ def check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor):
18
+ elem = wrapper_tensor.elem
19
+ metadata_wrapper_tensor = metadata_accessor(wrapper_tensor)
20
+ metadata_elem = metadata_accessor(elem)
21
+ if metadata_wrapper_tensor == metadata_elem:
22
+ return
23
+ raise RuntimeError(
24
+ f"This operator is not Composite Compliant: the "
25
+ f"{metadata_name} of the tensor was modified directly without "
26
+ f"going through the PyTorch dispatcher.")
27
+
28
+ def check_metadata_consistency(wrapper_tensor, CCT):
29
+ # CCT: CompositeCompliantTensor class which is generated using generate_cct
30
+ if not isinstance(wrapper_tensor, CCT):
31
+ return
32
+ things_to_check = {
33
+ 'shape': Tensor.size,
34
+ 'dtype': lambda x: x.dtype,
35
+ 'device': lambda x: x.device,
36
+ 'numel': Tensor.numel,
37
+ 'stride': Tensor.stride,
38
+ 'storage_offset': Tensor.storage_offset,
39
+ }
40
+ for metadata_name, metadata_accessor in things_to_check.items():
41
+ check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor)
42
+
43
+ def is_view_fn(func):
44
+ return func.overloadpacket.__name__ in {
45
+ 'as_strided',
46
+ 'detach',
47
+ 'diagonal',
48
+ 'expand',
49
+ 'expand_as',
50
+ 'movedim',
51
+ 'narrow',
52
+ 'permute',
53
+ 'select',
54
+ 'squeeze',
55
+ 'transpose',
56
+ 't',
57
+ 'real',
58
+ 'imag',
59
+ 'view_as_real',
60
+ 'view_as_complex',
61
+ 'unflatten',
62
+ 'unfold',
63
+ 'unsqueeze',
64
+ 'view',
65
+ 'view_as',
66
+ 'unbind',
67
+ 'split',
68
+ 'split_with_sizes',
69
+ 'vsplit',
70
+ 'hsplit',
71
+ 'tensor_split',
72
+ 'chunk',
73
+ 'swapaxes',
74
+ 'slice',
75
+ '_reshape_alias',
76
+ '_unsafe_view',
77
+ '_conj',
78
+ 'alias',
79
+ }
80
+
81
+ # manually populated from native_functions that have inplace_view: True.
82
+ # In the future we will probably be able to grab that list directly
83
+ def is_inplace_view_fn(func):
84
+ return func.overloadpacket.__name__ in {
85
+ 'as_strided_',
86
+ 'detach_',
87
+ 'squeeze_',
88
+ 'swapaxes_',
89
+ 'swapdims_',
90
+ 't_',
91
+ 'transpose_',
92
+ 'unsqueeze_',
93
+ }
94
+
95
+
96
+ # Introspection please save us
97
+ def is_inplace(func):
98
+ name = func.overloadpacket.__name__
99
+ if re.match('__i.+__', name):
100
+ return True
101
+ if re.match('__.+__', name):
102
+ return False
103
+ return name[-1] == '_'
104
+
105
+
106
+ def generate_cct_and_mode(autograd_view_consistency=True):
107
+ # This function returns a new class CompositeCompliantTensor
108
+ # The two arguments control the behaviour described below.
109
+
110
+ # autograd_view_consistency:
111
+ # If True, alias result using `set_` if func returns a view
112
+ # (See Note [Alias Result]).
113
+ # Since Forward AD doesn't work with `set_`
114
+ # we disable it by setting alias to False.
115
+
116
+ class CompositeCompliantTensor(torch.Tensor):
117
+ elem: torch.Tensor
118
+
119
+ __slots__ = ['elem']
120
+
121
+ @staticmethod
122
+ def __new__(cls, elem, mode, *args, **kwargs):
123
+ assert type(elem) is not cls, \
124
+ "Wrapping a CompositeCompliantTensor in a CompositeCompliantTensor is not supported"
125
+
126
+ # The storage of CompositeCompliantTensor should never be used directly
127
+ # by a Composite operation; if the Composite
128
+ # operator attempts to read from the storage without dispatching then it'll
129
+ # raise a RuntimeError due to it being a meta storage.
130
+ r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
131
+ cls, elem.size(),
132
+ dtype=elem.dtype, layout=elem.layout,
133
+ device=elem.device, requires_grad=elem.requires_grad,
134
+ strides=elem.stride(), storage_offset=elem.storage_offset())
135
+
136
+ if elem.requires_grad:
137
+ # CompositeCompliantTensor steals the "requires_grad"-ness.
138
+ # Why a new copy of `elem`? Because sometimes OpInfo shares inputs between tests...
139
+ tmp = torch.empty(
140
+ (),
141
+ dtype=elem.dtype,
142
+ device=elem.device,
143
+ layout=elem.layout,
144
+ requires_grad=False,
145
+ )
146
+ # Use set_ rather than empty_strided() + copy_ so that we can preserve
147
+ # things like storage_offset.
148
+ tmp.set_(
149
+ source=elem.untyped_storage().clone(),
150
+ storage_offset=elem.storage_offset(),
151
+ size=elem.size(),
152
+ stride=elem.stride(),
153
+ )
154
+ r.elem = tmp
155
+ else:
156
+ r.elem = elem
157
+
158
+ assert r.stride() == r.elem.stride()
159
+
160
+ # Propagate conjugate bits to the wrapper tensor
161
+ # Ref: https://github.com/albanD/subclass_zoo/issues/24
162
+ # Ref: https://github.com/albanD/subclass_zoo/issues/21
163
+ torch._C._set_conj(r, r.elem.is_conj())
164
+ torch._C._set_neg(r, r.elem.is_neg())
165
+
166
+ r.mode = mode
167
+ return r
168
+
169
+ def __repr__(self):
170
+ return f"CompositeCompliantTensor({self.elem})"
171
+
172
+ @classmethod
173
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
174
+ all_args = pytree.arg_tree_leaves(*args, **(kwargs or {}))
175
+ modes = tuple(e.mode for e in all_args if isinstance(e, CompositeCompliantTensor))
176
+ if not all_same_mode(modes):
177
+ raise RuntimeError("Multiple CompositeCompliantTensorModes NYI")
178
+ with modes[0]:
179
+ return func(*args, **kwargs)
180
+
181
+ class CompositeCompliantTensorMode(TorchDispatchMode):
182
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
183
+ def unwrap(e):
184
+ return e.elem if isinstance(e, CompositeCompliantTensor) else e
185
+
186
+ def wrap(e):
187
+ return CompositeCompliantTensor(e, self) if isinstance(e, torch.Tensor) else e
188
+
189
+ if func == torch.ops.aten._local_scalar_dense.default:
190
+ raise RuntimeError(
191
+ ".item() is not allowed to be called inside of composite "
192
+ "functions in the PyTorch library because not all backends "
193
+ "and/or Tensor subclasses (e.g. vmap, ProxyTensor) support them.")
194
+
195
+ if func.overloadpacket.__name__ in ('set_', 'resize_'):
196
+ raise RuntimeError(
197
+ f"{func.__name__} is not allowed to be called inside of "
198
+ f"Composite operators.")
199
+
200
+ if is_inplace(func):
201
+ # NB: We are making an assumption that if the function is in-place,
202
+ # then the first argument is being written to. Introspection please save us!
203
+ mutated_argument = args[0]
204
+ if not isinstance(mutated_argument, CompositeCompliantTensor) and \
205
+ any(isinstance(a, CompositeCompliantTensor) for a in args[1:]):
206
+ raise RuntimeError(
207
+ 'Not composite compliant: performing in-place operation '
208
+ f'{func.__name__} where the Tensor being written to is '
209
+ 'regular Tensor but the other tensors are Tensor Subclasses. '
210
+ 'Please try to avoid this in-place operation.')
211
+
212
+ unwrapped_args = tree_map(unwrap, args)
213
+ unwrapped_kwargs = tree_map(unwrap, kwargs)
214
+ unwrapped_rs = func(*unwrapped_args, **unwrapped_kwargs)
215
+ rs = tree_map(wrap, unwrapped_rs)
216
+
217
+ if is_view_fn(func) and autograd_view_consistency:
218
+ # Note [Alias Result]
219
+ # Autograd asserts that for B = A.view_fn(...), B and A's storages
220
+ # are the same. Here we try to make B alias A to avoid those asserts.
221
+ # See https://github.com/pytorch/pytorch/issues/65339 for more information
222
+ # about the issue.
223
+ with no_dispatch():
224
+ # Idea: this is a weird way of getting a storage that aliases the input.
225
+ # This is a workaround for #65339.
226
+ # 1. under no_dispatch, all of the wrapper tensors look like regular
227
+ # tensors with special storage (the storage is nullptr and
228
+ # advertises CPU/CUDA device.
229
+ # 2. we run func, which ends up running the view operation
230
+ # 3. All view operations reuse the input's storage and return
231
+ # result Tensor(s) with new sizes/strides/offset that alias
232
+ # the input.
233
+ # 4. we set the storage (and sizes/strides/offset) of the wrapper
234
+ # tensor results to be that of the tensors that alias the input
235
+ result = func(*args, **kwargs)
236
+ if isinstance(result, (tuple, list)):
237
+ for a, b in zip(rs, result):
238
+ a.set_(b)
239
+ else:
240
+ rs.set_(result)
241
+
242
+ # Some operations are allowed to in-place modify the metadata of the
243
+ # inputs. The only ones are the "inplace view functions"; when we
244
+ # run into these, we manually modify the metadata of the input.
245
+ with no_dispatch():
246
+ if is_inplace_view_fn(func):
247
+ func(*args, **kwargs)
248
+
249
+ # For each CompositeCompliantTensor t, we check that t and t.elem
250
+ # have consistent metadata. If they don't have consistent metadata,
251
+ # that means the operator did something fishy.
252
+ check = partial(check_metadata_consistency, CCT=CompositeCompliantTensor)
253
+ pytree.tree_map_(check, args)
254
+ pytree.tree_map_(check, kwargs)
255
+ pytree.tree_map_(check, rs)
256
+ return rs
257
+
258
+ return CompositeCompliantTensor, CompositeCompliantTensorMode()
259
+
260
+ def is_tensorlist(lst):
261
+ if not isinstance(lst, list) and not isinstance(lst, tuple):
262
+ return False
263
+ if len(lst) == 0:
264
+ return False
265
+ all_tensors = all(isinstance(elt, torch.Tensor) for elt in lst)
266
+ if all_tensors:
267
+ return True
268
+ exists_one_tensor = all(isinstance(elt, torch.Tensor) for elt in lst)
269
+ if exists_one_tensor:
270
+ raise RuntimeError('This test assumes that PyTorch APIs cannot take '
271
+ 'mixed lists of Tensor and other things')
272
+ return False
273
+
274
+
275
+ def maybe_map(fn, should_map, arg):
276
+ return fn(arg) if should_map else arg
277
+
278
+
279
+ def wrap(arg, CCT, cct_mode):
280
+ # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode
281
+ if isinstance(arg, torch.Tensor):
282
+ return CCT(arg, cct_mode)
283
+ if is_tensorlist(arg):
284
+ return [CCT(a, cct_mode) for a in arg]
285
+ raise RuntimeError("wrap assumes that the input can be wrapped")
286
+
287
+
288
+ # Given a list of flat arguments, some of which may be Tensors, return all
289
+ # possible ways some of the arguments could be CompositeCompliantTensors (CCT).
290
+ # For example, given Tensors A, B, C and flat_args = [A, 1, B],
291
+ # We would return the following 4 options:
292
+ # [CCT(A), 1, CCT(B)]
293
+ # [CCT(A), 1, B]
294
+ # [A, 1, CCT(B)]
295
+ # [A, 1, B]
296
+ # NB: Yes, this is exponential. No, we don't care too much because PyTorch ops
297
+ # don't accept that many input Tensors.
298
+ def generate_subclass_choices(flat_args, CCT, cct_mode):
299
+ # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode
300
+ is_tensor_likes = [isinstance(arg, torch.Tensor) or is_tensorlist(arg) for arg in flat_args]
301
+ subclass_options = [[False, True] if is_tensor_like else [False] for is_tensor_like in is_tensor_likes]
302
+
303
+ for which_args_are_wrapped in itertools.product(*subclass_options):
304
+
305
+ result = [maybe_map(partial(wrap, CCT=CCT, cct_mode=cct_mode), should_wrap_arg, arg)
306
+ for should_wrap_arg, arg in zip(which_args_are_wrapped, flat_args)]
307
+ yield result, which_args_are_wrapped
308
+
309
+
310
+ # For an operation f(*args, **kwargs), each Tensor argument may either be
311
+ # a regular Tensor or a Tensor Subclass. This iterator iterates through
312
+ # all of those options.
313
+ def generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
314
+ # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode
315
+ flat_kwargs, spec = tree_flatten(kwargs)
316
+ flat_args_kwargs = list(args) + list(flat_kwargs)
317
+ for choice, debug_metadata in generate_subclass_choices(flat_args_kwargs, CCT, cct_mode):
318
+ new_args = choice[:len(args)]
319
+ new_kwargs = tree_unflatten(choice[len(args):], spec)
320
+ which_args_are_wrapped = debug_metadata[:len(args)]
321
+ which_kwargs_are_wrapped = tree_unflatten(debug_metadata[len(args):], spec)
322
+ yield new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped
323
+
324
+
325
+ def raise_composite_compliance_error(err, additional_info=''):
326
+ raise RuntimeError(
327
+ "Composite compliance check failed with "
328
+ "the above error.\n"
329
+ f"{additional_info}"
330
+ "If you are adding an OpInfo of an "
331
+ "existing operator, please feel free to skip this test "
332
+ "because the problem was pre-existing and file an issue. "
333
+ "Otherwise, if you added a new operator, please read "
334
+ "through the Composite Compliance section in "
335
+ "aten/src/ATen/native/README.md for how to resolve this. "
336
+ ) from err
337
+
338
+
339
+ # This test checks ALL possible permutations of calling `op` with arguments
340
+ # that are individually either a regular Tensor or a Tensor subclass.
341
+ #
342
+ # The general strategy is to wrap some Tensor args and kwargs in
343
+ # CompositeCompliantTensor wrappers and call the operation.
344
+
345
+ # If some composite operation does any non-compliant behavior,
346
+ # CompositeCompliantTensor will raise an error.
347
+ def check_all_permutations(op, args, kwargs, assert_equal_fn):
348
+ CCT, cct_mode = generate_cct_and_mode()
349
+ expected = op(*args, **kwargs)
350
+ for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
351
+ new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice
352
+
353
+ try:
354
+ actual = op(*new_args, **new_kwargs)
355
+ # NOTE: [What errors are Composite Compliance trying to catch?]
356
+ #
357
+ # There's two things we want to catch:
358
+ # - errors that would raise within the torch_dispatch impl
359
+ # - data_ptr accesses
360
+ # The first is easy to filter for (we could make the error a different
361
+ # error class), the second is always going to be a RuntimeError due to
362
+ # how it is implemented (if you try to access the data_ptr of thex
363
+ # wrapper Tensor, it raises you some internal RuntimeError).
364
+ #
365
+ # So the most general thing to catch here was RuntimeError. If you
366
+ # are here and debugging why your test failed, it's plausible that
367
+ # the operator itself is broken and that there are other tests failing.
368
+ except RuntimeError as err:
369
+ raise_composite_compliance_error(
370
+ err,
371
+ f"- wrapped_args: {which_args_are_wrapped}\n"
372
+ f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
373
+ )
374
+
375
+ def unwrap(e):
376
+ return e.elem if isinstance(e, CCT) else e
377
+
378
+ assert_equal_fn(tree_map(unwrap, actual), expected)
379
+
380
+ # Checks via the usage of torch dispatch mode certain anti-patterns that
381
+ # are not composite compliant.
382
+ #
383
+ # In particular, the anti-pattern we are trying to prevent is a user
384
+ # creating an empty tensor and then resize_-ing it. Torch Dispatch Mode helps
385
+ # here because all factory functions will create tensors that are
386
+ # CompositeCompliantTensor.
387
+ #
388
+ # The general strategy is to wrap all Tensor args and kwargs in
389
+ # CompositeCompliantTensor wrappers. If an operator that is
390
+ # Composite does any non-compliant behavior,
391
+ # CompositeCompliantTensor will raise an error.
392
+ def check_with_mode(op, args, kwargs, assert_equal_fn):
393
+ CCT, cct_mode = generate_cct_and_mode()
394
+
395
+ def wrap(e):
396
+ return CCT(e, cct_mode) if isinstance(e, torch.Tensor) else e
397
+
398
+ expected = op(*args, **kwargs)
399
+
400
+ args = tree_map(wrap, args)
401
+ kwargs = tree_map(wrap, kwargs)
402
+ try:
403
+ with cct_mode:
404
+ actual = op(*args, **kwargs)
405
+ # see NOTE: [What errors are Composite Compliance trying to catch?]
406
+ except RuntimeError as err:
407
+ raise_composite_compliance_error(err)
408
+
409
+ def unwrap(e):
410
+ return e.elem if isinstance(e, CCT) else e
411
+
412
+ assert_equal_fn(tree_map(unwrap, actual), expected)
413
+
414
+ def gather_leaf_tensors(args, kwargs):
415
+ leaf_tensors = []
416
+ args, _args_spec = tree_flatten(args)
417
+ kwargs, _kwargs_spec = tree_flatten(kwargs)
418
+ args = args + kwargs
419
+ for arg in args:
420
+ if not isinstance(arg, torch.Tensor):
421
+ continue
422
+ if arg.requires_grad:
423
+ leaf_tensors.append(arg)
424
+ return leaf_tensors
425
+
426
+
427
+ def compute_expected_grads(op, args, kwargs, output_process_fn_grad=None, gradcheck_wrapper=None):
428
+ if gradcheck_wrapper is None:
429
+ results = op(*args, **kwargs)
430
+ else:
431
+ results = gradcheck_wrapper(op, *args, **kwargs)
432
+
433
+ if output_process_fn_grad is not None:
434
+ results = output_process_fn_grad(results)
435
+
436
+ flat_results = pytree.tree_leaves(results)
437
+ flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)]
438
+ flat_diff_results = [r for r in flat_results if r.requires_grad]
439
+ assert len(flat_diff_results) > 0
440
+
441
+ grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) for r in flat_diff_results]
442
+ leaf_tensors = gather_leaf_tensors(args, kwargs)
443
+ assert len(leaf_tensors) > 0
444
+ return torch.autograd.grad(flat_diff_results, leaf_tensors,
445
+ grads, allow_unused=True, retain_graph=True)
446
+
447
+
448
# Checks if the backward formula is composite compliant by testing
# all possible permutations of {inputs, grad_outputs} being
# CompositeCompliantTensor or regular Tensors.
#
# NB: it is important that op is accepted as a Callable and not an OpInfo,
# this means we can apply check_backward_formula to things that aren't OpInfos
# while debugging.
def check_backward_formula(op: Callable, args, kwargs,
                           output_process_fn_grad=None,
                           gradcheck_wrapper=None, assert_equal_fn=None):
    CCT, cct_mode = generate_cct_and_mode()

    # Reference gradients computed with no subclass wrapping at all.
    expected = compute_expected_grads(op, args, kwargs, output_process_fn_grad, gradcheck_wrapper)

    # Each `choice` wraps a different subset of args/kwargs in CCT.
    for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
        new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice
        leaf_tensors = gather_leaf_tensors(new_args, new_kwargs)
        assert len(leaf_tensors) > 0

        try:
            if gradcheck_wrapper is None:
                results = op(*new_args, **new_kwargs)
            else:
                results = gradcheck_wrapper(op, *new_args, **new_kwargs)
            if output_process_fn_grad is not None:
                results = output_process_fn_grad(results)
        # see NOTE: [What errors are Composite Compliance trying to catch?]
        except RuntimeError as err:
            raise_composite_compliance_error(
                err,
                f"- wrapped_args: {which_args_are_wrapped}\n"
                f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
            )

        flat_results = pytree.tree_leaves(results)
        flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)]
        flat_diff_results = [r for r in flat_results if r.requires_grad]
        assert len(flat_diff_results) > 0

        # NB: ones, not ones_like, so we get a regular Tensor here
        grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype)
                 for r in flat_diff_results]
        # Additionally permute which grad_outputs are wrapped in CCT.
        for flat_new_grads, which_grad_is_batched in generate_subclass_choices(grads, CCT, cct_mode):
            try:
                actual = torch.autograd.grad(flat_diff_results, leaf_tensors, flat_new_grads,
                                             allow_unused=True, retain_graph=True)
            # see NOTE: [What errors are Composite Compliance trying to catch?]
            except RuntimeError as err:
                raise_composite_compliance_error(
                    err,
                    f"- wrapped_args: {which_args_are_wrapped}\n"
                    f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
                    f"- wrapped_grads: {which_grad_is_batched}\n"
                )

            def unwrap(e):
                # Peel the CCT wrapper so results compare against plain tensors.
                return e.elem if isinstance(e, CCT) else e

            assert_equal_fn(tuple(map(unwrap, actual)), expected, equal_nan=True)
507
+
508
# Checks if the forward AD formula is composite compliant by testing
# all possible permutations of {primals, tangents} being
# CompositeCompliantTensor or regular Tensors.
#
# NB: it is important that op is accepted as a Callable and not an OpInfo,
# this means we can apply check_forward_ad_formula to things that aren't OpInfos
# while debugging.
def check_forward_ad_formula(op: Callable, args, kwargs, gradcheck_wrapper=None, assert_equal_fn=None):
    CCT, cct_mode = generate_cct_and_mode(autograd_view_consistency=False)

    def maybe_tangent(t):
        assert type(t) is not CCT
        # Generate `tangent` tensor
        # if given object is a Tensor and requires grad is set.
        if isinstance(t, torch.Tensor) and t.requires_grad:
            return torch.randn_like(t)
        elif is_tensorlist(t):
            return [torch.randn_like(e) if e.requires_grad else None for e in t]
        return None

    # One tangent per arg/kwarg (None for non-differentiable entries).
    tangent_args = tuple(maybe_tangent(arg) for arg in args)
    flat_kwargs, spec = tree_flatten(kwargs)
    flat_tangent_kwargs = tuple(maybe_tangent(arg) for arg in flat_kwargs)
    tangent_kwargs = tree_unflatten(flat_tangent_kwargs, spec)

    with fwAD.dual_level():
        def maybe_make_dual(dual):
            # Returns dual tensor if primal is a tensor/tensor subclass
            # with requires_grad set.
            primal, tangent = dual
            if isinstance(primal, torch.Tensor) and primal.requires_grad:
                return fwAD.make_dual(primal.detach(), tangent)
            elif is_tensorlist(primal):
                return tuple(fwAD.make_dual(pri.detach(), tang) if tang is not None else pri
                             for pri, tang in zip(primal, tangent))
            return primal

        def compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs):
            # Reference forward-AD run with plain (unwrapped) tensors.
            op_args = tuple(map(maybe_make_dual, zip(args, tangent_args)))
            op_kwargs = {k: maybe_make_dual((v, tangent_kwargs[k])) for k, v in kwargs.items()}

            if gradcheck_wrapper is None:
                return op(*op_args, **op_kwargs)
            return gradcheck_wrapper(op, *op_args, **op_kwargs)

        expected = compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs)
        expected = tree_map(fwAD.unpack_dual, expected)
        expected_primals = tree_map(lambda x: x.primal, expected)
        expected_tangents = tree_map(lambda x: x.tangent, expected)

        # Permutations of arg and kwargs in CCT.
        for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
            new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice

            # Permutations tangent arg and tangent kwargs in CCT.
            for tang_choice in generate_subclass_choices_args_kwargs(tangent_args, tangent_kwargs, CCT, cct_mode):
                new_tang_args, new_tang_kwargs, \
                    which_tang_args_are_wrapped, which_tang_kwargs_are_wrapped = tang_choice

                op_args = tuple(map(maybe_make_dual, zip(new_args, new_tang_args)))
                op_kwargs = {k: maybe_make_dual((v, new_tang_kwargs[k])) for k, v in new_kwargs.items()}

                try:
                    if gradcheck_wrapper is None:
                        actual = op(*op_args, **op_kwargs)
                    else:
                        actual = gradcheck_wrapper(op, *op_args, **op_kwargs)
                # see NOTE: [What errors are Composite Compliance trying to catch?]
                except RuntimeError as err:
                    raise_composite_compliance_error(
                        err,
                        f"- wrapped_args: {which_args_are_wrapped}\n"
                        f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
                        f"- wrapped_tangent_args: {which_tang_args_are_wrapped}\n"
                        f"- wrapped_tangent_kwargs: {which_tang_kwargs_are_wrapped}\n"
                    )

                def unwrap(e):
                    # Peel the CCT wrapper before comparing against the reference.
                    return e.elem if isinstance(e, CCT) else e

                actual = tree_map(fwAD.unpack_dual, actual)
                actual_primals = tree_map(lambda x: unwrap(x.primal), actual)
                actual_tangents = tree_map(lambda x: unwrap(x.tangent), actual)
                assert_equal_fn(actual_primals, expected_primals, equal_nan=True)
                assert_equal_fn(actual_tangents, expected_tangents, equal_nan=True)
lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py ADDED
@@ -0,0 +1,586 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-decorators
2
+ # mypy: allow-untyped-defs
3
+ import torch
4
+ import functools
5
+ from torch.testing import make_tensor
6
+ from torch.testing._internal.opinfo.core import (
7
+ OpInfo,
8
+ SampleInput,
9
+ )
10
+ from torch.testing._internal.common_dtype import all_types_and
11
+ import numpy as np
12
+ from torch.testing._internal.autograd_function_db import (
13
+ sample_inputs_numpy_cube,
14
+ sample_inputs_numpy_mul,
15
+ sample_inputs_numpy_mul_scalar,
16
+ sample_inputs_numpy_sort,
17
+ sample_inputs_numpy_take,
18
+ )
19
+ from torch import Tensor
20
+ from torch.types import Number
21
+ from typing import * # noqa: F403
22
+
23
+ # Note: [custom op db]
24
+ #
25
+ # This is a collection of custom operator test cases written as OpInfos
26
+ # so they can easily be consumed by OpInfo-based tests to check if subsystems
27
+ # support them correctly.
28
+
29
def to_numpy(tensor):
    """Return the data of ``tensor`` as a NumPy ndarray, moving it to CPU first."""
    cpu_tensor = tensor.cpu()
    return cpu_tensor.numpy()
31
+
32
# numpy_cube: computes (x**3, 3*x**2) via NumPy.  The derivative tensor is
# returned alongside the result so the backward can reuse it.
@torch.library.custom_op("_torch_testing::numpy_cube", mutates_args=())
def numpy_cube(x: Tensor) -> Tuple[Tensor, Tensor]:
    x_np = to_numpy(x)
    dx = torch.tensor(3 * x_np ** 2, device=x.device)
    return torch.tensor(x_np ** 3, device=x.device), dx

# Fake (meta) kernel: only shapes/dtypes matter; both outputs mirror x.
@numpy_cube.register_fake
def _(x):
    return x.clone(), x.clone()

def numpy_cube_setup_context(ctx, inputs, output):
    x, = inputs
    _cube, dx = output
    ctx.save_for_backward(x, dx)

def numpy_cube_backward(ctx, grad_out, grad_dx):
    # d(x^3)/dx = 3x^2 (= dx, saved in forward); d(3x^2)/dx = 6x.
    x, dx = ctx.saved_tensors
    grad_x = numpy_mul(grad_out, dx) + 6 * numpy_mul(grad_dx, x)
    return grad_x

numpy_cube.register_autograd(numpy_cube_backward, setup_context=numpy_cube_setup_context)

def numpy_cube_vmap(info, in_dims, x):
    # Elementwise op: outputs keep the same batch dim as the input.
    result = numpy_cube(x)
    return result, (in_dims[0], in_dims[0])

numpy_cube.register_vmap(numpy_cube_vmap)

# numpy_mul: elementwise multiply computed via NumPy.
@torch.library.custom_op("_torch_testing::numpy_mul", mutates_args=())
def numpy_mul(x: Tensor, y: Tensor) -> Tensor:
    return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)

@numpy_mul.register_fake
def _(x, y):
    assert x.device == y.device
    return (x * y).contiguous()

def numpy_mul_setup_context(ctx, inputs, output):
    ctx.save_for_backward(*inputs)

def numpy_mul_backward(ctx, grad_out):
    # Standard product rule; skip grads callers did not request.
    x, y = ctx.saved_tensors
    grad_x = grad_out * y if ctx.needs_input_grad[0] else None
    grad_y = grad_out * x if ctx.needs_input_grad[1] else None
    return grad_x, grad_y

numpy_mul.register_autograd(numpy_mul_backward, setup_context=numpy_mul_setup_context)

def numpy_mul_vmap(info, in_dims, x, y):
    # Move (or create) the batch dim at -1 so broadcasting lines up, multiply,
    # then report the batch dim at position 0.
    x_bdim, y_bdim = in_dims
    x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
    y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1)
    result = x * y
    result = result.movedim(-1, 0)
    return result, 0

numpy_mul.register_vmap(numpy_mul_vmap)

# numpy_mul_scalar: like numpy_mul but the multiplier is a keyword-only float,
# exercising the keyword_only_inputs path of register_autograd.
@torch.library.custom_op("_torch_testing::numpy_mul_scalar", mutates_args=())
def numpy_mul_scalar(x: Tensor, *, scalar: float) -> Tensor:
    return torch.tensor(to_numpy(x) * scalar, device=x.device)

@numpy_mul_scalar.register_fake
def _(x, *, scalar):
    return (x * scalar).contiguous()

def numpy_mul_scalar_setup_context(ctx, inputs, keyword_only_inputs, output):
    ctx.scalar = keyword_only_inputs["scalar"]

def numpy_mul_scalar_backward(ctx, grad_out):
    grad_x = grad_out * ctx.scalar
    return grad_x

numpy_mul_scalar.register_autograd(numpy_mul_scalar_backward, setup_context=numpy_mul_scalar_setup_context)

def numpy_mul_scalar_vmap(info, in_dims, x, *, scalar):
    x_bdim, = in_dims
    x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
    result = x * scalar
    result = result.movedim(-1, 0)
    return result, 0

numpy_mul_scalar.register_vmap(numpy_mul_scalar_vmap)
115
+
116
# numpy_sort: sorts along `dim` and also returns the sorting permutation and
# its inverse, so the backward can undo the permutation via numpy_take.
@torch.library.custom_op("_torch_testing::numpy_sort", mutates_args=())
def numpy_sort(x: Tensor, dim: int) -> Tuple[Tensor, Tensor, Tensor]:
    device = x.device
    x = to_numpy(x)
    ind = np.argsort(x, axis=dim)
    ind_inv = np.argsort(ind, axis=dim)
    result = np.take_along_axis(x, ind, axis=dim)
    return (
        torch.tensor(result, device=device),
        torch.tensor(ind, device=device),
        torch.tensor(ind_inv, device=device),
    )

@numpy_sort.register_fake
def _(x, dim):
    return torch.empty_like(x), torch.empty_like(x, dtype=torch.long), torch.empty_like(x, dtype=torch.long)

def numpy_sort_setup_context(ctx, inputs, output):
    _out, ind, ind_inv = output
    ctx.dim = inputs[1]
    ctx.save_for_backward(ind, ind_inv)
    # Index outputs are integer-valued and never get gradients.
    ctx.mark_non_differentiable(ind, ind_inv)

def numpy_sort_backward(ctx, grad_out, grad_ind, grad_ind_inv):
    # Scatter the incoming grads back to their pre-sort positions.
    ind, ind_inv = ctx.saved_tensors
    return numpy_take(grad_out, ind_inv, ind, ctx.dim), None

numpy_sort.register_autograd(numpy_sort_backward, setup_context=numpy_sort_setup_context)

def numpy_sort_vmap(info, in_dims, x, dim):
    x_bdim, _ = in_dims
    x = x.movedim(x_bdim, 0)
    # Wrap negative dims against the logical (unbatched) rank, then shift by 1
    # to account for the batch dim now at position 0.
    dim = dim if dim >= 0 else dim + x.dim() - 1
    result = numpy_sort(x, dim + 1)
    return result, (0, 0, 0)

numpy_sort.register_vmap(numpy_sort_vmap)

# numpy_take: take_along_axis wrapper; ind_inv is carried only so the
# backward can call numpy_take again with the roles of ind/ind_inv swapped.
@torch.library.custom_op("_torch_testing::numpy_take", mutates_args=())
def numpy_take(x: Tensor, ind: Tensor, ind_inv: Tensor, dim: int) -> Tensor:
    device = x.device
    x = to_numpy(x)
    ind = to_numpy(ind)
    return torch.tensor(np.take_along_axis(x, ind, dim), device=device)

@numpy_take.register_fake
def _(x, ind, ind_inv, dim):
    assert x.device == ind.device
    assert x.device == ind_inv.device
    assert ind.dtype == torch.long
    assert ind_inv.dtype == torch.long
    return torch.empty_like(x)

def numpy_take_setup_context(ctx, inputs, output):
    _x, ind, ind_inv, dim = inputs
    ctx.dim = dim
    ctx.save_for_backward(ind, ind_inv)

def numpy_take_backward(ctx, grad_out):
    # Gathering with the inverse permutation undoes the forward gather.
    ind, ind_inv = ctx.saved_tensors
    grad_x = numpy_take(grad_out, ind_inv, ind, ctx.dim)
    return grad_x, None, None, None

numpy_take.register_autograd(numpy_take_backward, setup_context=numpy_take_setup_context)

def numpy_take_vmap(info, in_dims, x, ind, ind_inv, dim):
    x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims

    # wrap dim
    # NOTE(review): when x is batched this uses x_bdim - 1 as the logical
    # rank for negative-dim wrapping — confirm this matches the intended
    # semantics for bdims other than the last position.
    logical_dim = x.dim() if x_bdim is None else x_bdim - 1
    dim = dim if dim >= 0 else dim + logical_dim

    def expand_bdim(x, x_bdim):
        # Materialize a batch dim at position 0 for unbatched inputs;
        # otherwise just move the existing one to the front.
        if x_bdim is None:
            return x.expand(info.batch_size, *x.shape)
        return x.movedim(x_bdim, 0)

    x = expand_bdim(x, x_bdim)
    ind = expand_bdim(ind, ind_bdim)
    ind_inv = expand_bdim(ind_inv, ind_inv_bdim)

    return numpy_take(x, ind, ind_inv, dim + 1), 0

numpy_take.register_vmap(numpy_take_vmap)
200
+
201
# numpy_nonzero: data-dependent output shape; used to exercise unbacked
# symint handling in the fake kernel.
@torch.library.custom_op("_torch_testing::numpy_nonzero", mutates_args=())
def numpy_nonzero(x: Tensor) -> Tensor:
    x_np = to_numpy(x)
    res = np.stack(np.nonzero(x_np), axis=1)
    if res.shape[0] <= 1:
        raise RuntimeError("not supported")
    return torch.tensor(res, device=x.device)

@numpy_nonzero.register_fake
def _(x):
    # The number of nonzeros is unknown at trace time -> unbacked symint.
    ctx = torch._custom_op.impl.get_ctx()
    i0 = ctx.create_unbacked_symint()
    shape = [i0, x.dim()]
    result = x.new_empty(shape, dtype=torch.long)
    return result

def sample_inputs_numpy_nonzero(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    shape = 10
    result = make_arg(shape, low=0.9, high=2)
    # Zero out roughly half the entries so nonzero() has real work to do.
    mask = make_tensor(shape, low=0, high=2, device=device, dtype=torch.long)
    with torch.no_grad():
        result *= mask

    yield SampleInput(result, args=())

def numpy_nonzero_vmap(info, in_dims, x):
    raise NotImplementedError("Operator is data-dependent and cannot be vmapped.")

numpy_nonzero.register_vmap(numpy_nonzero_vmap)

# numpy_view_copy: reshape-and-copy, with the backward reshaping back to the
# original input shape.
@torch.library.custom_op("_torch_testing::numpy_view_copy", mutates_args=())
def numpy_view_copy(x: Tensor, shape: Sequence[int]) -> Tensor:
    return torch.tensor(np.copy(to_numpy(x).reshape(shape)), device=x.device)

@numpy_view_copy.register_fake
def _(x, shape) -> Tensor:
    return x.clone().view(shape).clone()

def numpy_view_copy_setup_context(ctx, inputs, output) -> None:
    ctx.x_shape = inputs[0].shape

def numpy_view_copy_backward(ctx, grad_out):
    return torch.ops._torch_testing.numpy_view_copy(grad_out, ctx.x_shape), None

numpy_view_copy.register_autograd(numpy_view_copy_backward, setup_context=numpy_view_copy_setup_context)

def numpy_view_copy_vmap(info, in_dims, x, shape):
    x_bdim, _ = in_dims
    x = x.movedim(x_bdim, 0)
    # Prepend the batch size to the requested shape and reshape in one go.
    x_shape = x.shape[0]
    batch_shape = (x_shape, *shape)
    result = numpy_view_copy(x, batch_shape)
    return result, 0

numpy_view_copy.register_vmap(numpy_view_copy_vmap)

def sample_inputs_numpy_view_copy(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    result = make_arg(2, 3, 4, low=0.9, high=2)
    yield SampleInput(result, args=([2, 12],))
262
+
263
# numpy_cat: concatenation over a Tensor list; its backward is numpy_split_copy
# and vice versa, so the two ops test list-input/list-output round-trips.
@torch.library.custom_op('_torch_testing::numpy_cat', mutates_args=())
def numpy_cat(xs: Sequence[Tensor], dim: int) -> Tensor:
    assert len(xs) > 0
    assert all(x.device == xs[0].device for x in xs)
    assert all(x.dtype == xs[0].dtype for x in xs)
    np_xs = [to_numpy(x) for x in xs]
    np_out = np.concatenate(np_xs, axis=dim)
    return torch.tensor(np_out, device=xs[0].device)

@numpy_cat.register_fake
def _(xs, dim):
    assert len(xs) > 0
    assert all(x.device == xs[0].device for x in xs)
    assert all(x.dtype == xs[0].dtype for x in xs)
    return torch.cat(xs, dim=dim)

def numpy_cat_setup_context(ctx, inputs, output):
    xs, dim = inputs
    # Remember each input's extent along `dim` so backward can split there.
    ctx.dim_sizes = [x.shape[dim] for x in xs]
    ctx.dim = dim

def numpy_cat_backward(ctx, grad_out):
    dim_sizes = ctx.dim_sizes
    dim = ctx.dim

    # Split points are the running sums of the input sizes (last one dropped).
    splits = list(np.cumsum(dim_sizes)[:-1])
    grad_xs = torch.ops._torch_testing.numpy_split_copy(grad_out, splits, dim)
    return grad_xs, None

numpy_cat.register_autograd(numpy_cat_backward, setup_context=numpy_cat_setup_context)

def numpy_cat_vmap(info, in_dims, x, dim):
    # NOTE(review): unpacking a single element from in_dims assumes the vmap
    # protocol reports one shared bdim entry here for the (xs, dim) signature —
    # confirm against register_vmap's contract for Sequence[Tensor] arguments.
    x_bdim, = in_dims
    result = numpy_cat(x, dim)
    return result, x_bdim

numpy_cat.register_vmap(numpy_cat_vmap)

def sample_inputs_numpy_cat(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    r0 = make_arg(2, 3, 4, low=0.9, high=2)
    r1 = make_arg(4, 3, 4, low=0.9, high=2)
    r2 = make_arg(5, 3, 4, low=0.9, high=2)
    yield SampleInput([r0, r1, r2], args=(0,))

# numpy_split_copy: np.split wrapper returning a list of copies.
@torch.library.custom_op('_torch_testing::numpy_split_copy', mutates_args=())
def numpy_split_copy(x: Tensor, splits: Sequence[int], dim: int) -> List[Tensor]:
    x_np = to_numpy(x)
    arrs = np.split(x_np, splits, axis=dim)
    return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs]

@numpy_split_copy.register_fake
def _(x, splits, dim):
    return [xi.clone() for xi in torch.tensor_split(x, splits, dim)]

def numpy_split_copy_setup_context(ctx, inputs, output):
    _, _, dim = inputs
    ctx.dim = dim

def numpy_split_copy_backward(ctx, grad_out):
    # Concatenating the per-piece grads reassembles the input grad.
    result = torch.ops._torch_testing.numpy_cat(grad_out, dim=ctx.dim)
    return result, None, None

numpy_split_copy.register_autograd(numpy_split_copy_backward, setup_context=numpy_split_copy_setup_context)

def numpy_split_copy_vmap(info, in_dims, x, splits, dim):
    x_bdim, _ , _ = in_dims
    x = x.movedim(x_bdim, 0)
    # Batch dim sits at 0, so the split axis shifts right by one.
    result = numpy_split_copy(x, splits, dim + 1)
    return result, 0

numpy_split_copy.register_vmap(numpy_split_copy_vmap)

def sample_inputs_numpy_split_copy(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    x = make_arg(2, 9, low=0.9, high=2)
    yield SampleInput(x, args=([1, 3, 6], 1))

# numpy_split_copy_with_int: same as numpy_split_copy but also returns an int,
# exercising mixed (List[Tensor], int) outputs.
@torch.library.custom_op('_torch_testing::numpy_split_copy_with_int', mutates_args=())
def numpy_split_copy_with_int(x: Tensor, splits: Sequence[int], dim: int) -> Tuple[List[Tensor], int]:
    x_np = to_numpy(x)
    arrs = np.split(x_np, splits, axis=dim)
    return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs], len(splits)

@numpy_split_copy_with_int.register_fake
def _(x, splits, dim):
    return [xi.clone() for xi in torch.tensor_split(x, splits, dim)], len(splits)

def numpy_split_copy_with_int_setup_context(ctx, inputs, output):
    _, _, dim = inputs
    ctx.dim = dim

def numpy_split_copy_with_int_backward(ctx, grad_out, _):
    # The int output is non-differentiable; only the tensor list carries grad.
    return torch.ops._torch_testing.numpy_cat(grad_out, dim=ctx.dim), None, None

numpy_split_copy_with_int.register_autograd(
    numpy_split_copy_with_int_backward,
    setup_context=numpy_split_copy_with_int_setup_context)

def numpy_split_copy_with_int_vmap(info, in_dims, x, splits, dim):
    x_bdim, _ , _ = in_dims
    x = x.movedim(x_bdim, 0)
    result, len_split = numpy_split_copy_with_int(x, splits, dim + 1)
    # Every tensor piece is batched at dim 0; the int has no batch dim.
    return (result, len_split), ([0 for _ in range(len(result))], None)

numpy_split_copy_with_int.register_vmap(numpy_split_copy_with_int_vmap)
369
+
370
# numpy_nms: classic greedy non-maximum suppression over (x1, y1, x2, y2)
# boxes; output length is data-dependent (exercises unbacked symints).
@torch.library.custom_op("_torch_testing::numpy_nms", mutates_args=())
def numpy_nms(boxes: Tensor, scores: Tensor, iou_threshold: Number) -> Tensor:
    # Adapted from Ross Girshick's fast-rcnn implementation at
    # https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py
    assert boxes.device == scores.device
    device = boxes.device

    boxes = to_numpy(boxes)
    scores = to_numpy(scores)

    N = boxes.shape[0]
    assert boxes.shape == (N, 4)
    assert scores.shape == (N,)

    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    # +1 follows the original fast-rcnn inclusive-pixel convention.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        # Keep the highest-scoring remaining box, then drop every box whose
        # IoU with it exceeds the threshold.
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        inds = np.where(ovr <= iou_threshold)[0]
        order = order[inds + 1]

    result = torch.tensor(np.stack(keep), device=device)
    # Needed for data-dependent condition :(
    assert result.size(0) >= 2
    return result

@numpy_nms.register_fake
def _(boxes, scores, iou_threshold):
    assert boxes.device == scores.device
    N = boxes.shape[0]
    assert boxes.shape == (N, 4)
    assert scores.shape == (N,)

    # Number of kept boxes is unknown at trace time -> unbacked symint.
    ctx = torch._custom_op.impl.get_ctx()
    i0 = ctx.create_unbacked_symint()
    result = boxes.new_empty([i0], dtype=torch.int64)
    return result

def numpy_nms_vmap(info, in_dims, boxes, scores, iou_threshold):
    raise NotImplementedError("Operator is data-dependent and cannot be vmapped.")

numpy_nms.register_vmap(numpy_nms_vmap)

def sample_inputs_numpy_nms(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = functools.partial(make_tensor, device=device, dtype=dtype)
    N = 64
    # Build valid boxes as (x, y, x+dx, y+dy) so x2 >= x1 and y2 >= y1.
    xs = make_arg([N], low=0, high=28)
    dx = make_arg([N], low=0, high=4)
    ys = make_arg([N], low=0, high=28)
    dy = make_arg([N], low=0, high=4)
    boxes = torch.stack([xs, ys, xs + dx, ys + dy], dim=1).requires_grad_(requires_grad)
    scores = make_arg([N], low=0, high=1, requires_grad=requires_grad)
    iou_threshold = make_arg([], low=0, high=1).item()

    yield SampleInput(boxes, args=(scores, iou_threshold))
443
+
444
# OpInfo entries for the custom ops above.  Note both handle styles are used
# deliberately: some entries pass the CustomOpDef's `_opoverload`, others go
# through the dispatcher via `torch.ops._torch_testing.*`.
custom_op_db = [
    OpInfo(
        'NumpyCubeCustomOp',
        op=numpy_cube._opoverload,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyMulCustomOp',
        op=numpy_mul._opoverload,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyMulScalarCustomOp',
        op=numpy_mul_scalar._opoverload,
        sample_inputs_func=sample_inputs_numpy_mul_scalar,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpySortCustomOp',
        op=numpy_sort._opoverload,
        sample_inputs_func=sample_inputs_numpy_sort,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyTakeCustomOp',
        op=numpy_take._opoverload,
        sample_inputs_func=sample_inputs_numpy_take,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyNonzeroCustomOp',
        op=numpy_nonzero._opoverload,
        sample_inputs_func=sample_inputs_numpy_nonzero,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=False,
        supports_out=False,
    ),
    OpInfo(
        'NumpyNMSCustomOp',
        op=torch.ops._torch_testing.numpy_nms,
        sample_inputs_func=sample_inputs_numpy_nms,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=False,
        supports_out=False,
    ),
    OpInfo(
        'NumpyViewCopyCustomOp',
        op=torch.ops._torch_testing.numpy_view_copy,
        sample_inputs_func=sample_inputs_numpy_view_copy,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=True,
        supports_out=False,
    ),
    OpInfo(
        'NumpyCatCustomOp',
        op=torch.ops._torch_testing.numpy_cat,
        sample_inputs_func=sample_inputs_numpy_cat,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=True,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        supports_out=False,
    ),
    OpInfo(
        'NumpySplitCopyCustomOp',
        op=torch.ops._torch_testing.numpy_split_copy,
        sample_inputs_func=sample_inputs_numpy_split_copy,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=True,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        supports_out=False,
    ),
    OpInfo(
        'NumpySplitCopyWithIntCustomOp',
        op=torch.ops._torch_testing.numpy_split_copy_with_int,
        sample_inputs_func=sample_inputs_numpy_split_copy,
        dtypes=all_types_and(torch.bool, torch.half),
        # gradcheck only sees the tensor-list half of the (tensors, int) output.
        gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs)[0],
        supports_autograd=True,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        supports_out=False,
    ),
]
536
+
537
+
538
+ # ==============================================================
539
+ # some mechanical test cases
540
+ # ==============================================================
541
+
542
# source0..source5 are identity-like ops whose only purpose is to cover the
# different ways a fake (meta) kernel can be registered:
#   source0/source2: Library.define + @register_fake used as a decorator
#   source1/source3: Library.define + register_fake called with a function
#   source4:        @custom_op + @CustomOpDef.register_fake decorator
#   source5:        @custom_op + CustomOpDef.register_fake(function)
lib = torch.library.Library("_torch_testing", "FRAGMENT")  # noqa: TOR901

lib.define("source0(Tensor x) -> Tensor")

@torch.library.register_fake("_torch_testing::source0", lib=lib)
def _(x):
    return x.clone()

lib.define("source1(Tensor x) -> Tensor")

def source1_fake(x):
    return x.clone()

torch.library.register_fake("_torch_testing::source1", source1_fake, lib=lib)

lib.define("source2(Tensor x) -> Tensor")

@torch.library.register_fake("_torch_testing::source2", lib=lib)
def _(x):
    return x.clone()

lib.define("source3(Tensor x) -> Tensor")

def source3_fake(x):
    return x.clone()

torch.library.register_fake("_torch_testing::source3", source3_fake, lib=lib)


@torch.library.custom_op("_torch_testing::source4", mutates_args=())
def source4(x: Tensor) -> Tensor:
    return x.clone()

@source4.register_fake
def _(x):
    return x.clone()

@torch.library.custom_op("_torch_testing::source5", mutates_args=())
def source5(x: Tensor) -> Tensor:
    return x.clone()

def source5_fake(x):
    return x.clone()

source5.register_fake(source5_fake)
lib/python3.10/site-packages/torch/testing/_internal/custom_tensor.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ import torch.utils._pytree as pytree
5
+ from torch.utils._python_dispatch import return_and_correct_aliasing
6
+
7
+
8
+ # A simple tensor subclass that holds a tensor with custom metadata and custom method
9
+ class ConstantExtraMetadataTensor(torch.Tensor):
10
+ @staticmethod
11
+ def __new__(cls, elem):
12
+ shape = elem.shape
13
+ kwargs = {}
14
+ kwargs["strides"] = elem.stride()
15
+ kwargs["storage_offset"] = elem.storage_offset()
16
+ kwargs["device"] = elem.device
17
+ kwargs["layout"] = elem.layout
18
+ kwargs["requires_grad"] = elem.requires_grad
19
+ kwargs["dtype"] = elem.dtype
20
+ return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
21
+
22
+ def __init__(self, elem):
23
+ self.elem = elem
24
+ self.constant_attribute = 4
25
+
26
+ def __repr__(self):
27
+ inner_repr = repr(self.elem)
28
+ return f"CustomTensor({inner_repr})"
29
+
30
+ def __tensor_flatten__(self):
31
+ return ["elem"], self.constant_attribute
32
+
33
+ def add_constant(self, a):
34
+ self.constant_attribute += a
35
+
36
+ @staticmethod
37
+ def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
38
+ assert meta is not None
39
+ elem = inner_tensors["elem"]
40
+ out = ConstantExtraMetadataTensor(elem)
41
+ out.constant_attribute = meta
42
+ return out
43
+
44
+ @classmethod
45
+ def __torch_dispatch__(cls, func, types, args, kwargs):
46
+ if kwargs is None:
47
+ kwargs = {}
48
+ args_inner = pytree.tree_map_only(
49
+ ConstantExtraMetadataTensor, lambda x: x.elem, args
50
+ )
51
+
52
+ kwargs_inner = pytree.tree_map_only(
53
+ ConstantExtraMetadataTensor, lambda x: x.elem, kwargs
54
+ )
55
+
56
+ out_inner = func(*args_inner, **kwargs_inner)
57
+ out_inner_flat, spec = pytree.tree_flatten(out_inner)
58
+ # for aten ops that return non-tensors, just assume that
59
+ # our cust inner tensors return the same value
60
+ out_flat = [
61
+ ConstantExtraMetadataTensor(o_inner)
62
+ if isinstance(o_inner, torch.Tensor)
63
+ else o_inner
64
+ for o_inner in out_inner_flat
65
+ ]
66
+ out = pytree.tree_unflatten(out_flat, spec)
67
+ return return_and_correct_aliasing(func, args, kwargs, out)
lib/python3.10/site-packages/torch/testing/_internal/data/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # mypy: ignore-errors
lib/python3.10/site-packages/torch/testing/_internal/data/network1.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import torch.nn as nn
4
+
5
+
6
class Net(nn.Module):
    """Minimal test network: a single 10 -> 20 fully-connected layer."""

    def __init__(self) -> None:
        super().__init__()
        self.linear = nn.Linear(10, 20)
lib/python3.10/site-packages/torch/testing/_internal/data/network2.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import torch.nn as nn
4
+
5
+
6
class Net(nn.Module):
    """Minimal test network: a 10 -> 20 fully-connected layer plus a ReLU."""

    def __init__(self) -> None:
        super().__init__()
        self.linear = nn.Linear(10, 20)
        self.relu = nn.ReLU()
lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import re
4
+ import sys
5
+ import time
6
+ from functools import partial, wraps
7
+ from typing import Tuple
8
+
9
+ import torch.distributed as dist
10
+ import torch.distributed.rpc as rpc
11
+ from torch.distributed.rpc import _rref_context_get_debug_info
12
+ from torch.testing._internal.common_utils import FILE_SCHEMA, TEST_WITH_TSAN
13
+
14
+
15
# Bail out of the whole test module when the c10d distributed package is not
# compiled into this torch build: nothing below can run without it.
if not dist.is_available():
    print("c10d not available, skipping tests", file=sys.stderr)
    sys.exit(0)


# URL template for file-based process-group initialization (FILE_SCHEMA is
# the platform-appropriate "file://" prefix from common_utils).
INIT_METHOD_TEMPLATE = FILE_SCHEMA + "{file_name}"
21
+
22
def dist_init(
    old_test_method=None,
    setup_rpc: bool = True,
    clean_shutdown: bool = True,
    faulty_messages=None,
    messages_to_delay=None,
):
    """
    We use this decorator for setting up and tearing down state since
    MultiProcessTestCase runs each `test*` method in a separate process and
    each process just runs the `test*` method without actually calling
    'setUp' and 'tearDown' methods of unittest.

    Note: pass the string representation of MessageTypes that should be used
    with the faulty agent's send function. By default, all retriable messages
    ("RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT", "RREF_USER_DELETE",
    "CLEANUP_AUTOGRAD_CONTEXT_REQ") will use the faulty send (this default is
    set from faulty_rpc_agent_test_fixture.py).

    Args:
        old_test_method: the test function being decorated (None when the
            decorator is used with arguments, e.g. @dist_init(...)).
        setup_rpc: when True, init_rpc before the test and shutdown after.
        clean_shutdown: forwarded to rpc.shutdown(graceful=...).
        faulty_messages / messages_to_delay: fault-injection configuration
            forwarded to self.setup_fault_injection.
    """
    # If we use dist_init without arguments (ex: @dist_init), old_test_method is
    # appropriately set and we return the wrapper appropriately. On the other
    # hand if dist_init has arguments (ex: @dist_init(clean_shutdown=False)),
    # old_test_method is None and we return a functools.partial which is the real
    # decorator that is used and as a result we recursively call dist_init with
    # old_test_method and the rest of the arguments appropriately set.
    if old_test_method is None:
        return partial(
            dist_init,
            setup_rpc=setup_rpc,
            clean_shutdown=clean_shutdown,
            faulty_messages=faulty_messages,
            messages_to_delay=messages_to_delay,
        )

    @wraps(old_test_method)
    def new_test_method(self, *arg, **kwargs):
        # Setting _ignore_rref_leak to make sure OwnerRRefs are properly deleted
        # in tests.
        import torch.distributed.rpc.api as api

        api._ignore_rref_leak = False
        self.worker_id = self.rank
        self.setup_fault_injection(faulty_messages, messages_to_delay)

        rpc_backend_options = self.rpc_backend_options
        if setup_rpc:
            if TEST_WITH_TSAN:
                # TSAN runs much slower.
                rpc_backend_options.rpc_timeout = rpc.constants.DEFAULT_RPC_TIMEOUT_SEC * 5
                rpc.constants.DEFAULT_SHUTDOWN_TIMEOUT = 60

            rpc.init_rpc(
                name="worker%d" % self.rank,
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=rpc_backend_options,
            )

        return_value = old_test_method(self, *arg, **kwargs)

        # Tear down RPC symmetrically with the setup above.
        if setup_rpc:
            rpc.shutdown(graceful=clean_shutdown)

        return return_value

    return new_test_method
89
+
90
+
91
def noop() -> None:
    """Do nothing; used as a trivial RPC target."""
    return None
93
+
94
+
95
def wait_until_node_failure(rank: int, expected_error_regex: str = ".*") -> str:
    """
    Loops until an RPC to the given rank fails. This is used to
    indicate that the node has failed in unit tests.
    Args:
        rank (int): Rank of the node expected to fail
        expected_error_regex (optional, str): Regex of exception message expected. Useful to ensure a specific failure
            occurs, not just any.
    """
    target = f"worker{rank}"
    while True:
        try:
            # A successful no-op RPC means the node is still alive; back off
            # briefly and probe again.
            rpc.rpc_sync(target, noop, args=())
            time.sleep(0.1)
        except Exception as e:
            message = str(e)
            if re.search(pattern=expected_error_regex, string=message):
                return message
111
+
112
+
113
def wait_until_pending_futures_and_users_flushed(timeout: int = 20) -> None:
    """
    The RRef protocol holds forkIds of rrefs in a map until those forks are
    confirmed by the owner. The message confirming the fork may arrive after
    our tests check whether this map is empty, which leads to failures and
    flaky tests. to_here also does not guarantee that we have finished
    processing the owner's confirmation message for the RRef. This function
    loops until the map is empty, which means the messages have been received
    as processed. Call this function before asserting the map returned by
    _get_debug_info is empty.
    """
    start = time.time()
    deadline = start + timeout
    while True:
        debug_info = _rref_context_get_debug_info()
        num_pending_futures = int(debug_info["num_pending_futures"])
        num_pending_users = int(debug_info["num_pending_users"])
        # Both counters must drain before the caller may inspect debug info.
        if not num_pending_futures and not num_pending_users:
            return
        time.sleep(0.1)
        if time.time() > deadline:
            raise ValueError(
                f"Timed out waiting to flush pending futures and users, "
                f"had {num_pending_futures} pending futures and {num_pending_users} pending users"
            )
137
+
138
+
139
def get_num_owners_and_forks() -> Tuple[str, str]:
    """
    Retrieves number of OwnerRRefs and forks on this node from
    _rref_context_get_debug_info. Values are returned as the strings the
    debug-info map stores.
    """
    info = _rref_context_get_debug_info()
    return info["num_owner_rrefs"], info["num_forks"]
148
+
149
+
150
def wait_until_owners_and_forks_on_rank(
    num_owners: int, num_forks: int, rank: int, timeout: int = 20
) -> None:
    """
    Waits until timeout for num_forks and num_owners to exist on the rank. Used
    to ensure proper deletion of RRefs in tests.
    """
    deadline = time.time() + timeout
    while True:
        # Query the remote rank's RRef debug counters (short per-call timeout).
        counts = rpc.rpc_sync(
            worker_name(rank), get_num_owners_and_forks, args=(), timeout=5
        )
        num_owners_on_rank, num_forks_on_rank = (int(c) for c in counts)
        if num_owners_on_rank == num_owners and num_forks_on_rank == num_forks:
            return
        time.sleep(1)
        if time.time() > deadline:
            raise ValueError(
                f"Timed out waiting {timeout} sec for {num_owners} owners and {num_forks} forks on rank,"
                f" had {num_owners_on_rank} owners and {num_forks_on_rank} forks"
            )
172
+
173
+
174
def initialize_pg(init_method, rank: int, world_size: int) -> None:
    """Create the default gloo process group if one does not exist yet.

    Needed by tests that call `dist.barrier`; a no-op when the default
    group is already initialized.
    """
    if dist.is_initialized():
        return
    dist.init_process_group(
        backend="gloo",
        init_method=init_method,
        rank=rank,
        world_size=world_size,
    )
183
+
184
+
185
def worker_name(rank: int) -> str:
    """Return the canonical RPC worker name for *rank* (e.g. "worker0")."""
    return "worker" + str(rank)
187
+
188
+
189
def get_function_event(function_events, partial_event_name):
    """
    Returns the first event that matches partial_event_name in the provided
    function_events. These function_events should be the output of
    torch.autograd.profiler.function_events().

    Args:
        function_events: function_events returned by the profiler.
        partial_event_name (str): partial key that the event was profiled with.

    Raises IndexError when no event name contains the partial key.
    """
    matching = [ev for ev in function_events if partial_event_name in ev.name]
    return matching[0]
lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import sys
4
+ from functools import wraps, partial
5
+
6
+ import torch
7
+ import torch.distributed as dist
8
+ from torch.distributed import rpc
9
+ from torch.testing._internal.common_distributed import (
10
+ MultiProcessTestCase,
11
+ TEST_SKIPS,
12
+ tp_transports,
13
+ )
14
+
15
# Default world size for sharded-tensor tests: one process per GPU, 4 GPUs.
TEST_GPU_NUM = 4
16
+
17
class ShardedTensorTestBase(MultiProcessTestCase):
    """Multi-process base class for sharded-tensor tests.

    Provides helpers to bring up and tear down the collective process group
    plus the RPC framework, and an equality assertion for ShardedTensors.
    """

    @property
    def world_size(self):
        # One process per GPU (see TEST_GPU_NUM).
        return TEST_GPU_NUM

    def init_pg(self, backend="nccl"):
        """Create the default process group for this rank via a shared file."""
        if backend not in ["nccl", "gloo", "mpi"]:
            raise RuntimeError(f"Backend {backend} not supported!")

        dist.init_process_group(
            backend=backend,
            world_size=self.world_size,
            rank=self.rank,
            init_method=f"file://{self.file_name}",
        )

        # set device for nccl pg for collectives
        if backend == "nccl":
            torch.cuda.set_device(self.rank)

    def init_rpc(self):
        """Start TensorPipe RPC with a full device map between all ranks."""
        rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports())
        rpc_backend_options.init_method = f"file://{self.file_name}"
        # Map every (remote, local) device pair so tensors can travel both
        # directions between any two workers.
        for rank in range(self.world_size):
            rpc_backend_options.set_device_map(
                f"worker{rank}", {rank: self.rank, self.rank: rank}
            )

        rpc.init_rpc(
            name="worker%d" % self.rank,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=rpc_backend_options,
        )

    def init_comms(self, init_rpc=True, backend="nccl"):
        """Initialize RPC (optionally) and then the process group."""
        if init_rpc:
            self.init_rpc()
        self.init_pg(backend=backend)

    def destroy_comms(self, destroy_rpc=True):
        """Tear down comms; all ranks synchronize before shutting down."""
        # Wait for all ranks to reach here before starting shutdown.
        dist.barrier()

        if destroy_rpc:
            rpc.shutdown()
        dist.destroy_process_group()

    def setUp(self) -> None:
        # MultiProcessTestCase: fork one process per rank for each test.
        super().setUp()
        self._spawn_processes()

    def assert_sharded_tensor_equal(self, st1, st2):
        """Assert two ShardedTensors match shard-by-shard and in metadata."""
        st1_local_shards = st1.local_shards()
        st2_local_shards = st2.local_shards()
        self.assertEqual(len(st1_local_shards), len(st2_local_shards))
        for i, st1_local_shard in enumerate(st1_local_shards):
            self.assertEqual(st1_local_shard.tensor, st2_local_shards[i].tensor)
            self.assertEqual(st1_local_shard.metadata, st2_local_shards[i].metadata)

        self.assertEqual(st1.metadata(), st2.metadata())
        self.assertEqual(st1.sharding_spec(), st2.sharding_spec())
        self.assertEqual(len(st1.remote_shards()), len(st2.remote_shards()))
81
+
82
# wrapper to initialize comms (processgroup + rpc)
def with_comms(func=None, init_rpc=True, backend="nccl"):
    """Decorator: bring up comms before the test, tear them down after.

    Usable bare (@with_comms) or parameterized (@with_comms(backend="gloo")).
    """
    if func is None:
        # Parameterized form: return the real decorator with options bound.
        return partial(
            with_comms,
            init_rpc=init_rpc,
            backend=backend,
        )

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # An nccl world needs one GPU per rank; otherwise skip with the
        # canonical multi-gpu skip exit code.
        if backend == "nccl" and torch.cuda.device_count() < self.world_size:
            sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
        self.init_comms(init_rpc=init_rpc, backend=backend)
        func(self, *args, **kwargs)
        self.destroy_comms(destroy_rpc=init_rpc)

    return wrapper
lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import copy
4
+ import random
5
+ import torch
6
+ from torch.distributed._shard import sharded_tensor
7
+
8
+ from torch.distributed._shard.sharding_spec import (
9
+ ChunkShardingSpec,
10
+ )
11
+
12
# Placement strings ("rank:<r>/cuda:<d>") used to build sharding specs in
# the helpers below; one entry per rank/GPU.
PLACEMENTS = [
    "rank:0/cuda:0",
    "rank:1/cuda:1",
    "rank:2/cuda:2",
    "rank:3/cuda:3",
]

# Default number of GPUs (and therefore ranks) the placements above assume.
DEFAULT_GPU_NUM = 4
20
+
21
+
22
def _chunk_sharding_specs_list_for_test(sharding_dims, seed=0):
    """Build one ChunkShardingSpec per entry of *sharding_dims*.

    Each spec shards along the corresponding dim and gets a deterministic
    pseudo-random placement order derived from ``seed + index``.

    Args:
        sharding_dims: sequence of dims, one per spec to build.
        seed: base seed for the per-spec placement shuffles.

    Returns:
        list[ChunkShardingSpec] of length ``len(sharding_dims)``.
    """
    spec_list = []
    for i, dim in enumerate(sharding_dims):
        # Shuffle a copy: the original code shuffled the module-level
        # PLACEMENTS list in place, which made each spec depend on how many
        # times this helper had been called before (cross-test state leak).
        placements = copy.deepcopy(PLACEMENTS)
        random.Random(seed + i).shuffle(placements)
        spec_list.append(
            ChunkShardingSpec(
                dim=dim,
                placements=placements,
            )
        )
    return spec_list
33
+
34
class MyShardedModel2(torch.nn.Module):
    """Leaf module holding one optional sharded tensor and one dense param."""

    def __init__(
        self,
        spec=None,
        group=None,
        init_rrefs=True
    ) -> None:
        super().__init__()
        # Only materialize the sharded tensor when a sharding spec is given;
        # otherwise leave the slot empty.
        if spec is None:
            self.sharded_tensor2 = None
        else:
            self.sharded_tensor2 = sharded_tensor.rand(
                spec, 10, 20, process_group=group, init_rrefs=init_rrefs
            )
        self.random_tensor2 = torch.nn.Parameter(torch.rand(2, 2))
49
+
50
+
51
class MyShardedModel1(torch.nn.Module):
    """Root module: optional sharded tensor, dense param, and a nested
    MyShardedModel2 built from the same spec/group."""

    def __init__(
        self,
        spec=None,
        group=None,
        init_rrefs=True
    ) -> None:
        super().__init__()
        # Only materialize the sharded tensor when a sharding spec is given.
        if spec is None:
            self.sharded_tensor1 = None
        else:
            self.sharded_tensor1 = sharded_tensor.rand(
                spec, 10, 20, process_group=group, init_rrefs=init_rrefs
            )
        self.random_tensor1 = torch.nn.Parameter(torch.rand(2, 2))
        self.submodule = MyShardedModel2(spec, group, init_rrefs)
lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py ADDED
The diff for this file is too large to render. See raw diff
 
lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_utils.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ from contextlib import contextmanager
4
+ from datetime import timedelta
5
+ from functools import (
6
+ partial,
7
+ wraps,
8
+ )
9
+
10
+ import torch.distributed as dist
11
+ import torch.distributed.distributed_c10d as c10d
12
+
13
class MockProcessGroup(dist.ProcessGroup):
    """Process-group stub that supports nothing beyond identification."""

    def __init__(self, rank, world):
        super().__init__(rank, world)

    def getBackendName(self):
        # c10d routes backend creation through this registered name.
        return "mock_process_group"
20
+
21
def create_mock_pg(prefix_store, rank, world_size, timeout):
    # Creation hook invoked by init_process_group for the registered backend;
    # the store and timeout are deliberately ignored by the mock.
    return MockProcessGroup(rank, world_size)

# Module-import side effect: makes "mock_process_group" resolvable as a
# backend name. Must run before mock_init_dist below is ever called.
dist.Backend.register_backend('mock_process_group', create_mock_pg)
25
+
26
def mock_init_dist(rank, world_size):
    """Initialize the default process group using the mock backend.

    Pretends every other rank has already checked in, so a single process
    can "initialize" a multi-rank world without real peers.
    """
    # !!! WARNING !!!
    # Kids don't try this at home, this is a cute pile of hacks that
    # depends on a small mountain of c10d internals
    assert not dist.is_initialized()
    store = dist.HashStore()
    # Trick _store_based_barrier into believing everyone else already checked-in
    # Zero is the group index
    store.add(f"{c10d.STORE_BASED_BARRIER_PREFIX}:0", world_size - 1)
    dist.init_process_group(
        backend="mock_process_group",
        rank=rank,
        world_size=world_size,
        store=store,
        group_name="fake",
        timeout=timedelta(seconds=1))
42
+
43
@contextmanager
def with_dist(rank=0, world_size=2):
    """
    Context manager that initializes c10d with a fake process group.
    """
    mock_init_dist(rank=rank, world_size=world_size)
    try:
        yield
    finally:
        # Always unwind the fake default group, even if the body raised.
        dist.destroy_process_group()
53
+
54
def with_fake_comms(func=None, rank=0, world_size=2):
    """
    Function wrapper that inits a fake process group designed for testing.
    Right now only querying for world size is available.
    """
    if func is None:
        # Parameterized usage: @with_fake_comms(rank=..., world_size=...)
        return partial(with_fake_comms, rank=rank, world_size=world_size)

    @wraps(func)
    def wrapped(self, *args, **kwargs):
        with with_dist(rank, world_size):
            func(self, *args, **kwargs)

    return wrapped
lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/__init__.py ADDED
File without changes
lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_autograd_test.py ADDED
The diff for this file is too large to render. See raw diff
 
lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/dist_optimizer_test.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+
4
+ import threading
5
+
6
+ import torch
7
+ import torch.distributed.autograd as dist_autograd
8
+ import torch.distributed.rpc as rpc
9
+ from torch import optim
10
+ from torch.distributed.optim import DistributedOptimizer
11
+ from torch.testing._internal.dist_utils import dist_init
12
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
13
+ RpcAgentTestFixture,
14
+ )
15
+
16
+
17
class MyModule:
    """Tiny deterministic 3x3 linear model used by DistributedOptimizer tests."""

    lock = threading.Lock()

    def __init__(self, requires_grad=True):
        # A dedicated generator keeps weight init deterministic: all RPC
        # threads share the default generator, so seeding it globally would
        # be racy and the draw order non-deterministic.
        gen = torch.Generator()
        gen.manual_seed(0)
        self.w = torch.rand((3, 3), requires_grad=requires_grad, generator=gen)

    def forward(self, t1):
        """Return w @ t1."""
        return torch.mm(self.w, t1)

    def get_w(self):
        """Expose the weight tensor (used as the optimizer parameter)."""
        return self.w
34
+
35
+
36
class FailingOptimizer(optim.Optimizer):
    """Optimizer whose step() always raises; tests error propagation."""

    def __init__(self, params):
        super().__init__(params, {})

    def step(self, closure=None):
        raise ValueError("Error running optimizer.")
42
+
43
+
44
class OptimizerFailingOnConstructor(optim.Optimizer):
    """Optimizer that raises during construction; tests remote ctor errors."""

    def __init__(self, params):
        super().__init__(params, {})
        raise ValueError("Error creating optimizer.")

    def step(self, closure=None):
        # Unreachable: construction always fails first.
        raise NotImplementedError
51
+
52
+
53
+ def _call_method(method, obj_rref, *args, **kwargs):
54
+ return method(obj_rref.local_value(), *args, **kwargs)
55
+
56
+
57
def remote_method(method, obj_rref, *args, **kwargs):
    """
    Call rpc.remote on a method in a remote object.

    Args:
        method: the method (for example, Class.method)
        obj_rref (RRef): remote reference to the object
        args: positional arguments to pass to the method
        kwargs: keyword arguments to pass to the method

    Returns a RRef to the remote method call result.
    """
    return rpc.remote(
        obj_rref.owner(),
        _call_method,
        args=[method, obj_rref, *args],
        kwargs=kwargs,
    )
75
+
76
+
77
def rpc_async_method(method, obj_rref, *args, **kwargs):
    """
    Call rpc.rpc_async on a method in a remote object.

    Args:
        method: the method (for example, Class.method)
        obj_rref (RRef): remote reference to the object
        args: positional arguments to pass to the method
        kwargs: keyword arguments to pass to the method

    Returns a Future to the method call result.
    """
    return rpc.rpc_async(
        obj_rref.owner(),
        _call_method,
        args=[method, obj_rref, *args],
        kwargs=kwargs,
    )
95
+
96
+
97
class DistOptimizerTest(RpcAgentTestFixture):
    """End-to-end tests for DistributedOptimizer over RPC.

    Each test builds a two-hop model (two MyModule instances living on the
    next two ranks) and checks either that the distributed optimizer matches
    a purely local reference run, or that remote errors surface properly.
    """

    @dist_init()
    def test_dist_optim_exception(self):
        """step() must surface the ValueError raised by FailingOptimizer
        on the remote workers."""
        # distributed version
        owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
        owner2 = "worker%d" % ((self.rank + 2) % self.world_size)

        remote_module1 = rpc.remote(owner1, MyModule)
        remote_module2 = rpc.remote(owner2, MyModule)
        remote_param1 = remote_method(MyModule.get_w, remote_module1)
        remote_param2 = remote_method(MyModule.get_w, remote_module2)

        dist_optim = DistributedOptimizer(
            FailingOptimizer, [remote_param1, remote_param2]
        )

        with dist_autograd.context() as context_id:
            g_cpu = torch.Generator()
            g_cpu.manual_seed(0)
            t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
            t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
            # Two-hop forward: module1 on owner1, then module2 on owner2.
            output1 = rpc_async_method(MyModule.forward, remote_module1, t2)
            output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait())
            loss = torch.add(output2.wait(), t1).sum()

            dist_autograd.backward(context_id, [loss])
            with self.assertRaisesRegex(Exception, "Error running optimizer"):
                dist_optim.step(context_id)

    @dist_init()
    def test_dist_optim_exception_on_constructor(self):
        """Optimizer constructor failures on remote workers must propagate."""
        # distributed version
        owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
        owner2 = "worker%d" % ((self.rank + 2) % self.world_size)

        remote_module1 = rpc.remote(owner1, MyModule)
        remote_module2 = rpc.remote(owner2, MyModule)
        remote_param1 = remote_method(MyModule.get_w, remote_module1)
        remote_param2 = remote_method(MyModule.get_w, remote_module2)

        with self.assertRaisesRegex(Exception, "Error creating optimizer."):
            DistributedOptimizer(
                OptimizerFailingOnConstructor, [remote_param1, remote_param2]
            )

    def _test_dist_optim_base(self, optim_cls, *args, **kwargs):
        """Run optim_cls locally and via DistributedOptimizer on identical
        seeded inputs; the resulting weights must match."""
        # local version
        module1 = MyModule()
        module2 = MyModule()
        params = [module1.get_w(), module2.get_w()]
        local_optim = optim_cls(params, *args, **kwargs)

        old_w1 = module1.w.detach().clone()
        old_w2 = module2.w.detach().clone()

        g_cpu = torch.Generator()
        g_cpu.manual_seed(0)
        t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
        t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
        output1 = module1.forward(t2)
        output2 = module2.forward(output1)
        loss = torch.add(output2, t1).sum()

        loss.backward()
        local_optim.step()

        # distributed version
        owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
        owner2 = "worker%d" % ((self.rank + 2) % self.world_size)

        remote_module1 = rpc.remote(owner1, MyModule)
        remote_module2 = rpc.remote(owner2, MyModule)
        remote_param1 = remote_method(MyModule.get_w, remote_module1)
        remote_param2 = remote_method(MyModule.get_w, remote_module2)

        # sanity check: local and remote initial weights should match
        self.assertEqual(old_w1, remote_param1.to_here())
        self.assertEqual(old_w2, remote_param2.to_here())

        dist_optim = DistributedOptimizer(
            optim_cls, [remote_param1, remote_param2], *args, **kwargs
        )

        with dist_autograd.context() as context_id:
            # Re-seed so the distributed run sees the same inputs as the
            # local reference run above.
            g_cpu.manual_seed(0)
            t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
            t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
            output1 = rpc_async_method(MyModule.forward, remote_module1, t2)
            output2 = rpc_async_method(MyModule.forward, remote_module2, output1.wait())
            loss = torch.add(output2.wait(), t1)

            dist_autograd.backward(context_id, [loss.sum()])
            dist_optim.step(context_id)

            new_w1 = rpc_async_method(MyModule.get_w, remote_module1).wait()
            new_w2 = rpc_async_method(MyModule.get_w, remote_module2).wait()

            # ensure optimizer changed weights
            self.assertNotEqual(old_w1, new_w1)
            self.assertNotEqual(old_w2, new_w2)
            # ensure local equals remote
            self.assertEqual(new_w1, module1.get_w())
            self.assertEqual(new_w2, module2.get_w())

    @dist_init()
    def test_dist_optim(self):
        # Exercise the base check across a representative set of optimizers.
        self._test_dist_optim_base(optim.Adagrad, lr=0.05)
        self._test_dist_optim_base(optim.Adam, lr=1e-2, amsgrad=True)
        self._test_dist_optim_base(optim.AdamW, lr=0.05, amsgrad=True)
        self._test_dist_optim_base(optim.SGD, lr=0.05)
        self._test_dist_optim_base(optim.SGD, lr=1e-3, momentum=1, weight_decay=1, nesterov=True)
        self._test_dist_optim_base(optim.Adadelta, rho=0.95)
        self._test_dist_optim_base(optim.RMSprop, lr=0.05)
        self._test_dist_optim_base(optim.Adamax, lr=0.05)
        self._test_dist_optim_base(optim.Rprop, lr=0.05)

    def _test_dist_optim_none_grads(self, optim_cls, *args, **kwargs):
        """Same as _test_dist_optim_base, but module2's weight has
        requires_grad=False: the optimizer must leave it untouched."""
        # local version
        module1 = MyModule()
        module2 = MyModule(requires_grad=False)
        params = [module1.get_w(), module2.get_w()]
        local_optim = optim_cls(params, *args, **kwargs)

        old_w1 = module1.w.detach().clone()
        old_w2 = module2.w.detach().clone()

        g_cpu = torch.Generator()
        g_cpu.manual_seed(0)
        t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
        t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
        output1 = module1.forward(t2)
        output2 = module2.forward(output1)
        loss = torch.add(output2, t1).sum()

        loss.backward()
        local_optim.step()

        # distributed version
        owner1 = "worker%d" % ((self.rank + 1) % self.world_size)
        owner2 = "worker%d" % ((self.rank + 2) % self.world_size)

        remote_module1 = rpc.remote(owner1, MyModule)
        remote_module2 = rpc.remote(owner2, MyModule, args=(False,))
        remote_param1 = remote_module1.remote().get_w()
        remote_param2 = remote_module2.remote().get_w()

        # sanity check: local and remote initial weights should match
        self.assertEqual(old_w1, remote_param1.to_here())
        self.assertEqual(old_w2, remote_param2.to_here())

        dist_optim = DistributedOptimizer(
            optim_cls, [remote_param1, remote_param2], *args, **kwargs
        )

        with dist_autograd.context() as context_id:
            # Same seeded inputs as the local reference run.
            g_cpu.manual_seed(0)
            t1 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
            t2 = torch.rand((3, 3), requires_grad=True, generator=g_cpu)
            output1 = remote_module1.rpc_async().forward(t2)
            output2 = remote_module2.rpc_async().forward(output1.wait())
            loss = torch.add(output2.wait(), t1)

            dist_autograd.backward(context_id, [loss.sum()])
            dist_optim.step(context_id)

            new_w1 = remote_module1.rpc_async().get_w().wait()
            new_w2 = remote_module2.rpc_async().get_w().wait()

            # ensure optimizer changed weights for w1
            self.assertNotEqual(old_w1, new_w1)

            # ensure optimizer not changed weights for w2
            self.assertEqual(old_w2, new_w2)
            # ensure local equals remote
            self.assertEqual(new_w1, module1.get_w())
            self.assertEqual(new_w2, module2.get_w())

    @dist_init()
    def test_dist_optim_none_grads(self):
        # Optimizers must tolerate parameters that received no gradient.
        self._test_dist_optim_none_grads(optim.SGD, lr=0.05)
        self._test_dist_optim_none_grads(optim.RMSprop, lr=0.05)
        self._test_dist_optim_none_grads(optim.Rprop, lr=0.05)
        self._test_dist_optim_none_grads(optim.Adadelta, rho=0.95)
lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_agent_rpc_test.py ADDED
@@ -0,0 +1,326 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import torch
4
+ import time
5
+ import torch.distributed.rpc as rpc
6
+ from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs
7
+ from torch.testing._internal.dist_utils import (
8
+ dist_init,
9
+ wait_until_pending_futures_and_users_flushed,
10
+ wait_until_owners_and_forks_on_rank,
11
+ worker_name,
12
+ )
13
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
14
+ RpcAgentTestFixture,
15
+ )
16
+
17
def my_sleep_func(seconds=1):
    """Sleep for *seconds*, then return the 0-dim tensor 1 * 1."""
    time.sleep(seconds)
    one = torch.tensor(1)
    return torch.mul(one, one)
20
+
21
# TorchScript-compiled target used to exercise SCRIPT_CALL RPC messages.
@torch.jit.script
def my_script_func(tensor):
    # Returns tensor + tensor (element-wise doubling).
    return torch.add(tensor, tensor)
24
+
25
def add_rref_to_value(rref, value):
    """Fetch the RRef's value to the caller and add *value* to it."""
    fetched = rref.to_here()
    return fetched + value
27
+
28
+ class FaultyAgentRpcTest(RpcAgentTestFixture):
29
+
30
    # no faulty_messages defined so this fails all retryable messages - see
    # faulty_rpc_agent_test_fixture.py for the list of retryable messages.
    @dist_init(messages_to_delay={})
    def test_check_failed_messages(self):
        """An RRef chain Worker0 -> Worker1 -> Worker2 must still complete
        even when every retryable message fails some sends first."""
        if self.rank == 0:
            dst_worker_b = worker_name((self.rank + 1) % self.world_size)
            dst_worker_c = worker_name((self.rank + 2) % self.world_size)

            # Worker0 sends RPC to Worker1 and creates an RRef there
            rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
            # Worker0 sends an RPC to Worker2 with the RRef as an arg
            rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
            # check if the output is as expected
            self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
            # explicitly delete all User RRefs
            _delete_all_user_and_unforked_owner_rrefs()
46
+
47
    @dist_init
    def test_verify_backend_options(self):
        # Sanity-check the defaults supplied by the faulty-agent fixture.
        self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
        self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
        self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
        self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
        self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
        self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
55
+
56
    @dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
    def test_custom_faulty_messages(self):
        # A user-supplied faulty-message list must replace the default set.
        self.assertEqual(
            {"RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"},
            set(self.rpc_backend_options.messages_to_fail),
        )
62
+
63
    @dist_init(faulty_messages=[])
    def test_no_faulty_messages(self):
        # An explicit empty list disables fault injection entirely.
        self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
66
+
67
    @dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
    def test_custom_messages_to_delay(self):
        # A user-supplied delay map must replace the fixture's default.
        self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
70
+
71
    def _test_remote_message_dropped_pickle(self, dst=None):
        """A dropped PYTHON_REMOTE_CALL must poison the RRef: both serializing
        it and sending it over RPC afterwards raise RRef-creation errors.
        Only rank 0 drives the scenario; *dst* defaults to the next rank."""
        if self.rank != 0:
            return
        dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
        dst_worker = f"worker{dst_rank}"
        # Since we fail python_remote_call messages synchronously, the future
        # corresponding to this remote call will be marked with an error when
        # this function returns.
        rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
        # Call to ensure pending callbacks are run.
        wait_until_pending_futures_and_users_flushed()
        # Attempt to fork the RRef should raise an error indicating the rpc.remote timeout.
        with self.assertRaisesRegex(RuntimeError, "RRef creation"):
            rref._serialize()
        # Test that using RRef as arg over RPC (which forks) results in the same
        # error
        with self.assertRaisesRegex(RuntimeError, "RRef creation"):
            rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
89
+
90
    @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
    def test_remote_message_dropped_pickle(self):
        # Default destination: the next rank.
        self._test_remote_message_dropped_pickle()
93
+
94
    @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
    def test_remote_message_dropped_pickle_to_self(self):
        # Same scenario, but the destination is the caller's own rank.
        self._test_remote_message_dropped_pickle(self.rank)
97
+
98
+
99
    def _test_remote_message_dropped_timeout(self, func, args, dst=None):
        """When the rpc.remote() creation message is dropped entirely,
        to_here() on the resulting RRef must fail with an RRef-creation
        error. Only rank 0 drives; *dst* defaults to the next rank."""
        if self.rank != 0:
            return

        # test the case where rpc.remote() message creation is completely dropped.
        dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
        dst_worker = f"worker{dst_rank}"
        # Since we fail python_remote_call messages synchronously, the future
        # corresponding to this remote call will be marked with an error when
        # this function returns.
        rref = rpc.remote(dst_worker, func, args=args)
        # Call to ensure pending callbacks are run.
        wait_until_pending_futures_and_users_flushed()
        with self.assertRaisesRegex(RuntimeError, "RRef creation"):
            rref.to_here()
        # Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
        # on the owning nodes, this is expected because the OwnerRRef was never
        # successfully created. Therefore, delAllUsers will work as expected.
117
+
118
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
    # Builtin op, remote owner.
    self._test_remote_message_dropped_timeout(
        torch.add, (torch.tensor(1), torch.tensor(1))
    )

@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
    # Builtin op, self-owned RRef.
    self._test_remote_message_dropped_timeout(
        torch.add, (torch.tensor(1), torch.tensor(1)), dst=0
    )

@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
    # Python UDF, remote owner.
    self._test_remote_message_dropped_timeout(my_sleep_func, (2,))

@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
    # Python UDF, self-owned RRef.
    self._test_remote_message_dropped_timeout(my_sleep_func, (2,), dst=0)
141
+
142
def _test_remote_message_delay_timeout(self, func, args, dst=None):
    """Check timeout handling when the remote creation is delayed, not dropped.

    The owner eventually processes the creation message, but the
    creator-side future times out first; to_here() must then report the
    failed creation.
    """
    if self.rank != 0:
        return
    owner_rank = dst if dst is not None else (self.rank + 1) % self.world_size
    owner = f"worker{owner_rank}"
    # 1 ms timeout vs. a multi-second injected delay: the creation future
    # must time out.
    rref = rpc.remote(owner, func, args=args, timeout=0.001)
    timeout_regex = self.get_timeout_error_regex()
    with self.assertRaisesRegex(RuntimeError, timeout_regex):
        rref._get_future().wait()

    # Flush pending callbacks, then to_here() must observe that the
    # rpc.remote() creation failed.
    wait_until_pending_futures_and_users_flushed()
    with self.assertRaisesRegex(RuntimeError, "RRef creation"):
        rref.to_here()

    # Also exercise rpc.remote() timing out while to_here() is already
    # blocking. Skipped when the owner is this rank: a local to_here()
    # goes through localValue(), which issues no RPC and thus has no
    # timeout (https://github.com/pytorch/pytorch/issues/39280).
    if owner_rank != self.rank:
        slow_rref = rpc.remote(owner, func, args=args, timeout=2)

        with self.assertRaisesRegex(RuntimeError, timeout_regex):
            # to_here() does not yet know the status of rpc.remote(), so
            # it must raise a timeout error itself.
            slow_rref.to_here(0.001)
    # On shutdown a UserRRef sends RRefUserDelete, which can be a noop if
    # the owner has not created the RRef yet; the owner could then process
    # the creation later and wait on the delete, causing a timeout. Wait
    # for confirmation that pending owners and forks exist before letting
    # the deletes go out.
    if owner_rank != self.rank:
        wait_until_owners_and_forks_on_rank(2, 2, rank=owner_rank)
183
+
184
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
    # Python UDF delayed by 2 s, remote owner.
    self._test_remote_message_delay_timeout(my_sleep_func, (2,))

@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
    # Python UDF delayed by 2 s, self-owned RRef (shorter sleep arg).
    self._test_remote_message_delay_timeout(my_sleep_func, (1,), dst=0)
195
+
196
@dist_init(
    faulty_messages=[],
    messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
    # Builtin op, remote owner.
    self._test_remote_message_delay_timeout(
        torch.add, (torch.tensor(1), torch.tensor(1))
    )

@dist_init(
    faulty_messages=[],
    messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
    # Builtin op, self-owned RRef.
    self._test_remote_message_delay_timeout(
        torch.add, (torch.tensor(1), torch.tensor(1)), dst=0
    )

@dist_init(
    faulty_messages=[],
    messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
    # TorchScript function, remote owner.
    self._test_remote_message_delay_timeout(my_script_func, (torch.tensor(1),))

@dist_init(
    faulty_messages=[],
    messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
    # TorchScript function, self-owned RRef.
    self._test_remote_message_delay_timeout(
        my_script_func, (torch.tensor(1),), dst=0
    )
231
+
232
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
    """to_here() honors its timeout while the RRef fetch reply is delayed."""
    if self.rank != 0:
        return

    owner = f"worker{(self.rank + 1) % self.world_size}"
    rref = rpc.remote(owner, torch.add, args=(torch.tensor(1), torch.tensor(1)))
    with self.assertRaisesRegex(RuntimeError, self.get_timeout_error_regex()):
        # 10 ms timeout is shorter than the 1 s injected fetch delay.
        rref.to_here(0.01)

    # The default timeout is large enough to outlast the delay.
    rref.to_here()
247
+
248
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
    """Timeout behavior of rpc_sync/rpc_async for builtin ops under delay.

    The fixture delays SCRIPT_CALL (builtin) messages by default, so a 1 s
    per-call timeout must fire while the default timeout must not.
    """
    dst = worker_name((self.rank + 1) % self.world_size)
    timeout_regex = self.get_timeout_error_regex()
    add_args = (torch.tensor(1), torch.tensor(1))

    # Synchronous call with a timeout shorter than the injected delay.
    with self.assertRaisesRegex(RuntimeError, timeout_regex):
        rpc.rpc_sync(dst, torch.add, args=add_args, timeout=1)

    # Asynchronous call, same short timeout.
    fut = rpc.rpc_async(dst, torch.add, args=add_args, timeout=1)
    with self.assertRaisesRegex(RuntimeError, timeout_regex):
        fut.wait()

    # The currently set default timeout is large enough for the delayed
    # RPC to complete.
    fut = rpc.rpc_async(dst, torch.add, args=add_args)
    fut.wait()

    # Lowering the default timeout (with no per-call override) must fail.
    rpc._set_rpc_timeout(0.001)
    fut = rpc.rpc_async(dst, torch.add, args=add_args)
    with self.assertRaisesRegex(RuntimeError, timeout_regex):
        fut.wait()

    # timeout=0 means "no timeout" and overrides the tiny default.
    fut = rpc.rpc_async(dst, torch.add, args=add_args, timeout=0)
    fut.wait()
    # Restore the default so shutdown proceeds cleanly.
    rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
291
+
292
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
    """Timeout behavior of rpc_sync/rpc_async for TorchScript functions.

    SCRIPT_CALL messages are delayed by 1.5 s, so a 1 s per-call timeout
    must fire while the (large) default timeout must not.
    """
    next_rank = (self.rank + 1) % self.world_size
    dst_worker = worker_name(next_rank)
    expected_error = self.get_timeout_error_regex()
    # Synchronous call with a timeout shorter than the injected delay.
    with self.assertRaisesRegex(RuntimeError, expected_error):
        rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)

    # Asynchronous call, same short timeout.
    fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
    with self.assertRaisesRegex(RuntimeError, expected_error):
        fut.wait()

    # Ensure that the currently set default timeout is large enough such
    # that RPCs with delays still complete.
    fut = rpc.rpc_async(
        dst_worker, my_script_func, args=(torch.tensor(1),)
    )
    fut.wait()

    # Ensure timeout if we set a new default and don't override
    rpc._set_rpc_timeout(0.001)
    fut = rpc.rpc_async(
        dst_worker, my_script_func, args=(torch.tensor(1),)
    )
    with self.assertRaisesRegex(RuntimeError, expected_error):
        fut.wait()

    # Ensure run to completion if we specify timeout of 0 (i.e. infinite).
    # NOTE: the redundant second rpc._set_rpc_timeout(0.001) that used to
    # appear here was dropped — the default was already 0.001, and the
    # builtin variant of this test has no such duplicate call.
    fut = rpc.rpc_async(
        dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
    )
    fut.wait()
    # Reset for clean shutdown
    rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/faulty_rpc_agent_test_fixture.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import torch.distributed.rpc as rpc
4
+ import torch.distributed.rpc._testing # noqa: F401
5
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
6
+ RpcAgentTestFixture,
7
+ )
8
+
9
# Message types currently retried by the RRef protocol and distributed
# autograd; only these should be exercised as droppable with the Faulty
# RPC agent.
retryable_message_types = [
    "RREF_FORK_REQUEST",
    "RREF_CHILD_ACCEPT",
    "RREF_USER_DELETE",
    "CLEANUP_AUTOGRAD_CONTEXT_REQ",
]

# Per-message-type delay (seconds) applied while the message is processed
# in FaultyTensorPipeAgent's enqueueSend().
default_messages_to_delay = {
    "PYTHON_CALL": 1.5,  # Python UDF
    "SCRIPT_CALL": 1.5,  # Script/Builtin
}
23
+
24
class FaultyRpcAgentTestFixture(RpcAgentTestFixture):
    """Fixture configuring the FAULTY_TENSORPIPE backend for fault-injection tests."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Start from the module-level defaults; dist_init may override both
        # through setup_fault_injection().
        self.messages_to_fail = retryable_message_types
        self.messages_to_delay = default_messages_to_delay

    @property
    def rpc_backend(self):
        return rpc.backend_registry.BackendType["FAULTY_TENSORPIPE"]

    @property
    def rpc_backend_options(self):
        return rpc.backend_registry.construct_rpc_backend_options(
            self.rpc_backend,
            init_method=self.init_method,
            num_worker_threads=8,
            num_fail_sends=3,
            messages_to_fail=self.messages_to_fail,
            messages_to_delay=self.messages_to_delay,
        )

    def setup_fault_injection(self, faulty_messages, messages_to_delay):
        # None means "keep the current value"; anything else replaces it.
        if faulty_messages is not None:
            self.messages_to_fail = faulty_messages
        if messages_to_delay is not None:
            self.messages_to_delay = messages_to_delay

    def get_shutdown_error_regex(self):
        patterns = (
            "Exception in thread pool task",
            "Connection reset by peer",
            "Connection closed by peer",
        )
        return "|".join(f"({p})" for p in patterns)

    def get_timeout_error_regex(self):
        return "RPC ran for more than"
lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_agent_test_fixture.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import os
4
+ from abc import ABC, abstractmethod
5
+
6
+ import torch.testing._internal.dist_utils
7
+
8
+
9
+ class RpcAgentTestFixture(ABC):
10
+ @property
11
+ def world_size(self) -> int:
12
+ return 4
13
+
14
+ @property
15
+ def init_method(self):
16
+ use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
17
+ if use_tcp_init == "1":
18
+ master_addr = os.environ["MASTER_ADDR"]
19
+ master_port = os.environ["MASTER_PORT"]
20
+ return f"tcp://{master_addr}:{master_port}"
21
+ else:
22
+ return self.file_init_method
23
+
24
+ @property
25
+ def file_init_method(self):
26
+ return torch.testing._internal.dist_utils.INIT_METHOD_TEMPLATE.format(
27
+ file_name=self.file_name
28
+ )
29
+
30
+ @property
31
+ @abstractmethod
32
+ def rpc_backend(self):
33
+ pass
34
+
35
+ @property
36
+ @abstractmethod
37
+ def rpc_backend_options(self):
38
+ pass
39
+
40
+ def setup_fault_injection(self, faulty_messages, messages_to_delay): # noqa: B027
41
+ """Method used by dist_init to prepare the faulty agent.
42
+
43
+ Does nothing for other agents.
44
+ """
45
+
46
+ # Shutdown sequence is not well defined, so we may see any of the following
47
+ # errors when running tests that simulate errors via a shutdown on the
48
+ # remote end.
49
+ @abstractmethod
50
+ def get_shutdown_error_regex(self):
51
+ """
52
+ Return various error message we may see from RPC agents while running
53
+ tests that check for failures. This function is used to match against
54
+ possible errors to ensure failures were raised properly.
55
+ """
56
+
57
+ @abstractmethod
58
+ def get_timeout_error_regex(self):
59
+ """
60
+ Returns a partial string indicating the error we should receive when an
61
+ RPC has timed out. Useful for use with assertRaisesRegex() to ensure we
62
+ have the right errors during timeout.
63
+ """
lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/rpc_test.py ADDED
The diff for this file is too large to render. See raw diff
 
lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/tensorpipe_rpc_agent_test_fixture.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import torch.distributed.rpc as rpc
4
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
5
+ RpcAgentTestFixture,
6
+ )
7
+ from torch.testing._internal.common_distributed import (
8
+ tp_transports,
9
+ )
10
+
11
+
12
class TensorPipeRpcAgentTestFixture(RpcAgentTestFixture):
    """Fixture selecting the standard (non-faulty) TENSORPIPE backend."""

    @property
    def rpc_backend(self):
        return rpc.backend_registry.BackendType["TENSORPIPE"]

    @property
    def rpc_backend_options(self):
        return rpc.backend_registry.construct_rpc_backend_options(
            self.rpc_backend,
            init_method=self.init_method,
            _transports=tp_transports(),
        )

    def get_shutdown_error_regex(self):
        # FIXME Once we consolidate the error messages returned by the
        # TensorPipe agent put some more specific regex here.
        return "|".join(f"({p})" for p in [".*"])

    def get_timeout_error_regex(self):
        return "RPC ran for more than"
lib/python3.10/site-packages/torch/testing/_internal/dynamo_test_failures.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import logging
3
+ import os
4
+ import sys
5
+
6
+
7
+ # NOTE: [dynamo_test_failures.py]
8
+ #
9
+ # We generate xFailIfTorchDynamo* for all tests in `dynamo_expected_failures`
10
+ # We generate skipIfTorchDynamo* for all tests in `dynamo_skips`
11
+ #
12
+ # For an easier-than-manual way of generating and updating these lists,
13
+ # see scripts/compile_tests/update_failures.py
14
+ #
15
+ # If you're adding a new test, and it's failing PYTORCH_TEST_WITH_DYNAMO=1,
16
+ # either add the appropriate decorators to your test or add skips for them
17
+ # via test/dynamo_skips and test/dynamo_expected_failures.
18
+ #
19
+ # *These are not exactly unittest.expectedFailure and unittest.skip. We'll
20
+ # always execute the test and then suppress the signal, if necessary.
21
+ # If your tests crashes, or is slow, please use @skipIfTorchDynamo instead.
22
+ #
23
+ # The expected failure and skip files are located in test/dynamo_skips and
24
+ # test/dynamo_expected_failures. They're individual files rather than a list so
25
+ # git will merge changes easier.
26
+
27
+
28
def find_test_dir():
    """Locate the directory holding dynamo_expected_failures / dynamo_skips.

    Returns the path of the test directory, or None when it cannot be
    found (always None on Windows, where the lists are not used).
    """
    from os.path import abspath, basename, dirname, exists, join, normpath

    if sys.platform == "win32":
        return None

    def holds_lists(d):
        # A candidate counts only if the expected-failures dir is inside it.
        return exists(join(d, "dynamo_expected_failures"))

    # Local/source build: test/ sits three levels above this file.
    candidate = normpath(join(dirname(abspath(__file__)), "../../../test"))
    if holds_lists(candidate):
        return candidate

    # Installed build: walk upward from the __main__ test file looking for
    # a directory literally named "test" that contains the lists.
    main_file = getattr(sys.modules["__main__"], "__file__", None)
    if main_file is None:
        # Generated entry points have no module __file__.
        return None
    candidate = dirname(abspath(main_file))
    while dirname(candidate) != candidate:
        if basename(candidate) == "test" and holds_lists(candidate):
            return candidate
        candidate = dirname(candidate)

    # Not found anywhere.
    return None
56
+
57
+
58
test_dir = find_test_dir()
if not test_dir:
    logger = logging.getLogger(__name__)
    logger.warning(
        "test/dynamo_expected_failures directory not found - known dynamo errors won't be skipped."
    )

# Tests that run without strict mode in PYTORCH_TEST_WITH_INDUCTOR=1.
# Please don't add anything to this list.
FIXME_inductor_non_strict = {
    "test_modules",
    "test_ops",
    "test_ops_gradients",
    "test_torch",
}

# Tests that run without resetting dynamo in PYTORCH_TEST_WITH_INDUCTOR=1.
# Please don't add anything to this list.
#
# Instead we will gradually remove items from this list. Once the list is empty,
# we will remove the list.
FIXME_inductor_dont_reset_dynamo = {
    "test_modules",
    "test_ops",
    "test_ops_gradients",
}

# Under PYTORCH_TEST_WITH_DYNAMO=1, every test named here gets a generated
# unittest.expectedFailure; see NOTE [dynamo_test_failures.py].
if test_dir is None:
    # Without the test directory we cannot load the lists; fall back to empty.
    dynamo_expected_failures = set()
    dynamo_skips = set()
else:
    # Each entry is an individual file inside these directories (easier to
    # merge in git than one huge list).
    failures_directory = os.path.join(test_dir, "dynamo_expected_failures")
    skips_directory = os.path.join(test_dir, "dynamo_skips")

    dynamo_expected_failures = set(os.listdir(failures_directory))
    dynamo_skips = set(os.listdir(skips_directory))

    # TODO: due to case sensitivity problems, for now list these files by hand
    extra_dynamo_skips = {
        "TestProxyTensorOpInfoCPU.test_make_fx_exhaustive_T_cpu_float32",
        "TestProxyTensorOpInfoCPU.test_make_fx_exhaustive_t_cpu_float32",
        "TestProxyTensorOpInfoCPU.test_make_fx_fake_exhaustive_T_cpu_float32",
        "TestProxyTensorOpInfoCPU.test_make_fx_fake_exhaustive_t_cpu_float32",
        "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_T_cpu_float32",
        "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_t_cpu_float32",
        "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_inplace_T_cpu_float32",
        "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_inplace_t_cpu_float32",
        "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_out_T_cpu_float32",
        "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_out_t_cpu_float32",
    }
    dynamo_skips |= extra_dynamo_skips


# Invariant: every entry is exactly "TestClass.test_name".
for test in dynamo_expected_failures | dynamo_skips:
    if len(test.split(".")) != 2:
        raise AssertionError(f'Invalid test name: "{test}"')

# Invariant: a test is either expected-to-fail or skipped, never both.
intersection = dynamo_expected_failures & dynamo_skips
if len(intersection) > 0:
    raise AssertionError(
        "there should be no overlap between dynamo_expected_failures "
        "and dynamo_skips, got " + str(intersection)
    )
lib/python3.10/site-packages/torch/testing/_internal/hop_db.py ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import functools
4
+ import unittest
5
+
6
+ import torch
7
+ from functorch.experimental.control_flow import map
8
+ from torch.nn.attention.flex_attention import _create_empty_block_mask, flex_attention
9
+ from torch.testing import make_tensor
10
+ from torch.testing._internal.common_device_type import onlyCUDA
11
+ from torch.testing._internal.common_dtype import all_types_and, custom_types
12
+ from torch.testing._internal.opinfo.core import DecorateInfo, OpInfo, SampleInput
13
+ from torch._higher_order_ops.invoke_subgraph import mark_compile_region
14
+
15
def sample_inputs_map(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield one sample for the map HOP: a two-tensor list plus two 1-element extras."""
    gen = functools.partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
    )
    xs = [gen(2, 2, 2, low=0.1, high=2), gen(2, 2, 2, low=0.1, high=2)]
    extras = (gen(1, low=0.1, high=2), gen(1, low=0.1, high=2))
    yield SampleInput(xs, args=extras)
23
+
24
+
25
def inner_f(x, y0, y1):
    """Elementwise body shared by the map-HOP variants.

    Note the in-place ops (add_, cos_) act on freshly created temporaries
    (cos()/+ results), so the inputs themselves are not mutated.
    """
    first = x[0].cos().add_(1.0) * y0
    second = (x[1] + y1.sin()).cos_().view(x[1].size())
    return [first, second]
27
+
28
+
29
def simple_map(xs, y0, y1):
    """Single-level map over ``xs`` applying inner_f."""
    def body(x, y0, y1):
        return inner_f(x, y0, y1)

    return map(body, xs, y0, y1)


def nested_map(xs, y0, y1):
    """Two nested map HOPs applying inner_f at the innermost level."""
    def outer(xx, y0, y1):
        def inner(x, y0, y1):
            return inner_f(x, y0, y1)

        return map(inner, xx, y0, y1)

    return map(outer, xs, y0, y1)


def triple_nested_map(xs, y0, y1):
    """Three nested map HOPs applying inner_f at the innermost level."""
    def level0(xs, y0, y1):
        def level1(xx, y0, y1):
            def level2(x, y0, y1):
                return inner_f(x, y0, y1)

            return map(level2, xx, y0, y1)

        return map(level1, xs, y0, y1)

    return map(level0, xs, y0, y1)
57
+
58
+
59
# Please consult with torch.export team before
# adding new entry to this list.
hop_that_doesnt_have_opinfo_test_allowlist = [
    "custom_function_call",
    "autograd_function_apply",
    "run_and_save_rng_state",
    "run_with_rng_state",
    "out_dtype",
    "trace_wrapped",
    "map",  # T183144629
    "map_impl",
    "with_effects",
    "strict_mode",
    "_export_tracepoint",
    "call_torchbind",
    "triton_kernel_wrapper_mutation",
    "triton_kernel_wrapper_functional",
    "hints_wrapper",
]
78
+
79
+ torch.library.define(
80
+ "testlib::mutating_custom_op",
81
+ "(Tensor(a!) x, Tensor(b!) z) -> (Tensor, Tensor, Tensor)",
82
+ tags=torch.Tag.pt2_compliant_tag,
83
+ )
84
+
85
+
86
+ @torch.library.impl("testlib::mutating_custom_op", "cpu")
87
+ def foo_impl_cpu(x, z):
88
+ x.add_(5)
89
+ z.add_(5)
90
+ return x, z, x + z
91
+
92
+
93
+ @torch.library.impl("testlib::mutating_custom_op", "cuda")
94
+ def foo_impl_cuda(x, z):
95
+ x.add_(5)
96
+ z.add_(5)
97
+ return x, z, x + z
98
+
99
+
100
+ @torch.library.register_fake("testlib::mutating_custom_op")
101
+ def foo_impl_abstract(x, z):
102
+ return x, z, x + z
103
+
104
+
105
def sample_inputs_cond(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield one (2, 2, 2) tensor sample for the cond HOP."""
    gen = functools.partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
    )
    yield SampleInput(gen(2, 2, 2, low=0.1, high=2))


def simple_cond(x):
    """cond HOP choosing the cos/sin branch on the input's sum."""
    return torch.cond(x.sum() > 2, lambda x: (x.cos(),), lambda x: (x.sin(),), [x])
114
+
115
+
116
def sample_inputs_invoke_subgraph(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield one (2, 2, 2) tensor sample for the invoke_subgraph HOP."""
    gen = functools.partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
    )
    yield SampleInput(gen(2, 2, 2, low=0.1, high=2))


@mark_compile_region
def fn_for_invoke_subgraph(x):
    # Marked as a compile region so calls lower to invoke_subgraph HOPs.
    return torch.sin(x)

def simple_invoke_subgraph(x):
    """Entry point exercising a mark_compile_region-wrapped function."""
    return fn_for_invoke_subgraph(x)
129
+
130
+
131
def sample_inputs_auto_functionalize(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield two mutable tensors; requires_grad is forced off since the op mutates."""
    gen = functools.partial(
        make_tensor, device=device, dtype=dtype, requires_grad=False
    )
    yield SampleInput(gen(2, 2, 2, low=0.1, high=2), gen(2, 2, 2, low=0.1, high=2))


def simple_auto_functionalize(x, z):
    """Call the mutating custom op registered as testlib::mutating_custom_op."""
    return torch.ops.testlib.mutating_custom_op(x, z)
142
+
143
+
144
def sample_inputs_flex_attention(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield q/k/v tensors plus a head-biased score_mod and an empty block mask."""
    gen = functools.partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
    )

    def score_mod(score, b, h, m, n):
        # Bias each score by its head index.
        return score + h

    q, k, v = (gen(2, 2, 128, 8, low=0.1, high=2) for _ in range(3))
    yield SampleInput(q, k, v, score_mod, _create_empty_block_mask(q, k))
155
+
156
+
157
def sample_inputs_while_loop(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield a loop counter plus a (2, 3, 4) carried tensor (no grad)."""
    gen = functools.partial(
        make_tensor, device=device, dtype=dtype, requires_grad=False
    )
    yield SampleInput(torch.tensor(3), gen(2, 3, 4, low=0.1, high=2))


def simple_while_loop(iter_t, x):
    """Apply cos() to ``x`` ``iter_t`` times via the while_loop HOP."""
    def cond_fn(iter_t, x):
        # Keep iterating while the counter is positive.
        return iter_t > 0

    def body_fn(iter_t, x):
        return iter_t - 1, x.cos()

    return torch._higher_order_ops.while_loop(cond_fn, body_fn, (iter_t, x))
175
+
176
+
177
def sample_inputs_scan(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield an initial carry (2, 2) and a stack of scanned slices (2, 2, 2)."""
    gen = functools.partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
    )
    yield SampleInput(gen(2, 2, low=0.1, high=2), gen(2, 2, 2, low=0.1, high=2))


def simple_scan(init, xs):
    """scan HOP whose combine step is ``carry @ x + x``, emitting the carry history."""
    def step(carry, x):
        new_carry = carry @ x + x
        return new_carry, carry.clone()

    return torch._higher_order_ops.scan(step, init, xs)
194
+
195
+
196
# Keyword arguments common to every HOP OpInfo entry: HOPs expose no out=
# variant, and none of the batched-gradient checks apply to them.
_common_hop_kwargs = {
    "supports_out": False,
    "check_batched_grad": False,
    "check_batched_gradgrad": False,
    "check_batched_forward_grad": False,
    "check_inplace_batched_forward_grad": False,
}


def _flex_attention_skips():
    # Export-related tests are known failures for flex_attention; a fresh
    # tuple of DecorateInfo instances is built for each OpInfo entry.
    return (
        DecorateInfo(unittest.expectedFailure, "TestHOP", "test_aot_export"),
        DecorateInfo(
            unittest.expectedFailure, "TestHOP", "test_pre_dispatch_export"
        ),
        DecorateInfo(unittest.expectedFailure, "TestHOP", "test_serialize_export"),
        DecorateInfo(unittest.expectedFailure, "TestHOP", "test_retrace_export"),
    )


hop_db = [
    OpInfo(
        name="scan",
        variant_test_name="simple",
        op=simple_scan,
        sample_inputs_func=sample_inputs_scan,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=False,
        # "torch.compile with aot_autograd does not currently support double backward."
        supports_gradgrad=False,
        **_common_hop_kwargs,
    ),
    OpInfo(
        name="invoke_subgraph",
        variant_test_name="simple",
        op=simple_invoke_subgraph,
        sample_inputs_func=sample_inputs_invoke_subgraph,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=True,
        # "torch.compile with aot_autograd does not currently support double backward."
        supports_gradgrad=False,
        **_common_hop_kwargs,
    ),
    OpInfo(
        name="map",
        variant_test_name="simple",
        op=simple_map,
        sample_inputs_func=sample_inputs_map,
        dtypes=all_types_and(torch.bool, torch.half),
        **_common_hop_kwargs,
    ),
    OpInfo(
        name="map",
        variant_test_name="nested",
        op=nested_map,
        sample_inputs_func=sample_inputs_map,
        dtypes=all_types_and(torch.bool, torch.half),
        **_common_hop_kwargs,
    ),
    OpInfo(
        name="map",
        variant_test_name="triple_nested",
        op=triple_nested_map,
        sample_inputs_func=sample_inputs_map,
        dtypes=all_types_and(torch.bool, torch.half),
        **_common_hop_kwargs,
    ),
    OpInfo(
        name="cond",
        variant_test_name="simple",
        op=simple_cond,
        sample_inputs_func=sample_inputs_cond,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=True,
        # "torch.compile with aot_autograd does not currently support double backward."
        supports_gradgrad=False,
        **_common_hop_kwargs,
    ),
    OpInfo(
        name="while_loop",
        variant_test_name="simple",
        op=simple_while_loop,
        sample_inputs_func=sample_inputs_while_loop,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=False,
        **_common_hop_kwargs,
    ),
    OpInfo(
        name="auto_functionalize",
        variant_test_name="simple",
        op=simple_auto_functionalize,
        sample_inputs_func=sample_inputs_auto_functionalize,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=False,
        **_common_hop_kwargs,
    ),
    OpInfo(
        name="flex_attention",
        variant_test_name="simple",
        op=flex_attention,
        sample_inputs_func=sample_inputs_flex_attention,
        dtypes=custom_types(torch.float16, torch.float32),
        skips=_flex_attention_skips(),
        decorators=[onlyCUDA],
        **_common_hop_kwargs,
    ),
    OpInfo(
        name="flex_attention_backward",
        variant_test_name="simple",
        op=flex_attention,
        sample_inputs_func=sample_inputs_flex_attention,
        dtypes=custom_types(torch.float16, torch.float32),
        skips=_flex_attention_skips(),
        decorators=[onlyCUDA],
        **_common_hop_kwargs,
    ),
]
lib/python3.10/site-packages/torch/testing/_internal/hypothesis_utils.py ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ from collections import defaultdict
4
+ from collections.abc import Iterable
5
+ import numpy as np
6
+ import torch
7
+
8
+ import hypothesis
9
+ from functools import reduce
10
+ from hypothesis import assume
11
+ from hypothesis import settings
12
+ from hypothesis import strategies as st
13
+ from hypothesis.extra import numpy as stnp
14
+ from hypothesis.strategies import SearchStrategy
15
+
16
+ from torch.testing._internal.common_quantized import _calculate_dynamic_qparams, _calculate_dynamic_per_channel_qparams
17
+
18
# Setup for the hypothesis tests.
# The tuples are (torch_quantized_dtype, zero_point_enforce), where the last
# element is enforced zero_point. If None, any zero_point point within the
# range of the data type is OK.

# Every quantized integer dtype the strategies below may sample from.
_ALL_QINT_TYPES = (
    torch.quint8,
    torch.qint8,
    torch.qint32,
)

# Per-dtype enforced zero point. A value of None means any zero_point inside
# the dtype's range is acceptable; unknown dtypes also map to None via the
# defaultdict factory.
_ENFORCED_ZERO_POINT = defaultdict(lambda: None, {
    torch.quint8: None,
    torch.qint8: None,
    torch.qint32: 0,
})
37
+
38
def _get_valid_min_max(qparams):
    """Compute the float interval that quantizes without overflowing int64.

    Args:
        qparams: Tuple ``(scale, zero_point, quantized_dtype)`` as produced by
            the ``qparams`` strategy (the dtype element is unused here).

    Returns:
        ``(min, max)`` as ``np.float32`` values; data clamped to this range
        keeps the intermediate quantization arithmetic within ``torch.long``.
    """
    scale, zero_point, _ = qparams
    # Shrink the long range slightly so float rounding cannot step past it.
    pad = 1 + torch.finfo(torch.float).eps
    long_info = torch.iinfo(torch.long)
    lo, hi = long_info.min / pad, long_info.max / pad
    # Both the quantize and dequantize intermediates must stay in [lo, hi].
    min_value = max((lo - zero_point) * scale, (lo / scale + zero_point))
    max_value = min((hi - zero_point) * scale, (hi / scale + zero_point))
    return np.float32(min_value), np.float32(max_value)
47
+
48
def _floats_wrapper(*args, **kwargs):
    """Call ``st.floats`` while tolerating hypothesis versions before 3.67.0.

    The ``width`` keyword was introduced in hypothesis 3.67.0. For older
    releases we emulate it — when it is safe to do so — by bounding the
    strategy to the float type's representable range, then drop the keyword.
    """
    if 'width' in kwargs and hypothesis.version.__version_info__ < (3, 67, 0):
        # Emulation is only safe when the caller did not request NaN/inf and
        # did not already constrain the value range.
        wants_nan = kwargs.get('allow_nan', False)
        wants_inf = kwargs.get('allow_infinity', False)
        range_unset = (not args and
                       'min_value' not in kwargs and
                       'max_value' not in kwargs)
        if not wants_nan and not wants_inf and range_unset:
            dtype_for_width = {
                16: torch.float16,
                32: torch.float32,
                64: torch.float64,
            }.get(kwargs['width'])
            if dtype_for_width is not None:
                finfo = torch.finfo(dtype_for_width)
                kwargs['min_value'] = finfo.min
                kwargs['max_value'] = finfo.max
        kwargs.pop('width')
    return st.floats(*args, **kwargs)
77
+
78
def floats(*args, **kwargs):
    """``st.floats`` defaulting to 32-bit width, matching float32 test data."""
    kwargs.setdefault('width', 32)
    return _floats_wrapper(*args, **kwargs)
82
+
83
+ """Hypothesis filter to avoid overflows with quantized tensors.
84
+
85
+ Args:
86
+ tensor: Tensor of floats to filter
87
+ qparams: Quantization parameters as returned by the `qparams`.
88
+
89
+ Returns:
90
+ True
91
+
92
+ Raises:
93
+ hypothesis.UnsatisfiedAssumption
94
+
95
+ Note: This filter is slow. Use it only when filtering of the test cases is
96
+ absolutely necessary!
97
+ """
98
+ def assume_not_overflowing(tensor, qparams):
99
+ min_value, max_value = _get_valid_min_max(qparams)
100
+ assume(tensor.min() >= min_value)
101
+ assume(tensor.max() <= max_value)
102
+ return True
103
+
104
+ """Strategy for generating the quantization parameters.
105
+
106
+ Args:
107
+ dtypes: quantized data types to sample from.
108
+ scale_min / scale_max: Min and max scales. If None, set to 1e-3 / 1e3.
109
+ zero_point_min / zero_point_max: Min and max for the zero point. If None,
110
+ set to the minimum and maximum of the quantized data type.
111
+ Note: The min and max are only valid if the zero_point is not enforced
112
+ by the data type itself.
113
+
114
+ Generates:
115
+ scale: Sampled scale.
116
+ zero_point: Sampled zero point.
117
+ quantized_type: Sampled quantized type.
118
+ """
119
+ @st.composite
120
+ def qparams(draw, dtypes=None, scale_min=None, scale_max=None,
121
+ zero_point_min=None, zero_point_max=None):
122
+ if dtypes is None:
123
+ dtypes = _ALL_QINT_TYPES
124
+ if not isinstance(dtypes, (list, tuple)):
125
+ dtypes = (dtypes,)
126
+ quantized_type = draw(st.sampled_from(dtypes))
127
+
128
+ _type_info = torch.iinfo(quantized_type)
129
+ qmin, qmax = _type_info.min, _type_info.max
130
+
131
+ # TODO: Maybe embed the enforced zero_point in the `torch.iinfo`.
132
+ _zp_enforced = _ENFORCED_ZERO_POINT[quantized_type]
133
+ if _zp_enforced is not None:
134
+ zero_point = _zp_enforced
135
+ else:
136
+ _zp_min = qmin if zero_point_min is None else zero_point_min
137
+ _zp_max = qmax if zero_point_max is None else zero_point_max
138
+ zero_point = draw(st.integers(min_value=_zp_min, max_value=_zp_max))
139
+
140
+ if scale_min is None:
141
+ scale_min = torch.finfo(torch.float).eps
142
+ if scale_max is None:
143
+ scale_max = torch.finfo(torch.float).max
144
+ scale = draw(floats(min_value=scale_min, max_value=scale_max, width=32))
145
+
146
+ return scale, zero_point, quantized_type
147
+
148
+ """Strategy to create different shapes.
149
+ Args:
150
+ min_dims / max_dims: minimum and maximum rank.
151
+ min_side / max_side: minimum and maximum dimensions per rank.
152
+
153
+ Generates:
154
+ Possible shapes for a tensor, constrained to the rank and dimensionality.
155
+
156
+ Example:
157
+ # Generates 3D and 4D tensors.
158
+ @given(Q = qtensor(shapes=array_shapes(min_dims=3, max_dims=4))
159
+ some_test(self, Q):...
160
+ """
161
+ @st.composite
162
+ def array_shapes(draw, min_dims=1, max_dims=None, min_side=1, max_side=None, max_numel=None):
163
+ """Return a strategy for array shapes (tuples of int >= 1)."""
164
+ assert min_dims < 32
165
+ if max_dims is None:
166
+ max_dims = min(min_dims + 2, 32)
167
+ assert max_dims < 32
168
+ if max_side is None:
169
+ max_side = min_side + 5
170
+ candidate = st.lists(st.integers(min_side, max_side), min_size=min_dims, max_size=max_dims)
171
+ if max_numel is not None:
172
+ candidate = candidate.filter(lambda x: reduce(int.__mul__, x, 1) <= max_numel)
173
+ return draw(candidate.map(tuple))
174
+
175
+
176
+ """Strategy for generating test cases for tensors.
177
+ The resulting tensor is in float32 format.
178
+
179
+ Args:
180
+ shapes: Shapes under test for the tensor. Could be either a hypothesis
181
+ strategy, or an iterable of different shapes to sample from.
182
+ elements: Elements to generate from for the returned data type.
183
+ If None, the strategy resolves to float within range [-1e6, 1e6].
184
+ qparams: Instance of the qparams strategy. This is used to filter the tensor
185
+ such that the overflow would not happen.
186
+
187
+ Generates:
188
+ X: Tensor of type float32. Note that NaN and +/-inf is not included.
189
+ qparams: (If `qparams` arg is set) Quantization parameters for X.
190
+ The returned parameters are `(scale, zero_point, quantization_type)`.
191
+ (If `qparams` arg is None), returns None.
192
+ """
193
+ @st.composite
194
+ def tensor(draw, shapes=None, elements=None, qparams=None, dtype=np.float32):
195
+ if isinstance(shapes, SearchStrategy):
196
+ _shape = draw(shapes)
197
+ else:
198
+ _shape = draw(st.sampled_from(shapes))
199
+ if qparams is None:
200
+ if elements is None:
201
+ elements = floats(-1e6, 1e6, allow_nan=False, width=32)
202
+ X = draw(stnp.arrays(dtype=dtype, elements=elements, shape=_shape))
203
+ assume(not (np.isnan(X).any() or np.isinf(X).any()))
204
+ return X, None
205
+ qparams = draw(qparams)
206
+ if elements is None:
207
+ min_value, max_value = _get_valid_min_max(qparams)
208
+ elements = floats(min_value, max_value, allow_infinity=False,
209
+ allow_nan=False, width=32)
210
+ X = draw(stnp.arrays(dtype=dtype, elements=elements, shape=_shape))
211
+ # Recompute the scale and zero_points according to the X statistics.
212
+ scale, zp = _calculate_dynamic_qparams(X, qparams[2])
213
+ enforced_zp = _ENFORCED_ZERO_POINT.get(qparams[2], None)
214
+ if enforced_zp is not None:
215
+ zp = enforced_zp
216
+ return X, (scale, zp, qparams[2])
217
+
218
@st.composite
def per_channel_tensor(draw, shapes=None, elements=None, qparams=None):
    """Like `tensor`, but recomputes per-channel quantization parameters and
    moves a randomly chosen axis to the front to model quantization along
    that axis. Returns ``(X, None)`` or ``(X, (scale, zp, axis, dtype))``."""
    shape = (draw(shapes) if isinstance(shapes, SearchStrategy)
             else draw(st.sampled_from(shapes)))
    if qparams is None:
        if elements is None:
            elements = floats(-1e6, 1e6, allow_nan=False, width=32)
        X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=shape))
        assume(not (np.isnan(X).any() or np.isinf(X).any()))
        return X, None
    qparams = draw(qparams)
    if elements is None:
        lo, hi = _get_valid_min_max(qparams)
        elements = floats(lo, hi, allow_infinity=False,
                          allow_nan=False, width=32)
    X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=shape))
    # Recompute the scale and zero_points according to the X statistics.
    scale, zp = _calculate_dynamic_per_channel_qparams(X, qparams[2])
    enforced_zp = _ENFORCED_ZERO_POINT.get(qparams[2], None)
    if enforced_zp is not None:
        zp = enforced_zp
    # Swap axis 0 with a random axis to model quantization along that axis.
    axis = int(np.random.randint(0, X.ndim, 1))
    permutation = np.arange(X.ndim)
    permutation[0] = axis
    permutation[axis] = 0
    X = np.transpose(X, permutation)

    return X, (scale, zp, axis, qparams[2])
249
+
250
+ """Strategy for generating test cases for tensors used in Conv.
251
+ The resulting tensors is in float32 format.
252
+
253
+ Args:
254
+ spatial_dim: Spatial Dim for feature maps. If given as an iterable, randomly
255
+ picks one from the pool to make it the spatial dimension
256
+ batch_size_range: Range to generate `batch_size`.
257
+ Must be tuple of `(min, max)`.
258
+ input_channels_per_group_range:
259
+ Range to generate `input_channels_per_group`.
260
+ Must be tuple of `(min, max)`.
261
+ output_channels_per_group_range:
262
+ Range to generate `output_channels_per_group`.
263
+ Must be tuple of `(min, max)`.
264
+ feature_map_range: Range to generate feature map size for each spatial_dim.
265
+ Must be tuple of `(min, max)`.
266
+ kernel_range: Range to generate kernel size for each spatial_dim. Must be
267
+ tuple of `(min, max)`.
268
+ max_groups: Maximum number of groups to generate.
269
+ elements: Elements to generate from for the returned data type.
270
+ If None, the strategy resolves to float within range [-1e6, 1e6].
271
+ qparams: Strategy for quantization parameters. for X, w, and b.
272
+ Could be either a single strategy (used for all) or a list of
273
+ three strategies for X, w, b.
274
+ Generates:
275
+ (X, W, b, g): Tensors of type `float32` of the following drawen shapes:
276
+ X: (`batch_size, input_channels, H, W`)
277
+ W: (`output_channels, input_channels_per_group) + kernel_shape
278
+ b: `(output_channels,)`
279
+ groups: Number of groups the input is divided into
280
+ Note: X, W, b are tuples of (Tensor, qparams), where qparams could be either
281
+ None or (scale, zero_point, quantized_type)
282
+
283
+
284
+ Example:
285
+ @given(tensor_conv(
286
+ spatial_dim=2,
287
+ batch_size_range=(1, 3),
288
+ input_channels_per_group_range=(1, 7),
289
+ output_channels_per_group_range=(1, 7),
290
+ feature_map_range=(6, 12),
291
+ kernel_range=(3, 5),
292
+ max_groups=4,
293
+ elements=st.floats(-1.0, 1.0),
294
+ qparams=qparams()
295
+ ))
296
+ """
297
+ @st.composite
298
+ def tensor_conv(
299
+ draw, spatial_dim=2, batch_size_range=(1, 4),
300
+ input_channels_per_group_range=(3, 7),
301
+ output_channels_per_group_range=(3, 7), feature_map_range=(6, 12),
302
+ kernel_range=(3, 7), max_groups=1, can_be_transposed=False,
303
+ elements=None, qparams=None
304
+ ):
305
+
306
+ # Resolve the minibatch, in_channels, out_channels, iH/iW, iK/iW
307
+ batch_size = draw(st.integers(*batch_size_range))
308
+ input_channels_per_group = draw(
309
+ st.integers(*input_channels_per_group_range))
310
+ output_channels_per_group = draw(
311
+ st.integers(*output_channels_per_group_range))
312
+ groups = draw(st.integers(1, max_groups))
313
+ input_channels = input_channels_per_group * groups
314
+ output_channels = output_channels_per_group * groups
315
+
316
+ if isinstance(spatial_dim, Iterable):
317
+ spatial_dim = draw(st.sampled_from(spatial_dim))
318
+
319
+ feature_map_shape = [draw(st.integers(*feature_map_range)) for _ in range(spatial_dim)]
320
+
321
+ kernels = [draw(st.integers(*kernel_range)) for _ in range(spatial_dim)]
322
+
323
+ tr = False
324
+ weight_shape = (output_channels, input_channels_per_group) + tuple(kernels)
325
+ bias_shape = output_channels
326
+ if can_be_transposed:
327
+ tr = draw(st.booleans())
328
+ if tr:
329
+ weight_shape = (input_channels, output_channels_per_group) + tuple(kernels)
330
+ bias_shape = output_channels
331
+
332
+ # Resolve the tensors
333
+ if qparams is not None:
334
+ if isinstance(qparams, (list, tuple)):
335
+ assert len(qparams) == 3, "Need 3 qparams for X, w, b"
336
+ else:
337
+ qparams = [qparams] * 3
338
+
339
+ X = draw(tensor(shapes=(
340
+ (batch_size, input_channels) + tuple(feature_map_shape),),
341
+ elements=elements, qparams=qparams[0]))
342
+ W = draw(tensor(shapes=(weight_shape,), elements=elements,
343
+ qparams=qparams[1]))
344
+ b = draw(tensor(shapes=(bias_shape,), elements=elements,
345
+ qparams=qparams[2]))
346
+
347
+ return X, W, b, groups, tr
348
+
349
# We set the deadline in the currently loaded profile.
# Creating (and loading) a separate profile overrides any settings the user
# already specified.
hypothesis_version = hypothesis.version.__version_info__
current_settings = settings._profiles[settings._current_profile].__dict__
current_settings['deadline'] = None
if (3, 16, 0) <= hypothesis_version < (5, 0, 0):
    # `timeout` exists only in this version window.
    current_settings['timeout'] = hypothesis.unlimited


def assert_deadline_disabled():
    """Check that the hypothesis deadline is off; warn on outdated versions."""
    if hypothesis_version < (3, 27, 0):
        import warnings
        warning_message = (
            "Your version of hypothesis is outdated. "
            "To avoid `DeadlineExceeded` errors, please update. "
            f"Current hypothesis version: {hypothesis.__version__}"
        )
        warnings.warn(warning_message)
    else:
        assert settings().deadline is None
lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py ADDED
@@ -0,0 +1,722 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ # Torch
4
+ from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
5
+ import torch.nn.functional as F
6
+ import torch
7
+ import torch.cuda
8
+ import torch.jit
9
+ import torch.jit._logging
10
+ import torch.jit.frontend
11
+ from torch.testing._internal.common_nn import module_tests, new_module_tests
12
+ from torch.testing._internal.common_utils import is_iterable_of_tensors, noncontiguous_like
13
+
14
+ import collections
15
+ from copy import deepcopy
16
+ from typing import Any, Dict, List, Union
17
+ import math # noqa: F401
18
+
19
+ # Testing utils
20
+ from torch import inf
21
+
22
# The tests defined below assume the process-wide default dtype is float32.
assert torch.get_default_dtype() == torch.float32

# Canonical tensor-dimension sizes used by the test tables in this module.
L = 20  # large
M = 10  # medium
S = 5   # small
27
+
28
+
29
def unpack_variables(args):
    """Recursively rebuild nested tuples, returning non-tuple values as-is."""
    if not isinstance(args, tuple):
        return args
    return tuple(unpack_variables(item) for item in args)
34
+
35
class dont_convert(tuple):
    """Marker tuple subclass: `create_input` passes these through unchanged."""
37
+
38
# Wrapper marking a tensor argument that `create_input` must not make require grad.
non_differentiable = collections.namedtuple('non_differentiable', ['tensor'])
39
+
40
def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.float, device=None):
    """Materialize test inputs: shape tuples become random tensors.

    Args:
        call_args: A value, or tuple of values, where each entry is either a
            shape tuple (turned into a random tensor), a tensor, a
            ``non_differentiable`` wrapper, a callable producing an argument,
            a ``torch.Size``/``dont_convert`` (passed through), or a plain
            value passed through unchanged.
        requires_grad: Whether created floating/complex tensors require grad.
        non_contiguous: If True, tensors with >= 2 elements are rebuilt as
            non-contiguous via ``noncontiguous_like``.
        call_kwargs: Optional dict whose values get the same conversion.
        dtype: dtype for tensors created from shape tuples.
        device: Device for all created/moved tensors.

    Returns:
        ``(args, kwargs)`` with every entry converted per the rules above.

    Raises:
        RuntimeError: if a user-provided tensor's realness/complexness does
            not match `dtype`.
    """
    if not isinstance(call_args, tuple):
        call_args = (call_args,)

    def map_arg(arg):
        def maybe_non_contig(tensor):
            if not non_contiguous or tensor.numel() < 2:
                return tensor.clone()

            return noncontiguous_like(tensor)

        def conjugate(tensor):
            return tensor.conj()

        if isinstance(arg, (torch.Size, dont_convert)):
            return arg
        elif isinstance(arg, tuple) and len(arg) == 0:
            var = conjugate(torch.randn((), dtype=dtype, device=device))
            var.requires_grad = requires_grad
            return var
        elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
            # A tuple of non-tensors is interpreted as a shape.
            return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad)
        elif isinstance(arg, non_differentiable):
            # BUGFIX: the original had an `if isinstance(arg.tensor,
            # torch.Tensor)` whose two branches were identical; collapsed.
            # requires_grad is intentionally never set on this path.
            return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
        elif isinstance(arg, torch.Tensor):
            if arg.is_complex() != dtype.is_complex:
                # BUGFIX: message was split by a stray comma into two
                # exception args; now a single concatenated message.
                raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, "
                                   "which is not supported for now")
            # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards
            v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone()
            v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())
            return v
        elif callable(arg):
            return map_arg(arg(dtype=dtype, device=device))
        else:
            return arg
    args_out = tuple(map_arg(arg) for arg in call_args)
    kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
    return args_out, kwargs_out
82
+
83
+ # NB: JIT script tests for all nn functional interfaces, script mode does
84
+ # not support in_place operations yet, so no inplace operation tests added.
85
+ # removed all the deprecated functions
86
+ #
87
+ # (
88
+ # method name,
89
+ # input size/constructing fn,
90
+ # args (tuple represents shape of a tensor arg),
91
+ # test variant name(will be used at test name suffix,
92
+ # 'inplace' skips grad tests), // optional
93
+ # (True, nonfusible_nodes, fusible_nodes) for autodiff // optional
94
+ # fn to determine if test should be skipped, // optional
95
+ # fn mapping output to part that should be gradcheck'ed, // optional
96
+ # kwargs for function, // optional
97
+ # )
98
+ nn_functional_tests = [
99
+ ('conv1d', (S, S, S), ((S, S, S),)),
100
+ ('conv2d', (S, S, S, S), ((S, S, S, S),)),
101
+ ('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)),
102
+ ('conv_transpose1d', (S, S, S), ((S, S, S),)),
103
+ ('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)),
104
+ ('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)),
105
+ ('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)),
106
+ ('avg_pool1d', (S, S, S), (3,)),
107
+ ('avg_pool2d', (S, S, S, S), (3,), '', (True,)),
108
+ ('avg_pool3d', (S, S, S, S, S), (3,)),
109
+ ('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)),
110
+ ('max_pool1d', (S, S, S), (2, 1)),
111
+ ('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'),
112
+ ('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')),
113
+ ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),
114
+ ('max_pool3d', (S, S, S, S, S), (2, 1)),
115
+ ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
116
+ ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
117
+ ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
118
+ ('lp_pool1d', (S, S, S), (2., 3, 2,)),
119
+ ('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
120
+ ('lp_pool3d', (S, S, S, S, S), (2., 3, 2,)),
121
+ ('adaptive_max_pool1d', (S, S, S), (5,)),
122
+ ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),
123
+ ('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)),
124
+ ('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)),
125
+ ('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)),
126
+ ('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)),
127
+ ('dropout', (S, S, S), (0.5,), '', (True, 'aten::native_dropout')),
128
+ ('alpha_dropout', (S, S, S), (0.5,)),
129
+ ('dropout2d', (S, S, S), (0.5,)),
130
+ ('dropout2d', (S, S, S, S), (0.5,), 'batched'),
131
+ ('dropout3d', (S, S, S, S), (0.5,)),
132
+ ('dropout3d', (S, S, S, S, S), (0.5,), 'batched'),
133
+ ('feature_alpha_dropout', (S, S, S), (0.5,)),
134
+ ('threshold', (S, S, S), (0.1, 2.), '', (True,)),
135
+ ('threshold', (S, S, S), (0.1, 2., True), 'inplace'),
136
+ ('relu', (S, S, S), (), '', (True,)),
137
+ ('relu', (S, S, S), (), 'inplace'),
138
+ ('glu', (S - 1, S - 1, S - 1), (),),
139
+ ('hardtanh', (S, S, S), (-0.5, 0.5), '', (True,)),
140
+ ('hardtanh', (S, S, S), (-0.5, 0.5, True), 'inplace'),
141
+ ('relu6', (S, S, S), (), '', (True,)),
142
+ ('relu6', (S, S, S), (True), 'inplace'),
143
+ ('elu', (S, S, S), (0.9,),),
144
+ ('elu', (S, S, S), (0.9, True), 'inplace'),
145
+ ('selu', (S, S, S), (),),
146
+ ('selu', (S, S, S), (True), 'inplace'),
147
+ ('celu', (S, S, S), (0.9,),),
148
+ ('celu', (S, S, S), (0.9, True), 'inplace'),
149
+ ('leaky_relu', (S, S, S), (0.02,), '', (True,)),
150
+ ('leaky_relu', (S, S, S), (0.02,), 'inplace'),
151
+ ('rrelu', (S, S), (0.1, 0.3, False),),
152
+ ('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'),
153
+ ('hardshrink', (S, S, S), (0.4,), '', (True,)),
154
+ ('tanhshrink', (S, S, S), (),),
155
+ ('softsign', (S, S, S), (),),
156
+ ('softplus', (S, S, S), (), '', (True,)),
157
+ ('softmin', (S, S, S), (0,),),
158
+ ('softmax', (S, S, S), (0,), '', (True,)),
159
+ ('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)),
160
+ ('tanh', (S, S, S), (), '', (True,)),
161
+ ('sigmoid', (S, S, S), (), '', (True,)),
162
+ ('silu', (S, S, S), (), '', (True,)),
163
+ ('log_softmax', (S, S, S), (0,), '', (True,)),
164
+ ('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])),
165
+ ('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])),
166
+ ('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),),
167
+ ('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)),
168
+ ('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),),
169
+ ('batch_norm', (S, S),
170
+ (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), None, None, True, ),
171
+ 'training', (True, 'aten::_batch_norm_impl_index')),
172
+ ('batch_norm', (0, S, S, S),
173
+ (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
174
+ non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
175
+ 'size_zero', (True, 'aten::_batch_norm_impl_index')),
176
+ ('batch_norm', (0, S, S, S),
177
+ (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
178
+ non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
179
+ 'size_zero_inference', (True, 'aten::_batch_norm_impl_index')),
180
+ ('batch_norm', (S, S),
181
+ (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
182
+ non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
183
+ 'with_weight_and_bias_training', (True, 'aten::_batch_norm_impl_index')),
184
+ ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
185
+ None, non_differentiable(torch.ones(S)), True, ),
186
+ 'with_only_bias_training', (True, 'aten::_batch_norm_impl_index')),
187
+ ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
188
+ non_differentiable(torch.randn(S)), None, True, ),
189
+ 'with_only_weight_training', (True, 'aten::_batch_norm_impl_index')),
190
+ ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
191
+ None, None, False, ),
192
+ 'inference', (True, 'aten::_batch_norm_impl_index')),
193
+ ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
194
+ non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), False, ),
195
+ 'with_weight_and_bias_inference', (True, 'aten::_batch_norm_impl_index')),
196
+ ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
197
+ None, non_differentiable(torch.ones(S)), False, ),
198
+ 'with_only_bias_inference', (True, 'aten::_batch_norm_impl_index')),
199
+ ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
200
+ non_differentiable(torch.randn(S)), None, False, ),
201
+ 'with_only_weight_inference', (True, 'aten::_batch_norm_impl_index')),
202
+ ('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),),
203
+ ('layer_norm', (S, S, S, S), ([5],), '',
204
+ (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
205
+ ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight',
206
+ (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
207
+ ('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias',
208
+ (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
209
+ ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),
210
+ non_differentiable(torch.rand(S))), 'with_weight_and_bias',
211
+ (False, ['aten::contiguous', 'aten::_batch_norm_impl_index', 'aten::addcmul'])),
212
+ ('group_norm', (S, S, S), (1, torch.rand(5),),),
213
+ ('local_response_norm', (S, S, S), (2, ),),
214
+ ('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '',),
215
+ ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),),
216
+ ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'),
217
+ ('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),),
218
+ ('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),),
219
+ ('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),),
220
+ ('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
221
+ ('huber_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
222
+ ('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
223
+ ('mse_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
224
+ ('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
225
+ ('huber_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
226
+ ('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
227
+ ('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
228
+ ('margin_ranking_loss', (S,), ((S,), (S,)),),
229
+ ('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
230
+ ('soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
231
+ ('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
232
+ ('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),),
233
+ ('pixel_shuffle', (1, 9, 4, 4), (3,),),
234
+ ('pixel_unshuffle', (1, 1, 12, 12), (3,),),
235
+ ('affine_grid', (S, 2, 3), (torch.Size([S, 1, 7, 7]),),),
236
+ ('pad', (3, 3, 4, 2), ([1, 1],),),
237
+ ('pairwise_distance', (S, S), ((S, S),),),
238
+ ('pdist', (S, S), (),),
239
+ ('cosine_similarity', (S, S), ((S, S),),),
240
+ ('triplet_margin_loss', (S, S), ((S, S), (S, S)),),
241
+ ('normalize', (S, S, S), (),),
242
+ ('unfold', (S, S, S, S), ([2, 3]),),
243
+ ('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),),
244
+ ('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),),
245
+ ('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
246
+ ('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
247
+ ('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),),
248
+ ('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)),
249
+ 1, 1., non_differentiable(torch.randn(S))),),
250
+ ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)),
251
+ non_differentiable(torch.randn(3, 2))),),
252
+ ('binary_cross_entropy', torch.randn(3, 2).sigmoid(),
253
+ (non_differentiable(torch.rand(3, 2)),
254
+ non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 'size_average'),
255
+ ('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(),
256
+ (torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long),
257
+ torch.randint(1, S, (S,), dtype=torch.long))),
258
+ ('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'),
259
+ ('upsample', torch.randn(S, S, M, M), (4,), 'with_size'),
260
+ ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'),
261
+ ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'),
262
+ ('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'),
263
+ ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'),
264
+ ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'area_4d_with_scale'),
265
+ ('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'),
266
+ ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'),
267
+ ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'),
268
+ ('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'),
269
+ ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'),
270
+ ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'),
271
+ ('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'),
272
+ ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'),
273
+ ('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'),
274
+ ('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'),
275
+ ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'),
276
+ ('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'),
277
+ ('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'),
278
+ ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'),
279
+ ('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'),
280
+ ('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'),
281
+ ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'nearest_5d_with_scale'),
282
+ ('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'),
283
+ ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'),
284
+ ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'),
285
+ ('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'),
286
+ ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'),
287
+ ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'),
288
+ ('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'),
289
+ ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False),
290
+ 'nearest_4d_not_recompute_scale_factor'),
291
+ ('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False),
292
+ 'nearest_4d_with_size_not_recompute_scale_factor'),
293
+ ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False),
294
+ 'bilinear_4d_with_scale_not_recompute_scale_factor'),
295
+ ('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False),
296
+ 'bilinear_4d_with_size_not_recompute_scale_factor'),
297
+ ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False),
298
+ 'bicubic_4d_with_scale_not_recompute_scale_factor'),
299
+ ('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False),
300
+ 'bicubic_4d_with_size_not_recompute_scale_factor'),
301
+ ('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False),
302
+ 'nearest_3d_with_scale_not_recompute_scale_factor'),
303
+ ('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False),
304
+ 'nearest_3d_with_size_not_recompute_scale_factor'),
305
+ ('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False),
306
+ 'linear_3d_with_scale_not_recompute_scale_factor'),
307
+ ('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False),
308
+ 'linear_3d_with_size_not_recompute_scale_factor'),
309
+ ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False),
310
+ 'nearest_5d_with_scale_not_recompute_scale_factor'),
311
+ ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False),
312
+ 'nearest_5d_with_size_not_recompute_scale_factor'),
313
+ ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False),
314
+ 'trilinear_5d_with_scale_not_recompute_scale_factor'),
315
+ ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False),
316
+ 'trilinear_5d_with_size_not_recompute_scale_factor'),
317
+ ]
318
+
319
# Source template for a standalone TorchScript function: the first placeholder
# receives the comma-separated formal parameter list, the second the call
# expression to return.
script_template = '''
def the_method({}):
    return {}
'''
323
+
324
def value_to_literal(value):
    """Render *value* as Python source text suitable for a generated script."""
    if isinstance(value, str):
        # ascii() both quotes the string and escapes special characters.
        return ascii(value)
    if isinstance(value, torch.Tensor):
        # str() of a Tensor prints e.g. "tensor([...])"; prefixing "torch."
        # turns it into a valid call expression.
        return 'torch.' + str(value)
    return str(value)
332
+
333
def get_call(method_name, func_type, args, kwargs):
    """Build the source text of a call to *method_name*.

    func_type selects the spelling: 'functional'/'function' -> torch.<name>,
    'method' -> <first arg>.<name>, 'nn_functional' -> torch.nn.functional.<name>.
    Raises TypeError for any other func_type.
    """
    kwargs_str = ', '.join(f'{k}={value_to_literal(v)}' for k, v in kwargs.items())
    self_arg = args[0]
    if func_type == 'method':
        # The first positional argument becomes the receiver.
        args = args[1:]

    argument_str = ', '.join(args)
    if args and kwargs:
        argument_str += ', '
    argument_str += kwargs_str

    if func_type in ('functional', 'function'):
        return f'torch.{method_name}({argument_str})'
    if func_type == 'method':
        return f'{self_arg}.{method_name}({argument_str})'
    if func_type == 'nn_functional':
        return f'torch.nn.functional.{method_name}({argument_str})'
    raise TypeError('Unsupported function type')
353
+
354
def get_constant(x):
    """Map non-representable float constants to a script-source spelling.

    inf, -inf and nan have no literal form in generated TorchScript source,
    so they are replaced by their `math` module expressions; every other
    value passes through unchanged (callers str() it into the script text).
    """
    if x == inf:
        return 'math.inf'
    if x == -inf:
        return '-math.inf'
    # Bug fix: NaN compares unequal to everything (including itself), so it
    # previously fell through and str()'d to the invalid bare name `nan`.
    if isinstance(x, float) and x != x:
        return 'math.nan'
    return x
360
+
361
def get_script_args(args):
    """Split *args* into script formals, tensor values, and call actuals.

    Tensors (and iterables of tensors) become named parameters i0, i1, ...;
    strings and other constants are inlined as literals in the call text.
    """
    formals: List[str] = []
    tensors: List[Union[torch.Tensor, List[torch.Tensor]]] = []
    actuals: List[str] = []

    def bind(value, annotation=''):
        # Turn a tensor-valued argument into the next positional formal.
        name = f'i{len(formals)}'
        formals.append(name + annotation)
        actuals.append(name)
        tensors.append(value)

    for arg in args:
        if isinstance(arg, torch.Tensor):
            bind(arg)
        elif is_iterable_of_tensors(arg):
            bind(list(arg), ': List[torch.Tensor]')
        elif isinstance(arg, str):
            actuals.append(f"'{arg}'")
        else:
            actuals.append(str(get_constant(arg)))
    return (formals, tensors, actuals)
381
+
382
# Compile (name, func_type, args, kwargs) into a TorchScript function and
# return the compiled callable together with its example tensor inputs.
def gen_script_fn_and_args(method_name, func_type, *args, **kwargs):
    formals, tensors, actuals = get_script_args(args)
    source = script_template.format(
        ', '.join(formals),
        get_call(method_name, func_type, actuals, kwargs),
    )
    cu = torch.jit.CompilationUnit(source)
    return cu.the_method, tensors
390
+
391
# create a script function from (name, func_type);
# returns a callable that takes (args, kwargs) and runs the compiled function
def create_script_fn(self, method_name, func_type):
    """Build a callable that scripts and executes `method_name` per call."""
    def script_fn(*args, **kwargs):
        compiled, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs)
        # Round-trip the graph through export/import as a sanity check.
        self.assertExportImport(compiled.graph, tensors)
        result = compiled(*tensors)
        # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
        script_fn.last_graph = compiled.graph_for(*tensors)  # type: ignore[attr-defined]
        return result
    return script_fn
404
+
405
class SplitInputs:
    """Partition call args/kwargs into their tensor and non-tensor parts."""

    all_tensors: List[Any]
    tensor_args: List[Any]
    nontensor_args: List[Any]
    arg_types: List[str]
    tensor_kwargs: Dict[str, Any]
    kwarg_order: List[str]
    nontensor_kwargs: Dict[str, Any]
    kwarg_types: Dict[str, Any]

    @staticmethod
    def _is_tensor_input(arg):
        # A "tensor input" is a Tensor or any iterable of Tensors.
        return isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg)

    def __init__(self, args, kwargs):
        is_tensor = self._is_tensor_input
        # 't'/'s' signature strings record where tensors sit in the call.
        self.arg_types = ['t' if is_tensor(a) else 's' for a in args]
        self.kwarg_types = {k: 't' if is_tensor(v) else 's' for k, v in kwargs.items()}
        self.tensor_args = [a for a in args if is_tensor(a)]
        self.nontensor_args = [a for a in args if not is_tensor(a)]
        self.tensor_kwargs = {k: v for k, v in kwargs.items() if is_tensor(v)}
        self.nontensor_kwargs = {k: v for k, v in kwargs.items() if not is_tensor(v)}
        self.all_tensors = [*self.tensor_args, *self.tensor_kwargs.values()]
        self.kwarg_order = list(kwargs)

    def nontensors_match(self, other: 'SplitInputs'):
        """True when everything except the tensor values lines up with *other*."""
        return (
            self.arg_types == other.arg_types
            and self.kwarg_types == other.kwarg_types
            and self.kwarg_order == other.kwarg_order
            and self.nontensor_args == other.nontensor_args
            and self.nontensor_kwargs == other.nontensor_kwargs
        )
441
+
442
# Partially apply every non-tensor argument of `fn`, leaving a function of the
# tensor arguments only — used so functions with non-tensor arguments can be
# traced.
def partial_apply_nontensors(fn, args, kwargs):
    inputs = SplitInputs(args, kwargs)

    def new_fn(*tensors_):
        it = iter(tensors_)
        # Re-interleave the supplied tensors with the captured non-tensors,
        # following the recorded 't'/'s' signature.
        full_args = [next(it) if t == 't' else args[i]
                     for i, t in enumerate(inputs.arg_types)]
        full_kwargs = {k: next(it) if t == 't' else kwargs[k]
                       for k, t in inputs.kwarg_types.items()}
        return fn(*full_args, **full_kwargs)

    return new_fn, inputs
455
+
456
# create a trace function from input fn
def create_traced_fn(self, fn, cache_traced_fn=False):
    """Wrap `fn` so each call traces it (or reuses a cached trace) and runs it.

    When `cache_traced_fn` is True the first call's trace is stored on the
    returned closure and reused; later calls only verify that the non-tensor
    arguments match those seen at trace time.
    """
    def traced_fn(*inputs, **kwargs):
        # `check_trace` is set to False because check_trace is run with @no_grad
        # Also, `check_against_reference` already does all the checks
        # against python function
        fn_tensors, split_inputs = partial_apply_nontensors(fn, inputs, kwargs)
        if not cache_traced_fn or not hasattr(traced_fn, 'traced'):
            # First call (or caching disabled): trace fresh and round-trip the
            # graph through export/import as a sanity check.
            traced = torch.jit.trace(fn_tensors, split_inputs.all_tensors, check_trace=False)
            self.assertExportImport(traced.graph, split_inputs.all_tensors)
            output = traced(*split_inputs.all_tensors)
            if cache_traced_fn:
                traced_fn.traced = traced
                traced_fn.split_inputs = split_inputs
        else:
            # Guard to check that nontensor inputs are the same as during tracing
            self.assertTrue(traced_fn.split_inputs.nontensors_match(split_inputs))
            output = traced_fn.traced(*split_inputs.all_tensors)
            traced = traced_fn.traced
        # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
        traced_fn.last_graph = traced.graph_for(*split_inputs.all_tensors)  # type: ignore[attr-defined]
        traced_fn.graph = traced.graph  # type: ignore[attr-defined]
        return output
    return traced_fn
480
+
481
# known to be failing in script
# Test names in this set are skipped by the scripted variants of the tests.
EXCLUDE_SCRIPT = {
    'test_norm_fro_default',
    'test_norm_fro_cpu',
    'test_norm_nuc',
    'test_norm_fro',
    'test_norm_nuc_batched',

    # aten op has additional cudnn argument
    'test_nn_unfold',

    # flaky test - TODO fix
    'test_nn_ctc_loss',

    # unknown builtin op
    'test_nn_fold',

    # jit doesn't support sparse tensors.
    'test_to_sparse',
    'test_to_sparse_dim',
}
502
+
503
# generates a script function and set of example inputs
# from a specified test in the format of nn_functional_tests
def get_nn_functional_compiled_fn_and_inputs(name, self_size, args, variant_name='', *extra_args):
    test_name = 'test_nn_' + name
    if variant_name != '':
        test_name += '_' + variant_name

    self_variable = create_input((self_size,))[0][0]

    # need to record this because methods can change the size (e.g. unsqueeze)
    args_variable, _kwargs_variable = create_input(args)

    self_tensor = deepcopy(self_variable.data)
    args_tensor = deepcopy(unpack_variables(args_variable))

    f_args_variable = (self_variable,) + args_variable
    f_args_tensor = (self_tensor,) + args_tensor  # noqa: F841
    # Compile without emit hooks so profiling/optimization hooks don't fire.
    with torch._jit_internal._disable_emit_hooks():
        return gen_script_fn_and_args(name, "nn_functional", *f_args_variable)
524
+
525
+
526
# additional modules test
# TODO: delete this list once we make all nn_tests work
# Each dict follows the module-test spec format used elsewhere in this file:
# 'module_name' names a torch.nn class, 'constructor_args' are passed to it,
# 'input_size' sizes the forward input, 'extra_args' are extra forward inputs.
additional_module_tests = [
    {
        'module_name': 'Bilinear',
        'constructor_args': (S, S, M),
        'input_size': (S, S),
        'extra_args': ((S, S),)
    },
    {
        'module_name': 'RNNCell',
        'constructor_args': (S, S),
        'input_size': (S, S),
    },
    {
        'module_name': 'LSTMCell',
        'constructor_args': (S, S),
        'input_size': (S, S),
    },
    {
        'module_name': 'GRUCell',
        'constructor_args': (S, S),
        'input_size': (S, S),
    },
    {
        'module_name': 'MultiheadAttention',
        'constructor_args': (128, 8),
        'input_size': (10, 8, 128),
        'extra_args': (torch.randn(10, 8, 128), torch.randn(10, 8, 128)),
        'slowTest': True
    },
    {
        'module_name': 'Transformer',
        'constructor_args': (1, 1, 1, 1, 2),
        'input_size': (3, 1, 1),
        'extra_args': (torch.randn(1, 1, 1),),
        'slowTest': True
    }
]
565
+
566
# Module test names that are skipped when compiling as ScriptModules.
EXCLUDE_SCRIPT_MODULES = {
    'test_nn_AdaptiveAvgPool2d_tuple_none',
    'test_nn_AdaptiveAvgPool3d_tuple_none',
    'test_nn_AdaptiveMaxPool2d_tuple_none',
    'test_nn_AdaptiveMaxPool3d_tuple_none',

    # Doesn't use future division, so this is not supported
    'test_nn_CrossMapLRN2d',
    # Derivative for aten::_scaled_dot_product_flash_attention_backward is not implemented
    'test_nn_TransformerDecoderLayer_gelu_activation',
    'test_nn_TransformerDecoderLayer_relu_activation',
    'test_nn_TransformerEncoderLayer_gelu_activation',
    'test_nn_TransformerEncoderLayer_relu_activation',
    'test_nn_Transformer_multilayer_coder',
}
581
+
582
# Source template for a ScriptModule forward method: the first placeholder is
# the formal parameter list (including self), the second the call expression
# to return.
script_method_template = '''
def forward({}):
    return {}
'''
586
+
587
def create_script_module(self, nn_module, constructor_args, *args, **kwargs):
    """Return a factory that wraps `nn_module(*constructor_args)` in a ScriptModule.

    The returned `script_module(*args, **kwargs)` compiles a forward() that
    forwards its tensor arguments to the wrapped submodule, optionally checks
    export/import round-tripping (when `self` is a test case), runs the module
    once, and returns it. Pass `is_constant=True` in kwargs to mark the
    submodule as a compilation constant.
    """
    def script_module(*args, **kwargs):
        _formals, tensors, actuals = get_script_args(args)

        method_args = ', '.join(['self'] + actuals)
        call_args_str = ', '.join(actuals)
        call = f"self.submodule({call_args_str})"
        script = script_method_template.format(method_args, call)

        submodule_constants = []
        if kwargs.get('is_constant'):
            # Marking the attribute constant lets the compiler inline it.
            submodule_constants = ['submodule']

        # Create module to use the script method
        class TheModule(torch.jit.ScriptModule):
            __constants__ = submodule_constants

            def __init__(self) -> None:
                super().__init__()
                self.submodule = nn_module(*constructor_args)

        def make_module(script):
            module = TheModule()
            # check __repr__
            str(module)
            module.define(script)
            return module

        module = make_module(script)
        if self:
            # Only run the export/import round-trip when called from a test case.
            self.assertExportImportModule(module, tensors)
            module(*args)
        # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
        create_script_module.last_graph = module.graph  # type: ignore[attr-defined]
        return module
    return script_module
623
+
624
def check_alias_annotation(method_name, args, kwargs, *, aten_name, func_type='method'):
    """Script a call to `method_name` and verify `aten_name`'s alias annotation."""
    formals, tensors, actuals = get_script_args(args)
    call = get_call(method_name, func_type, actuals, kwargs)
    CU = torch.jit.CompilationUnit(script_template.format(', '.join(formals), call))
    # Inline and constant-propagate first so the alias check sees clean IR.
    torch._C._jit_pass_inline(CU.the_method.graph)
    torch._C._jit_pass_constant_propagation(CU.the_method.graph)
    torch._C._jit_check_alias_annotation(CU.the_method.graph, tuple(tensors), aten_name)
633
+
634
def get_nn_module_name_from_kwargs(**kwargs):
    """Resolve a module name from a test spec: module_name, then fullname,
    then the constructor's __name__ (None if none are present)."""
    for key in ('module_name', 'fullname'):
        if key in kwargs:
            return kwargs[key]
    if 'constructor' in kwargs:
        return kwargs['constructor'].__name__
641
+
642
def get_nn_mod_test_name(**kwargs):
    """Build the test name 'test_nn_<name>[_<desc>]' for a module test spec."""
    base = kwargs['fullname'] if 'fullname' in kwargs else get_nn_module_name_from_kwargs(**kwargs)
    if 'desc' in kwargs:
        base = f"{base}_{kwargs['desc']}"
    return f'test_nn_{base}'
650
+
651
def get_nn_module_class_from_kwargs(**kwargs):
    """Return the bare module class name: everything before the first '_'."""
    name = get_nn_module_name_from_kwargs(**kwargs)
    # A name with no underscore is already the class name; split() handles
    # both cases uniformly.
    return name.split('_', 1)[0]
658
+
659
def try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs):
    """Try to build a compiled ScriptModule plus example inputs from a test spec.

    Returns (module, inputs), or None when the spec cannot be scripted
    (eval-mode variants, names in EXCLUDE_SCRIPT_MODULES, functional wrappers).
    """
    name = get_nn_module_name_from_kwargs(**kwargs)

    if 'desc' in kwargs and 'eval' in kwargs['desc']:
        # eval() is not supported, so skip these tests
        return

    test_name = name
    if 'desc' in kwargs:
        test_name = f"{test_name}_{kwargs['desc']}"
    test_name = get_nn_mod_test_name(**kwargs)

    if test_name in EXCLUDE_SCRIPT_MODULES:
        return
    if 'constructor' in kwargs:
        nn_module = kwargs['constructor']
    else:
        nn_module = getattr(torch.nn, name)

    if "FunctionalModule" in str(nn_module):
        return

    if 'constructor_args_fn' in kwargs:
        constructor_args = kwargs['constructor_args_fn']()
    else:
        constructor_args = kwargs.get('constructor_args', ())

    # Set up inputs from tuple of sizes or constructor fn
    input_dtype = torch.double
    if 'input_fn' in kwargs:
        input = kwargs['input_fn']()
        if isinstance(input, torch.Tensor):
            input = (input,)

        if all(tensor.is_complex() for tensor in input):
            # All-complex inputs require a complex default dtype.
            input_dtype = torch.cdouble
    else:
        input = (kwargs['input_size'],)

    # Extra parameters to forward()
    if 'extra_args' in kwargs:
        input = input + kwargs['extra_args']

    if 'target_size' in kwargs:
        input = input + (kwargs['target_size'],)
    elif 'target_fn' in kwargs:
        if torch.is_tensor(input):
            input = (input,)
        input = input + (kwargs['target_fn'](),)

    args_variable, _kwargs_variable = create_input(input, dtype=input_dtype)
    f_args_variable = deepcopy(unpack_variables(args_variable))
    out_var = deepcopy(f_args_variable)


    # `self=None` skips the export/import check inside create_script_module.
    _args, mod = f_args_variable, create_script_module(
        None, nn_module, constructor_args, *f_args_variable
    )(*f_args_variable)

    return mod, out_var
719
+
720
+
721
def get_all_nn_module_tests():
    """Return every module test spec: the shared nn tests plus local additions."""
    return [*module_tests, *new_module_tests, *additional_module_tests]