ZTWHHH committed on
Commit
ca07ebe
·
verified ·
1 Parent(s): fa82258

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/__about__.cpython-310.pyc +0 -0
  3. wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/__init__.cpython-310.pyc +0 -0
  4. wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/__version__.cpython-310.pyc +0 -0
  5. wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/cli.cpython-310.pyc +0 -0
  6. wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/connector.cpython-310.pyc +0 -0
  7. wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/fabric.cpython-310.pyc +0 -0
  8. wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/wrappers.cpython-310.pyc +0 -0
  9. wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/__pycache__/__init__.cpython-310.pyc +0 -0
  10. wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/__pycache__/cpu.cpython-310.pyc +0 -0
  11. wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/__pycache__/mps.cpython-310.pyc +0 -0
  12. wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/__pycache__/registry.cpython-310.pyc +0 -0
  13. wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/__pycache__/tpu.cpython-310.pyc +0 -0
  14. wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/accelerator.py +58 -0
  15. wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/cpu.py +87 -0
  16. wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/cuda.py +366 -0
  17. wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/tpu.py +182 -0
  18. wemm/lib/python3.10/site-packages/lightning_fabric/loggers/__init__.py +15 -0
  19. wemm/lib/python3.10/site-packages/lightning_fabric/loggers/__pycache__/__init__.cpython-310.pyc +0 -0
  20. wemm/lib/python3.10/site-packages/lightning_fabric/loggers/__pycache__/csv_logs.cpython-310.pyc +0 -0
  21. wemm/lib/python3.10/site-packages/lightning_fabric/loggers/__pycache__/logger.cpython-310.pyc +0 -0
  22. wemm/lib/python3.10/site-packages/lightning_fabric/loggers/__pycache__/tensorboard.cpython-310.pyc +0 -0
  23. wemm/lib/python3.10/site-packages/lightning_fabric/loggers/csv_logs.py +224 -0
  24. wemm/lib/python3.10/site-packages/lightning_fabric/loggers/logger.py +133 -0
  25. wemm/lib/python3.10/site-packages/lightning_fabric/loggers/tensorboard.py +311 -0
  26. wemm/lib/python3.10/site-packages/lightning_fabric/strategies/launchers/__pycache__/launcher.cpython-310.pyc +0 -0
  27. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/__init__.py +23 -0
  28. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/__pycache__/__init__.cpython-310.pyc +0 -0
  29. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/__pycache__/rank_zero.cpython-310.pyc +0 -0
  30. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/apply_func.py +126 -0
  31. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/device_dtype_mixin.py +111 -0
  32. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/device_parser.py +201 -0
  33. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/distributed.py +316 -0
  34. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/enums.py +29 -0
  35. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/exceptions.py +17 -0
  36. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/optimizer.py +34 -0
  37. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/registry.py +27 -0
  38. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/seed.py +128 -0
  39. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/types.py +131 -0
  40. wemm/lib/python3.10/site-packages/lightning_fabric/utilities/warnings.py +24 -0
  41. wemm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc +3 -0
  42. wemm/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc +3 -0
  43. wemm/lib/python3.10/site-packages/torch/_VF.py +30 -0
  44. wemm/lib/python3.10/site-packages/torch/__config__.py +22 -0
  45. wemm/lib/python3.10/site-packages/torch/__future__.py +21 -0
  46. wemm/lib/python3.10/site-packages/torch/__init__.py +1488 -0
  47. wemm/lib/python3.10/site-packages/torch/_jit_internal.py +1435 -0
  48. wemm/lib/python3.10/site-packages/torch/_lowrank.py +299 -0
  49. wemm/lib/python3.10/site-packages/torch/_meta_registrations.py +2705 -0
  50. wemm/lib/python3.10/site-packages/torch/_namedtensor_internals.py +166 -0
.gitattributes CHANGED
@@ -206,3 +206,5 @@ parrot/lib/python3.10/site-packages/numpy/_core/_multiarray_umath.cpython-310-x8
206
  wemm/lib/python3.10/site-packages/sympy/polys/__pycache__/polytools.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
207
  wemm/lib/python3.10/site-packages/sympy/printing/__pycache__/latex.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
208
  wemm/lib/python3.10/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
206
  wemm/lib/python3.10/site-packages/sympy/polys/__pycache__/polytools.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
207
  wemm/lib/python3.10/site-packages/sympy/printing/__pycache__/latex.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
208
  wemm/lib/python3.10/site-packages/sympy/printing/pretty/tests/__pycache__/test_pretty.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
209
+ wemm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
210
+ wemm/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/__about__.cpython-310.pyc ADDED
Binary file (582 Bytes). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/__version__.cpython-310.pyc ADDED
Binary file (458 Bytes). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/cli.cpython-310.pyc ADDED
Binary file (5.71 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/connector.cpython-310.pyc ADDED
Binary file (17.2 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/fabric.cpython-310.pyc ADDED
Binary file (35.9 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/__pycache__/wrappers.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (807 Bytes). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/__pycache__/cpu.cpython-310.pyc ADDED
Binary file (2.91 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/__pycache__/mps.cpython-310.pyc ADDED
Binary file (3.3 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/__pycache__/registry.cpython-310.pyc ADDED
Binary file (4.45 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/__pycache__/tpu.cpython-310.pyc ADDED
Binary file (6.04 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/accelerator.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from abc import ABC, abstractmethod
15
+ from typing import Any, Dict
16
+
17
+ import torch
18
+
19
+
20
class Accelerator(ABC):
    """Base class for all accelerators.

    A concrete subclass encapsulates the handling of exactly one kind of
    hardware (CPU, CUDA GPU, TPU, ...).

    .. warning:: Writing your own accelerator is an :ref:`experimental <versioning:Experimental API>` feature.
    """

    @abstractmethod
    def setup_device(self, device: torch.device) -> None:
        """Create and prepare the device for the current process."""

    @abstractmethod
    def teardown(self) -> None:
        """Clean up any state created by the accelerator."""

    @staticmethod
    @abstractmethod
    def parse_devices(devices: Any) -> Any:
        """Accelerator device parsing logic."""

    @staticmethod
    @abstractmethod
    def get_parallel_devices(devices: Any) -> Any:
        """Gets parallel devices for the Accelerator."""

    @staticmethod
    @abstractmethod
    def auto_device_count() -> int:
        """Get the device count when set to auto."""

    @staticmethod
    @abstractmethod
    def is_available() -> bool:
        """Detect if the hardware is available."""

    @classmethod
    def register_accelerators(cls, accelerator_registry: Dict) -> None:
        # Subclasses override this hook to add themselves to the registry;
        # the base class deliberately registers nothing.
        pass
wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/cpu.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Dict, List, Union
15
+
16
+ import torch
17
+
18
+ from lightning_fabric.accelerators.accelerator import Accelerator
19
+
20
+
21
class CPUAccelerator(Accelerator):
    """Accelerator for CPU devices."""

    def setup_device(self, device: torch.device) -> None:
        """Validate that the given device is a CPU device.

        Raises:
            ValueError:
                If the selected device is not CPU.
        """
        if device.type != "cpu":
            raise ValueError(f"Device should be CPU, got {device} instead.")

    def teardown(self) -> None:
        # Nothing to clean up for CPU execution.
        pass

    @staticmethod
    def parse_devices(devices: Union[int, str, List[int]]) -> int:
        """Accelerator device parsing logic."""
        devices = _parse_cpu_cores(devices)
        return devices

    @staticmethod
    def get_parallel_devices(devices: Union[int, str, List[int]]) -> List[torch.device]:
        """Gets parallel devices for the Accelerator."""
        devices = _parse_cpu_cores(devices)
        return [torch.device("cpu")] * devices

    @staticmethod
    def auto_device_count() -> int:
        """Get the devices when set to auto."""
        return 1

    @staticmethod
    def is_available() -> bool:
        """CPU is always available for execution."""
        return True

    @classmethod
    def register_accelerators(cls, accelerator_registry: Dict) -> None:
        accelerator_registry.register(
            "cpu",
            cls,
            # Bug fix: `cls` is already a class, so `cls.__class__.__name__`
            # is the metaclass name ("ABCMeta"), not "CPUAccelerator".
            description=cls.__name__,
        )
65
+
66
+
67
+ def _parse_cpu_cores(cpu_cores: Union[int, str, List[int]]) -> int:
68
+ """Parses the cpu_cores given in the format as accepted by the ``devices`` argument in the
69
+ :class:`~pytorch_lightning.trainer.Trainer`.
70
+
71
+ Args:
72
+ cpu_cores: An int > 0.
73
+
74
+ Returns:
75
+ An int representing the number of processes
76
+
77
+ Raises:
78
+ MisconfigurationException:
79
+ If cpu_cores is not an int > 0
80
+ """
81
+ if isinstance(cpu_cores, str) and cpu_cores.strip().isdigit():
82
+ cpu_cores = int(cpu_cores)
83
+
84
+ if not isinstance(cpu_cores, int) or cpu_cores <= 0:
85
+ raise TypeError("`devices` selected with `CPUAccelerator` should be an int > 0.")
86
+
87
+ return cpu_cores
wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/cuda.py ADDED
@@ -0,0 +1,366 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import os
15
+ import warnings
16
+ from contextlib import contextmanager
17
+ from functools import lru_cache
18
+ from typing import cast, Dict, Generator, List, Optional, Union
19
+
20
+ import torch
21
+ from lightning_utilities.core.rank_zero import rank_zero_info
22
+
23
+ from lightning_fabric.accelerators.accelerator import Accelerator
24
+ from lightning_fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_12, _TORCH_GREATER_EQUAL_2_0
25
+
26
+
27
class CUDAAccelerator(Accelerator):
    """Accelerator for NVIDIA CUDA devices."""

    def setup_device(self, device: torch.device) -> None:
        """Make the given CUDA device current for this process.

        Raises:
            ValueError:
                If the selected device is not of type CUDA.
        """
        if device.type != "cuda":
            raise ValueError(f"Device should be CUDA, got {device} instead.")
        _check_cuda_matmul_precision(device)
        torch.cuda.set_device(device)

    def teardown(self) -> None:
        # Release cached CUDA memory so other processes can reuse it.
        _clear_cuda_memory()

    @staticmethod
    def parse_devices(devices: Union[int, str, List[int]]) -> Optional[List[int]]:
        """Accelerator device parsing logic."""
        # Kept function-scoped as in the original (presumably to avoid an
        # import cycle with device_parser — confirm before moving it).
        from lightning_fabric.utilities.device_parser import _parse_gpu_ids

        return _parse_gpu_ids(devices, include_cuda=True)

    @staticmethod
    def get_parallel_devices(devices: List[int]) -> List[torch.device]:
        """Gets parallel devices for the Accelerator."""
        return [torch.device("cuda", i) for i in devices]

    @staticmethod
    def auto_device_count() -> int:
        """Get the devices when set to auto."""
        return num_cuda_devices()

    @staticmethod
    def is_available() -> bool:
        return num_cuda_devices() > 0

    @classmethod
    def register_accelerators(cls, accelerator_registry: Dict) -> None:
        accelerator_registry.register(
            "cuda",
            cls,
            # Bug fix: `cls.__class__.__name__` is the metaclass name, not
            # "CUDAAccelerator"; use the class's own name as the description.
            description=cls.__name__,
        )
72
+
73
+
74
def find_usable_cuda_devices(num_devices: int = -1) -> List[int]:
    """Returns a list of all available and usable CUDA GPU devices.

    A GPU is considered usable if we can successfully move a tensor to the device, and this is what this function
    tests for each GPU on the system until the target number of usable devices is found.

    A subset of GPUs on the system might be used by other processes, and if the GPU is configured to operate in
    'exclusive' mode (configurable by the admin), then only one process is allowed to occupy it.

    Args:
        num_devices: The number of devices you want to request. By default, this function will return as many as there
            are usable CUDA GPU devices available.

    Warning:
        If multiple processes call this function at the same time, there can be race conditions in the case where
        both processes determine that the device is unoccupied, leading into one of them crashing later on.
    """
    visible_devices = _get_all_visible_cuda_devices()
    if not visible_devices:
        raise ValueError(
            f"You requested to find {num_devices} devices but there are no visible CUDA devices on this machine."
        )
    if num_devices > len(visible_devices):
        raise ValueError(
            f"You requested to find {num_devices} devices but this machine only has {len(visible_devices)} GPUs."
        )

    available_devices = []
    unavailable_devices = []

    for gpu_idx in visible_devices:
        try:
            # Allocating a tiny tensor is the litmus test for usability.
            torch.tensor(0, device=torch.device("cuda", gpu_idx))
        except RuntimeError:
            unavailable_devices.append(gpu_idx)
        else:
            available_devices.append(gpu_idx)
            if len(available_devices) == num_devices:
                break  # exit early if we found the right number of GPUs

    if num_devices != -1 and len(available_devices) != num_devices:
        raise RuntimeError(
            f"You requested to find {num_devices} devices but only {len(available_devices)} are currently available."
            f" The devices {unavailable_devices} are occupied by other processes and can't be used at the moment."
        )
    return available_devices
122
+
123
+
124
def _get_all_visible_cuda_devices() -> List[int]:
    """Returns a list of all visible CUDA GPU devices.

    Devices masked by the environment variable ``CUDA_VISIBLE_DEVICES`` won't be returned here. For example, assume you
    have 8 physical GPUs. If ``CUDA_VISIBLE_DEVICES="1,3,6"``, then this function will return the list ``[0, 1, 2]``
    because these are the three visible GPUs after applying the mask ``CUDA_VISIBLE_DEVICES``.
    """
    # From the process's point of view, visible devices are always
    # re-numbered 0..N-1 regardless of their physical index.
    return list(range(num_cuda_devices()))
132
+
133
+
134
# TODO: Remove once minimum supported PyTorch version is 2.0
@contextmanager
def _patch_cuda_is_available() -> Generator:
    """Context manager that safely patches :func:`torch.cuda.is_available` with its NVML-based version if
    possible."""
    can_patch = (
        hasattr(torch._C, "_cuda_getDeviceCount") and _device_count_nvml() >= 0 and not _TORCH_GREATER_EQUAL_2_0
    )
    if not can_patch:
        # Patching here could lead to attribute errors or infinite recursion.
        yield
        return
    # Safe to patch: torch has CUDA compiled in and the NVML count succeeds.
    original_check = torch.cuda.is_available
    torch.cuda.is_available = is_cuda_available
    try:
        yield
    finally:
        torch.cuda.is_available = original_check
150
+
151
+
152
@lru_cache(1)
def num_cuda_devices() -> int:
    """Returns the number of available CUDA devices.

    Unlike :func:`torch.cuda.device_count`, this function does its best not to create a CUDA context for fork support,
    if the platform allows it.
    """
    if _TORCH_GREATER_EQUAL_2_0:
        return torch.cuda.device_count()

    # Implementation copied from upstream: https://github.com/pytorch/pytorch/pull/84879
    # TODO: Remove once minimum supported PyTorch version is 2.0
    nvml_count = _device_count_nvml()
    # A negative NVML count means discovery failed; fall back to torch.
    if nvml_count < 0:
        return torch.cuda.device_count()
    return nvml_count
166
+
167
+
168
def is_cuda_available() -> bool:
    """Returns a bool indicating if CUDA is currently available.

    Unlike :func:`torch.cuda.is_available`, this function does its best not to create a CUDA context for fork support,
    if the platform allows it.
    """
    # We set `PYTORCH_NVML_BASED_CUDA_CHECK=1` in lightning_fabric.__init__.py
    if _TORCH_GREATER_EQUAL_2_0:
        return torch.cuda.is_available()
    return num_cuda_devices() > 0
176
+
177
+
178
+ # TODO: Remove once minimum supported PyTorch version is 2.0
179
+ def _parse_visible_devices() -> Union[List[int], List[str]]:
180
+ """Parse CUDA_VISIBLE_DEVICES environment variable."""
181
+ var = os.getenv("CUDA_VISIBLE_DEVICES")
182
+ if var is None:
183
+ return list(range(64))
184
+
185
+ def _strtoul(s: str) -> int:
186
+ """Return -1 or positive integer sequence string starts with,"""
187
+ if not s:
188
+ return -1
189
+ for idx, c in enumerate(s):
190
+ if not (c.isdigit() or (idx == 0 and c in "+-")):
191
+ break
192
+ if idx + 1 == len(s):
193
+ idx += 1
194
+ return int(s[:idx]) if idx > 0 else -1
195
+
196
+ def parse_list_with_prefix(lst: str, prefix: str) -> List[str]:
197
+ rcs: List[str] = []
198
+ for elem in lst.split(","):
199
+ # Repeated id results in empty set
200
+ if elem in rcs:
201
+ return cast(List[str], [])
202
+ # Anything other but prefix is ignored
203
+ if not elem.startswith(prefix):
204
+ break
205
+ rcs.append(elem)
206
+ return rcs
207
+
208
+ if var.startswith("GPU-"):
209
+ return parse_list_with_prefix(var, "GPU-")
210
+ if var.startswith("MIG-"):
211
+ return parse_list_with_prefix(var, "MIG-")
212
+ # CUDA_VISIBLE_DEVICES uses something like strtoul
213
+ # which makes `1gpu2,2ampere` is equivalent to `1,2`
214
+ rc: List[int] = []
215
+ for elem in var.split(","):
216
+ x = _strtoul(elem.strip())
217
+ # Repeated ordinal results in empty set
218
+ if x in rc:
219
+ return cast(List[int], [])
220
+ # Negative value aborts the sequence
221
+ if x < 0:
222
+ break
223
+ rc.append(x)
224
+ return rc
225
+
226
+
227
+ # TODO: Remove once minimum supported PyTorch version is 2.0
228
+ def _raw_device_count_nvml() -> int:
229
+ """Return number of devices as reported by NVML or negative value if NVML discovery/initialization failed."""
230
+ from ctypes import byref, c_int, CDLL
231
+
232
+ nvml_h = CDLL("libnvidia-ml.so.1")
233
+ rc = nvml_h.nvmlInit()
234
+ if rc != 0:
235
+ warnings.warn("Can't initialize NVML")
236
+ return -1
237
+ dev_count = c_int(-1)
238
+ rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count))
239
+ if rc != 0:
240
+ warnings.warn("Can't get nvml device count")
241
+ return -1
242
+ del nvml_h
243
+ return dev_count.value
244
+
245
+
246
+ # TODO: Remove once minimum supported PyTorch version is 2.0
247
+ def _raw_device_uuid_nvml() -> Optional[List[str]]:
248
+ """Return list of device UUID as reported by NVML or None if NVM discovery/initialization failed."""
249
+ from ctypes import byref, c_int, c_void_p, CDLL, create_string_buffer
250
+
251
+ nvml_h = CDLL("libnvidia-ml.so.1")
252
+ rc = nvml_h.nvmlInit()
253
+ if rc != 0:
254
+ warnings.warn("Can't initialize NVML")
255
+ return None
256
+ dev_count = c_int(-1)
257
+ rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count))
258
+ if rc != 0:
259
+ warnings.warn("Can't get nvml device count")
260
+ return None
261
+ uuids: List[str] = []
262
+ for idx in range(dev_count.value):
263
+ dev_id = c_void_p()
264
+ rc = nvml_h.nvmlDeviceGetHandleByIndex_v2(idx, byref(dev_id))
265
+ if rc != 0:
266
+ warnings.warn("Can't get device handle")
267
+ return None
268
+ buf_len = 96
269
+ buf = create_string_buffer(buf_len)
270
+ rc = nvml_h.nvmlDeviceGetUUID(dev_id, buf, buf_len)
271
+ if rc != 0:
272
+ warnings.warn("Can't get device UUID")
273
+ return None
274
+ uuids.append(buf.raw.decode("ascii").strip("\0"))
275
+ del nvml_h
276
+ return uuids
277
+
278
+
279
+ # TODO: Remove once minimum supported PyTorch version is 2.0
280
+ def _transform_uuid_to_ordinals(candidates: List[str], uuids: List[str]) -> List[int]:
281
+ """Given the set of partial uuids and list of known uuids builds a set of ordinals excluding ambiguous partials
282
+ IDs."""
283
+
284
+ def uuid_to_orinal(candidate: str, uuids: List[str]) -> int:
285
+ best_match = -1
286
+ for idx, uuid in enumerate(uuids):
287
+ if not uuid.startswith(candidate):
288
+ continue
289
+ # Ambigous candidate
290
+ if best_match != -1:
291
+ return -1
292
+ best_match = idx
293
+ return best_match
294
+
295
+ rc: List[int] = []
296
+ for candidate in candidates:
297
+ idx = uuid_to_orinal(candidate, uuids)
298
+ # First invalid ordinal stops parsing
299
+ if idx < 0:
300
+ break
301
+ # Duplicates result in empty set
302
+ if idx in rc:
303
+ return cast(List[int], [])
304
+ rc.append(idx)
305
+ return rc
306
+
307
+
308
# TODO: Remove once minimum supported PyTorch version is 2.0
def _device_count_nvml() -> int:
    """Return number of devices as reported by NVML taking CUDA_VISIBLE_DEVICES into account.

    Negative value is returned if NVML discovery or initialization has failed.
    """
    visible_devices = _parse_visible_devices()
    if not visible_devices:
        return 0
    try:
        if type(visible_devices[0]) is str:
            # Skip MIG parsing
            if visible_devices[0].startswith("MIG-"):
                return -1
            uuids = _raw_device_uuid_nvml()
            if uuids is None:
                return -1
            visible_devices = _transform_uuid_to_ordinals(cast(List[str], visible_devices), uuids)
        else:
            raw_cnt = _raw_device_count_nvml()
            if raw_cnt <= 0:
                return raw_cnt
            # Trim the list up to a maximum available device
            for idx, val in enumerate(visible_devices):
                if cast(int, val) >= raw_cnt:
                    return idx
    except (OSError, AttributeError):
        # NVML library missing/unloadable, or an unexpected entry type:
        # report failure rather than crash.
        return -1
    return len(visible_devices)
339
+
340
+
341
def _check_cuda_matmul_precision(device: torch.device) -> None:
    """Inform the user when Tensor Cores are available but the float32 matmul precision is left at the default."""
    if not _TORCH_GREATER_EQUAL_1_12:
        # before 1.12, tf32 was used by default
        return
    major, _ = torch.cuda.get_device_capability(device)
    if major < 8:
        # Only Ampere (compute capability 8.x) and later leverage tensor
        # cores, where this setting becomes useful.
        return
    # check that the user hasn't changed the precision already, this works for both `allow_tf32 = True` and
    # `set_float32_matmul_precision`
    if torch.get_float32_matmul_precision() == "highest":  # default
        rank_zero_info(
            f"You are using a CUDA device ({torch.cuda.get_device_name(device)!r}) that has Tensor Cores. To properly"
            " utilize them, you should set `torch.set_float32_matmul_precision('medium' | 'high')` which will trade-off"
            " precision for performance. For more details, read https://pytorch.org/docs/stable/generated/"
            "torch.set_float32_matmul_precision.html#torch.set_float32_matmul_precision"
        )
    # note: no need change `torch.backends.cudnn.allow_tf32` as it's enabled by default:
    # https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
360
+
361
+
362
def _clear_cuda_memory() -> None:
    """Release cached CUDA memory; on torch >= 2.0 also drop cuBLAS workspaces first."""
    if _TORCH_GREATER_EQUAL_2_0:
        # https://github.com/pytorch/pytorch/issues/95668
        torch._C._cuda_clearCublasWorkspaces()
    torch.cuda.empty_cache()
wemm/lib/python3.10/site-packages/lightning_fabric/accelerators/tpu.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import functools
15
+ import queue as q
16
+ import traceback
17
+ from multiprocessing import Process, Queue
18
+ from typing import Any, Callable, Dict, List, Optional, Union
19
+
20
+ import torch
21
+ from lightning_utilities.core.imports import ModuleAvailableCache
22
+
23
+ from lightning_fabric.accelerators.accelerator import Accelerator
24
+ from lightning_fabric.utilities.device_parser import _check_data_type
25
+
26
+
27
class TPUAccelerator(Accelerator):
    """Accelerator for TPU devices.

    .. warning:: Use of this accelerator beyond import and instantiation is experimental.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Fail fast when `torch_xla` cannot be imported.
        if not _XLA_AVAILABLE:
            raise ModuleNotFoundError(str(_XLA_AVAILABLE))
        super().__init__(*args, **kwargs)

    def setup_device(self, device: torch.device) -> None:
        pass

    def teardown(self) -> None:
        pass

    @staticmethod
    def parse_devices(devices: Union[int, str, List[int]]) -> Optional[Union[int, List[int]]]:
        """Accelerator device parsing logic."""
        return _parse_tpu_devices(devices)

    @staticmethod
    def get_parallel_devices(devices: Union[int, List[int]]) -> List[int]:
        """Gets parallel devices for the Accelerator."""
        if isinstance(devices, int):
            return list(range(devices))
        return devices

    @staticmethod
    def auto_device_count() -> int:
        """Get the devices when set to auto."""
        return 8

    @staticmethod
    @functools.lru_cache(maxsize=1)
    def is_available() -> bool:
        # check `_XLA_AVAILABLE` again to avoid launching processes
        return bool(_XLA_AVAILABLE) and _is_device_tpu()

    @classmethod
    def register_accelerators(cls, accelerator_registry: Dict) -> None:
        accelerator_registry.register(
            "tpu",
            cls,
            # Bug fix: `cls.__class__.__name__` is the metaclass name, not
            # "TPUAccelerator"; use the class's own name as the description.
            description=cls.__name__,
        )
74
+
75
+
76
+ # define TPU availability timeout in seconds
77
+ TPU_CHECK_TIMEOUT = 60
78
+
79
+
80
+ def _inner_f(queue: Queue, func: Callable, *args: Any, **kwargs: Any) -> None: # pragma: no cover
81
+ try:
82
+ queue.put(func(*args, **kwargs))
83
+ except Exception:
84
+ traceback.print_exc()
85
+ queue.put(None)
86
+
87
+
88
def _multi_process(func: Callable) -> Callable:
    """Decorator that runs ``func`` in a separate process with a ``TPU_CHECK_TIMEOUT`` deadline.

    Returns ``False`` when the subprocess produced no result within the timeout.
    """

    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Union[bool, Any]:
        result_queue: Queue = Queue()
        worker = Process(target=_inner_f, args=(result_queue, func, *args), kwargs=kwargs)
        worker.start()
        worker.join(TPU_CHECK_TIMEOUT)
        try:
            return result_queue.get_nowait()
        except q.Empty:
            # No result in time: report it and treat as "not available".
            traceback.print_exc()
            return False

    return wrapper
102
+
103
+
104
@_multi_process
def _is_device_tpu() -> bool:
    """Check if TPU devices are available. Runs XLA device check within a separate process.

    Return:
        A boolean value indicating if TPU devices are available
    """
    if not _XLA_AVAILABLE:
        return False
    import torch_xla.core.xla_model as xm

    # For the TPU Pod training process, for example, if we have
    # TPU v3-32 with 4 VMs, the world size would be 4 and as
    # we would have to use `torch_xla.distributed.xla_dist` for
    # multiple VMs and TPU_CONFIG won't be available, running
    # `xm.get_xla_supported_devices("TPU")` won't be possible.
    if xm.xrt_world_size() > 1:
        return True
    return bool(xm.get_xla_supported_devices("TPU"))
121
+
122
+
123
+ _XLA_AVAILABLE = ModuleAvailableCache("torch_xla")
124
+
125
+
126
def _tpu_distributed() -> bool:
    """Return True when TPUs are available and the XLA world size is greater than one."""
    if not TPUAccelerator.is_available():
        return False
    import torch_xla.core.xla_model as xm

    return xm.xrt_world_size() > 1
132
+
133
+
134
def _parse_tpu_devices(devices: Optional[Union[int, str, List[int]]]) -> Optional[Union[int, List[int]]]:
    """
    Parses the TPU devices given in the format as accepted by the
    :class:`~pytorch_lightning.trainer.Trainer` and :class:`~lightning_fabric.Fabric`.

    Args:
        devices: An int of 1 or string '1' indicates that 1 core with multi-processing should be used
            An int 8 or string '8' indicates that all 8 cores with multi-processing should be used
            A list of ints or a strings containing a list of comma separated integers
            indicates the specific TPU core to use.

    Returns:
        A list of tpu_cores to be used or ``None`` if no TPU cores were requested

    Raises:
        TypeError:
            If TPU devices aren't 1, 8 or [<1-8>]
    """
    _check_data_type(devices)

    parsed = _parse_tpu_devices_str(devices.strip()) if isinstance(devices, str) else devices

    if not _tpu_devices_valid(parsed):
        raise TypeError("`devices` can only be 1, 8 or [<1-8>] for TPUs.")

    return parsed
161
+
162
+
163
+ def _tpu_devices_valid(devices: Any) -> bool:
164
+ # allow 1 or 8 cores
165
+ if devices in (1, 8, None):
166
+ return True
167
+
168
+ # allow picking 1 of 8 indexes
169
+ if isinstance(devices, (list, tuple, set)):
170
+ has_1_tpu_idx = len(devices) == 1
171
+ is_valid_tpu_idx = 1 <= list(devices)[0] <= 8
172
+
173
+ is_valid_tpu_core_choice = has_1_tpu_idx and is_valid_tpu_idx
174
+ return is_valid_tpu_core_choice
175
+
176
+ return False
177
+
178
+
179
+ def _parse_tpu_devices_str(devices: str) -> Union[int, List[int]]:
180
+ if devices in ("1", "8"):
181
+ return int(devices)
182
+ return [int(x.strip()) for x in devices.split(",") if len(x) > 0]
wemm/lib/python3.10/site-packages/lightning_fabric/loggers/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ from lightning_fabric.loggers.csv_logs import CSVLogger # noqa: F401
14
+ from lightning_fabric.loggers.logger import Logger # noqa: F401
15
+ from lightning_fabric.loggers.tensorboard import TensorBoardLogger # noqa: F401
wemm/lib/python3.10/site-packages/lightning_fabric/loggers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (381 Bytes). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/loggers/__pycache__/csv_logs.cpython-310.pyc ADDED
Binary file (7.66 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/loggers/__pycache__/logger.cpython-310.pyc ADDED
Binary file (5.46 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/loggers/__pycache__/tensorboard.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/loggers/csv_logs.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import csv
16
+ import logging
17
+ import os
18
+ from argparse import Namespace
19
+ from typing import Any, Dict, List, Optional, Union
20
+
21
+ from torch import Tensor
22
+
23
+ from lightning_fabric.loggers.logger import Logger, rank_zero_experiment
24
+ from lightning_fabric.utilities.cloud_io import get_filesystem
25
+ from lightning_fabric.utilities.logger import _add_prefix
26
+ from lightning_fabric.utilities.rank_zero import rank_zero_only, rank_zero_warn
27
+ from lightning_fabric.utilities.types import _PATH
28
+
29
+ log = logging.getLogger(__name__)
30
+
31
+
32
class CSVLogger(Logger):
    r"""
    Log to the local file system in CSV format.

    Logs are saved to ``os.path.join(root_dir, name, version)``.

    Args:
        root_dir: The root directory in which all your experiments with different names and versions will be stored.
        name: Experiment name. Defaults to ``'lightning_logs'``.
        version: Experiment version. If version is not specified the logger inspects the save
            directory for existing versions, then automatically assigns the next available version.
        prefix: A string to put at the beginning of metric keys.
        flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).

    Example::

        from lightning_fabric.loggers import CSVLogger

        logger = CSVLogger("path/to/logs/root", name="my_model")
        logger.log_metrics({"loss": 0.235, "acc": 0.75})
        logger.finalize("success")
    """

    # Character joining the prefix and the metric name in `log_metrics`.
    LOGGER_JOIN_CHAR = "-"

    def __init__(
        self,
        root_dir: _PATH,
        name: str = "lightning_logs",
        version: Optional[Union[int, str]] = None,
        prefix: str = "",
        flush_logs_every_n_steps: int = 100,
    ) -> None:
        super().__init__()
        root_dir = os.fspath(root_dir)
        self._root_dir = root_dir
        self._name = name or ""
        self._version = version
        self._prefix = prefix
        self._fs = get_filesystem(root_dir)
        # The writer is created lazily by the `experiment` property (rank zero only).
        self._experiment: Optional[_ExperimentWriter] = None
        self._flush_logs_every_n_steps = flush_logs_every_n_steps

    @property
    def name(self) -> str:
        """Gets the name of the experiment.

        Returns:
            The name of the experiment.
        """
        return self._name

    @property
    def version(self) -> Union[int, str]:
        """Gets the version of the experiment.

        Returns:
            The version of the experiment if it is specified, else the next version.
        """
        if self._version is None:
            # Resolved once and cached so repeated accesses return the same version.
            self._version = self._get_next_version()
        return self._version

    @property
    def root_dir(self) -> str:
        """Gets the save directory where the versioned CSV experiments are saved."""
        return self._root_dir

    @property
    def log_dir(self) -> str:
        """The log directory for this run.

        By default, it is named ``'version_${self.version}'`` but it can be overridden by passing a string value for the
        constructor's version parameter instead of ``None`` or an int.
        """
        # create a pseudo standard path
        version = self.version if isinstance(self.version, str) else f"version_{self.version}"
        log_dir = os.path.join(self.root_dir, self.name, version)
        return log_dir

    @property
    @rank_zero_experiment
    def experiment(self) -> "_ExperimentWriter":
        """Actual ExperimentWriter object. To use ExperimentWriter features anywhere in your code, do the
        following.

        Example::

            self.logger.experiment.some_experiment_writer_function()
        """
        # Cache the writer so repeated accesses reuse the same instance.
        if self._experiment is not None:
            return self._experiment

        os.makedirs(self.root_dir, exist_ok=True)
        self._experiment = _ExperimentWriter(log_dir=self.log_dir)
        return self._experiment

    @rank_zero_only
    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
        # Hyperparameter logging is intentionally unsupported for the CSV backend.
        raise NotImplementedError("The `CSVLogger` does not yet support logging hyperparameters.")

    @rank_zero_only
    def log_metrics(self, metrics: Dict[str, Union[Tensor, float]], step: Optional[int] = None) -> None:
        metrics = _add_prefix(metrics, self._prefix, self.LOGGER_JOIN_CHAR)
        self.experiment.log_metrics(metrics, step)
        # Flush buffered rows to disk every `_flush_logs_every_n_steps` steps (`step` is 0-based).
        if step is not None and (step + 1) % self._flush_logs_every_n_steps == 0:
            self.save()

    @rank_zero_only
    def save(self) -> None:
        super().save()
        self.experiment.save()

    @rank_zero_only
    def finalize(self, status: str) -> None:
        if self._experiment is None:
            # When using multiprocessing, finalize() should be a no-op on the main process, as no experiment has been
            # initialized there
            return
        self.save()

    def _get_next_version(self) -> int:
        """Scan for existing ``version_*`` folders and return the next free version index."""
        # NOTE(review): versions are scanned directly under `root_dir`, while `log_dir`
        # nests them under `root_dir/name` — confirm this asymmetry is intended.
        root_dir = self.root_dir

        if not self._fs.isdir(root_dir):
            log.warning("Missing logger folder: %s", root_dir)
            return 0

        existing_versions = []
        for d in self._fs.listdir(root_dir, detail=False):
            name = d[len(root_dir) + 1 :]  # removes parent directories
            if self._fs.isdir(d) and name.startswith("version_"):
                existing_versions.append(int(name.split("_")[1]))

        if len(existing_versions) == 0:
            return 0

        return max(existing_versions) + 1
170
+
171
+
172
class _ExperimentWriter:
    r"""
    Experiment writer for CSVLogger.

    Buffers metric rows in memory and writes them out as a single CSV file on ``save()``.

    Args:
        log_dir: Directory for the experiment logs
    """

    NAME_METRICS_FILE = "metrics.csv"

    def __init__(self, log_dir: str) -> None:
        # One dict per logged step; the union of keys becomes the CSV header.
        self.metrics: List[Dict[str, float]] = []

        self.log_dir = log_dir
        self._fs = get_filesystem(log_dir)
        if self._fs.exists(self.log_dir) and self._fs.listdir(self.log_dir):
            rank_zero_warn(
                f"Experiment logs directory {self.log_dir} exists and is not empty."
                " Previous log files in this directory will be deleted when the new ones are saved!"
            )
        self._fs.makedirs(self.log_dir, exist_ok=True)

        self.metrics_file_path = os.path.join(self.log_dir, self.NAME_METRICS_FILE)

    def log_metrics(self, metrics_dict: Dict[str, float], step: Optional[int] = None) -> None:
        """Record one row of metrics; ``step`` defaults to the number of rows logged so far."""
        if step is None:
            step = len(self.metrics)

        # Single-element tensors are unwrapped to plain Python scalars.
        row = {name: (value.item() if isinstance(value, Tensor) else value) for name, value in metrics_dict.items()}
        row["step"] = step
        self.metrics.append(row)

    def save(self) -> None:
        """Write every buffered row to the metrics CSV file, replacing any previous contents."""
        if not self.metrics:
            return

        # The header is the union of all keys seen so far, in first-seen order.
        merged: Dict[str, float] = {}
        for row in self.metrics:
            merged.update(row)
        fieldnames = list(merged)

        with self._fs.open(self.metrics_file_path, "w", newline="") as handle:
            writer = csv.DictWriter(handle, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(self.metrics)
wemm/lib/python3.10/site-packages/lightning_fabric/loggers/logger.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Abstract base class used to build new loggers."""
15
+
16
+ from abc import ABC, abstractmethod
17
+ from argparse import Namespace
18
+ from functools import wraps
19
+ from typing import Any, Callable, Dict, Optional, Union
20
+
21
+ from torch import Tensor
22
+ from torch.nn import Module
23
+
24
+ from lightning_fabric.utilities.rank_zero import rank_zero_only
25
+
26
+
27
class Logger(ABC):
    """Abstract base class that experiment loggers implement."""

    @property
    @abstractmethod
    def name(self) -> Optional[str]:
        """Return the experiment name."""

    @property
    @abstractmethod
    def version(self) -> Optional[Union[int, str]]:
        """Return the experiment version."""

    @property
    def root_dir(self) -> Optional[str]:
        """Root directory under which all versions of an experiment are saved.

        ``None`` for loggers that do not save data locally.
        """
        return None

    @property
    def log_dir(self) -> Optional[str]:
        """Directory where the current version of the experiment is saved.

        ``None`` for loggers that do not save data locally.
        """
        return None

    @property
    def group_separator(self) -> str:
        """Default separator the logger uses to group data into subfolders."""
        return "/"

    @abstractmethod
    def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
        """Record metrics as soon as they are received.

        Args:
            metrics: Dictionary with metric names as keys and measured quantities as values
            step: Step number at which the metrics should be recorded
        """

    @abstractmethod
    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace], *args: Any, **kwargs: Any) -> None:
        """Record hyperparameters.

        Args:
            params: :class:`~argparse.Namespace` or `Dict` containing the hyperparameters
            args: Optional positional arguments, depends on the specific logger being used
            kwargs: Optional keyword arguments, depends on the specific logger being used
        """

    def log_graph(self, model: Module, input_array: Optional[Tensor] = None) -> None:
        """Record the model graph. No-op unless overridden.

        Args:
            model: the model with an implementation of ``forward``.
            input_array: input passed to ``model.forward``
        """

    def save(self) -> None:
        """Save log data. No-op unless overridden."""

    def finalize(self, status: str) -> None:
        """Finalize an experiment by saving any pending log data.

        Args:
            status: Status that the experiment finished with (e.g. success, failed, aborted)
        """
        self.save()
96
+
97
+
98
def rank_zero_experiment(fn: Callable) -> Callable:
    """Wrap an ``experiment`` accessor so only global rank 0 receives the real object.

    Every other rank gets a ``_DummyExperiment`` that silently absorbs all calls.
    """

    @wraps(fn)
    def experiment(self: Logger) -> Union[Any, _DummyExperiment]:
        # ``self`` is the custom logger instance whose ``experiment`` accessor is wrapped.
        # The effective return type is Union[<real experiment type>, _DummyExperiment].
        return _DummyExperiment() if rank_zero_only.rank > 0 else fn(self)

    return experiment
117
+
118
+
119
class _DummyExperiment:
    """Stand-in experiment that silently accepts any call, attribute access, or indexing."""

    def nop(self, *args: Any, **kw: Any) -> None:
        """Do nothing, whatever the arguments."""

    def __getattr__(self, _: Any) -> Callable:
        # Every unknown attribute resolves to the no-op callable.
        return self.nop

    def __getitem__(self, idx: int) -> "_DummyExperiment":
        # enables self.logger.experiment[0].add_image(...)
        return self

    def __setitem__(self, *args: Any, **kwargs: Any) -> None:
        # Item assignment is silently ignored.
        pass
wemm/lib/python3.10/site-packages/lightning_fabric/loggers/tensorboard.py ADDED
@@ -0,0 +1,311 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import logging
16
+ import os
17
+ from argparse import Namespace
18
+ from typing import Any, Dict, Mapping, Optional, TYPE_CHECKING, Union
19
+
20
+ import numpy as np
21
+ from lightning_utilities.core.imports import RequirementCache
22
+ from torch import Tensor
23
+ from torch.nn import Module
24
+
25
+ from lightning_fabric.loggers.logger import Logger, rank_zero_experiment
26
+ from lightning_fabric.utilities.cloud_io import get_filesystem
27
+ from lightning_fabric.utilities.logger import _add_prefix, _convert_params, _flatten_dict
28
+ from lightning_fabric.utilities.logger import _sanitize_params as _utils_sanitize_params
29
+ from lightning_fabric.utilities.rank_zero import rank_zero_only, rank_zero_warn
30
+ from lightning_fabric.utilities.types import _PATH
31
+
32
+ log = logging.getLogger(__name__)
33
+
34
+ _TENSORBOARD_AVAILABLE = RequirementCache("tensorboard")
35
+ _TENSORBOARDX_AVAILABLE = RequirementCache("tensorboardX")
36
+ if TYPE_CHECKING:
37
+ # assumes at least one will be installed when type checking
38
+ if _TENSORBOARD_AVAILABLE:
39
+ from torch.utils.tensorboard import SummaryWriter
40
+ else:
41
+ from tensorboardX import SummaryWriter # type: ignore[no-redef]
42
+
43
+
44
class TensorBoardLogger(Logger):
    r"""
    Log to local file system in `TensorBoard <https://www.tensorflow.org/tensorboard>`_ format.

    Implemented using :class:`~tensorboardX.SummaryWriter`. Logs are saved to
    ``os.path.join(root_dir, name, version)``. This is the recommended logger in Lightning Fabric.

    Args:
        root_dir: The root directory in which all your experiments with different names and versions will be stored.
        name: Experiment name. Defaults to ``'lightning_logs'``. If it is the empty string then no per-experiment
            subdirectory is used.
        version: Experiment version. If version is not specified the logger inspects the save
            directory for existing versions, then automatically assigns the next available version.
            If it is a string then it is used as the run-specific subdirectory name,
            otherwise ``'version_${version}'`` is used.
        default_hp_metric: Enables a placeholder metric with key `hp_metric` when `log_hyperparams` is
            called without a metric (otherwise calls to ``log_hyperparams`` without a metric are ignored).
        prefix: A string to put at the beginning of all metric keys.
        sub_dir: Sub-directory to group TensorBoard logs. If a ``sub_dir`` argument is passed
            then logs are saved in ``/root_dir/name/version/sub_dir/``. Defaults to ``None`` in which case
            logs are saved in ``/root_dir/name/version/``.
        \**kwargs: Additional arguments used by :class:`tensorboardX.SummaryWriter` can be passed as keyword
            arguments in this logger. To automatically flush to disk, `max_queue` sets the size
            of the queue for pending logs before flushing. `flush_secs` determines how many seconds
            elapses before flushing.


    Example::

        from lightning_fabric.loggers import TensorBoardLogger

        logger = TensorBoardLogger("path/to/logs/root", name="my_model")
        logger.log_hyperparams({"epochs": 5, "optimizer": "Adam"})
        logger.log_metrics({"acc": 0.75})
        logger.finalize("success")
    """
    # Character joining the prefix and the metric name in `log_metrics`.
    LOGGER_JOIN_CHAR = "-"

    def __init__(
        self,
        root_dir: _PATH,
        name: Optional[str] = "lightning_logs",
        version: Optional[Union[int, str]] = None,
        default_hp_metric: bool = True,
        prefix: str = "",
        sub_dir: Optional[_PATH] = None,
        **kwargs: Any,
    ) -> None:
        # Fail fast if no TensorBoard backend can be imported.
        if not _TENSORBOARD_AVAILABLE and not _TENSORBOARDX_AVAILABLE:
            raise ModuleNotFoundError(
                "Neither `tensorboard` nor `tensorboardX` is available. Try `pip install`ing either.\n"
                f"{str(_TENSORBOARDX_AVAILABLE)}\n{str(_TENSORBOARD_AVAILABLE)}"
            )
        super().__init__()
        root_dir = os.fspath(root_dir)
        self._root_dir = root_dir
        self._name = name or ""
        self._version = version
        self._sub_dir = None if sub_dir is None else os.fspath(sub_dir)

        self._default_hp_metric = default_hp_metric
        self._prefix = prefix
        self._fs = get_filesystem(root_dir)

        # The SummaryWriter is created lazily by the `experiment` property (rank zero only).
        self._experiment: Optional["SummaryWriter"] = None
        self._kwargs = kwargs

    @property
    def name(self) -> str:
        """Get the name of the experiment.

        Returns:
            The name of the experiment.
        """
        return self._name

    @property
    def version(self) -> Union[int, str]:
        """Get the experiment version.

        Returns:
            The experiment version if specified else the next version.
        """
        if self._version is None:
            # Resolved once and cached so repeated accesses return the same version.
            self._version = self._get_next_version()
        return self._version

    @property
    def root_dir(self) -> str:
        """Gets the save directory where the TensorBoard experiments are saved.

        Returns:
            The local path to the save directory where the TensorBoard experiments are saved.
        """
        return self._root_dir

    @property
    def log_dir(self) -> str:
        """The directory for this run's tensorboard checkpoint.

        By default, it is named ``'version_${self.version}'`` but it can be overridden by passing a string value for the
        constructor's version parameter instead of ``None`` or an int.
        """
        version = self.version if isinstance(self.version, str) else f"version_{self.version}"
        log_dir = os.path.join(self.root_dir, self.name, version)
        if isinstance(self.sub_dir, str):
            log_dir = os.path.join(log_dir, self.sub_dir)
        # Expand `$VAR` references and `~` so the writer receives a concrete path.
        log_dir = os.path.expandvars(log_dir)
        log_dir = os.path.expanduser(log_dir)
        return log_dir

    @property
    def sub_dir(self) -> Optional[str]:
        """Gets the sub directory where the TensorBoard experiments are saved.

        Returns:
            The local path to the sub directory where the TensorBoard experiments are saved.
        """
        return self._sub_dir

    @property
    @rank_zero_experiment
    def experiment(self) -> "SummaryWriter":
        """Actual tensorboard object. To use TensorBoard features anywhere in your code, do the following.

        Example::

            logger.experiment.some_tensorboard_function()
        """
        # Cache the writer so repeated accesses reuse the same instance.
        if self._experiment is not None:
            return self._experiment

        assert rank_zero_only.rank == 0, "tried to init log dirs in non global_rank=0"
        if self.root_dir:
            self._fs.makedirs(self.root_dir, exist_ok=True)

        # Prefer the torch-bundled backend; fall back to the standalone tensorboardX package.
        if _TENSORBOARD_AVAILABLE:
            from torch.utils.tensorboard import SummaryWriter
        else:
            from tensorboardX import SummaryWriter  # type: ignore[no-redef]

        self._experiment = SummaryWriter(log_dir=self.log_dir, **self._kwargs)
        return self._experiment

    @rank_zero_only
    def log_metrics(self, metrics: Mapping[str, float], step: Optional[int] = None) -> None:
        assert rank_zero_only.rank == 0, "experiment tried to log from global_rank != 0"

        metrics = _add_prefix(metrics, self._prefix, self.LOGGER_JOIN_CHAR)

        for k, v in metrics.items():
            # Single-element tensors are unwrapped to plain Python scalars.
            if isinstance(v, Tensor):
                v = v.item()

            if isinstance(v, dict):
                self.experiment.add_scalars(k, v, step)
            else:
                try:
                    self.experiment.add_scalar(k, v, step)
                # TODO(fabric): specify the possible exception
                except Exception as ex:
                    m = f"\n you tried to log {v} which is currently not supported. Try a dict or a scalar/tensor."
                    raise ValueError(m) from ex

    @rank_zero_only
    def log_hyperparams(
        self, params: Union[Dict[str, Any], Namespace], metrics: Optional[Dict[str, Any]] = None
    ) -> None:
        """Record hyperparameters. TensorBoard logs with and without saved hyperparameters are incompatible, the
        hyperparameters are then not displayed in the TensorBoard. Please delete or move the previously saved logs
        to display the new ones with hyperparameters.

        Args:
            params: a dictionary-like container with the hyperparameters
            metrics: Dictionary with metric names as keys and measured quantities as values
        """
        params = _convert_params(params)

        # format params into the suitable for tensorboard
        params = _flatten_dict(params)
        params = self._sanitize_params(params)

        if metrics is None:
            # The placeholder metric makes the hparams tab render even without a real metric.
            if self._default_hp_metric:
                metrics = {"hp_metric": -1}
        elif not isinstance(metrics, dict):
            metrics = {"hp_metric": metrics}

        if metrics:
            self.log_metrics(metrics, 0)

        if _TENSORBOARD_AVAILABLE:
            from torch.utils.tensorboard.summary import hparams
        else:
            from tensorboardX.summary import hparams  # type: ignore[no-redef]

        exp, ssi, sei = hparams(params, metrics)
        writer = self.experiment._get_file_writer()
        writer.add_summary(exp)
        writer.add_summary(ssi)
        writer.add_summary(sei)

    @rank_zero_only
    def log_graph(self, model: Module, input_array: Optional[Tensor] = None) -> None:
        # NOTE(review): when the model lacks the batch-transfer hooks below, no graph is
        # logged at all (the elif chain ends without a plain add_graph) — confirm intended.
        model_example_input = getattr(model, "example_input_array", None)
        input_array = model_example_input if input_array is None else input_array

        if input_array is None:
            rank_zero_warn(
                "Could not log computational graph to TensorBoard: The `model.example_input_array` attribute"
                " is not set or `input_array` was not given."
            )
        elif not isinstance(input_array, (Tensor, tuple)):
            rank_zero_warn(
                "Could not log computational graph to TensorBoard: The `input_array` or `model.example_input_array`"
                f" has type {type(input_array)} which can't be traced by TensorBoard. Make the input array a tuple"
                f" representing the positional arguments to the model's `forward()` implementation."
            )
        elif callable(getattr(model, "_on_before_batch_transfer", None)) and callable(
            getattr(model, "_apply_batch_transfer_handler", None)
        ):
            # this is probably a LightningModule
            input_array = model._on_before_batch_transfer(input_array)  # type: ignore[operator]
            input_array = model._apply_batch_transfer_handler(input_array)  # type: ignore[operator]
            self.experiment.add_graph(model, input_array)

    @rank_zero_only
    def save(self) -> None:
        self.experiment.flush()

    @rank_zero_only
    def finalize(self, status: str) -> None:
        # Only touch the writer if it was ever created, so finalizing an unused logger is a no-op.
        if self._experiment is not None:
            self.experiment.flush()
            self.experiment.close()

    def _get_next_version(self) -> int:
        """Scan ``root_dir/name`` for ``version_*`` folders and return the next free index."""
        save_dir = os.path.join(self.root_dir, self.name)

        try:
            listdir_info = self._fs.listdir(save_dir)
        except OSError:
            # TODO(fabric): This message can be confusing (did user do something wrong?). Improve it or remove it.
            log.warning("Missing logger folder: %s", save_dir)
            return 0

        existing_versions = []
        for listing in listdir_info:
            d = listing["name"]
            bn = os.path.basename(d)
            if self._fs.isdir(d) and bn.startswith("version_"):
                dir_ver = bn.split("_")[1].replace("/", "")
                existing_versions.append(int(dir_ver))
        if len(existing_versions) == 0:
            return 0

        return max(existing_versions) + 1

    @staticmethod
    def _sanitize_params(params: Dict[str, Any]) -> Dict[str, Any]:
        params = _utils_sanitize_params(params)
        # logging of arrays with dimension > 1 is not supported, sanitize as string
        return {k: str(v) if isinstance(v, (Tensor, np.ndarray)) and v.ndim > 1 else v for k, v in params.items()}

    def __getstate__(self) -> Dict[str, Any]:
        # Drop the experiment writer from the pickled state (presumably it holds
        # unpicklable handles); it is recreated lazily on next `experiment` access.
        state = self.__dict__.copy()
        state["_experiment"] = None
        return state
wemm/lib/python3.10/site-packages/lightning_fabric/strategies/launchers/__pycache__/launcher.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/__init__.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """General utilities."""
15
+
16
+ from lightning_fabric.utilities.apply_func import move_data_to_device # noqa: F401
17
+ from lightning_fabric.utilities.enums import LightningEnum # noqa: F401
18
+ from lightning_fabric.utilities.rank_zero import ( # noqa: F401
19
+ rank_zero_deprecation,
20
+ rank_zero_info,
21
+ rank_zero_only,
22
+ rank_zero_warn,
23
+ )
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (519 Bytes). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/__pycache__/rank_zero.cpython-310.pyc ADDED
Binary file (1.41 kB). View file
 
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/apply_func.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Utilities used for collections."""
15
+ from abc import ABC
16
+ from functools import partial
17
+ from typing import Any, Callable, List, Tuple, Union
18
+
19
+ import numpy as np
20
+ import torch
21
+ from lightning_utilities.core.apply_func import apply_to_collection
22
+ from torch import Tensor
23
+
24
+ from lightning_fabric.utilities.types import _DEVICE
25
+
26
+ _BLOCKING_DEVICE_TYPES = ("cpu", "mps")
27
+
28
+
29
def _from_numpy(value: np.ndarray, device: _DEVICE) -> Tensor:
    """Convert a NumPy array to a tensor and move it to ``device``."""
    tensor = torch.from_numpy(value)  # type: ignore[arg-type]
    return tensor.to(device)
31
+
32
+
33
# Ordered (type, converter) pairs used by `convert_to_tensors` to turn plain Python and
# NumPy values into tensors on a target device.
# NOTE(review): `bool` precedes `int` and `bool` is a subclass of `int`, so the ordering
# presumably matters for dispatch — confirm against `apply_to_collection` semantics.
CONVERSION_DTYPES: List[Tuple[Any, Callable[[Any, Any], Tensor]]] = [
    # bool -> uint8 as bool -> torch.bool triggers RuntimeError: Unsupported data type for NCCL process group
    (bool, partial(torch.tensor, dtype=torch.uint8)),
    (int, partial(torch.tensor, dtype=torch.int)),
    (float, partial(torch.tensor, dtype=torch.float)),
    (np.ndarray, _from_numpy),
]
40
+
41
+
42
class _TransferableDataType(ABC):
    """A custom type for data that can be moved to a torch device via ``.to(...)``.

    Any object exposing a callable ``to`` attribute counts as an instance:

    >>> isinstance(dict, _TransferableDataType)
    False
    >>> isinstance(torch.rand(2, 3), _TransferableDataType)
    True
    >>> class CustomObject:
    ...     def __init__(self):
    ...         self.x = torch.rand(2, 2)
    ...     def to(self, device):
    ...         self.x = self.x.to(device)
    ...         return self
    >>> isinstance(CustomObject(), _TransferableDataType)
    True
    """

    @classmethod
    def __subclasshook__(cls, subclass: Any) -> Union[bool, Any]:
        if cls is not _TransferableDataType:
            return NotImplemented
        # Duck-typing: anything with a callable ``to`` attribute qualifies.
        return callable(getattr(subclass, "to", None))
67
+
68
+
69
def move_data_to_device(batch: Any, device: _DEVICE) -> Any:
    """Transfer a collection of data to the given device.

    Any object that defines a ``to(device)`` method is moved; every other object in the
    collection is left untouched.

    Args:
        batch: A tensor or collection of tensors or anything that has a method ``.to(...)``.
            See :func:`apply_to_collection` for a list of supported collection types.
        device: The device to which the data should be moved

    Return:
        the same collection but with all contained tensors residing on the new device.

    See Also:
        - :meth:`torch.Tensor.to`
        - :class:`torch.device`
    """
    target = torch.device(device) if isinstance(device, str) else device

    def _transfer(item: Any) -> Any:
        # Non-blocking transfers are avoided for CPU targets and for MPS
        # (race condition bug: https://github.com/pytorch/pytorch/issues/83015).
        use_non_blocking = (
            isinstance(item, Tensor)
            and isinstance(target, torch.device)
            and target.type not in _BLOCKING_DEVICE_TYPES
        )
        moved = item.to(target, non_blocking=True) if use_non_blocking else item.to(target)
        # A wrongly implemented `_TransferableDataType` may forget to return `self`.
        return item if moved is None else moved

    return apply_to_collection(batch, dtype=_TransferableDataType, function=_transfer)
102
+
103
+
104
def convert_to_tensors(data: Any, device: _DEVICE) -> Any:
    """Convert supported scalar/array leaves in ``data`` to tensors and move everything to ``device``."""
    converted = data
    # Convert non-tensor leaves type-by-type, then transfer the whole collection.
    for source_type, converter in CONVERSION_DTYPES:
        converted = apply_to_collection(converted, source_type, converter, device=device)
    return move_data_to_device(converted, device)
109
+
110
+
111
def convert_tensors_to_scalars(data: Any) -> Any:
    """Recursively walk through a collection and convert single-item tensors to scalar values.

    Raises:
        ValueError:
            If tensors inside ``metrics`` contains multiple elements, hence preventing conversion to a scalar.
    """

    def _to_scalar(value: Tensor) -> Union[int, float, bool]:
        if value.numel() == 1:
            return value.item()
        raise ValueError(
            f"The metric `{value}` does not contain a single element, thus it cannot be converted to a scalar."
        )

    return apply_to_collection(data, Tensor, _to_scalar)
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/device_dtype_mixin.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any, List, Optional, Union
16
+
17
+ import torch
18
+ from torch.nn import Module
19
+ from typing_extensions import Self
20
+
21
+
22
class _DeviceDtypeModuleMixin(Module):
    """Mixin for :class:`torch.nn.Module` that tracks the device and dtype the module was last moved to.

    Every placement-changing method (``to``, ``cuda``, ``cpu``, ``type``, ``float``, ``double``,
    ``half``) records the requested device/dtype on all submodules that also use this mixin before
    delegating to the parent :class:`~torch.nn.Module` implementation.
    """

    # Keep these computed properties out of TorchScript compilation.
    __jit_unused_properties__: List[str] = ["device", "dtype"]

    def __init__(self) -> None:
        super().__init__()
        # tracked placement state; updated by the overridden move methods below
        self._dtype: Union[str, torch.dtype] = torch.get_default_dtype()
        self._device = torch.device("cpu")

    @property
    def dtype(self) -> Union[str, torch.dtype]:
        """The dtype this module was last moved to (defaults to the global default dtype)."""
        return self._dtype

    @dtype.setter
    def dtype(self, new_dtype: Union[str, torch.dtype]) -> None:
        # necessary to avoid infinite recursion
        raise RuntimeError("Cannot set the dtype explicitly. Please use module.to(new_dtype).")

    @property
    def device(self) -> torch.device:
        """The device this module was last moved to, always with an explicit CUDA index."""
        device = self._device

        # make this more explicit to always include the index
        if device.type == "cuda" and device.index is None:
            return torch.device(f"cuda:{torch.cuda.current_device()}")

        return device

    def to(self, *args: Any, **kwargs: Any) -> Self:
        """See :meth:`torch.nn.Module.to`."""
        # this converts `str` device to `torch.device`
        # NOTE(review): relies on the private `torch._C._nn._parse_to` helper to mirror
        # the argument parsing of `nn.Module.to` — verify on torch upgrades.
        device, dtype = torch._C._nn._parse_to(*args, **kwargs)[:2]
        self.__update_properties(device=device, dtype=dtype)
        return super().to(*args, **kwargs)

    def cuda(self, device: Optional[Union[torch.device, int]] = None) -> Self:
        """Moves all model parameters and buffers to the GPU. This also makes associated parameters and buffers
        different objects. So it should be called before constructing optimizer if the module will live on GPU
        while being optimized.

        Arguments:
            device: If specified, all parameters will be copied to that device. If `None`, the current CUDA device
                index will be used.

        Returns:
            Module: self
        """
        if device is None:
            device = torch.device("cuda", torch.cuda.current_device())
        elif isinstance(device, int):
            device = torch.device("cuda", index=device)
        self.__update_properties(device=device)
        return super().cuda(device=device)

    def cpu(self) -> Self:
        """See :meth:`torch.nn.Module.cpu`."""
        self.__update_properties(device=torch.device("cpu"))
        return super().cpu()

    def type(self, dst_type: Union[str, torch.dtype]) -> Self:
        """See :meth:`torch.nn.Module.type`."""
        self.__update_properties(dtype=dst_type)
        return super().type(dst_type=dst_type)

    def float(self) -> Self:
        """See :meth:`torch.nn.Module.float`."""
        self.__update_properties(dtype=torch.float)
        return super().float()

    def double(self) -> Self:
        """See :meth:`torch.nn.Module.double`."""
        self.__update_properties(dtype=torch.double)
        return super().double()

    def half(self) -> Self:
        """See :meth:`torch.nn.Module.half`."""
        self.__update_properties(dtype=torch.half)
        return super().half()

    def __update_properties(
        self, device: Optional[torch.device] = None, dtype: Optional[Union[str, torch.dtype]] = None
    ) -> None:
        # Walk the whole submodule tree and stamp the new device/dtype onto every
        # submodule that also carries this mixin; plain Modules are left untouched.
        def apply_fn(module: Union[_DeviceDtypeModuleMixin, Module]) -> None:
            if not isinstance(module, _DeviceDtypeModuleMixin):
                return
            if device is not None:
                module._device = device
            if dtype is not None:
                module._dtype = dtype

        self.apply(apply_fn)
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/device_parser.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, List, MutableSequence, Optional, Tuple, Union
15
+
16
+ import lightning_fabric.accelerators as accelerators # avoid circular dependency
17
+ from lightning_fabric.plugins.environments.torchelastic import TorchElasticEnvironment
18
+ from lightning_fabric.utilities.exceptions import MisconfigurationException
19
+ from lightning_fabric.utilities.types import _DEVICE
20
+
21
+
22
+ def _determine_root_gpu_device(gpus: List[_DEVICE]) -> Optional[_DEVICE]:
23
+ """
24
+ Args:
25
+ gpus: Non-empty list of ints representing which GPUs to use
26
+
27
+ Returns:
28
+ Designated root GPU device id
29
+
30
+ Raises:
31
+ TypeError:
32
+ If ``gpus`` is not a list
33
+ AssertionError:
34
+ If GPU list is empty
35
+ """
36
+ if gpus is None:
37
+ return None
38
+
39
+ if not isinstance(gpus, list):
40
+ raise TypeError("GPUs should be a list")
41
+
42
+ assert len(gpus) > 0, "GPUs should be a non-empty list"
43
+
44
+ # set root gpu
45
+ root_gpu = gpus[0]
46
+
47
+ return root_gpu
48
+
49
+
50
def _parse_gpu_ids(
    gpus: Optional[Union[int, str, List[int]]],
    include_cuda: bool = False,
    include_mps: bool = False,
) -> Optional[List[int]]:
    """
    Parses the GPU IDs given in the format as accepted by the
    :class:`~pytorch_lightning.trainer.Trainer`.

    Args:
        gpus: An int -1 or string '-1' indicate that all available GPUs should be used.
            A list of unique ints or a string containing a list of comma separated unique integers
            indicates specific GPUs to use.
            An int of 0 means that no GPUs should be used.
            Any int N > 0 indicates that GPUs [0..N) should be used.
        include_cuda: A boolean value indicating whether to include CUDA devices for GPU parsing.
        include_mps: A boolean value indicating whether to include MPS devices for GPU parsing.

    Returns:
        A list of GPUs to be used or ``None`` if no GPUs were requested

    Raises:
        MisconfigurationException:
            If no GPUs are available but the value of gpus variable indicates request for GPUs

    .. note::
        ``include_cuda`` and ``include_mps`` default to ``False`` so that you only
        have to specify which device type to use and all other devices are not disabled.
    """
    # Check that gpus param is None, Int, String or Sequence of Ints
    _check_data_type(gpus)

    # Handle the case when no GPUs are requested
    # (covers None, int 0, string "0", and empty-list string "[]")
    if gpus is None or (isinstance(gpus, int) and gpus == 0) or str(gpus).strip() in ("0", "[]"):
        return None

    # We know the user requested GPUs therefore if some of the
    # requested GPUs are not available an exception is thrown.
    gpus = _normalize_parse_gpu_string_input(gpus)
    gpus = _normalize_parse_gpu_input_to_list(gpus, include_cuda=include_cuda, include_mps=include_mps)
    if not gpus:
        raise MisconfigurationException("GPUs requested but none are available.")

    if (
        TorchElasticEnvironment.detect()
        and len(gpus) != 1
        and len(_get_all_available_gpus(include_cuda=include_cuda, include_mps=include_mps)) == 1
    ):
        # Omit sanity check on torchelastic because by default it shows one visible GPU per process
        return gpus

    # Check that GPUs are unique. Duplicate GPUs are not supported by the backend.
    _check_unique(gpus)

    return _sanitize_gpu_ids(gpus, include_cuda=include_cuda, include_mps=include_mps)
105
+
106
+
107
+ def _normalize_parse_gpu_string_input(s: Union[int, str, List[int]]) -> Union[int, List[int]]:
108
+ if not isinstance(s, str):
109
+ return s
110
+ if s == "-1":
111
+ return -1
112
+ if "," in s:
113
+ return [int(x.strip()) for x in s.split(",") if len(x) > 0]
114
+ return int(s.strip())
115
+
116
+
117
def _sanitize_gpu_ids(gpus: List[int], include_cuda: bool = False, include_mps: bool = False) -> List[int]:
    """Checks that each of the GPUs in the list is actually available. Raises a MisconfigurationException if any of
    the GPUs is not available.

    Args:
        gpus: List of ints corresponding to GPU indices

    Returns:
        Unmodified gpus variable

    Raises:
        MisconfigurationException:
            If machine has fewer available GPUs than requested.
    """
    # at least one backend must be enabled, otherwise nothing could ever match
    if not include_cuda and not include_mps:
        raise ValueError("At least one gpu type should be specified!")
    available = _get_all_available_gpus(include_cuda=include_cuda, include_mps=include_mps)
    for requested in gpus:
        if requested not in available:
            raise MisconfigurationException(
                f"You requested gpu: {gpus}\n But your machine only has: {available}"
            )
    return gpus
140
+
141
+
142
+ def _normalize_parse_gpu_input_to_list(
143
+ gpus: Union[int, List[int], Tuple[int, ...]], include_cuda: bool, include_mps: bool
144
+ ) -> Optional[List[int]]:
145
+ assert gpus is not None
146
+ if isinstance(gpus, (MutableSequence, tuple)):
147
+ return list(gpus)
148
+
149
+ # must be an int
150
+ if not gpus: # gpus==0
151
+ return None
152
+ if gpus == -1:
153
+ return _get_all_available_gpus(include_cuda=include_cuda, include_mps=include_mps)
154
+
155
+ return list(range(gpus))
156
+
157
+
158
def _get_all_available_gpus(include_cuda: bool = False, include_mps: bool = False) -> List[int]:
    """
    Returns:
        A list of all available GPUs
    """
    # Query each backend only when explicitly requested; CUDA ids come first.
    devices: List[int] = []
    if include_cuda:
        devices += accelerators.cuda._get_all_visible_cuda_devices()
    if include_mps:
        devices += accelerators.mps._get_all_available_mps_gpus()
    return devices
166
+
167
+
168
+ def _check_unique(device_ids: List[int]) -> None:
169
+ """Checks that the device_ids are unique.
170
+
171
+ Args:
172
+ device_ids: List of ints corresponding to GPUs indices
173
+
174
+ Raises:
175
+ MisconfigurationException:
176
+ If ``device_ids`` of GPUs aren't unique
177
+ """
178
+ if len(device_ids) != len(set(device_ids)):
179
+ raise MisconfigurationException("Device ID's (GPU) must be unique.")
180
+
181
+
182
+ def _check_data_type(device_ids: Any) -> None:
183
+ """Checks that the device_ids argument is one of the following: None, int, string, or sequence of integers.
184
+
185
+ Args:
186
+ device_ids: gpus/tpu_cores parameter as passed to the Trainer
187
+
188
+ Raises:
189
+ MisconfigurationException:
190
+ If ``device_ids`` of GPU/TPUs aren't ``int``, ``str``, sequence of ``int`` or ``None``
191
+ """
192
+ msg = "Device IDs (GPU/TPU) must be an int, a string, a sequence of ints or None, but you passed"
193
+
194
+ if device_ids is None:
195
+ return
196
+ elif isinstance(device_ids, (MutableSequence, tuple)):
197
+ for id_ in device_ids:
198
+ if type(id_) is not int:
199
+ raise MisconfigurationException(f"{msg} a sequence of {type(id_).__name__}.")
200
+ elif type(device_ids) not in (int, str):
201
+ raise MisconfigurationException(f"{msg} {type(device_ids).__name__}.")
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/distributed.py ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ import sys
4
+ from contextlib import nullcontext
5
+ from typing import Any, Iterable, Iterator, List, Optional, Sized, Tuple, Union
6
+
7
+ import torch
8
+ import torch.nn.functional as F
9
+ from lightning_utilities.core.imports import module_available
10
+ from torch import Tensor
11
+ from torch.utils.data import Dataset, DistributedSampler, Sampler
12
+
13
+ from lightning_fabric.plugins.environments.cluster_environment import ClusterEnvironment
14
+ from lightning_fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_12
15
+ from lightning_fabric.utilities.rank_zero import rank_zero_info
16
+ from lightning_fabric.utilities.types import ReduceOp
17
+
18
+ if torch.distributed.is_available():
19
+ from torch.distributed import group
20
+ else:
21
+
22
+ class group: # type: ignore
23
+ WORLD = None
24
+
25
+
26
+ log = logging.getLogger(__name__)
27
+
28
+
29
def _gather_all_tensors(result: Tensor, group: Optional[Any] = None) -> List[Tensor]:
    """Function to gather all tensors from several DDP processes onto a list that is broadcasted to all processes.

    Works on tensors that have the same number of dimensions, but where each dimension may differ. In this case
    tensors are padded, gathered and then trimmed to secure equal workload for all processes.

    Args:
        result: The value to sync
        group: The process group to gather results from. Defaults to all processes (world)

    Return:
        gathered_result: List with size equal to the process group where
        gathered_result[i] corresponds to result tensor from process i
    """
    if group is None:
        group = torch.distributed.group.WORLD

    # Convert tensors to contiguous format
    result = result.contiguous()

    world_size = torch.distributed.get_world_size(group)
    torch.distributed.barrier(group=group)

    # If the tensor is scalar, things are easy
    if result.ndim == 0:
        return _simple_gather_all_tensors(result, group, world_size)

    # 1. Gather sizes of all tensors
    local_size = torch.tensor(result.shape, device=result.device)
    local_sizes = [torch.zeros_like(local_size) for _ in range(world_size)]
    torch.distributed.all_gather(local_sizes, local_size, group=group)
    max_size = torch.stack(local_sizes).max(dim=0).values
    all_sizes_equal = all(all(ls == max_size) for ls in local_sizes)

    # 2. If shapes are all the same, then do a simple gather:
    if all_sizes_equal:
        return _simple_gather_all_tensors(result, group, world_size)

    # 3. If not, we need to pad each local tensor to maximum size, gather and then truncate
    pad_dims = []
    pad_by = (max_size - local_size).detach().cpu()
    # F.pad takes (left, right) pad amounts starting from the LAST dimension, hence the reversal
    for val in reversed(pad_by):
        pad_dims.append(0)
        pad_dims.append(val.item())
    result_padded = F.pad(result, pad_dims)
    gathered_result = [torch.zeros_like(result_padded) for _ in range(world_size)]
    torch.distributed.all_gather(gathered_result, result_padded, group)
    # trim each gathered tensor back to its sender's original shape
    for idx, item_size in enumerate(local_sizes):
        slice_param = [slice(dim_size) for dim_size in item_size]
        gathered_result[idx] = gathered_result[idx][slice_param]
    return gathered_result
80
+
81
+
82
def _simple_gather_all_tensors(result: Tensor, group: Any, world_size: int) -> List[Tensor]:
    """All-gather ``result`` from every process in ``group``, assuming identical shapes on all ranks."""
    buffers = [torch.zeros_like(result) for _ in range(world_size)]
    torch.distributed.all_gather(buffers, result, group)
    return buffers
86
+
87
+
88
def _distributed_available() -> bool:
    """Return ``True`` if a torch.distributed process group is initialized or TPU distribution is active."""
    from lightning_fabric.accelerators.tpu import _tpu_distributed

    # explicit grouping: (available and initialized) or tpu — same precedence as the bare expression
    return (torch.distributed.is_available() and torch.distributed.is_initialized()) or _tpu_distributed()
92
+
93
+
94
def _sync_ddp_if_available(
    result: Tensor, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = None
) -> Tensor:
    """Reduce ``result`` across worker processes when running distributed; otherwise return it unchanged.

    Args:
        result: The value to sync and reduce (typically tensor or number)
        group: The process group to gather results from. Defaults to all processes (world)
        reduce_op: The reduction operation. Defaults to sum.
            Can also be a string of 'avg', 'mean' to calculate the mean during reduction.

    Return:
        reduced value
    """
    # guard clause: outside a distributed context there is nothing to reduce
    if not _distributed_available():
        return result
    return _sync_ddp(result, group=group, reduce_op=reduce_op)
111
+
112
+
113
def _sync_ddp(result: Tensor, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = None) -> Tensor:
    """Function to reduce the tensors from several DDP processes to one main process.

    Args:
        result: The value to sync and reduce (typically tensor or number)
        group: The process group to gather results from. Defaults to all processes (world)
        reduce_op: The reduction operation. Defaults to sum.
            Can also be a string of 'avg', 'mean' to calculate the mean during reduction.

    Return:
        reduced value
    """
    divide_by_world_size = False

    if group is None:
        group = torch.distributed.group.WORLD

    op: Optional[ReduceOp]
    if isinstance(reduce_op, str):
        if reduce_op.lower() in ("avg", "mean"):
            # mean is implemented as SUM followed by a division by the world size below
            op = ReduceOp.SUM  # type: ignore[assignment]
            divide_by_world_size = True
        else:
            # any other string is mapped onto the ReduceOp enum member of the same name
            op = getattr(ReduceOp, reduce_op.upper())
    else:
        op = reduce_op

    # WA for HPU. HPU doesn't support Long types, forcefully set it to float
    if module_available("habana_frameworks.torch.utils.library_loader"):
        # NOTE: `is_habana_avaialble` is the (misspelled) name exported by the habana package
        from habana_frameworks.torch.utils.library_loader import is_habana_avaialble

        if (
            is_habana_avaialble()
            and os.environ.get("HCCL_DISTRIBUTED_BACKEND") == "1"
            and result.type() in ("torch.LongTensor", "torch.hpu.LongTensor")
        ):
            rank_zero_info("Long tensor unsupported on HPU, casting to float")
            result = result.float()

    # Sync all processes before reduction
    torch.distributed.barrier(group=group)
    torch.distributed.all_reduce(result, op=op, group=group, async_op=False)

    if divide_by_world_size:
        result = result / torch.distributed.get_world_size(group)

    return result
160
+
161
+
162
class _AllGather(torch.autograd.Function):
    """Autograd-aware all_gather: gathers tensors in ``forward`` and all-reduces gradients in ``backward``.

    Kept as a fallback for ``torch.distributed.nn.functional.all_gather`` (see
    ``_functional_all_gather``), to be removed when support for torch < 1.12 is dropped.
    """

    @staticmethod
    def forward(  # type: ignore[override]
        ctx: Any,
        tensor: Tensor,
        group: Optional["torch.distributed.ProcessGroup"] = group.WORLD,
    ) -> Tensor:
        # remember the group so backward can reduce over the same set of processes
        ctx.group = group
        gathered_tensor = [torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size(group=group))]
        torch.distributed.all_gather(gathered_tensor, tensor, group=group)
        # stack into one tensor of shape (world_size, *tensor.shape)
        gathered_tensor = torch.stack(gathered_tensor, dim=0)
        return gathered_tensor

    @staticmethod
    def backward(ctx: Any, *grad_output: Tensor) -> Tuple[Tensor, None]:
        grad_output = torch.cat(grad_output)
        # sum gradient contributions from all ranks, then keep only this rank's slice
        torch.distributed.all_reduce(grad_output, op=torch.distributed.ReduceOp.SUM, async_op=False, group=ctx.group)
        return grad_output[torch.distributed.get_rank()], None
180
+
181
+
182
def _functional_all_gather(tensor: Any, group: Any) -> Any:
    """Compatibility layer with Windows."""
    needs_fallback = sys.platform == "win32" and not _TORCH_GREATER_EQUAL_1_12
    if not needs_fallback:
        import torch.distributed.nn

        return torch.distributed.nn.functional.all_gather(tensor, group)
    # TODO: also remove `_AllGather` when support for 1.12 is dropped
    return _AllGather.apply(tensor, group)
191
+
192
+
193
def _all_gather_ddp_if_available(
    tensor: Tensor, group: Optional["torch.distributed.ProcessGroup"] = None, sync_grads: bool = False
) -> Tensor:
    """Function to gather a tensor from several distributed processes.

    Args:
        tensor: Tensor of shape (batch, ...)
        group: The process group to gather results from. Defaults to all processes (world)
        sync_grads: Flag that allows users to synchronize gradients for all_gather op

    Return:
        A tensor of shape (world_size, batch, ...)
    """
    if not _distributed_available():
        return tensor
    # https://github.com/pytorch/pytorch/issues/73515
    tensor = tensor.contiguous()
    # keep the autograd graph only when the caller asked for gradient synchronization
    grad_context = nullcontext() if sync_grads else torch.no_grad()
    with grad_context:
        gathered = _functional_all_gather(tensor, group)
    return torch.stack(gathered)
212
+
213
+
214
def _init_dist_connection(
    cluster_environment: ClusterEnvironment,
    torch_distributed_backend: str,
    global_rank: Optional[int] = None,
    world_size: Optional[int] = None,
    **kwargs: Any,
) -> None:
    """Utility function to initialize distributed connection by setting env variables and initializing the
    distributed process group.

    Args:
        cluster_environment: ``ClusterEnvironment`` instance
        torch_distributed_backend: Backend to use (includes `nccl` and `gloo`)
        global_rank: Rank of the current process
        world_size: Number of processes in the group
        kwargs: Kwargs for ``init_process_group``

    Raises:
        RuntimeError:
            If ``torch.distributed`` is not available
    """
    if not torch.distributed.is_available():
        raise RuntimeError("torch.distributed is not available. Cannot initialize distributed process group")
    if torch.distributed.is_initialized():
        # initializing a second time would raise; treat repeated calls as a no-op
        log.debug("torch.distributed is already initialized. Exiting early")
        return
    # fall back to the cluster environment when rank/world size are not given explicitly
    global_rank = global_rank if global_rank is not None else cluster_environment.global_rank()
    world_size = world_size if world_size is not None else cluster_environment.world_size()
    # torch.distributed reads its rendezvous endpoint from these environment variables
    os.environ["MASTER_ADDR"] = cluster_environment.main_address
    os.environ["MASTER_PORT"] = str(cluster_environment.main_port)
    log.info(f"Initializing distributed: GLOBAL_RANK: {global_rank}, MEMBER: {global_rank + 1}/{world_size}")
    torch.distributed.init_process_group(torch_distributed_backend, rank=global_rank, world_size=world_size, **kwargs)

    # On rank=0 let everyone know training is starting
    rank_zero_info(
        f"{'-' * 100}\n"
        f"distributed_backend={torch_distributed_backend}\n"
        f"All distributed processes registered. Starting with {world_size} processes\n"
        f"{'-' * 100}\n"
    )
254
+
255
+
256
+ def _get_default_process_group_backend_for_device(device: torch.device) -> str:
257
+ return "nccl" if device.type == "cuda" else "gloo"
258
+
259
+
260
+ class _DatasetSamplerWrapper(Dataset):
261
+ """Dataset to create indexes from `Sampler` or `Iterable`"""
262
+
263
+ def __init__(self, sampler: Union[Sampler, Iterable]) -> None:
264
+ if not isinstance(sampler, Sized):
265
+ raise TypeError(
266
+ "You seem to have configured a sampler in your DataLoader which"
267
+ " does not provide `__len__` method. The sampler was about to be"
268
+ " replaced by `DistributedSamplerWrapper` since `use_distributed_sampler`"
269
+ " is True and you are using distributed training. Either provide `__len__`"
270
+ " method in your sampler, remove it from DataLoader or set `use_distributed_sampler=False`"
271
+ " if you want to handle distributed sampling yourself."
272
+ )
273
+ if len(sampler) == float("inf"):
274
+ raise TypeError(
275
+ "You seem to have configured a sampler in your DataLoader which"
276
+ " does not provide finite `__len__` method. The sampler was about to be"
277
+ " replaced by `DistributedSamplerWrapper` since `use_distributed_sampler`"
278
+ " is True and you are using distributed training. Either provide `__len__`"
279
+ " method in your sampler which returns a finite number, remove it from DataLoader"
280
+ " or set `use_distributed_sampler=False` if you want to handle distributed sampling yourself."
281
+ )
282
+ self._sampler = sampler
283
+ # defer materializing an iterator until it is necessary
284
+ self._sampler_list: Optional[List[Any]] = None
285
+
286
+ def __getitem__(self, index: int) -> Any:
287
+ if self._sampler_list is None:
288
+ self._sampler_list = list(self._sampler)
289
+ return self._sampler_list[index]
290
+
291
+ def __len__(self) -> int:
292
+ return len(self._sampler)
293
+
294
+ def reset(self) -> None:
295
+ """Reset the sampler list in order to get new sampling."""
296
+ self._sampler_list = list(self._sampler)
297
+
298
+
299
class DistributedSamplerWrapper(DistributedSampler):
    """Wrapper over ``Sampler`` for distributed training.

    Allows you to use any sampler in distributed mode. It will be automatically used by Lightning in distributed mode if
    sampler replacement is enabled.

    Note:
        The purpose of this wrapper is to take care of sharding the sampler indices. It is up to the underlying
        sampler to handle randomness and shuffling. The ``shuffle`` and ``seed`` arguments on this wrapper won't
        have any effect.
    """

    def __init__(self, sampler: Union[Sampler, Iterable], *args: Any, **kwargs: Any) -> None:
        # wrap the user sampler so the parent DistributedSampler can shard its indices
        super().__init__(_DatasetSamplerWrapper(sampler), *args, **kwargs)

    def __iter__(self) -> Iterator:
        # take a fresh snapshot of the wrapped sampler, then map the sharded
        # indices produced by the parent class back onto the sampler's items
        self.dataset.reset()
        return (self.dataset[i] for i in super().__iter__())
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/enums.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Enumerated utilities."""
15
+ from __future__ import annotations
16
+
17
+ from typing import TYPE_CHECKING
18
+
19
+ from lightning_utilities.core.enums import StrEnum
20
+
21
if TYPE_CHECKING:
    from enum import Enum

    # re-defined because `mypy` infers `StrEnum` as `Any`
    class LightningEnum(StrEnum, Enum):
        ...

else:
    # At runtime ``LightningEnum`` is simply an alias of ``StrEnum``; the class above
    # exists only so static type checkers see a concrete Enum type.
    LightningEnum = StrEnum
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/exceptions.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
class MisconfigurationException(Exception):
    """Exception used to inform users of misuse with Lightning.

    Raised for invalid user-provided configuration (e.g. requesting unavailable or duplicate GPUs).
    """
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/optimizer.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Iterable
16
+
17
+ from lightning_utilities.core.apply_func import apply_to_collection
18
+ from torch import Tensor
19
+ from torch.optim import Optimizer
20
+
21
+ from lightning_fabric.utilities.apply_func import move_data_to_device
22
+ from lightning_fabric.utilities.types import _DEVICE
23
+
24
+
25
def _optimizers_to_device(optimizers: Iterable[Optimizer], device: _DEVICE) -> None:
    """Moves optimizer states for a sequence of optimizers to the device."""
    # delegate the per-optimizer work to the single-optimizer helper
    for optimizer in optimizers:
        _optimizer_to_device(optimizer, device)
29
+
30
+
31
def _optimizer_to_device(optimizer: Optimizer, device: _DEVICE) -> None:
    """Moves the state of a single optimizer to the device."""
    # Rewrite each per-parameter state entry with all contained tensors moved to ``device``.
    for param, state in optimizer.state.items():
        optimizer.state[param] = apply_to_collection(state, Tensor, move_data_to_device, device, allow_frozen=True)
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/registry.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+ from typing import Any
16
+
17
+
18
+ def _is_register_method_overridden(mod: type, base_cls: Any, method: str) -> bool:
19
+ mod_attr = getattr(mod, method)
20
+ previous_super_cls = inspect.getmro(mod)[1]
21
+
22
+ if issubclass(previous_super_cls, base_cls):
23
+ super_attr = getattr(previous_super_cls, method)
24
+ else:
25
+ return False
26
+
27
+ return mod_attr.__code__ is not super_attr.__code__
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/seed.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ import random
4
+ from random import getstate as python_get_rng_state
5
+ from random import setstate as python_set_rng_state
6
+ from typing import Any, Dict, Optional
7
+
8
+ import numpy as np
9
+ import torch
10
+
11
+ from lightning_fabric.utilities.rank_zero import _get_rank, rank_prefixed_message, rank_zero_only, rank_zero_warn
12
+
13
+ log = logging.getLogger(__name__)
14
+
15
+ max_seed_value = np.iinfo(np.uint32).max
16
+ min_seed_value = np.iinfo(np.uint32).min
17
+
18
+
19
def seed_everything(seed: Optional[int] = None, workers: bool = False) -> int:
    """Seed every pseudo-random number generator Lightning touches: Python's
    ``random``, numpy, and torch (including all CUDA devices).

    Also exports two environment variables:

    - ``PL_GLOBAL_SEED``: picked up by spawned subprocesses (e.g. ddp_spawn backend).
    - ``PL_SEED_WORKERS``: set to ``1`` when ``workers=True``.

    Args:
        seed: the integer value seed for global random state in Lightning.
            If ``None``, the seed is read from the ``PL_GLOBAL_SEED`` env
            variable or selected randomly.
        workers: if ``True``, dataloaders passed to the Trainer are configured
            with a ``worker_init_fn`` (a user-provided one takes precedence).
            See also: :func:`~lightning_fabric.utilities.seed.pl_worker_init_function`.

    Returns:
        The seed that was actually applied.
    """
    if seed is None:
        env_value = os.environ.get("PL_GLOBAL_SEED")
        if env_value is None:
            # No seed provided anywhere: pick one at random and tell the user.
            seed = _select_seed_randomly(min_seed_value, max_seed_value)
            rank_zero_warn(f"No seed found, seed set to {seed}")
        else:
            try:
                seed = int(env_value)
            except ValueError:
                seed = _select_seed_randomly(min_seed_value, max_seed_value)
                rank_zero_warn(f"Invalid seed found: {repr(env_value)}, seed set to {seed}")
    elif not isinstance(seed, int):
        seed = int(seed)

    # numpy only accepts uint32 seeds; out-of-range values are replaced.
    if not (min_seed_value <= seed <= max_seed_value):
        rank_zero_warn(f"{seed} is not in bounds, numpy accepts from {min_seed_value} to {max_seed_value}")
        seed = _select_seed_randomly(min_seed_value, max_seed_value)

    log.info(rank_prefixed_message(f"Global seed set to {seed}", _get_rank()))
    os.environ["PL_GLOBAL_SEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    os.environ["PL_SEED_WORKERS"] = f"{int(workers)}"

    return seed
63
+
64
+
65
def _select_seed_randomly(min_seed_value: int = min_seed_value, max_seed_value: int = max_seed_value) -> int:
    """Draw a seed uniformly from the inclusive range [min_seed_value, max_seed_value]."""
    return random.randint(min_seed_value, max_seed_value)
67
+
68
+
69
def reset_seed() -> None:
    """Reset the seed to the value that :func:`lightning_fabric.utilities.seed.seed_everything` previously set.

    If :func:`lightning_fabric.utilities.seed.seed_everything` is unused, this function will do nothing.
    """
    stored_seed = os.environ.get("PL_GLOBAL_SEED", None)
    if stored_seed is None:
        # seed_everything never ran in this process: nothing to restore.
        return
    workers_flag = os.environ.get("PL_SEED_WORKERS", "0")
    seed_everything(int(stored_seed), workers=bool(int(workers_flag)))
79
+
80
+
81
def pl_worker_init_function(worker_id: int, rank: Optional[int] = None) -> None:  # pragma: no cover
    """The worker_init_fn that Lightning automatically adds to your dataloader if you previously set the seed with
    ``seed_everything(seed, workers=True)``.

    See also the PyTorch documentation on
    `randomness in DataLoaders <https://pytorch.org/docs/stable/notes/randomness.html#dataloader>`_.

    Args:
        worker_id: index of the dataloader worker process (supplied by PyTorch).
        rank: global rank of the current process; falls back to ``rank_zero_only.rank`` when ``None``.
    """
    # implementation notes: https://github.com/pytorch/pytorch/issues/5059#issuecomment-817392562
    global_rank = rank if rank is not None else rank_zero_only.rank
    # PyTorch seeds each worker with base_seed + worker_id, so subtracting
    # worker_id recovers the shared base seed.
    process_seed = torch.initial_seed()
    # back out the base seed so we can use all the bits
    base_seed = process_seed - worker_id
    log.debug(
        f"Initializing random number generators of process {global_rank} worker {worker_id} with base seed {base_seed}"
    )
    # Entropy pool combines base seed, worker index and rank so every
    # (rank, worker) pair gets an independent stream.
    ss = np.random.SeedSequence([base_seed, worker_id, global_rank])
    # use 128 bits (4 x 32-bit words)
    np.random.seed(ss.generate_state(4))
    # Spawn distinct SeedSequences for the PyTorch PRNG and the stdlib random module
    torch_ss, stdlib_ss = ss.spawn(2)
    torch.manual_seed(torch_ss.generate_state(1, dtype=np.uint64)[0])
    # use 128 bits expressed as an integer
    stdlib_seed = (stdlib_ss.generate_state(2, dtype=np.uint64).astype(object) * [1 << 64, 1]).sum()
    random.seed(stdlib_seed)
105
+
106
+
107
+ def _collect_rng_states(include_cuda: bool = True) -> Dict[str, Any]:
108
+ """Collect the global random state of :mod:`torch`, :mod:`torch.cuda`, :mod:`numpy` and Python."""
109
+ states = {
110
+ "torch": torch.get_rng_state(),
111
+ "numpy": np.random.get_state(),
112
+ "python": python_get_rng_state(),
113
+ }
114
+ if include_cuda:
115
+ states["torch.cuda"] = torch.cuda.get_rng_state_all()
116
+ return states
117
+
118
+
119
+ def _set_rng_states(rng_state_dict: Dict[str, Any]) -> None:
120
+ """Set the global random state of :mod:`torch`, :mod:`torch.cuda`, :mod:`numpy` and Python in the current
121
+ process."""
122
+ torch.set_rng_state(rng_state_dict["torch"])
123
+ # torch.cuda rng_state is only included since v1.8.
124
+ if "torch.cuda" in rng_state_dict:
125
+ torch.cuda.set_rng_state_all(rng_state_dict["torch.cuda"])
126
+ np.random.set_state(rng_state_dict["numpy"])
127
+ version, state, gauss = rng_state_dict["python"]
128
+ python_set_rng_state((version, tuple(state), gauss))
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/types.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from pathlib import Path
15
+ from typing import Any, Callable, Dict, Iterator, List, Optional, Protocol, runtime_checkable, TypeVar, Union
16
+
17
+ import torch
18
+ from torch import Tensor
19
+ from torch.optim import Optimizer
20
+ from typing_extensions import TypeAlias
21
+
22
+ from lightning_fabric.utilities.imports import _TORCH_GREATER_EQUAL_1_13, _TORCH_GREATER_EQUAL_2_0
23
+
24
+ _PATH = Union[str, Path]
25
+ _DEVICE = Union[torch.device, str, int]
26
+ _MAP_LOCATION_TYPE = Optional[Union[_DEVICE, Callable[[_DEVICE], _DEVICE], Dict[_DEVICE, _DEVICE]]]
27
+ _PARAMETERS = Iterator[torch.nn.Parameter]
28
+
29
+
30
+ if torch.distributed.is_available():
31
+ from torch.distributed import ProcessGroup, ReduceOp
32
+
33
+ RedOpType: TypeAlias = ReduceOp.RedOpType if _TORCH_GREATER_EQUAL_1_13 else object # type: ignore[valid-type]
34
+ else:
35
+ ProcessGroup = Any # type: ignore[assignment,misc]
36
+ ReduceOp = object # type: ignore[assignment,misc] # we are using isinstance check once
37
+ RedOpType = object
38
+
39
+
40
+ _DictKey = TypeVar("_DictKey")
41
+
42
+
43
@runtime_checkable
class _Stateful(Protocol[_DictKey]):
    """This class is used to detect if an object is stateful using `isinstance(obj, _Stateful)`."""

    # NOTE: runtime_checkable isinstance checks only verify method *presence*,
    # not signatures; the bodies below are intentionally stubs.
    def state_dict(self) -> Dict[_DictKey, Any]:
        ...

    def load_state_dict(self, state_dict: Dict[_DictKey, Any]) -> None:
        ...
52
+
53
+
54
@runtime_checkable
class CollectibleGroup(Protocol):
    """Structural type for process-group-like objects exposing ``size`` and ``rank``."""

    def size(self) -> int:
        ...

    def rank(self) -> int:
        ...
61
+
62
+
63
# Inferred from `torch.optim.lr_scheduler.pyi`
# Missing attributes were added to improve typing
@runtime_checkable
class LRScheduler(_Stateful[str], Protocol):
    """Structural type matching torch's learning-rate schedulers (stateful, steppable)."""

    optimizer: Optimizer
    base_lrs: List[float]

    def __init__(self, optimizer: Optimizer, *args: Any, **kwargs: Any) -> None:
        ...

    def step(self, epoch: Optional[int] = None) -> None:
        ...
75
+
76
+
77
+ _TORCH_LRSCHEDULER: TypeAlias = (
78
+ torch.optim.lr_scheduler.LRScheduler # type: ignore[valid-type]
79
+ if _TORCH_GREATER_EQUAL_2_0
80
+ else torch.optim.lr_scheduler._LRScheduler
81
+ )
82
+
83
+
84
# Inferred from `torch.optim.lr_scheduler.pyi`
# Missing attributes were added to improve typing
@runtime_checkable
class ReduceLROnPlateau(_Stateful[str], Protocol):
    """Structural type for plateau-based schedulers, whose ``step`` takes a metric value."""

    in_cooldown: bool
    optimizer: Optimizer

    def __init__(
        self,
        optimizer: Optimizer,
        mode: str = ...,
        factor: float = ...,
        patience: int = ...,
        verbose: bool = ...,
        threshold: float = ...,
        threshold_mode: str = ...,
        cooldown: int = ...,
        min_lr: float = ...,
        eps: float = ...,
    ) -> None:
        ...

    def step(self, metrics: Union[float, int, Tensor], epoch: Optional[int] = None) -> None:
        ...
108
+
109
+
110
@runtime_checkable
class Steppable(Protocol):
    """To structurally type ``optimizer.step()``"""

    # Inferred from `torch.optim.optimizer.pyi`
    def step(self, closure: Optional[Callable[[], float]] = ...) -> Optional[float]:
        ...
117
+
118
+
119
@runtime_checkable
class Optimizable(Steppable, Protocol):
    """To structurally type ``optimizer``"""

    # Attributes every torch-style optimizer exposes.
    param_groups: List[Dict[Any, Any]]
    defaults: Dict[Any, Any]
    state: Dict[Any, Any]

    def state_dict(self) -> Dict[str, Dict[Any, Any]]:
        ...

    def load_state_dict(self, state_dict: Dict[str, Dict[Any, Any]]) -> None:
        ...
wemm/lib/python3.10/site-packages/lightning_fabric/utilities/warnings.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright The Lightning AI team.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Warning-related utilities."""
15
+ import warnings
16
+
17
+ from lightning_fabric.utilities.rank_zero import LightningDeprecationWarning
18
+
19
# enable our warnings: force "default" display for Lightning deprecation
# warnings (shown once per unique location) even if warnings are filtered globally
warnings.simplefilter("default", category=LightningDeprecationWarning)
21
+
22
+
23
+ class PossibleUserWarning(UserWarning):
24
+ """Warnings that could be false positives."""
wemm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:51db8e9d0eec4c7334f8e042aa1986e7ece1cf32e454afa431f49885d6a3dd57
3
+ size 137704
wemm/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa972a82fb986d28430da35e69d26262fd25bd32b9c73d8d42c5b18f8f85e195
3
+ size 152960
wemm/lib/python3.10/site-packages/torch/_VF.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This makes the functions in torch._C._VariableFunctions available as
3
+ torch._VF.<funcname>
4
+ without mypy being able to find them.
5
+
6
+ A subset of those functions are mapped to ATen functions in
7
+ torch/jit/_builtins.py
8
+
9
+ See https://github.com/pytorch/pytorch/issues/21478 for the reason for
10
+ introducing torch._VF
11
+
12
+ """
13
+ import sys
14
+ import types
15
+
16
+ import torch
17
+
18
+
19
+ class VFModule(types.ModuleType):
20
+ vf: types.ModuleType
21
+
22
+ def __init__(self, name):
23
+ super().__init__(name)
24
+ self.vf = torch._C._VariableFunctions
25
+
26
+ def __getattr__(self, attr):
27
+ return getattr(self.vf, attr)
28
+
29
+
30
+ sys.modules[__name__] = VFModule(__name__)
wemm/lib/python3.10/site-packages/torch/__config__.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+
4
def show():
    """Return a human-readable string describing how this PyTorch build was configured."""
    # The native extension assembles the full configuration report.
    config_report = torch._C._show_config()
    return config_report
10
+
11
+
12
+ # TODO: In principle, we could provide more structured version/config
13
+ # information here. For now only CXX_FLAGS is exposed, as Timer
14
+ # uses them.
15
+ def _cxx_flags():
16
+ """Returns the CXX_FLAGS used when building PyTorch."""
17
+ return torch._C._cxx_flags()
18
+
19
+
20
def parallel_info():
    r"""Return a detailed string describing this build's parallelization settings."""
    return torch._C._parallel_info()
wemm/lib/python3.10/site-packages/torch/__future__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This global flag controls whether to assign new tensors to the parameters
3
+ instead of changing the existing parameters in-place when converting an `nn.Module`
4
+ using the following methods:
5
+ 1. `module.cuda()` / `.cpu()` (for moving `module` between devices)
6
+ 2. `module.float()` / `.double()` / `.half()` (for converting `module` to a different dtype)
7
+ 3. `module.to()` / `.type()` (for changing `module`'s device or dtype)
8
+ 4. `module._apply(fn)` (for generic functions applied to `module`)
9
+
10
+ Default: False
11
+ """
12
# Module-level flag; see the module docstring for the conversion methods it affects.
_overwrite_module_params_on_conversion = False


def set_overwrite_module_params_on_conversion(value):
    """Set whether nn.Module conversions assign new parameter tensors instead of mutating in place."""
    global _overwrite_module_params_on_conversion
    _overwrite_module_params_on_conversion = value


def get_overwrite_module_params_on_conversion():
    """Return the current value of the overwrite-on-conversion flag."""
    return _overwrite_module_params_on_conversion
wemm/lib/python3.10/site-packages/torch/__init__.py ADDED
@@ -0,0 +1,1488 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ r"""
3
+ The torch package contains data structures for multi-dimensional
4
+ tensors and defines mathematical operations over these tensors.
5
+ Additionally, it provides many utilities for efficient serialization of
6
+ Tensors and arbitrary types, and other useful utilities.
7
+
8
+ It has a CUDA counterpart, that enables you to run your tensor computations
9
+ on an NVIDIA GPU with compute capability >= 3.0.
10
+ """
11
+
12
+ import math
13
+ import os
14
+ import sys
15
+ import platform
16
+ import textwrap
17
+ import ctypes
18
+ import inspect
19
+ if sys.version_info < (3,):
20
+ raise Exception("Python 2 has reached end-of-life and is no longer supported by PyTorch.")
21
+
22
+ from ._utils import _import_dotted_name, classproperty
23
+ from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \
24
+ USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS
25
+ # TODO(torch_deploy) figure out how to freeze version.py in fbcode build
26
+ if sys.executable == 'torch_deploy':
27
+ __version__ = "torch-deploy-1.8"
28
+ else:
29
+ from .torch_version import __version__ as __version__
30
+
31
+ from typing import Any, Callable, Dict, Optional, Set, Type, TYPE_CHECKING, Union
32
+ import builtins
33
+
34
+ __all__ = [
35
+ 'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
36
+ 'set_default_device',
37
+ 'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
38
+ 'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
39
+ 'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
40
+ 'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
41
+ 'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
42
+ 'TypedStorage', 'UntypedStorage',
43
+ 'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
44
+ 'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
45
+ 'lobpcg', 'use_deterministic_algorithms',
46
+ 'are_deterministic_algorithms_enabled',
47
+ 'is_deterministic_algorithms_warn_only_enabled',
48
+ 'set_deterministic_debug_mode', 'get_deterministic_debug_mode',
49
+ 'set_float32_matmul_precision', 'get_float32_matmul_precision',
50
+ 'set_warn_always', 'is_warn_always_enabled', 'SymInt', 'SymFloat',
51
+ 'SymBool', 'sym_not',
52
+ 'sym_int', 'sym_float', 'sym_max', 'sym_min', 'compile', 'vmap'
53
+ ]
54
+
55
+ ################################################################################
56
+ # Load the extension module
57
+ ################################################################################
58
+
59
+ if sys.platform == 'win32':
60
+ pfiles_path = os.getenv('ProgramFiles', 'C:\\Program Files')
61
+ py_dll_path = os.path.join(sys.exec_prefix, 'Library', 'bin')
62
+ th_dll_path = os.path.join(os.path.dirname(__file__), 'lib')
63
+
64
+ # When users create a virtualenv that inherits the base environment,
65
+ # we will need to add the corresponding library directory into
66
+ # DLL search directories. Otherwise, it will rely on `PATH` which
67
+ # is dependent on user settings.
68
+ if sys.exec_prefix != sys.base_exec_prefix:
69
+ base_py_dll_path = os.path.join(sys.base_exec_prefix, 'Library', 'bin')
70
+ else:
71
+ base_py_dll_path = ''
72
+
73
+ dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, base_py_dll_path]))
74
+
75
+ if all([not os.path.exists(os.path.join(p, 'nvToolsExt64_1.dll')) for p in dll_paths]):
76
+ nvtoolsext_dll_path = os.path.join(
77
+ os.getenv('NVTOOLSEXT_PATH', os.path.join(pfiles_path, 'NVIDIA Corporation', 'NvToolsExt')), 'bin', 'x64')
78
+ else:
79
+ nvtoolsext_dll_path = ''
80
+
81
+ from .version import cuda as cuda_version
82
+ import glob
83
+ if cuda_version and all([not glob.glob(os.path.join(p, 'cudart64*.dll')) for p in dll_paths]):
84
+ cuda_version_1 = cuda_version.replace('.', '_')
85
+ cuda_path_var = 'CUDA_PATH_V' + cuda_version_1
86
+ default_path = os.path.join(pfiles_path, 'NVIDIA GPU Computing Toolkit', 'CUDA', 'v' + cuda_version)
87
+ cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), 'bin')
88
+ else:
89
+ cuda_path = ''
90
+
91
+ dll_paths.extend(filter(os.path.exists, [nvtoolsext_dll_path, cuda_path]))
92
+
93
+ kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
94
+ with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
95
+ prev_error_mode = kernel32.SetErrorMode(0x0001)
96
+
97
+ kernel32.LoadLibraryW.restype = ctypes.c_void_p
98
+ if with_load_library_flags:
99
+ kernel32.LoadLibraryExW.restype = ctypes.c_void_p
100
+
101
+ for dll_path in dll_paths:
102
+ os.add_dll_directory(dll_path)
103
+
104
+ try:
105
+ ctypes.CDLL('vcruntime140.dll')
106
+ ctypes.CDLL('msvcp140.dll')
107
+ ctypes.CDLL('vcruntime140_1.dll')
108
+ except OSError:
109
+ print('''Microsoft Visual C++ Redistributable is not installed, this may lead to the DLL load failure.
110
+ It can be downloaded at https://aka.ms/vs/16/release/vc_redist.x64.exe''')
111
+
112
+ dlls = glob.glob(os.path.join(th_dll_path, '*.dll'))
113
+ path_patched = False
114
+ for dll in dlls:
115
+ is_loaded = False
116
+ if with_load_library_flags:
117
+ res = kernel32.LoadLibraryExW(dll, None, 0x00001100)
118
+ last_error = ctypes.get_last_error()
119
+ if res is None and last_error != 126:
120
+ err = ctypes.WinError(last_error)
121
+ err.strerror += f' Error loading "{dll}" or one of its dependencies.'
122
+ raise err
123
+ elif res is not None:
124
+ is_loaded = True
125
+ if not is_loaded:
126
+ if not path_patched:
127
+ os.environ['PATH'] = ';'.join(dll_paths + [os.environ['PATH']])
128
+ path_patched = True
129
+ res = kernel32.LoadLibraryW(dll)
130
+ if res is None:
131
+ err = ctypes.WinError(ctypes.get_last_error())
132
+ err.strerror += f' Error loading "{dll}" or one of its dependencies.'
133
+ raise err
134
+
135
+ kernel32.SetErrorMode(prev_error_mode)
136
+
137
+
138
+ def _preload_cuda_deps(lib_folder, lib_name):
139
+ """Preloads cuda deps if they could not be found otherwise."""
140
+ # Should only be called on Linux if default path resolution have failed
141
+ assert platform.system() == 'Linux', 'Should only be called on Linux'
142
+ import glob
143
+ lib_path = None
144
+ for path in sys.path:
145
+ nvidia_path = os.path.join(path, 'nvidia')
146
+ if not os.path.exists(nvidia_path):
147
+ continue
148
+ candidate_lib_paths = glob.glob(os.path.join(nvidia_path, lib_folder, 'lib', lib_name))
149
+ if candidate_lib_paths and not lib_path:
150
+ lib_path = candidate_lib_paths[0]
151
+ if lib_path:
152
+ break
153
+ if not lib_path:
154
+ raise ValueError(f"{lib_name} not found in the system path {sys.path}")
155
+ ctypes.CDLL(lib_path)
156
+
157
+
158
+ # See Note [Global dependencies]
159
def _load_global_deps():
    """Load libtorch_global_deps with RTLD_GLOBAL; see Note [Global dependencies].

    No-op under torch_deploy and on Windows. If the dlopen fails because a
    pip-provided nvidia-* CUDA library cannot be resolved, the missing
    libraries are preloaded via :func:`_preload_cuda_deps` and the load is retried.
    """
    if sys.executable == 'torch_deploy' or platform.system() == 'Windows':
        return

    lib_name = 'libtorch_global_deps' + ('.dylib' if platform.system() == 'Darwin' else '.so')
    here = os.path.abspath(__file__)
    lib_path = os.path.join(os.path.dirname(here), 'lib', lib_name)

    try:
        ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
    except OSError as err:
        # Can only happen for wheel with cuda libs as PYPI deps
        # As PyTorch is not purelib, but nvidia-*-cu11 is
        # Maps pip package folder name -> soname glob pattern for preloading.
        cuda_libs: Dict[str, str] = {
            'cublas': 'libcublas.so.*[0-9]',
            'cudnn': 'libcudnn.so.*[0-9]',
            'cuda_nvrtc': 'libnvrtc.so.*[0-9].*[0-9]',
            'cuda_runtime': 'libcudart.so.*[0-9].*[0-9]',
            'cuda_cupti': 'libcupti.so.*[0-9].*[0-9]',
            'cufft': 'libcufft.so.*[0-9]',
            'curand': 'libcurand.so.*[0-9]',
            'cusolver': 'libcusolver.so.*[0-9]',
            'cusparse': 'libcusparse.so.*[0-9]',
            'nccl': 'libnccl.so.*[0-9]',
            'nvtx': 'libnvToolsExt.so.*[0-9]',
        }
        # Re-raise unchanged unless the error message names one of the CUDA libs above.
        is_cuda_lib_err = [lib for lib in cuda_libs.values() if(lib.split('.')[0] in err.args[0])]
        if not is_cuda_lib_err:
            raise err
        for lib_folder, lib_name in cuda_libs.items():
            _preload_cuda_deps(lib_folder, lib_name)
        ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
191
+
192
+
193
+ if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \
194
+ (sys.executable == "torch_deploy" or platform.system() != 'Windows'):
195
+ # Do it the hard way. You might want to load libtorch with RTLD_GLOBAL in a
196
+ # few circumstances:
197
+ #
198
+ # 1. You're in a build environment (e.g., fbcode) where
199
+ # libtorch_global_deps is not available, but you still need
200
+ # to get mkl to link in with RTLD_GLOBAL or it will just
201
+ # not work.
202
+ #
203
+ # 2. You're trying to run PyTorch under UBSAN and you need
204
+ # to ensure that only one copy of libtorch is loaded, so
205
+ # vptr checks work properly
206
+ #
207
+ # If you're using this setting, you must verify that all the libraries
208
+ # you load consistently use the same libstdc++, or you may have
209
+ # mysterious segfaults.
210
+ #
211
+ old_flags = sys.getdlopenflags()
212
+ sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_LAZY)
213
+ from torch._C import * # noqa: F403
214
+ sys.setdlopenflags(old_flags)
215
+ del old_flags
216
+
217
+ else:
218
+ # Easy way. You want this most of the time, because it will prevent
219
+ # C++ symbols from libtorch clobbering C++ symbols from other
220
+ # libraries, leading to mysterious segfaults.
221
+ #
222
+ # If building in an environment where libtorch_global_deps isn't available
223
+ # like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will
224
+ # want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False
225
+ #
226
+ # See Note [Global dependencies]
227
+ if USE_GLOBAL_DEPS:
228
+ _load_global_deps()
229
+ from torch._C import * # noqa: F403
230
+
231
+ # Appease the type checker; ordinarily this binding is inserted by the
232
+ # torch._C module initialization code in C
233
+ if TYPE_CHECKING:
234
+ import torch._C as _C
235
+
236
class SymInt:
    """
    Like an int (including magic methods), but redirects all operations on the
    wrapped node. This is used in particular to symbolically record operations
    in the symbolic shape workflow.
    """

    def __init__(self, node):
        # This field MUST be named node; C++ binding code assumes that this
        # class has a field named node that stores SymNode
        self.node = node

    def __bool__(self):
        # Truthiness is delegated to the wrapped node.
        return self.node.bool_()

    def __int__(self):
        # Concretization to a plain int is delegated to the wrapped node.
        return self.node.int_()

    # Magic methods installed by torch.fx.experimental.symbolic_shapes
    # The bodies below are type stubs: they are replaced at runtime, and
    # raise if that patching has not happened.

    def __eq__(self, other: object) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __lt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __gt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __le__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __ge__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __sym_max__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_min__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_float__(self):
        raise AssertionError("type stub not overridden")

    def __repr__(self):
        return str(self.node)
282
+
283
class SymFloat:
    """
    Like an float (including magic methods), but redirects all operations on the
    wrapped node. This is used in particular to symbolically record operations
    in the symbolic shape workflow.
    """

    def __init__(self, node):
        from torch.fx.experimental.symbolic_shapes import SymNode
        assert isinstance(node, SymNode)
        # This field MUST be named node; C++ binding code assumes that this
        # class has a field named node that stores SymNode
        self.node = node

    def __bool__(self):
        # Truthiness is delegated to the wrapped node.
        return self.node.bool_()

    # Magic methods installed by torch.fx.experimental.symbolic_shapes
    # The bodies below are type stubs: they are replaced at runtime, and
    # raise if that patching has not happened.

    def __eq__(self, other: object) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __lt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __gt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __le__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __ge__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __sym_max__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_min__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_int__(self):
        raise AssertionError("type stub not overridden")

    def __repr__(self):
        return self.node.str()
328
+
329
class SymBool:
    """
    Like an bool (including magic methods), but redirects all operations on the
    wrapped node. This is used in particular to symbolically record operations
    in the symbolic shape workflow.

    Unlike regular bools, regular boolean operators will force extra guards instead
    of symbolically evaluate. Use the bitwise operators instead to handle this.
    """

    def __init__(self, node):
        from torch.fx.experimental.symbolic_shapes import SymNode
        assert isinstance(node, SymNode)
        # This field MUST be named node; C++ binding code assumes that this
        # class has a field named node that stores SymNode
        self.node = node

    def __bool__(self):
        # Truthiness is delegated to the wrapped node.
        return self.node.bool_()

    # Magic methods installed by torch.fx.experimental.symbolic_shapes
    # (stub bodies; replaced at runtime)
    def __and__(self, other) -> "SymBool":
        raise AssertionError("type stub not overridden")

    def __or__(self, other) -> "SymBool":
        raise AssertionError("type stub not overridden")

    # We very carefully define __sym_not__, and not a number of other
    # plausible alternatives:
    #
    # - We do not override __not__ because this is not a real magic
    #   method; you cannot override the meaning of the not builtin in
    #   Python.  We use the name 'sym_not' to clarify that in user code you
    #   cannot use the builtin not or operator.not_ or operator.__not__ and
    #   hit this magic method; you must use our custom sym_not operator.
    #
    # - We do not override the __invert__ method because SymBool is
    #   meant to be usable in situations where bool is expected.  However,
    #   bitwise negation ~a does the wrong thing with booleans (because
    #   bool is a subclass of int, so ~1 = -2 which is not falseish.)
    #   This would be a giant footgun, so we get around it by defining
    #   our own operator.  Note that bitwise and/or do the right thing,
    #   so we reuse the conventional operators there for readability.
    #
    def __sym_not__(self) -> "SymBool":
        raise AssertionError("type stub not overridden")

    def __repr__(self):
        return self.node.str()
378
+
379
def sym_not(a):
    r""" SymInt-aware utility for logical negation.

    Args:
        a (SymBool or bool): Object to negate
    """
    # Symbolic values expose __sym_not__; anything else negates with `not`.
    return a.__sym_not__() if hasattr(a, '__sym_not__') else not a
388
+
389
def sym_float(a):
    r""" SymInt-aware utility for float casting.

    Args:
        a (SymInt, SymFloat, or object): Object to cast
    """
    # Already a symbolic float: pass through unchanged.
    if isinstance(a, SymFloat):
        return a
    # Symbolic-aware objects (e.g. SymInt) provide their own conversion hook.
    if hasattr(a, '__sym_float__'):
        return a.__sym_float__()
    # py_float is the builtin float, saved before _C shadowed the name.
    return py_float(a)  # type: ignore[operator]
400
+
401
+
402
def sym_int(a):
    r""" SymInt-aware utility for int casting.

    Args:
        a (SymInt, SymFloat, or object): Object to cast
    """
    # Already a symbolic int: pass through unchanged.
    if isinstance(a, SymInt):
        return a
    if isinstance(a, SymFloat):
        # Truncate toward zero, matching int(float) semantics.
        round_fn = math.floor if a >= 0 else math.ceil
        return round_fn(a)  # type: ignore[arg-type]
    # py_int is the builtin int, saved before _C shadowed the name.
    return py_int(a)  # type: ignore[operator]
413
+
414
def sym_max(a, b):
    """ SymInt-aware utility for max()."""
    if isinstance(a, (SymInt, SymFloat)):
        return a.__sym_max__(b)
    if isinstance(b, (SymInt, SymFloat)):
        # NB: If you actually care about preserving output type exactly
        # if you do something like max(0, 0.0), it is NOT sound to treat
        # min/max as commutative
        return b.__sym_max__(a)
    # Neither operand is symbolic: defer to the builtin.
    return builtins.max(a, b)  # type: ignore[operator]
424
+
425
def sym_min(a, b):
    """ SymInt-aware utility for min()."""
    if isinstance(a, (SymInt, SymFloat)):
        return a.__sym_min__(b)
    elif isinstance(b, (SymInt, SymFloat)):
        # Treated as commutative here; see the caveat on sym_max about
        # exact output types for mixed int/float inputs.
        return b.__sym_min__(a)
    return builtins.min(a, b)  # type: ignore[operator]
432
+
433
# Check to see if we can load C extensions, and if not provide some guidance
# on what the problem might be.
try:
    # _initExtension is chosen (arbitrarily) as a sentinel.
    from torch._C import _initExtension
except ImportError:
    import torch._C as _C_for_compiled_check

    # The __file__ check only works for Python 3.7 and above.
    # A None __file__ means Python picked up the in-tree `torch/_C` package
    # directory instead of the compiled extension module.
    if _C_for_compiled_check.__file__ is None:
        raise ImportError(textwrap.dedent('''
            Failed to load PyTorch C extensions:
                It appears that PyTorch has loaded the `torch/_C` folder
                of the PyTorch repository rather than the C extensions which
                are expected in the `torch._C` namespace. This can occur when
                using the `install` workflow. e.g.
                    $ python setup.py install && python -c "import torch"

                This error can generally be solved using the `develop` workflow
                    $ python setup.py develop && python -c "import torch"  # This should succeed
                or by running Python from a different directory.
            ''').strip()) from None
    raise  # If __file__ is not None the cause is unknown, so just re-raise.
456
+
457
# Re-export the public names from torch._C and normalize their __module__
# so they present as members of `torch`.
for name in dir(_C):
    if name[0] != '_' and not name.endswith('Base'):
        __all__.append(name)
        obj = getattr(_C, name)
        if (isinstance(obj, Callable) or inspect.isclass(obj)):  # type: ignore[arg-type]
            if (obj.__module__ != 'torch'):
                # TODO: fix their module from C++ side
                if name not in ['DisableTorchFunctionSubclass', 'DisableTorchFunction', 'Generator']:
                    obj.__module__ = 'torch'
466
+
467
if not TYPE_CHECKING:
    # issue 38137 and python issue 43367. Submodules of a C extension are
    # non-standard, and attributes of those submodules cannot be pickled since
    # pickle expect to be able to import them as "from _C.sub import attr"
    # which fails with "_C is not a package". Registering each submodule in
    # sys.modules under its dotted name makes those imports resolvable.
    for attr in dir(_C):
        candidate = getattr(_C, attr)
        if type(candidate) is type(_C):
            # `candidate` is itself a module object: register it.
            if f'torch._C.{attr}' not in sys.modules:
                sys.modules[f'torch._C.{attr}'] = candidate
478
+
479
+
480
+ ################################################################################
481
+ # Define basic utilities
482
+ ################################################################################
483
+
484
+
485
def typename(o):
    """Return a human-readable type name for *o*.

    Tensors report their full type string via ``Tensor.type()``; other
    objects report ``module.qualname`` with the module prefix omitted for
    builtins.
    """
    if isinstance(o, torch.Tensor):
        return o.type()

    prefix = ''
    mod = getattr(o, '__module__', None)
    if mod is not None and mod not in ('builtins', '__builtin__'):
        prefix = mod + '.'

    if hasattr(o, '__qualname__'):
        base = o.__qualname__
    elif hasattr(o, '__name__'):
        base = o.__name__
    else:
        # Plain instances carry neither name attribute; fall back to the class.
        base = o.__class__.__name__

    return prefix + base
503
+
504
+
505
def is_tensor(obj):
    r"""Returns True if `obj` is a PyTorch tensor.

    Note that this function is simply doing ``isinstance(obj, Tensor)``.
    Using that ``isinstance`` check is better for typechecking with mypy,
    and more explicit - so it's recommended to use that instead of
    ``is_tensor``.

    Args:
        obj (Object): Object to test
    Example::

        >>> x = torch.tensor([1, 2, 3])
        >>> torch.is_tensor(x)
        True

    """
    # Tensor subclasses also pass this check (isinstance, not exact type).
    return isinstance(obj, torch.Tensor)
523
+
524
+
525
def is_storage(obj):
    r"""Returns True if `obj` is a PyTorch storage object.

    Args:
        obj (Object): Object to test
    """
    # Exact type membership (not isinstance): subclasses of the storage
    # classes are not treated as storages here.
    return type(obj) in _storage_classes
532
+
533
+
534
# Currently-installed DeviceContext (or None); managed by set_default_device.
_GLOBAL_DEVICE_CONTEXT = None

def set_default_device(device):
    """Sets the default ``torch.Tensor`` to be allocated on ``device``. This
    does not affect factory function calls which are called with an explicit
    ``device`` argument. Factory calls will be performed as if they
    were passed ``device`` as an argument.

    To only temporarily change the default device instead of setting it
    globally, use ``with torch.device(device):`` instead.

    The default device is initially ``cpu``. If you set the default tensor
    device to another device (e.g., ``cuda``) without a device index, tensors
    will be allocated on whatever the current device for the device type,
    even after :func:`torch.cuda.set_device` is called.

    .. warning::

        This function imposes a slight performance cost on every Python
        call to the torch API (not just factory functions). If this
        is causing problems for you, please comment on
        https://github.com/pytorch/pytorch/issues/92701

    Args:
        device (device or string): the device to set as default

    Example::

        >>> # xdoctest: +SKIP("requires cuda, changes global state")
        >>> torch.tensor([1.2, 3]).device
        device(type='cpu')
        >>> torch.set_default_device('cuda')  # current device is 0
        >>> torch.tensor([1.2, 3]).device
        device(type='cuda', index=0)
        >>> torch.set_default_device('cuda:1')
        >>> torch.tensor([1.2, 3]).device
        device(type='cuda', index=1)

    """
    global _GLOBAL_DEVICE_CONTEXT
    # Tear down any previously-installed context before changing the default.
    if _GLOBAL_DEVICE_CONTEXT is not None:
        _GLOBAL_DEVICE_CONTEXT.__exit__(None, None, None)
    # None restores the initial state (no default-device context installed).
    if device is None:
        _GLOBAL_DEVICE_CONTEXT = None
        return
    # NOTE(review): imported locally, presumably to avoid a circular import
    # during module initialization — confirm before moving to file top.
    from torch.utils._device import DeviceContext
    _GLOBAL_DEVICE_CONTEXT = DeviceContext(device)
    _GLOBAL_DEVICE_CONTEXT.__enter__()
582
+
583
+
584
def set_default_tensor_type(t):
    r"""Sets the default ``torch.Tensor`` type to floating point tensor type
    ``t``. This type will also be used as default floating point type for
    type inference in :func:`torch.tensor`.

    The default floating point tensor type is initially ``torch.FloatTensor``.

    Args:
        t (type or string): the floating point tensor type or its name

    Example::

        >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
        >>> torch.tensor([1.2, 3]).dtype  # initial default for floating point is torch.float32
        torch.float32
        >>> torch.set_default_tensor_type(torch.DoubleTensor)
        >>> torch.tensor([1.2, 3]).dtype  # a new floating point tensor
        torch.float64

    """
    # Accept a dotted type name (e.g. "torch.DoubleTensor") and resolve it.
    if isinstance(t, str):
        t = _import_dotted_name(t)
    _C._set_default_tensor_type(t)
607
+
608
+
609
def set_default_dtype(d):
    r"""

    Sets the default floating point dtype to :attr:`d`. Supports torch.float32
    and torch.float64 as inputs. Other dtypes may be accepted without complaint
    but are not supported and are unlikely to work as expected.

    When PyTorch is initialized its default floating point dtype is torch.float32,
    and the intent of set_default_dtype(torch.float64) is to facilitate NumPy-like
    type inference. The default floating point dtype is used to:

    1. Implicitly determine the default complex dtype. When the default floating point
       type is float32 the default complex dtype is complex64, and when the default
       floating point type is float64 the default complex type is complex128.
    2. Infer the dtype for tensors constructed using Python floats or complex Python
       numbers. See examples below.
    3. Determine the result of type promotion between bool and integer tensors and
       Python floats and complex Python numbers.

    Args:
        d (:class:`torch.dtype`): the floating point dtype to make the default.
                                  Either torch.float32 or torch.float64.

    Example:
        >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
        >>> # initial default for floating point is torch.float32
        >>> # Python floats are interpreted as float32
        >>> torch.tensor([1.2, 3]).dtype
        torch.float32
        >>> # initial default for complex is torch.complex64
        >>> # Complex Python numbers are interpreted as complex64
        >>> torch.tensor([1.2, 3j]).dtype
        torch.complex64

        >>> torch.set_default_dtype(torch.float64)

        >>> # Python floats are now interpreted as float64
        >>> torch.tensor([1.2, 3]).dtype  # a new floating point tensor
        torch.float64
        >>> # Complex Python numbers are now interpreted as complex128
        >>> torch.tensor([1.2, 3j]).dtype  # a new complex tensor
        torch.complex128

    """
    _C._set_default_dtype(d)
654
+
655
def use_deterministic_algorithms(mode, *, warn_only=False):
    r""" Sets whether PyTorch operations must use "deterministic"
    algorithms. That is, algorithms which, given the same input, and when
    run on the same software and hardware, always produce the same output.
    When enabled, operations will use deterministic algorithms when available,
    and if only nondeterministic algorithms are available they will throw a
    :class:`RuntimeError` when called.

    .. note:: This setting alone is not always enough to make an application
        reproducible. Refer to :ref:`reproducibility` for more information.

    .. note:: :func:`torch.set_deterministic_debug_mode` offers an alternative
        interface for this feature.

    The following normally-nondeterministic operations will act
    deterministically when ``mode=True``:

        * :class:`torch.nn.Conv1d` when called on CUDA tensor
        * :class:`torch.nn.Conv2d` when called on CUDA tensor
        * :class:`torch.nn.Conv3d` when called on CUDA tensor
        * :class:`torch.nn.ConvTranspose1d` when called on CUDA tensor
        * :class:`torch.nn.ConvTranspose2d` when called on CUDA tensor
        * :class:`torch.nn.ConvTranspose3d` when called on CUDA tensor
        * :func:`torch.bmm` when called on sparse-dense CUDA tensors
        * :func:`torch.Tensor.__getitem__` when attempting to differentiate a CPU tensor
          and the index is a list of tensors
        * :func:`torch.Tensor.index_put` with ``accumulate=False``
        * :func:`torch.Tensor.index_put` with ``accumulate=True`` when called on a CPU
          tensor
        * :func:`torch.Tensor.put_` with ``accumulate=True`` when called on a CPU
          tensor
        * :func:`torch.Tensor.scatter_add_` when called on a CUDA tensor
        * :func:`torch.gather` when called on a CUDA tensor that requires grad
        * :func:`torch.index_add` when called on CUDA tensor
        * :func:`torch.index_select` when attempting to differentiate a CUDA tensor
        * :func:`torch.repeat_interleave` when attempting to differentiate a CUDA tensor
        * :func:`torch.Tensor.index_copy` when called on a CPU or CUDA tensor

    The following normally-nondeterministic operations will throw a
    :class:`RuntimeError` when ``mode=True``:

        * :class:`torch.nn.AvgPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.AdaptiveAvgPool2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.AdaptiveAvgPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.MaxPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.AdaptiveMaxPool2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.FractionalMaxPool2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.FractionalMaxPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.MaxUnpool1d`
        * :class:`torch.nn.MaxUnpool2d`
        * :class:`torch.nn.MaxUnpool3d`
        * :func:`torch.nn.functional.interpolate` when attempting to differentiate a CUDA tensor
          and one of the following modes is used:

          - ``linear``
          - ``bilinear``
          - ``bicubic``
          - ``trilinear``

        * :class:`torch.nn.ReflectionPad1d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReflectionPad2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReflectionPad3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReplicationPad1d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReplicationPad2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReplicationPad3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.NLLLoss` when called on a CUDA tensor
        * :class:`torch.nn.CTCLoss` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.EmbeddingBag` when attempting to differentiate a CUDA tensor when
          ``mode='max'``
        * :func:`torch.Tensor.put_` when ``accumulate=False``
        * :func:`torch.Tensor.put_` when ``accumulate=True`` and called on a CUDA tensor
        * :func:`torch.histc` when called on a CUDA tensor
        * :func:`torch.bincount` when called on a CUDA tensor
        * :func:`torch.kthvalue` when called on a CUDA tensor
        * :func:`torch.median` with indices output when called on a CUDA tensor
        * :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor
        * :func:`torch.cumsum` when called on a CUDA tensor when dtype is floating point or complex

    A handful of CUDA operations are nondeterministic if the CUDA version is
    10.2 or greater, unless the environment variable ``CUBLAS_WORKSPACE_CONFIG=:4096:8``
    or ``CUBLAS_WORKSPACE_CONFIG=:16:8`` is set. See the CUDA documentation for more
    details: `<https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility>`_
    If one of these environment variable configurations is not set, a :class:`RuntimeError`
    will be raised from these operations when called with CUDA tensors:

        * :func:`torch.mm`
        * :func:`torch.mv`
        * :func:`torch.bmm`

    Note that deterministic operations tend to have worse performance than
    nondeterministic operations.

    .. note::

        This flag does not detect or prevent nondeterministic behavior caused
        by calling an inplace operation on a tensor with an internal memory
        overlap or by giving such a tensor as the :attr:`out` argument for an
        operation. In these cases, multiple writes of different data may target
        a single memory location, and the order of writes is not guaranteed.

    Args:
        mode (:class:`bool`): If True, makes potentially nondeterministic
            operations switch to a deterministic algorithm or throw a runtime
            error. If False, allows nondeterministic operations.

    Keyword args:
        warn_only (:class:`bool`, optional): If True, operations that do not
            have a deterministic implementation will throw a warning instead of
            an error. Default: ``False``

    Example::

        >>> # xdoctest: +SKIP
        >>> torch.use_deterministic_algorithms(True)

        # Forward mode nondeterministic error
        >>> torch.randn(10, device='cuda').kthvalue(0)
        ...
        RuntimeError: kthvalue CUDA does not have a deterministic implementation...

        # Backward mode nondeterministic error
        >>> torch.nn.AvgPool3d(1)(torch.randn(3, 4, 5, 6, requires_grad=True).cuda()).sum().backward()
        ...
        RuntimeError: avg_pool3d_backward_cuda does not have a deterministic implementation...
    """
    _C._set_deterministic_algorithms(mode, warn_only=warn_only)
781
+
782
def are_deterministic_algorithms_enabled():
    r"""Returns True if the global deterministic flag is turned on. Refer to
    :func:`torch.use_deterministic_algorithms` documentation for more details.
    """
    # Thin wrapper over the C++-side flag accessor.
    return _C._get_deterministic_algorithms()
787
+
788
def is_deterministic_algorithms_warn_only_enabled():
    r"""Returns True if the global deterministic flag is set to warn only.
    Refer to :func:`torch.use_deterministic_algorithms` documentation for more
    details.
    """
    # Thin wrapper over the C++-side flag accessor.
    return _C._get_deterministic_algorithms_warn_only()
794
+
795
def set_deterministic_debug_mode(debug_mode: Union[builtins.int, str]) -> None:
    r"""Sets the debug mode for deterministic operations.

    .. note:: This is an alternative interface for
        :func:`torch.use_deterministic_algorithms`. Refer to that function's
        documentation for details about affected operations.

    Args:
        debug_mode(str or int): If "default" or 0, don't error or warn on
            nondeterministic operations. If "warn" or 1, warn on
            nondeterministic operations. If "error" or 2, error on
            nondeterministic operations.
    """

    # NOTE: builtins.int is used here because int in this scope resolves
    # to torch.int
    if not isinstance(debug_mode, (builtins.int, str)):
        raise TypeError(f'debug_mode must be str or int, but got {type(debug_mode)}')

    if isinstance(debug_mode, str):
        # Normalize the string spelling to its numeric equivalent first.
        str_to_mode = {'default': 0, 'warn': 1, 'error': 2}
        if debug_mode not in str_to_mode:
            raise RuntimeError(
                'invalid value of debug_mode, expected one of `default`, '
                f'`warn`, `error`, but got {debug_mode}')
        debug_mode = str_to_mode[debug_mode]

    if debug_mode == 0:
        _C._set_deterministic_algorithms(False)
    elif debug_mode == 1:
        _C._set_deterministic_algorithms(True, warn_only=True)
    elif debug_mode == 2:
        _C._set_deterministic_algorithms(True)
    else:
        raise RuntimeError(
            'invalid value of debug_mode, expected 0, 1, or 2, '
            f'but got {debug_mode}')
836
+
837
def get_deterministic_debug_mode() -> builtins.int:
    r"""Returns the current value of the debug mode for deterministic
    operations. Refer to :func:`torch.set_deterministic_debug_mode`
    documentation for more details.
    """
    # 0 = off, 1 = warn-only, 2 = error.
    if not _C._get_deterministic_algorithms():
        return 0
    return 1 if _C._get_deterministic_algorithms_warn_only() else 2
850
+
851
def get_float32_matmul_precision() -> builtins.str:
    r"""Returns the current value of float32 matrix multiplication precision. Refer to
    :func:`torch.set_float32_matmul_precision` documentation for more details.
    """
    # Thin wrapper over the C++-side setting accessor.
    return _C._get_float32_matmul_precision()
856
+
857
def set_float32_matmul_precision(precision):
    r"""Sets the internal precision of float32 matrix multiplications.

    Running float32 matrix multiplications in lower precision may significantly increase
    performance, and in some programs the loss of precision has a negligible impact.

    Supports three settings:

        * "highest", float32 matrix multiplications use the float32 datatype for
          internal computations.
        * "high", float32 matrix multiplications use the TensorFloat32 or bfloat16_3x
          datatypes for internal computations, if fast matrix multiplication algorithms
          using those datatypes internally are available. Otherwise float32
          matrix multiplications are computed as if the precision is "highest".
        * "medium", float32 matrix multiplications use the bfloat16 datatype for
          internal computations, if a fast matrix multiplication algorithm
          using that datatype internally is available. Otherwise float32
          matrix multiplications are computed as if the precision is "high".

    .. note::

        This does not change the output dtype of float32 matrix multiplications,
        it controls how the internal computation of the matrix multiplication is performed.

    .. note::

        This does not change the precision of convolution operations. Other flags,
        like `torch.backends.cudnn.allow_tf32`, may control the precision of convolution
        operations.

    .. note::

        This flag currently only affects one native device type: CUDA.
        If "high" or "medium" are set then the TensorFloat32 datatype will be used
        when computing float32 matrix multiplications, equivalent to setting
        `torch.backends.cuda.matmul.allow_tf32 = True`. When "highest" (the default)
        is set then the float32 datatype is used for internal computations, equivalent
        to setting `torch.backends.cuda.matmul.allow_tf32 = False`.

    Args:
        precision(str): can be set to "highest" (default), "high", or "medium" (see above).

    """
    _C._set_float32_matmul_precision(precision)
901
+
902
def set_warn_always(b):
    r"""When this flag is False (default) then some PyTorch warnings may only
    appear once per process. This helps avoid excessive warning information.
    Setting it to True causes these warnings to always appear, which may be
    helpful when debugging.

    Args:
        b (:class:`bool`): If True, force warnings to always be emitted
            If False, set to the default behaviour
    """
    # Thin wrapper over the C++-side flag setter.
    _C._set_warnAlways(b)
913
+
914
def is_warn_always_enabled():
    r"""Returns True if the global warn_always flag is turned on. Refer to
    :func:`torch.set_warn_always` documentation for more details.
    """
    # Thin wrapper over the C++-side flag accessor.
    return _C._get_warnAlways()
919
+
920
################################################################################
# Define numeric constants
################################################################################

# For Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) and
# NumPy consistency (https://numpy.org/devdocs/reference/constants.html)
from math import e, inf, nan, pi

__all__.extend(['e', 'pi', 'nan', 'inf'])
928
+
929
+ ################################################################################
930
+ # Define Storage and Tensor classes
931
+ ################################################################################
932
+
933
+ from ._tensor import Tensor
934
+ from .storage import _StorageBase, TypedStorage, _LegacyStorage, UntypedStorage, _warn_typed_storage_removal
935
+
936
+ # NOTE: New <type>Storage classes should never be added. When adding a new
937
+ # dtype, use torch.storage.TypedStorage directly.
938
+
939
class ByteStorage(_LegacyStorage):
    # Legacy typed storage for torch.uint8; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.uint8
948
+
949
class DoubleStorage(_LegacyStorage):
    # Legacy typed storage for torch.double; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.double
958
+
959
class FloatStorage(_LegacyStorage):
    # Legacy typed storage for torch.float; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.float
968
+
969
class HalfStorage(_LegacyStorage):
    # Legacy typed storage for torch.half; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.half
978
+
979
class LongStorage(_LegacyStorage):
    # Legacy typed storage for torch.long; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.long
988
+
989
class IntStorage(_LegacyStorage):
    # Legacy typed storage for torch.int; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.int
998
+
999
class ShortStorage(_LegacyStorage):
    # Legacy typed storage for torch.short; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.short
1008
+
1009
class CharStorage(_LegacyStorage):
    # Legacy typed storage for torch.int8; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.int8
1018
+
1019
class BoolStorage(_LegacyStorage):
    # Legacy typed storage for torch.bool; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.bool
1028
+
1029
class BFloat16Storage(_LegacyStorage):
    # Legacy typed storage for torch.bfloat16; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.bfloat16
1038
+
1039
class ComplexDoubleStorage(_LegacyStorage):
    # Legacy typed storage for torch.cdouble; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.cdouble
1048
+
1049
class ComplexFloatStorage(_LegacyStorage):
    # Legacy typed storage for torch.cfloat; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.cfloat
1058
+
1059
class QUInt8Storage(_LegacyStorage):
    # Legacy typed storage for torch.quint8; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.quint8
1068
+
1069
class QInt8Storage(_LegacyStorage):
    # Legacy typed storage for torch.qint8; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.qint8
1078
+
1079
class QInt32Storage(_LegacyStorage):
    # Legacy typed storage for torch.qint32; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.qint32
1088
+
1089
class QUInt4x2Storage(_LegacyStorage):
    # Legacy typed storage for torch.quint4x2; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.quint4x2
1098
+
1099
class QUInt2x4Storage(_LegacyStorage):
    # Legacy typed storage for torch.quint2x4; prefer torch.storage.TypedStorage.
    @classproperty
    def dtype(self):
        # Public accessor: emits the typed-storage deprecation warning.
        _warn_typed_storage_removal()
        return self._dtype

    @classproperty
    def _dtype(self):
        # Same dtype without the deprecation warning.
        return torch.quint2x4
1108
+
1109
# Set of all storage classes used by torch.is_storage (membership is by
# exact type, not isinstance).
_storage_classes = {
    UntypedStorage, DoubleStorage, FloatStorage, LongStorage, IntStorage,
    ShortStorage, CharStorage, ByteStorage, HalfStorage, BoolStorage,
    QUInt8Storage, QInt8Storage, QInt32Storage, BFloat16Storage,
    ComplexFloatStorage, ComplexDoubleStorage, QUInt4x2Storage, QUInt2x4Storage,
    TypedStorage
}
1116
+
1117
# The _tensor_classes set is initialized by the call to
# _C._initialize_tensor_type_bindings(); it starts empty here.
_tensor_classes: Set[Type] = set()
1119
+
1120
+ # If you edit these imports, please update torch/__init__.py.in as well
1121
+ from .random import set_rng_state, get_rng_state, manual_seed, initial_seed, seed
1122
+ from .serialization import save, load
1123
+ from ._tensor_str import set_printoptions
1124
+
1125
+ ################################################################################
1126
+ # Initialize extension
1127
+ ################################################################################
1128
+
1129
def manager_path():
    """Return the filesystem path of the torch_shm_manager binary as bytes.

    Returns an empty byte string when running under torch_deploy or on
    Windows.

    Raises:
        RuntimeError: if the binary is not found at the expected location.
    """
    if sys.executable == 'torch_deploy' or platform.system() == 'Windows':
        return b""
    path = get_file_path('torch', 'bin', 'torch_shm_manager')
    # Side effect: prepares the multiprocessing environment rooted at the
    # torch package directory before the manager path is handed out.
    prepare_multiprocessing_environment(get_file_path('torch'))
    if not os.path.exists(path):
        raise RuntimeError("Unable to find torch_shm_manager at " + path)
    return path.encode('utf-8')
1137
+
1138
from torch.amp import autocast

# Initializing the extension shadows the built-in python float / int classes;
# store them for later use by SymInt / SymFloat.
py_float = float
py_int = int

# Shared memory manager needs to know the exact location of manager executable
_C._initExtension(manager_path())
# Only needed during initialization; drop it from the module namespace.
del manager_path
1148
+
1149
# Appease the type checker: it can't deal with direct setting of globals().
# Note that we will see "too many" functions when reexporting this way; there
# is not a good way to fix this problem. Perhaps, try to redesign VariableFunctions
# so that this import is good enough
if TYPE_CHECKING:
    # Some type signatures pulled in from _VariableFunctions here clash with
    # signatures already imported. For now these clashes are ignored; see
    # PR #43339 for details.
    from torch._C._VariableFunctions import * # type: ignore[misc] # noqa: F403
    # Fixup segment_reduce visibility
    _segment_reduce = segment_reduce
    del segment_reduce

# Ops not to be exposed in `torch` namespace,
# mostly helper ops.
PRIVATE_OPS = (
    'unique_dim',
)

# Export every public ATen binding into the torch module namespace, tagging
# each with __module__ = 'torch'. Names starting with "_" are bound but not
# added to __all__.
for name in dir(_C._VariableFunctions):
    if name.startswith('__') or name in PRIVATE_OPS:
        continue
    obj = getattr(_C._VariableFunctions, name)
    obj.__module__ = 'torch'
    # Hide some APIs that should not be public
    if name == "segment_reduce":
        # TODO: Once the undocumented FC window is passed, remove the line bellow
        globals()[name] = obj
        name = "_" + name
    globals()[name] = obj
    if not name.startswith("_"):
        __all__.append(name)
1181
+
1182
################################################################################
# Import interface functions defined in Python
################################################################################

# needs to be after the above ATen bindings so we can overwrite from Python side
from .functional import *  # noqa: F403


################################################################################
# Remove unnecessary members
################################################################################

# Internal storage base classes are not part of the public torch namespace;
# presumably they were only needed while the storage types were being set up
# earlier in this module — drop them here.
del _StorageBase
del _LegacyStorage
1196
+
1197
+ ################################################################################
1198
+ # Define _assert
1199
+ ################################################################################
1200
+
1201
+ # needs to be before the submodule imports to avoid circular dependencies
1202
def _assert(condition, message):
    r"""A wrapper around Python's assert which is symbolically traceable."""
    from .overrides import handle_torch_function, has_torch_function

    # Defer to a __torch_function__ override unless `condition` is a plain
    # Tensor (exact type check, subclasses may override).
    is_plain_tensor = type(condition) is torch.Tensor
    if not is_plain_tensor and has_torch_function((condition,)):
        return handle_torch_function(_assert, (condition,), condition, message)
    assert condition, message
1210
+
1211
+ ################################################################################
1212
+ # Import most common subpackages
1213
+ ################################################################################
1214
+
1215
+ # Use the redundant form so that type checkers know that these are a part of
1216
+ # the public API. The "regular" import lines are there solely for the runtime
1217
+ # side effect of adding to the imported module's members for other users.
1218
+ from torch import cuda as cuda
1219
+ from torch import cpu as cpu
1220
+ from torch import autograd as autograd
1221
+ from torch.autograd import (
1222
+ no_grad as no_grad,
1223
+ enable_grad as enable_grad,
1224
+ set_grad_enabled as set_grad_enabled,
1225
+ inference_mode as inference_mode,
1226
+ )
1227
+ from torch import fft as fft
1228
+ from torch import futures as futures
1229
+ from torch import _awaits as _awaits
1230
+ from torch import nested as nested
1231
+ from torch import nn as nn
1232
+ from torch.signal import windows as windows
1233
+ from torch import optim as optim
1234
+ import torch.optim._multi_tensor
1235
+ from torch import multiprocessing as multiprocessing
1236
+ from torch import sparse as sparse
1237
+ from torch import special as special
1238
+ import torch.utils.backcompat
1239
+ from torch import onnx as onnx
1240
+ from torch import jit as jit
1241
+ from torch import linalg as linalg
1242
+ from torch import hub as hub
1243
+ from torch import random as random
1244
+ from torch import distributions as distributions
1245
+ from torch import testing as testing
1246
+ import torch.backends.cuda
1247
+ import torch.backends.mps
1248
+ import torch.backends.cudnn
1249
+ import torch.backends.mkl
1250
+ import torch.backends.mkldnn
1251
+ import torch.backends.openmp
1252
+ import torch.backends.quantized
1253
+ import torch.utils.data
1254
+ from torch import __config__ as __config__
1255
+ from torch import __future__ as __future__
1256
+ from torch import profiler as profiler
1257
+
1258
+ # Quantized, sparse, AO, etc. should be last to get imported, as nothing
1259
+ # is expected to depend on them.
1260
+ from torch import ao as ao
1261
+ # nn.quant* depends on ao -- so should be after those.
1262
+ import torch.nn.quantizable
1263
+ import torch.nn.quantized
1264
+ import torch.nn.qat
1265
+ import torch.nn.intrinsic
1266
+
1267
# Hand the list of Python storage classes over to the C++ extension
# (presumably so _C can resolve them by name — confirm in _C._init_names).
_C._init_names(list(torch._storage_classes))

# attach docstrings to torch and tensor functions
from . import _torch_docs, _tensor_docs, _storage_docs
# The docstrings are attached as an import side effect; the module names
# themselves are not needed afterwards.
del _torch_docs, _tensor_docs, _storage_docs
1272
+
1273
+
1274
def compiled_with_cxx11_abi():
    r"""Return whether PyTorch was built with ``_GLIBCXX_USE_CXX11_ABI=1``."""
    # The flag is recorded by the C++ extension at build time.
    uses_cxx11_abi = _C._GLIBCXX_USE_CXX11_ABI
    return uses_cxx11_abi
1277
+
1278
+
1279
+ # Import the ops "namespace"
1280
+ from torch._ops import ops
1281
+ from torch._classes import classes
1282
+
1283
+ # quantization depends on torch.fx
1284
+ # Import quantization
1285
+ from torch import quantization as quantization
1286
+
1287
+ # Import the quasi random sampler
1288
+ from torch import quasirandom as quasirandom
1289
+
1290
# If you are seeing this, it means that this call site was not checked if
# the memory format could be preserved, and it was switched to old default
# behaviour of contiguous
legacy_contiguous_format = contiguous_format

# Register fork handler to initialize OpenMP in child processes (see gh-28389)
from torch.multiprocessing._atfork import register_after_fork
register_after_fork(torch.get_num_threads)
# Only needed at import time; keep the module namespace clean.
del register_after_fork

# Import tools that require fully imported torch (for applying
# torch.jit.script as a decorator, for instance):
from ._lobpcg import lobpcg as lobpcg

# These were previously defined in native_functions.yaml and appeared on the
# `torch` namespace, but we moved them to c10 dispatch to facilitate custom
# class usage. We add these lines here to preserve backward compatibility.
quantized_lstm = torch.ops.aten.quantized_lstm
quantized_gru = torch.ops.aten.quantized_gru
1309
+
1310
+ from torch.utils.dlpack import from_dlpack, to_dlpack
1311
+
1312
+ # Import experimental masked operations support. See
1313
+ # [RFC-0016](https://github.com/pytorch/rfcs/pull/27) for more
1314
+ # information.
1315
+ from . import masked
1316
+
1317
+ # Import removed ops with error message about removal
1318
+ from ._linalg_utils import ( # type: ignore[misc]
1319
+ matrix_rank,
1320
+ eig,
1321
+ solve,
1322
+ lstsq,
1323
+ )
1324
+ from ._linalg_utils import _symeig as symeig # type: ignore[misc]
1325
+
1326
+
1327
class _TorchCompileInductorWrapper:
    """Callable backend wrapper for ``torch.compile(backend="inductor")``.

    Normalizes the user-facing ``mode``/``options`` arguments into a single
    config-patch dict that is handed to ``torch._inductor.compile_fx`` each
    time the wrapper is invoked.
    """

    compiler_name = "inductor"

    def __init__(self, mode, options, dynamic):
        self.config = {}
        self.dynamic = dynamic
        self.apply_mode(mode)
        self.apply_options(options)
        if not dynamic:
            return
        # cudagraphs conflicts with dynamic shapes
        self.config["triton.cudagraphs"] = False
        assert "triton.cudagraphs" not in (options or ()), (
            "triton.cudagraphs does not support dynamic shapes"
        )

    def __eq__(self, other):
        if not isinstance(other, _TorchCompileInductorWrapper):
            return False
        return self.config == other.config and self.dynamic == other.dynamic

    def apply_mode(self, mode: Optional[str]):
        # Translate a named mode into the option set it stands for.
        if mode in (None, "default"):
            return
        mode_options = {
            "reduce-overhead": {
                "triton.cudagraphs": True,
                "size_asserts": False,
            },
            "max-autotune": {
                "epilogue_fusion": True,
                "max_autotune": True,
                "triton.cudagraphs": True,
            },
        }
        if mode not in mode_options:
            raise RuntimeError(
                f"Unrecognized mode={mode}, should be one of: default, reduce-overhead, max-autotune"
            )
        self.apply_options(mode_options[mode])

    def apply_options(self, options: Optional[Dict[str, Any]]):
        """Validate user options against inductor's config and stash them."""
        if not options:
            return

        from torch._inductor import config

        known: Dict[str, Any] = config.to_dict()  # type: ignore[attr-defined]
        for raw_key, value in options.items():
            # Options may be spelled with dashes; config attrs use underscores.
            canonical = raw_key.replace("-", "_")
            if canonical not in known:
                raise RuntimeError(
                    f"Unexpected optimization option {raw_key}, known options are {list(known.keys())}"
                )
            if type(value) is not type(known[canonical]):
                val_type_str = type(value).__name__
                expected_type_str = type(known[canonical]).__name__
                raise RuntimeError(
                    f"Unexpected type of attr {raw_key}, got {val_type_str} should be {expected_type_str}"
                )
            self.config[canonical] = value

    def __call__(self, model_, inputs_):
        from torch._inductor.compile_fx import compile_fx

        return compile_fx(model_, inputs_, config_patches=self.config)
1391
+
1392
+
1393
def compile(model: Optional[Callable] = None, *,
            fullgraph: builtins.bool = False,
            dynamic: builtins.bool = False,
            backend: Union[str, Callable] = "inductor",
            mode: Union[str, None] = None,
            options: Optional[Dict[str, Union[str, builtins.int, builtins.bool]]] = None,
            disable: builtins.bool = False) -> Callable:
    """
    Optimizes given model/function using TorchDynamo and specified backend.

    Args:
        model (Callable): Module/function to optimize
        fullgraph (bool): If False (default), graph breaks are allowed and the
            model may be compiled as several subgraphs. If True, the whole
            model must be capturable as a single graph and any graph break
            raises an error (forwarded to TorchDynamo as ``nopython``).
        dynamic (bool): Use dynamic shape tracing
        backend (str or Callable): backend to be used
        mode (str): Can be either "default", "reduce-overhead" or "max-autotune";
            mutually exclusive with ``options``. When neither is given,
            ``mode`` defaults to "default".
        options (dict): A dictionary of options to pass to the backend;
            mutually exclusive with ``mode``
        disable (bool): Turn torch.compile() into a no-op for testing

    Returns:
        The optimized model/function, or, when ``model`` is None, a decorator
        that applies ``torch.compile`` with the given settings.

    Raises:
        RuntimeError: if both ``mode`` and ``options`` are specified, or if the
            decorator produced by ``compile()`` is applied to ``None``.

    Example::

        @torch.compile(options={"matmul-padding": True}, fullgraph=True)
        def foo(x):
            return torch.sin(x) + torch.cos(x)

    """
    _C._log_api_usage_once("torch.compile")
    # Decorator mode: called as @torch.compile(...) with no model. Return a
    # decorator that re-enters compile() with identical settings.
    if model is None:
        def fn(model: Callable):
            if model is None:
                raise RuntimeError("Model can't be None")
            return compile(model,
                           fullgraph=fullgraph,
                           dynamic=dynamic,
                           backend=backend,
                           mode=mode,
                           options=options,
                           disable=disable)
        return fn

    # Imported lazily so that merely importing torch does not pull in dynamo.
    import torch._dynamo
    if mode is not None and options is not None:
        raise RuntimeError("Either mode or options can be specified, but both can't be specified at the same time.")
    if mode is None and options is None:
        mode = "default"
    if backend == "inductor":
        backend = _TorchCompileInductorWrapper(mode, options, dynamic)
    return torch._dynamo.optimize(backend=backend, nopython=fullgraph, dynamic=dynamic, disable=disable)(model)
1442
+
1443
+
1444
def _register_device_module(device_type, module):
    r"""Register an external runtime module of the specific :attr:`device_type`
    supported by torch.

    After the :attr:`module` is registered correctly, the user can refer
    the external runtime module as part of torch with attribute torch.xxx.
    """
    # Canonicalize and validate: torch.device() rejects unsupported type names.
    device_type = torch.device(device_type).type
    host = sys.modules[__name__]
    if hasattr(host, device_type):
        existing = getattr(host, device_type)
        raise RuntimeError(
            f"The runtime module of '{device_type}' has already "
            f"been registered with '{existing}'"
        )
    # Expose the module both as torch.<device_type> and as an importable
    # submodule entry in sys.modules.
    setattr(host, device_type, module)
    sys.modules[f"{__name__}.{device_type}"] = module
1460
+
1461
# expose return_types
from . import return_types
from . import library
if not TYPE_CHECKING:
    # Imported for its side effects only (presumably registering meta-device
    # implementations — confirm in _meta_registrations); skipped for static
    # type checking.
    from . import _meta_registrations

# Enable CUDA Sanitizer
if 'TORCH_CUDA_SANITIZER' in os.environ:
    # Opt-in via environment variable; the sanitizer is activated as soon as
    # torch finishes importing.
    import torch.cuda._sanitizer as csan

    csan.enable_cuda_sanitizer()
1472
+
1473
+ # Populate magic methods on SymInt and SymFloat
1474
+ import torch.fx.experimental.symbolic_shapes
1475
+
1476
+ from torch import func as func
1477
+ from torch.func import vmap
1478
+
1479
+ # The function _sparse_coo_tensor_unsafe is removed from PyTorch
1480
+ # Python API (v. 1.13), here we temporarily provide its replacement
1481
+ # with a deprecation warning.
1482
+ # TODO: remove the function for PyTorch v 1.15.
1483
def _sparse_coo_tensor_unsafe(*args, **kwargs):
    # Deprecated shim: forward to sparse_coo_tensor with invariant checking
    # forced off, emitting a deprecation warning on every call.
    import warnings

    warnings.warn(
        'torch._sparse_coo_tensor_unsafe is deprecated, '
        'use torch.sparse_coo_tensor(..., check_invariants=False) instead.'
    )
    forwarded = dict(kwargs, check_invariants=False)
    return torch.sparse_coo_tensor(*args, **forwarded)
wemm/lib/python3.10/site-packages/torch/_jit_internal.py ADDED
@@ -0,0 +1,1435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The weak_script annotation needs to be here instead of inside torch/jit/ so it
3
+ can be used in other places in torch/ (namely torch.nn) without running into
4
+ circular dependency problems
5
+ """
6
+
7
+ import ast
8
+ import builtins
9
+ import collections
10
+ import contextlib
11
+ import enum
12
+ import inspect
13
+ import io
14
+ import pickle
15
+ import sys
16
+ import threading
17
+ import typing
18
+ import warnings
19
+ import weakref
20
+ from textwrap import dedent
21
+ from typing import ( # noqa: F401
22
+ Any,
23
+ Callable,
24
+ Dict,
25
+ Final,
26
+ Generic,
27
+ List,
28
+ Optional,
29
+ Tuple,
30
+ Type,
31
+ TypeVar,
32
+ Union,
33
+ )
34
+
35
+ import torch
36
+
37
+ # This is needed. `torch._jit_internal` is imported before `torch.distributed.__init__`.
38
+ # Explicitly ask to import `torch.distributed.__init__` first.
39
+ # Otherwise, "AttributeError: module 'torch' has no attribute 'distributed'" is raised.
40
+ import torch.distributed.rpc
41
+ import torch.package._mangling as package_mangling
42
+ from torch._awaits import _Await
43
+ from torch._C import _Await as CAwait, Future as CFuture
44
+ from torch._sources import fake_range, get_source_lines_and_file, parse_def
45
+ from torch.futures import Future
46
+
47
# Concrete type of a `_thread` lock object, used in annotations below.
LockType: Type
try:
    import _thread

    LockType = _thread.LockType
except ImportError:
    # Builds without real threading provide the dummy implementation instead.
    import _dummy_thread

    LockType = _dummy_thread.LockType
56
+
57
# Wrapper functions that can call either of 2 functions depending on a boolean
# argument. Keyed weakly by the wrapper so entries disappear when the wrapped
# function is garbage collected.
boolean_dispatched: "weakref.WeakKeyDictionary[Callable, Dict[str, Callable]]" = (
    weakref.WeakKeyDictionary()
)  # noqa: T484


# Prefix for fake filenames assigned to sources synthesized at runtime
# (presumably for dataclass support, per the name — confirm at use sites).
FAKE_FILENAME_PREFIX = "__torch_jit_dataclass"
65
+
66
+
67
class SourceLoader:
    """In-memory cache mapping functions to their source text.

    Lets source for dynamically synthesized functions be recorded once and
    looked up later, when `inspect` cannot recover it from a file.
    """

    def __init__(self):
        self.content = {}

    def cache(self, fn, source):
        # Remember (or overwrite) the source string for `fn`.
        self.content[fn] = source

    def get_source(self, fn):
        # None when `fn` was never cached.
        return self.content.get(fn, None)


loader = SourceLoader()
79
+
80
+
81
def createResolutionCallbackFromEnv(lookup_base):
    """
    Creates a resolution callback that will look up qualified names in an
    environment, starting with `lookup_base` for the base of any qualified
    names, then proceeding down the lookup chain with the resolved object.

    You should not use this directly, it should only be used from the other
    createResolutionCallbackFrom* functions.
    """

    def resolve_qualified(dotted, scope):
        # Walk a dotted name (`a.b.c`) one attribute at a time from `scope`.
        for piece in dotted.split("."):
            scope = getattr(scope, piece)
        return scope

    def parse_nested(expr, scope) -> Tuple[Any, int]:
        # Scan forward to the first delimiter to isolate the (possibly
        # dotted) base name of the expression.
        pos = 0
        while pos < len(expr) and expr[pos] not in (",", "[", "]"):
            pos += 1

        # Special case logic for the empty Tuple as a subscript (used
        # in the type annotation `Tuple[()]`)
        if expr[:pos] == "()":
            return (), pos

        base = resolve_qualified(expr[:pos].strip(), scope)
        assert base is not None, f"Unresolvable type {expr[:pos]}"
        if pos == len(expr) or expr[pos] != "[":
            return base, pos

        # `expr[pos]` is "[": recursively parse the comma-separated
        # subscript arguments.
        args = []
        while expr[pos] != "]":
            pos += 1
            item, consumed = parse_nested(expr[pos:], scope)
            args.append(item)
            pos += consumed
        subscript = tuple(args) if len(args) > 1 else args[0]
        return base[subscript], pos + 1

    def parse_expr(expr, scope):
        try:
            value, consumed = parse_nested(expr, scope)
            assert consumed == len(
                expr
            ), "whole expression was not parsed, falling back to c++ parser"
            return value
        except Exception:
            """
            The python resolver fails in several cases in known unit tests, and is intended
            to fall back gracefully to the c++ resolver in general. For example, python 2 style
            annotations which are frequent in our unit tests often fail with types e.g. int not
            resolvable from the calling frame.
            """
            return None

    return lambda expr: parse_expr(expr, lookup_base)
146
+
147
+
148
def createResolutionCallbackFromFrame(frames_up: int = 0):
    """
    Creates a function which, given a string variable name,
    returns the value of the variable in the scope of the caller of
    the function which called createResolutionCallbackFromFrame (by default).

    This is used to enable access in-scope Python variables inside
    TorchScript fragments.

    frames_up is number of additional frames to go up on the stack.
    The default value is 0, which correspond to the frame of the caller
    of createResolutionCallbackFromFrame. Also for example, if frames_up is set
    to 1, then the frame of the caller's caller of createResolutionCallbackFromFrame
    will be taken.

    For example, the following program prints 2::

        def bar():
            cb = createResolutionCallbackFromFrame(1)
            print(cb("foo"))

        def baz():
            foo = 2
            bar()

        baz()
    """
    # Climb frames_up + 1 frames: one to leave this function, plus the
    # requested number of extra hops.
    frame = inspect.currentframe()
    for _ in range(frames_up + 1):
        assert frame is not None
        frame = frame.f_back

    assert frame is not None
    f_locals = frame.f_locals
    f_globals = frame.f_globals

    class env:
        # Resolution order: target frame locals, then its globals, then
        # builtins; silently yields None for unknown names.
        def __getattr__(self, key):
            if key in f_locals:
                return f_locals[key]
            if key in f_globals:
                return f_globals[key]
            if key in dir(builtins):
                return getattr(builtins, key)

    return createResolutionCallbackFromEnv(env())
196
+
197
+
198
def get_closure(fn):
    """
    Get a dictionary of closed over variables from a function
    """
    # Start from the function's globals, then overlay the closure cells so
    # closed-over names shadow same-named globals.
    captures = dict(fn.__globals__)
    cells = fn.__closure__ or ()
    for var_name, cell in zip(fn.__code__.co_freevars, cells):
        captures[var_name] = cell.cell_contents
    return captures
209
+
210
+
211
+ # [local resolution in python]
212
+ # Depending on where a variable is defined, and where it is used, we may
213
+ # or may not be able to recover its value when recursively compiling a
214
+ # script function. Remember in the general case, a module or function is
215
+ # first defined and then later scripted. This means we do not have a
216
+ # chance to capture the active frames when the function is defined. Hence any
217
+ # name resolution has to happen later on the created closure. The way
218
+ # python captures type annotations restricts what we can recover. The
219
+ # following example illustrates the different cases:
220
+ #
221
+ # class MyGlobalClass:
222
+ # ...
223
+ # def my_local_scope():
224
+ # @torch.jit.script
225
+ # class MyClass:
226
+ # ...
227
+ # @torch.jit.script
228
+ # class MyClassUsedAsVar:
229
+ # ...
230
+ # def eg(x: MyClass, y: MyGlobalClass):
231
+ # a_local_capture : Foo
232
+ # return MyClassUsedAsVar(x)
233
+ #
234
+ # MyGlobalClass is defined in the __globals__ dictionary of function
235
+ # 'eg', so it is always recoverable. my_local_scope introduces a new local
236
+ # variable scope in the function. Classes defined here are only visible as
237
+ # local variables. For the case of MyClassUsedAsVar, it is captured
238
+ # because it is used as a variable inside the body of the function, and we
239
+ # can resolve it using the captures returned from `get_closure`. However,
240
+ # the type annotations are not captured by the closure. In Python
241
+ # 3.0--3.9, the _value_ of MyClass and MyGlobalClass will be available as
242
+ # annotations on `eg``, but starting in Python 4.0, they will represented as
243
+ # strings and no longer present. Furthermore, since the body of `eg` does
244
+ # not reference those names, they do not appear in the list of closed over
245
+ # variables. In Python 2.x, type annotations are in comments, leading to a
246
+ # similar situation where their definitions are not available. We anticipate
247
+ # that most users will not run into this issue because their modules and
248
+ # functions will be defined at a global scope like MyGlobalClass. In cases
249
+ # where they are not, it is possible to work around issues by declaring the
250
+ # values global in the function.
251
+ # In Python 3.9 declaring class as global will make it invisible to
252
+ # `inspect.getsource`, see https://bugs.python.org/issue42666 .
253
+ # This could be worked around by manually adding it to the `globals()` dictionary.
254
+
255
+
256
def createResolutionCallbackFromClosure(fn):
    """
    Create a resolutionCallback by introspecting the function instead of
    looking up the stack for the enclosing scope
    """
    captured = get_closure(fn)

    class closure_lookup:
        # A class (rather than the raw dict) so that name resolution goes
        # through uniform `getattr` calls in createResolutionCallbackFromEnv.
        def __getattr__(self, key):
            if key in captured:
                return captured[key]
            for namespace in (typing, builtins):
                if hasattr(namespace, key):
                    return getattr(namespace, key)
            return None

    return createResolutionCallbackFromEnv(closure_lookup())
276
+
277
+
278
def can_compile_class(cls) -> bool:
    """Return True if `cls` looks compilable by TorchScript."""
    # Explicitly ignored classes are never compiled.
    if is_ignored_fn(cls):
        return False

    # Ignore the following list of built-in classes.
    if issubclass(cls, (torch.nn.Module, tuple, list, Exception)):
        return False

    # If any routine on the type lacks a code object, the type can't be
    # compiled and is probably a builtin / bound from C.
    routines = (
        getattr(cls, attr)
        for attr in cls.__dict__
        if inspect.isroutine(getattr(cls, attr, None))
    )
    return all(hasattr(fn, "__code__") for fn in routines)
297
+
298
+
299
def get_callable_argument_names(fn) -> List[str]:
    """
    Gets names of all POSITIONAL_OR_KEYWORD arguments for callable `fn`.
    Parameters of any other kind (positional-only, ``*args``, keyword-only,
    ``**kwargs``) are skipped rather than reported.

    This is used by `torch.jit.trace` to assign meaningful argument names to
    traced functions and modules.

    Args:
        fn: A callable.
    Returns:
        Argument names: List[str]. Empty when `fn`'s signature cannot be
        introspected at all.
    """
    # inspect.signature may fail (e.g. for some builtins); give up in that
    # case rather than propagating the error.
    try:
        callable_signature = inspect.signature(fn)
    except Exception:
        return []

    # The four other parameter kinds do not map to individual call values
    # addressable by a keyword name, so they are skipped.
    return [
        name
        for name, param in callable_signature.parameters.items()
        if param.kind == param.POSITIONAL_OR_KEYWORD
    ]
328
+
329
+
330
def get_annotation_str(annotation):
    """
    Convert an AST node containing a type annotation to the string present in the source
    that represents the same annotation.
    """
    if isinstance(annotation, ast.Name):
        return annotation.id
    if isinstance(annotation, ast.Attribute):
        base = get_annotation_str(annotation.value)
        return f"{base}.{annotation.attr}"
    if isinstance(annotation, ast.Subscript):
        # In Python 3.9+ subscript indices are not wrapped in ast.Index
        if sys.version_info >= (3, 9):
            subscript_slice = annotation.slice
        else:
            subscript_slice = annotation.slice.value  # type: ignore[attr-defined]
        return f"{get_annotation_str(annotation.value)}[{get_annotation_str(subscript_slice)}]"
    if isinstance(annotation, ast.Tuple):
        return ",".join(get_annotation_str(elt) for elt in annotation.elts)
    if isinstance(annotation, (ast.Constant, ast.NameConstant)):
        return f"{annotation.value}"

    # If an AST node is not handled here, it's probably handled in ScriptTypeParser.
    return None
350
+
351
+
352
def get_type_hint_captures(fn):
    """
    Get a dictionary containing type resolution mappings necessary to resolve types
    for the literal annotations on 'fn'. These are not considered to be closed-over by fn
    and must be obtained separately (e.g. using this function).

    Args:
        fn: A callable.
    Returns:
        A Dict[str, Any] containing a mapping from the literal annotations used on
        fn to the Python objects they refer to.
    """
    # The source is needed because inspect.signature() only yields resolved
    # type objects, not the literal strings used in the annotations. Prefer
    # the source recorded by `loader` (covers functions synthesized at
    # runtime); otherwise read it with inspect.
    src = loader.get_source(fn)
    if src is None:
        src = inspect.getsource(fn)

    # Map parameter name -> annotated type, skipping unannotated parameters
    # and string annotations. String annotations are only understood by
    # TorchScript when a class refers to itself in its own definition;
    # mapping them here would recurse infinitely since that class is still
    # being compiled, and ScriptTypeParser already handles them.
    signature = inspect.signature(fn)
    name_to_type = {
        name: parameter.annotation
        for name, parameter in signature.parameters.items()
        if parameter.annotation is not inspect.Parameter.empty
        and not isinstance(parameter.annotation, str)
    }

    # Recover the literal annotation strings by source inspection, which also
    # covers aliases (e.g. device_t = torch.device used as `d: device_t`).
    # frontend.py cannot be used here because it imports _jit_internal, so
    # use ast directly.
    parsed = ast.parse(dedent(src))
    if len(parsed.body) != 1 or not isinstance(parsed.body[0], ast.FunctionDef):
        raise RuntimeError(f"Expected {fn} to be a function")
    fn_def = parsed.body[0]

    # Build the final result — literal annotation string -> type object — by
    # joining the AST-reconstructed annotation strings with name_to_type via
    # the parameter name.
    annotation_to_type = {}

    for arg in fn_def.args.args:
        # Skip arguments whose annotation is absent or cannot be rendered as
        # a string; ScriptTypeParser will probably handle those.
        annotation_str = (
            get_annotation_str(arg.annotation) if arg.annotation else None
        )
        if annotation_str is None:
            continue

        # The parameter may be missing from name_to_type when its annotation
        # is itself a string (common for self-referential annotations in
        # classes); once again, leave those to ScriptTypeParser.
        if arg.arg in name_to_type:
            annotation_to_type[annotation_str] = name_to_type[arg.arg]

    # Include the return annotation under the same constraints: it must
    # render to a string via get_annotation_str, and the resolved annotation
    # must neither be missing nor itself be a string.
    literal_return_annotation = get_annotation_str(fn_def.returns)
    return_annotation = signature.return_annotation
    if (
        literal_return_annotation is not None
        and return_annotation is not inspect.Parameter.empty
        and not isinstance(return_annotation, str)
    ):
        annotation_to_type[literal_return_annotation] = return_annotation

    return annotation_to_type
432
+
433
+
434
def createResolutionCallbackForClassMethods(cls):
    """
    This looks at all the methods defined in a class and pulls their closed-over
    variables into a dictionary and uses that to resolve variables.
    """
    # cls is a type, so its methods are plain functions (not bound methods),
    # which is why `isroutine` is used instead of `ismethod`. Built-ins are
    # skipped since they have neither global scope nor type hints — needed to
    # support `enum.Enum` subclasses on Python 3.11, which add a
    # `_new_member_` alias to `__new__`.
    fns = [
        fn
        for fn in (getattr(cls, name) for name in cls.__dict__)
        if inspect.isroutine(fn) and not inspect.isbuiltin(fn)
    ]

    captures = {}
    for fn in fns:
        captures.update(get_closure(fn))
        captures.update(get_type_hint_captures(fn))

    def lookup_in_class(key):
        # Fall back to builtins (None when absent) for anything the methods
        # did not close over.
        if key in captures:
            return captures[key]
        return getattr(builtins, key, None)

    return lookup_in_class
463
+
464
+
465
def boolean_dispatch(
    arg_name, arg_index, default, if_true, if_false, module_name, func_name
):
    """Create a wrapper dispatching to ``if_true`` or ``if_false`` based on
    one boolean argument (looked up by keyword ``arg_name`` or positional
    slot ``arg_index``).

    In TorchScript that flag must be a compile-time constant so the correct
    overload can be selected statically.
    """

    def fn(*args, **kwargs):
        # The keyword form wins; otherwise fall back to the positional slot,
        # and finally to False when the argument was omitted entirely.
        if arg_name in kwargs:
            flag = kwargs[arg_name]
        elif arg_index < len(args):
            flag = args[arg_index]
        else:
            flag = False
        chosen = if_true if flag else if_false
        return chosen(*args, **kwargs)

    # At most one branch may carry a docstring; propagate it to the other so
    # both overloads (and the wrapper) document the same contract.
    true_doc, false_doc = if_true.__doc__, if_false.__doc__
    if true_doc is None and false_doc is not None:
        doc = false_doc
        if_true.__doc__ = doc
    elif false_doc is None and true_doc is not None:
        doc = true_doc
        if_false.__doc__ = doc
    elif true_doc is None and false_doc is None:
        # neither function has a docstring
        doc = None
    else:
        raise RuntimeError("only one function can have a docstring")
    fn.__doc__ = doc

    if module_name is not None:
        fn.__module__ = module_name
    if func_name is not None:
        fn.__name__ = func_name

    boolean_dispatched[fn] = {
        "if_true": if_true,
        "if_false": if_false,
        "index": arg_index,
        "default": default,
        "arg_name": arg_name,
    }
    return fn
512
+
513
+
514
class FunctionModifiers:
    """Markers describing how TorchScript should treat a function.

    See export() and ignore() for details.
    """

    # NOTE: these exact strings are stored on functions and surfaced in error
    # messages elsewhere; keep them verbatim.
    UNUSED = "unused (ignored and replaced with raising of an exception)"
    IGNORE = "ignore (leave as a call to Python, cannot be torch.jit.save'd)"
    EXPORT = "export (compile this function even if nothing calls it)"
    DEFAULT = "default (compile if called from a exported function / forward)"
    COPY_TO_SCRIPT_WRAPPER = (
        "if this method is not scripted, copy the python method onto the scripted model"
    )
    _DROP = "_drop (function is fully ignored, declaration can be unscriptable)"
528
+
529
+
530
def export(fn):
    """
    Mark a method on an ``nn.Module`` as an entry point into a
    :class:`ScriptModule`, forcing it to be compiled.

    ``forward`` is implicitly an entry point, and anything reachable from a
    compiled method is compiled as it is seen, so neither needs this
    decorator.

    Example (using ``@torch.jit.export`` on a method):

    .. testcode::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            def implicitly_compiled_method(self, x):
                return x + 99

            # `forward` is implicitly decorated with `@torch.jit.export`,
            # so adding it here would have no effect
            def forward(self, x):
                return x + 10

            @torch.jit.export
            def another_forward(self, x):
                # When the compiler sees this call, it will compile
                # `implicitly_compiled_method`
                return self.implicitly_compiled_method(x)

            def unused_method(self, x):
                return x - 20

        # `m` will contain compiled methods:
        #     `forward`
        #     `another_forward`
        #     `implicitly_compiled_method`
        # `unused_method` will not be compiled since it was not called from
        # any compiled methods and wasn't decorated with `@torch.jit.export`
        m = torch.jit.script(MyModule())
    """
    fn._torchscript_modifier = FunctionModifiers.EXPORT
    return fn
574
+
575
+
576
def unused(fn):
    """
    Mark a function or method as intentionally uncompilable: the compiler
    replaces its body with code that raises an exception at call time. This
    lets a model keep not-yet-TorchScript-compatible code and still be
    exported, as long as the marked function is never reached.

    Example (using ``@torch.jit.unused`` on a method)::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            def __init__(self, use_memory_efficient):
                super().__init__()
                self.use_memory_efficient = use_memory_efficient

            @torch.jit.unused
            def memory_efficient(self, x):
                import pdb
                pdb.set_trace()
                return x + 10

            def forward(self, x):
                # Use not-yet-scriptable memory efficient mode
                if self.use_memory_efficient:
                    return self.memory_efficient(x)
                else:
                    return x + 10

        m = torch.jit.script(MyModule(use_memory_efficient=False))
        m.save("m.pt")

        m = torch.jit.script(MyModule(use_memory_efficient=True))
        # exception raised
        m(torch.rand(100))
    """
    # Properties are handled member-by-member: mark the getter (and setter,
    # when present) and hand the property object back unchanged.
    if isinstance(fn, property):
        setattr(fn.fget, "_torchscript_modifier", FunctionModifiers.UNUSED)  # noqa: B010
        if fn.fset:
            setattr(fn.fset, "_torchscript_modifier", FunctionModifiers.UNUSED)  # noqa: B010
        return fn

    fn._torchscript_modifier = FunctionModifiers.UNUSED
    return fn
628
+
629
+
630
+ # No op context manager from python side
631
+ class _IgnoreContextManager(contextlib.AbstractContextManager):
632
+ def __init__(self, **kwargs):
633
+ pass
634
+
635
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
636
+ pass
637
+
638
+
639
def ignore(drop=False, **kwargs):
    """
    This decorator indicates to the compiler that a function or method should
    be ignored and left as a Python function. This allows you to leave code in
    your model that is not yet TorchScript compatible. If called from TorchScript,
    ignored functions will dispatch the call to the Python interpreter. Models with ignored
    functions cannot be exported; use :func:`@torch.jit.unused <torch.jit.unused>` instead.

    Example (using ``@torch.jit.ignore`` on a method)::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            @torch.jit.ignore
            def debugger(self, x):
                import pdb
                pdb.set_trace()

            def forward(self, x):
                x += 10
                # The compiler would normally try to compile `debugger`,
                # but since it is `@ignore`d, it will be left as a call
                # to Python
                self.debugger(x)
                return x

        m = torch.jit.script(MyModule())

        # Error! The call `debugger` cannot be saved since it calls into Python
        m.save("m.pt")

    Example (using ``@torch.jit.ignore(drop=True)`` on a method):

    .. testcode::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            @torch.jit.ignore(drop=True)
            def training_method(self, x):
                import pdb
                pdb.set_trace()

            def forward(self, x):
                if self.training:
                    self.training_method(x)
                return x

        m = torch.jit.script(MyModule())

        # This is OK since `training_method` is not saved, the call is replaced
        # with a `raise`.
        m.save("m.pt")

    .. testcleanup::

        import os
        os.remove('m.pt')
    """

    if callable(drop):
        # used without any args, so drop is actually a function
        #   @torch.jit.ignore
        #   def fn(...):
        fn = drop
        fn._torchscript_modifier = FunctionModifiers.IGNORE
        return fn

    if not isinstance(drop, bool):
        raise RuntimeError(
            "Argument to @torch.jit.ignore must be a bool or "
            f"a function but got {drop}"
        )

    # for backwards compat
    drop_on_export = kwargs.pop("drop_on_export", None)
    if drop_on_export:
        # FIX: both warnings below previously ended with a stray literal "{}"
        # (left over from a removed str.format argument) that rendered
        # verbatim in the user-facing message.
        warnings.warn(
            "ignore(drop_on_export=True) has been deprecated. TorchScript will now drop the function "
            "call on compilation. Use torch.jit.unused now.",
            category=FutureWarning,
        )

        drop = drop_on_export
    elif drop:
        warnings.warn(
            "ignore(True) has been deprecated. TorchScript will now drop the function "
            "call on compilation. Use torch.jit.unused now.",
            category=FutureWarning,
        )

    def decorator(fn):
        # drop=True behaves like @torch.jit.unused; drop=False is a plain ignore.
        if drop:
            fn._torchscript_modifier = FunctionModifiers.UNUSED
        else:
            fn._torchscript_modifier = FunctionModifiers.IGNORE
        return fn

    return decorator
740
+
741
+
742
def _drop(fn):
    """Mark ``fn`` as fully dropped: even its declaration may be unscriptable."""
    fn._torchscript_modifier = FunctionModifiers._DROP
    return fn
745
+
746
+
747
def _copy_to_script_wrapper(fn):
    """Mark ``fn`` so the plain Python method is copied onto the scripted model
    when this method itself is not scripted."""
    fn._torchscript_modifier = FunctionModifiers.COPY_TO_SCRIPT_WRAPPER
    return fn
750
+
751
+
752
def module_has_exports(mod):
    """Return True iff any callable attribute of ``mod`` is marked EXPORT."""
    for name in dir(mod):
        # dir() can list names that fail on getattr; skip those defensively.
        if not hasattr(mod, name):
            continue
        item = getattr(mod, name)
        if callable(item) and get_torchscript_modifier(item) is FunctionModifiers.EXPORT:
            return True
    return False
760
+
761
+
762
+ # WARNING: should_drop is currently being used by our JIT code coverage plug-in to mark JIT'd code as covered. If you
763
+ # rename this function, please update references in tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py to
764
+ # allow JIT'd code to still be covered.
765
def should_drop(fn) -> bool:
    """Return True when calls to ``fn`` are replaced with a raise (UNUSED/_DROP)."""
    modifier = get_torchscript_modifier(fn)
    if modifier is None:
        return False
    return modifier in (FunctionModifiers.UNUSED, FunctionModifiers._DROP)
770
+
771
+
772
def is_ignored_fn(fn) -> bool:
    """Return True for functions marked UNUSED, IGNORE, or _DROP."""
    modifier = get_torchscript_modifier(fn)
    return modifier in (
        FunctionModifiers.UNUSED,
        FunctionModifiers.IGNORE,
        FunctionModifiers._DROP,
    )
779
+
780
+
781
def _is_drop_fn(fn) -> bool:
    """Return True only for functions marked with ``_drop``."""
    return get_torchscript_modifier(fn) is FunctionModifiers._DROP
784
+
785
+
786
def is_static_fn(cls, fn) -> bool:
    """Return True iff attribute ``fn`` of ``cls`` is declared a staticmethod.

    ``getattr_static`` avoids triggering the descriptor protocol, so the raw
    ``staticmethod`` wrapper (not the plain function) is what gets inspected.
    """
    attr = inspect.getattr_static(cls, fn, default=None)
    return isinstance(attr, staticmethod)
788
+
789
+
790
def get_static_fn(cls, fn):
    """Return the plain function wrapped by staticmethod ``fn`` of ``cls``."""
    return inspect.getattr_static(cls, fn).__func__
792
+
793
+
794
def get_torchscript_modifier(fn):
    """Return the TorchScript modifier attached to ``fn``.

    Non-callables yield None; bound methods are unwrapped to their underlying
    function; unmarked callables report DEFAULT.
    """
    if not callable(fn):
        return None
    target = getattr(fn, "__func__", fn)
    return getattr(target, "_torchscript_modifier", FunctionModifiers.DEFAULT)
800
+
801
+
802
def copy_torchscript_modifier(orig, new) -> None:
    """Copy ``orig``'s TorchScript modifier onto ``new`` (no-op when absent)."""
    modifier = get_torchscript_modifier(orig)
    if modifier is None:
        return
    new._torchscript_modifier = modifier
807
+
808
+
809
+ # overloading registration
810
+ # overloads get registered in this file, and compiled in torch/jit/__init__.py
811
+ # so that they can be imported in nn/functional.py without an import cycle
812
+
813
+ # qualified_name => list[overload_functions]
814
+ _overloaded_fns: Dict[str, List[Callable]] = {} # noqa: T484
815
+
816
+
817
+ _OVERLOAD_EXAMPLE = """
818
+ Example usage of overload function:
819
+ @torch.jit._overload
820
+ def my_function(x: type0) -> type0: # decl 1
821
+ pass
822
+
823
+ @torch.jit._overload
824
+ def my_function(x: type1) -> type1: # decl 2
825
+ pass
826
+
827
+ def my_function(x): # implementation
828
+ if isinstance(x, type0):
829
+ return x
830
+ elif isinstance(x, type1):
831
+ return x
832
+ """
833
+
834
+
835
def get_overload_no_implementation_error_message(kind, obj):
    """Build the error shown when overload declarations have no implementation.

    ``kind`` is a human-readable category ("function"/"method"); ``obj`` is
    the overloaded callable whose source location is reported.
    """
    sourcelines, file_lineno, filename = get_source_lines_and_file(obj)
    # FIX: interpolate the real file name - it was already unpacked above but
    # the message previously hard-coded a "(unknown)" placeholder.
    return (
        f'Implementation for the {kind} "{_qualified_name(obj)}" is missing. Please make '
        f"sure a definition is provided and defined after all overload declarations.\n"
        f'File "{filename}", line {file_lineno}:\n'
        + "".join(sourcelines)
        + "\n"
        + _OVERLOAD_EXAMPLE
    )
845
+
846
+
847
def _check_overload_body(func):
    """Verify that an ``@_overload`` declaration's body is just ``pass`` or ``...``."""
    try:
        parsed_def = parse_def(func)
    except OSError:
        # Source can be unavailable (e.g. interactive sessions). This is only
        # an early sanity check, so warn and let ScriptTypeParser handle it.
        warnings.warn(
            f"Unable to retrieve source for @torch.jit._overload function: {func}."
        )
        return

    body = parsed_def.ast.body[0].body

    def acceptable(stmt):
        if isinstance(stmt, ast.Pass):
            return True
        return isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Ellipsis)

    if len(body) != 1 or not acceptable(body[0]):
        msg = (
            "Only `pass` statement or `...` can be the body of overload declaration:\n"
        )
        msg += "\n".join(parsed_def.source.split("\n")[:3])
        msg += " <- Expecting `pass` or `...` here!\n" + _OVERLOAD_EXAMPLE
        raise RuntimeError(msg)
873
+
874
+
875
def _overload(func):
    """Register ``func`` as one overload declaration for its qualified name."""
    _check_overload_body(func)
    qual_name = _qualified_name(func)
    global _overloaded_fns
    # Create the per-name list on first use, then append.
    _overloaded_fns.setdefault(qual_name, []).append(func)
    return func
885
+
886
+
887
def _get_fn_overloads(qual_name):
    """Return the overload declarations recorded for ``qual_name`` (or None)."""
    return _overloaded_fns.get(qual_name)
889
+
890
+
891
def _clear_fn_overloads(qual_name) -> None:
    """Drop every recorded overload for ``qual_name`` (KeyError if absent)."""
    del _overloaded_fns[qual_name]
893
+
894
+
895
def get_class_name_lineno(method) -> Tuple[str, int]:
    """Return (code object name, first line number) exactly two frames up.

    The walk skips this call and the ``_overload_method`` call, landing on the
    class body currently being defined - do not change the depth.
    """
    frame = inspect.currentframe()

    for _ in range(2):
        assert frame is not None  # narrow away Optional[FrameType]
        frame = frame.f_back

    assert frame is not None  # same here
    return frame.f_code.co_name, frame.f_code.co_firstlineno
909
+
910
+
911
+ # At the the point the decorator is applied to class methods the method
912
+ # has no reference to its owning class. _qualified_name would not include
913
+ # the class it is defined in, so any methods with the same name in the same file
914
+ # would have the same _qualified_name, even if they were defined in different
915
+ # classes. This problem only exists in python 2.
916
+ # We get around this problem by looking at the stack frame and identifying
917
+ # the class name, and throwing an error whenever overloads are used
918
+ # when modules of the same name are in the same file
919
+
920
+ # qualified_name => class name => list[overload_functions]
921
+ _overloaded_methods: Dict[str, Dict[str, List[Callable]]] = {} # noqa: T484
922
+
923
+
924
+ # (qualified_name, class name) => class_fileno
925
+ _overloaded_method_class_fileno = {}
926
+
927
+
928
def _overload_method(func):
    """Register ``func`` as an overload declaration for a method.

    Overloads are keyed by (qualified name, enclosing class name); declaring
    the same method name in two same-named classes in one module is rejected
    because the two registrations would collide.
    """
    _check_overload_body(func)
    qual_name = _qualified_name(func)
    global _overloaded_methods
    class_name_map = _overloaded_methods.setdefault(qual_name, {})

    class_name, line_no = get_class_name_lineno(func)
    method_overloads = class_name_map.get(class_name)
    if method_overloads is None:
        method_overloads = []
        class_name_map[class_name] = method_overloads
        _overloaded_method_class_fileno[(qual_name, class_name)] = line_no
    elif _overloaded_method_class_fileno[(qual_name, class_name)] != line_no:
        raise RuntimeError(
            "Cannot currently overload the same method name in two different"
            " classes with the same name in the same module"
        )

    method_overloads.append(func)
    return func
953
+
954
+
955
def _get_overloaded_methods(method, mod_class):
    """Return overloads registered for ``method`` on ``mod_class``, if any.

    Also sanity-checks that the method's source lies within the class body,
    guarding against a module class being redeclared in the same file.
    """
    # TODO: __name__ not set for submodules in recursive script
    if not hasattr(method, "__name__"):
        return None
    class_name_map = _overloaded_methods.get(_qualified_name(method))
    if class_name_map is None:
        return None
    overloads = class_name_map.get(mod_class.__name__)
    if overloads is None:
        return None

    method_line_no = get_source_lines_and_file(method)[1]
    class_sourcelines, class_fileno, _ = get_source_lines_and_file(mod_class)
    class_end_fileno = class_fileno + len(class_sourcelines)
    if not (class_fileno <= method_line_no <= class_end_fileno):
        raise Exception(
            "Overloads are not useable when a module is redeclared within the same file: "
            + str(method)
        )
    return overloads
976
+
977
+
978
def is_tuple(ann) -> bool:
    """True iff ``ann`` is a subscripted ``typing.Tuple`` (or ``tuple``) alias."""
    if ann is Tuple:
        raise_error_container_parameter_missing("Tuple")

    # For some reason Python 3.7 violates the Type[A, B].__origin__ == Type rule
    if not hasattr(ann, "__module__"):
        return False
    if ann.__module__ != "typing":
        return False
    origin = getattr(ann, "__origin__", None)
    return origin is Tuple or origin is tuple
989
+
990
+
991
def is_list(ann) -> bool:
    """True iff ``ann`` is a subscripted ``typing.List`` (or ``list``) alias."""
    if ann is List:
        raise_error_container_parameter_missing("List")

    if not hasattr(ann, "__module__"):
        return False
    if ann.__module__ != "typing":
        return False
    origin = getattr(ann, "__origin__", None)
    return origin is List or origin is list
1001
+
1002
+
1003
def is_dict(ann) -> bool:
    """True iff ``ann`` is a subscripted ``typing.Dict`` (or ``dict``) alias."""
    if ann is Dict:
        raise_error_container_parameter_missing("Dict")

    if not hasattr(ann, "__module__"):
        return False
    if ann.__module__ != "typing":
        return False
    origin = getattr(ann, "__origin__", None)
    return origin is Dict or origin is dict
1013
+
1014
+
1015
def is_union(ann):
    """True iff ``ann`` is a subscripted ``typing.Union`` (includes Optional[T])."""
    if ann is Union:
        raise_error_container_parameter_missing("Union")

    return (
        hasattr(ann, "__module__")
        and ann.__module__ == "typing"
        and getattr(ann, "__origin__", None) is Union
    )
1024
+
1025
+
1026
def is_optional(ann):
    """True iff ``ann`` is Optional[T] or an equivalent two-member Union with None."""
    if ann is Optional:
        raise_error_container_parameter_missing("Optional")

    def direct_optional(a):
        # Some Python versions surface Optional[...] with __origin__ Optional.
        return (
            hasattr(a, "__module__")
            and a.__module__ == "typing"
            and getattr(a, "__origin__", None) is Optional
        )

    def union_with_none(a):
        args = a.__args__
        return len(args) == 2 and (None in args or type(None) in args)

    if direct_optional(ann):
        return True
    return is_union(ann) and union_with_none(ann)
1042
+
1043
+
1044
def is_future(ann) -> bool:
    """True for subscripted ``Future[T]``; bare ``Future`` is rejected."""
    if ann is Future:
        raise RuntimeError(
            "Attempted to use Future without a "
            "contained type. Please add a contained type, e.g. "
            "Future[int]"
        )
    return getattr(ann, "__origin__", None) is Future
1052
+
1053
+
1054
+ def is_await(ann) -> bool:
1055
+ if ann is _Await:
1056
+ return True
1057
+ return getattr(ann, "__origin__", None) is _Await
1058
+
1059
+
1060
if torch.distributed.rpc.is_available():
    from torch._C._distributed_rpc import PyRRef
    from torch.distributed.rpc import RRef

    def is_rref(ann) -> bool:
        """True for subscripted ``RRef[T]``; bare ``RRef`` is rejected."""
        if ann is RRef:
            raise RuntimeError(
                "Attempted to use RRef without a "
                "contained type. Please add a contained type, e.g. "
                "RRef[int]"
            )
        return getattr(ann, "__origin__", None) is RRef

    def is_rref_instance(obj) -> bool:
        """True when ``obj`` is a live RRef handle."""
        return isinstance(obj, PyRRef)

else:

    def is_rref_instance(obj) -> bool:
        # If the RPC module doesn't exist then RRefs don't exist either.
        return False
1081
+
1082
+
1083
def is_final(ann) -> bool:
    """True for ``Final`` / ``Final[T]`` from typing or typing_extensions."""
    if ann.__module__ not in {"typing", "typing_extensions"}:
        return False
    # Subscripted Final[T] carries __origin__ Final; bare Final is an
    # instance of the same special-form class as Final itself.
    return getattr(ann, "__origin__", None) is Final or isinstance(ann, type(Final))
1087
+
1088
+
1089
+ # allows BroadcastingList instance to be subscriptable
1090
class BroadcastingListCls:
    """Placeholder making ``BroadcastingListN`` subscriptable (always None)."""

    def __getitem__(self, types):
        return None
1093
+
1094
+
1095
+ # mypy doesn't support parameters on types, so we have to explicitly type each
1096
+ # list size
1097
# mypy doesn't support parameters on types, so every supported list size gets
# an explicitly-named alias; all of them share one placeholder instance.
BroadcastingList1 = BroadcastingListCls()
for i in range(2, 7):
    globals()[f"BroadcastingList{i}"] = BroadcastingList1
1100
+
1101
+
1102
def is_scripting() -> bool:
    r"""
    Return True while TorchScript is compiling and False in eager mode.

    Especially useful with the @unused decorator to leave code in your model
    that is not yet TorchScript compatible.
    .. testcode::

        import torch

        @torch.jit.unused
        def unsupported_linear_op(x):
            return x

        def linear(x):
            if torch.jit.is_scripting():
                return torch.linear(x)
            else:
                return unsupported_linear_op(x)
    """
    # The compiler special-cases this function; the Python body always
    # reports eager mode.
    return False
1122
+
1123
+
1124
+ # Retrieves a fully-qualified name (module hierarchy + classname) for a given obj.
1125
+ def _qualified_name(obj, mangle_name=True) -> str:
1126
+ # This special case allows us to override the qualified name on a type.
1127
+ # It's currently used in conjunction with tracing, where we create a
1128
+ # fake module to filter only supported attributes. However, since this
1129
+ # new type is defined as a local class, we need a mechanism to override
1130
+ # its qualname so it appears correctly in the TorchScript system. This,
1131
+ # we set '_jit_override_qualname' with the original traced module's
1132
+ # qualified name, which is picked up here
1133
+ if hasattr(obj, "_jit_override_qualname"):
1134
+ return obj._jit_override_qualname
1135
+ # short-circuit in cases where the object already has a known qualified name
1136
+ if isinstance(obj, torch._C.ScriptFunction):
1137
+ return obj.qualified_name
1138
+
1139
+ if getattr(obj, "__name__", None):
1140
+ name = obj.__name__
1141
+ # Enum classes do not have `__name__` attr, instead they have `name`.
1142
+ elif isinstance(obj, enum.Enum):
1143
+ name = obj.name
1144
+ else:
1145
+ raise RuntimeError("Could not get name of python class object")
1146
+
1147
+ if name == "<lambda>":
1148
+ name = "_lambda" # make name a valid identifier
1149
+
1150
+ module_name = obj.__module__
1151
+
1152
+ # If the module is actually a torchbind module, then we should short circuit
1153
+ if module_name == "torch._classes":
1154
+ return obj.qualified_name
1155
+
1156
+ # The Python docs are very clear that `__module__` can be None, but I can't
1157
+ # figure out when it actually would be.
1158
+ if module_name is None:
1159
+ raise RuntimeError(
1160
+ f"Could not get qualified name for class '{name}': "
1161
+ "__module__ can't be None."
1162
+ )
1163
+
1164
+ # if getattr(sys.modules[module_name], name) is not obj:
1165
+ # raise RuntimeError(f"Could not get qualified name for class '{name}': "
1166
+ # f"the attr {name} on module {module_name} is not the the class")
1167
+
1168
+ # torch.package and TorchScript have separate mangling schemes to avoid
1169
+ # name collisions from multiple packages. To avoid them interfering with
1170
+ # each other, normalize the package manging here.
1171
+ if package_mangling.is_mangled(module_name):
1172
+ module_name = module_name.replace("<", "_")
1173
+ module_name = module_name.replace(">", "_")
1174
+
1175
+ # The PythonExceptionValue C++ class in torch/csrc/jit/python/python_sugared_value.h
1176
+ # does not need mangle the python class name.
1177
+ if mangle_name:
1178
+ # __main__ is a builtin module, so rewrite it to "__torch__".
1179
+ if module_name == "__main__":
1180
+ module_name = "__torch__"
1181
+ else:
1182
+ # Everything else gets a "__torch__" prefix to avoid name collisions
1183
+ # with the names of user values.
1184
+ module_name = "__torch__." + module_name
1185
+
1186
+ if "." in name:
1187
+ raise RuntimeError(
1188
+ f"Could not get qualified name for class '{name}': "
1189
+ f"'{name}' is not a valid identifier"
1190
+ )
1191
+
1192
+ return module_name + "." + name
1193
+
1194
+
1195
def _try_get_dispatched_fn(fn):
    """Return the boolean-dispatch record registered for ``fn``, or None."""
    if not callable(fn):
        return None
    return boolean_dispatched.get(fn)
1199
+
1200
+
1201
def _get_named_tuple_properties(obj):
    """Extract (name, fields, field types, defaults) from a NamedTuple class.

    Annotated fields resolve through ``ann_to_type``; unannotated fields fall
    back to an inferred TensorType.
    """
    assert issubclass(obj, tuple) and hasattr(obj, "_fields")
    if hasattr(obj, "_field_defaults"):
        defaults = [
            obj._field_defaults[field]
            for field in obj._fields
            if field in obj._field_defaults
        ]
    else:
        defaults = []
    # From 3.10 the recommended way to fetch annotations is
    # inspect.get_annotations, which does NOT inherit from base classes - so
    # query the base explicitly when the class itself has none.
    if sys.version_info[:2] < (3, 10):
        obj_annotations = getattr(obj, "__annotations__", {})
    else:
        obj_annotations = inspect.get_annotations(obj)
        if len(obj_annotations) == 0 and hasattr(obj, "__base__"):
            obj_annotations = inspect.get_annotations(obj.__base__)

    annotations = []
    for field in obj._fields:
        if field in obj_annotations:
            annotations.append(
                torch.jit.annotations.ann_to_type(obj_annotations[field], fake_range())
            )
        else:
            annotations.append(torch._C.TensorType.getInferred())
    return type(obj).__name__, obj._fields, annotations, defaults
1230
+
1231
+
1232
+ def _create_named_tuple(
1233
+ t, unqual_name: str, field_names: List[str], defaults: Tuple[Any, ...]
1234
+ ):
1235
+ TupleType = collections.namedtuple(unqual_name, field_names, defaults=defaults) # type: ignore[call-arg, no-redef, misc]
1236
+ return TupleType(*t)
1237
+
1238
+
1239
+ @contextlib.contextmanager
1240
+ def _disable_emit_hooks():
1241
+ hooks = torch._C._jit_get_emit_hooks()
1242
+ torch._C._jit_set_emit_hooks(None, None)
1243
+ yield
1244
+ torch._C._jit_set_emit_hooks(hooks[0], hooks[1])
1245
+
1246
+
1247
+ def _disable_emit_hooks_decorator(_DecoratorContextManager) -> None: # noqa: F811
1248
+ def __enter__(self) -> None:
1249
+ self.hooks = torch._C._jit_get_emit_hooks()
1250
+ torch._C._jit_set_emit_hooks(None, None)
1251
+
1252
+ def __exit__(self, *args) -> None:
1253
+ torch._C._jit_set_emit_hooks(self.hooks[0], self.hooks[1])
1254
+
1255
+
1256
+ def _is_exception(obj) -> bool:
1257
+ if not inspect.isclass(obj):
1258
+ return False
1259
+ return issubclass(obj, Exception)
1260
+
1261
+
1262
def raise_error_container_parameter_missing(target_type) -> None:
    """Raise a RuntimeError telling the user to subscript a bare container type."""
    # Dict needs two contained types, so it gets a dedicated message.
    if target_type == "Dict":
        raise RuntimeError(
            "Attempted to use Dict without "
            "contained types. Please add contained type, e.g. "
            "Dict[int, int]"
        )
    raise RuntimeError(
        f"Attempted to use {target_type} without a "
        "contained type. Please add a contained type, e.g. "
        f"{target_type}[int]"
    )
1274
+
1275
+
1276
def get_origin(target_type):
    """Return the ``__origin__`` of a typing alias, or None when absent."""
    return getattr(target_type, "__origin__", None)
1278
+
1279
+
1280
def get_args(target_type):
    """Return the ``__args__`` of a typing alias, or None when absent."""
    return getattr(target_type, "__args__", None)
1282
+
1283
+
1284
def check_args_exist(target_type) -> None:
    """Reject bare (unsubscripted) List/Tuple/Dict/Optional annotations.

    Parameterized aliases and ordinary types pass through silently.
    """
    if target_type is List or target_type is list:
        raise_error_container_parameter_missing("List")
    if target_type is Tuple or target_type is tuple:
        raise_error_container_parameter_missing("Tuple")
    if target_type is Dict or target_type is dict:
        raise_error_container_parameter_missing("Dict")
    if target_type is None or target_type is Optional:
        raise_error_container_parameter_missing("Optional")
1293
+
1294
+
1295
def check_empty_containers(obj) -> None:
    """Warn that element types of empty containers cannot be checked eagerly."""
    if any(obj == empty for empty in ([], {}, ())):
        warnings.warn(
            "The inner type of a container is lost when "
            "calling torch.jit.isinstance in eager mode. For "
            "example, List[int] would become list and "
            "therefore falsely return True for List[float] or"
            " List[str]."
        )
1304
+
1305
+
1306
+ # supports List/Dict/Tuple and Optional types
1307
+ # TODO support future
1308
def container_checker(obj, target_type) -> bool:
    """Recursively check ``obj`` against a parameterized container annotation.

    Supports List/Dict/Tuple and Optional/Union targets.
    TODO: support Future. Empty containers trigger a warning because their
    element type is lost in eager mode.
    """
    origin_type = get_origin(target_type)
    check_args_exist(target_type)
    if origin_type is list or origin_type is List:
        check_empty_containers(obj)
        if not isinstance(obj, list):
            return False
        arg_type = get_args(target_type)[0]
        arg_origin = get_origin(arg_type)
        for el in obj:
            if arg_origin:  # processes nested container, ex: List[List[str]]
                if not container_checker(el, arg_type):
                    return False
            elif not isinstance(el, arg_type):
                return False
        return True
    elif origin_type is Dict or origin_type is dict:
        check_empty_containers(obj)
        if not isinstance(obj, dict):
            return False
        key_type = get_args(target_type)[0]
        val_type = get_args(target_type)[1]
        for key, val in obj.items():
            # check if keys are of right type
            if not isinstance(key, key_type):
                return False
            val_origin = get_origin(val_type)
            if val_origin:
                if not container_checker(val, val_type):
                    return False
            elif not isinstance(val, val_type):
                return False
        return True
    elif origin_type is Tuple or origin_type is tuple:
        check_empty_containers(obj)
        if not isinstance(obj, tuple):
            return False
        arg_types = get_args(target_type)
        if len(obj) != len(arg_types):
            return False
        for el, el_type in zip(obj, arg_types):
            el_origin = get_origin(el_type)
            if el_origin:
                if not container_checker(el, el_type):
                    return False
            elif not isinstance(el, el_type):
                return False
        return True
    elif origin_type is Union:  # also handles Optional
        if obj is None:  # check before recursion because None is always fine
            return True
        inner_types = get_args(target_type)
        for t in inner_types:
            t_origin = get_origin(t)
            if t_origin:
                # FIX: keep trying the remaining union members instead of
                # returning the first container member's verdict outright
                # (e.g. Union[List[int], str] previously rejected "abc").
                if container_checker(obj, t):
                    return True
            elif isinstance(obj, t):
                return True
        return False
    # FIX: unsupported origins (e.g. Set) previously fell through and
    # returned None despite the `-> bool` annotation.
    return False
1368
+
1369
+
1370
def _isinstance(obj, target_type) -> bool:
    """Eager-mode implementation of ``torch.jit.isinstance``.

    Accepts a single annotation or a tuple of them; parameterized container
    annotations are validated recursively via ``container_checker``.
    """
    if isinstance(target_type, collections.abc.Container):
        if not isinstance(target_type, tuple):
            raise RuntimeError(
                "The second argument to "
                "`torch.jit.isinstance` must be a type "
                "or a tuple of types"
            )
        return any(_isinstance(obj, candidate) for candidate in target_type)

    if get_origin(target_type):
        return container_checker(obj, target_type)

    # Check to handle non-typed optional origin returns as none instead
    # of as optional in 3.7-3.8
    check_args_exist(target_type)

    # handle non-containers
    return isinstance(obj, target_type)
1393
+
1394
+
1395
class _TensorExtractor(pickle.Pickler):
    """Pickler that records every tensor it meets instead of serializing it."""

    def __init__(self, *args, tensors: List[torch.Tensor], **kwargs):
        super().__init__(*args, **kwargs)
        # Shared output list: encountered tensors are appended here.
        self.tensors = tensors

    def persistent_id(self, obj):
        if isinstance(obj, torch.Tensor):
            self.tensors.append(obj)
            return ""
        # Since we just want to extract tensors, we don't mind if an object is
        # unpicklable if it doesn't contain tensors, as we can just ignore/skip
        # it. To play it safe, we only do so for common objects that we're sure
        # don't contain tensors. Feel free to add new types here. Note also that
        # even if a type isn't listed here this won't block users, since they
        # can just add a __getstate__ or __reduce__ method to their class.
        if isinstance(obj, (LockType, CAwait, torch.cuda.Event, threading.Thread)):
            return ""
        # Futures and RRefs don't technically contain a value; they just offer
        # the means to access one.
        if isinstance(obj, CFuture) or is_rref_instance(obj):
            return ""
        return None
1423
+
1424
+
1425
def _extract_tensors(obj):
    r"""
    Collect every tensor reachable from ``obj`` via pickling.

    This function is exclusively called from C++; see
    ``torch/csrc/jit/python/python_ivalue.h``. The pickle bytes themselves are
    discarded - only the tensors siphoned off by ``persistent_id`` matter.
    """
    tensors: List[torch.Tensor] = []
    _TensorExtractor(io.BytesIO(), protocol=-1, tensors=tensors).dump(obj)
    return tensors
wemm/lib/python3.10/site-packages/torch/_lowrank.py ADDED
@@ -0,0 +1,299 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Implement various linear algebra algorithms for low rank matrices.
2
+ """
3
+
4
+ __all__ = ["svd_lowrank", "pca_lowrank"]
5
+
6
+ from typing import Optional, Tuple
7
+
8
+ import torch
9
+ from torch import Tensor
10
+ from . import _linalg_utils as _utils
11
+ from .overrides import handle_torch_function, has_torch_function
12
+
13
+
14
def get_approximate_basis(
    A: Tensor, q: int, niter: Optional[int] = 2, M: Optional[Tensor] = None
) -> Tensor:
    """Return a tensor :math:`Q` with :math:`q` orthonormal columns such that
    :math:`Q Q^H A` approximates :math:`A` (or, when :math:`M` is given,
    such that :math:`Q Q^H (A - M)` approximates :math:`A - M`).

    Implements Algorithm 4.4 of Halko et al., "Finding structure with
    randomness" (`arXiv <http://arxiv.org/abs/0909.4061>`_, 2009).

    .. note:: For a k-rank input :math:`A` (k possibly only estimated),
              choosing :math:`k <= q <= min(2*k, m, n)` is generally
              adequate; :math:`q = k + 5..10` for large low-rank matrices.

    .. note:: Reset the pseudorandom seed to obtain repeatable results.

    Args:
        A (Tensor): input tensor of size :math:`(*, m, n)`.
        q (int): dimension of the subspace spanned by the :math:`Q` columns.
        niter (int, optional): number of subspace iterations; must be a
            nonnegative integer (default: 2).
        M (Tensor, optional): the input tensor's mean, size :math:`(*, 1, n)`.
    """
    n_iters = 2 if niter is None else niter
    n = A.shape[-1]
    dtype = _utils.get_floating_dtype(A)
    matmul = _utils.matmul

    # Random test matrix; its image under A spans (w.h.p.) the dominant subspace.
    R = torch.randn(n, q, dtype=dtype, device=A.device)

    # torch.geqrf + torch.ormqr would be faster, but geqrf is not
    # differentiable, so stick with torch.linalg.qr.
    A_H = _utils.transjugate(A)
    if M is None:
        apply_A = lambda X: matmul(A, X)
        apply_A_H = lambda X: matmul(A_H, X)
    else:
        M_H = _utils.transjugate(M)
        apply_A = lambda X: matmul(A, X) - matmul(M, X)
        apply_A_H = lambda X: matmul(A_H, X) - matmul(M_H, X)

    Q = torch.linalg.qr(apply_A(R)).Q
    for _ in range(n_iters):
        # One subspace (power) iteration: sharpen the basis via A^H then A.
        Q = torch.linalg.qr(apply_A_H(Q)).Q
        Q = torch.linalg.qr(apply_A(Q)).Q

    return Q
82
+
83
+
84
def svd_lowrank(
    A: Tensor,
    q: Optional[int] = 6,
    niter: Optional[int] = 2,
    M: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""Return ``(U, S, V)``, an approximate singular value decomposition of a
    matrix, batches of matrices, or a sparse matrix :math:`A` such that
    :math:`A \approx U diag(S) V^T`. When :math:`M` is given, the SVD of
    :math:`A - M` is computed instead.

    Based on Algorithm 5.1 of Halko et al.
    (`arXiv <https://arxiv.org/abs/0909.4061>`_, 2009).

    .. note:: The input is assumed to be low-rank. Reset the pseudorandom
              seed for repeatable results.

    .. note:: For dense matrices prefer the full-rank
              :func:`torch.linalg.svd`, which is roughly 10x faster; this
              routine is aimed at huge sparse matrices that
              :func:`torch.linalg.svd` cannot handle.

    Args:
        A (Tensor): input tensor of size :math:`(*, m, n)`.
        q (int, optional): a slightly overestimated rank of A (default: 6).
        niter (int, optional): number of subspace iterations to conduct;
            must be a nonnegative integer (default: 2).
        M (Tensor, optional): the input tensor's mean, size :math:`(*, 1, n)`.
    """
    if not torch.jit.is_scripting():
        tensor_ops = (A, M)
        plain_tensors = set(map(type, tensor_ops)).issubset(
            (torch.Tensor, type(None))
        )
        # Dispatch to __torch_function__ overrides for tensor subclasses.
        if not plain_tensors and has_torch_function(tensor_ops):
            return handle_torch_function(
                svd_lowrank, tensor_ops, A, q=q, niter=niter, M=M
            )
    return _svd_lowrank(A, q=q, niter=niter, M=M)
138
+
139
+
140
def _svd_lowrank(
    A: Tensor,
    q: Optional[int] = 6,
    niter: Optional[int] = 2,
    M: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
    # Worker for svd_lowrank: randomized SVD of A (or A - M when M is given),
    # returning (U, S, V) with A ~= U diag(S) V^H.
    q = 6 if q is None else q
    m, n = A.shape[-2:]
    matmul = _utils.matmul
    if M is None:
        M_t = None
    else:
        M_t = _utils.transpose(M)
    A_t = _utils.transpose(A)

    # Algorithm 5.1 in Halko et al 2009, slightly modified to reduce
    # the number conjugate and transpose operations
    if m < n or n > q:
        # computing the SVD approximation of a transpose in
        # order to keep B shape minimal (the m < n case) or the V
        # shape small (the n > q case)
        Q = get_approximate_basis(A_t, q, niter=niter, M=M_t)
        Q_c = _utils.conjugate(Q)
        # B_t = (A - M) Q*, an (m, q) matrix whose SVD shares A's left factors.
        if M is None:
            B_t = matmul(A, Q_c)
        else:
            B_t = matmul(A, Q_c) - matmul(M, Q_c)
        assert B_t.shape[-2] == m, (B_t.shape, m)
        assert B_t.shape[-1] == q, (B_t.shape, q)
        assert B_t.shape[-1] <= B_t.shape[-2], B_t.shape
        U, S, Vh = torch.linalg.svd(B_t, full_matrices=False)
        V = Vh.mH
        # Lift the small-space right singular vectors back: V <- Q V.
        V = Q.matmul(V)
    else:
        Q = get_approximate_basis(A, q, niter=niter, M=M)
        Q_c = _utils.conjugate(Q)
        # B = (A - M)^T Q*, an (n, q) matrix; transpose gives a (q, n) problem.
        if M is None:
            B = matmul(A_t, Q_c)
        else:
            B = matmul(A_t, Q_c) - matmul(M_t, Q_c)
        B_t = _utils.transpose(B)
        assert B_t.shape[-2] == q, (B_t.shape, q)
        assert B_t.shape[-1] == n, (B_t.shape, n)
        assert B_t.shape[-1] <= B_t.shape[-2], B_t.shape
        U, S, Vh = torch.linalg.svd(B_t, full_matrices=False)
        V = Vh.mH
        # Lift the small-space left singular vectors back: U <- Q U.
        U = Q.matmul(U)

    return U, S, V
189
+
190
+
191
def pca_lowrank(
    A: Tensor, q: Optional[int] = None, center: bool = True, niter: int = 2
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""Performs linear Principal Component Analysis (PCA) on a low-rank
    matrix, batches of such matrices, or sparse matrix.

    This function returns a namedtuple ``(U, S, V)`` which is the
    nearly optimal approximation of a singular value decomposition of
    a centered matrix :math:`A` such that :math:`A = U diag(S) V^T`.

    .. note:: The relation of ``(U, S, V)`` to PCA is as follows:

        - :math:`A` is a data matrix with ``m`` samples and
          ``n`` features

        - the :math:`V` columns represent the principal directions

        - :math:`S ** 2 / (m - 1)` contains the eigenvalues of
          :math:`A^T A / (m - 1)` which is the covariance of
          ``A`` when ``center=True`` is provided.

        - ``matmul(A, V[:, :k])`` projects data to the first k
          principal components

    .. note:: Different from the standard SVD, the size of returned
              matrices depend on the specified rank and q
              values as follows:

        - :math:`U` is m x q matrix

        - :math:`S` is q-vector

        - :math:`V` is n x q matrix

    .. note:: To obtain repeatable results, reset the seed for the
              pseudorandom number generator

    Args:

        A (Tensor): the input tensor of size :math:`(*, m, n)`

        q (int, optional): a slightly overestimated rank of
                           :math:`A`. By default, ``q = min(6, m,
                           n)``.

        center (bool, optional): if True, center the input tensor,
                                 otherwise, assume that the input is
                                 centered.

        niter (int, optional): the number of subspace iterations to
                               conduct; niter must be a nonnegative
                               integer, and defaults to 2.

    References::

        - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
          structure with randomness: probabilistic algorithms for
          constructing approximate matrix decompositions,
          arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
          `arXiv <http://arxiv.org/abs/0909.4061>`_).

    """

    if not torch.jit.is_scripting():
        # Defer to __torch_function__ overrides (e.g. tensor subclasses).
        if type(A) is not torch.Tensor and has_torch_function((A,)):
            return handle_torch_function(
                pca_lowrank, (A,), A, q=q, center=center, niter=niter
            )

    (m, n) = A.shape[-2:]

    # Validate q (rank overestimate) and niter before any work is done.
    if q is None:
        q = min(6, m, n)
    elif not (q >= 0 and q <= min(m, n)):
        raise ValueError(
            "q(={}) must be non-negative integer"
            " and not greater than min(m, n)={}".format(q, min(m, n))
        )
    if not (niter >= 0):
        raise ValueError("niter(={}) must be non-negative integer".format(niter))

    dtype = _utils.get_floating_dtype(A)

    if not center:
        # Input is assumed pre-centered; run the randomized SVD directly.
        return _svd_lowrank(A, q, niter=niter, M=None)

    if _utils.is_sparse(A):
        if len(A.shape) != 2:
            raise ValueError("pca_lowrank input is expected to be 2-dimensional tensor")
        # Column means of the sparse matrix (a sparse vector of length n).
        c = torch.sparse.sum(A, dim=(-2,)) / m
        # reshape c into a sparse (n, 1) column vector C_t.
        column_indices = c.indices()[0]
        indices = torch.zeros(
            2,
            len(column_indices),
            dtype=column_indices.dtype,
            device=column_indices.device,
        )
        indices[0] = column_indices
        C_t = torch.sparse_coo_tensor(
            indices, c.values(), (n, 1), dtype=dtype, device=A.device
        )

        # Build the mean matrix M = ones(m, 1) @ c as a product, so the
        # centered matrix A - M is never materialized densely; _svd_lowrank
        # subtracts M's action on the fly.
        ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)
        M = _utils.transpose(torch.sparse.mm(C_t, ones_m1_t))
        return _svd_lowrank(A, q, niter=niter, M=M)
    else:
        # Dense path: subtract the per-column mean directly.
        C = A.mean(dim=(-2,), keepdim=True)
        return _svd_lowrank(A - C, q, niter=niter, M=None)
wemm/lib/python3.10/site-packages/torch/_meta_registrations.py ADDED
@@ -0,0 +1,2705 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from typing import List, Optional, Union
3
+
4
+ import torch
5
+ import torch._prims_common as utils
6
+ from torch import Tensor
7
+ from torch._decomp import _add_op_to_registry, global_decomposition_table, meta_table
8
+ from torch._ops import OpOverload
9
+ from torch._prims import _elementwise_meta, ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND
10
+ from torch._prims_common import (
11
+ check,
12
+ corresponding_complex_dtype,
13
+ corresponding_real_dtype,
14
+ elementwise_dtypes,
15
+ ELEMENTWISE_TYPE_PROMOTION_KIND,
16
+ IntLike,
17
+ make_contiguous_strides_for,
18
+ )
19
+
20
+ from torch._prims_common.wrappers import out_wrapper
21
+ from torch._refs import _broadcast_shapes
22
+
23
+ from torch._subclasses.fake_tensor import check_no_bool_index_tensors
24
+ from torch.utils._pytree import tree_map
25
+
26
+
27
+ aten = torch.ops.aten
28
+
29
+ _meta_lib_dont_use_me_use_register_meta = torch.library.Library("aten", "IMPL", "Meta")
30
+
31
+
32
def register_meta(op):
    """Decorator registering a function as the meta kernel for ``op``
    (a single OpOverload or any pytree of them)."""

    def wrapper(fn):
        tree_map(lambda o: _add_op_to_registry(meta_table, o, fn), op)
        return fn

    return wrapper
41
+
42
+
43
def toRealValueType(dtype):
    """Map a complex dtype to its real counterpart; other dtypes pass through."""
    if dtype == torch.complex32:
        return torch.half
    if dtype == torch.cfloat:
        return torch.float
    if dtype == torch.cdouble:
        return torch.double
    return dtype
50
+
51
+
52
@register_meta([aten._fft_c2c.default, aten._fft_c2c.out])
@out_wrapper()
def meta_fft_c2c(self, dim, normalization, forward):
    # Complex-to-complex FFT: output has the same shape and dtype as input.
    assert self.dtype.is_complex
    return self.new_empty(self.size())
57
+
58
+
59
@register_meta([aten._fft_r2c.default, aten._fft_r2c.out])
@out_wrapper()
def meta_fft_r2c(self, dim, normalization, onesided):
    """Real-to-complex FFT meta kernel: output dtype is the complex
    counterpart of the input; with ``onesided`` the last transformed
    dim shrinks to ``n // 2 + 1`` (Hermitian symmetry)."""
    assert self.dtype.is_floating_point
    out_shape = list(self.size())

    if onesided:
        d = dim[-1]
        out_shape[d] = out_shape[d] // 2 + 1

    return self.new_empty(
        out_shape, dtype=utils.corresponding_complex_dtype(self.dtype)
    )
73
+
74
+
75
@register_meta(aten.randperm.generator_out)
def meta_randperm(n, *, generator=None, out):
    # out= variant: validate that the supplied buffer is a length-n vector
    # and return it; values are never materialized on the meta backend.
    assert out.ndim == 1 and out.size(0) == n
    return out
79
+
80
+
81
@register_meta(aten.randint.default)
def meta_randint(
    high, size, *, dtype=torch.long, layout=None, device=None, pin_memory=None
):
    # Only metadata matters on the meta backend; `high` does not affect the
    # output's shape or dtype, so an empty allocation suffices.
    return torch.empty(
        size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
88
+
89
+
90
@register_meta(aten.randint.low)
def meta_randint_low(
    low, high, size, *, dtype=torch.long, layout=None, device=None, pin_memory=None
):
    # Only metadata matters on the meta backend; `low`/`high` do not affect
    # the output's shape or dtype.
    return torch.empty(
        size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
97
+
98
+
99
@register_meta(aten.rand.default)
def meta_rand_default(size, *, dtype=None, layout=None, device=None, pin_memory=None):
    # Only metadata matters on the meta backend; no random values are drawn.
    return torch.empty(
        size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
    )
104
+
105
+
106
@register_meta([aten._fft_c2r.default, aten._fft_c2r.out])
@out_wrapper()
def meta_fft_c2r(self, dim, normalization, lastdim):
    """Complex-to-real inverse FFT meta kernel: the last transformed dim
    expands to ``lastdim`` and the output dtype is the real counterpart
    of the (complex) input dtype."""
    assert self.dtype.is_complex
    shape = list(self.size())
    shape[dim[-1]] = lastdim
    return self.new_empty(shape, dtype=toRealValueType(self.dtype))
113
+
114
+
115
@register_meta(aten.copy_.default)
def meta_copy_(self, src, non_blocking=False):
    # In-place copy: self's metadata (shape/dtype/device) is unchanged,
    # so simply return self.
    return self
118
+
119
+
120
def inferUnsqueezeGeometry(tensor, dim):
    """Compute (sizes, strides) for ``tensor`` with a size-1 dim inserted at ``dim``."""
    sizes = list(tensor.size())
    strides = list(tensor.stride())
    # Stride of the inserted dim: 1 when appended at the end, otherwise the
    # size*stride of the dim it displaces (keeps the view contiguous-in-place).
    if dim >= tensor.dim():
        inserted_stride = 1
    else:
        inserted_stride = sizes[dim] * strides[dim]
    sizes.insert(dim, 1)
    strides.insert(dim, inserted_stride)
    return sizes, strides
127
+
128
+
129
@register_meta(aten.unsqueeze_.default)
def meta_unsqueeze_(self, dim):
    # In-place unsqueeze: restride `self` with a size-1 dim inserted at `dim`.
    # NOTE(review): `maybe_wrap_dim` is not among the imports visible in this
    # chunk -- presumably imported or defined elsewhere in the file; verify.
    dim = maybe_wrap_dim(dim, self.dim() + 1)
    g_sizes, g_strides = inferUnsqueezeGeometry(self, dim)
    self.as_strided_(g_sizes, g_strides)
    return self
135
+
136
+
137
+ # Implementations below are taken from https://github.com/albanD/subclass_zoo/blob/main/python_meta_tensor.py
138
@register_meta(aten.index_select.default)
def meta_index_select(self, dim, index):
    """index_select meta kernel: output matches self except along ``dim``,
    which becomes ``index.numel()``. 0-d inputs keep their shape."""
    out_shape = list(self.size())
    if self.dim() > 0:
        out_shape[dim] = index.numel()
    return self.new_empty(out_shape)
144
+
145
+
146
@register_meta(aten.index_select.out)
def meta_index_select_out(self, dim, index, out):
    # out= variant: resize the provided buffer, then delegate to the
    # functional meta kernel via copy_.
    # NOTE(review): `out` is resized to self.size(), not the index_select
    # result shape -- that is what the code as written does; verify against
    # the eager kernel's behavior.
    torch._resize_output_(out, self.size(), self.device)
    return out.copy_(torch.index_select(self, dim, index))
150
+
151
+
152
@register_meta([aten.max.default, aten.max.unary_out])
@out_wrapper()
def meta_max(self):
    # Full reduction: result is a 0-d tensor with self's dtype/device.
    return self.new_empty(())
156
+
157
+
158
@register_meta(aten.max.dim)
def meta_max_dim(self, dim, keepdim=False):
    """max-over-dim meta kernel: returns (values, long-dtype indices), both
    shaped like the reduction output."""
    dims = utils.reduction_dims(self.shape, (dim,))
    shape = _compute_reduction_shape(self, dims, keepdim)
    values = self.new_empty(shape)
    indices = self.new_empty(shape, dtype=torch.long)
    return (values, indices)
166
+
167
+
168
@register_meta([aten.min.default])
def meta_min(self):
    # Full reduction: result is a 0-d tensor with self's dtype/device.
    return self.new_empty(())
171
+
172
+
173
@register_meta(aten.angle.default)
def meta_angle(self):
    """angle() meta kernel: complex inputs map to the corresponding real
    dtype; otherwise integer inputs promote to floating point."""
    if self.is_complex():
        out_dtype = corresponding_real_dtype(self.dtype)
    else:
        _, out_dtype = elementwise_dtypes(
            self, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
        )
    return torch.empty_like(self, dtype=out_dtype)
182
+
183
+
184
@register_meta(aten.angle.out)
def meta_angle_out(self, out):
    # out= variant: resize the buffer to self's shape, then copy from the
    # functional result (angle is shape-preserving).
    torch._resize_output_(out, self.size(), self.device)
    return out.copy_(torch.angle(self))
188
+
189
+
190
+ # From aten/src/ATen/native/LinearAlgebraUtils.h
191
def squareCheckInputs(self: Tensor, f_name: str):
    """Assert that ``self`` is (a batch of) square matrices (>= 2 dims,
    last two dims equal); ``f_name`` prefixes the failure messages."""
    assert (
        self.dim() >= 2
    ), f"{f_name}: The input tensor must have at least 2 dimensions."
    rows, cols = self.size(-2), self.size(-1)
    assert (
        cols == rows
    ), f"{f_name}: A must be batches of square matrices, but they are {rows} by {cols} matrices"
198
+
199
+
200
+ # From aten/src/ATen/native/LinearAlgebraUtils.h
201
def checkFloatingOrComplex(
    t: Tensor, f_name: str, allow_low_precision_dtypes: bool = True
):
    """Validate that ``t`` has a floating-point or complex dtype.

    When ``allow_low_precision_dtypes`` is False, additionally require a
    full-precision dtype (float/double/cfloat/cdouble).

    Raises (via ``check``) with ``f_name``-prefixed messages on violation.
    """
    dtype = t.dtype
    check(
        t.is_floating_point() or t.is_complex(),
        lambda: f"{f_name}, : Expected a floating point or complex tensor as input. Got , {dtype}",
    )
    # Bug fix: the full-precision requirement must apply when low-precision
    # dtypes are NOT allowed; the original guard was inverted, so callers
    # passing allow_low_precision_dtypes=False skipped the check entirely.
    if not allow_low_precision_dtypes:
        check(
            dtype in (torch.float, torch.double, torch.cfloat, torch.cdouble),
            lambda: f"{f_name} : Low precision dtypes not supported. Got {dtype}",
        )
214
+
215
+
216
+ # From aten/src/ATen/native/LinearAlgebraUtils.h
217
def checkIsMatrix(A: Tensor, f_name: str, arg_name: str = "A"):
    """Raise unless ``A`` has at least two dimensions."""
    at_least_2d = A.dim() >= 2
    check(
        at_least_2d,
        lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.",
    )
222
+
223
+
224
def checkUplo(uplo: str):
    """Validate an UPLO argument: exactly one character, 'U'/'u' or 'L'/'l'.

    Bug fix: the original condition parsed as
    ``(len(uplo) == 1 and upper == "U") or upper == "L"`` due to and/or
    precedence, so multi-character strings like "LL" were accepted.
    """
    uplo_uppercase = uplo.upper()
    assert (
        len(uplo) == 1 and uplo_uppercase in ("U", "L")
    ), f"Expected UPLO argument to be 'L' or 'U', but got {uplo}"
229
+
230
+
231
# @register_meta(aten.linalg_eigh.default)
def meta_linalg_eigh(self, uplo="L"):
    # Currently unregistered (decorator commented out above).
    squareCheckInputs(self, "linalg_eigh")
    checkUplo(uplo)
    real_dtype = toRealValueType(self.dtype)
    assert self.dim() >= 2
    # NOTE(review): `values` is allocated with the full matrix shape and
    # `vectors` with shape[:-1]; eigenvalues are conventionally shape[:-1]
    # and eigenvectors the full matrix shape, so these look swapped. Verify
    # before re-enabling the registration.
    values = self.new_empty(self.shape, dtype=real_dtype)
    values.transpose_(-2, -1)
    vectors = self.new_empty(self.shape[:-1])
    return (values, vectors)
241
+
242
+
243
+ # From aten/src/ATen/native/BatchLinearAlgebra.cpp
244
@register_meta(aten.linalg_cholesky_ex.default)
def linalg_cholesky_ex(A: Tensor, upper: bool = False, check_errors: bool = False):
    """Meta kernel for linalg.cholesky_ex: returns (L, infos)."""
    squareCheckInputs(A, "linalg.cholesky")
    checkFloatingOrComplex(A, "linalg.cholesky")

    shape = A.shape

    # Factor: same shape as A, restrided column-major (LAPACK layout).
    factor = A.new_empty(shape)
    factor.as_strided_(shape, make_contiguous_strides_for(shape, False))

    # One int32 status value per batch element.
    infos = A.new_empty(shape[:-2], dtype=torch.int32)
    return factor, infos
260
+
261
+
262
+ # From aten/src/ATen/native/BatchLinearAlgebra.cpp
263
@register_meta(aten.linalg_inv_ex.default)
def linalg_inv_ex_meta(A: Tensor, check_errors: bool = False):
    """Meta kernel for linalg.inv_ex: returns (inverse, infos)."""
    squareCheckInputs(A, "linalg.inv_ex")
    checkFloatingOrComplex(A, "linalg.inv_ex", allow_low_precision_dtypes=False)

    # Inverse: same shape as A, restrided column-major (LAPACK layout).
    inv = A.new_empty(A.shape)
    inv.as_strided_(A.shape, make_contiguous_strides_for(A.shape, row_major=False))

    # One int32 status value per batch element.
    infos = A.new_empty(A.shape[:-2], dtype=torch.int32)
    return inv, infos
273
+
274
+
275
+ # From aten/src/ATen/native/BatchLinearAlgebra.cpp
276
+ # NOTE: matching defaults in aten/src/ATen/native/native_functions.yaml
277
@register_meta(aten._linalg_svd.default)
def _linalg_svd_meta(
    A: Tensor, full_matrices: bool = False, compute_uv: bool = True, driver: str = None
):
    # Meta kernel for _linalg_svd: allocates U, S, V with column-major
    # (LAPACK-friendly) layouts. A is (*, m, n); k = min(m, n).
    checkIsMatrix(A, "linalg.svd")
    checkFloatingOrComplex(A, "linalg.svd")

    batch_dims = list(A.shape[:-2])
    m = A.shape[-2]
    n = A.shape[-1]
    k = min(m, n)

    if compute_uv:
        # U: (*, m, m) when full_matrices else (*, m, k), column-major.
        U_shape = batch_dims + [m, m if full_matrices else k]
        U = A.new_empty(U_shape)
        U.as_strided_(U_shape, make_contiguous_strides_for(U_shape, row_major=False))

        # V: (*, n, n) when full_matrices else (*, k, n), column-major.
        V_shape = batch_dims + [n if full_matrices else k, n]
        V = A.new_empty(V_shape)
        # TODO: need to distinguish cuSOLVER case? (see original code)
        V.as_strided_(V_shape, make_contiguous_strides_for(V_shape, row_major=False))
    else:
        # doesn't matter
        U = A.new_empty([0])
        V = A.new_empty([0])

    # S is always real, even when A is complex.
    S = A.new_empty(batch_dims + [k], dtype=toRealValueType(A.dtype))
    return U, S, V
306
+
307
+
308
+ # From aten/src/ATen/native/LinearAlgebra.cpp
309
@register_meta(aten._linalg_det.default)
def _linalg_det_meta(A):
    """Meta kernel for _linalg_det: returns (det, LU, pivots)."""
    squareCheckInputs(A, "linalg.det")
    checkFloatingOrComplex(A, "linalg.det")

    # LU factorization buffer: same shape as A, column-major layout.
    LU = A.new_empty(A.shape)
    LU.as_strided_(A.shape, make_contiguous_strides_for(A.shape, row_major=False))

    # int32 pivot indices, one vector of pivots per matrix.
    pivots = A.new_empty(A.shape[:-1], dtype=torch.int32)

    # One determinant value per batch element.
    det = A.new_empty(A.shape[:-2])
    return det, LU, pivots
321
+
322
+
323
+ # From aten/src/ATen/native/ReflectionPad.cpp
324
@register_meta(
    [aten.reflection_pad2d_backward.default, aten.replication_pad2d_backward.default]
)
def meta_pad2d_backward(grad_output, self, padding):
    """Meta kernel for 2-d reflection/replication pad backward: validates that
    grad_output's spatial dims equal input dims plus padding, then returns a
    gradient buffer shaped like the input."""
    pad_l, pad_r, pad_t, pad_b = padding

    # With a leading batch dim (4-d input), height/width shift right by one.
    batched = self.dim() == 4
    dim_h = 2 if batched else 1
    dim_w = 3 if batched else 2

    shape = self.shape
    expected_h = shape[dim_h] + pad_t + pad_b
    expected_w = shape[dim_w] + pad_l + pad_r

    check(
        expected_w == grad_output.shape[dim_w],
        lambda: f"gradOutput width unexpected. Expected: {expected_w}, Got: {grad_output.shape[dim_w]}",
    )
    check(
        expected_h == grad_output.shape[dim_h],
        lambda: f"gradOutput height unexpected. Expected: {expected_h}, Got: {grad_output.shape[dim_h]}",
    )
    return self.new_empty(self.shape)
360
+
361
+
362
@register_meta(aten.reflection_pad2d.default)
def meta_pad2d(self, padding):
    """Meta kernel for reflection_pad2d: spatial dims grow by the four pad
    amounts; a leading batch dim, if present, is preserved."""
    valid_dims = self.size(1) != 0 and self.size(2) != 0
    check(
        (self.ndim == 3 and valid_dims)
        or (self.ndim == 4 and valid_dims and self.size(3) != 0),
        lambda: f"3D or 4D (batch mode) tensor expected for input, but got: {self}",
    )
    pad_l, pad_r, pad_t, pad_b = padding
    if self.ndim == 4:
        nbatch, nplane, in_h, in_w = self.shape
        out_shape = (nbatch, nplane, in_h + pad_t + pad_b, in_w + pad_l + pad_r)
    else:
        nplane, in_h, in_w = self.shape
        out_shape = (nplane, in_h + pad_t + pad_b, in_w + pad_l + pad_r)
    return self.new_empty(out_shape)
385
+
386
+
387
@register_meta([aten.bernoulli.default, aten.bernoulli.out])
@out_wrapper()
def meta_bernoulli(self, *, generator=None):
    # https://github.com/pytorch/pytorch/issues/88612
    # .contiguous() mirrors the eager kernel's output layout per the issue above.
    return torch.empty_like(self).contiguous()
392
+
393
+
394
@register_meta(aten.bernoulli_.float)
def meta_bernoulli_(self, p=0.5, generator=None):
    # In-place: metadata is unchanged, so return self.
    return self
397
+
398
+
399
@register_meta(aten.bernoulli.p)
def meta_bernoulli_p(self, p=0.5, generator=None):
    # https://github.com/pytorch/pytorch/issues/88612
    # .contiguous() mirrors the eager kernel's output layout per the issue above.
    return torch.empty_like(self).contiguous()
403
+
404
+
405
@register_meta(aten._fused_moving_avg_obs_fq_helper.default)
def meta__fused_moving_avg_obs_fq_helper(
    self,
    observer_on,
    fake_quant_on,
    running_min,
    running_max,
    scale,
    zero_point,
    averaging_const,
    quant_min,
    quant_max,
    ch_axis,
    per_row_fake_quant=False,
    symmetric_quant=False,
):
    """Meta kernel: returns (output, mask), both shaped like ``self``;
    the mask is boolean."""
    check(
        ch_axis < self.dim(),
        lambda: "Error in fused_moving_avg_obs_fake_quant_cpu: ch_axis must be < self.dim()",
    )
    output = torch.empty_like(self)
    mask = torch.empty_like(self, dtype=torch.bool)
    return (output, mask)
427
+
428
+
429
def dot_check(self, other):
    """Raise unless both arguments are 1-D tensors."""
    both_1d = self.dim() == 1 and other.dim() == 1
    check(
        both_1d,
        lambda: f"1D tensors expected, but got {self.dim()}D and {other.dim()}D tensors",
    )
434
+
435
+
436
@register_meta(aten.dot.default)
def meta_dot(self, tensor):
    # dot of two 1-D tensors -> 0-d result with self's dtype/device.
    dot_check(self, tensor)
    return self.new_empty(())
440
+
441
+
442
@register_meta([aten.mm.default])
def meta_mm(a, b):
    """Meta kernel for mm: (N, K) @ (K, P) -> (N, P)."""
    check(a.dim() == 2, lambda: "a must be 2D")
    check(b.dim() == 2, lambda: "b must be 2D")
    rows, inner_a = a.shape
    inner_b, cols = b.shape
    check(inner_a == inner_b, lambda: "a and b must have same reduction dim")
    return a.new_empty(rows, cols)
450
+
451
+
452
+ def _compute_reduction_shape(self, dims, keepdim):
453
+ if keepdim:
454
+ return tuple(self.shape[i] if i not in dims else 1 for i in range(self.ndim))
455
+
456
+ return utils.compute_reduction_output_shape(self.shape, dims)
457
+
458
+
459
+ # FakeTensors (meta tensors with a device) will report device as meta
460
+ # when running meta kernels. Here, access the "fake device" of FakeTensor if it
461
+ # exists so meta kernels which have diverge per device will be more
462
+ # accurate when run with FakeTensors
463
def device_hint(tensor) -> "str":
    """Best-effort device for meta-kernel decisions: a FakeTensor's fake
    device type when available, otherwise default to "cuda"."""
    if isinstance(tensor, torch._subclasses.FakeTensor):
        return tensor.fake_device.type
    return "cuda"  # default to cuda
468
+
469
+
470
def calc_conv_nd_return_shape(
    input_tensor: torch.Tensor,
    weight: torch.Tensor,
    stride: Union[List[int], int],
    padding: Union[List[int], int],
    dilation: Union[List[int], int],
    is_transposed: bool,
    groups: int,
    output_padding: Optional[Union[List[int], int]] = None,
):
    """Compute the output shape of an N-D (optionally transposed) convolution.

    Mirrors the length formulas documented for ``torch.nn.Conv2d`` and
    ``torch.nn.ConvTranspose2d`` applied per spatial dimension.
    """

    def _out_len(ln: int, p: int, d: int, k: int, s: int) -> int:
        # standard convolution output length
        # see: https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
        return (ln + 2 * p - d * (k - 1) - 1) // s + 1

    def _out_len_transposed(ln: int, p: int, d: int, k: int, s: int, op: int) -> int:
        # transposed convolution output length
        # see: https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html
        return (ln - 1) * s - 2 * p + d * (k - 1) + op + 1

    kernel_size = weight.shape[2:]
    dims = input_tensor.shape[2:]
    if is_transposed:
        # transposed conv: weight is (in_channels, out_channels/groups, *k)
        out_channels = groups * weight.shape[1]
    else:
        out_channels = weight.shape[0]
        if weight.shape[1] * groups != input_tensor.shape[1]:
            raise RuntimeError("Invalid channel dimensions")

    def _per_dim(param):
        # accept a scalar, a 1-element list, or an already per-dim list
        if isinstance(param, IntLike):
            return [param] * len(dims)
        if len(param) == 1:
            return [param[0]] * len(dims)
        return param

    stride = _per_dim(stride)
    padding = _per_dim(padding)
    dilation = _per_dim(dilation)

    output_padding_list: Optional[List[int]] = None
    if output_padding:
        output_padding_list = _per_dim(output_padding)

    ret_shape = [input_tensor.shape[0], out_channels]
    for i in range(len(dims)):
        # a populated output_padding means a transposed convolution
        if output_padding_list:
            ret_shape.append(
                _out_len_transposed(
                    dims[i],
                    padding[i],
                    dilation[i],
                    kernel_size[i],
                    stride[i],
                    output_padding_list[i],
                )
            )
        else:
            ret_shape.append(
                _out_len(dims[i], padding[i], dilation[i], kernel_size[i], stride[i])
            )
    return ret_shape
+
570
+
571
def is_channels_last(ten):
    """True when the suggested memory format for ``ten`` is channels-last."""
    suggested = torch._prims_common.suggest_memory_format(ten)
    return suggested == torch.channels_last
+
574
+
575
@register_meta(aten.convolution.default)
def meta_conv(
    input_tensor: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor,
    stride: List[int],
    padding: List[int],
    dilation: List[int],
    is_transposed: bool,
    output_padding: List[int],
    groups: int,
):
    """Meta kernel for ``aten.convolution``: infers output shape and memory format."""

    def pick_memory_format():
        # CUDA propagates channels-last when either operand suggests it;
        # other devices only honor the input's own layout.
        if device_hint(input_tensor) == "cuda":
            if is_channels_last(input_tensor) or is_channels_last(weight):
                return torch.channels_last
        else:
            if is_channels_last(input_tensor):
                return torch.channels_last
        if input_tensor.is_contiguous(memory_format=torch.contiguous_format):
            return torch.contiguous_format
        elif input_tensor.is_contiguous(memory_format=torch.preserve_format):
            return torch.preserve_format

    # output_padding only participates in the transposed case
    output_shape = calc_conv_nd_return_shape(
        input_tensor,
        weight,
        stride,
        padding,
        dilation,
        is_transposed,
        groups,
        output_padding if is_transposed else None,
    )

    result = input_tensor.new_empty(output_shape)
    result = result.to(memory_format=pick_memory_format())  # type: ignore[call-overload]
    return result
+
614
+
615
if torch._C.has_mkldnn:
    _meta_lib_dont_use_me_use_register_meta_for_mkldnn = torch.library.Library(
        "mkldnn", "IMPL", "Meta"
    )

    def pick_mkldnn_conv_memory_format(input_tensor, weight):
        """Output memory format for mkldnn convolutions (channels-last biased)."""
        if weight.is_mkldnn:
            return torch.channels_last
        if is_channels_last(input_tensor) or is_channels_last(weight):
            return torch.channels_last
        if input_tensor.is_contiguous(memory_format=torch.contiguous_format):
            return torch.contiguous_format
        elif input_tensor.is_contiguous(memory_format=torch.preserve_format):
            return torch.preserve_format

    @register_meta(torch.ops.mkldnn._convolution_pointwise.default)
    def meta_mkldnn_convolution_default(
        input_tensor,
        weight,
        bias,
        padding,
        stride,
        dilation,
        groups,
        attr,
        scalars,
        algorithm,
    ):
        # Fused conv + unary pointwise: same shape as a plain convolution.
        shape_out = calc_conv_nd_return_shape(
            input_tensor, weight, stride, padding, dilation, False, groups, []
        )
        out = input_tensor.new_empty(shape_out)
        out_memory_format = torch.channels_last
        out = out.to(memory_format=out_memory_format)  # type: ignore[call-overload]
        return out

    @register_meta(torch.ops.mkldnn._convolution_pointwise.binary)
    def meta_mkldnn_convolution_binary(
        input_tensor,
        other,
        weight,
        bias,
        padding,
        stride,
        dilation,
        groups,
        binary_attr,
        alpha,
        unary_attr,
        unary_scalars,
        unary_algorithm,
    ):
        # Binary fusion produces a tensor shaped like ``other``.
        out = input_tensor.new_empty(other.size())
        out = out.to(memory_format=torch.channels_last)  # type: ignore[call-overload]
        return out

    @register_meta(torch.ops.mkldnn._convolution_pointwise_.binary)
    def meta_mkldnn_convolution_binary_inplace(
        input_tensor,
        other,
        weight,
        bias,
        padding,
        stride,
        dilation,
        groups,
        binary_attr,
        alpha,
        unary_attr,
        unary_scalars,
        unary_algorithm,
    ):
        # In-place binary fusion writes into and returns ``other``.
        return other

    @register_meta(torch.ops.mkldnn._linear_pointwise.default)
    def meta_linear_pointwise_default(
        input_tensor, weight, bias, attr, scalars, algorithm
    ):
        # Linear keeps leading dims; the last becomes out_features.
        return input_tensor.new_empty((*input_tensor.shape[:-1], weight.shape[0]))

    @register_meta(torch.ops.mkldnn._linear_pointwise.binary)
    def meta_linear_pointwise_binary(input_tensor, other, weight, bias, attr):
        return input_tensor.new_empty(other.size())

    if torch._C.has_mkl:
        _meta_lib_dont_use_me_use_register_meta_for_mkl = torch.library.Library(
            "mkl", "IMPL", "Meta"
        )

        @register_meta(torch.ops.mkl._mkl_linear)
        def meta_mkl_linear(
            input_tensor,
            packed_weight,
            orig_weight,
            bias,
            batch_size,
        ):
            # Shape comes from the original (unpacked) weight.
            return input_tensor.new_empty(
                (*input_tensor.shape[:-1], orig_weight.shape[0])
            )
+
717
+
718
# from check_dim_size() in aten/src/ATen/TensorUtils.cpp.
def check_dim_size(tensor, dim, dim_size, size):
    """Check that ``tensor`` has ``dim`` dims and ``shape[dim_size] == size``."""
    shape_ok = tensor.dim() == dim and tensor.shape[dim_size] == size
    check(
        shape_ok,
        lambda: f"Expected a tensor of dimension {dim} and tensor.size[{dim_size}] == {size}, "
        + f"but got : dimension {tensor.dim()} and tensor.size[{dim_size}] = {tensor.shape[dim_size]}",
    )
+
726
+
727
@register_meta(aten.avg_pool2d.default)
def meta_avg_pool2d(
    input,
    kernel_size,
    stride=(),
    padding=(0,),
    ceil_mode=False,
    count_include_pad=True,
    divisor_override=None,
):
    """Meta kernel for ``aten.avg_pool2d``.

    Validates the pooling parameters and allocates the output, preserving
    the input's suggested memory format.
    """

    def unpack(name, val):
        # a single int applies to both spatial dimensions
        check(
            len(val) in [1, 2],
            lambda: f"avg_pool2d: {name} must either be a single int, or a tuple of two ints",
        )
        first = val[0]
        second = first if len(val) == 1 else val[1]
        return first, second

    kH, kW = unpack("kernel_size", kernel_size)
    check(
        len(stride) in [0, 1, 2],
        lambda: "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
    )
    # an omitted stride defaults to the kernel size
    if len(stride) == 0:
        dH, dW = kH, kW
    elif len(stride) == 1:
        dH, dW = stride[0], stride[0]
    else:
        dH, dW = unpack("stride", stride)

    padH, padW = unpack("padding", padding)

    check(
        divisor_override is None or divisor_override != 0,
        lambda: "divisor must be not zero",
    )

    nbatch = input.size(-4) if input.dim() == 4 else 1
    nInputPlane = input.size(-3)
    inputHeight = input.size(-2)
    inputWidth = input.size(-1)

    outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, 1, ceil_mode)
    outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, 1, ceil_mode)

    memory_format = utils.suggest_memory_format(input)
    pool2d_shape_check(
        input,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        1,
        1,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        memory_format,
    )

    if input.dim() == 3:
        size = [nInputPlane, outputHeight, outputWidth]
    else:
        size = [nbatch, nInputPlane, outputHeight, outputWidth]
    return torch.empty(
        size, dtype=input.dtype, device=input.device, memory_format=memory_format
    )
+
800
+
801
# from avg_pool2d_backward_shape_check() in aten/src/ATen/native/Pool.h.
def avg_pool2d_backward_shape_check(
    input,
    gradOutput,
    nbatch,
    kH,
    kW,
    dH,
    dW,
    padH,
    padW,
    nInputPlane,
    inputHeight,
    inputWidth,
    outputHeight,
    outputWidth,
    mem_format,
):
    """Validate the forward pooling geometry, then check that ``gradOutput``
    matches the expected (..., C, oH, oW) shape."""
    pool2d_shape_check(
        input,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        1,
        1,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        mem_format,
    )

    ndim = input.dim()
    # pooling never changes the channel count
    nOutputPlane = nInputPlane

    check_dim_size(gradOutput, ndim, ndim - 3, nOutputPlane)
    check_dim_size(gradOutput, ndim, ndim - 2, outputHeight)
    check_dim_size(gradOutput, ndim, ndim - 1, outputWidth)
+
844
+
845
# Don't override the C++ registration.
@register_meta(aten.avg_pool2d_backward.default)
def meta_avg_pool2d_backward(
    gradOutput_,
    input,
    kernel_size,
    stride,
    padding,
    ceil_mode,
    count_include_pad,
    divisor_override,
):
    """Meta kernel for avg_pool2d_backward: the input gradient has the
    input's shape and suggested memory format."""
    # From aten/src/ATen/native/AveragePool2d.cpp structured kernel meta func.
    check(
        len(kernel_size) == 1 or len(kernel_size) == 2,
        lambda: "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints",
    )
    kH = kernel_size[0]
    kW = kH if len(kernel_size) == 1 else kernel_size[1]
    check(
        len(stride) == 0 or len(stride) == 1 or len(stride) == 2,
        lambda: "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
    )
    # omitted stride falls back to the kernel size
    dH = kH if len(stride) == 0 else stride[0]
    dW = kW if len(stride) == 0 else dH if len(stride) == 1 else stride[1]
    check(
        len(padding) == 1 or len(padding) == 2,
        lambda: "avg_pool2d: padding must either be a single int, or a tuple of two ints",
    )
    padH = padding[0]
    padW = padH if len(padding) == 1 else padding[1]

    check(
        divisor_override is None or divisor_override != 0,
        lambda: "divisor must be not zero",
    )

    input_size = input.shape
    nbatch = input_size[-4] if input.dim() == 4 else 1
    nInputPlane = input_size[-3]
    inputHeight = input_size[-2]
    inputWidth = input_size[-1]

    outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, 1, ceil_mode)
    outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, 1, ceil_mode)

    mem_format = utils.suggest_memory_format(input)

    avg_pool2d_backward_shape_check(
        input,
        gradOutput_,
        nbatch,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        mem_format,
    )

    return torch.empty(
        input_size, dtype=input.dtype, device=input.device, memory_format=mem_format
    )
+
915
+
916
@register_meta(aten._adaptive_avg_pool2d.default)
def meta_adaptive_avg_pool2d(self, output_size):
    """Meta kernel for adaptive_avg_pool2d."""
    check(
        self.ndim == 3 or self.ndim == 4,
        lambda: f"Expected 3D or 4D tensor, but got {self.shape}",
    )
    output_shape = self.shape[:-2] + tuple(output_size)
    # Propagate the suggested memory format so a channels-last input
    # yields a channels-last output.
    memory_format = utils.suggest_memory_format(self)
    return torch.empty(
        output_shape, dtype=self.dtype, device=self.device, memory_format=memory_format
    )
+
930
+
931
@register_meta(aten._adaptive_avg_pool3d.default)
def meta_adaptive_avg_pool3d(self, output_size):
    """Meta kernel for adaptive_avg_pool3d: last three dims become output_size."""
    check(
        self.ndim == 4 or self.ndim == 5,
        lambda: f"Expected 4D or 5D tensor, but got {self.shape}",
    )
    return self.new_empty(self.shape[:-3] + tuple(output_size))
+
939
+
940
@register_meta(aten._adaptive_avg_pool2d_backward.default)
def meta__adaptive_avg_pool2d_backward(grad_out, self):
    """Meta kernel for adaptive_avg_pool2d backward: grad w.r.t. input has
    the input's shape; validates ``grad_out`` first."""
    ndim = grad_out.ndim
    for i in range(1, ndim):
        # the lambda is evaluated immediately on failure, so capturing the
        # loop variable here is safe
        check(
            grad_out.size(i) > 0,
            lambda: f"adaptive_avg_pool2d_backward(): Expected grad_output to have non-zero \
size for non-batch dimensions, {grad_out.shape} with dimension {i} being empty",
        )
    check(
        ndim == 3 or ndim == 4,
        lambda: f"adaptive_avg_pool2d_backward(): Expected 3D or 4D tensor, but got {self.shape}",
    )
    check(
        self.dtype == grad_out.dtype,
        lambda: f"expected dtype {self.dtype} for `grad_output` but got dtype {grad_out.dtype}",
    )
    return self.new_empty(self.shape)
+
959
+
960
@register_meta(aten.repeat_interleave.Tensor)
def meta_repeat_interleave_Tensor(repeats, output_size=None):
    """Meta kernel for repeat_interleave: the output length cannot be
    derived from meta data, so ``output_size`` is mandatory."""
    if output_size is None:
        raise RuntimeError("cannot repeat_interleave a meta tensor without output_size")
    return repeats.new_empty(output_size)
+
966
+
967
@register_meta([aten.complex.default, aten.complex.out])
@out_wrapper()
def meta_complex(real, imag):
    """Meta kernel for ``torch.complex``: broadcast operands and promote
    the real dtype to its complex counterpart."""
    assert real.dtype.is_floating_point
    assert imag.dtype.is_floating_point
    out_shape = _broadcast_shapes(real.shape, imag.shape)
    return real.new_empty(out_shape, dtype=corresponding_complex_dtype(real.dtype))
+
975
+
976
@register_meta(aten.vdot.default)
def vdot(self, other):
    """Meta kernel for ``torch.vdot``.

    Real inputs reduce to ``dot``; conjugated complex inputs are rewritten
    via vdot/dot identities so only the canonical case allocates directly.
    """
    # BUG FIX: ``is_complex`` is a method. The original tested the bound
    # method object itself ("if not self.is_complex:"), which is always
    # truthy, so the real-input fast path to ``dot`` could never be taken.
    if not self.is_complex():
        return torch.dot(self, other)

    if self.is_conj():
        if other.is_conj():
            return torch.vdot(other.conj(), self.conj())
        else:
            return torch.dot(self.conj(), other)
    elif other.is_conj():
        return torch.dot(self, other.conj()).conj()

    dot_check(self, other)
    # vdot of two vectors is a scalar (0-d) tensor
    return self.new_empty(())
+
992
+
993
# Leaving this function around because a python implementation
# of indexing shape inference is useful,
# but not registering it to the dispatcher because we already
# get shape inference through structured kernels
@register_meta(aten.index.Tensor)
def meta_index_Tensor(self, indices):
    """Shape inference for advanced indexing (``aten::index``)."""
    check_no_bool_index_tensors(aten.index.Tensor, self, indices)
    check(indices, lambda: "at least one index must be provided")
    # aten::index is the internal advanced indexing implementation
    # checkIndexTensorTypes and expandTensors
    result: List[Optional[Tensor]] = []
    for i, index in enumerate(indices):
        if index is None:
            result.append(index)
            continue
        check(
            index.dtype in [torch.long, torch.int, torch.int8, torch.bool],
            lambda: "tensors used as indices must be long, int, byte or bool tensors",
        )
        if index.dtype not in [torch.int8, torch.bool]:
            result.append(index)
            continue
        # byte/bool masks expand into one long index tensor per mask dim
        nonzero = index.nonzero()
        k = len(result)
        check(
            k + index.ndim <= self.ndim,
            lambda: f"too many indices for tensor of dimension {self.ndim}",
            IndexError,
        )
        for j in range(index.ndim):
            check(
                index.shape[j] == self.shape[k + j],
                lambda: f"The shape of the mask {index.shape} at index {i} "
                f"does not match the shape of the indexed tensor {self.shape} at index {k + j}",
                IndexError,
            )
            result.append(nonzero.select(1, j))
    indices = result
    check(
        len(indices) <= self.ndim,
        lambda: f"too many indices for tensor of dimension {self.ndim} (got {len(indices)})",
    )
    # expand_outplace
    import torch._refs as refs  # avoid import cycle in mypy

    indices = list(refs._maybe_broadcast(*indices))
    # pad with trailing None so there is one entry per dim of self
    while len(indices) < self.ndim:
        indices.append(None)

    # hasContiguousSubspace
    # true if all non-null tensors are adjacent
    # See:
    # https://numpy.org/doc/stable/user/basics.indexing.html#combining-advanced-and-basic-indexing
    # https://stackoverflow.com/questions/53841497/why-does-numpy-mixed-basic-advanced-indexing-depend-on-slice-adjacency
    state = 0
    has_contiguous_subspace = False
    for index in indices:
        if state == 0:
            if index is not None:
                state = 1
        elif state == 1:
            if index is None:
                state = 2
        else:
            if index is not None:
                # a second run of advanced indices: not contiguous
                break
    else:
        has_contiguous_subspace = True

    # transposeToFront
    # When the advanced indices are not adjacent, the indexed dimensions
    # move to the front of the result.
    if not has_contiguous_subspace:
        dims = []
        transposed_indices = []
        for i, index in enumerate(indices):
            if index is not None:
                dims.append(i)
                transposed_indices.append(index)
        for i, index in enumerate(indices):
            if index is None:
                dims.append(i)
                transposed_indices.append(index)
        self = self.permute(dims)
        indices = transposed_indices

    # AdvancedIndex::AdvancedIndex (simplified): dims before/after the
    # advanced-index subspace survive unchanged; the subspace itself is
    # replaced by the broadcast index shape.
    before_shape: List[int] = []
    after_shape: List[int] = []
    replacement_shape: List[int] = []
    for dim, index in enumerate(indices):
        if index is None:
            if replacement_shape:
                after_shape.append(self.shape[dim])
            else:
                before_shape.append(self.shape[dim])
        else:
            replacement_shape = list(index.shape)
    return self.new_empty(before_shape + replacement_shape + after_shape)
+
1099
+
1100
@register_meta([aten.convolution_backward.default])
def meta_convolution_backward(
    grad_output_,
    input_,
    weight_,
    bias_sizes_opt,
    stride,
    padding,
    dilation,
    transposed,
    output_padding,
    groups,
    output_mask,
):
    """Meta kernel for convolution_backward.

    High level logic taken from slow_conv3d_backward_cpu, which should be
    representative of all convolution_backward impls: each requested grad
    simply has the shape of the corresponding forward operand.
    """
    grad_input = grad_output_.new_empty(input_.size()) if output_mask[0] else None
    grad_weight = grad_output_.new_empty(weight_.size()) if output_mask[1] else None
    grad_bias = grad_output_.new_empty(bias_sizes_opt) if output_mask[2] else None
    return (grad_input, grad_weight, grad_bias)
+
1129
+
1130
@register_meta([aten.addbmm.default, aten.addbmm.out])
@out_wrapper()
def meta_addbmm(self, batch1, batch2, *, beta=1, alpha=1):
    """Meta kernel for addbmm: output is (batch1.size(1), batch2.size(2))."""
    dim1 = batch1.size(1)
    dim2 = batch2.size(2)
    self = self.expand((dim1, dim2))
    check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
    check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")
    check(
        batch1.size(0) == batch2.size(0),
        lambda: f"batch1 and batch2 must have same number of batches, got {batch1.size(0)} and {batch2.size(0)}",
    )
    check(
        batch1.size(2) == batch2.size(1),
        lambda: (
            f"Incompatible matrix sizes for bmm ({batch1.size(1)}x{batch1.size(2)} "
            f"and {batch2.size(1)}x{batch2.size(2)})"
        ),
    )
    check(
        self.size(0) == dim1 and self.size(1) == dim2,
        lambda: "self tensor does not match matmul output shape",
    )
    return self.new_empty(self.size())
+
1155
+
1156
@register_meta(aten._cdist_forward.default)
def meta_cdist_forward(x1, x2, p, compute_mode):
    """Meta kernel for ``torch.cdist``: validates operands, broadcasts the
    batch dims, and allocates a (…, r1, r2) output."""
    check(
        x1.dim() >= 2,
        lambda: f"cdist only supports at least 2D tensors, X1 got: {x1.dim()}D",
    )
    check(
        x2.dim() >= 2,
        lambda: f"cdist only supports at least 2D tensors, X2 got: {x2.dim()}D",
    )
    check(
        x1.size(-1) == x2.size(-1),
        lambda: f"X1 and X2 must have the same number of columns. X1: {x1.size(-1)} X2: {x2.size(-1)}",
    )
    # BUG FIX: these two messages were missing the f-string prefix, so the
    # "{x1.dtype}" / "{x2.dtype}" placeholders were emitted literally.
    check(
        utils.is_float_dtype(x1.dtype),
        lambda: f"cdist only supports floating-point dtypes, X1 got: {x1.dtype}",
    )
    check(
        utils.is_float_dtype(x2.dtype),
        lambda: f"cdist only supports floating-point dtypes, X2 got: {x2.dtype}",
    )
    check(p >= 0, lambda: "cdist only supports non-negative p values")
    check(
        compute_mode in (None, 1, 2),
        lambda: f"possible modes: None, 1, 2, but was: {compute_mode}",
    )
    r1 = x1.size(-2)
    r2 = x2.size(-2)
    batch_tensor1 = x1.shape[:-2]
    batch_tensor2 = x2.shape[:-2]
    output_shape = list(torch.broadcast_shapes(batch_tensor1, batch_tensor2))
    output_shape.extend([r1, r2])
    return x1.new_empty(output_shape)
+
1191
+
1192
@register_meta(aten._embedding_bag.default)
def meta_embedding_bag(
    weight,
    indices,
    offsets,
    scale_grad_by_freq=False,
    mode=0,
    sparse=False,
    per_sample_weights=None,
    include_last_offset=False,
    padding_idx=-1,
):
    """Meta kernel for ``_embedding_bag``.

    Returns (output, offset2bag, bag_size, max_indices); the bookkeeping
    tensors' shapes depend on device and the CPU fast-path heuristics.
    """
    check(
        indices.dtype in (torch.long, torch.int),
        lambda: f"expected indices to be long or int, got {indices.dtype}",
    )
    check(
        offsets.dtype in (torch.long, torch.int),
        lambda: f"expected offsets to be long or int, got {offsets.dtype}",
    )
    check(
        utils.is_float_dtype(weight.dtype),
        lambda: f"expected weight to be floating point type, got {weight.dtype}",
    )

    num_bags = offsets.size(0)
    if include_last_offset:
        check(
            num_bags >= 1, lambda: "include_last_offset: numBags should be at least 1"
        )
        num_bags -= 1

    output = weight.new_empty(num_bags, weight.size(1))
    MODE_SUM, MODE_MEAN, MODE_MAX = range(3)

    if per_sample_weights is not None:
        check(
            mode == MODE_SUM,
            lambda: "embedding_bag: per_sample_weights only supported with mode='sum'",
        )
        check(
            per_sample_weights.dtype == weight.dtype,
            lambda: f"expected weight ({weight.dtype}) and per_sample_weights ({per_sample_weights.dtype}) to have same dtype",
        )
        check(
            per_sample_weights.ndim == 1,
            lambda: f"expected per_sample_weights to be 1D tensor, got {per_sample_weights.ndim}D",
        )
        check(
            per_sample_weights.numel() == indices.numel(),
            lambda: (
                f"expected per_sample_weights.numel() ({per_sample_weights.numel()} "
                f"to be the same as indices.numel() ({indices.numel()})"
            ),
        )

    # CPU has an index_select-based fast path that skips building the full
    # offset2bag map; mirror that decision so shapes match eager mode.
    def is_fast_path_index_select_scale(src, scale, output, padding_idx):
        return (
            is_fast_path_index_select(src, output, padding_idx) and scale.stride(0) == 1
        )

    def is_fast_path_index_select(src, output, padding_idx):
        return (
            (src.dtype == torch.float or src.dtype == torch.half)
            and src.stride(1) == 1
            and output.stride(1) == 1
            and padding_idx < 0
        )

    def is_fast_path(src, scale, output, padding_idx):
        if scale is not None:
            return is_fast_path_index_select_scale(src, scale, output, padding_idx)
        return is_fast_path_index_select(src, output, padding_idx)

    if device_hint(offsets) != "cpu":
        offset2bag = indices.new_empty(indices.size(0))
        bag_size = indices.new_empty(offsets.size())
        if mode == MODE_MAX:
            max_indices = indices.new_empty(num_bags, weight.size(1))
        else:
            max_indices = indices.new_empty(0)
    else:
        fast_path_sum = is_fast_path(weight, per_sample_weights, output, padding_idx)
        if mode == MODE_MEAN or mode == MODE_MAX or not fast_path_sum:
            offset2bag = offsets.new_empty(indices.size(0))
        else:
            offset2bag = offsets.new_empty(0)
        bag_size = offsets.new_empty(num_bags)
        # This part of the logic comes from make_max_indices_out in EmbeddingBag.cpp
        numBags = offsets.shape[0]
        if mode == MODE_MAX:
            if include_last_offset:
                check(
                    numBags >= 1,
                    lambda: "include_last_offset: numBags should be at least 1",
                )
                numBags -= 1
            max_indices = offsets.new_empty(numBags, weight.shape[1])
        else:
            max_indices = offsets.new_empty(bag_size.size())
    return output, offset2bag, bag_size, max_indices
+
1295
+
1296
@register_meta(aten._embedding_bag_forward_only.default)
def meta_embedding_bag_forward_only(weight, indices, offsets, *args):
    """Forward-only variant of _embedding_bag; on CPU the bag_size buffer
    is shaped like ``offsets`` instead."""
    output, offset2bag, bag_size, max_indices = meta_embedding_bag(
        weight, indices, offsets, *args
    )
    if device_hint(offsets) == "cpu":
        bag_size = offsets.new_empty(offsets.size())
    return output, offset2bag, bag_size, max_indices
+
1305
+
1306
+ def _get_reduction_dtype(input, dtype, promote_int_to_long=True):
1307
+ # if specified, dtype takes precedence
1308
+ if dtype:
1309
+ return dtype
1310
+
1311
+ if input.dtype.is_floating_point or input.dtype.is_complex:
1312
+ return input.dtype
1313
+ elif promote_int_to_long:
1314
+ return torch.long
1315
+
1316
+ return input.dtype
1317
+
1318
+
1319
@register_meta([aten.nansum.default, aten.nansum.out])
@out_wrapper()
def meta_nansum(input, dims=None, keepdim=False, *, dtype=None):
    """Meta kernel for nansum: reduction with integer-to-long promotion."""
    out_dtype = _get_reduction_dtype(input, dtype, promote_int_to_long=True)
    reduce_dims = utils.reduction_dims(input.shape, dims)
    shape = _compute_reduction_shape(input, reduce_dims, keepdim)
    return input.new_empty(shape, dtype=out_dtype)
+
1327
+
1328
@register_meta(aten.nanmedian.default)
def meta_nanmedian(input):
    """Meta kernel for full-tensor nanmedian: reduces every dim to 0-d."""
    all_dims = tuple(range(input.dim()))
    return input.new_empty(
        utils.compute_reduction_output_shape(input.shape, all_dims)
    )
+
1335
+
1336
@register_meta([aten.nanmedian.dim, aten.nanmedian.dim_values])
@out_wrapper("values", "indices")
def meta_nanmedian_dim(input, dim=-1, keepdim=False):
    """Meta kernel for nanmedian along a dim: returns (values, long indices)."""
    reduce_dims = utils.reduction_dims(input.shape, (dim,))
    shape = _compute_reduction_shape(input, reduce_dims, keepdim)
    return (
        input.new_empty(shape),
        input.new_empty(shape, dtype=torch.long),
    )
+
1346
+
1347
@register_meta(aten.logical_not_.default)
def meta_logical_not_(self):
    """In-place op: metadata is unchanged, so return the input itself."""
    return self
+
1351
+
1352
@register_meta(aten.repeat.default)
def meta_repeat(self, repeats):
    """Meta kernel for ``Tensor.repeat``: tile each (possibly new) dim."""
    check(
        len(repeats) >= self.dim(),
        lambda: "Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor",
    )
    # Left-pad the shape with 1s when ``repeats`` introduces leading dims.
    num_new_dimensions = len(repeats) - self.dim()
    padded_size = (1,) * num_new_dimensions + tuple(self.shape)
    return self.new_empty(
        [padded_size[i] * repeats[i] for i in range(len(repeats))]
    )
+
1366
+
1367
@register_meta(aten.zero_.default)
def meta_zero_(self):
    """In-place zero: shape and dtype are untouched, return the input."""
    return self
+
1371
+
1372
@register_meta(
    [
        aten.mul_.Scalar,
        aten.div_.Scalar,
        aten.mul_.Tensor,
        aten.div_.Tensor,
        aten.logical_and_.default,
        aten.logical_or_.default,
        aten.logical_xor_.default,
    ],
)
def meta_binop_inplace(self, other):
    """Shared meta kernel for in-place binary ops without an alpha arg."""
    return self
+
1386
+
1387
@register_meta(
    [
        aten.add_.Scalar,
        aten.sub_.Scalar,
        aten.add_.Tensor,
        aten.sub_.Tensor,
    ],
)
def meta_binop_inplace_alpha(self, other, alpha=1):
    """Shared meta kernel for in-place add/sub, which take an alpha arg."""
    return self
+
1398
+
1399
@register_meta([aten.round.default, aten.round.decimals])
def meta_round(self, **kwargs):
    """Elementwise round: same shape, default elementwise type promotion."""
    return _elementwise_meta(
        self, type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT
    )
+
1405
+
1406
@register_meta(aten.zero.default)
def meta_zero(self):
    """Out-of-place zero: allocate a fresh tensor with the same shape."""
    return self.new_empty(self.shape)
+
1410
+
1411
@register_meta([aten.fill_.Tensor, aten.fill_.Scalar])
def meta_fill_(self, val):
    """In-place fill: metadata unchanged, return the input."""
    return self
+
1415
+
1416
@register_meta([aten.fill.Tensor, aten.fill.Scalar])
def meta_fill(self, val):
    """Out-of-place fill: result mirrors the input's metadata."""
    return torch.empty_like(self)
+
1420
+
1421
@register_meta(aten.relu_.default)
def meta_relu_(self):
    """In-place relu: metadata unchanged, return the input."""
    return self
+
1425
+
1426
@register_meta(aten.index_put.default)
def meta_index_put(self, indices, values, accumulate=False):
    """Out-of-place index_put: result mirrors the input's metadata."""
    return torch.empty_like(self)
+
1430
+
1431
@register_meta(aten.masked_fill_.Scalar)
def meta_masked_fill_(self, mask, value):
    """In-place masked_fill: metadata unchanged, return the input."""
    return self
+
1435
+
1436
@register_meta(aten.index_put_.default)
def meta_index_put_(self, indices, values, accumulate=False):
    """In-place index_put: metadata unchanged, return the input."""
    return self
+
1440
+
1441
@register_meta(aten.alias.default)
def meta_alias(self):
    """Alias: a view with the same shape as the input."""
    return self.view(self.shape)
+
1445
+
1446
def common_meta_baddbmm_bmm(batch1, batch2, is_bmm, self_baddbmm=None):
    """Shared meta shape logic for ``bmm`` and ``baddbmm``.

    Returns an empty (bs, rows, cols) tensor; when ``self_baddbmm`` is
    given (the baddbmm case) its shape is validated against the matmul
    output shape.
    """
    check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
    check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")

    batch1_sizes = batch1.size()
    batch2_sizes = batch2.size()

    bs = batch1_sizes[0]
    contraction_size = batch1_sizes[2]
    res_rows = batch1_sizes[1]
    res_cols = batch2_sizes[2]
    output_size = (bs, res_rows, res_cols)

    check(
        batch2_sizes[0] == bs and batch2_sizes[1] == contraction_size,
        lambda: f"Expected size for first two dimensions of batch2 tensor to be: [{bs}"
        f", {contraction_size}] but got: [{batch2_sizes[0]}, {batch2_sizes[1]}].",
    )

    # TODO: handle out

    output = batch2.new_empty(output_size)

    if not is_bmm and self_baddbmm is not None:
        check(self_baddbmm.dim() == 3, lambda: "self must be a 3D tensor")
        # BUG FIX: this message was a plain string (placeholders printed
        # literally) and referenced the undefined name ``self`` — it would
        # have raised NameError once made an f-string. Use the actual
        # parameter ``self_baddbmm``.
        check(
            self_baddbmm.size() == output_size,
            lambda: f"Expected an input tensor shape with shape {output_size} "
            f"but got shape: {self_baddbmm.size()}",
        )

    return output
+
1478
+
1479
@register_meta(aten.bmm.default)
def meta_bmm(self, mat2):
    """Meta kernel for batched matrix multiply."""
    # Delegates to the shared baddbmm/bmm shape logic in pure-bmm mode.
    return common_meta_baddbmm_bmm(self, mat2, True)
1483
+
1484
def div_rtn(x, y):
    """Floor-style division helper mirroring ATen's div_rtn."""
    quotient = x // y
    remainder = x % y
    # WARNING: explicit bool conversion here is necessary;
    # would be fixed by SymBool
    if remainder != 0 and bool(remainder < 0) != bool(y < 0):
        quotient -= 1
    return quotient
1493
+
1494
def pooling_output_shape_pad_lr(
    inputSize, kernelSize, pad_l, pad_r, stride, dilation, ceil_mode
):
    """Compute one spatial output dimension of a pooling op with
    (possibly asymmetric) left/right padding."""
    effective_kernel = dilation * (kernelSize - 1) + 1
    numerator = inputSize + pad_l + pad_r - effective_kernel
    if ceil_mode:
        # ceil_mode rounds the division up instead of down.
        numerator += stride - 1
    outputSize = div_rtn(numerator, stride) + 1
    if ceil_mode:
        # Drop the last window if it would start entirely in the right padding.
        if (outputSize - 1) * stride >= inputSize + pad_l:
            outputSize -= 1
    return outputSize
1514
+
1515
def pooling_output_shape(inputSize, kernelSize, pad, stride, dilation, ceil_mode):
    """Compute one spatial output dimension of pooling with symmetric padding."""
    check(stride != 0, lambda: "stride should not be zero")
    check(pad >= 0, lambda: f"pad must be non-negative, but got pad: {pad}")
    check(
        pad <= kernelSize // 2,
        lambda: f"pad should be at most half of kernel size, but got pad={pad} and kernel_size={kernelSize}",
    )
    # Symmetric padding is the pad-left/right case with equal pads.
    return pooling_output_shape_pad_lr(
        inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode
    )
1526
+
1527
def pool2d_shape_check(
    input,
    kH,
    kW,
    dH,
    dW,
    padH,
    padW,
    dilationH,
    dilationW,
    nInputPlane,
    inputHeight,
    inputWidth,
    outputHeight,
    outputWidth,
    memory_format,
):
    """Validate all 2-d pooling preconditions (kernel/stride/dilation/pad
    positivity, input rank per memory format, and a non-degenerate output).

    Fix: the kernel, stride, dilation and channels_last error messages were
    plain strings containing {placeholders}; they are now real f-strings.
    """
    ndim = input.dim()
    nOutputPlane = nInputPlane

    check(
        kW > 0 and kH > 0,
        lambda: f"kernel size should be greater than zero, but got kH: {kH}, kW: {kW}",
    )
    check(
        dW > 0 and dH > 0,
        lambda: f"stride should be greater than zero, but got dH: {dH}, dW: {dW}",
    )
    check(
        dilationH > 0 and dilationW > 0,
        lambda: f"dilation should be greater than zero, but got dilationH: {dilationH}, dilationW: {dilationW}",
    )

    # Channel and height dims must be non-empty in every layout.
    valid_dims = input.size(1) != 0 and input.size(2) != 0

    if memory_format == torch.channels_last:
        check(
            ndim == 4 and valid_dims and input.size(3) != 0,
            lambda: "Expected 4D (batch mode) tensor expected for input with channels_last layout"
            f" with optional 0 dim batch size for input, but got: {input.size()}",
        )
    else:
        check(
            (ndim == 3 and input.size(0) != 0 and valid_dims)
            or (ndim == 4 and valid_dims and input.size(3) != 0),
            lambda: f"Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got: {input.size()}",
        )

    check(
        kW // 2 >= padW and kH // 2 >= padH,
        lambda: "pad should be smaller than or equal to half of kernel size, but got "
        f"padW = {padW}, padH = {padH}, kW = {kW}, kH = {kH}",
    )

    check(
        outputWidth >= 1 and outputHeight >= 1,
        lambda: f"Given input size: ({nInputPlane}x{inputHeight}x{inputWidth}). "
        f"Calculated output size: ({nOutputPlane}x{outputHeight}x{outputWidth}). "
        "Output size is too small",
    )
1588
+
1589
def max_pool2d_checks_and_compute_shape(
    input, kernel_size, stride, padding, dilation, ceil_mode
):
    """Validate max_pool2d arguments and compute the output spatial shape.

    Returns (nInputPlane, outputHeight, outputWidth). Raises via check()
    on malformed kernel/stride/padding/dilation tuples or an unsupported
    input layout.
    """
    # Reference: aten/src/ATen/native/DilatedMaxPool2d.cpp
    def unpack(name, val):
        # A 1-tuple means the same value for both H and W.
        check(
            len(val) in [1, 2],
            lambda: f"max_pool2d: {name} must either be a single int, or a tuple of two ints",
        )
        H = val[0]
        W = H if len(val) == 1 else val[1]
        return H, W

    kH, kW = unpack("kernel_size", kernel_size)

    check(
        len(stride) in [0, 1, 2],
        lambda: "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
    )
    # An omitted stride defaults to the kernel size.
    if len(stride) == 0:
        dH, dW = kH, kW
    else:
        dH, dW = unpack("stride", stride)

    padH, padW = unpack("padding", padding)
    dilationH, dilationW = unpack("dilation", dilation)
    nInputPlane = input.size(-3)
    inputHeight = input.size(-2)
    inputWidth = input.size(-1)

    # channels_last requires a batched (4D) input; contiguous allows 3D or 4D.
    memory_format = utils.suggest_memory_format(input)
    if memory_format == torch.channels_last:
        check(
            input.dim() == 4,
            lambda: "non-empty 4D (batch mode) tensor expected for input with channels_last layout",
        )
    elif memory_format == torch.contiguous_format:
        check(
            input.dim() in [3, 4],
            lambda: "non-empty 3D or 4D (batch mode) tensor expected for input",
        )
    else:
        check(
            False,
            lambda: "Unsupport memory format. Supports only ChannelsLast, Contiguous",
        )

    outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, dilationH, ceil_mode)
    outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, dilationW, ceil_mode)

    # Full cross-check of input/output dims against all pooling parameters.
    pool2d_shape_check(
        input,
        kH,
        kW,
        dH,
        dW,
        padH,
        padW,
        dilationH,
        dilationW,
        nInputPlane,
        inputHeight,
        inputWidth,
        outputHeight,
        outputWidth,
        memory_format,
    )

    return nInputPlane, outputHeight, outputWidth
1659
+
1660
@register_meta(aten.max_pool2d_with_indices_backward.default)
def meta_max_pool2d_with_indices_backward(
    grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices
):
    """Meta kernel for max_pool2d_with_indices backward: grad w.r.t. self.

    Fix: the dtype-mismatch message was a plain string with {placeholders};
    it is now a real f-string.
    """
    nInputPlane, outputHeight, outputWidth = max_pool2d_checks_and_compute_shape(
        self, kernel_size, stride, padding, dilation, ceil_mode
    )

    check(
        self.dtype == grad_output.dtype,
        lambda: f"expected dtype {self.dtype} for `gradOutput` but got dtype {grad_output.dtype}",
    )

    nOutputPlane = nInputPlane
    ndim = self.ndim

    # grad_output and indices must both carry the forward output's shape.
    def _check_dim_size(t):
        check_dim_size(t, ndim, ndim - 3, nOutputPlane)
        check_dim_size(t, ndim, ndim - 2, outputHeight)
        check_dim_size(t, ndim, ndim - 1, outputWidth)

    _check_dim_size(grad_output)
    _check_dim_size(indices)

    # The input gradient has the input's shape and preferred layout.
    memory_format = utils.suggest_memory_format(self)
    return torch.empty(
        self.shape, dtype=self.dtype, device=self.device, memory_format=memory_format
    )
1689
+
1690
@register_meta(aten.max_pool2d_with_indices.default)
def meta_max_pool2d_with_indices(
    input, kernel_size, stride=(), padding=(0,), dilation=(1,), ceil_mode=False
):
    """Meta kernel for max_pool2d_with_indices: (values, int64 indices)."""
    nInputPlane, outputHeight, outputWidth = max_pool2d_checks_and_compute_shape(
        input, kernel_size, stride, padding, dilation, ceil_mode
    )

    memory_format = utils.suggest_memory_format(input)
    # Prepend the batch dim only for batched (4D) inputs.
    size = [nInputPlane, outputHeight, outputWidth]
    if input.dim() == 4:
        size.insert(0, input.size(-4))
    values = torch.empty(
        size, dtype=input.dtype, device=input.device, memory_format=memory_format
    )
    indices = torch.empty(
        size, dtype=torch.int64, device=input.device, memory_format=memory_format
    )
    return (values, indices)
1713
+
1714
@register_meta(aten.grid_sampler_2d_backward.default)
def grid_sampler_2d_backward_meta(
    grad_output,
    input,
    grid,
    interpolation_mode,
    padding_mode,
    align_corners,
    output_mask,
):
    """Meta kernel for grid_sampler_2d backward: (grad_input or None, grad_grid)."""
    # grad_input is only materialized when output_mask says input needs grad.
    grad_input = (
        torch.zeros_like(input, memory_format=torch.contiguous_format)
        if output_mask[0]
        else None
    )
    grad_grid = torch.empty_like(grid, memory_format=torch.contiguous_format)
    return (grad_input, grad_grid)
1732
+
1733
@register_meta([aten.full.default])
def full(size, fill_value, *args, **kwargs):
    """Meta kernel for torch.full: the fill value does not affect metadata."""
    return torch.empty(size, *args, **kwargs)
1737
+
1738
@register_meta(
    [
        aten.randint_like.default,
        aten.randint_like.low_dtype,
        aten.randn_like.default,
        aten.rand_like.default,
        aten.full_like.default,
        aten.ones_like.default,
    ]
)
def meta_like(self, *args, **kwargs):
    """Shared meta kernel for *_like ops: metadata only depends on kwargs."""
    # Positional extras (e.g. randint bounds, fill value) never affect metadata.
    return aten.empty_like.default(self, **kwargs)
1751
+
1752
# zeros_like is special cased to work for sparse
@register_meta(aten.zeros_like.default)
def zeros_like(
    self, dtype=None, layout=None, device=None, pin_memory=None, memory_format=None
):
    """Meta kernel for zeros_like, with a dedicated path for sparse COO output."""
    if layout == torch.sparse_coo:
        check(
            memory_format is None,
            lambda: "memory format option is only supported by strided tensors",
        )

        # Start from an empty sparse tensor, inheriting dtype/device from self
        # unless explicitly overridden.
        res = torch.empty(
            0,
            dtype=self.dtype if dtype is None else dtype,
            layout=layout,
            device=self.device if device is None else device,
            pin_memory=pin_memory,
        )

        # Resize to self's shape. A sparse self keeps its sparse/dense split;
        # a dense self contributes all dims as sparse dims with no dense tail.
        if self.is_sparse:
            res.sparse_resize_and_clear_(
                self.size(), self.sparse_dim(), self.dense_dim()
            )
        else:
            res.sparse_resize_and_clear_(self.size(), self.dim(), 0)

        # An all-zeros sparse tensor is trivially coalesced.
        res._coalesced_(True)
        return res
    # Non-sparse layouts defer to empty_like: values are irrelevant on meta.
    return aten.empty_like.default(
        self,
        dtype=dtype,
        layout=layout,
        device=device,
        pin_memory=pin_memory,
        memory_format=memory_format,
    )
1789
+
1790
@register_meta(aten.select.int)
def meta_select(self, dim, index):
    """Meta kernel for select: a view of self with dimension `dim` removed.

    Mirrors the eager implementation via size/stride/storage_offset
    arithmetic (as_strided), so the result aliases self's storage.
    """
    ndim = self.dim()
    check(
        ndim != 0, lambda: "select() cannot be applied to a 0-dim tensor.", IndexError
    )

    # Wrap a negative dim.
    dim = dim if dim >= 0 else dim + ndim
    size = self.size(dim)

    check(
        not (-index > size or index >= size),
        lambda: f"select(): index {index} out of range for tensor of size "
        f"{self.size()} at dimension {dim}",
        IndexError,
    )

    # Wrap a negative index.
    index = index if index >= 0 else index + size

    new_size = list(self.size())
    new_stride = list(self.stride())

    # Selecting advances the storage offset by index strides along `dim`,
    # then drops that dimension entirely.
    new_storage_offset = self.storage_offset() + index * new_stride[dim]
    del new_size[dim]
    del new_stride[dim]

    return self.as_strided(new_size, new_stride, new_storage_offset)
1818
+
1819
@register_meta(aten.select_scatter.default)
def meta_select_scatter(self, src, dim, index):
    """Meta kernel for select_scatter: output mirrors self, strides included."""
    return utils.clone_preserve_strides(self)
1824
@register_meta(aten.slice_scatter.default)
def meta_slice_scatter(self, src, dim=0, start=None, end=None, step=1):
    """Meta kernel for slice_scatter: output mirrors self, strides included."""
    return utils.clone_preserve_strides(self)
1828
+
1829
# TODO: Deduplicate this with canonicalize_dim
def maybe_wrap_dim(dim: int, dim_post_expr: int, wrap_scalar: bool = True):
    """Normalize a possibly-negative `dim` into the range [0, dim_post_expr)."""
    if dim_post_expr <= 0:
        # Scalars are treated as one-dimensional for wrapping purposes.
        assert wrap_scalar
        dim_post_expr = 1
    lo = -dim_post_expr
    hi = dim_post_expr - 1
    assert lo <= dim <= hi, f"dim {dim} out of bounds ({lo}, {hi})"
    return dim + dim_post_expr if dim < 0 else dim
1841
+
1842
def ensure_nonempty_size(t, dim):
    """Size of `t` along `dim`, treating 0-d tensors as having size 1."""
    if t.dim() == 0:
        return 1
    return t.shape[dim]
1845
+
1846
# From aten/src/ATen/native/ScatterGatherChecks.h
def gather_shape_check(self, dim, index):
    """Validate that `index` is gather-compatible with `self` along `dim`."""
    self_dims = max(self.dim(), 1)
    index_dims = max(index.dim(), 1)
    check(
        self_dims == index_dims,
        lambda: "Index tensor must have the same number of dimensions as input tensor",
    )
    # index may be any size along `dim`, but no larger than self elsewhere.
    for i in range(self_dims):
        if i == dim:
            continue
        check(
            ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i),
            lambda: f"Size does not match at dimension {i} expected index {index.shape}"
            + f" to be smaller than self {self.shape} apart from dimension {dim}",
        )
1862
+
1863
@register_meta(aten.gather.default)
def meta_gather(self, dim, index, sparse_grad=False):
    """Meta kernel for gather: output has index's shape and self's dtype."""
    wrapped_dim = maybe_wrap_dim(dim, self.dim())
    # An empty index needs no validation at all.
    if index.numel() != 0:
        check(
            index.dtype == torch.long,
            lambda: f"gather(): Expected dtype int64 for index, but got {index.dtype}",
        )
        gather_shape_check(self, wrapped_dim, index)
    return self.new_empty(index.shape)
1875
+
1876
# From aten/src/ATen/native/TensorAdvancedIndexing.cpp
def get_operator_enum(reduce_, use_new_options=False):
    """Map a python `reduce` string onto its ATen reduction enum name.

    `use_new_options` selects the scatter_reduce vocabulary (sum/prod/...)
    over the legacy scatter one (add/multiply). Unknown names fail check().
    """
    if use_new_options:
        table = {
            "sum": "REDUCE_ADD",
            "prod": "REDUCE_MULTIPLY",
            "mean": "REDUCE_MEAN",
            "amax": "REDUCE_MAXIMUM",
            "amin": "REDUCE_MINIMUM",
        }
        if reduce_ in table:
            return table[reduce_]
        check(
            False,
            lambda: "reduce argument must be either sum, prod, mean, amax or amin.",
        )
        return
    else:
        table = {
            "add": "REDUCE_ADD",
            "multiply": "REDUCE_MULTIPLY",
        }
        if reduce_ in table:
            return table[reduce_]
        check(False, lambda: "reduce argument must be either add or multiply.")
        return
1902
+
1903
# From aten/src/ATen/native/ScatterGatherChecks.h
def scatter_gather_dtype_check(method_name, self, index, src_opt=None):
    """Dtype validation shared by the scatter/gather meta kernels."""
    # A non-empty index must be int64.
    if index.numel() != 0:
        check(
            index.dtype == torch.long,
            lambda: f"{method_name}(): Expected dtype int64 for index",
        )

    # When a src tensor is supplied it must match self's dtype.
    if src_opt is not None:
        check(
            self.dtype == src_opt.dtype,
            lambda: f"{method_name}(): Expected self.dtype to be equal to src.dtype",
        )
1918
def ensure_nonempty_dim(dim):
    """Clamp a rank to at least 1 (0-d tensors behave as 1-d in checks)."""
    return 1 if dim < 1 else dim
1921
+
1922
# From aten/src/ATen/native/ScatterGatherChecks.h
def scatter_shape_check(self, dim, index, src_opt=None):
    """Validate that `index` (and optionally `src_opt`) is scatter-compatible.

    An empty index is always accepted. Otherwise index must have the same
    (nonempty-normalized) rank as self and be no larger than self in every
    dimension except `dim`; when a src tensor is given, index must also be
    no larger than src in every dimension.
    """
    if index.numel() == 0:
        return
    check(
        ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
        lambda: "Index tensor must have the same number of dimensions as self tensor",
    )

    is_wrong_shape = False
    self_dims = ensure_nonempty_dim(self.dim())

    # Check: index.size(d) <= self.size(d) for all d != dim
    for d in range(self_dims):
        index_d_size = ensure_nonempty_size(index, d)
        if d == dim:
            continue
        if index_d_size > ensure_nonempty_size(self, d):
            is_wrong_shape = True
            break

    # Check: index.size(d) <= src.size(d) for all d if src is Tensor
    if not is_wrong_shape and src_opt is not None:
        for d in range(self_dims):
            index_d_size = ensure_nonempty_size(index, d)
            if index_d_size > ensure_nonempty_size(src_opt, d):
                is_wrong_shape = True
                break

    # Two error branches so the message can mention src when it exists.
    if src_opt is not None:
        # NOTE(review): this rank check repeats the one above; kept as-is.
        check(
            ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
            lambda: "Index tensor must have the same number of dimensions as self tensor",
        )
        check(
            not is_wrong_shape,
            lambda: f"Expected index {index.shape} to be smaller than self {self.shape}"
            + f" apart from dimension {dim} and to be smaller than src {src_opt.shape}",
        )
    else:
        check(
            not is_wrong_shape,
            lambda: f"Expected index {index.shape} to be smaller than self {self.shape}"
            + f" apart from dimension {dim}",
        )
1968
+
1969
# From aten/src/ATen/native/TensorAdvancedIndexing.cpp
def scatter_meta_impl(self, dim, index, src=None, reduce_=None, use_new_options=False):
    """Run the full battery of scatter checks (dim wrap, dtype, shape, reduce)."""
    wrapped_dim = maybe_wrap_dim(dim, self.dim())
    scatter_gather_dtype_check("scatter", self, index, src)
    scatter_shape_check(self, wrapped_dim, index, src)
    if reduce_ is None:
        return
    # Validates the reduce string; fails check() on an unknown operator.
    get_operator_enum(reduce_, use_new_options)
1978
+
1979
@register_meta(aten.scatter_add.default)
def meta_scatter_add(self, dim, index, src):
    """Meta kernel for out-of-place scatter_add: output mirrors self's shape."""
    scatter_meta_impl(self, dim, index, src, "add")
    return self.new_empty(self.shape)
1984
+
1985
@register_meta(aten.scatter_add_)
def meta_scatter_add_(self, dim, index, src):
    """Meta kernel for in-place scatter_add: metadata is unchanged."""
    scatter_meta_impl(self, dim, index, src, "add")
    return self
1990
+
1991
@register_meta(
    [
        aten.scatter.src,
        aten.scatter.value,
        aten.scatter.reduce,
        aten.scatter.value_reduce,
    ]
)
@out_wrapper()
def meta_scatter(self, dim, index, src_or_value, reduce=None):
    """Meta kernel for out-of-place scatter (src/value/reduce overloads)."""
    # Scalar-value overloads pass a non-tensor; the checks then skip src.
    src = src_or_value if isinstance(src_or_value, torch.Tensor) else None
    scatter_meta_impl(self, dim, index, src, reduce)
    return self.new_empty(self.shape)
2005
+
2006
@register_meta(
    [
        aten.scatter_.src,
        aten.scatter_.value,
        aten.scatter_.reduce,
        aten.scatter_.value_reduce,
    ]
)
def meta_scatter_(self, dim, index, src_or_value, reduce=None):
    """Meta kernel for in-place scatter: metadata is unchanged."""
    # Scalar-value overloads pass a non-tensor; the checks then skip src.
    src = src_or_value if isinstance(src_or_value, torch.Tensor) else None
    scatter_meta_impl(self, dim, index, src, reduce)
    return self
2019
+
2020
@register_meta(
    [
        aten._scaled_dot_product_flash_attention,
    ]
)
def meta__scaled_dot_product_flash(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    dropout_p: float = 0.0,
    is_causal: bool = False,
    return_debug_mask: bool = False,
):
    """Meta kernel for the flash-attention SDPA forward.

    Returns the 9-tuple (output, logsumexp, cum_seq_q, cum_seq_k, max_q,
    max_k, philox_seed, philox_offset, debug_mask); the philox values are
    placeholders (see the note below).
    """
    # [Note] SDPA_flash's meta function returns incorrect Philox seed and offset:
    # We have added logic to torch/_dynamo/variables/torch.py
    # We need to check if scaled_dot_product_attention will run the flash attention
    # kernel and if dropout is != 0.0. If that is the case then we want dynamo
    # to graph break. The derivative calculation for _scaled_dot_product_flash_attention
    # does not function correctly with cuda graphs because the full philox state is not captured
    # the forward's return values. Another reason to graph break is that the the meta function
    # returns the wrong outputs for philox seed and offset and these values get baked into the
    # inductor fallback calls to the eager kernels.
    check(
        dropout_p == 0.0,
        lambda: f"Can only trace _scaled_dot_product_flash_attention when dropout is set to 0 but got a dropout_p of {dropout_p}.",
    )
    # Inputs arrive as (batch, heads, seq, head_dim).
    batch_size = query.size(0)
    num_heads = query.size(1)
    max_seqlen_batch_q = query.size(2)
    head_dim = query.size(3)

    max_seqlen_batch_k = key.size(2)

    query = query.transpose(1, 2)
    key = key.transpose(1, 2)
    value = value.transpose(1, 2)

    # The kernel flattens (batch, seq) into a single "nnz" dimension, then the
    # result is viewed back to the batched layout.
    Nnz_q = batch_size * max_seqlen_batch_q

    output = torch.empty(
        (Nnz_q, num_heads, head_dim), dtype=query.dtype, device=query.device
    )
    output = output.view(batch_size, max_seqlen_batch_q, num_heads, head_dim).transpose(
        1, 2
    )
    # logsumexp rows are padded up to a multiple of 16.
    max_seqlen_q = math.ceil(max_seqlen_batch_q / 16) * 16
    logsumexp = torch.empty(
        (batch_size, num_heads, max_seqlen_q),
        dtype=torch.float,
        device=query.device,
    )
    cumulative_sequence_length_q = torch.empty(
        batch_size + 1, dtype=torch.int32, device="meta"
    )
    cumulative_sequence_length_k = torch.empty(
        batch_size + 1, dtype=torch.int32, device="meta"
    )

    if return_debug_mask:
        # assumes this mirrors the CUDA kernel's column blocking — TODO confirm
        # against the eager flash-attention implementation.
        blocksize_c = 128 if head_dim > 64 else 256
        max_seqlen_k = math.ceil(max_seqlen_batch_q / blocksize_c)
        if max_seqlen_batch_k <= 128:
            max_seqlen_k = 128
        elif max_seqlen_batch_k <= 256:
            max_seqlen_k = 256
        debug_mask = torch.empty(
            (batch_size, num_heads, max_seqlen_q, max_seqlen_k),
            dtype=query.dtype,
            device=query.device,
        )
    else:
        debug_mask = torch.empty(0, dtype=query.dtype, device=query.device)

    return (
        output,
        logsumexp,
        cumulative_sequence_length_q,
        cumulative_sequence_length_k,
        max_seqlen_batch_q,
        max_seqlen_batch_k,
        1,  # Philox Seed will not be used, see note at top.
        1,  # Philox Offset will not be used, see note at top.
        debug_mask,
    )
2105
+
2106
@register_meta(
    [
        aten._scaled_dot_product_flash_attention_backward,
    ]
)
def meta__scaled_dot_product_flash_backward(
    grad_out: Tensor,
    query: Tensor,
    key: Tensor,
    value: Tensor,
    out: Tensor,
    logsumexp: Tensor,
    cum_seq_q: Tensor,
    cum_seq_k: Tensor,
    max_q: int,
    max_k: int,
    dropout_p: float,
    is_causal: bool,
    philox_seed: int,
    philox_offset: int,
):
    """Meta kernel for flash-attention SDPA backward: (grad_q, grad_k, grad_v).

    Gradients are allocated in the kernel's flattened (nnz, heads, head_dim)
    layout and then viewed back to the (batch, heads, seq, head_dim) layout
    of the forward inputs.
    """
    batch_size = query.size(0)
    num_heads = query.size(1)
    head_dim = query.size(3)

    # Flattened (batch * seq) row counts used by the kernel.
    Nnz_q = batch_size * max_q
    Nnz_kv = batch_size * max_k

    query = query.transpose(1, 2)
    key = key.transpose(1, 2)
    value = value.transpose(1, 2)

    query_reshaped = query.reshape(Nnz_q, num_heads, head_dim)
    key_reshaped = key.reshape(Nnz_kv, num_heads, head_dim)
    value_reshaped = value.reshape(Nnz_kv, num_heads, head_dim)

    grad_q = torch.empty_like(query_reshaped)
    grad_k = torch.empty_like(key_reshaped)
    grad_v = torch.empty_like(value_reshaped)

    # View back to (batch, heads, seq, head_dim) to match the forward inputs.
    grad_q = grad_q.view(batch_size, max_q, num_heads, head_dim).transpose(1, 2)
    grad_k = grad_k.view(batch_size, max_k, num_heads, head_dim).transpose(1, 2)
    grad_v = grad_v.view(batch_size, max_k, num_heads, head_dim).transpose(1, 2)

    return grad_q, grad_k, grad_v
2152
+
2153
@register_meta(
    [
        aten._scaled_dot_product_efficient_attention,
    ]
)
def meta__scaled_dot_product_efficient(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    compute_log_sumexp: bool,
    is_causal: bool = False,
):
    """Meta kernel for the memory-efficient SDPA forward.

    Returns (output, logsumexp). The output matches query's batched layout
    with value's head dim; logsumexp is only materialized (rows padded to a
    multiple of 32) when compute_log_sumexp is set.

    Fix: removed the unused locals N and K.
    """
    # Work in (batch, seq, heads, head_dim) like the kernel, flip back at the end.
    query = query.transpose(1, 2)
    key = key.transpose(1, 2)
    value = value.transpose(1, 2)

    B = query.size(0)
    M = query.size(1)
    num_heads = query.size(-2)
    Kv = value.size(-1)

    res = torch.empty(B, M, num_heads, Kv, dtype=query.dtype, device=query.device)

    # The kernel pads the logsumexp rows up to a multiple of 32.
    logsumexp_dim = math.ceil(M / 32) * 32 if compute_log_sumexp else 0
    logsum_exp = torch.empty(
        (B, num_heads, logsumexp_dim),
        dtype=torch.float,
        device=query.device,
    )

    res = res.transpose(1, 2)

    return res, logsum_exp
2189
+
2190
@register_meta(
    [
        aten._scaled_dot_product_efficient_attention_backward,
    ]
)
def meta__scaled_dot_product_efficient_backward(
    grad_out: Tensor,
    query: Tensor,
    key: Tensor,
    value: Tensor,
    out: Tensor,
    logsumexp: Tensor,
    is_causal: bool = False,
    chunk_grad_outputs=False,
):
    """Meta kernel for memory-efficient SDPA backward: (grad_q, grad_k, grad_v)."""
    # Work in (batch, seq, heads, head_dim) like the kernel.
    grad_out = grad_out.transpose(1, 2)
    query = query.transpose(1, 2)
    key = key.transpose(1, 2)
    value = value.transpose(1, 2)

    B = query.size(0)
    M = query.size(1)
    N = key.size(1)
    nH = query.size(2)
    K = query.size(3)

    # With a causal mask and more keys than queries, some k/v rows never
    # receive gradient, so their buffers must start zeroed.
    grad_kv_needs_init = is_causal and N > M

    if chunk_grad_outputs:
        # Single allocation holding the q/k/v gradients side by side.
        chunk = torch.empty((B, M, 3, nH, K), dtype=query.dtype, device=query.device)
        grad_q = chunk.select(2, 0)
        grad_k = chunk.select(2, 1)
        grad_v = chunk.select(2, 2)
    else:
        grad_q = torch.empty(query.shape, dtype=query.dtype, device=query.device)
        grad_k = (
            torch.zeros(key.shape, dtype=key.dtype, device=key.device)
            if grad_kv_needs_init
            else torch.empty(key.shape, dtype=key.dtype, device=key.device)
        )
        grad_v = (
            torch.zeros(value.shape, dtype=value.dtype, device=value.device)
            if grad_kv_needs_init
            else torch.empty(value.shape, dtype=value.dtype, device=value.device)
        )
    # Flip back to the (batch, heads, seq, head_dim) layout of the inputs.
    return grad_q.transpose(1, 2), grad_k.transpose(1, 2), grad_v.transpose(1, 2)
2237
+
2238
@register_meta([aten.scatter_reduce.two, aten.scatter_reduce.two_out])
@out_wrapper()
def meta_scatter_reduce_two(self, dim, index, src, reduce, include_self=True):
    """Meta kernel for out-of-place scatter_reduce (new-style reduce names)."""
    scatter_meta_impl(self, dim, index, src, reduce, use_new_options=True)
    return self.new_empty(self.shape)
2244
+
2245
@register_meta(aten.scatter_reduce_.two)
def meta_scatter_reduce__two(self, dim, index, src, reduce, include_self=True):
    """Meta kernel for in-place scatter_reduce: metadata is unchanged."""
    scatter_meta_impl(self, dim, index, src, reduce, use_new_options=True)
    return self
2250
+
2251
def multiply_integers(vs):
    """Return the product of an iterable of integers (1 for an empty iterable).

    Uses math.prod, the stdlib equivalent of the previous hand-rolled loop.
    """
    return math.prod(vs)
2257
+
2258
def upsample_common_check(input_size, output_size, num_spatial_dims):
    """Validate upsample sizes and return the full (N, C, *output_size) shape."""
    check(
        len(output_size) == num_spatial_dims,
        lambda: f"It is expected output_size equals to {num_spatial_dims}, but got size {len(output_size)}",
    )
    expected_input_dims = num_spatial_dims + 2  # N, C, ...
    check(
        len(input_size) == expected_input_dims,
        lambda: f"It is expected input_size equals to {expected_input_dims}, but got size {len(input_size)}",
    )

    # Every spatial extent, in and out, must be strictly positive.
    check(
        all(s > 0 for s in input_size[2:]) and all(s > 0 for s in output_size),
        lambda: f"Input and output sizes should be greater than 0, but got "
        f"input size {input_size} and output size {output_size}",
    )

    nbatch, channels = input_size[:2]
    return (nbatch, channels, *output_size)
2278
+
2279
@register_meta(aten.upsample_nearest1d.default)
def upsample_nearest1d(input, output_size, scales=None):
    """Meta kernel for nearest-neighbor 1d upsampling.

    Fix: the non-empty-tensor message was a plain string containing
    {placeholders}; it is now a real f-string.
    """
    check(
        input.numel() != 0 or multiply_integers(input.size()[1:]),
        lambda: f"Non-empty 3D data tensor expected but got a tensor with sizes {input.size()}",
    )
    full_output_size = upsample_common_check(
        input.size(), output_size, num_spatial_dims=1
    )
    return input.new_empty(full_output_size).to(
        memory_format=utils.suggest_memory_format(input)
    )
2292
+
2293
@register_meta(aten.upsample_nearest2d.default)
def upsample_nearest2d(input, output_size, scales_h=None, scales_w=None):
    """Meta kernel for nearest-neighbor 2d upsampling.

    Fix: the non-empty-tensor message was a plain string containing
    {placeholders}; it is now a real f-string.
    """
    check(
        input.numel() != 0 or multiply_integers(input.size()[1:]),
        lambda: f"Non-empty 4D data tensor expected but got a tensor with sizes {input.size()}",
    )
    full_output_size = upsample_common_check(
        input.size(), output_size, num_spatial_dims=2
    )
    output = input.new_empty(full_output_size)

    # convert output to correct memory format, if necessary
    memory_format = utils.suggest_memory_format(input)

    # following "heuristic: only use channels_last path when it's faster than the contiguous path"
    _, n_channels, _, _ = input.shape
    if input.device.type == "cuda" and n_channels < 4:
        memory_format = torch.contiguous_format

    output = output.contiguous(memory_format=memory_format)

    return output
2316
+
2317
@register_meta(aten.upsample_nearest3d.default)
def upsample_nearest3d(input, output_size, scales_d=None, scales_h=None, scales_w=None):
    """Meta kernel for nearest-neighbor 3d upsampling.

    Fix: the non-empty-tensor message was a plain string containing
    {placeholders}; it is now a real f-string.
    """
    check(
        input.numel() != 0 or multiply_integers(input.size()[1:]),
        lambda: f"Non-empty 5D data tensor expected but got a tensor with sizes {input.size()}",
    )
    full_output_size = upsample_common_check(
        input.size(), output_size, num_spatial_dims=3
    )
    return input.new_empty(full_output_size).to(
        memory_format=utils.suggest_memory_format(input)
    )
2330
+
2331
@register_meta([aten.sort.default, aten.sort.stable])
def meta_sort(self, stable=None, dim=-1, descending=False):
    """Meta kernel for sort: (values like self, int64 indices like self)."""
    values = torch.empty_like(self)
    indices = torch.empty_like(self, dtype=torch.int64)
    return values, indices
2335
+
2336
def rnn_cell_checkSizes(
    input_gates, hidden_gates, input_bias, hidden_bias, factor, prev_hidden
):
    """Validate shapes and devices of fused RNN-cell inputs.

    `factor` is the number of gates per hidden unit (4 for LSTM, 3 for GRU).
    The bias tensors are optional and may be None.

    Fix: the device check used to dereference ``x.device`` on every entry,
    raising AttributeError when a bias was None; None entries are now skipped.
    """
    check(input_gates.ndim == 2, lambda: f"{input_gates.ndim} != 2")
    check(
        input_gates.shape == hidden_gates.shape,
        lambda: f"{input_gates.shape} != {hidden_gates.shape}",
    )
    gates_size = input_gates.size(1)
    if input_bias is not None:
        check(input_bias.ndim == 1, lambda: f"{input_bias.ndim} != 1")
        check(
            input_bias.numel() == gates_size,
            lambda: f"{input_bias.numel()} != {gates_size}",
        )
        check(
            input_bias.shape == hidden_bias.shape,
            lambda: f"{input_bias.shape} != {hidden_bias.shape}",
        )
    check(prev_hidden.ndim == 2, lambda: f"{prev_hidden.ndim} != 2")
    # Each hidden unit owns `factor` gate columns.
    expected_prev_hidden_numel = input_gates.size(0) * gates_size // factor
    check(
        prev_hidden.numel() == expected_prev_hidden_numel,
        lambda: f"{prev_hidden.numel()} != {input_gates.size(0)} * {gates_size} // {factor} (aka {expected_prev_hidden_numel})",
    )
    check(
        all(
            x.device == input_gates.device
            for x in [hidden_gates, input_bias, hidden_bias, prev_hidden]
            if x is not None
        ),
        lambda: "expected all inputs to be same device",
    )
2369
+
2370
@register_meta(aten._thnn_fused_lstm_cell.default)
def _thnn_fused_lstm_cell_meta(
    input_gates, hidden_gates, cx, input_bias=None, hidden_bias=None
):
    """Meta kernel for the fused LSTM cell: (hy, cy, workspace)."""
    # LSTM gates come in groups of 4 per hidden unit, hence factor=4.
    rnn_cell_checkSizes(input_gates, hidden_gates, input_bias, hidden_bias, 4, cx)
    contiguous = torch.contiguous_format
    workspace = torch.empty_like(input_gates, memory_format=contiguous)
    hy = torch.empty_like(cx, memory_format=contiguous)
    cy = torch.empty_like(cx, memory_format=contiguous)
    return (hy, cy, workspace)
2380
+
2381
@register_meta(aten._cudnn_rnn.default)
def _cudnn_rnn(
    input,
    weight,
    weight_stride0,
    weight_buf,
    hx,
    cx,
    mode,
    hidden_size,
    proj_size,
    num_layers,
    batch_first,
    dropout,
    train,
    bidirectional,
    batch_sizes,
    dropout_state,
):
    """Meta kernel for the cuDNN RNN forward.

    Returns (output, hy, cy, reserve, weight_buf). Handles both packed
    (batch_sizes non-empty) and padded input layouts.
    """

    # Packed sequences carry their per-step batch sizes explicitly.
    is_input_packed = len(batch_sizes) != 0
    if is_input_packed:
        seq_length = len(batch_sizes)
        mini_batch = batch_sizes[0]
        batch_sizes_sum = input.shape[0]
    else:
        seq_length = input.shape[1] if batch_first else input.shape[0]
        mini_batch = input.shape[0] if batch_first else input.shape[1]
        batch_sizes_sum = -1

    num_directions = 2 if bidirectional else 1
    # With projections, the per-direction output width is proj_size.
    out_size = proj_size if proj_size != 0 else hidden_size
    if is_input_packed:
        out_shape = [batch_sizes_sum, out_size * num_directions]
    else:
        out_shape = (
            [mini_batch, seq_length, out_size * num_directions]
            if batch_first
            else [seq_length, mini_batch, out_size * num_directions]
        )
    output = input.new_empty(out_shape)

    cell_shape = [num_layers * num_directions, mini_batch, hidden_size]
    # cy is an empty placeholder for cell-less modes (no cx given).
    if cx is None:
        cy = torch.empty(0, device=input.device)
    else:
        cy = cx.new_empty(cell_shape)

    hy = hx.new_empty([num_layers * num_directions, mini_batch, out_size])

    # TODO: Query cudnnGetRNNTrainingReserveSize (expose to python)
    # NOTE(review): both branches are 0 pending the TODO above, so the
    # reserve buffer is always empty here.
    reserve_shape = 0 if train else 0
    reserve = input.new_empty(reserve_shape, dtype=torch.uint8)

    return output, hy, cy, reserve, weight_buf
2437
+
2438
@register_meta(aten.mkldnn_rnn_layer.default)
def mkldnn_rnn_layer(
    input,
    w0,
    w1,
    w2,
    w3,
    hx_,
    cx_,
    reverse,
    batch_sizes,
    mode,
    hidden_size,
    num_layers,
    has_biases,
    bidirectional,
    batch_first,
    train,
):
    """Meta kernel for one MKL-DNN RNN layer: (output, hy, cy, workspace)."""
    if batch_first:
        mini_batch, seq_length = input.shape[0], input.shape[1]
        out_shape = [mini_batch, seq_length, hidden_size]
    else:
        seq_length, mini_batch = input.shape[0], input.shape[1]
        out_shape = [seq_length, mini_batch, hidden_size]
    output = input.new_empty(out_shape)
    # Hidden/cell outputs mirror their inputs; empty placeholders when absent.
    if hx_ is None:
        hy = torch.empty(0, device=input.device)
    else:
        hy = hx_.new_empty(hx_.shape)
    if cx_ is None:
        cy = torch.empty(0, device=input.device)
    else:
        cy = cx_.new_empty(cx_.shape)
    workspace = torch.empty(0, device=input.device, dtype=torch.uint8)
    return output, hy, cy, workspace
2477
+
2478
def zero_numel_check_dims(self, dim, fn_name):
    """Validate that a reduction over `dim` is legal for `self`.

    For a 0-dim tensor only dims 0 and -1 are accepted; otherwise the
    reduced dimension must have non-zero size. Raises IndexError via
    `check` on violation.
    """
    if self.ndim == 0:
        check(
            dim in (0, -1),
            lambda: f"{fn_name}: Expected reduction dim -1 or 0 for scalar but got {dim}",
            IndexError,
        )
        return
    check(
        self.size(dim) != 0,
        lambda: f"{fn_name}: Expected reduction dim {dim} to have non-zero size.",
        IndexError,
    )
2491
+
2492
+
2493
# From aten/src/ATen/native/ReduceOps.cpp
def check_argmax_argmin(name, self, dim):
    """Shared input validation for argmax/argmin.

    With an explicit `dim`, wrap it and run the zero-numel dim check;
    without one, the input must be non-empty.
    """
    if dim is None:
        check(
            self.numel() != 0,
            lambda: f"{name}: Expected reduction dim to be specified for input.numel() == 0.",
        )
        return
    wrapped_dim = maybe_wrap_dim(dim, self.dim())
    zero_numel_check_dims(self, wrapped_dim, name)
2503
+
2504
+
2505
@register_meta([aten.argmax.default, aten.argmin.default])
def argmax_argmin_meta(self, dim=None, keepdim=False):
    """Meta kernel for argmax/argmin: int64 tensor with the reduced shape."""
    # NOTE(review): the validation message always names "argmax", even when
    # the op being dispatched is argmin — preserved from the original.
    check_argmax_argmin("argmax", self, dim)
    reduce_over = (dim,) if dim is not None else None
    dims = utils.reduction_dims(self.shape, reduce_over)
    out_shape = _compute_reduction_shape(self, dims, keepdim)
    return self.new_empty(out_shape, dtype=torch.int64)
2511
+
2512
+
2513
@register_meta(aten.scalar_tensor.default)
def scalar_tensor(s, dtype=None, layout=None, device=None, pin_memory=None):
    """Meta kernel for scalar_tensor: an empty 0-dim tensor.

    The scalar value `s` is irrelevant on the meta device; only the
    dtype/layout/device/pin_memory properties are carried through.
    """
    return torch.empty((), dtype=dtype, layout=layout, device=device, pin_memory=pin_memory)
2518
+
2519
+
2520
@register_meta(aten.topk.default)
def topk_meta(self, k, dim=-1, largest=True, sorted=True):
    """Meta kernel for topk: (values, int64 indices), with size k along dim."""
    # From aten/src/ATen/native/Sorting.cpp
    dim = maybe_wrap_dim(dim, self.dim(), wrap_scalar=True)
    # 0-dim tensors are treated as having a slice of length 1.
    slice_size = self.size(dim) if self.dim() > 0 else 1
    check(0 <= k <= slice_size, lambda: "selected index k out of range")
    # NOTE(review): this second check is redundant with the first (same
    # condition, different message) — preserved from the original.
    check(0 <= k <= slice_size, lambda: "k not in range for dimension")

    out_shape = list(self.shape)
    if out_shape:
        out_shape[dim] = k
    return self.new_empty(out_shape), self.new_empty(out_shape, dtype=torch.int64)
2535
+
2536
+
2537
# Alias for torch.contiguous_format, mirroring ATen's "legacy contiguous"
# memory-format naming.
legacy_contiguous_memory_format = torch.contiguous_format
2538
+
2539
+
2540
# From aten/src/ATen/native/cuda/RNN.cu
def checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace):
    """Shape-validate inputs of the fused LSTM cell backward.

    All gradient/state tensors must be 2-D with a common size; the
    workspace must be 2-D and hold 4 gate values per element.
    """
    defined_grad = grad_cy if grad_hy is None else grad_hy
    check(defined_grad.dim() == 2, lambda: "")
    exp_size = defined_grad.size()
    for maybe_grad in (grad_hy, grad_cy):
        if maybe_grad is not None:
            check(maybe_grad.size() == exp_size, lambda: "")
    for state in (cx, cy):
        check(state.size() == exp_size, lambda: "")
    check(workspace.dim() == 2, lambda: "")
    # 4 gates (input/forget/cell/output) per hidden element.
    check(workspace.numel() == exp_size[0] * exp_size[1] * 4, lambda: "")
2553
+
2554
+
2555
# From aten/src/ATen/native/cuda/RNN.cu
@register_meta(aten._thnn_fused_lstm_cell_backward_impl.default)
def _thnn_fused_lstm_cell_backward_impl(grad_hy, grad_cy, cx, cy, workspace, has_bias):
    """Meta kernel for the fused LSTM cell backward.

    Returns (grad_gates, grad_cx, grad_bias); all None when neither
    output gradient is provided.
    """
    if grad_hy is None and grad_cy is None:
        return None, None, None
    checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace)
    grad_gates = torch.empty_like(
        workspace, memory_format=legacy_contiguous_memory_format
    )
    grad_cx = torch.empty_like(cx, memory_format=legacy_contiguous_memory_format)
    if has_bias:
        # Bias gradient is the gate gradient summed over the batch dim.
        grad_bias = grad_gates.sum(0, keepdim=False)
    else:
        grad_bias = None
    return grad_gates, grad_cx, grad_bias
2567
+
2568
+
2569
@register_meta(aten.pixel_shuffle.default)
def meta_pixel_shuffle(self, upscale_factor):
    """Meta kernel for pixel_shuffle.

    Computes the output shape (..., C/r^2, H*r, W*r) and picks a memory
    format consistent with the eager kernels.
    """
    # Input must have at least 3 dims and a channel dim divisible by r^2.
    assert (
        len(self.shape) > 2 and self.shape[-3] % (upscale_factor * upscale_factor) == 0
    ), f"Invalid input shape for pixel_shuffle: {self.shape} with upscale_factor = {upscale_factor}"

    def is_channels_last(ten):
        # True when the suggested memory format for `ten` is channels_last.
        return torch._prims_common.suggest_memory_format(ten) == torch.channels_last

    def pick_memory_format():
        # Mirror the eager kernels: CUDA pixel_shuffle produces contiguous
        # output even for channels_last input; CPU keeps channels_last.
        # NOTE(review): if none of these branches match, this implicitly
        # returns None, and `out.to(memory_format=None)` is called below —
        # presumably intentional fall-through; confirm against eager behavior.
        if is_channels_last(self):
            if device_hint(self) == "cuda":
                return torch.contiguous_format
            else:
                return torch.channels_last
        elif self.is_contiguous(memory_format=torch.contiguous_format):
            return torch.contiguous_format
        elif self.is_contiguous(memory_format=torch.preserve_format):
            return torch.preserve_format

    C = self.shape[-3] // (upscale_factor * upscale_factor)
    Hr = self.shape[-2] * upscale_factor
    Wr = self.shape[-1] * upscale_factor
    out_shape = (*self.shape[:-3], C, Hr, Wr)

    out = self.new_empty(out_shape)
    out = out.to(memory_format=pick_memory_format())  # type: ignore[call-overload]
    return out
2597
+
2598
+
2599
@register_meta(aten.mkldnn_rnn_layer_backward.default)
def mkldnn_rnn_layer_backward(
    input,
    weight0,
    weight1,
    weight2,
    weight3,
    hx_,
    cx_tmp,
    output,
    hy_,
    cy_,
    grad_output_r_opt,
    grad_hy_r_opt,
    grad_cy_r_opt,
    reverse,
    mode,
    hidden_size,
    num_layers,
    has_biases,
    train,
    bidirectional,
    batch_sizes,
    batch_first,
    workspace,
):
    """Meta kernel for mkldnn_rnn_layer_backward.

    Each gradient is an empty tensor shaped like the corresponding
    forward-pass input.
    """
    grad_input = input.new_empty(input.shape)
    grad_hx = hx_.new_empty(hx_.shape)
    grad_cx = cx_tmp.new_empty(cx_tmp.shape)
    grad_w1 = weight0.new_empty(weight0.shape)
    grad_w2 = weight1.new_empty(weight1.shape)
    grad_bias = weight2.new_empty(weight2.shape)
    # The bias gradient appears twice in the output tuple (one slot per
    # bias term), matching the original return.
    return grad_input, grad_w1, grad_w2, grad_bias, grad_bias, grad_hx, grad_cx
2632
+
2633
+
2634
@register_meta([aten.bucketize.Tensor, aten.bucketize.Tensor_out])
@out_wrapper()
def meta_bucketize(self, boundaries, *, out_int32=False, right=False):
    """Meta kernel for bucketize: contiguous integer tensor shaped like self."""
    index_dtype = torch.int32 if out_int32 else torch.int64
    return torch.empty_like(self, dtype=index_dtype).contiguous()
2640
+
2641
+
2642
+ # We must also trigger meta registrations from PrimTorch ref
2643
+ # decompositions
2644
+ import torch._refs
2645
+ import torch._refs.nn.functional
2646
+ import torch._refs.special
2647
+
2648
+
2649
def activate_meta():
    """Register the collected meta/decomposition functions with the dispatcher.

    Walks the global decomposition tables in precedence order
    (meta > post_autograd > pre_autograd), keeping the first function seen
    per op overload, then installs each as the Meta-key python impl and —
    unless the op is excluded below — into the appropriate meta library.
    """

    activate_meta_table = {}

    # For a given op, we pick the most specific decomp function from
    # global_decomp_table in the precedence order of meta > post_autograd > pre_autograd
    for type in ["meta", "post_autograd", "pre_autograd"]:
        registry = global_decomposition_table[type]

        for opo in registry:
            if opo not in activate_meta_table:
                activate_meta_table[opo] = registry[opo]

    for op_overload, fn in activate_meta_table.items():
        assert isinstance(op_overload, OpOverload)

        op_overload.py_impl(torch._C.DispatchKey.Meta)(fn)

        if torch._C._dispatch_has_kernel_for_dispatch_key(
            op_overload.name(), "CompositeImplicitAutograd"
        ):
            # Internally, we shouldn't be registering meta kernels for any operators that
            # have CompositeImplicitAutograd kernels.
            # Instead, we should be letting those decompositions run, and writing meta kernels
            # only for the base operators.
            if op_overload in global_decomposition_table["meta"]:
                raise RuntimeError(
                    f"{op_overload} is a CompositeImplicitAutograd op, we shouldn't "
                    "register meta function for it. Instead, we should let the decomposition run and write "
                    "meta kernels for the base operators."
                )
            pass
        elif op_overload.is_view:
            # Attempting to register a python meta kernel for a view operator.
            # We shouldn't do this, because the output will report as not having aliased storages.
            # All view ops have meta kernels in C++ today, so we should use those instead.
            pass
        elif op_overload.name() in {
            "aten::empty_strided",  # causing infinite recursion, test_meta.py
            "aten::clone",  # causing infinite recursion
            "aten::_to_copy",  # causing infinite recursion, test_serialization.py -k test_tensor_subclass_getstate_overwrite  # noqa: B950
            "aten::copy_",  # Exception not raised, test_torch.py -k test_storage_meta_errors_cpu_int64  # noqa: B950
            "aten::constant_pad_nd",  # requires_grad mismatch, test_ops.py -k test_fake_crossref_backward_amp_istft_cuda_float32  # noqa: B950
            "aten::rot90",  # requires_grad mismatch! test_ops.py -k test_fake_crossref_backward_amp_rot90_cuda_float32  # noqa: B950
            "aten::as_strided_scatter",  # requires_grad mismatch, test_ops.py -k test_fake_crossref_backward_no_amp_as_strided_scatter_cuda_float32  # noqa: B950
        }:
            # Explicit denylist: ops whose python meta registration is known
            # to misbehave (reasons noted per entry above).
            pass
        else:
            # Route mkldnn/mkl namespaced ops to their dedicated libraries;
            # everything else goes to the general meta library.
            if "mkldnn::" in op_overload.name():
                _meta_lib_dont_use_me_use_register_meta_for_mkldnn.impl(op_overload, fn)
            elif "mkl::" in op_overload.name():
                _meta_lib_dont_use_me_use_register_meta_for_mkl.impl(op_overload, fn)
            else:
                _meta_lib_dont_use_me_use_register_meta.impl(op_overload, fn)
2703
+
2704
+
2705
+ activate_meta()
wemm/lib/python3.10/site-packages/torch/_namedtensor_internals.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import OrderedDict
2
+
3
+ """
4
+ This file contains helper functions that implement experimental functionality
5
+ for named tensors in python. All of these are experimental, unstable, and
6
+ subject to change or deletion.
7
+ """
8
+
9
+
10
def check_serializing_named_tensor(tensor):
    """Reject serialization of tensors that carry dimension names (NYI)."""
    if not tensor.has_names():
        return
    raise RuntimeError(
        "NYI: Named tensors don't support serialization. Please drop "
        "names via `tensor = tensor.rename(None)` before serialization."
    )
16
+
17
+
18
def build_dim_map(tensor):
    """Returns a map of { dim: dim_name } where dim is a name if the dim is named
    and the dim index otherwise."""
    mapping = OrderedDict()
    for idx, name in enumerate(tensor.names):
        key = idx if name is None else name
        mapping[key] = name
    return mapping
24
+
25
+
26
def unzip_namedshape(namedshape):
    """Split a named shape into parallel (names, sizes) tuples.

    `namedshape` is either an OrderedDict mapping name -> size or an
    iterable of (name, size) pairs. Returns a zip object yielding the
    tuple of names first, then the tuple of sizes.

    Raises RuntimeError when `namedshape` is not iterable or is empty.
    """
    if isinstance(namedshape, OrderedDict):
        namedshape = namedshape.items()
    # Reject non-iterables up front so the error names `namedshape` instead
    # of surfacing as a confusing zip() failure.
    if not hasattr(namedshape, "__iter__") and not isinstance(namedshape, tuple):
        raise RuntimeError(
            "Expected namedshape to be OrderedDict or iterable of tuples, got: {}".format(
                type(namedshape)
            )
        )
    # NOTE(review): relies on len(), so sized containers only — a generator
    # would raise TypeError here.
    if len(namedshape) == 0:
        # Fixed grammar of the message ("to non-empty" -> "to be non-empty").
        raise RuntimeError("Expected namedshape to be non-empty.")
    return zip(*namedshape)
38
+
39
+
40
def namer_api_name(inplace):
    """Name of the user-facing rename API: 'rename_' in-place, else 'rename'."""
    return "rename_" if inplace else "rename"
45
+
46
+
47
def is_ellipsis(item):
    """True when `item` stands for an ellipsis: `...` or the string '...'."""
    return item in (Ellipsis, "...")
49
+
50
+
51
def single_ellipsis_index(names, fn_name):
    """Index of the single ellipsis in `names`, or None when absent.

    Raises RuntimeError if more than one ellipsis is present.
    """
    found = [
        i for i, name in enumerate(names) if name == Ellipsis or name == "..."
    ]
    if len(found) > 1:
        raise RuntimeError(
            "{}: More than one Ellipsis ('...') found in names ("
            "{}). This function supports up to one Ellipsis.".format(fn_name, names)
        )
    return found[0] if found else None
61
+
62
+
63
def expand_single_ellipsis(numel_pre_glob, numel_post_glob, names):
    """Slice `names`, dropping the first and last N entries respectively."""
    end = len(names) - numel_post_glob
    return names[numel_pre_glob:end]
65
+
66
+
67
def replace_ellipsis_by_position(ellipsis_idx, names, tensor_names):
    """Splice the ellipsis at `ellipsis_idx` with names taken positionally
    from `tensor_names`, covering what the explicit names do not."""
    num_after = len(names) - ellipsis_idx - 1
    # Glob from tensor_names: skip the entries matched by the explicit
    # names before and after the ellipsis.
    globbed_names = tensor_names[ellipsis_idx : len(tensor_names) - num_after]
    return names[:ellipsis_idx] + globbed_names + names[ellipsis_idx + 1 :]
72
+
73
+
74
def resolve_ellipsis(names, tensor_names, fn_name):
    """
    Expands ... inside `names` to be equal to a list of names from `tensor_names`.
    """
    idx = single_ellipsis_index(names, fn_name)
    if idx is None:
        return names
    return replace_ellipsis_by_position(idx, names, tensor_names)
82
+
83
+
84
def update_names_with_list(tensor, names, inplace):
    """Apply a positional rename; a single None argument drops all names."""
    # Special case for tensor.rename(None): clear every dim name.
    if len(names) == 1 and names[0] is None:
        return tensor._update_names(None, inplace)

    resolved = resolve_ellipsis(names, tensor.names, namer_api_name(inplace))
    return tensor._update_names(resolved, inplace)
92
+
93
+
94
def update_names_with_mapping(tensor, rename_map, inplace):
    """Rename dims per {old_name: new_name}; unknown old names raise."""
    dim_map = build_dim_map(tensor)
    for old_dim, new_dim in rename_map.items():
        if old_dim not in dim_map:
            raise RuntimeError(
                (
                    "{api_name}: Tried to rename dim '{old_dim}' to dim "
                    "{new_dim} in Tensor[{dims}] but dim '{old_dim}' does not exist"
                ).format(
                    old_dim=old_dim,
                    new_dim=new_dim,
                    dims=tensor.names,
                    api_name=namer_api_name(inplace),
                )
            )
        dim_map[old_dim] = new_dim
    return tensor._update_names(tuple(dim_map.values()), inplace)
113
+
114
+
115
def update_names(tensor, names, rename_map, inplace):
    """Dispatcher behind tensor.rename / tensor.rename_.

    Two mutually exclusive usages:

    tensor.rename(*names) returns a view with named dims `names`.
    `names` must have length `tensor.dim()`, except that a single '...'
    entry is expanded greedily with the corresponding names from
    `tensor.names`.

    For example,
    ```
    >>> # xdoctest: +SKIP
    >>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
    >>> x.rename('...', 'height', 'width').names
    ('N', 'C', 'height', 'width')

    >>> # xdoctest: +SKIP
    >>> x.rename('batch', '...', 'width').names
    ('batch', 'C', 'H', 'width')

    ```

    tensor.rename(**rename_map) returns a view with dims renamed as
    specified by the {old_name: new_name} mapping.

    For example,
    ```
    >>> # xdoctest: +SKIP
    >>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
    >>> x.rename(W='width', H='height').names
    ('N', 'C', 'height', 'width')

    ```

    tensor.rename_ is the in-place variant.
    """
    has_names = len(names) > 0
    has_rename_pairs = bool(rename_map)
    if has_names and has_rename_pairs:
        raise RuntimeError(
            "{api_name}: This function takes either positional "
            "args or keyword args, but not both. Use tensor.{api_name}(*names) "
            "to name dims and tensor.{api_name}(**rename_map) to rename "
            "dims.".format(api_name=namer_api_name(inplace))
        )

    if has_rename_pairs:
        return update_names_with_mapping(tensor, rename_map, inplace)
    # Covers both positional names and the no-args call, which is valid
    # for a 0-dim tensor: tensor.rename(*[]).
    return update_names_with_list(tensor, names, inplace)