leewheel commited on
Commit
7e165b0
·
verified ·
1 Parent(s): 3a1dc79

Upload 12515 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +74 -0
  2. env/Lib/site-packages/torch/_C.cp310-win_amd64.pyd +0 -0
  3. env/Lib/site-packages/torch/_C/_VariableFunctions.pyi +0 -0
  4. env/Lib/site-packages/torch/_C/__init__.pyi +0 -0
  5. env/Lib/site-packages/torch/_C/_aoti.pyi +20 -0
  6. env/Lib/site-packages/torch/_C/_autograd.pyi +135 -0
  7. env/Lib/site-packages/torch/_C/_cpu.pyi +12 -0
  8. env/Lib/site-packages/torch/_C/_cudnn.pyi +17 -0
  9. env/Lib/site-packages/torch/_C/_cusparselt.pyi +1 -0
  10. env/Lib/site-packages/torch/_C/_distributed_autograd.pyi +27 -0
  11. env/Lib/site-packages/torch/_C/_distributed_c10d.pyi +699 -0
  12. env/Lib/site-packages/torch/_C/_distributed_rpc.pyi +188 -0
  13. env/Lib/site-packages/torch/_C/_distributed_rpc_testing.pyi +32 -0
  14. env/Lib/site-packages/torch/_C/_functions.pyi +11 -0
  15. env/Lib/site-packages/torch/_C/_functorch.pyi +83 -0
  16. env/Lib/site-packages/torch/_C/_instruction_counter.pyi +4 -0
  17. env/Lib/site-packages/torch/_C/_itt.pyi +5 -0
  18. env/Lib/site-packages/torch/_C/_lazy.pyi +27 -0
  19. env/Lib/site-packages/torch/_C/_lazy_ts_backend.pyi +12 -0
  20. env/Lib/site-packages/torch/_C/_monitor.pyi +44 -0
  21. env/Lib/site-packages/torch/_C/_nn.pyi +89 -0
  22. env/Lib/site-packages/torch/_C/_nvtx.pyi +7 -0
  23. env/Lib/site-packages/torch/_C/_onnx.pyi +39 -0
  24. env/Lib/site-packages/torch/_C/_profiler.pyi +244 -0
  25. env/Lib/site-packages/torch/_C/_verbose.pyi +3 -0
  26. env/Lib/site-packages/torch/_VF.py +31 -0
  27. env/Lib/site-packages/torch/_VF.pyi +0 -0
  28. env/Lib/site-packages/torch/__config__.py +23 -0
  29. env/Lib/site-packages/torch/__future__.py +75 -0
  30. env/Lib/site-packages/torch/__init__.py +0 -0
  31. env/Lib/site-packages/torch/__pycache__/_VF.cpython-310.pyc +0 -0
  32. env/Lib/site-packages/torch/__pycache__/__config__.cpython-310.pyc +0 -0
  33. env/Lib/site-packages/torch/__pycache__/__future__.cpython-310.pyc +0 -0
  34. env/Lib/site-packages/torch/__pycache__/__init__.cpython-310.pyc +0 -0
  35. env/Lib/site-packages/torch/__pycache__/_appdirs.cpython-310.pyc +0 -0
  36. env/Lib/site-packages/torch/__pycache__/_classes.cpython-310.pyc +0 -0
  37. env/Lib/site-packages/torch/__pycache__/_compile.cpython-310.pyc +0 -0
  38. env/Lib/site-packages/torch/__pycache__/_custom_ops.cpython-310.pyc +0 -0
  39. env/Lib/site-packages/torch/__pycache__/_deploy.cpython-310.pyc +0 -0
  40. env/Lib/site-packages/torch/__pycache__/_guards.cpython-310.pyc +0 -0
  41. env/Lib/site-packages/torch/__pycache__/_jit_internal.cpython-310.pyc +0 -0
  42. env/Lib/site-packages/torch/__pycache__/_linalg_utils.cpython-310.pyc +0 -0
  43. env/Lib/site-packages/torch/__pycache__/_lobpcg.cpython-310.pyc +0 -0
  44. env/Lib/site-packages/torch/__pycache__/_lowrank.cpython-310.pyc +0 -0
  45. env/Lib/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc +3 -0
  46. env/Lib/site-packages/torch/__pycache__/_namedtensor_internals.cpython-310.pyc +0 -0
  47. env/Lib/site-packages/torch/__pycache__/_ops.cpython-310.pyc +0 -0
  48. env/Lib/site-packages/torch/__pycache__/_python_dispatcher.cpython-310.pyc +0 -0
  49. env/Lib/site-packages/torch/__pycache__/_size_docs.cpython-310.pyc +0 -0
  50. env/Lib/site-packages/torch/__pycache__/_sources.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -216,3 +216,77 @@ env/Lib/site-packages/torchaudio/lib/_torchaudio.pyd filter=lfs diff=lfs merge=l
216
  env/Lib/site-packages/torchaudio/lib/libctc_prefix_decoder.pyd filter=lfs diff=lfs merge=lfs -text
217
  env/Lib/site-packages/torchaudio/lib/libtorchaudio.pyd filter=lfs diff=lfs merge=lfs -text
218
  env/Lib/site-packages/torchaudio/lib/pybind11_prefixctc.pyd filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
216
  env/Lib/site-packages/torchaudio/lib/libctc_prefix_decoder.pyd filter=lfs diff=lfs merge=lfs -text
217
  env/Lib/site-packages/torchaudio/lib/libtorchaudio.pyd filter=lfs diff=lfs merge=lfs -text
218
  env/Lib/site-packages/torchaudio/lib/pybind11_prefixctc.pyd filter=lfs diff=lfs merge=lfs -text
219
+ env/Lib/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
220
+ env/Lib/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
221
+ env/Lib/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
222
+ env/Lib/site-packages/torch/__pycache__/overrides.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
223
+ env/Lib/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
224
+ env/Lib/site-packages/torch/_dynamo/__pycache__/trace_rules.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
225
+ env/Lib/site-packages/torch/_inductor/__pycache__/ir.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
226
+ env/Lib/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
227
+ env/Lib/site-packages/torch/_inductor/__pycache__/scheduler.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
228
+ env/Lib/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
229
+ env/Lib/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
230
+ env/Lib/site-packages/torch/bin/asmjit.dll filter=lfs diff=lfs merge=lfs -text
231
+ env/Lib/site-packages/torch/bin/fbgemm.dll filter=lfs diff=lfs merge=lfs -text
232
+ env/Lib/site-packages/torch/bin/protoc.exe filter=lfs diff=lfs merge=lfs -text
233
+ env/Lib/site-packages/torch/distributed/__pycache__/distributed_c10d.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
234
+ env/Lib/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
235
+ env/Lib/site-packages/torch/lib/asmjit.dll filter=lfs diff=lfs merge=lfs -text
236
+ env/Lib/site-packages/torch/lib/asmjit.lib filter=lfs diff=lfs merge=lfs -text
237
+ env/Lib/site-packages/torch/lib/c10_cuda.dll filter=lfs diff=lfs merge=lfs -text
238
+ env/Lib/site-packages/torch/lib/c10.dll filter=lfs diff=lfs merge=lfs -text
239
+ env/Lib/site-packages/torch/lib/c10.lib filter=lfs diff=lfs merge=lfs -text
240
+ env/Lib/site-packages/torch/lib/cpuinfo.lib filter=lfs diff=lfs merge=lfs -text
241
+ env/Lib/site-packages/torch/lib/cublas64_12.dll filter=lfs diff=lfs merge=lfs -text
242
+ env/Lib/site-packages/torch/lib/cublasLt64_12.dll filter=lfs diff=lfs merge=lfs -text
243
+ env/Lib/site-packages/torch/lib/cudart64_12.dll filter=lfs diff=lfs merge=lfs -text
244
+ env/Lib/site-packages/torch/lib/cudnn_adv64_9.dll filter=lfs diff=lfs merge=lfs -text
245
+ env/Lib/site-packages/torch/lib/cudnn_cnn64_9.dll filter=lfs diff=lfs merge=lfs -text
246
+ env/Lib/site-packages/torch/lib/cudnn_engines_precompiled64_9.dll filter=lfs diff=lfs merge=lfs -text
247
+ env/Lib/site-packages/torch/lib/cudnn_engines_runtime_compiled64_9.dll filter=lfs diff=lfs merge=lfs -text
248
+ env/Lib/site-packages/torch/lib/cudnn_graph64_9.dll filter=lfs diff=lfs merge=lfs -text
249
+ env/Lib/site-packages/torch/lib/cudnn_heuristic64_9.dll filter=lfs diff=lfs merge=lfs -text
250
+ env/Lib/site-packages/torch/lib/cudnn_ops64_9.dll filter=lfs diff=lfs merge=lfs -text
251
+ env/Lib/site-packages/torch/lib/cudnn64_9.dll filter=lfs diff=lfs merge=lfs -text
252
+ env/Lib/site-packages/torch/lib/cufft64_11.dll filter=lfs diff=lfs merge=lfs -text
253
+ env/Lib/site-packages/torch/lib/cufftw64_11.dll filter=lfs diff=lfs merge=lfs -text
254
+ env/Lib/site-packages/torch/lib/cupti64_2023.1.1.dll filter=lfs diff=lfs merge=lfs -text
255
+ env/Lib/site-packages/torch/lib/curand64_10.dll filter=lfs diff=lfs merge=lfs -text
256
+ env/Lib/site-packages/torch/lib/cusolver64_11.dll filter=lfs diff=lfs merge=lfs -text
257
+ env/Lib/site-packages/torch/lib/cusolverMg64_11.dll filter=lfs diff=lfs merge=lfs -text
258
+ env/Lib/site-packages/torch/lib/cusparse64_12.dll filter=lfs diff=lfs merge=lfs -text
259
+ env/Lib/site-packages/torch/lib/dnnl.lib filter=lfs diff=lfs merge=lfs -text
260
+ env/Lib/site-packages/torch/lib/fbgemm.dll filter=lfs diff=lfs merge=lfs -text
261
+ env/Lib/site-packages/torch/lib/fbgemm.lib filter=lfs diff=lfs merge=lfs -text
262
+ env/Lib/site-packages/torch/lib/fmt.lib filter=lfs diff=lfs merge=lfs -text
263
+ env/Lib/site-packages/torch/lib/kineto.lib filter=lfs diff=lfs merge=lfs -text
264
+ env/Lib/site-packages/torch/lib/libiomp5md.dll filter=lfs diff=lfs merge=lfs -text
265
+ env/Lib/site-packages/torch/lib/libprotobuf-lite.lib filter=lfs diff=lfs merge=lfs -text
266
+ env/Lib/site-packages/torch/lib/libprotobuf.lib filter=lfs diff=lfs merge=lfs -text
267
+ env/Lib/site-packages/torch/lib/libprotoc.lib filter=lfs diff=lfs merge=lfs -text
268
+ env/Lib/site-packages/torch/lib/nvJitLink_120_0.dll filter=lfs diff=lfs merge=lfs -text
269
+ env/Lib/site-packages/torch/lib/nvrtc-builtins64_121.dll filter=lfs diff=lfs merge=lfs -text
270
+ env/Lib/site-packages/torch/lib/nvrtc64_120_0.dll filter=lfs diff=lfs merge=lfs -text
271
+ env/Lib/site-packages/torch/lib/pthreadpool.lib filter=lfs diff=lfs merge=lfs -text
272
+ env/Lib/site-packages/torch/lib/sleef.lib filter=lfs diff=lfs merge=lfs -text
273
+ env/Lib/site-packages/torch/lib/torch_cpu.dll filter=lfs diff=lfs merge=lfs -text
274
+ env/Lib/site-packages/torch/lib/torch_cpu.lib filter=lfs diff=lfs merge=lfs -text
275
+ env/Lib/site-packages/torch/lib/torch_cuda.dll filter=lfs diff=lfs merge=lfs -text
276
+ env/Lib/site-packages/torch/lib/torch_cuda.lib filter=lfs diff=lfs merge=lfs -text
277
+ env/Lib/site-packages/torch/lib/torch_python.dll filter=lfs diff=lfs merge=lfs -text
278
+ env/Lib/site-packages/torch/lib/torch_python.lib filter=lfs diff=lfs merge=lfs -text
279
+ env/Lib/site-packages/torch/lib/uv.dll filter=lfs diff=lfs merge=lfs -text
280
+ env/Lib/site-packages/torch/lib/XNNPACK.lib filter=lfs diff=lfs merge=lfs -text
281
+ env/Lib/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
282
+ env/Lib/site-packages/torch/nn/__pycache__/functional.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
283
+ env/Lib/site-packages/torch/onnx/__pycache__/symbolic_opset9.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
284
+ env/Lib/site-packages/torch/sparse/__pycache__/_triton_ops_meta.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
285
+ env/Lib/site-packages/torch/testing/_internal/__pycache__/common_methods_invocations.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
286
+ env/Lib/site-packages/torch/testing/_internal/__pycache__/common_nn.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
287
+ env/Lib/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
288
+ env/Lib/site-packages/torch/testing/_internal/__pycache__/common_utils.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
289
+ env/Lib/site-packages/torch/testing/_internal/distributed/__pycache__/distributed_test.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
290
+ env/Lib/site-packages/torch/testing/_internal/distributed/rpc/__pycache__/rpc_test.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
291
+ env/Lib/site-packages/torch/testing/_internal/generated/__pycache__/annotated_fn_args.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
292
+ env/Lib/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
env/Lib/site-packages/torch/_C.cp310-win_amd64.pyd ADDED
Binary file (10.2 kB). View file
 
env/Lib/site-packages/torch/_C/_VariableFunctions.pyi ADDED
The diff for this file is too large to render. See raw diff
 
env/Lib/site-packages/torch/_C/__init__.pyi ADDED
The diff for this file is too large to render. See raw diff
 
env/Lib/site-packages/torch/_C/_aoti.pyi ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ctypes import c_void_p
2
+
3
+ from torch import Tensor
4
+
5
+ # Defined in torch/csrc/inductor/aoti_runner/pybind.cpp
6
+
7
+ # Tensor to AtenTensorHandle
8
+ def unsafe_alloc_void_ptrs_from_tensors(tensors: list[Tensor]) -> list[c_void_p]: ...
9
+ def unsafe_alloc_void_ptr_from_tensor(tensor: Tensor) -> c_void_p: ...
10
+
11
+ # AtenTensorHandle to Tensor
12
+ def alloc_tensors_by_stealing_from_void_ptrs(
13
+ handles: list[c_void_p],
14
+ ) -> list[Tensor]: ...
15
+ def alloc_tensor_by_stealing_from_void_ptr(
16
+ handle: c_void_p,
17
+ ) -> Tensor: ...
18
+
19
+ class AOTIModelContainerRunnerCpu: ...
20
+ class AOTIModelContainerRunnerCuda: ...
env/Lib/site-packages/torch/_C/_autograd.pyi ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from enum import Enum
3
+ from typing import Any, Callable
4
+
5
+ import torch
6
+ from torch._C._profiler import (
7
+ _ProfilerEvent,
8
+ ActiveProfilerType,
9
+ ProfilerActivity,
10
+ ProfilerConfig,
11
+ )
12
+
13
+ # Defined in torch/csrc/autograd/init.cpp
14
+
15
+ class DeviceType(Enum):
16
+ CPU = ...
17
+ CUDA = ...
18
+ XPU = ...
19
+ MKLDNN = ...
20
+ OPENGL = ...
21
+ OPENCL = ...
22
+ IDEEP = ...
23
+ HIP = ...
24
+ FPGA = ...
25
+ MAIA = ...
26
+ XLA = ...
27
+ MTIA = ...
28
+ MPS = ...
29
+ HPU = ...
30
+ Meta = ...
31
+ Vulkan = ...
32
+ Metal = ...
33
+ PrivateUse1 = ...
34
+
35
+ class ProfilerEvent:
36
+ def cpu_elapsed_us(self, other: ProfilerEvent) -> float: ...
37
+ def cpu_memory_usage(self) -> int: ...
38
+ def cuda_elapsed_us(self, other: ProfilerEvent) -> float: ...
39
+ def privateuse1_elapsed_us(self, other: ProfilerEvent) -> float: ...
40
+ def cuda_memory_usage(self) -> int: ...
41
+ def device(self) -> int: ...
42
+ def handle(self) -> int: ...
43
+ def has_cuda(self) -> bool: ...
44
+ def is_remote(self) -> bool: ...
45
+ def kind(self) -> int: ...
46
+ def name(self) -> str: ...
47
+ def node_id(self) -> int: ...
48
+ def sequence_nr(self) -> int: ...
49
+ def shapes(self) -> list[list[int]]: ...
50
+ def thread_id(self) -> int: ...
51
+ def flops(self) -> float: ...
52
+ def is_async(self) -> bool: ...
53
+
54
+ class _KinetoEvent:
55
+ def name(self) -> str: ...
56
+ def device_index(self) -> int: ...
57
+ def device_resource_id(self) -> int: ...
58
+ def start_ns(self) -> int: ...
59
+ def end_ns(self) -> int: ...
60
+ def duration_ns(self) -> int: ...
61
+ def is_async(self) -> bool: ...
62
+ def linked_correlation_id(self) -> int: ...
63
+ def shapes(self) -> list[list[int]]: ...
64
+ def dtypes(self) -> list[str]: ...
65
+ def concrete_inputs(self) -> list[Any]: ...
66
+ def kwinputs(self) -> dict[str, Any]: ...
67
+ def device_type(self) -> DeviceType: ...
68
+ def start_thread_id(self) -> int: ...
69
+ def end_thread_id(self) -> int: ...
70
+ def correlation_id(self) -> int: ...
71
+ def fwd_thread_id(self) -> int: ...
72
+ def stack(self) -> list[str]: ...
73
+ def scope(self) -> int: ...
74
+ def sequence_nr(self) -> int: ...
75
+ def flops(self) -> int: ...
76
+ def cuda_elapsed_us(self) -> int: ...
77
+ def privateuse1_elapsed_us(self) -> int: ...
78
+ def is_user_annotation(self) -> bool: ...
79
+
80
+ class _ProfilerResult:
81
+ def events(self) -> list[_KinetoEvent]: ...
82
+ def legacy_events(self) -> list[list[ProfilerEvent]]: ...
83
+ def save(self, path: str) -> None: ...
84
+ def experimental_event_tree(self) -> list[_ProfilerEvent]: ...
85
+ def trace_start_ns(self) -> int: ...
86
+
87
+ class SavedTensor: ...
88
+
89
+ def _enable_profiler(
90
+ config: ProfilerConfig,
91
+ activities: set[ProfilerActivity],
92
+ ) -> None: ...
93
+ def _prepare_profiler(
94
+ config: ProfilerConfig,
95
+ activities: set[ProfilerActivity],
96
+ ) -> None: ...
97
+ def _toggle_collection_dynamic(
98
+ enable: bool,
99
+ activities: set[ProfilerActivity],
100
+ ) -> None: ...
101
+ def _disable_profiler() -> _ProfilerResult: ...
102
+ def _profiler_enabled() -> bool: ...
103
+ def _add_metadata_json(key: str, value: str) -> None: ...
104
+ def _kineto_step() -> None: ...
105
+ def _get_current_graph_task_keep_graph() -> bool: ...
106
+ def _get_sequence_nr() -> int: ...
107
+ def kineto_available() -> bool: ...
108
+ def _record_function_with_args_enter(name: str, *args) -> torch.Tensor: ...
109
+ def _record_function_with_args_exit(handle: torch.Tensor) -> None: ...
110
+ def _supported_activities() -> set[ProfilerActivity]: ...
111
+ def _enable_record_function(enable: bool) -> None: ...
112
+ def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ...
113
+ def _push_saved_tensors_default_hooks(
114
+ pack_hook: Callable[[torch.Tensor], Any],
115
+ unpack_hook: Callable[[Any], torch.Tensor],
116
+ ) -> None: ...
117
+ def _pop_saved_tensors_default_hooks() -> None: ...
118
+ def _unsafe_set_version_counter(t: torch.Tensor, prev_version: int) -> None: ...
119
+ def _enable_profiler_legacy(config: ProfilerConfig) -> None: ...
120
+ def _disable_profiler_legacy() -> list[list[ProfilerEvent]]: ...
121
+ def _profiler_type() -> ActiveProfilerType: ...
122
+ def _saved_tensors_hooks_enable() -> None: ...
123
+ def _saved_tensors_hooks_disable(message: str) -> None: ...
124
+ def _saved_tensors_hooks_get_disabled_error_message() -> str | None: ...
125
+ def _saved_tensors_hooks_set_tracing(is_tracing: bool) -> bool: ...
126
+
127
+ class CreationMeta(Enum):
128
+ DEFAULT = ...
129
+ IN_CUSTOM_FUNCTION = ...
130
+ MULTI_OUTPUT_NODE = ...
131
+ NO_GRAD_MODE = ...
132
+ INFERENCE_MODE = ...
133
+
134
+ def _set_creation_meta(t: torch.Tensor, creation_meta: CreationMeta) -> None: ...
135
+ def _get_creation_meta(t: torch.Tensor) -> CreationMeta: ...
env/Lib/site-packages/torch/_C/_cpu.pyi ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch.types import _bool, _int
2
+
3
+ # Defined in torch/csrc/cpu/Module.cpp
4
+
5
+ def _is_avx2_supported() -> _bool: ...
6
+ def _is_avx512_supported() -> _bool: ...
7
+ def _is_avx512_vnni_supported() -> _bool: ...
8
+ def _is_avx512_bf16_supported() -> _bool: ...
9
+ def _is_amx_tile_supported() -> _bool: ...
10
+ def _init_amx() -> _bool: ...
11
+ def _L1d_cache_size() -> _int: ...
12
+ def _L2_cache_size() -> _int: ...
env/Lib/site-packages/torch/_C/_cudnn.pyi ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+
3
+ from torch.types import _bool, Tuple
4
+
5
+ # Defined in torch/csrc/cuda/shared/cudnn.cpp
6
+ is_cuda: _bool
7
+
8
+ def getRuntimeVersion() -> Tuple[int, int, int]: ...
9
+ def getCompileVersion() -> Tuple[int, int, int]: ...
10
+ def getVersionInt() -> int: ...
11
+
12
+ class RNNMode(int, Enum):
13
+ value: int
14
+ rnn_relu = ...
15
+ rnn_tanh = ...
16
+ lstm = ...
17
+ gru = ...
env/Lib/site-packages/torch/_C/_cusparselt.pyi ADDED
@@ -0,0 +1 @@
 
 
1
+ def getVersionInt() -> int: ...
env/Lib/site-packages/torch/_C/_distributed_autograd.pyi ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import Any
3
+
4
+ import torch
5
+
6
+ # This module is defined in torch/csrc/distributed/autograd/init.cpp
7
+
8
+ class DistAutogradContext:
9
+ def _context_id(self) -> int: ...
10
+ def _recv_functions(self) -> dict[int, Any]: ...
11
+ def _send_functions(self) -> dict[int, Any]: ...
12
+ def _known_worker_ids(self) -> set[int]: ...
13
+
14
+ def _new_context() -> DistAutogradContext: ...
15
+ def _release_context(context_id: int) -> None: ...
16
+ def _get_max_id() -> int: ...
17
+ def _is_valid_context(worker_id: int) -> bool: ...
18
+ def _retrieve_context(context_id: int) -> DistAutogradContext: ...
19
+ def _current_context() -> DistAutogradContext: ...
20
+ def _init(worker_id: int) -> None: ...
21
+ def _get_debug_info() -> dict[str, str]: ...
22
+ def backward(
23
+ context_id: int,
24
+ roots: list[torch.Tensor],
25
+ retain_graph=False,
26
+ ) -> None: ...
27
+ def get_gradients(context_id: int) -> dict[torch.Tensor, torch.Tensor]: ...
env/Lib/site-packages/torch/_C/_distributed_c10d.pyi ADDED
@@ -0,0 +1,699 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ # mypy: disable-error-code="type-arg"
3
+ from datetime import timedelta
4
+ from enum import Enum
5
+ from typing import Any, Optional, overload
6
+
7
+ import torch
8
+ from torch import Tensor
9
+ from torch._C import ScriptObject
10
+ from torch.futures import Future
11
+
12
+ # This module is defined in torch/csrc/distributed/c10d/init.cpp
13
+
14
+ _DEFAULT_FIRST_BUCKET_BYTES: int
15
+ _DEFAULT_NO_TIMEOUT: timedelta
16
+ _DEFAULT_PG_TIMEOUT: timedelta
17
+ _DEFAULT_PG_NCCL_TIMEOUT: timedelta
18
+
19
+ class BuiltinCommHookType(Enum):
20
+ ALLREDUCE = ...
21
+ FP16_COMPRESS = ...
22
+
23
+ def _register_comm_hook(reducer: Reducer, state: Any, comm_hook: Any): ...
24
+ def _register_builtin_comm_hook(
25
+ reducer: Reducer,
26
+ comm_hook_type: BuiltinCommHookType,
27
+ ): ...
28
+ def _set_global_rank(rank: int) -> None: ...
29
+ def _hash_tensors(tensors: list[Tensor]) -> int: ...
30
+
31
+ class GradBucket:
32
+ def index(self) -> int: ...
33
+ def buffer(self) -> Tensor: ...
34
+ def gradients(self) -> list[Tensor]: ...
35
+ def is_last(self) -> bool: ...
36
+ def set_buffer(self, tensor: Tensor) -> None: ...
37
+ def parameters(self) -> list[Tensor]: ...
38
+
39
+ class Reducer:
40
+ def __init__(
41
+ self,
42
+ params: list[Tensor],
43
+ bucket_indices: list[list[int]],
44
+ per_bucket_size_limits: list[int],
45
+ process_group: ProcessGroup,
46
+ expect_sparse_gradients: list[bool] = ...,
47
+ bucket_bytes_cap: int = ..., # kDefaultBucketBytesCap in reducer.hpp
48
+ find_unused_parameters: bool = ...,
49
+ gradient_as_bucket_view: bool = ...,
50
+ param_to_name_mapping: dict[int, str] = ...,
51
+ first_bucket_types_cap: int = ..., # kDefaultFirstBucketBytes in reducer.hpp
52
+ ) -> None: ...
53
+ def prepare_for_forward(self) -> None: ...
54
+ def prepare_for_backward(self, output: list[Tensor]) -> None: ...
55
+ def get_backward_stats(self) -> list[int]: ...
56
+ def _install_post_backward_futures(self, futures: list[Future]) -> None: ...
57
+ def _rebuild_buckets(self) -> bool: ...
58
+ def _get_zeros_like_grad_buckets(self) -> list[GradBucket]: ...
59
+ def _push_all_rebuilt_params(self) -> None: ...
60
+ def _set_forward_pass_work_handle(
61
+ self,
62
+ work: Work,
63
+ use_static_world_size: bool,
64
+ ): ...
65
+ def _get_local_used_map(self) -> Tensor: ...
66
+ def _set_ddp_runtime_logging_sample_rate(self, sample_rate: int) -> None: ...
67
+ def _set_static_graph(self) -> None: ...
68
+ def _run_comm_hook(self, bucket: GradBucket) -> Future: ...
69
+ def set_logger(self, logger: Logger) -> None: ...
70
+ def _remove_autograd_hooks(self) -> None: ...
71
+ def _check_reducer_finalized(self) -> None: ...
72
+ def _set_sparse_metadata(self, global_unique_ids: dict[str, Tensor]) -> None: ...
73
+ def _reset_state(self) -> None: ...
74
+ def _update_process_group(self, new_process_group: ProcessGroup) -> None: ...
75
+
76
+ class DDPLoggingData:
77
+ strs_map: dict[str, str]
78
+ ints_map: dict[str, int]
79
+
80
+ class Logger:
81
+ def __init__(self, reducer: Reducer) -> None: ...
82
+ def set_construction_data_and_log(
83
+ self,
84
+ module_name: str,
85
+ device_ids: list[int],
86
+ output_device: int,
87
+ broadcast_buffers: bool,
88
+ has_sync_bn: bool,
89
+ static_graph: bool,
90
+ ): ...
91
+ def set_runtime_stats_and_log(self) -> None: ...
92
+ def set_error_and_log(self, error: str) -> None: ...
93
+ def _get_ddp_logging_data(self) -> DDPLoggingData: ...
94
+ def _set_comm_hook_name(self, comm_hook: str) -> None: ...
95
+ def _set_uneven_input_join(self) -> None: ...
96
+ def _set_static_graph(self) -> None: ...
97
+
98
+ class _WorkerServer:
99
+ def __init__(self, socket_path: str) -> None: ...
100
+ def shutdown(self) -> None: ...
101
+
102
+ def get_debug_level(): ...
103
+ def set_debug_level(): ...
104
+ def set_debug_level_from_env(): ...
105
+
106
+ class DebugLevel(Enum):
107
+ OFF = ...
108
+ INFO = ...
109
+ DETAIL = ...
110
+
111
+ class ReduceOp:
112
+ def __init__(self, op: RedOpType) -> None: ...
113
+
114
+ SUM: RedOpType = ...
115
+ AVG: RedOpType = ...
116
+ PRODUCT: RedOpType = ...
117
+ MIN: RedOpType = ...
118
+ MAX: RedOpType = ...
119
+ BAND: RedOpType = ...
120
+ BOR: RedOpType = ...
121
+ BXOR: RedOpType = ...
122
+ PREMUL_SUM: RedOpType = ...
123
+ UNUSED: RedOpType = ...
124
+
125
+ class RedOpType(Enum): ...
126
+
127
+ class BroadcastOptions:
128
+ rootRank: int
129
+ rootTensor: int
130
+ timeout: timedelta
131
+ asyncOp: bool
132
+
133
+ class AllreduceOptions:
134
+ reduceOp: ReduceOp
135
+ timeout: timedelta
136
+
137
+ class AllreduceCoalescedOptions(AllreduceOptions): ...
138
+
139
+ class ReduceOptions:
140
+ reduceOp: ReduceOp
141
+ rootRank: int
142
+ rootTensor: int
143
+ timeout: timedelta
144
+
145
+ class AllgatherOptions:
146
+ timeout: timedelta
147
+ asyncOp: bool
148
+
149
+ class GatherOptions:
150
+ rootRank: int
151
+ timeout: timedelta
152
+
153
+ class ScatterOptions:
154
+ rootRank: int
155
+ timeout: timedelta
156
+ asyncOp: bool
157
+
158
+ class ReduceScatterOptions:
159
+ reduceOp: ReduceOp
160
+ timeout: timedelta
161
+ asyncOp: bool
162
+
163
+ class BarrierOptions:
164
+ device_ids: list[int]
165
+ device: torch.device
166
+ timeout: timedelta
167
+
168
+ class AllToAllOptions:
169
+ timeout: timedelta
170
+
171
+ class Store:
172
+ def set(self, key: str, value: str): ...
173
+ def get(self, key: str) -> bytes: ...
174
+ def add(self, key: str, value: int) -> int: ...
175
+ def compare_set(
176
+ self,
177
+ key: str,
178
+ expected_value: str,
179
+ desired_value: str,
180
+ ) -> bytes: ...
181
+ def delete_key(self, key: str) -> bool: ...
182
+ def num_keys(self) -> int: ...
183
+ def set_timeout(self, timeout: timedelta): ...
184
+ @overload
185
+ def wait(self, keys: list[str]): ...
186
+ @overload
187
+ def wait(self, keys: list[str], timeout: timedelta): ...
188
+
189
+ class FileStore(Store):
190
+ def __init__(self, path: str, numWorkers: int = ...) -> None: ...
191
+
192
+ class HashStore(Store):
193
+ def __init__(self) -> None: ...
194
+
195
+ class TCPStore(Store):
196
+ def __init__(
197
+ self,
198
+ host_name: str,
199
+ port: int,
200
+ world_size: int | None = ...,
201
+ is_master: bool = ...,
202
+ timeout: timedelta = ...,
203
+ wait_for_workers: bool = ...,
204
+ multi_tenant: bool = ...,
205
+ master_listen_fd: int | None = ...,
206
+ use_libuv: bool | None = ...,
207
+ ) -> None: ...
208
+ @property
209
+ def host(self) -> str: ...
210
+ @property
211
+ def port(self) -> int: ...
212
+
213
+ class PrefixStore(Store):
214
+ def __init__(self, prefix: str, store: Store) -> None: ...
215
+ @property
216
+ def underlying_store(self) -> Store: ...
217
+
218
+ class _ControlCollectives:
219
+ def barrier(self, key: str, timeout: timedelta, blocking: bool) -> None: ...
220
+ def broadcast_send(self, key: str, data: str, timeout: timedelta) -> None: ...
221
+ def broadcast_recv(self, key: str, timeout: timedelta) -> str: ...
222
+ def gather_send(self, key: str, data: str, timeout: timedelta) -> None: ...
223
+ def gather_recv(self, key: str, timeout: timedelta) -> str: ...
224
+ def scatter_send(self, key: str, data: str, timeout: timedelta) -> None: ...
225
+ def scatter_recv(self, key: str, timeout: timedelta) -> str: ...
226
+ def all_gather(self, key: str, data: str, timeout: timedelta) -> str: ...
227
+ def all_sum(self, key: str, data: int, timeout: timedelta) -> int: ...
228
+
229
+ class _StoreCollectives(_ControlCollectives):
230
+ def __init__(self, store: Store, rank: int, world_size: int) -> None: ...
231
+
232
+ class _DistributedBackendOptions:
233
+ def __init__(self) -> None: ...
234
+ @property
235
+ def store(self) -> Store: ...
236
+ @store.setter
237
+ def store(self, store: Store) -> None: ...
238
+ @property
239
+ def group_rank(self) -> int: ...
240
+ @group_rank.setter
241
+ def group_rank(self, rank: int) -> None: ...
242
+ @property
243
+ def group_size(self) -> int: ...
244
+ @group_size.setter
245
+ def group_size(self, size: int) -> None: ...
246
+ @property
247
+ def timeout(self) -> timedelta: ...
248
+ @timeout.setter
249
+ def timeout(self, timeout: timedelta) -> None: ...
250
+ @property
251
+ def group_id(self) -> str: ...
252
+ @group_id.setter
253
+ def group_id(self, group_id: str) -> None: ...
254
+ @property
255
+ def global_ranks_in_group(self) -> list[int]: ...
256
+ @global_ranks_in_group.setter
257
+ def global_ranks_in_group(self, ranks: list[int]) -> None: ...
258
+
259
+ class Work:
260
+ def is_completed(self) -> bool: ...
261
+ def is_success(self) -> bool: ...
262
+ def exception(self) -> Any: ...
263
+ def wait(self, timeout: timedelta = ...) -> bool: ...
264
+ def get_future(self) -> Future: ...
265
+ def source_rank(self) -> int: ...
266
+ def _source_rank(self) -> int: ...
267
+ def result(self) -> list[Tensor]: ...
268
+ def synchronize(self): ...
269
+ def boxed(self) -> ScriptObject: ...
270
+ @staticmethod
271
+ def unbox(obj: ScriptObject) -> Work: ...
272
+
273
+ class Backend:
274
+ class Options:
275
+ def __init__(self, backend: str, timeout: timedelta = ...) -> None: ...
276
+ @property
277
+ def backend(self) -> str: ...
278
+ @property
279
+ def _timeout(self) -> timedelta: ...
280
+ @_timeout.setter
281
+ def _timeout(self, val: timedelta) -> None: ...
282
+
283
+ def __init__(
284
+ self,
285
+ rank: int,
286
+ size: int,
287
+ ) -> None: ...
288
+ @property
289
+ def supports_splitting(self) -> bool: ...
290
+ @property
291
+ def options(self) -> Options: ...
292
+ def rank(self) -> int: ...
293
+ def size(self) -> int: ...
294
+ def eager_connect_single_device(self, device: torch.device | None) -> None: ...
295
+ def _set_sequence_number_for_group(self) -> None: ...
296
+ def _set_default_timeout(self, timeout: timedelta) -> None: ...
297
+
298
+ class ProcessGroup:
299
+ class Options:
300
+ def __init__(self, backend: str, timeout: timedelta = ...) -> None: ...
301
+ @property
302
+ def backend(self) -> str: ...
303
+ @property
304
+ def _timeout(self) -> timedelta: ...
305
+ @_timeout.setter
306
+ def _timeout(self, val: timedelta) -> None: ...
307
+
308
+ class BackendType(Enum):
309
+ UNDEFINED = ...
310
+ GLOO = ...
311
+ NCCL = ...
312
+ UCC = ...
313
+ MPI = ...
314
+ CUSTOM = ...
315
+
316
+ def __init__(
317
+ self,
318
+ store: Store,
319
+ rank: int,
320
+ size: int,
321
+ options: Options,
322
+ ) -> None: ...
323
+ def rank(self) -> int: ...
324
+ def size(self) -> int: ...
325
+ @overload
326
+ def broadcast(
327
+ self,
328
+ tensors: list[Tensor],
329
+ opts=...,
330
+ ) -> Work: ...
331
+ @overload
332
+ def broadcast(
333
+ self,
334
+ tensor: Tensor,
335
+ root: int,
336
+ ) -> Work: ...
337
+ @overload
338
+ def allreduce(
339
+ self,
340
+ tensors: list[Tensor],
341
+ opts: AllreduceOptions = ...,
342
+ ) -> Work: ...
343
+ @overload
344
+ def allreduce(
345
+ self,
346
+ tensors: list[Tensor],
347
+ op=...,
348
+ ) -> Work: ...
349
+ @overload
350
+ def allreduce(
351
+ self,
352
+ tensor: Tensor,
353
+ op=...,
354
+ ) -> Work: ...
355
+ def allreduce_coalesced(
356
+ self,
357
+ tensors: list[Tensor],
358
+ opts=...,
359
+ ) -> Work: ...
360
+ def reduce_scatter_tensor_coalesced(
361
+ self,
362
+ outputTensors: list[Tensor],
363
+ inputTensors: list[Tensor],
364
+ opts: ReduceScatterOptions | None = None,
365
+ ) -> Work: ...
366
+ @overload
367
+ def reduce(
368
+ self,
369
+ tensors: list[Tensor],
370
+ opts=...,
371
+ ) -> Work: ...
372
+ @overload
373
+ def reduce(
374
+ self,
375
+ tensor: Tensor,
376
+ root: int,
377
+ op=...,
378
+ ) -> Work: ...
379
+ @overload
380
+ def allgather(
381
+ self,
382
+ output_tensors: list[list[Tensor]],
383
+ input_tensors: list[Tensor],
384
+ opts=...,
385
+ ) -> Work: ...
386
+ @overload
387
+ def allgather(
388
+ self,
389
+ output_tensors: list[Tensor],
390
+ input_tensor: Tensor,
391
+ ) -> Work: ...
392
+ def _allgather_base(
393
+ self,
394
+ output: Tensor,
395
+ input: Tensor,
396
+ opts=...,
397
+ ) -> Work: ...
398
+ def allgather_coalesced(
399
+ self,
400
+ output_lists: list[list[Tensor]],
401
+ input_list: list[Tensor],
402
+ opts=...,
403
+ ) -> Work: ...
404
+ def allgather_into_tensor_coalesced(
405
+ self,
406
+ output_lists: list[Tensor],
407
+ input_list: list[Tensor],
408
+ opts=...,
409
+ ) -> Work: ...
410
+ @overload
411
+ def gather(
412
+ self,
413
+ output_tensors: list[list[Tensor]],
414
+ input_tensors: list[Tensor],
415
+ opts=...,
416
+ ) -> Work: ...
417
+ @overload
418
+ def gather(
419
+ self,
420
+ output_tensors: list[Tensor],
421
+ input_tensor: Tensor,
422
+ root: int,
423
+ ) -> Work: ...
424
+ @overload
425
+ def scatter(
426
+ self,
427
+ output_tensors: list[Tensor],
428
+ input_tensors: list[list[Tensor]],
429
+ opts=...,
430
+ ) -> Work: ...
431
+ @overload
432
+ def scatter(
433
+ self,
434
+ output_tensor: Tensor,
435
+ input_tensors: list[Tensor],
436
+ root: int,
437
+ ) -> Work: ...
438
+ @overload
439
+ def reduce_scatter(
440
+ self,
441
+ output_tensors: list[Tensor],
442
+ input_tensors: list[list[Tensor]],
443
+ opts=...,
444
+ ) -> Work: ...
445
+ @overload
446
+ def reduce_scatter(
447
+ self,
448
+ output_tensors: Tensor,
449
+ input_tensor: list[Tensor],
450
+ ) -> Work: ...
451
+ def _reduce_scatter_base(
452
+ self,
453
+ outputTensor: Tensor,
454
+ inputTensor: Tensor,
455
+ opts: ReduceScatterOptions | None,
456
+ ) -> Work: ...
457
+ @overload
458
+ def alltoall_base(
459
+ self,
460
+ output_tensor: Tensor,
461
+ input_tensor: Tensor,
462
+ output_split_sizes: list[int],
463
+ input_split_sizes: list[int],
464
+ opts=...,
465
+ ) -> Work: ...
466
+ @overload
467
+ def alltoall_base(
468
+ self,
469
+ output: Tensor,
470
+ input: Tensor,
471
+ output_split_sizes: list[int],
472
+ input_split_sizes: list[int],
473
+ ) -> Work: ...
474
+ @overload
475
+ def alltoall(
476
+ self,
477
+ output_tensor: list[Tensor],
478
+ input_tensor: list[Tensor],
479
+ opts=...,
480
+ ) -> Work: ...
481
+ @overload
482
+ def alltoall(
483
+ self,
484
+ output: list[Tensor],
485
+ input: list[Tensor],
486
+ ) -> Work: ...
487
+ def send(
488
+ self,
489
+ tensors: list[Tensor],
490
+ dstRank: int,
491
+ tag: int,
492
+ ) -> Work: ...
493
+ def recv(
494
+ self,
495
+ tensors: list[Tensor],
496
+ srcRank: int,
497
+ tag: int,
498
+ ) -> Work: ...
499
+ def recv_anysource(self, tensors: list[Tensor], tag: int) -> Work: ...
500
+ def barrier(self, opts=...) -> Work: ...
501
+ def boxed(self) -> ScriptObject: ...
502
+ @staticmethod
503
+ def unbox(obj: ScriptObject) -> ProcessGroup: ...
504
+ def _start_coalescing(self, device: torch.device) -> None: ...
505
+ def _end_coalescing(self, device: torch.device) -> Work: ...
506
+ def _get_backend_name(self) -> str: ...
507
+ def _backend_id(self, backend_type: BackendType) -> int: ...
508
+ @property
509
+ def _device_types(self) -> list[torch.device]: ...
510
+ def _get_backend(self, device: torch.device) -> Backend: ...
511
+ def _register_backend(
512
+ self,
513
+ device: torch.device,
514
+ backend_type: BackendType,
515
+ backend: Backend | None,
516
+ ) -> None: ...
517
+ def _set_group_name(self, name: str) -> None: ...
518
+ def _set_group_desc(self, desc: str) -> None: ...
519
+ def name(self) -> str: ...
520
+ def _has_hooks(self) -> bool: ...
521
+ def _wait_for_pending_works(self) -> None: ...
522
+ def _set_sequence_number_for_group(self) -> None: ...
523
+ @property
524
+ def bound_device_id(self) -> torch.device | None: ...
525
+ @bound_device_id.setter
526
+ def bound_device_id(self, device: torch.device | None) -> None: ...
527
+ @property
528
+ def group_name(self) -> str: ...
529
+ @property
530
+ def group_desc(self) -> str: ...
531
+
532
+ class ProcessGroupGloo(Backend):
533
+ class Device: ...
534
+
535
+ class Options(ProcessGroup.Options):
536
+ devices: list[ProcessGroupGloo.Device]
537
+ threads: int
538
+
539
+ def __init__(self): ...
540
+
541
+ def __init__(
542
+ self,
543
+ store: Store,
544
+ rank: int,
545
+ size: int,
546
+ timeout: timedelta,
547
+ ) -> None: ...
548
+ @staticmethod
549
+ def create_device(hostname="", interface="") -> Device: ...
550
+ @staticmethod
551
+ def create_default_device() -> Device: ...
552
+ def _set_default_timeout(self, timeout) -> None: ...
553
+
554
+ class _ProcessGroupWrapper(Backend):
555
+ def __init__(self, pg: Backend, gloo_pg: ProcessGroupGloo) -> None: ...
556
+ wrapped_pg: Backend
557
+
558
+ class ProcessGroupNCCL(Backend):
559
+ class NCCLConfig:
560
+ blocking: int
561
+ cga_cluster_size: int
562
+ min_ctas: int
563
+ max_ctas: int
564
+
565
+ class Options(ProcessGroup.Options):
566
+ config: ProcessGroupNCCL.NCCLConfig
567
+ is_high_priority_stream: bool
568
+ split_from: ProcessGroupNCCL
569
+ split_color: int
570
+ global_ranks_in_group: list[int]
571
+ group_name: str
572
+
573
+ def __init__(self, is_high_priority_stream: bool = False): ...
574
+
575
+ def __init__(
576
+ self,
577
+ store: Store,
578
+ rank: int,
579
+ size: int,
580
+ options: Options,
581
+ ) -> None: ...
582
+ def _group_start(self) -> None: ...
583
+ def _group_end(self) -> None: ...
584
+ def _set_default_timeout(self, timeout) -> None: ...
585
+ def _shutdown(self) -> None: ...
586
+ def perform_nocolor_split(self, device: torch.device) -> None: ...
587
+ def comm_split_count(self) -> int: ...
588
+ def _add_ephemeral_timeout(self, timeout: timedelta) -> None: ...
589
+ @property
590
+ def uid(self) -> int: ...
591
+ @property
592
+ def options(self) -> Options: ... # type: ignore[override]
593
+
594
+ class ProcessGroupUCC(Backend):
595
+ def __init__(
596
+ self,
597
+ store: Store,
598
+ rank: int,
599
+ size: int,
600
+ timeout: timedelta,
601
+ ) -> None: ...
602
+
603
+ class ProcessGroupMPI(Backend):
604
+ def __init__(
605
+ self,
606
+ rank: int,
607
+ size: int,
608
+ pgComm: int,
609
+ ) -> None: ...
610
+ @staticmethod
611
+ def create(ranks: list[int]) -> ProcessGroupMPI: ...
612
+
613
+ def _compute_bucket_assignment_by_size(
614
+ tensors: list[Tensor],
615
+ bucket_size_limits: list[int],
616
+ expect_sparse_gradient: list[bool] = ...,
617
+ tensor_indices: list[int] = ...,
618
+ ) -> tuple[list[list[int]], list[int]]: ...
619
+ def _broadcast_coalesced(
620
+ process_group: ProcessGroup,
621
+ tensors: list[Tensor],
622
+ buffer_size: int,
623
+ src: int,
624
+ ): ...
625
+ def _test_python_store(store: Store): ...
626
+ def _verify_params_across_processes(
627
+ process_group: ProcessGroup,
628
+ params: list[Tensor],
629
+ logger: Logger | None,
630
+ ): ...
631
+ def _make_nccl_premul_sum(factor: float | list[Tensor]) -> ReduceOp: ...
632
+ def _register_process_group(
633
+ group_name: str,
634
+ process_group: ProcessGroup,
635
+ ) -> None: ...
636
+ def _resolve_process_group(group_name: str) -> ProcessGroup: ...
637
+ def _register_work(tensor: torch.Tensor, work: Work) -> ProcessGroup: ...
638
+ def _unregister_all_process_groups() -> None: ...
639
+ def _unregister_process_group(group_name: str) -> None: ...
640
+
641
+ class _SymmetricMemory:
642
+ @staticmethod
643
+ def set_group_info(
644
+ group_name: str,
645
+ rank: int,
646
+ world_size: int,
647
+ store: Store,
648
+ ) -> None: ...
649
+ @staticmethod
650
+ def empty_strided_p2p(
651
+ size: torch.types._size,
652
+ stride: torch.types._size,
653
+ dtype: torch.dtype,
654
+ device: torch.device,
655
+ group_name: str,
656
+ ) -> torch.Tensor: ...
657
+ @property
658
+ def rank(self) -> int: ...
659
+ @property
660
+ def world_size(self) -> int: ...
661
+ @staticmethod
662
+ def rendezvous(tensor: torch.Tensor) -> _SymmetricMemory: ...
663
+ def get_buffer(
664
+ self,
665
+ rank: int,
666
+ sizes: torch.types._size,
667
+ dtype: torch.dtype,
668
+ storage_offset: int | None = 0,
669
+ ) -> torch.Tensor: ...
670
+ def barrier(self, channel: int = 0) -> None: ...
671
+ def put_signal(self, dst_rank: int, channel: int = 0) -> None: ...
672
+ def wait_signal(self, src_rank: int, channel: int = 0) -> None: ...
673
+
674
+ class ProcessGroupCudaP2P(Backend):
675
+ class Options:
676
+ nccl_options: Optional[ProcessGroupNCCL.Options]
677
+ buffer_size: Optional[int]
678
+
679
+ def __init__(self) -> None: ...
680
+
681
+ def __init__(
682
+ self,
683
+ store: Store,
684
+ rank: int,
685
+ size: int,
686
+ options: ProcessGroupCudaP2P.Options,
687
+ ) -> None: ...
688
+ def is_p2p_available(self) -> bool: ...
689
+ def get_buffer_size(self) -> int: ...
690
+ def stream(self) -> torch.cuda.Stream: ...
691
+ def intra_node_barrier(self) -> Work: ...
692
+ def get_p2p_buffer(
693
+ self,
694
+ rank: int,
695
+ sizes: torch.Size,
696
+ dtype: torch.dtype,
697
+ storage_offset: Optional[int] = 0,
698
+ ) -> torch.Tensor: ...
699
+ def _shutdown(self) -> None: ...
env/Lib/site-packages/torch/_C/_distributed_rpc.pyi ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ # mypy: disable-error-code="type-arg"
3
+ from datetime import timedelta
4
+ from typing import Any, Generic, overload, TypeVar
5
+
6
+ import torch
7
+ from torch._C import Future
8
+ from torch._C._autograd import ProfilerEvent
9
+ from torch._C._distributed_c10d import Store
10
+ from torch._C._profiler import ProfilerConfig
11
+
12
+ # This module is defined in torch/csrc/distributed/rpc/init.cpp
13
+
14
+ _DEFAULT_INIT_METHOD: str
15
+ _DEFAULT_NUM_WORKER_THREADS: int
16
+ _UNSET_RPC_TIMEOUT: float
17
+ _DEFAULT_RPC_TIMEOUT_SEC: float
18
+
19
+ _T = TypeVar("_T")
20
+
21
+ class RpcBackendOptions:
22
+ rpc_timeout: float
23
+ init_method: str
24
+ def __init__(
25
+ self,
26
+ rpc_timeout: float = ...,
27
+ init_method: str = ...,
28
+ ) -> None: ...
29
+
30
+ class WorkerInfo:
31
+ def __init__(self, name: str, worker_id: int) -> None: ...
32
+ @property
33
+ def name(self) -> str: ...
34
+ @property
35
+ def id(self) -> int: ...
36
+ def __eq__(self, other: object) -> bool: ...
37
+
38
+ class RpcAgent:
39
+ def join(self, shutdown: bool = False, timeout: float = 0): ...
40
+ def sync(self): ...
41
+ def shutdown(self): ...
42
+ @overload
43
+ def get_worker_info(self) -> WorkerInfo: ...
44
+ @overload
45
+ def get_worker_info(self, workerName: str) -> WorkerInfo: ...
46
+ def get_worker_infos(self) -> list[WorkerInfo]: ...
47
+ def _get_device_map(self, dst: WorkerInfo) -> dict[torch.device, torch.device]: ...
48
+ def get_debug_info(self) -> dict[str, str]: ...
49
+ def get_metrics(self) -> dict[str, str]: ...
50
+
51
+ class PyRRef(Generic[_T]):
52
+ def __init__(self, value: _T, type_hint: Any = None) -> None: ...
53
+ def is_owner(self) -> bool: ...
54
+ def confirmed_by_owner(self) -> bool: ...
55
+ def owner(self) -> WorkerInfo: ...
56
+ def owner_name(self) -> str: ...
57
+ def to_here(self, timeout: float = ...) -> _T: ...
58
+ def local_value(self) -> Any: ...
59
+ def rpc_sync(self, timeout: float = ...) -> Any: ...
60
+ def rpc_async(self, timeout: float = ...) -> Any: ...
61
+ def remote(self, timeout: float = ...) -> Any: ...
62
+ def _serialize(self) -> tuple: ...
63
+ @staticmethod
64
+ def _deserialize(tp: tuple) -> PyRRef: ...
65
+ def _get_type(self) -> type[_T]: ...
66
+ def _get_future(self) -> Future[_T]: ...
67
+ def _get_profiling_future(self) -> Future[_T]: ...
68
+ def _set_profiling_future(self, profilingFuture: Future[_T]): ...
69
+
70
+ class _TensorPipeRpcBackendOptionsBase(RpcBackendOptions):
71
+ num_worker_threads: int
72
+ device_maps: dict[str, dict[torch.device, torch.device]]
73
+ devices: list[torch.device]
74
+ def __init__(
75
+ self,
76
+ num_worker_threads: int,
77
+ _transports: list | None,
78
+ _channels: list | None,
79
+ rpc_timeout: float = ...,
80
+ init_method: str = ...,
81
+ device_maps: dict[str, dict[torch.device, torch.device]] = {}, # noqa: B006
82
+ devices: list[torch.device] = [], # noqa: B006
83
+ ) -> None: ...
84
+ def _set_device_map(
85
+ self,
86
+ to: str,
87
+ device_map: dict[torch.device, torch.device],
88
+ ): ...
89
+
90
+ class TensorPipeAgent(RpcAgent):
91
+ def __init__(
92
+ self,
93
+ store: Store,
94
+ name: str,
95
+ worker_id: int,
96
+ world_size: int | None,
97
+ opts: _TensorPipeRpcBackendOptionsBase,
98
+ reverse_device_maps: dict[str, dict[torch.device, torch.device]],
99
+ devices: list[torch.device],
100
+ ) -> None: ...
101
+ def join(self, shutdown: bool = False, timeout: float = 0): ...
102
+ def shutdown(self): ...
103
+ @overload
104
+ def get_worker_info(self) -> WorkerInfo: ...
105
+ @overload
106
+ def get_worker_info(self, workerName: str) -> WorkerInfo: ...
107
+ @overload
108
+ def get_worker_info(self, id: int) -> WorkerInfo: ...
109
+ def get_worker_infos(self) -> list[WorkerInfo]: ...
110
+ def _get_device_map(self, dst: WorkerInfo) -> dict[torch.device, torch.device]: ...
111
+ def _update_group_membership(
112
+ self,
113
+ worker_info: WorkerInfo,
114
+ my_devices: list[torch.device],
115
+ reverse_device_map: dict[str, dict[torch.device, torch.device]],
116
+ is_join: bool,
117
+ ): ...
118
+ def _get_backend_options(self) -> _TensorPipeRpcBackendOptionsBase: ...
119
+ @property
120
+ def is_static_group(self) -> bool: ...
121
+ @property
122
+ def store(self) -> Store: ...
123
+
124
+ def _is_current_rpc_agent_set() -> bool: ...
125
+ def _get_current_rpc_agent() -> RpcAgent: ...
126
+ def _set_and_start_rpc_agent(agent: RpcAgent): ...
127
+ def _reset_current_rpc_agent(): ...
128
+ def _delete_all_user_and_unforked_owner_rrefs(timeout: timedelta = ...): ...
129
+ def _destroy_rref_context(ignoreRRefLeak: bool): ...
130
+ def _rref_context_get_debug_info() -> dict[str, str]: ...
131
+ def _cleanup_python_rpc_handler(): ...
132
+ def _invoke_rpc_builtin(
133
+ dst: WorkerInfo,
134
+ opName: str,
135
+ rpcTimeoutSeconds: float,
136
+ *args: Any,
137
+ **kwargs: Any,
138
+ ): ...
139
+ def _invoke_rpc_python_udf(
140
+ dst: WorkerInfo,
141
+ pickledPythonUDF: str,
142
+ tensors: list[torch.Tensor],
143
+ rpcTimeoutSeconds: float,
144
+ isAsyncExecution: bool,
145
+ ): ...
146
+ def _invoke_rpc_torchscript(
147
+ dstWorkerName: str,
148
+ qualifiedNameStr: str,
149
+ argsTuple: tuple,
150
+ kwargsDict: dict,
151
+ rpcTimeoutSeconds: float,
152
+ isAsyncExecution: bool,
153
+ ): ...
154
+ def _invoke_remote_builtin(
155
+ dst: WorkerInfo,
156
+ opName: str,
157
+ rpcTimeoutSeconds: float,
158
+ *args: Any,
159
+ **kwargs: Any,
160
+ ): ...
161
+ def _invoke_remote_python_udf(
162
+ dst: WorkerInfo,
163
+ pickledPythonUDF: str,
164
+ tensors: list[torch.Tensor],
165
+ rpcTimeoutSeconds: float,
166
+ isAsyncExecution: bool,
167
+ ): ...
168
+ def _invoke_remote_torchscript(
169
+ dstWorkerName: WorkerInfo,
170
+ qualifiedNameStr: str,
171
+ rpcTimeoutSeconds: float,
172
+ isAsyncExecution: bool,
173
+ *args: Any,
174
+ **kwargs: Any,
175
+ ): ...
176
+ def get_rpc_timeout() -> float: ...
177
+ def enable_gil_profiling(flag: bool): ...
178
+ def _set_rpc_timeout(rpcTimeoutSeconds: float): ...
179
+
180
+ class RemoteProfilerManager:
181
+ @staticmethod
182
+ def set_current_profiling_key(key: str): ...
183
+
184
+ def _enable_server_process_global_profiler(new_config: ProfilerConfig): ...
185
+ def _disable_server_process_global_profiler() -> list[list[list[ProfilerEvent]]]: ...
186
+ def _set_profiler_node_id(default_node_id: int): ...
187
+ def _enable_jit_rref_pickle(): ...
188
+ def _disable_jit_rref_pickle(): ...
env/Lib/site-packages/torch/_C/_distributed_rpc_testing.pyi ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch._C._distributed_c10d import Store
3
+ from torch._C._distributed_rpc import _TensorPipeRpcBackendOptionsBase, TensorPipeAgent
4
+
5
+ # This module is defined in torch/csrc/distributed/rpc/testing/init.cpp
6
+
7
+ class FaultyTensorPipeRpcBackendOptions(_TensorPipeRpcBackendOptionsBase):
8
+ def __init__(
9
+ self,
10
+ num_worker_threads: int,
11
+ rpc_timeout: float,
12
+ init_method: str,
13
+ messages_to_fail: list[str],
14
+ messages_to_delay: dict[str, float],
15
+ num_fail_sends: int,
16
+ ) -> None: ...
17
+ num_send_recv_threads: int
18
+ messages_to_fail: list[str]
19
+ messages_to_delay: dict[str, float]
20
+ num_fail_sends: int
21
+
22
+ class FaultyTensorPipeAgent(TensorPipeAgent):
23
+ def __init__(
24
+ self,
25
+ store: Store,
26
+ name: str,
27
+ rank: int,
28
+ world_size: int,
29
+ options: FaultyTensorPipeRpcBackendOptions,
30
+ reverse_device_maps: dict[str, dict[torch.device, torch.device]],
31
+ devices: list[torch.device],
32
+ ) -> None: ...
env/Lib/site-packages/torch/_C/_functions.pyi ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import AnyStr
2
+
3
+ from torch import Tensor
4
+
5
+ class UndefinedGrad:
6
+ def __init__(self) -> None: ...
7
+ def __call__(self, *inputs: Tensor) -> list[Tensor]: ...
8
+
9
+ class DelayedError:
10
+ def __init__(self, msg: AnyStr, num_inputs: int) -> None: ...
11
+ def __call__(self, inputs: list[Tensor]) -> list[Tensor]: ...
env/Lib/site-packages/torch/_C/_functorch.pyi ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from enum import Enum
3
+
4
+ from torch import Tensor
5
+
6
+ # Defined in torch/csrc/functorch/init.cpp
7
+
8
+ def _set_dynamic_layer_keys_included(included: bool) -> None: ...
9
+ def get_unwrapped(tensor: Tensor) -> Tensor: ...
10
+ def is_batchedtensor(tensor: Tensor) -> bool: ...
11
+ def is_functionaltensor(tensor: Tensor) -> bool: ...
12
+ def is_functorch_wrapped_tensor(tensor: Tensor) -> bool: ...
13
+ def is_gradtrackingtensor(tensor: Tensor) -> bool: ...
14
+ def is_legacy_batchedtensor(tensor: Tensor) -> bool: ...
15
+ def maybe_get_bdim(tensor: Tensor) -> int: ...
16
+ def maybe_get_level(tensor: Tensor) -> int: ...
17
+ def maybe_current_level() -> int | None: ...
18
+ def unwrap_if_dead(tensor: Tensor) -> Tensor: ...
19
+ def _unwrap_for_grad(tensor: Tensor, level: int) -> Tensor: ...
20
+ def _wrap_for_grad(tensor: Tensor, level: int) -> Tensor: ...
21
+ def _unwrap_batched(tensor: Tensor, level: int) -> tuple[Tensor, int | None]: ...
22
+ def current_level() -> int: ...
23
+ def count_jvp_interpreters() -> int: ...
24
+ def _add_batch_dim(tensor: Tensor, bdim: int, level: int) -> Tensor: ...
25
+ def set_single_level_autograd_function_allowed(allowed: bool) -> None: ...
26
+ def get_single_level_autograd_function_allowed() -> bool: ...
27
+ def _unwrap_functional_tensor(tensor: Tensor, reapply_views: bool) -> Tensor: ...
28
+ def _wrap_functional_tensor(tensor: Tensor, level: int) -> Tensor: ...
29
+ def _vmap_increment_nesting(batch_size: int, randomness: str) -> int: ...
30
+ def _vmap_decrement_nesting() -> int: ...
31
+ def _grad_increment_nesting() -> int: ...
32
+ def _grad_decrement_nesting() -> int: ...
33
+ def _jvp_increment_nesting() -> int: ...
34
+ def _jvp_decrement_nesting() -> int: ...
35
+
36
+ # Defined in aten/src/ATen/functorch/Interpreter.h
37
+ class TransformType(Enum):
38
+ Torch: TransformType = ...
39
+ Vmap: TransformType = ...
40
+ Grad: TransformType = ...
41
+ Jvp: TransformType = ...
42
+ Functionalize: TransformType = ...
43
+
44
+ class RandomnessType(Enum):
45
+ Error: TransformType = ...
46
+ Same: TransformType = ...
47
+ Different: TransformType = ...
48
+
49
+ class CInterpreter:
50
+ def key(self) -> TransformType: ...
51
+ def level(self) -> int: ...
52
+
53
+ class CGradInterpreterPtr:
54
+ def __init__(self, interpreter: CInterpreter) -> None: ...
55
+ def lift(self, Tensor) -> Tensor: ...
56
+ def prevGradMode(self) -> bool: ...
57
+
58
+ class CJvpInterpreterPtr:
59
+ def __init__(self, interpreter: CInterpreter) -> None: ...
60
+ def lift(self, Tensor) -> Tensor: ...
61
+ def prevFwdGradMode(self) -> bool: ...
62
+
63
+ class CFunctionalizeInterpreterPtr:
64
+ def __init__(self, interpreter: CInterpreter) -> None: ...
65
+ def key(self) -> TransformType: ...
66
+ def level(self) -> int: ...
67
+ def functionalizeAddBackViews(self) -> bool: ...
68
+
69
+ class CVmapInterpreterPtr:
70
+ def __init__(self, interpreter: CInterpreter) -> None: ...
71
+ def key(self) -> TransformType: ...
72
+ def level(self) -> int: ...
73
+ def batchSize(self) -> int: ...
74
+ def randomness(self) -> RandomnessType: ...
75
+
76
+ class DynamicLayer: ...
77
+
78
+ def get_dynamic_layer_stack_depth() -> int: ...
79
+ def get_interpreter_stack() -> list[CInterpreter]: ...
80
+ def peek_interpreter_stack() -> CInterpreter: ...
81
+ def pop_dynamic_layer_stack() -> DynamicLayer: ...
82
+ def pop_dynamic_layer_stack_and_undo_to_depth(int) -> None: ...
83
+ def push_dynamic_layer_stack(dl: DynamicLayer) -> int: ...
env/Lib/site-packages/torch/_C/_instruction_counter.pyi ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Defined in torch/csrc/instruction_counter/Module.cpp
2
+
3
+ def start() -> int: ...
4
+ def end(id: int) -> int: ...
env/Lib/site-packages/torch/_C/_itt.pyi ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Defined in torch/csrc/itt.cpp
2
+ def is_available() -> None: ...
3
+ def rangePush(message: str) -> None: ...
4
+ def rangePop() -> None: ...
5
+ def mark(message: str) -> None: ...
env/Lib/site-packages/torch/_C/_lazy.pyi ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from torch import Tensor
3
+
4
+ # defined in torch/csrc/lazy/python/init.cpp
5
+ def _mark_step(device: str, devices: list[str], wait: bool): ...
6
+ def _wait_device_ops(devices: list[str]): ...
7
+ def _reset_metrics(): ...
8
+ def _counter_names() -> list[str]: ...
9
+ def _counter_value(name: str) -> int: ...
10
+ def _metrics_report() -> str: ...
11
+ def _get_graph_hash(tensors: list[Tensor]) -> str: ...
12
+ def _sync_multi(
13
+ tensors: list[Tensor],
14
+ devices: list[str],
15
+ wait: bool = True,
16
+ sync_ltc_data: bool = True,
17
+ ): ...
18
+ def _get_tensor_id(tensor: Tensor) -> int: ...
19
+ def _get_tensors_text(tensors: list[Tensor]) -> str: ...
20
+ def _get_tensors_dot(tensors: list[Tensor]) -> str: ...
21
+ def _get_tensors_backend(tensors: list[Tensor]) -> str: ...
22
+ def _get_force_fallback() -> str: ...
23
+ def _set_force_fallback(newval: str): ...
24
+ def _clear_ir_cache(): ...
25
+ def _dump_ir_cache(filename: str): ...
26
+ def _set_reuse_ir(val: bool): ...
27
+ def _get_default_device_type(): ...
env/Lib/site-packages/torch/_C/_lazy_ts_backend.pyi ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ # defined in torch/csrc/lazy/python/init.cpp
3
+
4
+ from typing import Any
5
+
6
+ from torch import Tensor
7
+
8
+ def _init(): ...
9
+ def _get_tensors_ts_device_data_node(
10
+ tensors: list[Tensor],
11
+ ) -> tuple[list[int], list[Any]]: ...
12
+ def _run_cached_graph(hash_str: str, graph_inputs: list[Any]) -> list[Tensor]: ...
env/Lib/site-packages/torch/_C/_monitor.pyi ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Defined in torch/csrc/monitor/python_init.cpp
2
+
3
+ import datetime
4
+ from enum import Enum
5
+ from typing import Callable
6
+
7
+ class Aggregation(Enum):
8
+ VALUE = ...
9
+ MEAN = ...
10
+ COUNT = ...
11
+ SUM = ...
12
+ MAX = ...
13
+ MIN = ...
14
+
15
+ class Stat:
16
+ name: str
17
+ count: int
18
+ def __init__(
19
+ self,
20
+ name: str,
21
+ aggregations: list[Aggregation],
22
+ window_size: int,
23
+ max_samples: int = -1,
24
+ ) -> None: ...
25
+ def add(self, v: float) -> None: ...
26
+ def get(self) -> dict[Aggregation, float]: ...
27
+
28
+ class Event:
29
+ name: str
30
+ timestamp: datetime.datetime
31
+ data: dict[str, int | float | bool | str]
32
+ def __init__(
33
+ self,
34
+ name: str,
35
+ timestamp: datetime.datetime,
36
+ data: dict[str, int | float | bool | str],
37
+ ) -> None: ...
38
+
39
+ def log_event(e: Event) -> None: ...
40
+
41
+ class EventHandlerHandle: ...
42
+
43
+ def register_event_handler(handler: Callable[[Event], None]) -> EventHandlerHandle: ...
44
+ def unregister_event_handler(handle: EventHandlerHandle) -> None: ...
env/Lib/site-packages/torch/_C/_nn.pyi ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @generated by tools/pyi/gen_pyi.py from torch/_C/_nn.pyi.in
2
+ # mypy: disable-error-code="type-arg"
3
+
4
+ from typing import List, Literal, Optional, overload, Sequence, Tuple, Union
5
+
6
+ from torch import memory_format, Tensor
7
+ from torch.types import _bool, _device, _dtype, _int, _size
8
+
9
+ # Defined in tools/autograd/templates/python_nn_functions.cpp
10
+
11
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
12
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
13
+ def avg_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ...
14
+ def avg_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ...
15
+ def elu_(input: Tensor, alpha: float = ...) -> Tensor: ...
16
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ...
17
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ...
18
+ def gelu(input: Tensor, approximate: str = ...) -> Tensor: ...
19
+ def hardsigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
20
+ def hardtanh(input: Tensor, min_val: float = ..., max_val: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ...
21
+ def hardtanh_(input: Tensor, min_val: float = ..., max_val: float = ...) -> Tensor: ...
22
+ def leaky_relu(input: Tensor, negative_slope: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ...
23
+ def leaky_relu_(input: Tensor, negative_slope: float = ...) -> Tensor: ...
24
+ def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ...
25
+ def log_sigmoid(input: Tensor) -> Tensor: ...
26
+ def one_hot(tensor: Tensor, num_classes: int = ...) -> Tensor: ...
27
+ def pad(input: Tensor, pad: Sequence[int], mode: str = ..., value: Optional[float] = None) -> Tensor: ...
28
+ def scaled_dot_product_attention(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None, enable_gqa: bool = False) -> Tensor: ...
29
+ def softplus(input: Tensor, beta: float = ..., threshold: float = ...) -> Tensor: ...
30
+ def softshrink(input: Tensor, lambd: float = ...) -> Tensor: ...
31
+
32
+ # Defined in aten/src/ATen/native/mkldnn/Linear.cpp
33
+ def mkldnn_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: ...
34
+
35
+ # Defined at aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp
36
+ def mkldnn_reorder_conv2d_weight(
37
+ self: Tensor,
38
+ padding: List,
39
+ stride: List,
40
+ dilatation: List,
41
+ groups: int,
42
+ ) -> Tensor: ...
43
+ def mkldnn_reorder_conv3d_weight(
44
+ self: Tensor,
45
+ padding: List,
46
+ stride: List,
47
+ dilatation: List,
48
+ groups: int,
49
+ ) -> Tensor: ...
50
+
51
+ # Defined in aten/src/ATen/native/mkldnn/Prelu.cpp
52
+ def mkldnn_prelu(input: Tensor, weight: Tensor) -> Tensor: ...
53
+
54
+ # Defined at tools/autograd/templates/python_nn_functions.cpp
55
+ @overload
56
+ def _parse_to(
57
+ device: _device,
58
+ dtype: _dtype,
59
+ non_blocking: _bool,
60
+ copy: _bool,
61
+ *,
62
+ memory_format: memory_format,
63
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
64
+ @overload
65
+ def _parse_to(
66
+ dtype: _dtype,
67
+ non_blocking: _bool,
68
+ copy: _bool,
69
+ *,
70
+ memory_format: memory_format,
71
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
72
+ @overload
73
+ def _parse_to(
74
+ tensor: Tensor,
75
+ non_blocking: _bool,
76
+ copy: _bool,
77
+ *,
78
+ memory_format: memory_format,
79
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
80
+
81
+ # Defined in aten/src/ATen/native/PackedSequence.cpp
82
+ def pad_sequence(
83
+ sequences: Union[List[Tensor], Tuple[Tensor, ...]],
84
+ batch_first: bool = False,
85
+ padding_value: float = 0.0,
86
+ padding_side: Union[Literal["left", "right"], str] = "right",
87
+ ) -> Tensor: ...
88
+ def flatten_dense_tensors(tensors: List[Tensor]) -> Tensor: ...
89
+ def unflatten_dense_tensors(flat: Tensor, tensors: List[Tensor]) -> List[Tensor]: ...
env/Lib/site-packages/torch/_C/_nvtx.pyi ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ # Defined in torch/csrc/cuda/shared/nvtx.cpp
3
+ def rangePushA(message: str) -> int: ...
4
+ def rangePop() -> int: ...
5
+ def rangeStartA(message: str) -> int: ...
6
+ def rangeEnd(int) -> None: ...
7
+ def markA(message: str) -> None: ...
env/Lib/site-packages/torch/_C/_onnx.pyi ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Defined in torch/csrc/onnx/init.cpp
2
+
3
+ from enum import Enum
4
+
5
+ PRODUCER_VERSION: str
6
+
7
class TensorProtoDataType(Enum):
    """Tensor element data types; presumably mirrors ONNX's
    ``TensorProto.DataType`` values (the C++ binding is not visible here)."""

    UNDEFINED = ...
    FLOAT = ...
    UINT8 = ...
    INT8 = ...
    UINT16 = ...
    INT16 = ...
    INT32 = ...
    INT64 = ...
    STRING = ...
    BOOL = ...
    FLOAT16 = ...
    DOUBLE = ...
    UINT32 = ...
    UINT64 = ...
    COMPLEX64 = ...
    COMPLEX128 = ...
    BFLOAT16 = ...
    FLOAT8E5M2 = ...
    FLOAT8E4M3FN = ...
    FLOAT8E5M2FNUZ = ...
    FLOAT8E4M3FNUZ = ...
29
+
30
class OperatorExportTypes(Enum):
    """Export strategies for the ONNX exporter: pure ONNX, ATen ops,
    ONNX-with-ATen-fallback, or passing unknown ops through unchanged."""

    ONNX = ...
    ONNX_ATEN = ...
    ONNX_ATEN_FALLBACK = ...
    ONNX_FALLTHROUGH = ...
35
+
36
class TrainingMode(Enum):
    """Training-mode setting applied to the model when exporting to ONNX."""

    EVAL = ...
    PRESERVE = ...
    TRAINING = ...
env/Lib/site-packages/torch/_C/_profiler.pyi ADDED
@@ -0,0 +1,244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+ from typing import Any, Literal
3
+ from typing_extensions import TypeAlias
4
+
5
+ from torch._C import device, dtype, layout
6
+
7
+ # defined in torch/csrc/profiler/python/init.cpp
8
+
9
class RecordScope(Enum):
    """Scope tag attached to profiler record-function events."""

    FUNCTION = ...
    BACKWARD_FUNCTION = ...
    TORCHSCRIPT_FUNCTION = ...
    KERNEL_FUNCTION_DTYPE = ...
    CUSTOM_CLASS = ...
    BUILD_FEATURE = ...
    LITE_INTERPRETER = ...
    USER_SCOPE = ...
    STATIC_RUNTIME_OP = ...
    STATIC_RUNTIME_MODEL = ...

class ProfilerState(Enum):
    """Which profiler backend/state is selected (see ``ProfilerConfig``)."""

    Disable = ...
    CPU = ...
    CUDA = ...
    NVTX = ...
    ITT = ...
    KINETO = ...
    KINETO_GPU_FALLBACK = ...
    KINETO_PRIVATEUSE1_FALLBACK = ...
    KINETO_PRIVATEUSE1 = ...

class ActiveProfilerType(Enum):
    """Identifies which profiler implementation is currently active."""

    NONE = ...
    LEGACY = ...
    KINETO = ...
    NVTX = ...
    ITT = ...

class ProfilerActivity(Enum):
    """Hardware/runtime activity categories a profile can record."""

    CPU = ...
    CUDA = ...
    XPU = ...
    MTIA = ...
    PrivateUse1 = ...

class _EventType(Enum):
    """Discriminator for the ``_ProfilerEvent.extra_fields`` payload variants
    (see ``_ProfilerEvent.typed`` below)."""

    TorchOp = ...
    Backend = ...
    Allocation = ...
    OutOfMemory = ...
    PyCall = ...
    PyCCall = ...
    Kineto = ...
54
+
55
class _ExperimentalConfig:
    """Optional/experimental profiler knobs; every argument has a default."""

    def __init__(
        self,
        profiler_metrics: list[str] = ...,
        profiler_measure_per_kernel: bool = ...,
        verbose: bool = ...,
        performance_events: list[str] = ...,
        enable_cuda_sync_events: bool = ...,
    ) -> None: ...

class ProfilerConfig:
    """Full profiler configuration handed to the C++ profiler."""

    def __init__(
        self,
        state: ProfilerState,
        report_input_shapes: bool,
        profile_memory: bool,
        with_stack: bool,
        with_flops: bool,
        with_modules: bool,
        experimental_config: _ExperimentalConfig,
    ) -> None: ...
76
+
77
class _ProfilerEvent:
    """One node of the profiler's event tree.

    ``tag`` identifies the event kind; ``extra_fields`` / ``typed`` carry the
    kind-specific payload.
    """

    start_tid: int
    start_time_ns: int
    children: list[_ProfilerEvent]

    # TODO(robieta): remove in favor of `self.typed`
    extra_fields: (
        _ExtraFields_TorchOp
        | _ExtraFields_Backend
        | _ExtraFields_Allocation
        | _ExtraFields_OutOfMemory
        | _ExtraFields_PyCall
        | _ExtraFields_PyCCall
        | _ExtraFields_Kineto
    )

    @property
    def typed(
        self,
    ) -> (
        # Tagged-union view: pairs the _EventType discriminant with its
        # matching payload so type checkers can narrow on element 0.
        tuple[Literal[_EventType.TorchOp], _ExtraFields_TorchOp]
        | tuple[Literal[_EventType.Backend], _ExtraFields_Backend]
        | tuple[Literal[_EventType.Allocation], _ExtraFields_Allocation]
        | tuple[Literal[_EventType.OutOfMemory], _ExtraFields_OutOfMemory]
        | tuple[Literal[_EventType.PyCall], _ExtraFields_PyCall]
        | tuple[Literal[_EventType.PyCCall], _ExtraFields_PyCCall]
        | tuple[Literal[_EventType.Kineto], _ExtraFields_Kineto]
    ): ...
    @property
    def name(self) -> str: ...
    @property
    def tag(self) -> _EventType: ...
    @property
    def id(self) -> int: ...
    @property
    def parent(self) -> _ProfilerEvent | None: ...
    @property
    def correlation_id(self) -> int: ...
    @property
    def end_time_ns(self) -> int: ...
    @property
    def duration_time_ns(self) -> int: ...
119
+
120
class _TensorMetadata:
    """Lightweight description of a Tensor as recorded by the profiler."""

    impl_ptr: int | None
    storage_data_ptr: int | None
    id: int | None

    @property
    def allocation_id(self) -> int | None: ...
    @property
    def layout(self) -> layout: ...
    @property
    def device(self) -> device: ...
    @property
    def dtype(self) -> dtype: ...
    @property
    def sizes(self) -> list[int]: ...
    @property
    def strides(self) -> list[int]: ...

# Python scalar types that can appear as recorded op inputs, and the union of
# all recorded input forms (tensor metadata, lists of it, scalars, or None).
Scalar: TypeAlias = int | float | bool | complex
Input: TypeAlias = _TensorMetadata | list[_TensorMetadata] | Scalar | None

class _ExtraFields_TorchOp:
    """Payload for ``_EventType.TorchOp`` events (an ATen operator call)."""

    name: str
    sequence_number: int
    allow_tf32_cublas: bool

    @property
    def inputs(self) -> list[Input]: ...
    @property
    def scope(self) -> RecordScope: ...
150
+
151
# Payload types for the remaining _EventType variants.  Backend and
# OutOfMemory currently expose no fields to Python.
class _ExtraFields_Backend: ...

class _ExtraFields_Allocation:
    """Payload for allocator events (``_EventType.Allocation``)."""

    ptr: int
    id: int | None
    alloc_size: int
    total_allocated: int
    total_reserved: int

    @property
    def allocation_id(self) -> int | None: ...
    @property
    def device(self) -> device: ...

class _ExtraFields_OutOfMemory: ...

class _PyFrameState:
    """Source location (file / function / line) of a Python frame."""

    line_number: int
    function_name: str

    @property
    def file_name(self) -> str: ...
173
+
174
class _NNModuleInfo:
    """Description of an ``nn.Module`` instance captured by the profiler:
    identity pointers, class name, and its (name, param, grad) triples."""

    @property
    def self_ptr(self) -> int: ...
    @property
    def cls_ptr(self) -> int: ...
    @property
    def cls_name(self) -> str: ...
    @property
    def parameters(
        self,
    ) -> list[tuple[str, _TensorMetadata, _TensorMetadata | None]]: ...

class _OptimizerInfo:
    """Snapshot of an optimizer's parameters captured by the profiler."""

    @property
    def parameters(
        self,
    ) -> list[
        tuple[
            # Parameter
            _TensorMetadata,
            #
            # Gradient (if present during optimizer.step())
            _TensorMetadata | None,
            #
            # Optimizer state for Parameter as (name, tensor) pairs
            list[tuple[str, _TensorMetadata]],
        ]
    ]: ...
202
+
203
class _ExtraFields_PyCCall:
    """Payload for C-function call events: only the calling Python frame."""

    @property
    def caller(self) -> _PyFrameState: ...

class _ExtraFields_PyCall:
    """Payload for Python call events: callsite/caller frames plus optional
    nn.Module or optimizer context."""

    @property
    def callsite(self) -> _PyFrameState: ...
    @property
    def caller(self) -> _PyFrameState: ...
    @property
    def module(self) -> _NNModuleInfo | None: ...
    @property
    def optimizer(self) -> _OptimizerInfo | None: ...

class _ExtraFields_Kineto: ...
218
+
219
# Execution-trace observer controls: register an observer writing to the
# given file, then enable/disable or remove it.
def _add_execution_trace_observer(output_file_path: str) -> bool: ...
def _remove_execution_trace_observer() -> None: ...
def _enable_execution_trace_observer() -> None: ...
def _disable_execution_trace_observer() -> None: ...

# Global toggles for what the profiler records.
def _set_record_concrete_inputs_enabled_val(val: bool) -> None: ...
def _set_fwd_bwd_enabled_val(val: bool) -> None: ...
def _set_cuda_sync_enabled_val(val: bool) -> None: ...

# Opaque handle to a captured (not yet symbolized) traceback.
class CapturedTraceback: ...

# Capture a traceback now from the requested layers (Python / TorchScript /
# C++); symbolization is deferred to ``symbolize_tracebacks``.
def gather_traceback(python: bool, script: bool, cpp: bool) -> CapturedTraceback: ...

# The Dict has name, filename, line
def symbolize_tracebacks(
    to_symbolize: list[CapturedTraceback],
) -> list[list[dict[str, str]]]: ...
235
+
236
class _RecordFunctionFast:
    """Low-overhead context manager that emits a named profiler record,
    optionally annotated with positional and keyword input values."""

    def __init__(
        self,
        name: str,
        input_values: list | tuple | None = None,
        keyword_values: dict | None = None,
    ) -> None: ...
    def __enter__(self) -> None: ...
    def __exit__(self, *args: Any) -> None: ...
env/Lib/site-packages/torch/_C/_verbose.pyi ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Defined in torch/csrc/utils/verbose.cpp
# Toggle verbose logging in the MKL and oneDNN (MKL-DNN) backends.
def mkl_set_verbose(enable: int) -> int: ...
def mkldnn_set_verbose(level: int) -> int: ...
env/Lib/site-packages/torch/_VF.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This makes the functions in torch._C._VariableFunctions available as
3
+ torch._VF.<funcname>
4
+ without mypy being able to find them.
5
+
6
+ A subset of those functions are mapped to ATen functions in
7
+ torch/jit/_builtins.py
8
+
9
+ See https://github.com/pytorch/pytorch/issues/21478 for the reason for
10
+ introducing torch._VF
11
+
12
+ """
13
+
14
+ import sys
15
+ import types
16
+
17
+ import torch
18
+
19
+
20
class VFModule(types.ModuleType):
    """Proxy module whose attribute lookups are delegated to
    ``torch._C._VariableFunctions``."""

    vf: types.ModuleType

    def __init__(self, name: str):
        super().__init__(name)
        # Keep a direct reference so each lookup skips the torch._C walk.
        self.vf = torch._C._VariableFunctions

    def __getattr__(self, attr: str) -> object:
        # Only invoked for names not already present on the module itself.
        target = self.vf
        return getattr(target, attr)
29
+
30
+
31
# Replace this module object in sys.modules with the proxy so that
# ``torch._VF.<name>`` resolves through torch._C._VariableFunctions.
sys.modules[__name__] = VFModule(__name__)
env/Lib/site-packages/torch/_VF.pyi ADDED
The diff for this file is too large to render. See raw diff
 
env/Lib/site-packages/torch/__config__.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+
4
+
5
def show():
    """
    Return a human-readable string describing how this PyTorch build
    was configured.
    """
    return torch._C._show_config()
11
+
12
+
13
+ # TODO: In principle, we could provide more structured version/config
14
+ # information here. For now only CXX_FLAGS is exposed, as Timer
15
+ # uses them.
16
+ def _cxx_flags():
17
+ """Returns the CXX_FLAGS used when building PyTorch."""
18
+ return torch._C._cxx_flags()
19
+
20
+
21
def parallel_info():
    r"""Return a detailed string describing the parallelization settings."""
    return torch._C._parallel_info()
env/Lib/site-packages/torch/__future__.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Module-wide feature flags.  Read through the matching get_* accessors and
# mutated only by the corresponding set_* functions in this module.
_overwrite_module_params_on_conversion: bool = False
_swap_module_params_on_conversion: bool = False
3
+
4
+
5
def set_overwrite_module_params_on_conversion(value: bool) -> None:
    """
    Control whether converting an ``nn.Module`` assigns brand-new tensors to
    its parameters rather than mutating the existing parameters in-place.

    With this enabled, new parameters are assigned by:

    #. ``module.{device}()`` (e.g. :meth:`nn.Module.cuda()`) for moving a module between devices
    #. ``module.{dtype}()`` (e.g. :meth:`nn.Module.float()`) for converting a module to a different dtype
    #. :meth:`nn.Module.to`
    #. :meth:`nn.Module.to_empty`

    Args:
        value (bool): Whether to assign new tensors or not.

    """
    global _overwrite_module_params_on_conversion
    _overwrite_module_params_on_conversion = value
23
+
24
+
25
def get_overwrite_module_params_on_conversion() -> bool:
    """
    Report whether :class:`torch.nn.Module` conversion assigns new tensors to
    the parameters instead of mutating the existing parameters in-place.
    Defaults to ``False``.

    See :func:`~torch.__future__.set_overwrite_module_params_on_conversion` for more information.
    """
    return _overwrite_module_params_on_conversion
33
+
34
+
35
def set_swap_module_params_on_conversion(value: bool) -> None:
    """
    Control whether :func:`~torch.utils.swap_tensors` is used — rather than
    setting ``.data`` — to change existing parameters in-place when converting
    an ``nn.Module``, and instead of ``param.copy_(state_dict[key])`` when
    loading a state dict into an ``nn.Module``.

    .. note::
        This function takes precedence over :func:`~torch.__future__.get_overwrite_module_params_on_conversion`

    When enabled, the following methods swap the existing parameters in-place:

    #. ``module.{device}()`` (e.g. :meth:`nn.Module.cuda()`) for moving a module between devices
    #. ``module.{dtype}()`` (e.g. :meth:`nn.Module.float()`) for converting a module to a different dtype
    #. :meth:`nn.Module.to`
    #. :meth:`nn.Module.to_empty`
    #. :meth:`nn.Module.load_state_dict`

    With this set, :meth:`~nn.Module.load_state_dict` behaves as follows:

    #. For each parameter/buffer, the corresponding ``state_dict['key']`` is
       first transformed via :meth:`~torch.Tensor.module_load`
       (i.e. ``res = param.module_load(state_dict['key'])``)
    #. ``res`` is wrapped in an :class:`~nn.Parameter` when necessary
    #. The parameter/buffer in the module is then swapped with ``res`` via
       :func:`~torch.utils.swap_tensors`

    Args:
        value (bool): Whether to use :func:`~torch.utils.swap_tensors` or not.

    """
    global _swap_module_params_on_conversion
    _swap_module_params_on_conversion = value
66
+
67
+
68
def get_swap_module_params_on_conversion() -> bool:
    """
    Report whether :func:`~torch.utils.swap_tensors` is used — rather than
    setting ``.data`` — to change existing parameters in-place when converting
    an ``nn.Module``. Defaults to ``False``.

    See :func:`~torch.__future__.set_swap_module_params_on_conversion` for more information.
    """
    return _swap_module_params_on_conversion
env/Lib/site-packages/torch/__init__.py ADDED
The diff for this file is too large to render. See raw diff
 
env/Lib/site-packages/torch/__pycache__/_VF.cpython-310.pyc ADDED
Binary file (1.2 kB). View file
 
env/Lib/site-packages/torch/__pycache__/__config__.cpython-310.pyc ADDED
Binary file (749 Bytes). View file
 
env/Lib/site-packages/torch/__pycache__/__future__.cpython-310.pyc ADDED
Binary file (3.41 kB). View file
 
env/Lib/site-packages/torch/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (78.2 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_appdirs.cpython-310.pyc ADDED
Binary file (21.8 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_classes.cpython-310.pyc ADDED
Binary file (2.52 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_compile.cpython-310.pyc ADDED
Binary file (1.18 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_custom_ops.cpython-310.pyc ADDED
Binary file (12.8 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_deploy.cpython-310.pyc ADDED
Binary file (2.54 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_guards.cpython-310.pyc ADDED
Binary file (25.2 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_jit_internal.cpython-310.pyc ADDED
Binary file (36.4 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_linalg_utils.cpython-310.pyc ADDED
Binary file (5.17 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_lobpcg.cpython-310.pyc ADDED
Binary file (30.4 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_lowrank.cpython-310.pyc ADDED
Binary file (9.78 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_meta_registrations.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6816020fde1fbf1a23bef79984080fe90b37e6afee7c6f403784a66a5a205c49
3
+ size 169893
env/Lib/site-packages/torch/__pycache__/_namedtensor_internals.cpython-310.pyc ADDED
Binary file (4.96 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_ops.cpython-310.pyc ADDED
Binary file (34.9 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_python_dispatcher.cpython-310.pyc ADDED
Binary file (3.48 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_size_docs.cpython-310.pyc ADDED
Binary file (1.13 kB). View file
 
env/Lib/site-packages/torch/__pycache__/_sources.cpython-310.pyc ADDED
Binary file (4.2 kB). View file