ZTWHHH commited on
Commit
35e7d99
·
verified ·
1 Parent(s): 703ad23

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h +23 -0
  3. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_ops.h +39 -0
  4. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_gru_cell_backward_compositeimplicitautograd_dispatch.h +23 -0
  5. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/alias_copy_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
  6. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_reduce.h +39 -0
  7. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_meta.h +27 -0
  8. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/conj_physical_ops.h +50 -0
  9. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h +25 -0
  10. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve.h +39 -0
  11. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/log10_meta.h +27 -0
  12. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/ne_cpu_dispatch.h +30 -0
  13. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/new_full_native.h +22 -0
  14. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/numpy_T_ops.h +28 -0
  15. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_native.h +24 -0
  16. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/ravel_native.h +21 -0
  17. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_ops.h +39 -0
  18. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_as_native.h +22 -0
  19. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/resize_ops.h +50 -0
  20. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit_native.h +22 -0
  21. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h +83 -0
  22. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_cuda_dispatch.h +25 -0
  23. openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim_consecutive_ops.h +39 -0
  24. phi4/lib/python3.10/site-packages/pycparser/__pycache__/yacctab.cpython-310.pyc +3 -0
  25. phi4/lib/python3.10/site-packages/torch/__config__.py +23 -0
  26. phi4/lib/python3.10/site-packages/torch/__future__.py +75 -0
  27. phi4/lib/python3.10/site-packages/torch/_appdirs.py +667 -0
  28. phi4/lib/python3.10/site-packages/torch/_dynamo/__init__.py +142 -0
  29. phi4/lib/python3.10/site-packages/torch/_dynamo/callback.py +100 -0
  30. phi4/lib/python3.10/site-packages/torch/_dynamo/current_scope_id.py +25 -0
  31. phi4/lib/python3.10/site-packages/torch/_dynamo/decorators.py +634 -0
  32. phi4/lib/python3.10/site-packages/torch/_dynamo/device_interface.py +381 -0
  33. phi4/lib/python3.10/site-packages/torch/_dynamo/hooks.py +12 -0
  34. phi4/lib/python3.10/site-packages/torch/_dynamo/output_graph.py +0 -0
  35. phi4/lib/python3.10/site-packages/torch/_dynamo/replay_record.py +113 -0
  36. phi4/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py +0 -0
  37. phi4/lib/python3.10/site-packages/torch/_dynamo/test_case.py +79 -0
  38. phi4/lib/python3.10/site-packages/torch/_namedtensor_internals.py +159 -0
  39. phi4/lib/python3.10/site-packages/torch/_ops.py +1362 -0
  40. phi4/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc +3 -0
  41. phi4/lib/python3.10/site-packages/torch/_utils_internal.py +274 -0
  42. phi4/lib/python3.10/site-packages/torch/functional.py +2209 -0
  43. phi4/lib/python3.10/site-packages/torch/py.typed +0 -0
  44. phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/__init__.cpython-310.pyc +0 -0
  45. phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite.cpython-310.pyc +0 -0
  46. phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc +0 -0
  47. phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/_quantized_conversions.cpython-310.pyc +0 -0
  48. phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc +0 -0
  49. phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/observer.cpython-310.pyc +0 -0
  50. phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/qconfig.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -746,3 +746,6 @@ openflamingo/lib/python3.10/site-packages/pycocoevalcap/spice/lib/guava-19.0.jar
746
  openflamingo/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
747
  phi4/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
748
  phi4/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_spin.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
746
  openflamingo/lib/python3.10/site-packages/torch/__pycache__/overrides.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
747
  phi4/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
748
  phi4/lib/python3.10/site-packages/sympy/physics/quantum/tests/__pycache__/test_spin.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
749
+ phi4/lib/python3.10/site-packages/pycparser/__pycache__/yacctab.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
750
+ phi4/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
751
+ phi4/lib/python3.10/site-packages/torchvision.libs/libwebp.54a0d02a.so.7 filter=lfs diff=lfs merge=lfs -text
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/_fw_primal_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor _fw_primal(const at::Tensor & self, int64_t level);
21
+
22
+ } // namespace compositeexplicitautograd
23
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _standard_gamma {
18
+ using schema = at::Tensor (const at::Tensor &, c10::optional<at::Generator>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_standard_gamma")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_standard_gamma(Tensor self, Generator? generator=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, c10::optional<at::Generator> generator);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator);
26
+ };
27
+
28
+ struct TORCH_API _standard_gamma_out {
29
+ using schema = at::Tensor & (const at::Tensor &, c10::optional<at::Generator>, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_standard_gamma")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_differentiable_gru_cell_backward_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias);
21
+
22
+ } // namespace compositeimplicitautograd
23
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/alias_copy_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor alias_copy(const at::Tensor & self);
21
+
22
+ } // namespace compositeexplicitautogradnonfunctional
23
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_reduce.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/batch_norm_backward_reduce_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
26
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_reduce(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
27
+ return at::_ops::batch_norm_backward_reduce::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
28
+ }
29
+
30
+ // aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
31
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
32
+ return at::_ops::batch_norm_backward_reduce_out::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
33
+ }
34
+ // aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
35
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
36
+ return at::_ops::batch_norm_backward_reduce_out::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
37
+ }
38
+
39
+ }
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/bmm_meta.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ struct TORCH_API structured_bmm : public at::impl::MetaBase {
21
+
22
+
23
+ void meta(const at::Tensor & self, const at::Tensor & mat2);
24
+ };
25
+
26
+ } // namespace native
27
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/conj_physical_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API conj_physical {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conj_physical")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conj_physical(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API conj_physical_out {
29
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conj_physical")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
37
+ };
38
+
39
+ struct TORCH_API conj_physical_ {
40
+ using schema = at::Tensor & (at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conj_physical_")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conj_physical_(Tensor(a!) self) -> Tensor(a!)")
46
+ static at::Tensor & call(at::Tensor & self);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
48
+ };
49
+
50
+ }} // namespace at::_ops
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none");
21
+ TORCH_API at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none");
22
+ TORCH_API at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input);
23
+
24
+ } // namespace cpu
25
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/linalg_ldl_solve_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
26
+ inline at::Tensor linalg_ldl_solve(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) {
27
+ return at::_ops::linalg_ldl_solve::call(LD, pivots, B, hermitian);
28
+ }
29
+
30
+ // aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & linalg_ldl_solve_out(at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) {
32
+ return at::_ops::linalg_ldl_solve_out::call(LD, pivots, B, hermitian, out);
33
+ }
34
+ // aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & linalg_ldl_solve_outf(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {
36
+ return at::_ops::linalg_ldl_solve_out::call(LD, pivots, B, hermitian, out);
37
+ }
38
+
39
+ }
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/log10_meta.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ struct TORCH_API structured_log10 : public TensorIteratorBase {
21
+
22
+
23
+ void meta(const at::Tensor & self);
24
+ };
25
+
26
+ } // namespace native
27
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/ne_cpu_dispatch.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor ne(const at::Tensor & self, const at::Scalar & other);
21
+ TORCH_API at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
22
+ TORCH_API at::Tensor & ne_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
23
+ TORCH_API at::Tensor & ne_(at::Tensor & self, const at::Scalar & other);
24
+ TORCH_API at::Tensor ne(const at::Tensor & self, const at::Tensor & other);
25
+ TORCH_API at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
26
+ TORCH_API at::Tensor & ne_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
27
+ TORCH_API at::Tensor & ne_(at::Tensor & self, const at::Tensor & other);
28
+
29
+ } // namespace cpu
30
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/new_full_native.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor new_full(const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
20
+ TORCH_API at::Tensor & new_full_out_symint(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out);
21
+ } // namespace native
22
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/numpy_T_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API numpy_T {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::numpy_T")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "numpy_T(Tensor(a) self) -> Tensor(a)")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ }} // namespace at::_ops
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_native.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor randint_like(const at::Tensor & self, int64_t high, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
20
+ TORCH_API at::Tensor & randint_like_out(const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
21
+ TORCH_API at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
22
+ TORCH_API at::Tensor & randint_like_low_dtype_out(const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
23
+ } // namespace native
24
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/ravel_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor ravel(const at::Tensor & self);
20
+ } // namespace native
21
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API reflection_pad3d_out {
18
+ using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reflection_pad3d")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)")
24
+ static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out);
25
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out);
26
+ };
27
+
28
+ struct TORCH_API reflection_pad3d {
29
+ using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reflection_pad3d")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor")
35
+ static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef padding);
36
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding);
37
+ };
38
+
39
+ }} // namespace at::_ops
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_as_native.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor reshape_as(const at::Tensor & self, const at::Tensor & other);
20
+ TORCH_API at::Tensor reshape_as_nested(const at::Tensor & self, const at::Tensor & other);
21
+ } // namespace native
22
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/resize_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API resize_ {
18
+ using schema = const at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::optional<at::MemoryFormat>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::resize_")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)")
24
+ static const at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format);
25
+ static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format);
26
+ };
27
+
28
+ struct TORCH_API resize_out {
29
+ using schema = const at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::optional<at::MemoryFormat>, const at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::resize")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)")
35
+ static const at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out);
36
+ static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out);
37
+ };
38
+
39
+ struct TORCH_API resize {
40
+ using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::optional<at::MemoryFormat>);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::resize")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor")
46
+ static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format);
47
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format);
48
+ };
49
+
50
+ }} // namespace at::_ops
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit_native.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor special_expit(const at::Tensor & self);
20
+ TORCH_API at::Tensor & special_expit_out(const at::Tensor & self, at::Tensor & out);
21
+ } // namespace native
22
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_ops.h ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API special_shifted_chebyshev_polynomial_w {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_shifted_chebyshev_polynomial_w")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & x, const at::Tensor & n);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n);
26
+ };
27
+
28
+ struct TORCH_API special_shifted_chebyshev_polynomial_w_x_scalar {
29
+ using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_shifted_chebyshev_polynomial_w")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor")
35
+ static at::Tensor call(const at::Scalar & x, const at::Tensor & n);
36
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n);
37
+ };
38
+
39
+ struct TORCH_API special_shifted_chebyshev_polynomial_w_n_scalar {
40
+ using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_shifted_chebyshev_polynomial_w")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor")
46
+ static at::Tensor call(const at::Tensor & x, const at::Scalar & n);
47
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n);
48
+ };
49
+
50
+ struct TORCH_API special_shifted_chebyshev_polynomial_w_out {
51
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
52
+ using ptr_schema = schema*;
53
+ // See Note [static constexpr char* members for windows NVCC]
54
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_shifted_chebyshev_polynomial_w")
55
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
56
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
57
+ static at::Tensor & call(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
58
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
59
+ };
60
+
61
+ struct TORCH_API special_shifted_chebyshev_polynomial_w_x_scalar_out {
62
+ using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
63
+ using ptr_schema = schema*;
64
+ // See Note [static constexpr char* members for windows NVCC]
65
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_shifted_chebyshev_polynomial_w")
66
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x_scalar_out")
67
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)")
68
+ static at::Tensor & call(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
69
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
70
+ };
71
+
72
+ struct TORCH_API special_shifted_chebyshev_polynomial_w_n_scalar_out {
73
+ using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
74
+ using ptr_schema = schema*;
75
+ // See Note [static constexpr char* members for windows NVCC]
76
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_shifted_chebyshev_polynomial_w")
77
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "n_scalar_out")
78
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)")
79
+ static at::Tensor & call(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
80
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
81
+ };
82
+
83
+ }} // namespace at::_ops
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/special_spherical_bessel_j0_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor special_spherical_bessel_j0(const at::Tensor & x);
21
+ TORCH_API at::Tensor & special_spherical_bessel_j0_out(at::Tensor & out, const at::Tensor & x);
22
+ TORCH_API at::Tensor & special_spherical_bessel_j0_outf(const at::Tensor & x, at::Tensor & out);
23
+
24
+ } // namespace cuda
25
+ } // namespace at
openflamingo/lib/python3.10/site-packages/torch/include/ATen/ops/unique_dim_consecutive_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API unique_dim_consecutive {
18
+ using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, int64_t, bool, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unique_dim_consecutive")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)")
24
+ static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts);
25
+ static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts);
26
+ };
27
+
28
+ struct TORCH_API unique_dim_consecutive_out {
29
+ using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, int64_t, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unique_dim_consecutive")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
35
+ static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
36
+ static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
37
+ };
38
+
39
+ }} // namespace at::_ops
phi4/lib/python3.10/site-packages/pycparser/__pycache__/yacctab.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dafee31d03cb520eb8e82ddfb17d309660d4c64366f806d9d17ae4f5110fee9f
3
+ size 179983
phi4/lib/python3.10/site-packages/torch/__config__.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+
4
+
5
+ def show():
6
+ """
7
+ Return a human-readable string with descriptions of the
8
+ configuration of PyTorch.
9
+ """
10
+ return torch._C._show_config()
11
+
12
+
13
+ # TODO: In principle, we could provide more structured version/config
14
+ # information here. For now only CXX_FLAGS is exposed, as Timer
15
+ # uses them.
16
+ def _cxx_flags():
17
+ """Returns the CXX_FLAGS used when building PyTorch."""
18
+ return torch._C._cxx_flags()
19
+
20
+
21
+ def parallel_info():
22
+ r"""Returns detailed string with parallelization settings"""
23
+ return torch._C._parallel_info()
phi4/lib/python3.10/site-packages/torch/__future__.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _overwrite_module_params_on_conversion: bool = False
2
+ _swap_module_params_on_conversion: bool = False
3
+
4
+
5
+ def set_overwrite_module_params_on_conversion(value: bool) -> None:
6
+ """
7
+ Sets whether to assign new tensors to the parameters instead of changing the
8
+ existing parameters in-place when converting an ``nn.Module``.
9
+
10
+ When enabled, the following methods will assign new parameters to the module:
11
+
12
+ #. ``module.{device}()`` (e.g. :meth:`nn.Module.cuda()`) for moving a module between devices
13
+ #. ``module.{dtype}()`` (e.g. :meth:`nn.Module.float()`) for converting a module to a different dtype
14
+ #. :meth:`nn.Module.to`
15
+ #. :meth:`nn.Module.to_empty`
16
+
17
+ Args:
18
+ value (bool): Whether to assign new tensors or not.
19
+
20
+ """
21
+ global _overwrite_module_params_on_conversion
22
+ _overwrite_module_params_on_conversion = value
23
+
24
+
25
+ def get_overwrite_module_params_on_conversion() -> bool:
26
+ """
27
+ Returns whether to assign new tensors to the parameters instead of changing the
28
+ existing parameters in-place when converting an :class:`torch.nn.Module`. Defaults to ``False``.
29
+
30
+ See :func:`~torch.__future__.set_overwrite_module_params_on_conversion` for more information.
31
+ """
32
+ return _overwrite_module_params_on_conversion
33
+
34
+
35
+ def set_swap_module_params_on_conversion(value: bool) -> None:
36
+ """
37
+ Sets whether to use :func:`~torch.utils.swap_tensors` instead of setting ``.data`` to
38
+ change the existing parameters in-place when converting an ``nn.Module`` and instead
39
+ of ``param.copy_(state_dict[key])`` when loading a state dict into an ``nn.Module``.
40
+
41
+ .. note::
42
+ This function takes precedence over :func:`~torch.__future__.get_overwrite_module_params_on_conversion`
43
+
44
+ When enabled, the following methods will swap the existing parameters in-place:
45
+
46
+ #. ``module.{device}()`` (e.g. :meth:`nn.Module.cuda()`) for moving a module between devices
47
+ #. ``module.{dtype}()`` (e.g. :meth:`nn.Module.float()`) for converting a module to a different dtype
48
+ #. :meth:`nn.Module.to`
49
+ #. :meth:`nn.Module.to_empty`
50
+ #. :meth:`nn.Module.load_state_dict`
51
+
52
+ The semantics for :meth:`~nn.Module.load_state_dict` when this is set are as follows:
53
+
54
+ #. For each parameter/buffer, its corresponding ``state_dict['key']`` is transformed via
55
+ :meth:`~torch.Tensor.module_load` (i.e. ``res = param.module_load(state_dict['key'])``)
56
+ #. If necessary, ``res`` will be wrapped in an :class:`~nn.Parameter`
57
+ #. The parameter/buffer in the module will be swapped via :func:`~torch.utils.swap_tensors`
58
+ with ``res``
59
+
60
+ Args:
61
+ value (bool): Whether to use :func:`~torch.utils.swap_tensors` or not.
62
+
63
+ """
64
+ global _swap_module_params_on_conversion
65
+ _swap_module_params_on_conversion = value
66
+
67
+
68
+ def get_swap_module_params_on_conversion() -> bool:
69
+ """
70
+ Returns whether to use :func:`~torch.utils.swap_tensors` instead of setting .data to
71
+ change the existing parameters in-place when converting an ``nn.Module``. Defaults to ``False``.
72
+
73
+ See :func:`~torch.__future__.set_swap_module_params_on_conversion` for more information.
74
+ """
75
+ return _swap_module_params_on_conversion
phi4/lib/python3.10/site-packages/torch/_appdirs.py ADDED
@@ -0,0 +1,667 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # Copyright (c) 2005-2010 ActiveState Software Inc.
4
+ # Copyright (c) 2013 Eddy Petrișor
5
+
6
+ # flake8: noqa
7
+
8
+ """
9
+ This file is directly from
10
+ https://github.com/ActiveState/appdirs/blob/3fe6a83776843a46f20c2e5587afcffe05e03b39/appdirs.py
11
+
12
+ The license of https://github.com/ActiveState/appdirs copied below:
13
+
14
+
15
+ # This is the MIT license
16
+
17
+ Copyright (c) 2010 ActiveState Software Inc.
18
+
19
+ Permission is hereby granted, free of charge, to any person obtaining a
20
+ copy of this software and associated documentation files (the
21
+ "Software"), to deal in the Software without restriction, including
22
+ without limitation the rights to use, copy, modify, merge, publish,
23
+ distribute, sublicense, and/or sell copies of the Software, and to
24
+ permit persons to whom the Software is furnished to do so, subject to
25
+ the following conditions:
26
+
27
+ The above copyright notice and this permission notice shall be included
28
+ in all copies or substantial portions of the Software.
29
+
30
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
31
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
33
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
34
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
35
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
36
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
37
+ """
38
+
39
+ """Utilities for determining application-specific dirs.
40
+
41
+ See <https://github.com/ActiveState/appdirs> for details and usage.
42
+ """
43
+ # Dev Notes:
44
+ # - MSDN on where to store app data files:
45
+ # http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
46
+ # - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
47
+ # - XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
48
+
49
+ __version__ = "1.4.4"
50
+ __version_info__ = tuple(int(segment) for segment in __version__.split("."))
51
+
52
+
53
+ import os
54
+ import sys
55
+
56
+
57
+ unicode = str
58
+
59
+ if sys.platform.startswith("java"):
60
+ import platform
61
+
62
+ os_name = platform.java_ver()[3][0]
63
+ if os_name.startswith("Windows"): # "Windows XP", "Windows 7", etc.
64
+ system = "win32"
65
+ elif os_name.startswith("Mac"): # "Mac OS X", etc.
66
+ system = "darwin"
67
+ else: # "Linux", "SunOS", "FreeBSD", etc.
68
+ # Setting this to "linux2" is not ideal, but only Windows or Mac
69
+ # are actually checked for and the rest of the module expects
70
+ # *sys.platform* style strings.
71
+ system = "linux2"
72
+ else:
73
+ system = sys.platform
74
+
75
+
76
+ def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
77
+ r"""Return full path to the user-specific data dir for this application.
78
+
79
+ "appname" is the name of application.
80
+ If None, just the system directory is returned.
81
+ "appauthor" (only used on Windows) is the name of the
82
+ appauthor or distributing body for this application. Typically
83
+ it is the owning company name. This falls back to appname. You may
84
+ pass False to disable it.
85
+ "version" is an optional version path element to append to the
86
+ path. You might want to use this if you want multiple versions
87
+ of your app to be able to run independently. If used, this
88
+ would typically be "<major>.<minor>".
89
+ Only applied when appname is present.
90
+ "roaming" (boolean, default False) can be set True to use the Windows
91
+ roaming appdata directory. That means that for users on a Windows
92
+ network setup for roaming profiles, this user data will be
93
+ sync'd on login. See
94
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
95
+ for a discussion of issues.
96
+
97
+ Typical user data directories are:
98
+ Mac OS X: ~/Library/Application Support/<AppName>
99
+ Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
100
+ Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
101
+ Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
102
+ Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
103
+ Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
104
+
105
+ For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
106
+ That means, by default "~/.local/share/<AppName>".
107
+ """
108
+ if system == "win32":
109
+ if appauthor is None:
110
+ appauthor = appname
111
+ const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
112
+ path = os.path.normpath(_get_win_folder(const))
113
+ if appname:
114
+ if appauthor is not False:
115
+ path = os.path.join(path, appauthor, appname)
116
+ else:
117
+ path = os.path.join(path, appname)
118
+ elif system == "darwin":
119
+ path = os.path.expanduser("~/Library/Application Support/")
120
+ if appname:
121
+ path = os.path.join(path, appname)
122
+ else:
123
+ path = os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/share"))
124
+ if appname:
125
+ path = os.path.join(path, appname)
126
+ if appname and version:
127
+ path = os.path.join(path, version)
128
+ return path
129
+
130
+
131
+ def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
132
+ r"""Return full path to the user-shared data dir for this application.
133
+
134
+ "appname" is the name of application.
135
+ If None, just the system directory is returned.
136
+ "appauthor" (only used on Windows) is the name of the
137
+ appauthor or distributing body for this application. Typically
138
+ it is the owning company name. This falls back to appname. You may
139
+ pass False to disable it.
140
+ "version" is an optional version path element to append to the
141
+ path. You might want to use this if you want multiple versions
142
+ of your app to be able to run independently. If used, this
143
+ would typically be "<major>.<minor>".
144
+ Only applied when appname is present.
145
+ "multipath" is an optional parameter only applicable to *nix
146
+ which indicates that the entire list of data dirs should be
147
+ returned. By default, the first item from XDG_DATA_DIRS is
148
+ returned, or '/usr/local/share/<AppName>',
149
+ if XDG_DATA_DIRS is not set
150
+
151
+ Typical site data directories are:
152
+ Mac OS X: /Library/Application Support/<AppName>
153
+ Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
154
+ Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
155
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
156
+ Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
157
+
158
+ For Unix, this is using the $XDG_DATA_DIRS[0] default.
159
+
160
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
161
+ """
162
+ if system == "win32":
163
+ if appauthor is None:
164
+ appauthor = appname
165
+ path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
166
+ if appname:
167
+ if appauthor is not False:
168
+ path = os.path.join(path, appauthor, appname)
169
+ else:
170
+ path = os.path.join(path, appname)
171
+ elif system == "darwin":
172
+ path = os.path.expanduser("/Library/Application Support")
173
+ if appname:
174
+ path = os.path.join(path, appname)
175
+ else:
176
+ # XDG default for $XDG_DATA_DIRS
177
+ # only first, if multipath is False
178
+ path = os.getenv(
179
+ "XDG_DATA_DIRS", os.pathsep.join(["/usr/local/share", "/usr/share"])
180
+ )
181
+ pathlist = [
182
+ os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)
183
+ ]
184
+ if appname:
185
+ if version:
186
+ appname = os.path.join(appname, version)
187
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
188
+
189
+ if multipath:
190
+ path = os.pathsep.join(pathlist)
191
+ else:
192
+ path = pathlist[0]
193
+ return path
194
+
195
+ if appname and version:
196
+ path = os.path.join(path, version)
197
+ return path
198
+
199
+
200
+ def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
201
+ r"""Return full path to the user-specific config dir for this application.
202
+
203
+ "appname" is the name of application.
204
+ If None, just the system directory is returned.
205
+ "appauthor" (only used on Windows) is the name of the
206
+ appauthor or distributing body for this application. Typically
207
+ it is the owning company name. This falls back to appname. You may
208
+ pass False to disable it.
209
+ "version" is an optional version path element to append to the
210
+ path. You might want to use this if you want multiple versions
211
+ of your app to be able to run independently. If used, this
212
+ would typically be "<major>.<minor>".
213
+ Only applied when appname is present.
214
+ "roaming" (boolean, default False) can be set True to use the Windows
215
+ roaming appdata directory. That means that for users on a Windows
216
+ network setup for roaming profiles, this user data will be
217
+ sync'd on login. See
218
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
219
+ for a discussion of issues.
220
+
221
+ Typical user config directories are:
222
+ Mac OS X: ~/Library/Preferences/<AppName>
223
+ Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
224
+ Win *: same as user_data_dir
225
+
226
+ For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
227
+ That means, by default "~/.config/<AppName>".
228
+ """
229
+ if system == "win32":
230
+ path = user_data_dir(appname, appauthor, None, roaming)
231
+ elif system == "darwin":
232
+ path = os.path.expanduser("~/Library/Preferences/")
233
+ if appname:
234
+ path = os.path.join(path, appname)
235
+ else:
236
+ path = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
237
+ if appname:
238
+ path = os.path.join(path, appname)
239
+ if appname and version:
240
+ path = os.path.join(path, version)
241
+ return path
242
+
243
+
244
+ def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
245
+ r"""Return full path to the user-shared data dir for this application.
246
+
247
+ "appname" is the name of application.
248
+ If None, just the system directory is returned.
249
+ "appauthor" (only used on Windows) is the name of the
250
+ appauthor or distributing body for this application. Typically
251
+ it is the owning company name. This falls back to appname. You may
252
+ pass False to disable it.
253
+ "version" is an optional version path element to append to the
254
+ path. You might want to use this if you want multiple versions
255
+ of your app to be able to run independently. If used, this
256
+ would typically be "<major>.<minor>".
257
+ Only applied when appname is present.
258
+ "multipath" is an optional parameter only applicable to *nix
259
+ which indicates that the entire list of config dirs should be
260
+ returned. By default, the first item from XDG_CONFIG_DIRS is
261
+ returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
262
+
263
+ Typical site config directories are:
264
+ Mac OS X: same as site_data_dir
265
+ Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
266
+ $XDG_CONFIG_DIRS
267
+ Win *: same as site_data_dir
268
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
269
+
270
+ For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
271
+
272
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
273
+ """
274
+ if system == "win32":
275
+ path = site_data_dir(appname, appauthor)
276
+ if appname and version:
277
+ path = os.path.join(path, version)
278
+ elif system == "darwin":
279
+ path = os.path.expanduser("/Library/Preferences")
280
+ if appname:
281
+ path = os.path.join(path, appname)
282
+ else:
283
+ # XDG default for $XDG_CONFIG_DIRS
284
+ # only first, if multipath is False
285
+ path = os.getenv("XDG_CONFIG_DIRS", "/etc/xdg")
286
+ pathlist = [
287
+ os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)
288
+ ]
289
+ if appname:
290
+ if version:
291
+ appname = os.path.join(appname, version)
292
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
293
+
294
+ if multipath:
295
+ path = os.pathsep.join(pathlist)
296
+ else:
297
+ path = pathlist[0]
298
+ return path
299
+
300
+
301
+ def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
302
+ r"""Return full path to the user-specific cache dir for this application.
303
+
304
+ "appname" is the name of application.
305
+ If None, just the system directory is returned.
306
+ "appauthor" (only used on Windows) is the name of the
307
+ appauthor or distributing body for this application. Typically
308
+ it is the owning company name. This falls back to appname. You may
309
+ pass False to disable it.
310
+ "version" is an optional version path element to append to the
311
+ path. You might want to use this if you want multiple versions
312
+ of your app to be able to run independently. If used, this
313
+ would typically be "<major>.<minor>".
314
+ Only applied when appname is present.
315
+ "opinion" (boolean) can be False to disable the appending of
316
+ "Cache" to the base app data dir for Windows. See
317
+ discussion below.
318
+
319
+ Typical user cache directories are:
320
+ Mac OS X: ~/Library/Caches/<AppName>
321
+ Unix: ~/.cache/<AppName> (XDG default)
322
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
323
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
324
+
325
+ On Windows the only suggestion in the MSDN docs is that local settings go in
326
+ the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
327
+ app data dir (the default returned by `user_data_dir` above). Apps typically
328
+ put cache data somewhere *under* the given dir here. Some examples:
329
+ ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
330
+ ...\Acme\SuperApp\Cache\1.0
331
+ OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
332
+ This can be disabled with the `opinion=False` option.
333
+ """
334
+ if system == "win32":
335
+ if appauthor is None:
336
+ appauthor = appname
337
+ path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
338
+ if appname:
339
+ if appauthor is not False:
340
+ path = os.path.join(path, appauthor, appname)
341
+ else:
342
+ path = os.path.join(path, appname)
343
+ if opinion:
344
+ path = os.path.join(path, "Cache")
345
+ elif system == "darwin":
346
+ path = os.path.expanduser("~/Library/Caches")
347
+ if appname:
348
+ path = os.path.join(path, appname)
349
+ else:
350
+ path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
351
+ if appname:
352
+ path = os.path.join(path, appname)
353
+ if appname and version:
354
+ path = os.path.join(path, version)
355
+ return path
356
+
357
+
358
+ def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
359
+ r"""Return full path to the user-specific state dir for this application.
360
+
361
+ "appname" is the name of application.
362
+ If None, just the system directory is returned.
363
+ "appauthor" (only used on Windows) is the name of the
364
+ appauthor or distributing body for this application. Typically
365
+ it is the owning company name. This falls back to appname. You may
366
+ pass False to disable it.
367
+ "version" is an optional version path element to append to the
368
+ path. You might want to use this if you want multiple versions
369
+ of your app to be able to run independently. If used, this
370
+ would typically be "<major>.<minor>".
371
+ Only applied when appname is present.
372
+ "roaming" (boolean, default False) can be set True to use the Windows
373
+ roaming appdata directory. That means that for users on a Windows
374
+ network setup for roaming profiles, this user data will be
375
+ sync'd on login. See
376
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
377
+ for a discussion of issues.
378
+
379
+ Typical user state directories are:
380
+ Mac OS X: same as user_data_dir
381
+ Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
382
+ Win *: same as user_data_dir
383
+
384
+ For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
385
+ to extend the XDG spec and support $XDG_STATE_HOME.
386
+
387
+ That means, by default "~/.local/state/<AppName>".
388
+ """
389
+ if system in ["win32", "darwin"]:
390
+ path = user_data_dir(appname, appauthor, None, roaming)
391
+ else:
392
+ path = os.getenv("XDG_STATE_HOME", os.path.expanduser("~/.local/state"))
393
+ if appname:
394
+ path = os.path.join(path, appname)
395
+ if appname and version:
396
+ path = os.path.join(path, version)
397
+ return path
398
+
399
+
400
+ def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
401
+ r"""Return full path to the user-specific log dir for this application.
402
+
403
+ "appname" is the name of application.
404
+ If None, just the system directory is returned.
405
+ "appauthor" (only used on Windows) is the name of the
406
+ appauthor or distributing body for this application. Typically
407
+ it is the owning company name. This falls back to appname. You may
408
+ pass False to disable it.
409
+ "version" is an optional version path element to append to the
410
+ path. You might want to use this if you want multiple versions
411
+ of your app to be able to run independently. If used, this
412
+ would typically be "<major>.<minor>".
413
+ Only applied when appname is present.
414
+ "opinion" (boolean) can be False to disable the appending of
415
+ "Logs" to the base app data dir for Windows, and "log" to the
416
+ base cache dir for Unix. See discussion below.
417
+
418
+ Typical user log directories are:
419
+ Mac OS X: ~/Library/Logs/<AppName>
420
+ Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
421
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
422
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
423
+
424
+ On Windows the only suggestion in the MSDN docs is that local settings
425
+ go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
426
+ examples of what some windows apps use for a logs dir.)
427
+
428
+ OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
429
+ value for Windows and appends "log" to the user cache dir for Unix.
430
+ This can be disabled with the `opinion=False` option.
431
+ """
432
+ if system == "darwin":
433
+ path = os.path.join(os.path.expanduser("~/Library/Logs"), appname)
434
+ elif system == "win32":
435
+ path = user_data_dir(appname, appauthor, version)
436
+ version = False
437
+ if opinion:
438
+ path = os.path.join(path, "Logs")
439
+ else:
440
+ path = user_cache_dir(appname, appauthor, version)
441
+ version = False
442
+ if opinion:
443
+ path = os.path.join(path, "log")
444
+ if appname and version:
445
+ path = os.path.join(path, version)
446
+ return path
447
+
448
+
449
+ class AppDirs(object):
450
+ """Convenience wrapper for getting application dirs."""
451
+
452
+ def __init__(
453
+ self, appname=None, appauthor=None, version=None, roaming=False, multipath=False
454
+ ):
455
+ self.appname = appname
456
+ self.appauthor = appauthor
457
+ self.version = version
458
+ self.roaming = roaming
459
+ self.multipath = multipath
460
+
461
+ @property
462
+ def user_data_dir(self):
463
+ return user_data_dir(
464
+ self.appname, self.appauthor, version=self.version, roaming=self.roaming
465
+ )
466
+
467
+ @property
468
+ def site_data_dir(self):
469
+ return site_data_dir(
470
+ self.appname, self.appauthor, version=self.version, multipath=self.multipath
471
+ )
472
+
473
+ @property
474
+ def user_config_dir(self):
475
+ return user_config_dir(
476
+ self.appname, self.appauthor, version=self.version, roaming=self.roaming
477
+ )
478
+
479
+ @property
480
+ def site_config_dir(self):
481
+ return site_config_dir(
482
+ self.appname, self.appauthor, version=self.version, multipath=self.multipath
483
+ )
484
+
485
+ @property
486
+ def user_cache_dir(self):
487
+ return user_cache_dir(self.appname, self.appauthor, version=self.version)
488
+
489
+ @property
490
+ def user_state_dir(self):
491
+ return user_state_dir(self.appname, self.appauthor, version=self.version)
492
+
493
+ @property
494
+ def user_log_dir(self):
495
+ return user_log_dir(self.appname, self.appauthor, version=self.version)
496
+
497
+
498
+ # ---- internal support stuff
499
+
500
+
501
+ def _get_win_folder_from_registry(csidl_name):
502
+ """This is a fallback technique at best. I'm not sure if using the
503
+ registry for this guarantees us the correct answer for all CSIDL_*
504
+ names.
505
+ """
506
+ import winreg as _winreg
507
+
508
+ shell_folder_name = {
509
+ "CSIDL_APPDATA": "AppData",
510
+ "CSIDL_COMMON_APPDATA": "Common AppData",
511
+ "CSIDL_LOCAL_APPDATA": "Local AppData",
512
+ }[csidl_name]
513
+
514
+ key = _winreg.OpenKey(
515
+ _winreg.HKEY_CURRENT_USER,
516
+ r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
517
+ )
518
+ dir, type = _winreg.QueryValueEx(key, shell_folder_name)
519
+ return dir
520
+
521
+
522
+ def _get_win_folder_with_pywin32(csidl_name):
523
+ from win32com.shell import shell, shellcon
524
+
525
+ dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
526
+ # Try to make this a unicode path because SHGetFolderPath does
527
+ # not return unicode strings when there is unicode data in the
528
+ # path.
529
+ try:
530
+ dir = unicode(dir)
531
+
532
+ # Downgrade to short path name if have highbit chars. See
533
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
534
+ has_high_char = False
535
+ for c in dir:
536
+ if ord(c) > 255:
537
+ has_high_char = True
538
+ break
539
+ if has_high_char:
540
+ try:
541
+ import win32api
542
+
543
+ dir = win32api.GetShortPathName(dir)
544
+ except ImportError:
545
+ pass
546
+ except UnicodeError:
547
+ pass
548
+ return dir
549
+
550
+
551
+ def _get_win_folder_with_ctypes(csidl_name):
552
+ import ctypes
553
+
554
+ csidl_const = {
555
+ "CSIDL_APPDATA": 26,
556
+ "CSIDL_COMMON_APPDATA": 35,
557
+ "CSIDL_LOCAL_APPDATA": 28,
558
+ }[csidl_name]
559
+
560
+ buf = ctypes.create_unicode_buffer(1024)
561
+ ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
562
+
563
+ # Downgrade to short path name if have highbit chars. See
564
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
565
+ has_high_char = False
566
+ for c in buf:
567
+ if ord(c) > 255:
568
+ has_high_char = True
569
+ break
570
+ if has_high_char:
571
+ buf2 = ctypes.create_unicode_buffer(1024)
572
+ if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
573
+ buf = buf2
574
+
575
+ return buf.value
576
+
577
+
578
+ def _get_win_folder_with_jna(csidl_name):
579
+ import array
580
+
581
+ from com.sun import jna
582
+ from com.sun.jna.platform import win32
583
+
584
+ buf_size = win32.WinDef.MAX_PATH * 2
585
+ buf = array.zeros("c", buf_size)
586
+ shell = win32.Shell32.INSTANCE
587
+ shell.SHGetFolderPath(
588
+ None,
589
+ getattr(win32.ShlObj, csidl_name),
590
+ None,
591
+ win32.ShlObj.SHGFP_TYPE_CURRENT,
592
+ buf,
593
+ )
594
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
595
+
596
+ # Downgrade to short path name if have highbit chars. See
597
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
598
+ has_high_char = False
599
+ for c in dir:
600
+ if ord(c) > 255:
601
+ has_high_char = True
602
+ break
603
+ if has_high_char:
604
+ buf = array.zeros("c", buf_size)
605
+ kernel = win32.Kernel32.INSTANCE
606
+ if kernel.GetShortPathName(dir, buf, buf_size):
607
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
608
+
609
+ return dir
610
+
611
+
612
+ if system == "win32":
613
+ try:
614
+ import win32com.shell
615
+
616
+ _get_win_folder = _get_win_folder_with_pywin32
617
+ except ImportError:
618
+ try:
619
+ from ctypes import windll
620
+
621
+ _get_win_folder = _get_win_folder_with_ctypes
622
+ except ImportError:
623
+ try:
624
+ import com.sun.jna
625
+
626
+ _get_win_folder = _get_win_folder_with_jna
627
+ except ImportError:
628
+ _get_win_folder = _get_win_folder_from_registry
629
+
630
+
631
+ # ---- self test code
632
+
633
+ if __name__ == "__main__":
634
+ appname = "MyApp"
635
+ appauthor = "MyCompany"
636
+
637
+ props = (
638
+ "user_data_dir",
639
+ "user_config_dir",
640
+ "user_cache_dir",
641
+ "user_state_dir",
642
+ "user_log_dir",
643
+ "site_data_dir",
644
+ "site_config_dir",
645
+ )
646
+
647
+ print(f"-- app dirs {__version__} --")
648
+
649
+ print("-- app dirs (with optional 'version')")
650
+ dirs = AppDirs(appname, appauthor, version="1.0")
651
+ for prop in props:
652
+ print(f"{prop}: {getattr(dirs, prop)}")
653
+
654
+ print("\n-- app dirs (without optional 'version')")
655
+ dirs = AppDirs(appname, appauthor)
656
+ for prop in props:
657
+ print(f"{prop}: {getattr(dirs, prop)}")
658
+
659
+ print("\n-- app dirs (without optional 'appauthor')")
660
+ dirs = AppDirs(appname)
661
+ for prop in props:
662
+ print(f"{prop}: {getattr(dirs, prop)}")
663
+
664
+ print("\n-- app dirs (with disabled 'appauthor')")
665
+ dirs = AppDirs(appname, appauthor=False)
666
+ for prop in props:
667
+ print(f"{prop}: {getattr(dirs, prop)}")
phi4/lib/python3.10/site-packages/torch/_dynamo/__init__.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from . import convert_frame, eval_frame, resume_execution
4
+ from .backends.registry import list_backends, lookup_backend, register_backend
5
+ from .callback import callback_handler, on_compile_end, on_compile_start
6
+ from .code_context import code_context
7
+ from .convert_frame import replay
8
+ from .decorators import (
9
+ allow_in_graph,
10
+ assume_constant_result,
11
+ disable,
12
+ disallow_in_graph,
13
+ forbid_in_graph,
14
+ graph_break,
15
+ mark_dynamic,
16
+ mark_static,
17
+ mark_static_address,
18
+ maybe_mark_dynamic,
19
+ run,
20
+ set_stance,
21
+ substitute_in_graph,
22
+ )
23
+ from .eval_frame import (
24
+ _reset_guarded_backend_cache,
25
+ explain,
26
+ export,
27
+ is_dynamo_supported,
28
+ is_inductor_supported,
29
+ optimize,
30
+ optimize_assert,
31
+ OptimizedModule,
32
+ reset_code,
33
+ )
34
+ from .external_utils import is_compiling
35
+ from .mutation_guard import GenerationTracker
36
+ from .pgo import reset_code_state
37
+ from .symbolic_convert import TensorifyState
38
+ from .utils import graph_break_reasons, guard_failures, orig_code_map, reset_frame_count
39
+
40
+
41
+ # Register polyfill functions
42
+ from .polyfills import loader as _ # usort: skip # noqa: F401
43
+
44
+
45
+ __all__ = [
46
+ "allow_in_graph",
47
+ "assume_constant_result",
48
+ "disallow_in_graph",
49
+ "forbid_in_graph",
50
+ "substitute_in_graph",
51
+ "graph_break",
52
+ "mark_dynamic",
53
+ "maybe_mark_dynamic",
54
+ "mark_static",
55
+ "mark_static_address",
56
+ "optimize",
57
+ "optimize_assert",
58
+ "export",
59
+ "explain",
60
+ "run",
61
+ "replay",
62
+ "disable",
63
+ "set_stance",
64
+ "reset",
65
+ "OptimizedModule",
66
+ "is_compiling",
67
+ "register_backend",
68
+ "list_backends",
69
+ "lookup_backend",
70
+ ]
71
+
72
+ # allowlist this for weights_only load of NJTs
73
+ torch.serialization.add_safe_globals([torch._dynamo.decorators._DimRange])
74
+
75
+ if torch.manual_seed is torch.random.manual_seed:
76
+ import torch.jit._builtins
77
+
78
+ # Wrap manual_seed with the disable decorator.
79
+ # Can't do it at its implementation due to dependency issues.
80
+ torch.manual_seed = torch._disable_dynamo(torch.manual_seed)
81
+ # Add the new manual_seed to the builtin registry.
82
+ torch.jit._builtins._register_builtin(torch.manual_seed, "aten::manual_seed")
83
+
84
+
85
+ def reset() -> None:
86
+ """
87
+ Clear all compile caches and restore initial state. This function is intended
88
+ to reset Dynamo's state *as if* you had started a fresh process invocation, which
89
+ makes it good for testing scenarios where you want to behave as if you started
90
+ a new process. It does NOT affect any file system caches.
91
+
92
+ NB: this does NOT reset logging state. Don't use this to test logging
93
+ initialization/reinitialization.
94
+ """
95
+ # TODO: https://github.com/pytorch/pytorch/issues/139200
96
+ import logging
97
+
98
+ log = logging.getLogger(__name__)
99
+ log.info("torch._dynamo.reset")
100
+ with convert_frame.compile_lock:
101
+ reset_code_caches()
102
+ convert_frame.input_codes.clear()
103
+ reset_code_state()
104
+ convert_frame.output_codes.clear()
105
+ orig_code_map.clear()
106
+ guard_failures.clear()
107
+ graph_break_reasons.clear()
108
+ resume_execution.ContinueExecutionCache.cache.clear()
109
+ _reset_guarded_backend_cache()
110
+ reset_frame_count()
111
+ torch._C._dynamo.compiled_autograd.clear_cache()
112
+ convert_frame.FRAME_COUNTER = 0
113
+ convert_frame.FRAME_COMPILE_COUNTER.clear()
114
+ callback_handler.clear()
115
+ GenerationTracker.clear()
116
+ TensorifyState.clear()
117
+ torch._dynamo.utils.warn_once_cache.clear()
118
+ torch._dynamo.utils.user_obj_id_to_weakref.clear()
119
+ torch._C._autograd._saved_tensors_hooks_set_tracing(False)
120
+
121
+
122
+ def reset_code_caches() -> None:
123
+ """
124
+ Clears in-memory code cache, which is what stores compiled products. This
125
+ resets less state than :func:`reset` and is mostly only used for testing
126
+ purposes.
127
+ """
128
+ # TODO: https://github.com/pytorch/pytorch/issues/139200
129
+ import logging
130
+
131
+ log = logging.getLogger(__name__)
132
+ log.info("torch._dynamo.reset_code_caches")
133
+ """Clear compile caches that are keyed by code objects"""
134
+ with convert_frame.compile_lock:
135
+ reset_code_state()
136
+ for weak_code in (
137
+ convert_frame.input_codes.seen + convert_frame.output_codes.seen
138
+ ):
139
+ code = weak_code()
140
+ if code:
141
+ reset_code(code)
142
+ code_context.clear()
phi4/lib/python3.10/site-packages/torch/_dynamo/callback.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from contextlib import contextmanager
2
+ from dataclasses import dataclass, field # noqa: F811
3
+ from typing import Any, Callable, Generator, List
4
+
5
+
6
+ @dataclass
7
+ class CompilationCallbackHandler:
8
+ start_callbacks: List[Callable[[], None]] = field(default_factory=list)
9
+ end_callbacks: List[Callable[[], None]] = field(default_factory=list)
10
+
11
+ def register_start_callback(
12
+ self, callback: Callable[[], None]
13
+ ) -> Callable[[], None]:
14
+ """
15
+ Register a callback function to be called when the compilation starts.
16
+
17
+ Args:
18
+ - callback (Callable): The callback function to register.
19
+ """
20
+ self.start_callbacks.append(callback)
21
+ return callback
22
+
23
+ def register_end_callback(self, callback: Callable[[], None]) -> Callable[[], None]:
24
+ """
25
+ Register a callback function to be called when the compilation ends.
26
+
27
+ Args:
28
+ - callback (Callable): The callback function to register.
29
+ """
30
+ self.end_callbacks.append(callback)
31
+ return callback
32
+
33
+ def remove_start_callback(self, callback: Callable[[], None]) -> None:
34
+ """
35
+ Remove a registered start callback function.
36
+
37
+ Args:
38
+ - callback (Callable): The callback function to remove.
39
+ """
40
+ self.start_callbacks.remove(callback)
41
+
42
+ def remove_end_callback(self, callback: Callable[[], None]) -> None:
43
+ """
44
+ Remove a registered end callback function.
45
+
46
+ Args:
47
+ - callback (Callable): The callback function to remove.
48
+ """
49
+ self.end_callbacks.remove(callback)
50
+
51
+ def run_start_callbacks(self) -> None:
52
+ """
53
+ Execute all registered start callbacks.
54
+ """
55
+ for callback in self.start_callbacks:
56
+ callback()
57
+
58
+ def run_end_callbacks(self) -> None:
59
+ """
60
+ Execute all registered end callbacks.
61
+ """
62
+ for callback in self.end_callbacks:
63
+ callback()
64
+
65
+ @contextmanager
66
+ def install_callbacks(self) -> Generator[None, Any, Any]:
67
+ """
68
+ Context manager to install the callbacks and run them when the context is exited.
69
+ """
70
+ try:
71
+ self.run_start_callbacks()
72
+ yield
73
+ finally:
74
+ self.run_end_callbacks()
75
+
76
+ def clear(self) -> None:
77
+ """
78
+ Clear all registered callbacks.
79
+ """
80
+ self.start_callbacks.clear()
81
+ self.end_callbacks.clear()
82
+
83
+
84
+ callback_handler = CompilationCallbackHandler()
85
+
86
+
87
+ def on_compile_start(callback: Callable[[], None]) -> Callable[[], None]:
88
+ """
89
+ Decorator to register a callback function for the start of the compilation.
90
+ """
91
+ callback_handler.register_start_callback(callback)
92
+ return callback
93
+
94
+
95
+ def on_compile_end(callback: Callable[[], None]) -> Callable[[], None]:
96
+ """
97
+ Decorator to register a callback function for the end of the compilation.
98
+ """
99
+ callback_handler.register_end_callback(callback)
100
+ return callback
phi4/lib/python3.10/site-packages/torch/_dynamo/current_scope_id.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import threading
3
+ from typing import Generator
4
+
5
+
6
+ # Global variable to identify which SubgraphTracer we are in.
7
+ # It is sometimes difficult to find an InstructionTranslator to use.
8
+ _current_scope_id = threading.local()
9
+
10
+
11
+ def current_scope_id() -> int:
12
+ global _current_scope_id
13
+ if not hasattr(_current_scope_id, "value"):
14
+ _current_scope_id.value = 1
15
+ return _current_scope_id.value
16
+
17
+
18
+ @contextlib.contextmanager
19
+ def enter_new_scope() -> Generator[None, None, None]:
20
+ global _current_scope_id
21
+ try:
22
+ _current_scope_id.value = current_scope_id() + 1
23
+ yield
24
+ finally:
25
+ _current_scope_id.value = current_scope_id() - 1
phi4/lib/python3.10/site-packages/torch/_dynamo/decorators.py ADDED
@@ -0,0 +1,634 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ # ruff: noqa: TCH004
3
+ import functools
4
+ import inspect
5
+ from dataclasses import dataclass
6
+ from typing import Any, Callable, Dict, Type, TYPE_CHECKING, TypeVar
7
+
8
+ import torch
9
+ from torch.utils._contextlib import _DecoratorContextManager
10
+ from torch.utils._python_dispatch import is_traceable_wrapper_subclass
11
+
12
+ from . import trace_rules, variables
13
+ from .comptime import comptime
14
+ from .eval_frame import (
15
+ _set_stance,
16
+ DisableContext,
17
+ DynamoStance,
18
+ innermost_fn,
19
+ RunOnlyContext,
20
+ )
21
+ from .exc import IncorrectUsage
22
+ from .external_utils import is_compiling
23
+ from .utils import is_function
24
+
25
+
26
+ if TYPE_CHECKING:
27
+ from types import FunctionType
28
+
29
+ from torch._C._dynamo.eval_frame import ( # noqa: F401
30
+ reset_code,
31
+ set_eval_frame,
32
+ set_guard_error_hook,
33
+ skip_code,
34
+ unsupported,
35
+ )
36
+
37
+ from .variables import VariableTracker
38
+ else:
39
+ for name in dir(torch._C._dynamo.eval_frame):
40
+ if name.startswith("__"):
41
+ continue
42
+ globals()[name] = getattr(torch._C._dynamo.eval_frame, name)
43
+
44
+
45
+ _F = TypeVar("_F", bound=Callable[..., Any])
46
+
47
+
48
+ def run(fn=None):
49
+ """Don't do any dynamic compiles, just use prior optimizations"""
50
+ if fn is not None:
51
+ fn = innermost_fn(fn)
52
+ assert callable(fn)
53
+ return RunOnlyContext()(fn)
54
+ return RunOnlyContext()
55
+
56
+
57
+ def disable(fn=None, recursive=True):
58
+ """
59
+ Decorator to disable TorchDynamo
60
+
61
+ If recursive=True, Dynamo is completely skipped on the decorated function
62
+ frame as well as the recursively invoked functions.
63
+
64
+ If recursive=False, Dynamo skips frames associated with the function code,
65
+ but still process recursively invoked frames.
66
+ """
67
+ if recursive:
68
+ if fn is not None:
69
+ fn = innermost_fn(fn)
70
+ assert callable(fn)
71
+ return DisableContext()(fn)
72
+ return DisableContext()
73
+ else:
74
+ return skip(fn)
75
+
76
+
77
+ def skip(fn=None):
78
+ """
79
+ Skip frames associated with the function code, but still process recursively
80
+ invoked frames
81
+ """
82
+ if fn is None:
83
+ return skip
84
+ fn = innermost_fn(fn)
85
+ assert callable(fn)
86
+ skip_code(fn.__code__)
87
+ fn._torchdynamo_disable = True
88
+ return fn
89
+
90
+
91
+ class set_stance(_DecoratorContextManager):
92
+ """
93
+ Decorator, context manager, function to set the current stance of the compiler.
94
+
95
+ Stances documented in corresponding function in torch/compiler/__init__.py
96
+ """
97
+
98
+ _dynamo_forbidden = True
99
+
100
+ def __init__(
101
+ self,
102
+ stance: str = "default",
103
+ *,
104
+ skip_guard_eval_unsafe: bool = False,
105
+ force_backend=None,
106
+ ) -> None:
107
+ if force_backend is not None and stance != "default":
108
+ raise RuntimeError("non-default stance cannot have force_backend set")
109
+
110
+ self.stance = DynamoStance(stance, skip_guard_eval_unsafe, force_backend)
111
+ self.prev = _set_stance(self.stance)
112
+
113
+ def __call__(self, fn):
114
+ _set_stance(self.prev)
115
+ wrapper = super().__call__(fn)
116
+ # forbid wrapper in graph
117
+ wrapper._dynamo_forbidden = True # type: ignore[attr-defined]
118
+ return wrapper
119
+
120
+ def __enter__(self):
121
+ _set_stance(self.stance)
122
+
123
+ def __exit__(self, exc_type, exc_val, exc_tb):
124
+ _set_stance(self.prev)
125
+
126
+ def clone(self):
127
+ return self.__class__(self.stance.stance, force_backend=self.stance.backend)
128
+
129
+
130
+ def assume_constant_result(fn):
131
+ fn._dynamo_marked_constant = True
132
+ return fn
133
+
134
+
135
+ def allow_in_graph(fn):
136
+ """
137
+ Tells the compiler frontend (Dynamo) to skip symbolic introspection of the function
138
+ and instead directly write it to the graph when encountered.
139
+
140
+ See :func:`torch.compiler.allow_in_graph`'s docstring for the full documentation
141
+
142
+ WARNING: this API can be a footgun, please read the documentation carefully.
143
+ """
144
+ if isinstance(fn, (list, tuple)):
145
+ return [allow_in_graph(x) for x in fn]
146
+ assert callable(fn), "allow_in_graph expects a callable"
147
+ if trace_rules.lookup_callable(fn) != variables.TorchInGraphFunctionVariable:
148
+ trace_rules._disallowed_callable_ids.remove(id(fn))
149
+ trace_rules._allowed_callable_ids.add(id(fn))
150
+ return fn
151
+
152
+
153
+ def _disallow_in_graph_helper(throw_if_not_allowed):
154
+ def inner(fn):
155
+ if isinstance(fn, (list, tuple)):
156
+ return [disallow_in_graph(x) for x in fn]
157
+ assert callable(fn), "disallow_in_graph expects a callable"
158
+ if (
159
+ throw_if_not_allowed
160
+ and trace_rules.lookup_callable(fn)
161
+ != variables.TorchInGraphFunctionVariable
162
+ and trace_rules.lookup(fn) != variables.TorchInGraphFunctionVariable
163
+ ):
164
+ raise IncorrectUsage(
165
+ "disallow_in_graph is expected to be used on an already allowed callable (like torch.* ops). "
166
+ "Allowed callables means callables that TorchDynamo puts as-is in the extracted graph."
167
+ )
168
+ trace_rules._allowed_callable_ids.remove(id(fn))
169
+ trace_rules._disallowed_callable_ids.add(id(fn))
170
+ return fn
171
+
172
+ return inner
173
+
174
+
175
+ def disallow_in_graph(fn):
176
+ """
177
+ Customize which functions TorchDynamo will exclude in the generated
178
+ graph and force a graph break on.
179
+ ::
180
+
181
+ torch._dynamo.disallow_in_graph(torch.sub)
182
+
183
+ @torch._dynamo.optimize(...)
184
+ def fn(a):
185
+ x = torch.add(x, 1)
186
+ x = torch.sub(x, 1)
187
+ x = torch.add(x, 1)
188
+ return x
189
+
190
+ fn(...)
191
+
192
+ Will break the graph on `torch.sub`, and give two graphs each with a
193
+ single `torch.add()` op.
194
+ """
195
+ return _disallow_in_graph_helper(throw_if_not_allowed=True)(fn)
196
+
197
+
198
+ @_disallow_in_graph_helper(throw_if_not_allowed=False)
199
+ def graph_break():
200
+ """Force a graph break"""
201
+
202
+
203
+ def forbid_in_graph(fn):
204
+ """
205
+ Customize which functions TorchDynamo will assert are not present while tracing.
206
+
207
+ If you want a graph break on this function instead, use disallow_in_graph.
208
+ TODO(voz): We now have allow_in_graph, disallow_in_graph, forbid_in_graph - some more robust
209
+ documentation would not be amiss.
210
+ """
211
+ if isinstance(fn, (list, tuple)):
212
+ return [forbid_in_graph(x) for x in fn]
213
+ assert callable(fn), "forbid_in_graph applies only to callables"
214
+ fn._dynamo_forbidden = True
215
+ return fn
216
+
217
+
218
def substitute_in_graph(
    original_fn: _F,
    *,
    can_constant_fold_through: bool = False,
    skip_signature_check: bool = False,
    # type that is embedded in the Python interpreter
    is_embedded_type: bool = False,  # internal use only
) -> Callable[[_F], _F]:
    """
    Register a polyfill handler for a function, usually a C function from the C extension, to be
    used in place of the original function when inlining the original function in the graph.

    .. note::

        The polyfill handler is only used when inlining the original function. It is not used when
        the original function is called directly. In the eager mode, the decorated function calls
        the performant C function rather than the polyfill handler.

    The polyfill handler is a function that will be called in place of the original function when
    inlining the original function. The polyfill handler should have the same signature and the same
    behavior as the original function.

    Args:
        original_fn (callable): The original function, usually a C function, to register a polyfill
            handler for.
        can_constant_fold_through (bool, optional): Whether the polyfill handler can be constant
            folded through. That is, if the polyfill handler is a pure function and its arguments
            are constant, the result of the polyfill handler can be constant folded during the
            compilation. Defaults to ``False``.
        skip_signature_check (bool, optional): Whether to skip the signature check between the
            original function and the polyfill handler. Defaults to ``False``.

    Returns:
        A decorator that registers the polyfill handler for the original function.

    Example::

        >>> # xdoctest: +SKIP("conflict with the tests: duplicate polyfill handlers")
        >>> import operator
        >>> operator.indexOf([1, 2, 3, 4, 5], 3)
        2
        >>> torch.compile(operator.indexOf, fullgraph=True)([1, 2, 3, 4, 5], 3)
        Traceback (most recent call last):
        ...
        torch._dynamo.exc.Unsupported: ...

        >>> @torch.compiler.substitute_in_graph(operator.indexOf)
        ... def indexOf(a, b, /):
        ...     for i, item in enumerate(a):
        ...         if item is b or item == b:
        ...             return i
        ...     raise ValueError("sequence.index(x): x not in sequence")
        >>>
        >>> torch.compile(operator.indexOf, fullgraph=True)([1, 2, 3, 4, 5], 3)
        2
    """
    # Accept either a function, or (internal use only) a class that is a type
    # embedded in the interpreter (e.g. an itertools type).
    if not is_function(original_fn) and not (
        is_embedded_type and inspect.isclass(original_fn)
    ):
        raise TypeError(
            f"substitute_in_graph expects a function but got {type(original_fn)!r}"
        )
    if is_embedded_type:
        if not inspect.isclass(original_fn):
            raise TypeError(
                f"substitute_in_graph expects a class but got {type(original_fn)!r}"
            )

        from .variables.builder import ITERTOOLS_POLYFILLED_TYPE_IDS, ITERTOOLS_TYPE_IDS

        # Record that this itertools type now has a polyfill, so the builder
        # routes it through the polyfill path.
        if id(original_fn) in ITERTOOLS_TYPE_IDS:
            ITERTOOLS_POLYFILLED_TYPE_IDS.add(id(original_fn))

    def wrapper(traceable_fn: _F) -> _F:
        # The handler itself must be a plain (traceable) Python function.
        if not is_function(traceable_fn):
            raise TypeError(
                f"@substitute_in_graph(...) expects a function but got {type(traceable_fn)!r}"
            )

        if not skip_signature_check:
            try:
                original_sig = inspect.signature(original_fn)
            except ValueError:
                # Many C functions expose no signature; nothing to compare.
                pass
            else:
                traceable_sig = inspect.signature(traceable_fn)

                def sig_ident(sig):
                    # Ignore annotations for parameters and return type
                    return (
                        tuple(
                            p.name
                            for p in sig.parameters.values()
                            if (
                                p.kind
                                not in {
                                    p.KEYWORD_ONLY,
                                    # the name of *args and **kwargs is not important
                                    p.VAR_POSITIONAL,
                                    p.VAR_KEYWORD,
                                }
                            )
                        ),
                        {
                            p.name
                            for p in sig.parameters.values()
                            if p.kind == p.KEYWORD_ONLY
                        },
                        {
                            p.name: p.default
                            for p in sig.parameters.values()
                            # the name of *args and **kwargs is not important
                            if p.kind not in {p.VAR_POSITIONAL, p.VAR_KEYWORD}
                        },
                    )

                # A bare (*args, **kwargs) signature on either side matches anything.
                wildcard_sig = inspect.signature(lambda *args, **kwargs: None)

                if (
                    sig_ident(original_sig) != sig_ident(traceable_sig)
                    and sig_ident(original_sig) != sig_ident(wildcard_sig)
                    and sig_ident(traceable_sig) != sig_ident(wildcard_sig)
                ):
                    raise TypeError(
                        f"Signature mismatch between {original_fn} and {traceable_fn}: "
                        f"{original_sig} != {traceable_sig}"
                    )

        from torch._dynamo.guards import GuardBuilder
        from torch._dynamo.trace_rules import (
            _polyfilled_function_ids,
            get_torch_obj_rule_map,
        )
        from torch._dynamo.variables import PolyfilledFunctionVariable
        from torch._dynamo.variables.builder import VariableBuilder

        # Reject duplicate registrations in all three dispatch tables before
        # mutating any of them.
        id_dispatch_map = VariableBuilder._id_dispatch()
        if id(original_fn) in id_dispatch_map:
            raise ValueError(
                f"Duplicate dispatch rule for {original_fn}: "
                "already registered in VariableBuilder's id dispatch map"
            )

        if id(original_fn) in _polyfilled_function_ids:
            raise ValueError(f"Duplicate polyfilled object {original_fn}")

        rule_map: Dict[Any, Type[VariableTracker]] = get_torch_obj_rule_map()
        if original_fn in rule_map:
            raise ValueError(
                f"Duplicate object {original_fn} with different rules: "
                f"{PolyfilledFunctionVariable}, {rule_map[original_fn]}"
            )

        polyfill_handlers: Dict[Callable[..., Any], FunctionType]
        polyfill_handlers = PolyfilledFunctionVariable._get_polyfill_handlers()
        if original_fn in polyfill_handlers:
            raise ValueError(
                f"Duplicate polyfill handlers for {original_fn}: "
                f"already handled by {polyfill_handlers[original_fn]}"
            )

        # Need to wrap the function because we may not be able to assign
        # __torch_dynamo_polyfill__ to a C function.
        @functools.wraps(traceable_fn)
        def wrapped(*args, **kwargs):
            # Eager mode: call the performant original, not the polyfill.
            return original_fn(*args, **kwargs)

        def dispatch_fn(self, value: _F) -> PolyfilledFunctionVariable:
            return PolyfilledFunctionVariable(
                value,
                source=self.source,
                **self.install_guards(GuardBuilder.FUNCTION_MATCH),
            )

        # Register both the original and the wrapper so either spelling is
        # intercepted when traced.
        id_dispatch_map[id(original_fn)] = id_dispatch_map[id(wrapped)] = dispatch_fn
        _polyfilled_function_ids.add(id(original_fn))
        _polyfilled_function_ids.add(id(wrapped))
        rule_map[original_fn] = rule_map[wrapped] = PolyfilledFunctionVariable
        polyfill_handlers[original_fn] = polyfill_handlers[wrapped] = wrapped  # type: ignore[assignment]

        # Back-pointers consumed by PolyfilledFunctionVariable.
        wrapped.__torch_dynamo_original__ = original_fn  # type: ignore[attr-defined]
        wrapped.__torch_dynamo_polyfill__ = traceable_fn  # type: ignore[attr-defined]
        wrapped.__torch_dynamo_can_constant_fold_through__ = can_constant_fold_through  # type: ignore[attr-defined]

        return wrapped  # type: ignore[return-value]

    return wrapper
405
+
406
+
407
# Shared helper for the marking APIs below: flatten a tensor subclass and
# invoke `func` on every inner tensor whose dim matches the outer tensor's.
def _apply_func_to_inner_tensors_of_same_dim(func, t, *args, **kwargs):
    assert is_traceable_wrapper_subclass(t)

    attrs, _ctx = t.__tensor_flatten__()
    assert isinstance(t, torch.Tensor)
    outer_dim = t.dim()
    for attr_name in attrs:
        inner = getattr(t, attr_name)
        if inner.dim() == outer_dim:
            func(inner, *args, **kwargs)
419
+
420
+
421
@dataclass(frozen=True)
class _DimRange:
    """
    Represents a dimension of a tensor and the corresponding
    min and max values it can take. Don't create this
    class directly; instead, use :func:`mark_dynamic`.
    """

    # index of the dimension on the tensor
    dim: int
    # lower bound for the dim's size; may be None (unconstrained) as passed by mark_dynamic
    min: int
    # upper bound for the dim's size; may be None (unconstrained) as passed by mark_dynamic
    max: int
432
+
433
+
434
+ @forbid_in_graph
435
+ def mark_unbacked(t, index):
436
+ """
437
+ Mark a tensor as having an unbacked dim. This changes the semantics of operations,
438
+ we will always report the size does not equal zero/one, we will turn asserts
439
+ on this index into runtime asserts, and if you try to get the real value we will
440
+ raise an exception. In other words, we will treat this dimension as if it was
441
+ data dependent (we do not know anything about its value.)
442
+ """
443
+ # You could have copied the mark_dynamic behavior but I'm not convinced
444
+ # it's what you want
445
+ assert not is_traceable_wrapper_subclass(t), "not implemented yet"
446
+
447
+ if isinstance(index, int):
448
+ if not hasattr(t, "_dynamo_unbacked_indices"):
449
+ t._dynamo_unbacked_indices = set()
450
+ t._dynamo_unbacked_indices.add(index)
451
+ return
452
+
453
+ assert isinstance(index, (list, tuple))
454
+ for i in index:
455
+ mark_unbacked(t, i)
456
+
457
+
458
+ @forbid_in_graph
459
+ def mark_dynamic(t, index, *, min=None, max=None):
460
+ """
461
+ Mark a tensor as having a dynamic dim and set corresponding min and max range for the dim.
462
+
463
+ [Note - on the state of mark_dynamic]
464
+
465
+ The behavior of having a dynamic dimension on a tensor is governed by a few factors:
466
+
467
+ 1) torch._dynamo.config dynamic_shapes True or False.
468
+ a) dynamic_shapes=True - dynamic_shapes must be True for mark_dynamic to work.
469
+ a) dynamic_shapes=False - This config will raise an exception when used in conjunction with
470
+ mark_dynamic. We will eventually support this.
471
+
472
+ 2) If the dimension is fully constrained - as in, it does not allow more than a single value
473
+ in both eager (torch.compile, torch._dynamo.optimize) mode and export mode (torch._dynamo.export),
474
+ we will raise an error
475
+
476
+ 3) If the dimension is partially constrained - allowing at least 2 values but not the full unbounded
477
+ range of shapes, in eager we will pass it through, but export will raise an error.
478
+
479
+ 4) Attempts to trace this function will explicitly raise. As such, all calls to mark_dynamic must be made
480
+ before torch.compile.
481
+
482
+ """
483
+ if is_traceable_wrapper_subclass(t):
484
+ # default behavior: mirror mark_dynamic() on all inner tensors with same dim as t
485
+ # TODO: Make this configurable via a supported public API
486
+ _apply_func_to_inner_tensors_of_same_dim(
487
+ mark_dynamic, t, index, min=min, max=max
488
+ )
489
+
490
+ if isinstance(index, int):
491
+ if not hasattr(t, "_dynamo_dynamic_indices"):
492
+ t._dynamo_dynamic_indices = set()
493
+ t._dynamo_dynamic_range = set()
494
+ # TODO(voz): Should we bounds check?
495
+ t._dynamo_dynamic_indices.add(index)
496
+ t._dynamo_dynamic_range.add(_DimRange(index, min, max))
497
+ return
498
+
499
+ assert isinstance(index, (list, tuple))
500
+ for i in index:
501
+ mark_dynamic(t, i, min=min, max=max)
502
+
503
+
504
+ @forbid_in_graph
505
+ def maybe_mark_dynamic(t, index):
506
+ """
507
+ Mark a tensor as having a dynamic dim, but don't enforce it (i.e., if this
508
+ dimension ends up getting specialized, don't error).
509
+ """
510
+ if is_traceable_wrapper_subclass(t):
511
+ # default behavior: mirror maybe_mark_dynamic() on all inner tensors with same dim as t
512
+ # TODO: Make this configurable via a supported public API
513
+ _apply_func_to_inner_tensors_of_same_dim(maybe_mark_dynamic, t, index)
514
+
515
+ if isinstance(index, int):
516
+ if not hasattr(t, "_dynamo_weak_dynamic_indices"):
517
+ t._dynamo_weak_dynamic_indices = set()
518
+ # TODO(voz): Should we bounds check?
519
+ t._dynamo_weak_dynamic_indices.add(index)
520
+ return
521
+
522
+ assert isinstance(index, (list, tuple))
523
+ for i in index:
524
+ maybe_mark_dynamic(t, i)
525
+
526
+
527
+ def mark_static(t, index=None):
528
+ """
529
+ Mark a tensor as having a static dim or mark a nn module class as static.
530
+
531
+ For tensors
532
+ ===========
533
+ This will prevent us from attempting to compile it dynamically
534
+ when dynamic=True; this can improve trace-time performance.
535
+
536
+ This has lower precedence than mark_dynamic.
537
+
538
+ Unlike mark_dynamic, this can be done inside a graph, in which case it
539
+ induces specialization on the tensor.
540
+
541
+ For nn.Module classes
542
+ =====================
543
+ For static nn.Module classes, TorchDynamo assumes that the module instance
544
+ attributes will not be modified after compilation. This will ensure that
545
+ TorchDynamo keeps integer attributes CONSTANT and not symints.
546
+
547
+ From TorchDynamo implementation side, the instances of static-marked
548
+ nn.Module class will be converted to UnspecializedBuiltinNNModuleVariable,
549
+ which have the same properties.
550
+
551
+ Note that we still have to guard on the attributes, because different
552
+ instances of the nn.Module can have different values of the attributes. The
553
+ key point here is that the attributes are static.
554
+ """
555
+ if is_compiling():
556
+ if index is None:
557
+ for s in t.size():
558
+ comptime.force_static(s)
559
+ else:
560
+ comptime.force_static(t.size(index))
561
+ return
562
+
563
+ if is_traceable_wrapper_subclass(t):
564
+ # default behavior: mirror mark_static() on all inner tensors with same dim as t
565
+ # TODO: Make this configurable via a supported public API
566
+ _apply_func_to_inner_tensors_of_same_dim(mark_static, t, index)
567
+
568
+ if not isinstance(t, torch.Tensor) and issubclass(t, torch.nn.Module):
569
+ t._dynamo_marked_static = True
570
+ return t
571
+
572
+ if not isinstance(t, torch.Tensor):
573
+ raise TypeError(
574
+ f"mark_static expects a tensor/nn.Module class but recieved {type(t)}"
575
+ )
576
+
577
+ if isinstance(index, int):
578
+ if not hasattr(t, "_dynamo_static_indices"):
579
+ t._dynamo_static_indices = set() # type: ignore[attr-defined]
580
+ # TODO(voz): Should we bounds check?
581
+ t._dynamo_static_indices.add(index) # type: ignore[attr-defined]
582
+ elif index is None:
583
+ for i in range(t.dim()):
584
+ mark_static(t, i)
585
+ else:
586
+ assert isinstance(index, (list, tuple))
587
+ for i in index:
588
+ mark_static(t, i)
589
+
590
+
591
+ @forbid_in_graph
592
+ def mark_static_address(t, guard=True):
593
+ """
594
+ Marks an input tensor whose data_ptr will not change across multiple calls
595
+ to a dynamo-compiled function. This indicates to cudagraphs that an extra allocation
596
+ is not needed for this input. The data_ptr will be guarded if guard=True. Note:
597
+ Tensors marked in this way will be kept alive until `torch._dynamo.reset()` is called.
598
+ """
599
+ if not isinstance(t, torch.Tensor):
600
+ raise TypeError(f"mark_static_address expects a tensor but recieved {type(t)}")
601
+
602
+ if guard:
603
+ t._dynamo_static_input_type = "guarded" # type: ignore[attr-defined]
604
+ else:
605
+ t._dynamo_static_input_type = "unguarded" # type: ignore[attr-defined]
606
+
607
+
608
# Note: this carefully avoids eagerly importing einops.
# TODO: we should delete this whole _allow_in_graph_einops logic by approximately 2024 Q2
def _allow_in_graph_einops():
    import einops

    try:
        # requires einops > 0.6.1, torch >= 2.0
        from einops._torch_specific import (  # type: ignore[attr-defined] # noqa: F401
            _ops_were_registered_in_torchdynamo,
        )

        # einops > 0.6.1 will call the op registration logic as it is imported.
    except ImportError:
        # einops <= 0.6.1: allow-list its public API piecewise. repeat exists
        # since 0.2.0, einsum since 0.5.0, pack/unpack since 0.6.0.
        allow_in_graph(einops.rearrange)
        allow_in_graph(einops.reduce)
        for api_name in ("repeat", "einsum", "pack", "unpack"):
            api_fn = getattr(einops, api_name, None)
            if api_fn is not None:
                allow_in_graph(api_fn)
632
+
633
+
634
+ trace_rules.add_module_init_func("einops", _allow_in_graph_einops)
phi4/lib/python3.10/site-packages/torch/_dynamo/device_interface.py ADDED
@@ -0,0 +1,381 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import time
3
+ from dataclasses import dataclass
4
+ from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Type, Union
5
+
6
+ import torch
7
+
8
+
9
+ get_cuda_stream: Optional[Callable[[int], int]]
10
+ if torch.cuda._is_compiled():
11
+ from torch._C import _cuda_getCurrentRawStream as get_cuda_stream
12
+ else:
13
+ get_cuda_stream = None
14
+
15
+ _device_t = Union[torch.device, str, int, None]
16
+
17
+ # Recording the device properties in the main process but used in worker process.
18
+ caching_worker_device_properties: Dict[str, Any] = {}
19
+ caching_worker_current_devices: Dict[str, int] = {}
20
+
21
+
22
class DeviceInterface:
    """
    This is a simple device runtime interface for Inductor. It enables custom
    backends to be integrated with Inductor in a device-agnostic semantic.

    Subclasses bind the stubs below to the real ``torch.<backend>`` functions
    (see CudaInterface / XpuInterface / CpuInterface in this file).
    """

    class device:
        # Placeholder for the backend's device context object (e.g. torch.cuda.device).
        def __new__(cls, device: _device_t):
            raise NotImplementedError

    class Event:
        def __new__(cls, *args, **kwargs):
            raise NotImplementedError(
                "Event should be inherited from torch.Event, otherwise, it couldn't be captured by dynamo."
            )

    class Stream:
        def __new__(cls, *args, **kwargs):
            raise NotImplementedError(
                "Stream should be inherited from torch.Stream, otherwise, it couldn't be captured by dynamo."
            )

    class Worker:
        """
        Worker API to query device properties that will work in multi processing
        workers that cannot use the GPU APIs (due to processing fork() and
        initialization time issues). Properties are recorded in the main process
        before we fork the workers.
        """

        @staticmethod
        def set_device(device: int):
            raise NotImplementedError

        @staticmethod
        def current_device() -> int:
            raise NotImplementedError

        @staticmethod
        def get_device_properties(device: _device_t = None):
            raise NotImplementedError

    # Process-level (non-Worker) variants below; these may call the real
    # device APIs directly.
    @staticmethod
    def current_device():
        raise NotImplementedError

    @staticmethod
    def set_device(device: _device_t):
        raise NotImplementedError

    @staticmethod
    def maybe_exchange_device(device: int) -> int:
        raise NotImplementedError

    @staticmethod
    def exchange_device(device: int) -> int:
        raise NotImplementedError

    @staticmethod
    def device_count():
        raise NotImplementedError

    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError

    @staticmethod
    def stream(stream: torch.Stream):
        raise NotImplementedError

    @staticmethod
    def current_stream():
        raise NotImplementedError

    @staticmethod
    def set_stream(stream: torch.Stream):
        raise NotImplementedError

    @staticmethod
    def _set_stream_by_id(stream_id: int, device_index: int, device_type: int):
        raise NotImplementedError

    @staticmethod
    def get_raw_stream(device_idx: int) -> int:
        raise NotImplementedError

    @staticmethod
    def synchronize(device: _device_t = None):
        raise NotImplementedError

    # Default implementation: defer to the Worker's recorded properties.
    @classmethod
    def get_device_properties(cls, device: _device_t = None):
        return cls.Worker.get_device_properties(device)

    @staticmethod
    def get_compute_capability(device: _device_t = None):
        raise NotImplementedError

    @staticmethod
    def is_bf16_supported(including_emulation: bool = False):
        raise NotImplementedError

    @staticmethod
    def memory_allocated(device: _device_t = None) -> int:
        raise NotImplementedError
127
+
128
+
129
+ class DeviceGuard:
130
+ """
131
+ This class provides a context manager for device switching. This is a stripped
132
+ down version of torch.{device_name}.device.
133
+
134
+ The context manager changes the current device to the given device index
135
+ on entering the context and restores the original device on exiting.
136
+ The device is switched using the provided device interface.
137
+ """
138
+
139
+ def __init__(
140
+ self, device_interface: Type[DeviceInterface], index: Optional[int]
141
+ ) -> None:
142
+ self.device_interface = device_interface
143
+ self.idx = index
144
+ self.prev_idx = -1
145
+
146
+ def __enter__(self):
147
+ if self.idx is not None:
148
+ self.prev_idx = self.device_interface.exchange_device(self.idx)
149
+
150
+ def __exit__(self, type: Any, value: Any, traceback: Any):
151
+ if self.idx is not None:
152
+ self.idx = self.device_interface.maybe_exchange_device(self.prev_idx)
153
+ return False
154
+
155
+
156
+ class CudaInterface(DeviceInterface):
157
+ device = torch.cuda.device
158
+
159
+ # register Event and Stream class into the backend interface
160
+ # make sure Event and Stream are implemented and inherited from the torch.Event and torch.Stream
161
+ Event = torch.cuda.Event
162
+ Stream = torch.cuda.Stream
163
+
164
+ class Worker:
165
+ @staticmethod
166
+ def set_device(device: int):
167
+ caching_worker_current_devices["cuda"] = device
168
+
169
+ @staticmethod
170
+ def current_device() -> int:
171
+ if "cuda" in caching_worker_current_devices:
172
+ return caching_worker_current_devices["cuda"]
173
+ return torch.cuda.current_device()
174
+
175
+ @staticmethod
176
+ def get_device_properties(device: _device_t = None):
177
+ if device is not None:
178
+ if isinstance(device, str):
179
+ device = torch.device(device)
180
+ assert device.type == "cuda"
181
+ if isinstance(device, torch.device):
182
+ device = device.index
183
+ if device is None:
184
+ device = CudaInterface.Worker.current_device()
185
+
186
+ if "cuda" not in caching_worker_device_properties:
187
+ device_prop = [
188
+ torch.cuda.get_device_properties(i)
189
+ for i in range(torch.cuda.device_count())
190
+ ]
191
+ caching_worker_device_properties["cuda"] = device_prop
192
+
193
+ return caching_worker_device_properties["cuda"][device]
194
+
195
+ current_device = staticmethod(torch.cuda.current_device)
196
+ set_device = staticmethod(torch.cuda.set_device)
197
+ device_count = staticmethod(torch.cuda.device_count)
198
+ stream = staticmethod(torch.cuda.stream) # type: ignore[assignment]
199
+ current_stream = staticmethod(torch.cuda.current_stream)
200
+ set_stream = staticmethod(torch.cuda.set_stream) # type: ignore[assignment]
201
+ _set_stream_by_id = staticmethod(torch.cuda._set_stream_by_id) # type: ignore[assignment]
202
+ synchronize = staticmethod(torch.cuda.synchronize)
203
+ get_device_properties = staticmethod(torch.cuda.get_device_properties) # type: ignore[assignment]
204
+ get_raw_stream = staticmethod(get_cuda_stream) # type: ignore[assignment, arg-type]
205
+ exchange_device = staticmethod(torch.cuda._exchange_device) # type: ignore[arg-type]
206
+ maybe_exchange_device = staticmethod(torch.cuda._maybe_exchange_device) # type: ignore[arg-type]
207
+ memory_allocated = staticmethod(torch.cuda.memory_allocated)
208
+ is_bf16_supported = staticmethod(torch.cuda.is_bf16_supported) # type: ignore[arg-type]
209
+
210
+ # Can be mock patched by @patch decorator.
211
+ @staticmethod
212
+ def is_available() -> bool:
213
+ return torch.cuda.is_available()
214
+
215
+ @staticmethod
216
+ def get_compute_capability(device: _device_t = None):
217
+ if torch.version.hip is None:
218
+ major, min = torch.cuda.get_device_capability(device)
219
+ return major * 10 + min
220
+ else:
221
+ return torch.cuda.get_device_properties(device).gcnArchName.split(":", 1)[0]
222
+
223
+
224
+ get_xpu_stream: Optional[Callable[[int], int]]
225
+ if torch.xpu._is_compiled():
226
+ from torch._C import _xpu_getCurrentRawStream as get_xpu_stream
227
+ else:
228
+ get_xpu_stream = None
229
+
230
+
231
class XpuInterface(DeviceInterface):
    """DeviceInterface implementation backed by torch.xpu."""

    device = torch.xpu.device
    # Event/Stream must derive from torch.Event/torch.Stream so dynamo can capture them.
    Event = torch.xpu.Event
    Stream = torch.xpu.Stream

    class Worker:
        """Worker-process-safe queries; reads caches recorded in the main process."""

        @staticmethod
        def set_device(device: int):
            caching_worker_current_devices["xpu"] = device

        @staticmethod
        def current_device() -> int:
            if "xpu" in caching_worker_current_devices:
                return caching_worker_current_devices["xpu"]
            return torch.xpu.current_device()

        @staticmethod
        def get_device_properties(device: _device_t = None):
            # Normalize str/torch.device inputs down to a bare index.
            if device is not None:
                if isinstance(device, str):
                    device = torch.device(device)
                    assert device.type == "xpu"
                if isinstance(device, torch.device):
                    device = device.index
            if device is None:
                device = XpuInterface.Worker.current_device()

            # Record properties for all devices once so forked workers never
            # touch the XPU APIs themselves.
            if "xpu" not in caching_worker_device_properties:
                device_prop = [
                    torch.xpu.get_device_properties(i)
                    for i in range(torch.xpu.device_count())
                ]
                caching_worker_device_properties["xpu"] = device_prop

            return caching_worker_device_properties["xpu"][device]

    # Process-level bindings straight to torch.xpu.
    current_device = staticmethod(torch.xpu.current_device)
    set_device = staticmethod(torch.xpu.set_device)
    device_count = staticmethod(torch.xpu.device_count)
    stream = staticmethod(torch.xpu.stream)  # type: ignore[assignment]
    current_stream = staticmethod(torch.xpu.current_stream)
    set_stream = staticmethod(torch.xpu.set_stream)  # type: ignore[assignment]
    _set_stream_by_id = staticmethod(torch.xpu._set_stream_by_id)  # type: ignore[assignment]
    synchronize = staticmethod(torch.xpu.synchronize)
    get_device_properties = staticmethod(torch.xpu.get_device_properties)  # type: ignore[assignment]
    get_raw_stream = staticmethod(get_xpu_stream)  # type: ignore[assignment, arg-type]
    exchange_device = staticmethod(torch.xpu._exchange_device)  # type: ignore[arg-type]
    maybe_exchange_device = staticmethod(torch.xpu._maybe_exchange_device)  # type: ignore[arg-type]
    memory_allocated = staticmethod(torch.xpu.memory_allocated)

    # Can be mock patched by @patch decorator.
    @staticmethod
    def is_available() -> bool:
        return torch.xpu.is_available()

    @staticmethod
    def get_compute_capability(device: _device_t = None):
        cc = torch.xpu.get_device_capability(device)
        return cc

    @staticmethod
    def is_bf16_supported(including_emulation: bool = False) -> bool:
        return torch.xpu.is_bf16_supported()
294
+
295
+
296
@dataclass
class CpuDeviceProperties:
    """Device properties reported for the CPU backend."""

    # number of logical CPUs, as reported by multiprocessing.cpu_count()
    multi_processor_count: int
299
+
300
+
301
+ class CpuInterface(DeviceInterface):
302
+ class Event(torch.Event):
303
+ def __init__(self, enable_timing=True):
304
+ self.time = 0.0
305
+
306
+ def elapsed_time(self, end_event) -> float:
307
+ return (end_event.time - self.time) * 1000
308
+
309
+ def record(self, stream=None):
310
+ self.time = time.perf_counter()
311
+
312
+ @staticmethod
313
+ def is_available() -> bool:
314
+ return True
315
+
316
+ @staticmethod
317
+ def get_compute_capability(device: _device_t = None) -> str:
318
+ return ""
319
+
320
+ @staticmethod
321
+ def get_raw_stream(device_idx) -> int:
322
+ return 0
323
+
324
+ @staticmethod
325
+ def current_device():
326
+ return 0
327
+
328
+ @staticmethod
329
+ def synchronize(device: _device_t = None):
330
+ pass
331
+
332
+ class Worker:
333
+ @staticmethod
334
+ def get_device_properties(device: _device_t = None):
335
+ import multiprocessing
336
+
337
+ cpu_count = multiprocessing.cpu_count()
338
+ return CpuDeviceProperties(cpu_count)
339
+
340
+
341
+ device_interfaces: Dict[str, Type[DeviceInterface]] = {}
342
+ _device_initialized = False
343
+
344
+
345
+ def register_interface_for_device(
346
+ device: Union[str, torch.device], device_interface: Type[DeviceInterface]
347
+ ):
348
+ if isinstance(device, torch.device):
349
+ device = device.type
350
+ device_interfaces[device] = device_interface
351
+
352
+
353
+ def get_interface_for_device(device: Union[str, torch.device]) -> Type[DeviceInterface]:
354
+ if isinstance(device, torch.device):
355
+ device = device.type
356
+ if not _device_initialized:
357
+ init_device_reg()
358
+ if device in device_interfaces:
359
+ return device_interfaces[device]
360
+ raise NotImplementedError(f"No interface for device {device}")
361
+
362
+
363
def get_registered_device_interfaces() -> Iterable[Tuple[str, Type[DeviceInterface]]]:
    """Return (device name, interface) pairs, initializing the registry on first use."""
    if not _device_initialized:
        init_device_reg()
    return device_interfaces.items()
367
+
368
+
369
+ def init_device_reg():
370
+ global _device_initialized
371
+ register_interface_for_device("cuda", CudaInterface)
372
+ for i in range(torch.cuda.device_count()):
373
+ register_interface_for_device(f"cuda:{i}", CudaInterface)
374
+
375
+ register_interface_for_device("xpu", XpuInterface)
376
+ for i in range(torch.xpu.device_count()):
377
+ register_interface_for_device(f"xpu:{i}", XpuInterface)
378
+
379
+ register_interface_for_device("cpu", CpuInterface)
380
+
381
+ _device_initialized = True
phi4/lib/python3.10/site-packages/torch/_dynamo/hooks.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ from typing import Callable, Optional
3
+
4
+ from torch._guards import GuardsSet
5
+
6
+ from .types import GuardFail
7
+
8
+
9
@dataclasses.dataclass
class Hooks:
    """User-supplied callbacks invoked by dynamo around guard handling."""

    # called with a compiled frame's GuardsSet (name suggests export use — confirm at call sites)
    guard_export_fn: Optional[Callable[[GuardsSet], None]] = None
    # called with a GuardFail describing a failed guard
    guard_fail_fn: Optional[Callable[[GuardFail], None]] = None
phi4/lib/python3.10/site-packages/torch/_dynamo/output_graph.py ADDED
The diff for this file is too large to render. See raw diff
 
phi4/lib/python3.10/site-packages/torch/_dynamo/replay_record.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ from dataclasses import field
3
+ from types import CellType, CodeType, ModuleType
4
+ from typing import Any, BinaryIO, Dict, IO, Tuple
5
+ from typing_extensions import Self
6
+
7
+ from torch.utils._import_utils import import_dill
8
+
9
+
10
+ dill = import_dill()
11
+
12
+
13
@dataclasses.dataclass
class ModuleRecord:
    """Tracks a module and the attributes accessed on it during recording."""

    # the real module object being tracked
    module: ModuleType
    # attribute name -> recorded value (nested ModuleRecord for submodules)
    accessed_attrs: Dict[str, Any] = field(default_factory=dict)
17
+
18
+
19
@dataclasses.dataclass
class DummyModule:
    """Stand-in used in place of a real module when a record is resolved."""

    name: str
    is_torch: bool = False

    @property
    def __name__(self) -> str:
        # Mimic ModuleType.__name__ so consumers can treat this like a module.
        return self.name
27
+
28
+
29
@dataclasses.dataclass
class ExecutionRecord:
    """Everything needed to replay a frame: its code object plus captured scopes."""

    code: CodeType
    closure: Tuple[CellType]
    globals: Dict[str, Any] = field(default_factory=dict)
    locals: Dict[str, Any] = field(default_factory=dict)
    builtins: Dict[str, Any] = field(default_factory=dict)
    code_options: Dict[str, Any] = field(default_factory=dict)

    def dump(self, f: IO[str]) -> None:
        """Serialize this record to ``f`` using dill (optional dependency)."""
        assert dill is not None, "replay_record requires `pip install dill`"
        dill.dump(self, f)

    @classmethod
    def load(cls, f: BinaryIO) -> Self:
        """Inverse of :meth:`dump`: deserialize a record from a binary file object."""
        assert dill is not None, "replay_record requires `pip install dill`"
        return dill.load(f)
46
+
47
+
48
+ @dataclasses.dataclass
49
+ class ExecutionRecorder:
50
+ LOCAL_MOD_PREFIX = "___local_mod_"
51
+
52
+ code: CodeType
53
+ closure: Tuple[CellType]
54
+ globals: Dict[str, Any] = field(default_factory=dict)
55
+ locals: Dict[str, Any] = field(default_factory=dict)
56
+ builtins: Dict[str, Any] = field(default_factory=dict)
57
+ code_options: Dict[str, Any] = field(default_factory=dict)
58
+ name_to_modrec: Dict[str, ModuleRecord] = field(default_factory=dict)
59
+
60
+ def add_local_var(self, name: str, var: Any) -> None:
61
+ if isinstance(var, ModuleType):
62
+ self.locals[name] = self._add_mod(var)
63
+ else:
64
+ self.locals[name] = var
65
+
66
+ def add_global_var(self, name: str, var: Any) -> None:
67
+ if isinstance(var, ModuleType):
68
+ self.globals[name] = self._add_mod(var)
69
+ else:
70
+ self.globals[name] = var
71
+
72
+ def add_local_mod(self, name: str, mod: ModuleType) -> None:
73
+ assert isinstance(mod, ModuleType)
74
+ self.add_global_var(name, mod)
75
+
76
+ def record_module_access(self, mod: ModuleType, name: str, val: Any) -> None:
77
+ if isinstance(val, ModuleType):
78
+ self.name_to_modrec[mod.__name__].accessed_attrs[name] = self._add_mod(val)
79
+ return
80
+
81
+ if mod.__name__ in self.name_to_modrec:
82
+ self.name_to_modrec[mod.__name__].accessed_attrs[name] = val
83
+
84
+ def get_record(self) -> ExecutionRecord:
85
+ return ExecutionRecord(
86
+ self.code,
87
+ self.closure,
88
+ ExecutionRecorder._resolve_modules(self.globals),
89
+ ExecutionRecorder._resolve_modules(self.locals),
90
+ self.builtins.copy(),
91
+ self.code_options.copy(),
92
+ )
93
+
94
+ def _add_mod(self, mod: ModuleType) -> ModuleRecord:
95
+ if mod.__name__ not in self.name_to_modrec:
96
+ self.name_to_modrec[mod.__name__] = ModuleRecord(mod)
97
+
98
+ return self.name_to_modrec[mod.__name__]
99
+
100
+ @classmethod
101
+ def _resolve_modules(cls, vars: Dict[str, Any]) -> Dict[str, Any]:
102
+ def resolve_module(var: Any) -> Any:
103
+ if not isinstance(var, ModuleRecord):
104
+ return var
105
+
106
+ dummy_mod = DummyModule(var.module.__name__)
107
+ for attr_name, attr_value in var.accessed_attrs.items():
108
+ attr_value = resolve_module(attr_value)
109
+ dummy_mod.__setattr__(attr_name, attr_value)
110
+
111
+ return dummy_mod
112
+
113
+ return {k: resolve_module(v) for k, v in vars.items()}
phi4/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py ADDED
The diff for this file is too large to render. See raw diff
 
phi4/lib/python3.10/site-packages/torch/_dynamo/test_case.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import importlib
3
+ import logging
4
+ from typing import Tuple, Union
5
+
6
+ import torch
7
+ import torch.testing
8
+ from torch._logging._internal import trace_log
9
+ from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
10
+ IS_WINDOWS,
11
+ TEST_WITH_CROSSREF,
12
+ TEST_WITH_TORCHDYNAMO,
13
+ TestCase as TorchTestCase,
14
+ )
15
+
16
+ from . import config, reset, utils
17
+
18
+
19
+ log = logging.getLogger(__name__)
20
+
21
+
22
+ def run_tests(needs: Union[str, Tuple[str, ...]] = ()) -> None:
23
+ from torch.testing._internal.common_utils import run_tests
24
+
25
+ if TEST_WITH_TORCHDYNAMO or IS_WINDOWS or TEST_WITH_CROSSREF:
26
+ return # skip testing
27
+
28
+ if isinstance(needs, str):
29
+ needs = (needs,)
30
+ for need in needs:
31
+ if need == "cuda":
32
+ if not torch.cuda.is_available():
33
+ return
34
+ else:
35
+ try:
36
+ importlib.import_module(need)
37
+ except ImportError:
38
+ return
39
+ run_tests()
40
+
41
+
42
+ class TestCase(TorchTestCase):
43
+ _exit_stack: contextlib.ExitStack
44
+
45
+ @classmethod
46
+ def tearDownClass(cls) -> None:
47
+ cls._exit_stack.close()
48
+ super().tearDownClass()
49
+
50
+ @classmethod
51
+ def setUpClass(cls) -> None:
52
+ super().setUpClass()
53
+ cls._exit_stack = contextlib.ExitStack() # type: ignore[attr-defined]
54
+ cls._exit_stack.enter_context( # type: ignore[attr-defined]
55
+ config.patch(
56
+ raise_on_ctx_manager_usage=True,
57
+ suppress_errors=False,
58
+ log_compilation_metrics=False,
59
+ ),
60
+ )
61
+
62
+ def setUp(self) -> None:
63
+ self._prior_is_grad_enabled = torch.is_grad_enabled()
64
+ super().setUp()
65
+ reset()
66
+ utils.counters.clear()
67
+ self.handler = logging.NullHandler()
68
+ trace_log.addHandler(self.handler)
69
+
70
+ def tearDown(self) -> None:
71
+ trace_log.removeHandler(self.handler)
72
+ for k, v in utils.counters.items():
73
+ print(k, v.most_common())
74
+ reset()
75
+ utils.counters.clear()
76
+ super().tearDown()
77
+ if self._prior_is_grad_enabled is not torch.is_grad_enabled():
78
+ log.warning("Running test changed grad mode")
79
+ torch.set_grad_enabled(self._prior_is_grad_enabled)
phi4/lib/python3.10/site-packages/torch/_namedtensor_internals.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from collections import OrderedDict
3
+
4
+
5
+ """
6
+ This file contains helper functions that implement experimental functionality
7
+ for named tensors in python. All of these are experimental, unstable, and
8
+ subject to change or deletion.
9
+ """
10
+
11
+
12
+ def check_serializing_named_tensor(tensor):
13
+ if tensor.has_names():
14
+ raise RuntimeError(
15
+ "NYI: Named tensors don't support serialization. Please drop "
16
+ "names via `tensor = tensor.rename(None)` before serialization."
17
+ )
18
+
19
+
20
+ def build_dim_map(tensor):
21
+ """Returns a map of { dim: dim_name } where dim is a name if the dim is named
22
+ and the dim index otherwise."""
23
+ return OrderedDict(
24
+ [(idx if name is None else name, name) for idx, name in enumerate(tensor.names)]
25
+ )
26
+
27
+
28
+ def unzip_namedshape(namedshape):
29
+ if isinstance(namedshape, OrderedDict):
30
+ namedshape = namedshape.items()
31
+ if not hasattr(namedshape, "__iter__") and not isinstance(namedshape, tuple):
32
+ raise RuntimeError(
33
+ f"Expected namedshape to be OrderedDict or iterable of tuples, got: {type(namedshape)}"
34
+ )
35
+ if len(namedshape) == 0:
36
+ raise RuntimeError("Expected namedshape to non-empty.")
37
+ return zip(*namedshape)
38
+
39
+
40
+ def namer_api_name(inplace):
41
+ if inplace:
42
+ return "rename_"
43
+ else:
44
+ return "rename"
45
+
46
+
47
+ def is_ellipsis(item):
48
+ return item == Ellipsis or item == "..."
49
+
50
+
51
+ def single_ellipsis_index(names, fn_name):
52
+ ellipsis_indices = [i for i, name in enumerate(names) if is_ellipsis(name)]
53
+ if len(ellipsis_indices) >= 2:
54
+ raise RuntimeError(
55
+ f"{fn_name}: More than one Ellipsis ('...') found in names ("
56
+ f"{names}). This function supports up to one Ellipsis."
57
+ )
58
+ if len(ellipsis_indices) == 1:
59
+ return ellipsis_indices[0]
60
+ return None
61
+
62
+
63
+ def expand_single_ellipsis(numel_pre_glob, numel_post_glob, names):
64
+ return names[numel_pre_glob : len(names) - numel_post_glob]
65
+
66
+
67
+ def replace_ellipsis_by_position(ellipsis_idx, names, tensor_names):
68
+ globbed_names = expand_single_ellipsis(
69
+ ellipsis_idx, len(names) - ellipsis_idx - 1, tensor_names
70
+ )
71
+ return names[:ellipsis_idx] + globbed_names + names[ellipsis_idx + 1 :]
72
+
73
+
74
+ def resolve_ellipsis(names, tensor_names, fn_name):
75
+ """
76
+ Expands ... inside `names` to be equal to a list of names from `tensor_names`.
77
+ """
78
+ ellipsis_idx = single_ellipsis_index(names, fn_name)
79
+ if ellipsis_idx is None:
80
+ return names
81
+ return replace_ellipsis_by_position(ellipsis_idx, names, tensor_names)
82
+
83
+
84
+ def update_names_with_list(tensor, names, inplace):
85
+ # Special case for tensor.rename(None)
86
+ if len(names) == 1 and names[0] is None:
87
+ return tensor._update_names(None, inplace)
88
+
89
+ return tensor._update_names(
90
+ resolve_ellipsis(names, tensor.names, namer_api_name(inplace)), inplace
91
+ )
92
+
93
+
94
+ def update_names_with_mapping(tensor, rename_map, inplace):
95
+ dim_map = build_dim_map(tensor)
96
+ for old_dim in rename_map.keys():
97
+ new_dim = rename_map[old_dim]
98
+ if old_dim in dim_map.keys():
99
+ dim_map[old_dim] = new_dim
100
+ else:
101
+ raise RuntimeError(
102
+ f"{namer_api_name(inplace)}: Tried to rename dim '{old_dim}' to dim "
103
+ f"{new_dim} in Tensor[{tensor.names}] but dim '{old_dim}' does not exist"
104
+ )
105
+ return tensor._update_names(tuple(dim_map.values()), inplace)
106
+
107
+
108
+ def update_names(tensor, names, rename_map, inplace):
109
+ """There are two usages:
110
+
111
+ tensor.rename(*names) returns a view on tensor with named dims `names`.
112
+ `names` must be of length `tensor.dim()`; otherwise, if '...' is in `names`,
113
+ then it is expanded greedily to be equal to the corresponding names from
114
+ `tensor.names`.
115
+
116
+ For example,
117
+ ```
118
+ >>> # xdoctest: +SKIP
119
+ >>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
120
+ >>> x.rename('...', 'height', 'width').names
121
+ ('N', 'C', 'height', 'width')
122
+
123
+ >>> # xdoctest: +SKIP
124
+ >>> x.rename('batch', '...', 'width').names
125
+ ('batch', 'C', 'H', 'width')
126
+
127
+ ```
128
+
129
+ tensor.rename(**rename_map) returns a view on tensor that has rename dims
130
+ as specified in the mapping `rename_map`.
131
+
132
+ For example,
133
+ ```
134
+ >>> # xdoctest: +SKIP
135
+ >>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
136
+ >>> x.rename(W='width', H='height').names
137
+ ('N', 'C', 'height', 'width')
138
+
139
+ ```
140
+
141
+ Finally, tensor.rename has an in-place version called tensor.rename_.
142
+ """
143
+ has_names = len(names) > 0
144
+ has_rename_pairs = bool(rename_map)
145
+ if has_names and has_rename_pairs:
146
+ raise RuntimeError(
147
+ f"{namer_api_name(inplace)}: This function takes either positional "
148
+ f"args or keyword args, but not both. Use tensor.{namer_api_name(inplace)}(*names) "
149
+ f"to name dims and tensor.{namer_api_name(inplace)}(**rename_map) to rename "
150
+ "dims."
151
+ )
152
+
153
+ # Special case for tensor.rename(*[]), which is valid for a 0 dim tensor.
154
+ if not has_names and not has_rename_pairs:
155
+ return update_names_with_list(tensor, names, inplace)
156
+
157
+ if has_names:
158
+ return update_names_with_list(tensor, names, inplace)
159
+ return update_names_with_mapping(tensor, rename_map, inplace)
phi4/lib/python3.10/site-packages/torch/_ops.py ADDED
@@ -0,0 +1,1362 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import abc
3
+ import contextlib
4
+ import ctypes
5
+ import importlib
6
+ import inspect
7
+ import sys
8
+ import types
9
+ from typing import Any, Callable, Dict, List, Set, Type, TypeVar, Union
10
+
11
+ import torch
12
+ import torch.utils._pytree as pytree
13
+ from torch import _utils_internal
14
+ from torch._C import _dispatch_is_included_in_alias as is_included_in_alias, DispatchKey
15
+ from torch._functorch.pyfunctorch import dispatch_functorch
16
+ from torch.utils._python_dispatch import TorchDispatchMode
17
+
18
+
19
+ _F = TypeVar("_F", bound=Callable[..., Any])
20
+
21
+
22
+ # Query `hasattr` only once.
23
+ _SET_GLOBAL_FLAGS = hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags")
24
+
25
+
26
+ @contextlib.contextmanager
27
+ def dl_open_guard():
28
+ """
29
+ Context manager to set the RTLD_GLOBAL dynamic linker flag while we open a
30
+ shared library to load custom operators.
31
+ """
32
+ if not _SET_GLOBAL_FLAGS:
33
+ yield
34
+ return
35
+ old_flags = sys.getdlopenflags()
36
+ sys.setdlopenflags(old_flags | ctypes.RTLD_GLOBAL)
37
+ try:
38
+ yield
39
+ finally:
40
+ sys.setdlopenflags(old_flags)
41
+
42
+
43
+ class OperatorBase:
44
+ """
45
+ Base class for OpOverload (which represents C++ ATen operators) and HigherOrderOperator
46
+ (which represents Python-only operators that are unrepresentable in TorchScript).
47
+ """
48
+
49
+ def __init__(self):
50
+ # The dispatch cache precomputes a mapping of dispatch key that the
51
+ # dispatcher wants to dispatch to, to an actual implementation of the
52
+ # dispatch key. Confusingly, the actual implementation could *also* be a
53
+ # dispatch key, but in this case, this refers to the C++ kernel that
54
+ # was registered to some dispatch key. Aliases are permitted in the
55
+ # latter but not the former; for example, you might lookup the
56
+ # entry for AutogradCPU, and this maps you to the Autograd key for
57
+ # the generic autograd kernel that works for all devices. Since this
58
+ # is the Python dispatcher, you can also put an arbitrary Python
59
+ # callable to call instead. This handler gets precisely the
60
+ # args/kwargs that the operator was __call__'ed with.
61
+ # NB: This name is hard-coded in torch/csrc/autograd/python_variable.cpp
62
+ # for use with OpOverload; cache lookup is done entirely from C++
63
+ # for speed.
64
+ # TODO: The cache is NOT currently used by HigherOrderOperator, but it should!
65
+ self._dispatch_cache: Dict[
66
+ DispatchKey, Union[DispatchKey, Callable[..., Any]]
67
+ ] = {}
68
+
69
+ # This table allows you to override the behavior of a particular
70
+ # dispatch key to call a custom Python function, rather than the
71
+ # ordinary C++ configured behavior. This is the raison d'etre of
72
+ # Python dispatcher: to let you program the dispatcher from Python
73
+ # in case you need something unusual, and don't want to clobber
74
+ # the existing registrations using the Python operator registration
75
+ # API.
76
+ self.py_kernels: Dict[DispatchKey, Callable[..., Any]] = {}
77
+
78
+ # This table allows you to override the behavior of a particular
79
+ # operator for a particular TorchDispatchMode. In practice,
80
+ # we are using this mostly for ProxyTensorMode. Modes can be
81
+ # thought of as an open world extension of dispatch keys, so it
82
+ # makes sense that you should be able to register them, the same
83
+ # way you can register dispatch keys.
84
+ self.python_key_table: Dict[
85
+ Union[Type[TorchDispatchMode], Type[torch.Tensor]], Callable[..., Any]
86
+ ] = {}
87
+
88
+ # This table allows you to override the behavior of functorch
89
+ # transformations. NB: this currently only does something for
90
+ # HigherOrderOperator
91
+ self.functorch_table = {}
92
+
93
+ def __call__(self, *args, **kwargs):
94
+ raise NotImplementedError
95
+
96
+ def has_kernel_for_dispatch_key(self, k):
97
+ return k in self.py_kernels
98
+
99
+ def has_kernel_for_any_dispatch_key(self, ks):
100
+ for k in self.py_kernels:
101
+ if not torch._C._dispatch_is_alias_key(k) and ks.has(k):
102
+ return True
103
+ return False
104
+
105
+ def py_impl(self, k: Any) -> Callable[[_F], _F]:
106
+ def inner(fn: _F) -> _F:
107
+ if inspect.isclass(k) and (
108
+ issubclass(k, TorchDispatchMode) or issubclass(k, torch.Tensor)
109
+ ):
110
+ assert k not in self.python_key_table
111
+ # TODO(voz): Should we replace setting DispatchKey.Python entirely with setting mode keys?
112
+ self.python_key_table[k] = fn
113
+ self._dispatch_cache.clear()
114
+ return fn
115
+
116
+ if isinstance(k, torch._C._functorch.TransformType):
117
+ assert k not in self.functorch_table
118
+ self.functorch_table[k] = fn
119
+ return fn
120
+
121
+ assert isinstance(k, DispatchKey)
122
+ assert (
123
+ k != DispatchKey.Python
124
+ ), "Please register a mode for the torch._C.DispatchKey.Python key instead."
125
+
126
+ if k in self.py_kernels:
127
+ raise RuntimeError(
128
+ f"Trying to override a python impl for {k} on operator {self.name()}"
129
+ )
130
+ self.py_kernels[k] = fn
131
+ self._dispatch_cache.clear()
132
+ return fn
133
+
134
+ return inner
135
+
136
+ # Registers an implementation to all **3** variants of functionalization that we have:
137
+ # - DispatchKey.Functionalize
138
+ # - functorch.TransformType.Functionalize
139
+ # - FunctionalTensorMode
140
+ # Example:
141
+ # @py_functionalize_impl
142
+ # def functionalize_rule(ctx, inner_f, *args):
143
+ # args_unwrapped = ctx.unwrap_tensors(args)
144
+ # with ctx.redispatch_to_next():
145
+ # out = ctx.functionalize(inner_f)(*args_unwrapped)
146
+ # return ctx.wrap_tensors(out)
147
+ def py_functionalize_impl(self, fn: _F) -> _F:
148
+ from torch._subclasses.functional_tensor import (
149
+ CppFunctionalizeAPI as _CppFunctionalizeAPI,
150
+ FunctorchFunctionalizeAPI as _FunctorchFunctionalizeAPI,
151
+ PythonFunctionalizeAPI as _PythonFunctionalizeAPI,
152
+ )
153
+
154
+ # Construct our three flavors of functionalization,
155
+ # each of which have slightly different wrap/unwrap/redispatch policies
156
+ def functionalize_dk_fn(*args, **kwargs):
157
+ return fn(_CppFunctionalizeAPI(), *args, **kwargs)
158
+
159
+ def functionalize_dispatch_mode_fn(mode, *args, **kwargs):
160
+ return fn(_PythonFunctionalizeAPI(mode), *args, **kwargs)
161
+
162
+ def functionalize_functorch_fn(interpreter, *args, **kwargs):
163
+ return fn(_FunctorchFunctionalizeAPI(interpreter), *args, **kwargs)
164
+
165
+ self.py_impl(DispatchKey.Functionalize)(functionalize_dk_fn)
166
+ self.py_impl(torch._subclasses.functional_tensor.FunctionalTensorMode)(
167
+ functionalize_dispatch_mode_fn
168
+ )
169
+ self.py_impl(torch._C._functorch.TransformType.Functionalize)(
170
+ functionalize_functorch_fn
171
+ )
172
+
173
+ return fn
174
+
175
+ def name(self):
176
+ raise NotImplementedError
177
+
178
+
179
+ # Equivalent to computeDispatchTableEntryWithDebug
180
+ def resolve_key(op: OperatorBase, k: DispatchKey): # type: ignore[valid-type]
181
+ # 1. (Direct) operator registration
182
+ if op.has_kernel_for_dispatch_key(k):
183
+ return k
184
+ # 2.1 Use CompositeExplicitAutogradNonFunctional kernel if available
185
+ cand = DispatchKey.CompositeExplicitAutogradNonFunctional
186
+ if (
187
+ k == DispatchKey.Undefined or is_included_in_alias(k, cand)
188
+ ) and op.has_kernel_for_dispatch_key(cand):
189
+ return cand
190
+ # 2.2 Use CompositeExplicitAutograd kernel if available
191
+ cand = DispatchKey.CompositeExplicitAutograd
192
+ if (
193
+ k == DispatchKey.Undefined or is_included_in_alias(k, cand)
194
+ ) and op.has_kernel_for_dispatch_key(cand):
195
+ return cand
196
+ has_backend_kernel = op.has_kernel_for_any_dispatch_key(
197
+ torch._C._dispatch_get_backend_keyset_from_autograd(k)
198
+ ) or op.has_kernel_for_dispatch_key(DispatchKey.CompositeExplicitAutograd)
199
+ # 2.3. Use CompositeImplicitAutograd kernel if available
200
+ cand = DispatchKey.CompositeImplicitAutogradNestedTensor
201
+ if (
202
+ (k != DispatchKey.Undefined and is_included_in_alias(k, cand))
203
+ and op.has_kernel_for_dispatch_key(cand)
204
+ and not has_backend_kernel
205
+ ):
206
+ return cand
207
+ cand = DispatchKey.CompositeImplicitAutograd
208
+ if (
209
+ k == DispatchKey.Undefined or is_included_in_alias(k, cand)
210
+ ) and op.has_kernel_for_dispatch_key(cand):
211
+ if k == DispatchKey.AutogradOther and op.has_kernel_for_any_dispatch_key(
212
+ torch._C._dispatch_autogradother_backends
213
+ ):
214
+ raise RuntimeError("ambiguous autogradother kernel")
215
+ elif not has_backend_kernel:
216
+ return cand
217
+ # 2.4. For autograd backend keys, use kernel from DispatchKey::Autograd if available
218
+ cand = DispatchKey.Autograd
219
+ if is_included_in_alias(k, cand) and op.has_kernel_for_dispatch_key(cand):
220
+ return cand
221
+ # 2.5 Use kernel from DispatchKey::FuncTorchBatchedDecomposition if available
222
+ cand = DispatchKey.FuncTorchBatchedDecomposition
223
+ if is_included_in_alias(k, cand) and op.has_kernel_for_dispatch_key(cand):
224
+ return cand
225
+ # Backend fallback
226
+ if torch._C._dispatch_has_backend_fallback(k):
227
+ # The dispatch key itself will implicitly route to backend fallback.
228
+ # This is probably not great for the pure Python implementation.
229
+ return k
230
+ raise NotImplementedError(f"could not find kernel for {op} at dispatch key {k}")
231
+
232
+
233
+ _higher_order_ops: Dict[str, "HigherOrderOperator"] = {}
234
+
235
+ _HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS = [
236
+ DispatchKey.PythonDispatcher, # type: ignore[attr-defined]
237
+ DispatchKey.PythonTLSSnapshot, # type: ignore[attr-defined]
238
+ DispatchKey.ADInplaceOrView,
239
+ DispatchKey.BackendSelect,
240
+ DispatchKey.AutocastCPU, # type: ignore[attr-defined]
241
+ DispatchKey.AutocastCUDA, # type: ignore[attr-defined]
242
+ ]
243
+
244
+
245
+ class HigherOrderOperator(OperatorBase, abc.ABC):
246
+ # The HigherOrderOperator will appear as torch.ops.higher_order.{name}
247
+ #
248
+ # If you're creating a new HigherOrderOperator, please do not change the
249
+ # default. Adding operators to the global torch.ops namespace is a bad
250
+ # practice due to name collisions.
251
+ def __init__(self, name, *, cacheable=False):
252
+ super().__init__()
253
+ if type(self) is HigherOrderOperator:
254
+ raise RuntimeError(
255
+ "Direct instantiation of HigherOrderOperator is not allowed. Please subclass it."
256
+ )
257
+ self._name = name
258
+
259
+ # Make _OPNamespace not scream, this whole name based association needs a good hard look
260
+ self.__name__ = name
261
+ _higher_order_ops[name] = self
262
+ self._ns = "higher_order"
263
+ self.__module__ = "torch.ops.higher_order"
264
+ self._cacheable = cacheable
265
+
266
+ self.non_fallthrough_keys = torch._C._dispatch_keyset_full()
267
+
268
+ for dispatch_key in _HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS:
269
+ self.fallthrough(dispatch_key)
270
+
271
+ # [NOTE] We have to register pre-dispatch key implementation
272
+ # because sometimes HOP use aot-dispatch tracing to detect certaion
273
+ # mutations. This is problematic when we are functionalizing HOP
274
+ # during pre-dispatch because when the inner tracer starts, it will see
275
+ # that PreDispatch key is still active. In that case, we just redispatch
276
+ # it to next key. This is only safe to do when PreDispatch key stack has no
277
+ # active modes.
278
+
279
+ def py_impl(self, k: Any) -> Callable[[_F], _F]:
280
+ if isinstance(k, DispatchKey) and not self.non_fallthrough_keys.has(k):
281
+ self.non_fallthrough_keys = self.non_fallthrough_keys.add(k)
282
+ return super().py_impl(k)
283
+
284
+ @property
285
+ def namespace(self):
286
+ return self._ns
287
+
288
+ def cacheable(self):
289
+ return self._cacheable
290
+
291
+ def fallthrough(self, dispatch_key):
292
+ self.non_fallthrough_keys = self.non_fallthrough_keys.remove(dispatch_key)
293
+
294
+ # Use positional-only argument to avoid naming collide with custom ops arguments
295
+ # that are named "self".
296
+ def dispatch(self, /, dispatch_key, *args, **kwargs):
297
+ from torch.utils._python_dispatch import _get_current_dispatch_mode
298
+
299
+ if dispatch_key in self._dispatch_cache:
300
+ kernel = self._dispatch_cache[dispatch_key]
301
+ assert not isinstance(kernel, DispatchKey)
302
+ return kernel(*args, **kwargs)
303
+
304
+ if dispatch_key == DispatchKey.FuncTorchDynamicLayerFrontMode:
305
+ return dispatch_functorch(self, args, kwargs)
306
+
307
+ if dispatch_key == DispatchKey.Python:
308
+ # Keep the following 1:1 with handle_torch_function_no_python_arg_parser
309
+ # in torch/csrc/utils/python_arg_parser.cpp
310
+
311
+ overloaded_args_list = []
312
+
313
+ def has_python_key(tensor):
314
+ return torch._C._dispatch_keys(tensor).has("Python")
315
+
316
+ def check_overloaded(arg):
317
+ if isinstance(arg, torch.Tensor) and has_python_key(arg):
318
+ overloaded_args_list.append(arg)
319
+
320
+ for arg in (*args, *kwargs.values()):
321
+ check_overloaded(arg)
322
+ if isinstance(arg, (list, tuple)):
323
+ for a in arg:
324
+ check_overloaded(a)
325
+
326
+ overloaded_args = tuple(overloaded_args_list)
327
+ overloaded_types = tuple(type(arg) for arg in overloaded_args)
328
+
329
+ # Step 1: dispatch on any user TorchDispatchModes
330
+ from torch.utils._python_dispatch import _pop_mode_temporarily
331
+
332
+ curr_mode = _get_current_dispatch_mode()
333
+ if curr_mode is not None:
334
+ if type(curr_mode) in self.python_key_table:
335
+ handler = self.python_key_table[type(curr_mode)]
336
+ with _pop_mode_temporarily() as mode:
337
+ # "natural" calling convention: (mode, *args, **kwargs)
338
+ # TODO(rzou): we should support torch_dispatch calling convention too.
339
+ result = handler(mode, *args, **kwargs)
340
+ else:
341
+ raise NotImplementedError(
342
+ f"There was no rule registered for HOP {self._name} and mode {curr_mode}. "
343
+ f"We recommend filing an issue."
344
+ )
345
+ if result is not NotImplemented:
346
+ return result
347
+
348
+ # Step 2: dispatch on any subclasses
349
+ for arg in overloaded_args:
350
+ subclass_type = type(arg)
351
+ if (
352
+ subclass_type.__torch_dispatch__
353
+ == torch._C._disabled_torch_dispatch_impl
354
+ ):
355
+ continue
356
+ if subclass_type in self.python_key_table:
357
+ handler = self.python_key_table[subclass_type]
358
+ # "natural" calling convention: (*args, **kwargs)
359
+ # TODO(rzou): we should support torch_dispatch calling convention too.
360
+ result = handler(*args, **kwargs)
361
+ else:
362
+ raise NotImplementedError(
363
+ f"There was no rule registered for HOP {self._name} and subclass {subclass_type}. "
364
+ f"We recommend filing an issue."
365
+ )
366
+ if result is not NotImplemented:
367
+ return result
368
+
369
+ # All handlers returned NotImplemented
370
+ raise TypeError(
371
+ f"Multiple dispatch failed for {self._name}. There was no registered that "
372
+ f"did not return NotImplemented. Use HOP.py_impl to register some. "
373
+ f"Tried mode: {curr_mode}) and subclasses: "
374
+ f"{[type(a) for a in overloaded_args]}"
375
+ )
376
+
377
+ functionality_key = torch._C._to_functionality_key(dispatch_key) # type: ignore[attr-defined]
378
+ if functionality_key == DispatchKey.PreDispatch:
379
+ from torch.utils._python_dispatch import _pop_mode_temporarily
380
+
381
+ # The check for Python in the exclude set is so we properly respect `with no_dispatch()`
382
+ # calls inside of a mode.
383
+ if (
384
+ _len_torch_dispatch_stack_pre_dispatch() > 0
385
+ ) and not torch._C._dispatch_tls_is_dispatch_key_excluded(
386
+ DispatchKey.Python
387
+ ):
388
+ curr_mode = _get_current_dispatch_mode_pre_dispatch()
389
+ assert (
390
+ curr_mode is not None
391
+ ), "Illegal invocation of dispatch on torch._C.DispatchKey.PreDispatch without a mode."
392
+ assert (
393
+ type(curr_mode) in self.python_key_table
394
+ ), f"Current active mode {curr_mode} not registered"
395
+ handler = self.python_key_table[type(curr_mode)]
396
+ with _pop_mode_temporarily(functionality_key) as mode:
397
+ return handler(mode, *args, **kwargs)
398
+
399
+ final_key = resolve_key(self, dispatch_key)
400
+
401
+ # This can current fail due to backend fallbacks. You just have to
402
+ # register them by hand for HigherOrderOperator.
403
+ if final_key not in self.py_kernels:
404
+ raise NotImplementedError(
405
+ f"could not find kernel for HigherOrderOperator {self._name} "
406
+ f"at dispatch key {final_key} (resolved from {dispatch_key})"
407
+ )
408
+
409
+ # [NOTE] We shouldn't cache PreDispatch kernel here because depending
410
+ # on what modes are active, predispatch behaviour is different.
411
+ # Also we do same thing for normal ops:
412
+ # See Note [Not Caching Per-Dispatch-Key Mode Handlers]
413
+ if dispatch_key != DispatchKey.PreDispatch:
414
+ self._dispatch_cache[dispatch_key] = self.py_kernels[final_key]
415
+ kernel = self.py_kernels[final_key]
416
+ # It's illegal to register DispatchKey to py_kernels, since there's no
417
+ # C++ kernel to call into
418
+ assert not isinstance(kernel, DispatchKey)
419
+ return kernel(*args, **kwargs)
420
+
421
+ @abc.abstractmethod
422
+ def __call__(self, /, *args, **kwargs):
423
+ # Dynamo already traces the body of HigherOrderOp beforehand when it
424
+ # so no need to trace into it.
425
+ from torch._dynamo import disable
426
+
427
+ @disable
428
+ def wrapper():
429
+ flat_args = _to_flat_tuple(args, kwargs)
430
+ if torch.overrides.has_torch_function(flat_args):
431
+ return torch.overrides.handle_torch_function(
432
+ self, flat_args, *args, **kwargs
433
+ )
434
+
435
+ dispatch_key_set = _compute_keyset(args, kwargs, self.non_fallthrough_keys)
436
+ return self.dispatch(
437
+ dispatch_key_set.highestPriorityTypeId(), *args, **kwargs
438
+ )
439
+
440
+ return wrapper()
441
+
442
+ def __str__(self):
443
+ return f"{self.name()}"
444
+
445
+ def name(self):
446
+ return self._name
447
+
448
+
449
+ def _to_flat_tuple(args, kwargs):
450
+ return pytree.arg_tree_leaves(*args, **kwargs)
451
+
452
+
453
+ def _compute_keyset(args, kwargs, non_fallthrough_keys):
454
+ tensors = _get_tensors(args, kwargs)
455
+ return key_extractor(tensors, non_fallthrough_keys)
456
+
457
+
458
+ def _get_tensors(args, kwargs):
459
+ flat_all = _to_flat_tuple(args, kwargs)
460
+ tensor_args = [t for t in flat_all if isinstance(t, torch.Tensor)]
461
+ return tuple(tensor_args)
462
+
463
+
464
+ # Note - this should maintain identical impl to the C++ dispatcher key extraction logic
465
+ # at ATen/core/dispatch/DispatchKeyExtractor.h
466
+ def key_extractor(tensors, key_mask):
467
+ key_set = torch._C._dispatch_tls_local_include_set()
468
+ for tensor in tensors:
469
+ key_set = key_set | torch._C._dispatch_keys(tensor)
470
+ key_set = key_set - torch._C._dispatch_tls_local_exclude_set()
471
+ key_set = key_set & key_mask
472
+ return key_set
473
+
474
+
475
+ # Mode stack for PreDispatchKey
476
+ # it should always have three keys with
477
+ # priority given to FunctionalTensorMode and
478
+ # then ProxyTorchDispatchMode. It means that
479
+ # slot 0 belongs to ProxyTorchDispatchMode and
480
+ # slot 1 belongs to FunctionalTensorMode.
481
+ #
482
+ # SchemaCheckMode is separate from the other 2,
483
+ # and is only valid when the stack is empty.
484
+ # SchemaCheckMode is for testing purposes, and
485
+ # is meant to run in eager mode on concrete inputs,
486
+ # checking for incorrect schemas in regards to
487
+ # aliasing or mutating ops.
488
+ class _ModeStackStateForPreDispatch:
489
+ def __init__(self):
490
+ self.__infra_modes = [None, None]
491
+ self._schema_check_mode = None
492
+
493
+ def set(self, index, mode):
494
+ assert index < len(self.__infra_modes)
495
+ self.__infra_modes[index] = mode
496
+
497
+ def get(self, index):
498
+ assert index < len(self.__infra_modes)
499
+ return self.__infra_modes[index]
500
+
501
+ def count(self):
502
+ return len([i for i in self.__infra_modes if i is not None]) + int(
503
+ self._schema_check_mode is not None
504
+ )
505
+
506
+
507
+ _mode_stack_state_for_pre_dispatch = _ModeStackStateForPreDispatch()
508
+
509
+
510
def unset_mode_pre_dispatch(mode_key, schema_check=False):
    """Remove one mode from the pre-dispatch stack and return it.

    ``mode_key`` selects the PROXY or FUNCTIONAL slot; pass ``None``
    (optionally with ``schema_check=True``) to remove the schema-check
    mode instead.
    """
    stack = mode_stack_state_for_pre_dispatch()
    assert mode_key is None or mode_key in (
        torch._C._TorchDispatchModeKey.PROXY,
        torch._C._TorchDispatchModeKey.FUNCTIONAL,
    )
    if schema_check:
        assert mode_key is None

    if mode_key == torch._C._TorchDispatchModeKey.PROXY:
        removed = stack.get(0)
        stack.set(0, None)
    elif mode_key == torch._C._TorchDispatchModeKey.FUNCTIONAL:
        removed = stack.get(1)
        stack.set(1, None)
    else:
        removed = stack._schema_check_mode
        stack._schema_check_mode = None

    # After unsetting, if nothing is left active on the PreDispatch key,
    # remove PreDispatch from the local dispatch include set so the
    # dispatcher skips it entirely.
    if _len_torch_dispatch_stack_pre_dispatch() == 0:
        torch._C._dispatch_tls_set_dispatch_key_included(DispatchKey.PreDispatch, False)

    return removed
544
+
545
+
546
def _set_mode_pre_dispatch(mode):
    """Install ``mode`` into the pre-dispatch mode stack.

    SchemaCheckMode must be used exclusively (the stack must be empty);
    FunctionalTensorMode occupies slot 1 and ProxyTorchDispatchMode
    slot 0, each of which must be free before being filled.
    """
    from torch._subclasses.functional_tensor import FunctionalTensorMode
    from torch._subclasses.schema_check_mode import SchemaCheckMode
    from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode

    assert isinstance(
        mode,
        (FunctionalTensorMode, ProxyTorchDispatchMode, SchemaCheckMode),
    )

    stack = mode_stack_state_for_pre_dispatch()
    prior_len = _len_torch_dispatch_stack_pre_dispatch()

    if isinstance(mode, SchemaCheckMode):
        if prior_len > 0:
            raise AssertionError(
                "SchemaCheckMode for pre-dispatch must be used exclusively, found other modes on the stack"
            )
        stack._schema_check_mode = mode
    elif isinstance(mode, FunctionalTensorMode):
        assert stack.get(1) is None
        stack.set(1, mode)
    else:
        assert stack.get(0) is None
        stack.set(0, mode)

    # If the stack was empty before this call, the PreDispatch key had
    # been turned off; re-enable it now that a mode is active.
    if prior_len == 0:
        torch._C._dispatch_tls_set_dispatch_key_included(DispatchKey.PreDispatch, True)
583
+
584
+
585
def _pop_mode_from_pre_dispatch():
    """Pop and return the top pre-dispatch mode.

    Priority order: schema-check mode, then FunctionalTensorMode
    (slot 1), then ProxyTorchDispatchMode (slot 0).
    """
    stack = mode_stack_state_for_pre_dispatch()
    if _len_torch_dispatch_stack_pre_dispatch() == 0:
        raise AssertionError("Trying to pop empty mode stack")

    if stack._schema_check_mode is not None:
        return unset_mode_pre_dispatch(None, schema_check=True)
    for slot, key in (
        (1, torch._C._TorchDispatchModeKey.FUNCTIONAL),
        (0, torch._C._TorchDispatchModeKey.PROXY),
    ):
        if stack.get(slot) is not None:
            return unset_mode_pre_dispatch(key)
598
+
599
+
600
def _len_torch_dispatch_stack_pre_dispatch():
    """Return the number of modes currently active on the PreDispatch key."""
    return mode_stack_state_for_pre_dispatch().count()
602
+
603
+
604
def _get_dispatch_mode_pre_dispatch(mode_key):
    """Return the pre-dispatch mode registered for ``mode_key`` (or None)."""
    assert mode_key in (
        torch._C._TorchDispatchModeKey.PROXY,
        torch._C._TorchDispatchModeKey.FUNCTIONAL,
    )
    # PROXY lives in slot 0, FUNCTIONAL in slot 1.
    slot = 0 if mode_key == torch._C._TorchDispatchModeKey.PROXY else 1
    return mode_stack_state_for_pre_dispatch().get(slot)
613
+
614
+
615
def _get_current_dispatch_mode_pre_dispatch():
    """Return the highest-priority active pre-dispatch mode, if any.

    SchemaCheckMode wins outright; otherwise FunctionalTensorMode
    (slot 1) takes priority over ProxyTorchDispatchMode (slot 0).
    Returns None when nothing is active.
    """
    stack = mode_stack_state_for_pre_dispatch()
    if stack._schema_check_mode is not None:
        return stack._schema_check_mode
    # Equivalent to the count()-based branching: with both slots filled
    # slot 1 wins; with one filled, whichever is non-None is returned;
    # with none filled, this falls through to None.
    functional = stack.get(1)
    if functional is not None:
        return functional
    return stack.get(0)
629
+
630
+
631
def mode_stack_state_for_pre_dispatch():
    """Accessor for the process-wide pre-dispatch mode stack state."""
    # Read-only access to a module global needs no ``global`` declaration.
    return _mode_stack_state_for_pre_dispatch
634
+
635
+
636
# Registry of ops whose Python-dispatcher results were stored in their
# _dispatch_cache, tracked so those caches can be invalidated together.
cached_ops: Set["OpOverload"] = set()


def add_cached_op(op_overload):
    """Record that ``op_overload`` has an entry in its dispatch cache."""
    # Mutating the set in place needs no ``global`` declaration.
    cached_ops.add(op_overload)


def reset_cached_ops():
    """Forget every op recorded by add_cached_op."""
    cached_ops.clear()


def get_cached_ops():
    """Return the (mutable) set of ops with cached dispatch entries."""
    return cached_ops
652
+
653
+
654
+ # Each OpOverload object contains pointer to a specific operator overload, a pointer to the parent `OpOverloadPacket` object.
655
+ # You can obtain an OpOverload object through attribute query on OpOverloadPacket.
656
class OpOverload(OperatorBase):
    """A single resolved operator overload (e.g. ``aten::add.Tensor``).

    Holds the callable for one specific overload (``op``) together with
    its parent OpOverloadPacket, schema, and tags.  Instances are
    obtained by attribute query on an OpOverloadPacket and are treated
    as immutable singletons per overload.
    """

    def __init__(self, overloadpacket, op, op_dk, schema, tags):
        super().__init__()
        self._op = op
        self._op_dk = op_dk
        self._schema = schema
        self._overloadpacket = overloadpacket
        self._tags = tags
        self._overloadname = (
            "default" if schema.overload_name == "" else schema.overload_name
        )
        self._name = self._schema.name
        if schema.overload_name:
            self._name += "." + schema.overload_name
        self.__name__ = f"{self._schema.name.split('::')[1]}.{self._overloadname}"
        self.__module__ = overloadpacket.__module__
        op.__module__ = overloadpacket.__module__
        self.__qualname__ = self._name
        self.__annotations__ = {}
        # Only compute the OperatorHandle when we need it. Not all OpOverloads have
        # OperatorHandles (the TorchScript ones don't...)
        self._lazy_handle = None

        # If the OpOverload was constructed from a Library.def in Python.
        self._defined_in_python = self.__qualname__ in torch.library._defs

        # Logic replicated from aten/src/ATen/native/MathBitsFallback.h:
        # an overload is a "view" iff it has aliased arguments and none of
        # them is written to.
        is_write = None
        for a in self._schema.arguments:
            if a.alias_info is None:
                continue
            if is_write is None:
                is_write = a.alias_info.is_write
            else:
                # We will conservatively call mixed mutable/non-mutable
                # aliased inputs as NOT a view
                is_write = a.alias_info.is_write or is_write
        self.is_view = is_write is not None and not is_write

    @property
    def _namespace(self):
        return self._schema.name.split("::")[0]

    @property
    def _opname(self):
        return self._schema.name.split("::")[1]

    @property
    def _handle(self):
        # Lazily resolve (and cache) the C++ OperatorHandle.
        if self._lazy_handle is None:
            self._lazy_handle = torch._C._dispatch_find_schema_or_throw(
                self._schema.name, self._schema.overload_name
            )
        return self._lazy_handle

    # it's a no-op since OpOverload object is immutable and must be unique for a given op overload.
    def __deepcopy__(self, memo=None):
        return self

    def __repr__(self):
        return "<OpOverload(op='{}.{}', overload='{}')>".format(
            *self._schema.name.split("::"), self._overloadname
        )

    # Use positional-only argument to avoid naming collision with aten ops arguments
    # that are named "self". This way, all the aten ops can be called by kwargs.
    def __call__(self, /, *args, **kwargs):
        return self._op(*args, **kwargs)

    # Use positional-only argument to avoid naming collision with aten ops arguments
    # that are named "self". This way, all the aten ops can be called by kwargs.
    def redispatch(self, /, keyset, *args, **kwargs):
        return self._handle.redispatch_boxed(keyset, *args, **kwargs)

    def __hash__(self):
        return hash(self._op)

    # `my_namespace.my_op_name.overload_name`
    def __str__(self):
        return "{}.{}.{}".format(*self._schema.name.split("::"), self._overloadname)

    def has_kernel_for_dispatch_key(self, k):
        return super().has_kernel_for_dispatch_key(
            k
        ) or torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), k)

    def has_kernel_for_any_dispatch_key(self, ks):
        return torch._C._dispatch_has_kernel_for_any_dispatch_key(
            self.name(), ks
        ) or super().has_kernel_for_any_dispatch_key(ks)

    @property
    def namespace(self):
        return self._schema.name.split("::")[0]

    def _can_decompose(self):
        dk = DispatchKey.CompositeImplicitAutograd
        return dk in self.py_kernels or torch._C._dispatch_has_kernel_for_dispatch_key(
            self.name(), dk
        )

    def decompose(self, *args, **kwargs):
        """Run the CompositeImplicitAutograd decomposition, if one exists.

        Returns NotImplemented when this overload has no such kernel.
        """
        dk = DispatchKey.CompositeImplicitAutograd
        if dk in self.py_kernels:
            # NB: This branch is not too necessary anymore, because we can
            # apply Python CompositeImplicitAutograd *before* tracing
            # using Python dispatcher (also taking advantage of the autograd
            # formula). But it's included for completeness
            return self.py_kernels[dk](*args, **kwargs)
        elif torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), dk):
            return self._op_dk(dk, *args, **kwargs)
        else:
            return NotImplemented

    # Remove a dispatch key from the dispatch cache. This will force it to get
    # recomputed the next time. Does nothing
    # WARNING: if you register a dispatch key to py_kernels of an OpOverload,
    # calling _del_dispatch on that key is NOT sufficient to apply your change,
    # because a single registration may affect MULTIPLE dispatch keys (e.g.,
    # registering Autograd affects AutogradCPU). del_dispatch is to be used
    # only if you are specifically modifying how get_dispatch handles a
    # particular input 'key'.
    def _uncache_dispatch(self, key):
        self._dispatch_cache.pop(key, None)

    # This implements the pre-computation logic for the Python dispatcher.
    def _get_dispatch(self, key):
        # This is only called upon a cache miss
        assert key not in self._dispatch_cache, f"{self} {key}"

        if key == DispatchKey.Python:
            if not isinstance(self, TorchBindOpOverload) and not self.python_key_table:
                self._dispatch_cache[key] = key
                add_cached_op(self)
                return key

            def handler(*args, **kwargs):
                from torch.utils._python_dispatch import _get_current_dispatch_mode

                # TODO: We also need to handle tensor subclasses here
                # TODO(voz): We should walk all the nodes here / turn it into a list, topmode is ok for now.
                curr_mode_instance = _get_current_dispatch_mode()
                # BUGFIX: assert on the mode object itself.  The previous
                # code asserted on type(mode), which is NoneType (never
                # None) when no mode is active, so the assert was vacuous.
                assert (
                    curr_mode_instance is not None
                ), "Illegal invocation of dispatch on torch._C.DispatchKey.Python without a mode."
                curr_mode = type(curr_mode_instance)

                if curr_mode not in self.python_key_table:
                    if isinstance(self, TorchBindOpOverload):
                        with torch.utils._python_dispatch._pop_mode_temporarily() as mode:
                            return torch._library.utils.handle_dispatch_mode(
                                mode, self, *args, **kwargs
                            )
                    else:
                        return self._op_dk(key, *args, **kwargs)

                with torch.utils._python_dispatch._pop_mode_temporarily() as mode:
                    return self.python_key_table[curr_mode](mode, *args, **kwargs)

            self._dispatch_cache[key] = handler
            add_cached_op(self)
            return handler

        functionality_key = torch._C._to_functionality_key(key)  # type: ignore[attr-defined]
        if functionality_key == DispatchKey.PreDispatch:
            curr_stack_len = _len_torch_dispatch_stack_pre_dispatch()
            # The check for Python in the exclude set is so we properly respect `with no_dispatch()`
            # calls inside of a mode.
            if (
                curr_stack_len > 0
                and not torch._C._dispatch_tls_is_dispatch_key_excluded(
                    DispatchKey.Python
                )
            ):

                def handler(*args, **kwargs):
                    @contextlib.contextmanager
                    def _temporarily_pop_modes_from_pre_dispatch():
                        top_mode = _pop_mode_from_pre_dispatch()
                        try:
                            yield top_mode
                        finally:
                            _set_mode_pre_dispatch(top_mode)

                    with _temporarily_pop_modes_from_pre_dispatch() as curr_mode:
                        return torch._library.utils.handle_dispatch_mode(
                            curr_mode, self, *args, **kwargs
                        )

                # Note [Not Caching Per-Dispatch-Key Mode Handlers]
                # Note that we're not caching this handler. There isn't really a point, since the slow bit
                # is the handler itself (in python).
                # Also, not caching means that we don't have to reset the cache when any existing
                # modes go out of scope (which in of itself takes time to loop through all operators).
                return handler

        final_key = resolve_key(self, key)

        # See Note [Not Caching Per-Dispatch-Key Mode Handlers]
        cache_result = key != DispatchKey.PreDispatch

        # TODO: We could potentially have lots of debugging wrappers against
        # dispatch keys; design some general registration mechanism instead of
        # having if statement for each of them
        if key == DispatchKey.Functionalize:
            import torch._dispatch.python as pydispatch

            if pydispatch.CROSSREF_FUNCTIONALIZE:
                handler = pydispatch.make_crossref_functionalize(self, final_key)
                if cache_result:
                    self._dispatch_cache[key] = handler
                    add_cached_op(self)
                return handler

        r = self.py_kernels.get(final_key, final_key)
        if cache_result:
            self._dispatch_cache[key] = r
            add_cached_op(self)
        return r

    def name(self):
        return self._name

    @property
    def overloadpacket(self):
        return self._overloadpacket

    @property
    def op(self):
        return self._op

    @property
    def tags(self):
        return self._tags

    # TODO: add more methods to expose information about input and output arguments
891
+
892
+
893
+ # TorchBindOpOverload are those custom ops which have at least one overload's
894
+ # schema consists of torch.ScriptObject (i.e. custom class) input.
895
+ # TorchBindOpOverload will skip C++ dispatcher and purely dispatched in python
896
+ # when its inputs contain FakeScriptObject in a similar way as higher order ops.
897
class TorchBindOpOverload(OpOverload):
    """OpOverload for custom ops whose schema includes a torch.ScriptObject.

    When any input is a FakeScriptObject, the C++ dispatcher cannot check
    the schema, so dispatch skips C++ entirely and happens purely in
    Python, in a similar way as higher-order ops.
    """

    def _fallthrough_keys(self) -> List[DispatchKey]:
        # TODO: we should be calling the fallback for these, but a fallthrough is almost close
        # enough to the fallback in most cases that we care about.
        _DEFAULT_FALLTHROUGH_KEYS = [
            DispatchKey.Autograd,
            DispatchKey.AutogradCPU,
            DispatchKey.AutogradCUDA,
            DispatchKey.ADInplaceOrView,
            DispatchKey.BackendSelect,
            DispatchKey.PythonTLSSnapshot,
            DispatchKey.PythonDispatcher,
        ]

        def _may_use_fallthrough_instead_of_fallback(key: DispatchKey):
            if torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), key):
                return torch._C._dispatch_kernel_for_dispatch_key_is_fallthrough(
                    self.name(), key
                )

            return (
                key not in self.py_kernels
                or self.py_kernels[key] is torch.library.fallthrough_kernel
            )

        return [
            key
            for key in _DEFAULT_FALLTHROUGH_KEYS
            if _may_use_fallthrough_instead_of_fallback(key)
        ]

    @contextlib.contextmanager
    def _register_as_effectful_op_temporarily(self):
        from torch._higher_order_ops.effects import (
            _EffectType,
            _register_effectful_op,
            SIDE_EFFECTS,
        )

        try:
            if self not in SIDE_EFFECTS:
                _register_effectful_op(self, _EffectType.ORDERED)
            yield
        finally:
            if self in SIDE_EFFECTS:
                del SIDE_EFFECTS[self]

    # Use positional-only argument to avoid naming collision with aten ops arguments
    # that are named "self". This way, all the aten ops can be called by kwargs.
    def __call__(self, /, *args, **kwargs):
        if _must_dispatch_in_python(args, kwargs):
            # When any inputs are FakeScriptObject, we need to
            # skip c++ dispatcher and dispatch in python through _get_dispatch of python_dispatcher
            # because C++ dispatcher will check the schema and cannot recognize FakeScriptObject.
            #
            # Note:
            # 1. We only register the torchbind op temporarily as effectful op because we only want
            #    the effect token functionalization logic to be applied during tracing. Otherwise, the behavior
            #    of the eagerly executing the op might change after tracing.
            # 2. We don't want to register the op as effectful for all torchbind ops in ctor because this might
            #    cause unexpected behavior for some autograd.profiler ops e.g. profiler._record_function_exit._RecordFunction.
            with self._register_as_effectful_op_temporarily():
                return self._dispatch_in_python(args, kwargs, self._fallthrough_keys())
        return self._op(*args, **kwargs)

    def _dispatch_in_python(self, args, kwargs, fallthrough_keys):
        """Dispatch purely in Python, skipping keys in ``fallthrough_keys``.

        Raises:
            RuntimeError: when the resolved handler is a raw DispatchKey
                (i.e. no Python implementation exists for it).
        """
        non_fallthrough_keys = torch._C._dispatch_keyset_full()
        for key in fallthrough_keys:
            non_fallthrough_keys = non_fallthrough_keys.remove(key)

        dispatch_key_set = _compute_keyset(args, kwargs, non_fallthrough_keys)
        dispatch_key = dispatch_key_set.highestPriorityTypeId()

        handler = (
            self._get_dispatch(dispatch_key)
            if dispatch_key not in self._dispatch_cache
            else self._dispatch_cache[dispatch_key]
        )

        if isinstance(handler, DispatchKey):
            # fallthrough keys can be registered at runtime via torch.library.impl
            # so need to add it to fallthrough_keys and re-dispatch.
            if torch._C._dispatch_kernel_for_dispatch_key_is_fallthrough(
                self.name(), dispatch_key
            ):
                return self._dispatch_in_python(
                    args, kwargs, fallthrough_keys + [dispatch_key]
                )

            # BUGFIX: corrected typos/grammar in this user-facing error
            # message ("happpen", stray period after the dispatch key).
            raise RuntimeError(
                f"Torchbind op {self} received a FakeScriptObject input when dispatching {handler}."
                f" but no python implementation is found."
                f" Please file an issue on this when you encounter this error."
                f" This error can happen when you export or compile the model."
                f" It can still happen even if a C++ implementation for {dispatch_key}"
                f" has been registered. That's because FakeScriptObject purely lives in python and cannot work "
                f" with a C++ implementation."
            )

        assert isinstance(handler, Callable)  # type: ignore[arg-type]
        return handler(*args, **kwargs)
998
+
999
+
1000
def _must_dispatch_in_python(args, kwargs):
    """True if any (possibly nested) argument is a FakeScriptObject.

    Such inputs cannot pass the C++ dispatcher's schema checks, so the
    call must be routed through the Python dispatcher instead.
    """

    def _is_fake_script_obj(leaf):
        return isinstance(leaf, torch._library.fake_class_registry.FakeScriptObject)

    return pytree.tree_any(_is_fake_script_obj, (args, kwargs))
1007
+
1008
+
1009
+ def _has_script_object_arg(schema: torch.FunctionSchema) -> bool:
1010
+ return any(isinstance(arg.type, torch.ClassType) for arg in schema.arguments)
1011
+
1012
+
1013
+ # OpOverloadPacket class contains pointer to a base unresolved operator that doesn't correspond to a specific operator
1014
+ # You can obtain an OpOverload object through attribute query.
1015
class OpOverloadPacket:
    """The bundle of all overloads for one base (unresolved) operator.

    Attribute access (e.g. ``packet.default``) resolves, caches, and
    returns the corresponding OpOverload / TorchBindOpOverload.
    """

    def __init__(self, qualified_op_name, op_name, op, overload_names):
        # These attributes are accessible on the object through the
        # properties defined below but are immutable.
        self._qualified_op_name = qualified_op_name
        self.__name__ = op_name
        self._op = op
        self._overload_names = overload_names
        self._dir = []
        self._has_torchbind_op_overload = any(
            _has_script_object_arg(schema) for schema in self._schemas.values()
        )

    # No-op: an OpOverloadPacket is immutable and unique per op.
    def __deepcopy__(self, memo=None):
        return self

    def __repr__(self):
        return "<OpOverloadPacket(op='{}.{}')>".format(
            *self._qualified_op_name.split("::")
        )

    def __hash__(self):
        return hash(self._op)

    def __str__(self):
        return "{}.{}".format(*self._qualified_op_name.split("::"))

    @property
    def op(self):
        return self._op

    @property
    def _schemas(self):
        return {
            overload_name: torch._C._get_schema(self._qualified_op_name, overload_name)
            for overload_name in self._overload_names
        }

    def __getattr__(self, key):
        # `__file__` is queried by tooling and is never a valid op name.
        if key == "__file__":
            return "torch.ops"

        # Dunder attributes that don't exist on the packet are forwarded
        # to self._op without paying for `_get_operation_overload` (an
        # expensive operation).  This is safe because an overload name
        # for an aten op can never start with '__'; the list can grow if
        # other attributes like `__name__` only exist on self._op.
        try:
            if key.startswith("__"):
                return getattr(self._op, key)
        except AttributeError:
            # Re-raise under this object's name: it would be confusing to
            # surface an error mentioning a different object than the one
            # the attribute query was performed on.
            raise AttributeError(
                f"'{str(self)}' can't have an overload name beginning with '__' and the "
                f"underlying op {str(self._op)} has no attribute {key} either."
            ) from None

        try:
            # Safe because an aten overload name can never be 'default'.
            use_key = "" if key == "default" else key
            # TODO: disallow access to overloads registered by JIT
            op_dk_tags = torch._C._get_operation_overload(
                self._qualified_op_name, use_key
            )
            if op_dk_tags is None:
                raise AttributeError(
                    f"The underlying op of '{str(self)}' has no overload name '{key}'"
                )

            op_, op_dk_, tags = op_dk_tags
            schema = torch._C._get_schema(self._qualified_op_name, use_key)
            overload = (
                OpOverload(self, op_, op_dk_, schema, tags)
                if not _has_script_object_arg(schema)
                else TorchBindOpOverload(self, op_, op_dk_, schema, tags)
            )
            # Cache the overload object on the packet.
            setattr(self, key, overload)
            self._dir.append(key)
            return overload
        except RuntimeError:
            raise AttributeError(
                f"The underlying op of '{str(self)}' has no overload name '{key}'"
            ) from None

    def __iter__(self):
        return iter(self._dir)

    # Use positional-only argument to avoid naming collision with aten ops arguments
    # that are named "self". This way, all the aten ops can be called by kwargs.
    def __call__(self, /, *args, **kwargs):
        # Overloading __call__ keeps torch.ops.foo.bar() callable from
        # JIT; the function pointer is saved as the `op` attribute.
        #
        # Calling the packet directly goes into C++, whose schema check
        # rejects FakeScriptObject inputs for torchbind ops, so those
        # calls are intercepted here and dispatched in Python instead.
        if self._has_torchbind_op_overload and _must_dispatch_in_python(args, kwargs):
            return _call_overload_packet_from_python(self, args, kwargs)
        return self._op(*args, **(kwargs or {}))

    # TODO: use this to make a __dir__
    def overloads(self):
        return [n if n else "default" for n in self._overload_names]
1128
+
1129
+
1130
+ # Note - this mirrors the logic of the cpp_function defined in jit/python/init.cpp
1131
+ # _jit_get_operations, which calls _get_operation_for_overload_or_packet.
1132
def _call_overload_packet_from_python(op: OpOverloadPacket, args, kwargs):
    """Dispatch a packet call in Python.

    Mirrors the logic of cpp_function defined in jit/python/init.cpp
    (_jit_get_operations, which calls
    _get_operation_for_overload_or_packet): first try torch-function
    handling, then schema-match the arguments against each overload
    (allowing FakeScriptObject stand-ins) and call the first match.

    Raises:
        RuntimeError: if no overload's schema matches the arguments.
    """
    # Re-use the torch function handling logic in cpp
    torch_function_called, ret = torch._C._maybe_call_torch_function_for_op_packet(
        op, *args, **kwargs
    )

    if torch_function_called:
        return ret

    # The following mirrors getOpWithStack.
    # In cpp, we do a schema matching for the arguments, and call ToIValue
    # to check whether the arguments are valid. We need to do similar
    # things here and additionally check whether a FakeScriptObject is the
    # corresponding fake class of the actual class used in the schema.
    exceptions = {}
    found_op = None
    for overload_name in op.overloads():
        op_overload = getattr(op, overload_name)
        try:
            _ = torch._C._check_schema_allow_fake_script_object(
                op_overload._schema, *args, **kwargs
            )
            found_op = op_overload
            break
        except RuntimeError as e:
            exceptions[overload_name] = e

    if found_op:
        return found_op(*args, **kwargs)

    # Idiom fix: the original enumerated the items but never used the
    # index; build the message with a single join instead.
    err_msg = (
        f"Fail to match any TorchBindOverload of {op} with following exceptions:\n"
        + "".join(f"Overload name {key}:\n {msg}\n" for key, msg in exceptions.items())
    )
    raise RuntimeError(err_msg)
1168
+
1169
+
1170
+ # Resolution of torch.fn is different from torch.ops.aten.fn
1171
+ # torch.fn uses the Python argparser, matches with the
1172
+ # appropriate schema, and calls into the unboxed version of the method
1173
+ # torch.ops.aten.fn resolution is done via the mechanism defined in JIT.
1174
+ # JIT creates a stack of all the overloads and then tries to match the
1175
+ # correct one at runtime and always calls into the boxed version of the method
1176
+ # Autograd codegen creates VariableType, TracerType,
1177
+ # inplace or view type and python bindings.
1178
+ # Aten codegen generates tensor methods for the tensor class.
1179
+
1180
+ # _OpNamespace is a subclass of ModuleType because the torch script
1181
+ # allows attribute lookups on modules only. Since we want torch.ops.foo.bar()
1182
+ # to work from script, we need to ensure ops and foo are modules
1183
+
1184
+
1185
+ class _OpNamespace(types.ModuleType):
1186
+ """
1187
+ An op namespace to dynamically bind Operators into Python.
1188
+
1189
+ Say a user has created a custom Operator called "my_namespace::my_op". To
1190
+ call this op, the user will write torch.ops.my_namespace.my_op(...).
1191
+ At startup, this operation will not yet be bound into Python. Instead, the
1192
+ following sequence of magic tricks will occur:
1193
+ 1. `torch.ops.my_namespace` will invoke the `__getattr__` magic method
1194
+ on the `torch.ops` object, which will create a new `_OpNamespace`
1195
+ object called `my_namespace` and set it as an attribute on the `ops`
1196
+ object.
1197
+ 2. `torch.ops.my_namespace.my_op` will then invoke `__getattr__` on
1198
+ the `my_namespace` object, which will retrieve the operation via
1199
+ `torch.get_operation`, a function bound from C++, and then in a similar
1200
+ fashion bind this new object onto the `my_namespace` object.
1201
+ 3. `torch.ops.my_namespace.my_op(...)` then calls this new operation
1202
+ and subsequent accesses will incur no further lookup (the namespace and
1203
+ operation will already exist).
1204
+ """
1205
+
1206
+ def __init__(self, name):
1207
+ super().__init__("torch.ops." + name)
1208
+ self.name = name
1209
+ self._dir = []
1210
+
1211
+ def __iter__(self):
1212
+ return iter(self._dir)
1213
+
1214
+ def __getattr__(self, op_name):
1215
+ # It is not a valid op_name when __file__ is passed in
1216
+ if op_name == "__file__":
1217
+ return "torch.ops"
1218
+ elif op_name in ["__origin__", "__self__"]:
1219
+ raise AttributeError(
1220
+ f"Invalid attribute '{op_name}' for '_OpNamespace' '{self.name}'"
1221
+ )
1222
+
1223
+ # Get the op `my_namespace::my_op` if available. This will also check
1224
+ # for overloads and raise an exception if there are more than one.
1225
+ namespace_name = self.name
1226
+ qualified_op_name = f"{namespace_name}::{op_name}"
1227
+ module_name = self.__module__ + "." + namespace_name
1228
+
1229
+ try:
1230
+ op, overload_names = _get_packet(qualified_op_name, module_name)
1231
+ if op is None:
1232
+ raise AttributeError(
1233
+ f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
1234
+ )
1235
+ except RuntimeError as e:
1236
+ # Turn this into AttributeError so getattr(obj, key, default)
1237
+ # works (this is called by TorchScript with __origin__)
1238
+ raise AttributeError(
1239
+ f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
1240
+ ) from e
1241
+
1242
+ op.__module__ = module_name
1243
+ opoverloadpacket = OpOverloadPacket(
1244
+ qualified_op_name, op_name, op, overload_names
1245
+ )
1246
+ opoverloadpacket.__module__ = self.__module__ + "." + namespace_name
1247
+ # cache the opoverloadpacket to ensure that each op corresponds to
1248
+ # a unique OpOverloadPacket object
1249
+ setattr(self, op_name, opoverloadpacket)
1250
+ self._dir.append(op_name)
1251
+ return opoverloadpacket
1252
+
1253
+
1254
+ def _get_packet(qualname, op_module):
1255
+ op, overload_names = torch._C._jit_get_operation(qualname)
1256
+ if op is not None:
1257
+ # let the script frontend know that op is identical to the builtin op
1258
+ # with qualified_op_name
1259
+ torch.jit._builtins._register_builtin(op, qualname)
1260
+ op.__module__ = op_module
1261
+ return op, overload_names
1262
+
1263
+
1264
def _refresh_packet(packet):
    """Re-resolve ``packet``'s underlying op (e.g. after new registrations)."""
    refreshed_op, refreshed_names = _get_packet(
        packet._qualified_op_name, packet._op.__module__
    )
    assert refreshed_op is not None
    packet._op = refreshed_op
    packet._overload_names = refreshed_names
1269
+
1270
+
1271
class _PyOpNamespace(_OpNamespace):
    """Namespace over a fixed dict of Python-defined ops (e.g. higher-order ops)."""

    def __init__(self, name, ops):
        super().__init__(name)
        self._ops = ops

    def __getattr__(self, name):
        # Mirror _OpNamespace.__getattr__: cache the op on this instance
        # so subsequent lookups bypass __getattr__ entirely.
        op = self._ops.get(name, None)
        if op is None:
            raise AttributeError(
                f"'_PyOpNamespace' '{self.name}' object has no attribute '{name}'"
            )
        setattr(self, name, op)
        return op
1285
+
1286
+
1287
class _Ops(types.ModuleType):
    """The ``torch.ops`` module object: creates op namespaces on demand."""

    __file__ = "_ops.py"

    def __init__(self):
        super().__init__("torch.ops")
        self.loaded_libraries = set()
        self._higher_order_op_namespace = _PyOpNamespace(
            "torch.ops.higher_order", _higher_order_ops
        )
        self._dir = []

    def __getattr__(self, name):
        # HigherOrderOperators live in a dedicated, pre-built namespace.
        if name == "higher_order":
            return self._higher_order_op_namespace

        # Create (and cache) `torch.ops.<name>`.
        namespace = _OpNamespace(name)
        setattr(self, name, namespace)
        self._dir.append(name)
        return namespace

    def __iter__(self):
        return iter(self._dir)

    def import_module(self, module):
        """
        Imports a Python module that has torch.library registrations.

        Generally, to extend PyTorch with custom operators, a user will
        create a Python module whose import triggers registration of
        the custom operators via a torch.ops.load_library call or a call
        to one or more torch.library.* APIs.

        It is unexpected for Python modules to have side effects, so some
        linters and formatters will complain. Use this API to import Python
        modules that contain these torch.library side effects.

        Args:
            module (str): The name of the Python module to import

        """
        importlib.import_module(module)

    def load_library(self, path):
        """
        Loads a shared library from the given path into the current process.

        The library being loaded may run global initialization code to register
        custom operators with the PyTorch JIT runtime. This allows dynamically
        loading custom operators. For this, you should compile your operator
        and the static registration code into a shared library object, and then
        call ``torch.ops.load_library('path/to/libcustom.so')`` to load the
        shared object.

        After the library is loaded, it is added to the
        ``torch.ops.loaded_libraries`` attribute, a set that may be inspected
        for the paths of all libraries loaded using this function.

        Args:
            path (str): A path to a shared library to load.
        """
        if torch._running_with_deploy():
            return

        path = _utils_internal.resolve_library_path(path)
        with dl_open_guard():
            # Import the shared library into the process, thus running its
            # static (global) initialization code in order to register custom
            # operators with the JIT.
            ctypes.CDLL(path)
        self.loaded_libraries.add(path)
1359
+
1360
+
1361
# The ops "namespace": the singleton instance behind `torch.ops`.
ops: _Ops = _Ops()
phi4/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c86b25d2a14bb1da46ea74b0f7a63d7959533118af3a614c4fb454b6de01637
3
+ size 144958
phi4/lib/python3.10/site-packages/torch/_utils_internal.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import functools
3
+ import logging
4
+ import os
5
+ import sys
6
+ import tempfile
7
+ from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar
8
+ from typing_extensions import ParamSpec
9
+
10
+ import torch
11
+ from torch._strobelight.compile_time_profiler import StrobelightCompileTimeProfiler
12
+
13
+
14
_T = TypeVar("_T")
_P = ParamSpec("_P")

log = logging.getLogger(__name__)

# Opt-in Strobelight compile-time profiling, gated on an environment variable.
# Only usable on FB machines where the `strobeclient` binary exists.
if os.environ.get("TORCH_COMPILE_STROBELIGHT", False):
    import shutil

    if not shutil.which("strobeclient"):
        log.info(
            "TORCH_COMPILE_STROBELIGHT is true, but seems like you are not on a FB machine."
        )
    else:
        log.info("Strobelight profiler is enabled via environment variable")
        StrobelightCompileTimeProfiler.enable()

# this arbitrary-looking assortment of functionality is provided here
# to have a central place for overrideable behavior. The motivating
# use is the FB build environment, where this source file is replaced
# by an equivalent.

if torch._running_with_deploy():
    # __file__ is meaningless in the context of frozen torch used in torch deploy.
    # setting empty torch_parent should allow below functions to operate without crashing,
    # but it's unclear if there is a valid use case for them in the context of deploy.
    torch_parent = ""
else:
    # When this file lives under a "shared" directory, go up one extra level
    # to find the directory containing the torch package.
    if os.path.basename(os.path.dirname(__file__)) == "shared":
        torch_parent = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    else:
        torch_parent = os.path.dirname(os.path.dirname(__file__))
45
+
46
+
47
def get_file_path(*path_components: str) -> str:
    """Join *path_components* under ``torch_parent`` (the torch install root's parent)."""
    return os.path.join(torch_parent, *path_components)
49
+
50
+
51
def get_file_path_2(*path_components: str) -> str:
    """Join the given path components; thin wrapper over ``os.path.join``."""
    joined = os.path.join(*path_components)
    return joined
53
+
54
+
55
def get_writable_path(path: str) -> str:
    """Return *path* if it is writable, otherwise a fresh temporary directory
    whose name ends with *path*'s basename."""
    if not os.access(path, os.W_OK):
        # Fall back to a writable scratch dir keyed on the requested name.
        return tempfile.mkdtemp(suffix=os.path.basename(path))
    return path
59
+
60
+
61
def prepare_multiprocessing_environment(path: str) -> None:
    """No-op in OSS; the internal (FB) build replaces this module with a
    version that performs real setup."""
    return None
63
+
64
+
65
def resolve_library_path(path: str) -> str:
    """Canonicalize *path*, resolving symlinks via ``os.path.realpath``."""
    resolved = os.path.realpath(path)
    return resolved
67
+
68
+
69
def throw_abstract_impl_not_imported_error(opname, module, context):
    """Raise ``NotImplementedError`` for an operator lacking a fake impl.

    When *module* is already imported the import hint is pointless, so a
    shorter message is raised; otherwise the message suggests importing
    *module* and appends *context*.
    """
    if module in sys.modules:
        raise NotImplementedError(
            f"{opname}: We could not find the fake impl for this operator. "
        )
    raise NotImplementedError(
        f"{opname}: We could not find the fake impl for this operator. "
        f"The operator specified that you may need to import the '{module}' "
        f"Python module to load the fake impl. {context}"
    )
80
+
81
+
82
# NB! This treats "skip" kwarg specially!!
def compile_time_strobelight_meta(
    phase_name: str,
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
    """Decorator factory that profiles the wrapped function's compile time
    under Strobelight (when the profiler is enabled), tagging samples with
    ``phase_name``. A plain pass-through call when profiling is disabled.
    """

    def compile_time_strobelight_meta_inner(
        function: Callable[_P, _T],
    ) -> Callable[_P, _T]:
        @functools.wraps(function)
        def wrapper_function(*args: _P.args, **kwargs: _P.kwargs) -> _T:
            # "skip" presumably counts stack frames to skip; bump it so this
            # wrapper frame is accounted for — TODO confirm with callers.
            if "skip" in kwargs and isinstance(skip := kwargs["skip"], int):
                kwargs["skip"] = skip + 1

            if not StrobelightCompileTimeProfiler.enabled:
                return function(*args, **kwargs)

            return StrobelightCompileTimeProfiler.profile_compile_time(
                function, phase_name, *args, **kwargs
            )

        return wrapper_function

    return compile_time_strobelight_meta_inner
104
+
105
+
106
# Meta only, see
# https://www.internalfb.com/intern/wiki/ML_Workflow_Observability/User_Guides/Adding_instrumentation_to_your_code/
#
# This will cause an event to get logged to Scuba via the signposts API. You
# can view samples on the API at https://fburl.com/scuba/workflow_signpost/zh9wmpqs
# we log to subsystem "torch", and the category and name you provide here.
# Each of the arguments translate into a Scuba column. We're still figuring
# out local conventions in PyTorch, but category should be something like
# "dynamo" or "inductor", and name should be a specific string describing what
# kind of event happened.
#
# Killswitch is at
# https://www.internalfb.com/intern/justknobs/?name=pytorch%2Fsignpost#event
def signpost_event(category: str, name: str, parameters: Dict[str, Any]):
    # In OSS this only logs locally; the FB build (which replaces this file)
    # is what actually logs to Scuba.
    log.info("%s %s: %r", category, name, parameters)
121
+
122
+
123
def log_compilation_event(metrics):
    # OSS: just log the metrics object locally; the FB build records it remotely.
    log.info("%s", metrics)
125
+
126
+
127
def upload_graph(graph):
    """OSS no-op hook; the FB build replaces this module with a version that
    uploads *graph* for inspection."""
    return None
129
+
130
+
131
def set_pytorch_distributed_envs_from_justknobs():
    """OSS no-op; internally this would seed distributed env vars from JustKnobs."""
    return None
133
+
134
+
135
def log_export_usage(**kwargs):
    """OSS no-op hook for recording torch.export usage telemetry."""
    return None
137
+
138
+
139
def log_trace_structured_event(*args, **kwargs) -> None:
    """OSS no-op hook for structured-trace event logging."""
    return None
141
+
142
+
143
def log_cache_bypass(*args, **kwargs) -> None:
    """OSS no-op hook for recording compiler-cache bypass events."""
    return None
145
+
146
+
147
def log_torchscript_usage(api: str, **kwargs):
    """OSS no-op hook recording which TorchScript *api* was used."""
    return None
150
+
151
+
152
def check_if_torch_exportable():
    """Always ``False`` in OSS; flipped by the internal build when applicable."""
    return False
154
+
155
+
156
def export_training_ir_rollout_check() -> bool:
    """Always ``True`` in OSS; internally gated by a rollout knob."""
    return True
158
+
159
+
160
def log_torch_jit_trace_exportability(
    api: str,
    type_of_export: str,
    export_outcome: str,
    result: str,
):
    """OSS no-op hook recording whether a ``torch.jit.trace`` was exportable."""
    return None
168
+
169
+
170
def capture_pre_autograd_graph_using_training_ir() -> bool:
    """Always ``False`` in OSS; internally gated by a rollout knob."""
    return False
172
+
173
+
174
def justknobs_check(name: str, default: bool = True) -> bool:
    """
    This function can be used to killswitch functionality in FB prod,
    where you can toggle this value to False in JK without having to
    do a code push. In OSS, we always have everything turned on all
    the time, because downstream users can simply choose to not update
    PyTorch. (If more fine-grained enable/disable is needed, we could
    potentially have a map we lookup name in to toggle behavior. But
    the point is that it's all tied to source code in OSS, since there's
    no live server to query.)

    This is the bare minimum functionality I needed to do some killswitches.
    We have a more detailed plan at
    https://docs.google.com/document/d/1Ukerh9_42SeGh89J-tGtecpHBPwGlkQ043pddkKb3PU/edit
    In particular, in some circumstances it may be necessary to read in
    a knob once at process start, and then use it consistently for the
    rest of the process. Future functionality will codify these patterns
    into a better high level API.

    WARNING: Do NOT call this function at module import time, JK is not
    fork safe and you will break anyone who forks the process and then
    hits JK again.
    """
    # OSS behavior: the knob is never consulted; the caller's default wins.
    return default
198
+
199
+
200
def justknobs_getval_int(name: str) -> int:
    """
    Read warning on justknobs_check. Always 0 in OSS.
    """
    return 0
205
+
206
+
207
def is_fb_unit_test() -> bool:
    """Always ``False`` in OSS; ``True`` only inside Meta's test environment."""
    return False
209
+
210
+
211
@functools.lru_cache(None)
def max_clock_rate():
    """Return the max SM/compute clock (MHz) of device 0, cached per process.

    CUDA: queried via triton's ``nvsmi`` helper. ROCm: no nvsmi equivalent
    yet, so the gfx architecture name is mapped to hard-coded clocks
    (required for the test_snode_runtime unit tests).
    """
    if not torch.version.hip:
        from triton.testing import nvsmi

        return nvsmi(["clocks.max.sm"])[0]

    gcn_arch = str(torch.cuda.get_device_properties(0).gcnArchName.split(":", 1)[0])
    # Ordered substring matches, checked first to last.
    arch_clocks = (
        ("gfx94", 1700),
        ("gfx90a", 1700),
        ("gfx908", 1502),
        ("gfx11", 1700),
        ("gfx103", 1967),
        ("gfx101", 1144),
    )
    for arch_substr, mhz in arch_clocks:
        if arch_substr in gcn_arch:
            return mhz
    return 1100
236
+
237
+
238
def get_mast_job_name_version() -> Optional[Tuple[str, int]]:
    """Return the (MAST job name, version) pair; always ``None`` in OSS."""
    return None
240
+
241
+
242
# Loopback address/port used by distributed tests.
TEST_MASTER_ADDR = "127.0.0.1"
TEST_MASTER_PORT = 29500
# USE_GLOBAL_DEPS controls whether __init__.py tries to load
# libtorch_global_deps, see Note [Global dependencies]
USE_GLOBAL_DEPS = True
# USE_RTLD_GLOBAL_WITH_LIBTORCH controls whether __init__.py tries to load
# _C.so with RTLD_GLOBAL during the call to dlopen.
USE_RTLD_GLOBAL_WITH_LIBTORCH = False
# If an op was defined in C++ and extended from Python using the
# torch.library.register_fake, returns if we require that there be a
# m.set_python_module("mylib.ops") call from C++ that associates
# the C++ op with a python module.
REQUIRES_SET_PYTHON_MODULE = False
255
+
256
+
257
def maybe_upload_prof_stats_to_manifold(profile_path: str) -> Optional[str]:
    """Print a notice and return ``None``; the FB build actually uploads."""
    print("Uploading profile stats (fb-only otherwise no-op)")
    return None
260
+
261
+
262
def log_chromium_event_internal(
    event: Dict[str, Any],
    stack: List[str],
    logger_uuid: str,
    start_time_ns: int,
):
    """OSS no-op hook for internally logging a chromium trace event."""
    return None
269
+
270
+
271
def record_chromium_event_internal(
    event: Dict[str, Any],
):
    """OSS no-op hook for internally recording a chromium trace event."""
    return None
phi4/lib/python3.10/site-packages/torch/functional.py ADDED
@@ -0,0 +1,2209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import itertools
3
+ import operator
4
+ from typing import Any, List, Optional, Sequence, Tuple, TYPE_CHECKING, Union
5
+
6
+ import torch
7
+ import torch.nn.functional as F
8
+ from torch import _VF, Tensor
9
+ from torch._C import _add_docstr
10
+ from torch._jit_internal import _overload as overload, boolean_dispatch
11
+ from torch._lowrank import pca_lowrank, svd_lowrank
12
+ from torch.overrides import (
13
+ handle_torch_function,
14
+ has_torch_function,
15
+ has_torch_function_unary,
16
+ has_torch_function_variadic,
17
+ )
18
+
19
+
20
# Public names exported by ``from torch.functional import *``.
__all__ = [
    "atleast_1d",
    "atleast_2d",
    "atleast_3d",
    "align_tensors",
    "broadcast_shapes",
    "broadcast_tensors",
    "cartesian_prod",
    "block_diag",
    "cdist",
    "chain_matmul",
    "einsum",
    "istft",
    "lu",
    "norm",
    "meshgrid",
    "pca_lowrank",
    "split",
    "stft",
    "svd_lowrank",
    "tensordot",
    "unique",
    "unique_consecutive",
    "unravel_index",
]
45
+
46
+
47
def broadcast_tensors(*tensors):
    r"""broadcast_tensors(*tensors) -> List of Tensors

    Broadcasts the given tensors according to :ref:`broadcasting-semantics`.

    Args:
        *tensors: any number of tensors of the same type

    .. warning::

        More than one element of a broadcasted tensor may refer to a single
        memory location. As a result, in-place operations (especially ones that
        are vectorized) may result in incorrect behavior. If you need to write
        to the tensors, please clone them first.

    Example::

        >>> a, b = torch.broadcast_tensors(
        ...     torch.arange(3).view(1, 3), torch.arange(2).view(2, 1))
        >>> a.size()
        torch.Size([2, 3])
    """
    # Variadic-args wrapper: honor __torch_function__ overrides first.
    if has_torch_function(tensors):
        return handle_torch_function(broadcast_tensors, tensors, *tensors)
    return _VF.broadcast_tensors(tensors)  # type: ignore[attr-defined]
77
+
78
+
79
def broadcast_shapes(*shapes):
    r"""broadcast_shapes(*shapes) -> Size

    Similar to :func:`broadcast_tensors` but for shapes.

    This is equivalent to
    ``torch.broadcast_tensors(*map(torch.empty, shapes))[0].shape``
    but avoids the need to create intermediate tensors. This is useful for
    broadcasting tensors of common batch shape but different rightmost shape,
    e.g. to broadcast mean vectors with covariance matrices.

    Example::

        >>> torch.broadcast_shapes((2,), (3, 1), (1, 1, 1))
        torch.Size([1, 3, 2])

    Args:
        \*shapes (torch.Size): Shapes of tensors.

    Returns:
        shape (torch.Size): A shape compatible with all input shapes.

    Raises:
        RuntimeError: If shapes are incompatible.
    """
    # This wrapper exists to support variadic args.
    # TODO Move this to C++ once the jit has better support for torch.Size.
    if not torch.jit.is_tracing():
        # First pass: find the maximum rank among the inputs (ints count as rank 1).
        max_len = 0
        for shape in shapes:
            if isinstance(shape, (int, torch.SymInt)):
                if max_len < 1:
                    max_len = 1
            elif isinstance(shape, (tuple, list)):
                s = len(shape)
                if max_len < s:
                    max_len = s
        result = [1] * max_len

        from torch.fx.experimental.symbolic_shapes import guard_size_oblivious

        # Second pass: fold each shape into `result`, aligned from the right.
        for shape in shapes:
            if isinstance(shape, (int, torch.SymInt)):
                shape = (shape,)
            if isinstance(shape, (tuple, list)):
                for i in range(-1, -1 - len(shape), -1):
                    if shape[i] < 0:
                        raise RuntimeError(
                            f"Trying to create tensor with negative dimension ({shape[i]}): ({shape[i]})"
                        )
                    # NB: result is initialized to 1 so this is effectively an
                    # equals one test
                    if guard_size_oblivious(shape[i] == 1) or guard_size_oblivious(
                        shape[i] == result[i]
                    ):
                        continue
                    # Conflict: result[i] was already set to a non-1 size != shape[i].
                    if result[i] != 1:
                        raise RuntimeError(
                            "Shape mismatch: objects cannot be broadcast to a single shape"
                        )
                    result[i] = shape[i]
            else:
                raise RuntimeError(
                    "Input shapes should be of type ints, a tuple of ints, or a list of ints, got ",
                    shape,
                )
        return torch.Size(result)
    else:
        # with implementation above, torch.jit.trace hardcodes the sizes which makes subsequent replays fail
        with torch.no_grad():
            scalar = torch.zeros((), device="cpu")
            tensors = [scalar.expand(shape) for shape in shapes]
            tensors = broadcast_tensors(*tensors)
            return tensors[0].shape
153
+
154
+
155
def split(
    tensor: Tensor,
    split_size_or_sections: Union[int, List[int]],
    dim: int = 0,
) -> Tuple[Tensor, ...]:
    r"""Splits the tensor into chunks. Each chunk is a view of the original tensor.

    If :attr:`split_size_or_sections` is an integer type, then :attr:`tensor` will
    be split into equally sized chunks (if possible). Last chunk will be smaller if
    the tensor size along the given dimension :attr:`dim` is not divisible by
    :attr:`split_size`.

    If :attr:`split_size_or_sections` is a list, then :attr:`tensor` will be split
    into ``len(split_size_or_sections)`` chunks with sizes in :attr:`dim` according
    to :attr:`split_size_or_sections`.

    Args:
        tensor (Tensor): tensor to split.
        split_size_or_sections (int) or (list(int)): size of a single chunk or
            list of sizes for each chunk
        dim (int): dimension along which to split the tensor.

    Example::

        >>> a = torch.arange(10).reshape(5, 2)
        >>> torch.split(a, [1, 4])
        (tensor([[0, 1]]),
         tensor([[2, 3],
                 [4, 5],
                 [6, 7],
                 [8, 9]]))
    """
    if has_torch_function_unary(tensor):
        return handle_torch_function(
            split, (tensor,), tensor, split_size_or_sections, dim=dim
        )
    # Tensor.split (in _tensor.py) selects between the two ATen overloads
    # depending on whether an int or a sizes list was given.
    return tensor.split(split_size_or_sections, dim)
208
+
209
+
210
def einsum(*args: Any) -> Tensor:
    r"""einsum(equation, *operands) -> Tensor

    Sums the product of the elements of the input :attr:`operands` along
    dimensions specified using a notation based on the Einstein summation
    convention.

    Every dimension of each operand is labeled with a subscript (a letter in
    ``[a-zA-Z]``); operands are separated by commas, e.g. ``'ij,jk'``.
    Dimensions sharing a subscript must be broadcastable. A subscript repeated
    within one operand selects its diagonal along those dimensions. Subscripts
    appearing exactly once form the output, in alphabetical order, unless the
    output is given explicitly after ``'->'`` (e.g. matrix multiplication is
    ``'ij,jk->ik'``). An ellipsis ``'...'`` (at most one per operand) covers
    the unlabeled dimensions, which must broadcast together; without an
    explicit output, ellipsis dimensions come first. Whitespace between
    elements is ignored; the empty equation ``''`` is valid for scalars.

    A sublist format is also supported: operands interleaved with subscript
    lists of integers in ``[0, 52)`` (``Ellipsis`` allowed), with an optional
    trailing output sublist — ``torch.einsum(op1, sublist1, op2, sublist2,
    ..., [sublist_out])``.

    When the opt-einsum package is available and
    :mod:`torch.backends.opt_einsum` is enabled, contractions of three or more
    operands are reordered using the configured strategy
    (``torch.backends.opt_einsum.strategy``); otherwise contraction proceeds
    left to right.

    Args:
        equation (str): The subscripts for the Einstein summation.
        operands (List[Tensor]): The tensors to compute the Einstein summation of.

    Example::

        >>> x, y = torch.randn(5), torch.randn(4)
        >>> torch.einsum('i,j->ij', x, y).shape
        torch.Size([5, 4])
    """
    import torch.backends.opt_einsum as opt_einsum

    # Variadic-args wrapper around _VF.einsum.
    if len(args) < 2:
        raise ValueError(
            "einsum(): must specify the equation string and at least one operand, "
            "or at least one operand and its subscripts list"
        )

    if isinstance(args[0], torch.Tensor):
        # Sublist format: rebuild the classic equation string, mapping
        # 0..25 -> 'A'..'Z' and 26..51 -> 'a'..'z'.
        def _sub_to_str(code: int) -> str:
            if code == Ellipsis:
                return "..."
            if 0 <= code < 26:
                return chr(ord("A") + code)
            if 26 <= code < 52:
                return chr(ord("a") + code - 26)
            raise ValueError(
                "einsum(): subscript in subscript list is not within the valid range [0, 52)"
            )

        equation = ",".join(
            "".join(_sub_to_str(code) for code in sublist) for sublist in args[1::2]
        )
        if len(args) % 2 == 1:
            # Odd argument count: the final sublist names the output subscripts.
            equation += "->" + "".join(_sub_to_str(code) for code in args[-1])
            operands = args[:-1:2]
        else:
            operands = args[::2]
    else:
        equation = args[0]
        operands = args[1:]

    if has_torch_function(operands):
        return handle_torch_function(einsum, operands, equation, *operands)

    if len(operands) == 1 and isinstance(operands[0], (list, tuple)):
        # Legacy interface: all operands passed as one list argument. Recurse
        # so values with __torch_function__ inside the list are handled too.
        return einsum(equation, *operands[0])

    if len(operands) <= 2 or not opt_einsum.enabled:
        # Contracting 0 or 1 time(s) is already optimal, or the user has
        # disabled opt_einsum path optimization.
        return _VF.einsum(equation, operands)  # type: ignore[attr-defined]

    path = None
    if opt_einsum.is_available():
        _opt_einsum = opt_einsum.get_opt_einsum()
        tupled_path = _opt_einsum.contract_path(
            equation, *operands, optimize=opt_einsum.strategy
        )[0]
        # Flatten [(a, b), ...] into [a, b, ...] for the C++ dispatcher.
        path = [step for pair in tupled_path for step in pair]
    return _VF.einsum(equation, operands, path=path)  # type: ignore[attr-defined]
418
+
419
+
420
# This wrapper exists to support variadic args.
if TYPE_CHECKING:
    # Only mypy sees this annotated signature; the JIT cannot digest Union.
    def meshgrid(
        *tensors: Union[Tensor, List[Tensor]], indexing: Optional[str] = None
    ) -> Tuple[Tensor, ...]:
        return _meshgrid(*tensors, indexing=indexing)

else:

    def meshgrid(*tensors, indexing: Optional[str] = None) -> Tuple[Tensor, ...]:
        r"""Create N-dimensional coordinate grids from N 1-D input tensors.

        Given :math:`N` 1-D tensors of sizes :math:`S_0 \ldots S_{N-1}`,
        returns :math:`N` tensors, each of shape :math:`(S_0, ..., S_{N-1})`,
        where the :math:`i`-th output is the :math:`i`-th input expanded to
        the full grid. 0-D inputs are treated as 1-D tensors of a single
        element. This is helpful when you want to visualize data over some
        range of inputs.

        .. warning::
            `torch.meshgrid(*tensors)` currently has the same behavior as
            calling `numpy.meshgrid(*arrays, indexing='ij')`. In the future
            `torch.meshgrid` will transition to `indexing='xy'` as the
            default. https://github.com/pytorch/pytorch/issues/50276 tracks
            this issue with the goal of migrating to NumPy's behavior.

        .. seealso::

            :func:`torch.cartesian_prod` has the same effect but it
            collects the data in a tensor of vectors.

        Args:
            tensors (list of Tensor): list of scalars or 1 dimensional
                tensors. Scalars will be treated as tensors of size
                :math:`(1,)` automatically.

            indexing: (str, optional): the indexing mode, either "xy" or
                "ij", defaults to "ij". See warning for future changes.

                If "xy" is selected, the first dimension corresponds to the
                cardinality of the second input and the second dimension
                corresponds to the cardinality of the first input.

                If "ij" is selected, the dimensions are in the same order as
                the cardinality of the inputs.

        Returns:
            seq (sequence of Tensors): If the input has :math:`N` tensors of
            size :math:`S_0 \ldots S_{N-1}``, then the output will also have
            :math:`N` tensors, where each tensor is of shape
            :math:`(S_0, ..., S_{N-1})`.

        Example::

            >>> x = torch.tensor([1, 2, 3])
            >>> y = torch.tensor([4, 5, 6])
            >>> grid_x, grid_y = torch.meshgrid(x, y, indexing='ij')
            >>> grid_x
            tensor([[1, 1, 1],
                    [2, 2, 2],
                    [3, 3, 3]])
            >>> grid_y
            tensor([[4, 5, 6],
                    [4, 5, 6],
                    [4, 5, 6]])

            The pairings (1, 4), (1, 5), ..., (3, 6) across the grid are the
            same as the cartesian product:
            >>> torch.equal(torch.cat(tuple(torch.dstack([grid_x, grid_y]))),
            ...             torch.cartesian_prod(x, y))
            True
        """
        return _meshgrid(*tensors, indexing=indexing)
525
+
526
+
527
+ def _meshgrid(*tensors, indexing: Optional[str]):
528
+ if has_torch_function(tensors):
529
+ return handle_torch_function(meshgrid, tensors, *tensors, indexing=indexing)
530
+ if len(tensors) == 1 and isinstance(tensors[0], (list, tuple)):
531
+ # the old interface of passing the operands as one list argument
532
+ tensors = tensors[0] # type: ignore[assignment]
533
+
534
+ # Continue allowing call of old method that takes no indexing
535
+ # kwarg for forward compatibility reasons.
536
+ #
537
+ # Remove this two weeks after landing.
538
+ kwargs = {} if indexing is None else {"indexing": indexing}
539
+ return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
540
+
541
+
542
def stft(
    input: Tensor,
    n_fft: int,
    hop_length: Optional[int] = None,
    win_length: Optional[int] = None,
    window: Optional[Tensor] = None,
    center: bool = True,
    pad_mode: str = "reflect",
    normalized: bool = False,
    onesided: Optional[bool] = None,
    return_complex: Optional[bool] = None,
) -> Tensor:
    r"""Short-time Fourier transform (STFT).

    Computes the Fourier transform of short overlapping windows of the
    input, giving frequency components of the signal as they change over
    time. The interface is modeled after (but is *not* a drop-in replacement
    for) the librosa_ stft function.

    .. _librosa: https://librosa.org/doc/latest/generated/librosa.stft.html

    Ignoring the optional batch dimension, this method computes

    .. math::
        X[\omega, m] = \sum_{k = 0}^{\text{win\_length-1}}
            \text{window}[k]\ \text{input}[m \times \text{hop\_length} + k]\
            \exp\left(- j \frac{2 \pi \cdot \omega k}{\text{n\_fft}}\right),

    where :math:`m` is the index of the sliding window and :math:`\omega` is
    the frequency, :math:`0 \leq \omega < \text{n\_fft}` for
    ``onesided=False`` or :math:`0 \leq \omega < \lfloor \text{n\_fft} / 2
    \rfloor + 1` for ``onesided=True``.

    .. warning::
        From version 1.8.0, :attr:`return_complex` must always be given
        explicitly for real inputs and `return_complex=False` has been
        deprecated. Strongly prefer ``return_complex=True``; use
        :func:`torch.view_as_real` to recover a real tensor with an extra
        last dimension for real and imaginary components.

    .. warning::
        From version 2.1, a warning will be provided if a :attr:`window` is
        not specified. In a future release, this attribute will be required.
        Not providing a window currently defaults to a rectangular window,
        which may cause undesirable artifacts; consider tapered windows such
        as :func:`torch.hann_window`.

    .. warning::
        This function changed signature at version 0.4.1. Calling with the
        previous signature may cause error or return incorrect result.

    Args:
        input (Tensor): the input tensor of shape `(B?, L)` where `B?` is an
            optional batch dimension.
        n_fft (int): size of Fourier transform.
        hop_length (int, optional): distance between neighboring sliding
            window frames. Default: ``None`` (treated as ``floor(n_fft / 4)``).
        win_length (int, optional): the size of window frame and STFT filter.
            Default: ``None`` (treated as :attr:`n_fft`).
        window (Tensor, optional): the optional window function. Shape must
            be 1d and `<= n_fft`; shorter windows are padded on both sides to
            length :attr:`n_fft` before being applied. Default: ``None``
            (treated as a window of all :math:`1` s).
        center (bool, optional): whether to pad :attr:`input` on both sides
            so that the :math:`t`-th frame is centered at time
            :math:`t \times \text{hop\_length}`; otherwise the :math:`t`-th
            frame begins there. Default: ``True``.
        pad_mode (str, optional): padding method used when :attr:`center` is
            ``True``; see :meth:`torch.nn.functional.pad` for options.
            Default: ``"reflect"``.
        normalized (bool, optional): whether to return normalized STFT
            results, i.e. multiplied by
            :math:`(\text{frame\_length})^{-0.5}`. Default: ``False``.
        onesided (bool, optional): whether to return only half of the
            results, exploiting the conjugate symmetry
            :math:`X[m, \omega] = X[m, \text{n\_fft} - \omega]^*` of the
            real-to-complex transform. Not possible if the input or window
            tensors are complex. Default: ``True`` for real :attr:`input`
            and :attr:`window`, ``False`` otherwise.
        return_complex (bool, optional): whether to return a complex tensor,
            or a real tensor with an extra last dimension for the real and
            imaginary components.

            .. versionchanged:: 2.0
                ``return_complex`` is now a required argument for real
                inputs, as the default is being transitioned to ``True``.

            .. deprecated:: 2.0
                ``return_complex=False`` is deprecated, instead use
                ``return_complex=True``. Note that calling
                :func:`torch.view_as_real` on the output will recover the
                deprecated output format.

    Returns:
        Tensor: A tensor containing the STFT result with shape
        `(B?, N, T, C?)` where

        - `B?` is an optional batch dimension from the input.
        - `N` is the number of frequency samples, `(n_fft // 2) + 1` for
          `onesided=True`, or otherwise `n_fft`.
        - `T` is the number of frames, `1 + L // hop_length` for
          `center=True`, or `1 + (L - n_fft) // hop_length` otherwise.
        - `C?` is an optional length-2 dimension of real and imaginary
          components, present when `return_complex=False`.
    """
    if has_torch_function_unary(input):
        return handle_torch_function(
            stft,
            (input,),
            input,
            n_fft,
            hop_length=hop_length,
            win_length=win_length,
            window=window,
            center=center,
            pad_mode=pad_mode,
            normalized=normalized,
            onesided=onesided,
            return_complex=return_complex,
        )
    # NOTE: Do not edit. This centering code will be removed once the
    # forward-compatibility period for PR #73432 is over.
    if center:
        # Pad n_fft // 2 samples on each end so frame t is centered at
        # t * hop_length. The signal is temporarily viewed as 3-D,
        # presumably because F.pad's "reflect" mode expects a batched
        # input -- TODO confirm -- and restored afterwards.
        ndim = input.dim()
        batched_shape = [1] * (3 - ndim) + list(input.size())
        half = int(n_fft // 2)
        input = F.pad(input.view(batched_shape), [half, half], pad_mode)
        input = input.view(input.shape[-ndim:])
    return _VF.stft(  # type: ignore[attr-defined]
        input,
        n_fft,
        hop_length,
        win_length,
        window,
        normalized,
        onesided,
        return_complex,
    )
719
+
720
+
721
# ``istft`` is implemented in C++ (``torch.istft``); only its docstring is
# attached here.
istft = _add_docstr(
    torch.istft,
    "istft(input, n_fft, hop_length=None, win_length=None, window=None, center=True, "
    "normalized=False, onesided=None, length=None, return_complex=False) -> Tensor:\n"
    r"""
Inverse short time Fourier Transform. This is expected to be the inverse of :func:`~torch.stft`.

.. warning::
    From version 2.1, a warning will be provided if a :attr:`window` is
    not specified. In a future release, this attribute will be required.
    Please provide the same window used in the stft call.

It has the same parameters (+ additional optional parameter of :attr:`length`) and it
should return the least squares estimation of the original signal. The algorithm will
check using the NOLA condition (nonzero overlap).

Important consideration in the parameters :attr:`window` and :attr:`center` so that the
envelope created by the summation of all the windows is never zero at certain point in
time. Specifically,
:math:`\sum_{t=-\infty}^{\infty} |w|^2[n-t\times hop\_length] \cancel{=} 0`.

Since :func:`~torch.stft` discards elements at the end of the signal if they do not fit
in a frame, ``istft`` may return a shorter signal than the original signal (can occur if
:attr:`center` is False since the signal isn't padded). If `length` is given in the
arguments and is longer than expected, ``istft`` will pad zeros to the end of the
returned signal.

If :attr:`center` is ``True``, then there will be padding e.g. ``'constant'``,
``'reflect'``, etc. Left padding can be trimmed off exactly because it can be
calculated, but right padding cannot be calculated without additional information, so
providing :attr:`length` could be useful. If :attr:`length` is ``None`` then padding
will be aggressively removed (some loss of signal).

[1] D. W. Griffin and J. S. Lim, "Signal estimation from modified short-time Fourier transform,"
IEEE Trans. ASSP, vol.32, no.2, pp.236-243, Apr. 1984.

Args:
    input (Tensor): The input tensor. Expected to be in the format of :func:`~torch.stft`,
        output. That is a complex tensor of shape `(B?, N, T)` where

        - `B?` is an optional batch dimension
        - `N` is the number of frequency samples, `(n_fft // 2) + 1`
          for onesided input, or otherwise `n_fft`.
        - `T` is the number of frames, `1 + length // hop_length` for centered stft,
          or `1 + (length - n_fft) // hop_length` otherwise.

        .. versionchanged:: 2.0
            Real datatype inputs are no longer supported. Input must now have a
            complex datatype, as returned by ``stft(..., return_complex=True)``.
    n_fft (int): Size of Fourier transform
    hop_length (Optional[int]): The distance between neighboring sliding window frames.
        (Default: ``n_fft // 4``)
    win_length (Optional[int]): The size of window frame and STFT filter. (Default: ``n_fft``)
    window (Optional[torch.Tensor]): The optional window function.
        Shape must be 1d and `<= n_fft`
        (Default: ``torch.ones(win_length)``)
    center (bool): Whether :attr:`input` was padded on both sides so that the :math:`t`-th
        frame is centered at time :math:`t \times \text{hop\_length}`.
        (Default: ``True``)
    normalized (bool): Whether the STFT was normalized. (Default: ``False``)
    onesided (Optional[bool]): Whether the STFT was onesided.
        (Default: ``True`` if `n_fft != fft_size` in the input size)
    length (Optional[int]): The amount to trim the signal by (i.e. the
        original signal length). Defaults to `(T - 1) * hop_length` for
        centered stft, or `n_fft + (T - 1) * hop_length` otherwise, where `T`
        is the number of input frames.
    return_complex (Optional[bool]):
        Whether the output should be complex, or if the input should be
        assumed to derive from a real signal and window.
        Note that this is incompatible with ``onesided=True``.
        (Default: ``False``)

Returns:
    Tensor: Least squares estimation of the original signal of shape `(B?, length)` where
    `B?` is an optional batch dimension from the input tensor.
""",
)
802
+
803
+
804
+ if TYPE_CHECKING:
805
+ # These _impl functions return a variable number of tensors as output with
806
+ # __torch_function__; tuple unpacking is done already rather than being
807
+ # done by the caller of the _impl function
808
+ _unique_impl_out = Any
809
+ else:
810
+ _unique_impl_out = Tuple[Tensor, Tensor, Tensor]
811
+
812
+
813
+ def _unique_impl(
814
+ input: Tensor,
815
+ sorted: bool = True,
816
+ return_inverse: bool = False,
817
+ return_counts: bool = False,
818
+ dim: Optional[int] = None,
819
+ ) -> _unique_impl_out:
820
+ r"""unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None) -> Tuple[Tensor, Tensor, Tensor]
821
+
822
+ Returns the unique elements of the input tensor.
823
+
824
+ .. note:: This function is different from :func:`torch.unique_consecutive` in the sense that
825
+ this function also eliminates non-consecutive duplicate values.
826
+
827
+ .. note:: Currently in the CUDA implementation and the CPU implementation,
828
+ `torch.unique` always sort the tensor at the beginning regardless of the `sort` argument.
829
+ Sorting could be slow, so if your input tensor is already sorted, it is recommended to use
830
+ :func:`torch.unique_consecutive` which avoids the sorting.
831
+
832
+ Args:
833
+ input (Tensor): the input tensor
834
+ sorted (bool): Whether to sort the unique elements in ascending order
835
+ before returning as output.
836
+ return_inverse (bool): Whether to also return the indices for where
837
+ elements in the original input ended up in the returned unique list.
838
+ return_counts (bool): Whether to also return the counts for each unique
839
+ element.
840
+ dim (int, optional): the dimension to operate upon. If ``None``, the
841
+ unique of the flattened input is returned. Otherwise, each of the
842
+ tensors indexed by the given dimension is treated as one of the
843
+ elements to apply the unique operation upon. See examples for more
844
+ details. Default: ``None``
845
+
846
+ Returns:
847
+ (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing
848
+
849
+ - **output** (*Tensor*): the output list of unique scalar elements.
850
+ - **inverse_indices** (*Tensor*): (optional) if
851
+ :attr:`return_inverse` is True, there will be an additional
852
+ returned tensor (same shape as input) representing the indices
853
+ for where elements in the original input map to in the output;
854
+ otherwise, this function will only return a single tensor.
855
+ - **counts** (*Tensor*): (optional) if
856
+ :attr:`return_counts` is True, there will be an additional
857
+ returned tensor (same shape as output or output.size(dim),
858
+ if dim was specified) representing the number of occurrences
859
+ for each unique value or tensor.
860
+
861
+ Example::
862
+
863
+ >>> output = torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long))
864
+ >>> output
865
+ tensor([1, 2, 3])
866
+
867
+ >>> output, inverse_indices = torch.unique(
868
+ ... torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_inverse=True)
869
+ >>> output
870
+ tensor([1, 2, 3])
871
+ >>> inverse_indices
872
+ tensor([0, 2, 1, 2])
873
+
874
+ >>> output, inverse_indices = torch.unique(
875
+ ... torch.tensor([[1, 3], [2, 3]], dtype=torch.long), sorted=True, return_inverse=True)
876
+ >>> output
877
+ tensor([1, 2, 3])
878
+ >>> inverse_indices
879
+ tensor([[0, 2],
880
+ [1, 2]])
881
+
882
+ >>> a = torch.tensor([
883
+ ... [
884
+ ... [1, 1, 0, 0],
885
+ ... [1, 1, 0, 0],
886
+ ... [0, 0, 1, 1],
887
+ ... ],
888
+ ... [
889
+ ... [0, 0, 1, 1],
890
+ ... [0, 0, 1, 1],
891
+ ... [1, 1, 1, 1],
892
+ ... ],
893
+ ... [
894
+ ... [1, 1, 0, 0],
895
+ ... [1, 1, 0, 0],
896
+ ... [0, 0, 1, 1],
897
+ ... ],
898
+ ... ])
899
+
900
+ >>> # If we call `torch.unique(a, dim=0)`, each of the tensors `a[idx, :, :]`
901
+ >>> # will be compared. We can see that `a[0, :, :]` and `a[2, :, :]` match
902
+ >>> # each other, so one of them will be removed.
903
+ >>> (a[0, :, :] == a[2, :, :]).all()
904
+ tensor(True)
905
+ >>> a_unique_dim0 = torch.unique(a, dim=0)
906
+ >>> a_unique_dim0
907
+ tensor([[[0, 0, 1, 1],
908
+ [0, 0, 1, 1],
909
+ [1, 1, 1, 1]],
910
+ [[1, 1, 0, 0],
911
+ [1, 1, 0, 0],
912
+ [0, 0, 1, 1]]])
913
+
914
+ >>> # Notice which sub-tensors from `a` match with the sub-tensors from
915
+ >>> # `a_unique_dim0`:
916
+ >>> (a_unique_dim0[0, :, :] == a[1, :, :]).all()
917
+ tensor(True)
918
+ >>> (a_unique_dim0[1, :, :] == a[0, :, :]).all()
919
+ tensor(True)
920
+
921
+ >>> # For `torch.unique(a, dim=1)`, each of the tensors `a[:, idx, :]` are
922
+ >>> # compared. `a[:, 0, :]` and `a[:, 1, :]` match each other, so one of
923
+ >>> # them will be removed.
924
+ >>> (a[:, 0, :] == a[:, 1, :]).all()
925
+ tensor(True)
926
+ >>> torch.unique(a, dim=1)
927
+ tensor([[[0, 0, 1, 1],
928
+ [1, 1, 0, 0]],
929
+ [[1, 1, 1, 1],
930
+ [0, 0, 1, 1]],
931
+ [[0, 0, 1, 1],
932
+ [1, 1, 0, 0]]])
933
+
934
+ >>> # For `torch.unique(a, dim=2)`, the tensors `a[:, :, idx]` are compared.
935
+ >>> # `a[:, :, 0]` and `a[:, :, 1]` match each other. Also, `a[:, :, 2]` and
936
+ >>> # `a[:, :, 3]` match each other as well. So in this case, two of the
937
+ >>> # sub-tensors will be removed.
938
+ >>> (a[:, :, 0] == a[:, :, 1]).all()
939
+ tensor(True)
940
+ >>> (a[:, :, 2] == a[:, :, 3]).all()
941
+ tensor(True)
942
+ >>> torch.unique(a, dim=2)
943
+ tensor([[[0, 1],
944
+ [0, 1],
945
+ [1, 0]],
946
+ [[1, 0],
947
+ [1, 0],
948
+ [1, 1]],
949
+ [[0, 1],
950
+ [0, 1],
951
+ [1, 0]]])
952
+ """
953
+ if has_torch_function_unary(input):
954
+ return handle_torch_function(
955
+ unique,
956
+ (input,),
957
+ input,
958
+ sorted=sorted,
959
+ return_inverse=return_inverse,
960
+ return_counts=return_counts,
961
+ dim=dim,
962
+ )
963
+
964
+ if dim is not None:
965
+ output, inverse_indices, counts = _VF.unique_dim(
966
+ input,
967
+ dim,
968
+ sorted=sorted,
969
+ return_inverse=return_inverse,
970
+ return_counts=return_counts,
971
+ )
972
+ else:
973
+ output, inverse_indices, counts = torch._unique2(
974
+ input,
975
+ sorted=sorted,
976
+ return_inverse=return_inverse,
977
+ return_counts=return_counts,
978
+ )
979
+ return output, inverse_indices, counts
980
+
981
+
982
+ def _unique_consecutive_impl(
983
+ input: Tensor,
984
+ return_inverse: bool = False,
985
+ return_counts: bool = False,
986
+ dim: Optional[int] = None,
987
+ ) -> _unique_impl_out:
988
+ r"""Eliminates all but the first element from every consecutive group of equivalent elements.
989
+
990
+ .. note:: This function is different from :func:`torch.unique` in the sense that this function
991
+ only eliminates consecutive duplicate values. This semantics is similar to `std::unique`
992
+ in C++.
993
+
994
+ Args:
995
+ input (Tensor): the input tensor
996
+ return_inverse (bool): Whether to also return the indices for where
997
+ elements in the original input ended up in the returned unique list.
998
+ return_counts (bool): Whether to also return the counts for each unique
999
+ element.
1000
+ dim (int): the dimension to apply unique. If ``None``, the unique of the
1001
+ flattened input is returned. default: ``None``
1002
+
1003
+ Returns:
1004
+ (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing
1005
+
1006
+ - **output** (*Tensor*): the output list of unique scalar elements.
1007
+ - **inverse_indices** (*Tensor*): (optional) if
1008
+ :attr:`return_inverse` is True, there will be an additional
1009
+ returned tensor (same shape as input) representing the indices
1010
+ for where elements in the original input map to in the output;
1011
+ otherwise, this function will only return a single tensor.
1012
+ - **counts** (*Tensor*): (optional) if
1013
+ :attr:`return_counts` is True, there will be an additional
1014
+ returned tensor (same shape as output or output.size(dim),
1015
+ if dim was specified) representing the number of occurrences
1016
+ for each unique value or tensor.
1017
+
1018
+ Example::
1019
+
1020
+ >>> x = torch.tensor([1, 1, 2, 2, 3, 1, 1, 2])
1021
+ >>> output = torch.unique_consecutive(x)
1022
+ >>> output
1023
+ tensor([1, 2, 3, 1, 2])
1024
+
1025
+ >>> output, inverse_indices = torch.unique_consecutive(x, return_inverse=True)
1026
+ >>> output
1027
+ tensor([1, 2, 3, 1, 2])
1028
+ >>> inverse_indices
1029
+ tensor([0, 0, 1, 1, 2, 3, 3, 4])
1030
+
1031
+ >>> output, counts = torch.unique_consecutive(x, return_counts=True)
1032
+ >>> output
1033
+ tensor([1, 2, 3, 1, 2])
1034
+ >>> counts
1035
+ tensor([2, 2, 1, 2, 1])
1036
+ """
1037
+ if has_torch_function_unary(input):
1038
+ return handle_torch_function(
1039
+ unique_consecutive,
1040
+ (input,),
1041
+ input,
1042
+ return_inverse=return_inverse,
1043
+ return_counts=return_counts,
1044
+ dim=dim,
1045
+ )
1046
+ output, inverse_indices, counts = _VF.unique_consecutive( # type: ignore[attr-defined]
1047
+ input, return_inverse=return_inverse, return_counts=return_counts, dim=dim
1048
+ )
1049
+ return output, inverse_indices, counts
1050
+
1051
+
1052
def _return_counts(
    input,
    sorted=True,
    return_inverse=False,
    return_counts=False,
    dim=None,
):
    # type: (Tensor, bool, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]
    # Dispatch target for ``unique`` with ``return_inverse=False`` and
    # ``return_counts=True``: drops the inverse-indices tensor from the
    # three-tuple produced by ``_unique_impl``.
    # Deliberately no docstring here: ``boolean_dispatch`` allows ``__doc__``
    # on at most one branch of a dispatch pair, and ``_unique_impl`` owns it.

    if has_torch_function_unary(input):
        # ``__torch_function__`` handlers shape their own return value, so the
        # full result of ``_unique_impl`` is forwarded unmodified.
        return _unique_impl(input, sorted, return_inverse, return_counts, dim)

    output, _, counts = _unique_impl(input, sorted, return_inverse, return_counts, dim)
    return output, counts
1066
+
1067
+
1068
def _return_output(
    input,
    sorted=True,
    return_inverse=False,
    return_counts=False,
    dim=None,
):
    # type: (Tensor, bool, bool, bool, Optional[int]) -> Tensor
    # Dispatch target for ``unique`` with ``return_inverse=False`` and
    # ``return_counts=False``: keeps only the unique-values tensor.
    # Deliberately no docstring here: ``boolean_dispatch`` allows ``__doc__``
    # on at most one branch of a dispatch pair.

    if has_torch_function_unary(input):
        # ``__torch_function__`` handlers shape their own return value.
        return _unique_impl(input, sorted, return_inverse, return_counts, dim)

    output, _, _ = _unique_impl(input, sorted, return_inverse, return_counts, dim)
    return output
1082
+
1083
+
1084
def _return_inverse(
    input,
    sorted=True,
    return_inverse=False,
    return_counts=False,
    dim=None,
):
    # type: (Tensor, bool, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]
    # Dispatch target for ``unique`` with ``return_inverse=True`` and
    # ``return_counts=False``: drops the counts tensor from the three-tuple
    # produced by ``_unique_impl``.
    # Deliberately no docstring here: ``boolean_dispatch`` allows ``__doc__``
    # on at most one branch of a dispatch pair, and ``_unique_impl`` owns it.

    if has_torch_function_unary(input):
        # ``__torch_function__`` handlers shape their own return value.
        return _unique_impl(input, sorted, return_inverse, return_counts, dim)

    output, inverse_indices, _ = _unique_impl(
        input, sorted, return_inverse, return_counts, dim
    )
    return output, inverse_indices
1100
+
1101
+
1102
# Inner dispatcher for ``unique(..., return_inverse=False)``: picks between
# the (output, counts) and output-only wrappers based on ``return_counts``
# (keyword, or positional index 3 of the wrapper signatures).
_return_inverse_false = boolean_dispatch(
    arg_name="return_counts",
    arg_index=3,
    default=False,
    if_true=_return_counts,
    if_false=_return_output,
    module_name=__name__,
    func_name="unique",
)

# Inner dispatcher for ``unique(..., return_inverse=True)``: the full
# three-tuple implementation when counts are also requested, otherwise the
# (output, inverse) wrapper.
_return_inverse_true = boolean_dispatch(
    arg_name="return_counts",
    arg_index=3,
    default=False,
    if_true=_unique_impl,
    if_false=_return_inverse,
    module_name=__name__,
    func_name="unique",
)

# The return type of unique depends on `return_inverse`, and `return_counts` so in order to
# resolve the output type in TorchScript we need to statically know the value of both parameters

unique = boolean_dispatch(
    arg_name="return_inverse",
    arg_index=2,
    default=False,
    if_true=_return_inverse_true,
    if_false=_return_inverse_false,
    module_name=__name__,
    func_name="unique",
)
# Surface the full public docstring from the canonical implementation.
unique.__doc__ = _unique_impl.__doc__
1135
+
1136
+
1137
def _consecutive_return_counts(
    input,
    return_inverse=False,
    return_counts=False,
    dim=None,
):
    # type: (Tensor, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]
    # Dispatch target for ``unique_consecutive`` with ``return_inverse=False``
    # and ``return_counts=True``: keeps (output, counts) and drops the
    # inverse indices from the full three-tuple.
    # Deliberately no docstring here: ``boolean_dispatch`` allows ``__doc__``
    # on at most one branch of a dispatch pair.

    if has_torch_function_unary(input):
        # ``__torch_function__`` handlers shape their own return value.
        return _unique_consecutive_impl(input, return_inverse, return_counts, dim)

    output, _, counts = _unique_consecutive_impl(
        input, return_inverse, return_counts, dim
    )
    return output, counts
1152
+
1153
+
1154
def _consecutive_return_output(
    input,
    return_inverse=False,
    return_counts=False,
    dim=None,
):
    # type: (Tensor, bool, bool, Optional[int]) -> Tensor
    # Dispatch target for ``unique_consecutive`` with ``return_inverse=False``
    # and ``return_counts=False``: keeps only the deduplicated tensor.
    # Deliberately no docstring here: ``boolean_dispatch`` allows ``__doc__``
    # on at most one branch of a dispatch pair.

    if has_torch_function_unary(input):
        # ``__torch_function__`` handlers shape their own return value.
        return _unique_consecutive_impl(input, return_inverse, return_counts, dim)

    output, _, _ = _unique_consecutive_impl(input, return_inverse, return_counts, dim)
    return output
1167
+
1168
+
1169
def _consecutive_return_inverse(
    input,
    return_inverse=False,
    return_counts=False,
    dim=None,
):
    # type: (Tensor, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]
    # Dispatch target for ``unique_consecutive`` with ``return_inverse=True``
    # and ``return_counts=False``: drops the counts tensor from the full
    # three-tuple.
    # Deliberately no docstring here: ``boolean_dispatch`` allows ``__doc__``
    # on at most one branch of a dispatch pair.

    if has_torch_function_unary(input):
        # ``__torch_function__`` handlers shape their own return value.
        return _unique_consecutive_impl(input, return_inverse, return_counts, dim)

    output, inverse_indices, _ = _unique_consecutive_impl(
        input, return_inverse, return_counts, dim
    )
    return output, inverse_indices
1184
+
1185
+
1186
# Inner dispatcher for ``unique_consecutive(..., return_inverse=False)``.
#
# NOTE(review): the ``arg_index`` values below look off by one relative to
# the ``_consecutive_*`` signatures ``(input, return_inverse, return_counts,
# dim)``: positionally, ``return_inverse`` sits at index 1 and
# ``return_counts`` at index 2, yet the dispatchers use 1 for
# ``return_counts`` and 2 for ``return_inverse`` (matching the non-consecutive
# ``unique`` signature, which has an extra ``sorted`` parameter). Keyword
# calls are unaffected because ``boolean_dispatch`` consults kwargs first,
# but purely positional calls would read the wrong slot — confirm against
# ``torch._jit_internal.boolean_dispatch`` (which also records this index for
# TorchScript) before changing.
_consecutive_return_inverse_false = boolean_dispatch(
    arg_name="return_counts",
    arg_index=1,
    default=False,
    if_true=_consecutive_return_counts,
    if_false=_consecutive_return_output,
    module_name=__name__,
    func_name="unique_consecutive",
)

# Inner dispatcher for ``unique_consecutive(..., return_inverse=True)``.
_consecutive_return_inverse_true = boolean_dispatch(
    arg_name="return_counts",
    arg_index=1,
    default=False,
    if_true=_unique_consecutive_impl,
    if_false=_consecutive_return_inverse,
    module_name=__name__,
    func_name="unique_consecutive",
)

# The return type of unique depends on `return_inverse`, and `return_counts` so in order to
# resolve the output type in TorchScript we need to statically know the value of both parameters

unique_consecutive = boolean_dispatch(
    arg_name="return_inverse",
    arg_index=2,
    default=False,
    if_true=_consecutive_return_inverse_true,
    if_false=_consecutive_return_inverse_false,
    module_name=__name__,
    func_name="unique_consecutive",
)
# Surface the full public docstring from the canonical implementation.
unique_consecutive.__doc__ = _unique_consecutive_impl.__doc__
1219
+
1220
if TYPE_CHECKING:
    pass
    # There's no good way to use this type annotation without breaking JIT
    # overloads. So leave untyped for mypy for now.
else:

    # Runtime-only ``typing.overload`` stubs documenting the accepted forms
    # of the ``dims`` argument (an int, a pair of index lists, a list of two
    # lists, or a tensor). The real implementation follows below.
    @overload
    def tensordot(
        a,
        b,
        dims: int = 2,
        out: Optional[torch.Tensor] = None,
    ):
        pass

    @overload
    def tensordot(  # noqa: F811
        a,
        b,
        dims: Tuple[List[int], List[int]],
        out: Optional[torch.Tensor] = None,
    ):
        pass

    @overload
    def tensordot(  # noqa: F811
        a,
        b,
        dims: List[List[int]],
        out: Optional[torch.Tensor] = None,
    ):
        pass

    @overload
    def tensordot(  # noqa: F811
        a,
        b,
        dims: torch.Tensor,
        out: Optional[torch.Tensor] = None,
    ):
        pass
1261
+
1262
+
1263
def tensordot(  # noqa: F811
    a,
    b,
    dims=2,
    out: Optional[torch.Tensor] = None,
):
    r"""Returns a contraction of a and b over multiple dimensions.

    :attr:`tensordot` implements a generalized matrix product.

    Args:
      a (Tensor): Left tensor to contract
      b (Tensor): Right tensor to contract
      dims (int or Tuple[List[int], List[int]] or List[List[int]] containing two lists or Tensor): number of dimensions to
         contract or explicit lists of dimensions for :attr:`a` and
         :attr:`b` respectively

    When called with a non-negative integer argument :attr:`dims` = :math:`d`, and
    the number of dimensions of :attr:`a` and :attr:`b` is :math:`m` and :math:`n`,
    respectively, :func:`~torch.tensordot` computes

    .. math::
        r_{i_0,...,i_{m-d}, i_d,...,i_n}
          = \sum_{k_0,...,k_{d-1}} a_{i_0,...,i_{m-d},k_0,...,k_{d-1}} \times b_{k_0,...,k_{d-1}, i_d,...,i_n}.

    When called with :attr:`dims` of the list form, the given dimensions will be contracted
    in place of the last :math:`d` of :attr:`a` and the first :math:`d` of  :attr:`b`. The sizes
    in these dimensions must match, but :func:`~torch.tensordot` will deal with broadcasted
    dimensions.

    Examples::

        >>> a = torch.arange(60.).reshape(3, 4, 5)
        >>> b = torch.arange(24.).reshape(4, 3, 2)
        >>> torch.tensordot(a, b, dims=([1, 0], [0, 1]))
        tensor([[4400., 4730.],
                [4532., 4874.],
                [4664., 5018.],
                [4796., 5162.],
                [4928., 5306.]])

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> a = torch.randn(3, 4, 5, device='cuda')
        >>> b = torch.randn(4, 5, 6, device='cuda')
        >>> c = torch.tensordot(a, b, dims=2).cpu()
        tensor([[ 8.3504, -2.5436,  6.2922,  2.7556, -1.0732,  3.2741],
                [ 3.3161,  0.0704,  5.0187, -0.4079, -4.3126,  4.8744],
                [ 0.8223,  3.9445,  3.2168, -0.2400,  3.4117,  1.7780]])

        >>> a = torch.randn(3, 5, 4, 6)
        >>> b = torch.randn(6, 4, 5, 3)
        >>> torch.tensordot(a, b, dims=([2, 1, 3], [1, 2, 0]))
        tensor([[  7.7193,  -2.4867, -10.3204],
                [  1.5513, -14.4737,  -6.5113],
                [ -0.2850,   4.2573,  -3.5997]])
    """
    if has_torch_function_variadic(a, b):
        return handle_torch_function(tensordot, (a, b), a, b, dims=dims, out=out)

    if not isinstance(dims, (tuple, list, torch.Tensor, int, torch.SymInt)):
        raise RuntimeError(
            "tensordot expects dims to be int or "
            + "Tuple[List[int], List[int]] or "
            + "List[List[int]] containing two lists, but got "
            + f"dims={dims}"
        )

    dims_a: List[int] = []
    dims_b: List[int] = []

    # NOTE: the three non-exclusive ``isinstance`` branches below (rather than
    # an if/elif chain) keep TorchScript able to refine the type of ``dims``.

    # Pair-of-lists form: explicit dimensions for ``a`` and ``b``.
    if isinstance(dims, (tuple, list)):
        dims_a, dims_b = dims

    # Tensor form: either a 2-row tensor of explicit dims, or a 0-d/1-element
    # tensor holding the integer count.
    if isinstance(dims, torch.Tensor):
        num_elements = dims.numel()
        if num_elements > 1:
            assert dims.size()[0] == 2
            dims_a = torch.jit.annotate(List[int], dims[0].tolist())
            dims_b = torch.jit.annotate(List[int], dims[1].tolist())
        else:
            dims_val = int(dims.item())
            if dims_val < 0:
                raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}")
            # Contract the last ``dims_val`` dims of ``a`` with the first of ``b``.
            dims_a = list(range(-dims_val, 0))
            dims_b = list(range(dims_val))

    # Integer form: contract the last ``dims`` dims of ``a`` with the first of ``b``.
    if isinstance(dims, (int, torch.SymInt)):
        if dims < 0:
            raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}")
        if dims > min(a.dim(), b.dim()):
            raise RuntimeError(
                f"tensordot expects dims < ndim_a or ndim_b, but got dims={dims}"
            )
        dims_a = list(range(-dims, 0))
        dims_b = list(range(dims))

    if out is None:
        return _VF.tensordot(a, b, dims_a, dims_b)  # type: ignore[attr-defined]
    else:
        return _VF.tensordot(a, b, dims_a, dims_b, out=out)  # type: ignore[attr-defined]
1363
+
1364
+
1365
def cartesian_prod(*tensors: Tensor) -> Tensor:
    """Compute the cartesian product of the given sequence of 1-D tensors.

    The result matches converting each tensor to a list, applying
    :func:`itertools.product`, and stacking the outcome back into a tensor.

    Args:
        *tensors: any number of 1 dimensional tensors.

    Returns:
        Tensor: a tensor whose rows are the elements of the cartesian
        product (a single input tensor is returned unchanged).

    Example::

        >>> import itertools
        >>> a = [1, 2, 3]
        >>> b = [4, 5]
        >>> list(itertools.product(a, b))
        [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)]
        >>> tensor_a = torch.tensor(a)
        >>> tensor_b = torch.tensor(b)
        >>> torch.cartesian_prod(tensor_a, tensor_b)
        tensor([[1, 4],
                [1, 5],
                [2, 4],
                [2, 5],
                [3, 4],
                [3, 5]])
    """
    # Variadic wrapper; defer to __torch_function__ overrides when present,
    # otherwise hand the whole tuple to the native implementation.
    if not has_torch_function(tensors):
        return _VF.cartesian_prod(tensors)  # type: ignore[attr-defined]
    return handle_torch_function(cartesian_prod, tensors, *tensors)
1398
+
1399
+
1400
def block_diag(*tensors):
    """Assemble a block diagonal matrix from the provided tensors.

    Args:
        *tensors: One or more tensors with 0, 1, or 2 dimensions.

    Returns:
        Tensor: A 2 dimensional tensor where the inputs are placed along the
        diagonal in order (upper-left to lower-right) and every other
        element is 0.

    Example::

        >>> import torch
        >>> A = torch.tensor([[0, 1], [1, 0]])
        >>> B = torch.tensor([[3, 4, 5], [6, 7, 8]])
        >>> C = torch.tensor(7)
        >>> D = torch.tensor([1, 2, 3])
        >>> E = torch.tensor([[4], [5], [6]])
        >>> torch.block_diag(A, B, C, D, E)
        tensor([[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 3, 4, 5, 0, 0, 0, 0, 0],
                [0, 0, 6, 7, 8, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 7, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 1, 2, 3, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 4],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 5],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 6]])
    """
    # Variadic wrapper; route through __torch_function__ when any input
    # overrides it, otherwise call straight into the native op.
    if not has_torch_function(tensors):
        return torch._C._VariableFunctions.block_diag(tensors)  # type: ignore[attr-defined]
    return handle_torch_function(block_diag, tensors, *tensors)
1434
+
1435
+
1436
def cdist(x1, x2, p=2.0, compute_mode="use_mm_for_euclid_dist_if_necessary"):
    # type: (Tensor, Tensor, float, str) -> (Tensor)
    r"""Computes batched the p-norm distance between each pair of the two collections of row vectors.

    Args:
        x1 (Tensor): input tensor of shape :math:`B \times P \times M`.
        x2 (Tensor): input tensor of shape :math:`B \times R \times M`.
        p: p value for the p-norm distance to calculate between each vector pair
            :math:`\in [0, \infty]`.
        compute_mode:
            'use_mm_for_euclid_dist_if_necessary' - will use matrix multiplication approach to calculate
            euclidean distance (p = 2) if P > 25 or R > 25
            'use_mm_for_euclid_dist' - will always use matrix multiplication approach to calculate
            euclidean distance (p = 2)
            'donot_use_mm_for_euclid_dist' - will never use matrix multiplication approach to calculate
            euclidean distance (p = 2)
            Default: use_mm_for_euclid_dist_if_necessary.

    If x1 has shape :math:`B \times P \times M` and x2 has shape :math:`B \times R \times M` then the
    output will have shape :math:`B \times P \times R`.

    This function is equivalent to `scipy.spatial.distance.cdist(input,'minkowski', p=p)`
    if :math:`p \in (0, \infty)`. When :math:`p = 0` it is equivalent to
    `scipy.spatial.distance.cdist(input, 'hamming') * M`. When :math:`p = \infty`, the closest
    scipy function is `scipy.spatial.distance.cdist(xn, lambda x, y: np.abs(x - y).max())`.

    Example:

        >>> a = torch.tensor([[0.9041,  0.0196], [-0.3108, -2.4423], [-0.4821,  1.059]])
        >>> b = torch.tensor([[-2.1763, -0.4713], [-0.6986,  1.3702]])
        >>> torch.cdist(a, b, p=2)
        tensor([[3.1193, 2.0959],
                [2.7138, 3.8322],
                [2.2830, 0.3791]])
    """
    if has_torch_function_variadic(x1, x2):
        return handle_torch_function(
            cdist, (x1, x2), x1, x2, p=p, compute_mode=compute_mode
        )
    # Translate the string mode into the integer flag the native op expects:
    # None -> use the size heuristic, 1 -> always use mm, 2 -> never use mm.
    if compute_mode == "use_mm_for_euclid_dist_if_necessary":
        mode = None
    elif compute_mode == "use_mm_for_euclid_dist":
        mode = 1
    elif compute_mode == "donot_use_mm_for_euclid_dist":
        mode = 2
    else:
        raise ValueError(f"{compute_mode} is not a valid value for compute_mode")
    return _VF.cdist(x1, x2, p, mode)  # type: ignore[attr-defined]
1490
+
1491
+
1492
def atleast_1d(*tensors):
    r"""
    Returns a 1-dimensional view of each input tensor with zero dimensions.
    Input tensors with one or more dimensions are returned as-is.

    Args:
        input (Tensor or list of Tensors)

    Returns:
        output (Tensor or tuple of Tensors)

    Example::

        >>> x = torch.arange(2)
        >>> torch.atleast_1d(x)
        tensor([0, 1])
        >>> x = torch.tensor(1.)
        >>> torch.atleast_1d(x)
        tensor([1.])
        >>> x = torch.tensor(0.5)
        >>> y = torch.tensor(1.)
        >>> torch.atleast_1d((x, y))
        (tensor([0.5000]), tensor([1.]))
    """
    # Variadic wrapper around the native op; an override wins first.
    if has_torch_function(tensors):
        return handle_torch_function(atleast_1d, tensors, *tensors)
    # A single argument is unwrapped so the native op returns a bare tensor
    # rather than a 1-tuple.
    arg = tensors[0] if len(tensors) == 1 else tensors
    return _VF.atleast_1d(arg)  # type: ignore[attr-defined]
1526
+
1527
+
1528
def atleast_2d(*tensors):
    r"""
    Returns a 2-dimensional view of each input tensor with zero dimensions.
    Input tensors with two or more dimensions are returned as-is.

    Args:
        input (Tensor or list of Tensors)

    Returns:
        output (Tensor or tuple of Tensors)

    Example::

        >>> x = torch.tensor(1.)
        >>> torch.atleast_2d(x)
        tensor([[1.]])
        >>> x = torch.arange(4).view(2, 2)
        >>> torch.atleast_2d(x)
        tensor([[0, 1],
                [2, 3]])
        >>> x = torch.tensor(0.5)
        >>> y = torch.tensor(1.)
        >>> torch.atleast_2d((x, y))
        (tensor([[0.5000]]), tensor([[1.]]))
    """
    # Variadic wrapper around the native op; an override wins first.
    if has_torch_function(tensors):
        return handle_torch_function(atleast_2d, tensors, *tensors)
    # A single argument is unwrapped so the native op returns a bare tensor
    # rather than a 1-tuple.
    arg = tensors[0] if len(tensors) == 1 else tensors
    return _VF.atleast_2d(arg)  # type: ignore[attr-defined]
1564
+
1565
+
1566
def atleast_3d(*tensors):
    r"""
    Returns a 3-dimensional view of each input tensor with zero dimensions.
    Input tensors with three or more dimensions are returned as-is.

    Args:
        input (Tensor or list of Tensors)

    Returns:
        output (Tensor or tuple of Tensors)

    Example:

        >>> x = torch.tensor(0.5)
        >>> torch.atleast_3d(x)
        tensor([[[0.5000]]])
        >>> y = torch.arange(4).view(2, 2)
        >>> torch.atleast_3d(y)
        tensor([[[0],
                 [1]],
        <BLANKLINE>
                [[2],
                 [3]]])
        >>> x = torch.tensor(1).view(1, 1, 1)
        >>> torch.atleast_3d(x)
        tensor([[[1]]])
        >>> x = torch.tensor(0.5)
        >>> y = torch.tensor(1.0)
        >>> torch.atleast_3d((x, y))
        (tensor([[[0.5000]]]), tensor([[[1.]]]))
    """
    # Variadic wrapper around the native op; an override wins first.
    if has_torch_function(tensors):
        return handle_torch_function(atleast_3d, tensors, *tensors)
    # A single argument is unwrapped so the native op returns a bare tensor
    # rather than a 1-tuple.
    arg = tensors[0] if len(tensors) == 1 else tensors
    return _VF.atleast_3d(arg)  # type: ignore[attr-defined]
1610
+
1611
+
1612
if TYPE_CHECKING:
    pass
    # There's no good way to use this type annotation; cannot rename norm() to
    # _norm_impl() in a way that doesn't break JIT overloads. So leave untyped
    # for mypy for now.
    #    def norm(input: Tensor,
    #             p: Optional[Union[str, Number]] = "fro",
    #             dim: Optional[Union[int, List[int]]] = None,
    #             keepdim: bool = False,
    #             out: Optional[Tensor] = None,
    #             dtype: _dtype = None) -> Tensor:
    #        return _norm_impl(input, p, dim, keepdim, out, dtype)
else:
    # TODO: type dim as BroadcastingList when
    # https://github.com/pytorch/pytorch/issues/33782 is fixed
    # Four TorchScript overloads covering the combinations of
    # ``p`` (str vs number) x ``dim`` (List[int] vs int); the real
    # implementation follows below.
    @overload
    def norm(
        input,
        p="fro",
        dim=None,
        keepdim=False,
        out=None,
        dtype=None,
    ):
        # type: (Tensor, str, Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor
        pass

    @overload
    def norm(  # noqa: F811
        input,
        p="fro",
        dim=None,
        keepdim=False,
        out=None,
        dtype=None,
    ):
        # type: (Tensor, Optional[number], Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor
        pass

    @overload
    def norm(  # noqa: F811
        input,
        p="fro",
        dim=None,
        keepdim=False,
        out=None,
        dtype=None,
    ):
        # type: (Tensor, Optional[number], Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor
        pass

    @overload
    def norm(  # noqa: F811
        input,
        p="fro",
        dim=None,
        keepdim=False,
        out=None,
        dtype=None,
    ):
        # type: (Tensor, str, Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor
        pass
1674
+
1675
+
1676
def norm(  # noqa: F811
    input,
    p: Optional[Union[float, str]] = "fro",
    dim=None,
    keepdim=False,
    out=None,
    dtype=None,
):
    r"""Returns the matrix norm or vector norm of a given tensor.

    .. warning::

        torch.norm is deprecated and may be removed in a future PyTorch release.
        Its documentation and behavior may be incorrect, and it is no longer
        actively maintained.

        Use :func:`torch.linalg.vector_norm` when computing vector norms and
        :func:`torch.linalg.matrix_norm` when computing matrix norms.
        For a function with a similar behavior as this one see :func:`torch.linalg.norm`.
        Note, however, the signature for these functions is slightly different than the
        signature for ``torch.norm``.

    Args:
        input (Tensor): The input tensor. Its data type must be either a floating
            point or complex type. For complex inputs, the norm is calculated using the
            absolute value of each element. If the input is complex and neither
            :attr:`dtype` nor :attr:`out` is specified, the result's data type will
            be the corresponding floating point type (e.g. float if :attr:`input` is
            complexfloat).

        p (int, float, inf, -inf, 'fro', 'nuc', optional): the order of norm. Default: ``'fro'``
            The following norms can be calculated:

            ======  ==============  ==========================
            ord     matrix norm     vector norm
            ======  ==============  ==========================
            'fro'   Frobenius norm  --
            'nuc'   nuclear norm    --
            Number  --              sum(abs(x)**ord)**(1./ord)
            ======  ==============  ==========================

            The vector norm can be calculated across any number of dimensions.
            The corresponding dimensions of :attr:`input` are flattened into
            one dimension, and the norm is calculated on the flattened
            dimension.

            Frobenius norm produces the same result as ``p=2`` in all cases
            except when :attr:`dim` is a list of three or more dims, in which
            case Frobenius norm throws an error.

            Nuclear norm can only be calculated across exactly two dimensions.

        dim (int, tuple of ints, list of ints, optional):
            Specifies which dimension or dimensions of :attr:`input` to
            calculate the norm across. If :attr:`dim` is ``None``, the norm will
            be calculated across all dimensions of :attr:`input`. If the norm
            type indicated by :attr:`p` does not support the specified number of
            dimensions, an error will occur.
        keepdim (bool, optional): whether the output tensors have :attr:`dim`
            retained or not. Ignored if :attr:`dim` = ``None`` and
            :attr:`out` = ``None``. Default: ``False``
        out (Tensor, optional): the output tensor. Ignored if
            :attr:`dim` = ``None`` and :attr:`out` = ``None``.
        dtype (:class:`torch.dtype`, optional): the desired data type of
            returned tensor. If specified, the input tensor is casted to
            :attr:`dtype` while performing the operation. Default: None.

    .. note::
        Even though ``p='fro'`` supports any number of dimensions, the true
        mathematical definition of Frobenius norm only applies to tensors with
        exactly two dimensions. :func:`torch.linalg.matrix_norm` with ``ord='fro'``
        aligns with the mathematical definition, since it can only be applied across
        exactly two dimensions.

    Example::

        >>> import torch
        >>> a = torch.arange(9, dtype= torch.float) - 4
        >>> b = a.reshape((3, 3))
        >>> torch.norm(a)
        tensor(7.7460)
        >>> torch.norm(b)
        tensor(7.7460)
        >>> torch.norm(a, float('inf'))
        tensor(4.)
        >>> torch.norm(b, float('inf'))
        tensor(4.)
        >>> c = torch.tensor([[ 1, 2, 3], [-1, 1, 4]] , dtype=torch.float)
        >>> torch.norm(c, dim=0)
        tensor([1.4142, 2.2361, 5.0000])
        >>> torch.norm(c, dim=1)
        tensor([3.7417, 4.2426])
        >>> torch.norm(c, p=1, dim=1)
        tensor([6., 6.])
        >>> d = torch.arange(8, dtype=torch.float).reshape(2, 2, 2)
        >>> torch.norm(d, dim=(1, 2))
        tensor([ 3.7417, 11.2250])
        >>> torch.norm(d[0, :, :]), torch.norm(d[1, :, :])
        (tensor(3.7417), tensor(11.2250))
    """

    if has_torch_function_unary(input):
        return handle_torch_function(
            norm, (input,), input, p=p, dim=dim, keepdim=keepdim, out=out, dtype=dtype
        )

    # NB. All the repeated code and weird python is to please TorchScript.
    # For a more compact implementation see the relevant function in `_refs/__init__.py`

    # Fast path: strided tensors on the common backends are forwarded to the
    # maintained torch.linalg.{vector,matrix}_norm implementations.
    # We don't do this for MPS or sparse tensors
    if input.layout == torch.strided and input.device.type in (
        "cpu",
        "cuda",
        "meta",
        torch.utils.backend_registration._privateuse1_backend_name,
    ):
        # Normalize ``dim`` to a list so the linalg calls accept it.
        if dim is not None:
            if isinstance(dim, (int, torch.SymInt)):
                _dim = [dim]
            else:
                _dim = dim
        else:
            _dim = None  # type: ignore[assignment]

        if isinstance(p, str):
            # 'fro' over <= 2 dims is equivalent to the 2-norm of the
            # flattened dims, so vector_norm suffices.
            if p == "fro" and (
                dim is None or isinstance(dim, (int, torch.SymInt)) or len(dim) <= 2
            ):
                if out is None:
                    return torch.linalg.vector_norm(
                        input, 2, _dim, keepdim, dtype=dtype
                    )
                else:
                    return torch.linalg.vector_norm(
                        input, 2, _dim, keepdim, dtype=dtype, out=out
                    )

            # Here we either call the nuclear norm, or we call matrix_norm with some arguments
            # that will throw an error
            if _dim is None:
                _dim = list(range(input.ndim))
            if out is None:
                return torch.linalg.matrix_norm(input, p, _dim, keepdim, dtype=dtype)
            else:
                return torch.linalg.matrix_norm(
                    input, p, _dim, keepdim, dtype=dtype, out=out
                )
        else:
            # NB. p should be Union[str, number], not Optional!
            _p = 2.0 if p is None else p
            if out is None:
                return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype)
            else:
                return torch.linalg.vector_norm(
                    input, _p, _dim, keepdim, dtype=dtype, out=out
                )

    # Legacy path (sparse/MPS/etc.): dispatch to the old _VF kernels.
    ndim = input.dim()

    # catch default case
    if dim is None and out is None and dtype is None and p is not None:
        if isinstance(p, str):
            if p == "fro":
                return _VF.frobenius_norm(input, dim=(), keepdim=keepdim)
        if not isinstance(p, str):
            _dim = list(range(ndim))
            return _VF.norm(input, p, dim=_dim, keepdim=keepdim)  # type: ignore[attr-defined]

    # TODO: when https://github.com/pytorch/pytorch/issues/33782 is fixed
    # remove the overloads where dim is an int and replace with BraodcastingList1
    # and remove next four lines, replace _dim with dim
    if dim is not None:
        if isinstance(dim, (int, torch.SymInt)):
            _dim = [dim]
        else:
            _dim = dim
    else:
        _dim = None  # type: ignore[assignment]

    if isinstance(p, str):
        if p == "fro":
            if dtype is not None:
                raise ValueError("dtype argument is not supported in frobenius norm")

            if _dim is None:
                _dim = list(range(ndim))
            if out is None:
                return _VF.frobenius_norm(input, _dim, keepdim=keepdim)  # type: ignore[arg-type]
            else:
                return _VF.frobenius_norm(input, _dim, keepdim=keepdim, out=out)  # type: ignore[arg-type]
        elif p == "nuc":
            if dtype is not None:
                raise ValueError("dtype argument is not supported in nuclear norm")
            if _dim is None:
                if out is None:
                    return _VF.nuclear_norm(input, keepdim=keepdim)  # type: ignore[arg-type]
                else:
                    return _VF.nuclear_norm(input, keepdim=keepdim, out=out)  # type: ignore[arg-type]
            else:
                if out is None:
                    return _VF.nuclear_norm(input, _dim, keepdim=keepdim)  # type: ignore[arg-type]
                else:
                    return _VF.nuclear_norm(input, _dim, keepdim=keepdim, out=out)  # type: ignore[arg-type]
        raise RuntimeError(f"only valid string values are 'fro' and 'nuc', found {p}")
    else:
        if _dim is None:
            _dim = list(range(ndim))

        if out is None:
            if dtype is None:
                return _VF.norm(input, p, _dim, keepdim=keepdim)  # type: ignore[attr-defined]
            else:
                return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype)  # type: ignore[attr-defined]
        else:
            if dtype is None:
                return _VF.norm(input, p, _dim, keepdim=keepdim, out=out)  # type: ignore[attr-defined]
            else:
                return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype, out=out)  # type: ignore[attr-defined]
1894
+
1895
+
1896
+ def unravel_index(
1897
+ indices: Tensor,
1898
+ shape: Union[int, Sequence[int], torch.Size],
1899
+ ) -> Tuple[Tensor, ...]:
1900
+ r"""Converts a tensor of flat indices into a tuple of coordinate tensors that
1901
+ index into an arbitrary tensor of the specified shape.
1902
+
1903
+ Args:
1904
+ indices (Tensor): An integer tensor containing indices into the
1905
+ flattened version of an arbitrary tensor of shape :attr:`shape`.
1906
+ All elements must be in the range ``[0, prod(shape) - 1]``.
1907
+
1908
+ shape (int, sequence of ints, or torch.Size): The shape of the arbitrary
1909
+ tensor. All elements must be non-negative.
1910
+
1911
+ Returns:
1912
+ tuple of Tensors: Each ``i``-th tensor in the output corresponds with
1913
+ dimension ``i`` of :attr:`shape`. Each tensor has the same shape as
1914
+ ``indices`` and contains one index into dimension ``i`` for each of the
1915
+ flat indices given by ``indices``.
1916
+
1917
+ Example::
1918
+
1919
+ >>> import torch
1920
+ >>> torch.unravel_index(torch.tensor(4), (3, 2))
1921
+ (tensor(2),
1922
+ tensor(0))
1923
+
1924
+ >>> torch.unravel_index(torch.tensor([4, 1]), (3, 2))
1925
+ (tensor([2, 0]),
1926
+ tensor([0, 1]))
1927
+
1928
+ >>> torch.unravel_index(torch.tensor([0, 1, 2, 3, 4, 5]), (3, 2))
1929
+ (tensor([0, 0, 1, 1, 2, 2]),
1930
+ tensor([0, 1, 0, 1, 0, 1]))
1931
+
1932
+ >>> torch.unravel_index(torch.tensor([1234, 5678]), (10, 10, 10, 10))
1933
+ (tensor([1, 5]),
1934
+ tensor([2, 6]),
1935
+ tensor([3, 7]),
1936
+ tensor([4, 8]))
1937
+
1938
+ >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (10, 10, 10, 10))
1939
+ (tensor([[1], [5]]),
1940
+ tensor([[2], [6]]),
1941
+ tensor([[3], [7]]),
1942
+ tensor([[4], [8]]))
1943
+
1944
+ >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (100, 100))
1945
+ (tensor([[12], [56]]),
1946
+ tensor([[34], [78]]))
1947
+ """
1948
+ if has_torch_function_unary(indices):
1949
+ return handle_torch_function(unravel_index, (indices,), indices, shape=shape)
1950
+ res_tensor = _unravel_index(indices, shape)
1951
+ return res_tensor.unbind(-1)
1952
+
1953
+
1954
+ def _unravel_index(indices: Tensor, shape: Union[int, Sequence[int]]) -> Tensor:
1955
+ torch._check_type(
1956
+ not indices.is_complex()
1957
+ and not indices.is_floating_point()
1958
+ and not indices.dtype == torch.bool,
1959
+ lambda: f"expected 'indices' to be integer dtype, but got {indices.dtype}",
1960
+ )
1961
+
1962
+ torch._check_type(
1963
+ isinstance(shape, (int, torch.SymInt, Sequence)),
1964
+ lambda: f"expected 'shape' to be int or sequence of ints, but got {type(shape)}",
1965
+ )
1966
+
1967
+ if isinstance(shape, (int, torch.SymInt)):
1968
+ shape = torch.Size([shape])
1969
+ else:
1970
+ for dim in shape:
1971
+ torch._check_type(
1972
+ isinstance(dim, (int, torch.SymInt)),
1973
+ lambda: f"expected 'shape' sequence to only contain ints, but got {type(dim)}",
1974
+ )
1975
+ shape = torch.Size(shape)
1976
+
1977
+ torch._check_value(
1978
+ all(dim >= 0 for dim in shape),
1979
+ lambda: f"'shape' cannot have negative values, but got {tuple(shape)}",
1980
+ )
1981
+
1982
+ coefs = list(
1983
+ reversed(
1984
+ list(
1985
+ itertools.accumulate(
1986
+ reversed(shape[1:] + torch.Size([1])), func=operator.mul
1987
+ )
1988
+ )
1989
+ )
1990
+ )
1991
+ return indices.unsqueeze(-1).floor_divide(
1992
+ torch.tensor(coefs, device=indices.device, dtype=torch.int64)
1993
+ ) % torch.tensor(shape, device=indices.device, dtype=torch.int64)
1994
+
1995
+
1996
def chain_matmul(*matrices, out=None):
    r"""Returns the matrix product of the :math:`N` 2-D tensors, computed with
    the matrix chain order algorithm, which picks the multiplication order with
    the lowest arithmetic cost (`[CLRS]`_). :math:`N` needs to be greater than
    or equal to 2; for :math:`N = 2` this is a plain matrix-matrix product, and
    for :math:`N = 1` the original matrix is returned as is.

    .. warning::

        :func:`torch.chain_matmul` is deprecated and will be removed in a future PyTorch release.
        Use :func:`torch.linalg.multi_dot` instead, which accepts a list of two or more tensors
        rather than multiple arguments.

    Args:
        matrices (Tensors...): a sequence of 2 or more 2-D tensors whose product is to be determined.
        out (Tensor, optional): the output tensor. Ignored if :attr:`out` = ``None``.

    Returns:
        Tensor: if the :math:`i^{th}` tensor was of dimensions :math:`p_{i} \times p_{i + 1}`, then the product
        would be of dimensions :math:`p_{1} \times p_{N + 1}`.

    Example::

        >>> # xdoctest: +SKIP
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> a = torch.randn(3, 4)
        >>> b = torch.randn(4, 5)
        >>> c = torch.randn(5, 6)
        >>> d = torch.randn(6, 7)
        >>> # will raise a deprecation warning
        >>> torch.chain_matmul(a, b, c, d)
        tensor([[ -2.3375,  -3.9790,  -4.1119,  -6.6577,   9.5609, -11.5095,  -3.2614],
                [ 21.4038,   3.3378,  -8.4982,  -5.2457, -10.2561,  -2.4684,   2.7163],
                [ -0.9647,  -5.8917,  -2.3213,  -5.2284,  12.8615, -12.2816,  -2.5095]])

    .. _`[CLRS]`: https://mitpress.mit.edu/books/introduction-algorithms-third-edition
    """
    # Variadic wrapper; defer to __torch_function__ overrides when present.
    # NOTE(review): the override path does not forward ``out`` — kept as-is to
    # preserve existing behavior.
    if has_torch_function(matrices):
        return handle_torch_function(chain_matmul, matrices, *matrices)

    if out is not None:
        return _VF.chain_matmul(matrices, out=out)  # type: ignore[attr-defined]
    return _VF.chain_matmul(matrices)  # type: ignore[attr-defined]
2041
+
2042
+
2043
+ def _lu_impl(A, pivot=True, get_infos=False, out=None):
2044
+ # type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor]
2045
+ r"""Computes the LU factorization of a matrix or batches of matrices
2046
+ :attr:`A`. Returns a tuple containing the LU factorization and
2047
+ pivots of :attr:`A`. Pivoting is done if :attr:`pivot` is set to
2048
+ ``True``.
2049
+
2050
+ .. warning::
2051
+
2052
+ :func:`torch.lu` is deprecated in favor of :func:`torch.linalg.lu_factor`
2053
+ and :func:`torch.linalg.lu_factor_ex`. :func:`torch.lu` will be removed in a
2054
+ future PyTorch release.
2055
+ ``LU, pivots, info = torch.lu(A, compute_pivots)`` should be replaced with
2056
+
2057
+ .. code:: python
2058
+
2059
+ LU, pivots = torch.linalg.lu_factor(A, compute_pivots)
2060
+
2061
+ ``LU, pivots, info = torch.lu(A, compute_pivots, get_infos=True)`` should be replaced with
2062
+
2063
+ .. code:: python
2064
+
2065
+ LU, pivots, info = torch.linalg.lu_factor_ex(A, compute_pivots)
2066
+
2067
+ .. note::
2068
+ * The returned permutation matrix for every matrix in the batch is
2069
+ represented by a 1-indexed vector of size ``min(A.shape[-2], A.shape[-1])``.
2070
+ ``pivots[i] == j`` represents that in the ``i``-th step of the algorithm,
2071
+ the ``i``-th row was permuted with the ``j-1``-th row.
2072
+ * LU factorization with :attr:`pivot` = ``False`` is not available
2073
+ for CPU, and attempting to do so will throw an error. However,
2074
+ LU factorization with :attr:`pivot` = ``False`` is available for
2075
+ CUDA.
2076
+ * This function does not check if the factorization was successful
2077
+ or not if :attr:`get_infos` is ``True`` since the status of the
2078
+ factorization is present in the third element of the return tuple.
2079
+ * In the case of batches of square matrices with size less or equal
2080
+ to 32 on a CUDA device, the LU factorization is repeated for
2081
+ singular matrices due to the bug in the MAGMA library
2082
+ (see magma issue 13).
2083
+ * ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`.
2084
+
2085
+ .. warning::
2086
+ The gradients of this function will only be finite when :attr:`A` is full rank.
2087
+ This is because the LU decomposition is just differentiable at full rank matrices.
2088
+ Furthermore, if :attr:`A` is close to not being full rank,
2089
+ the gradient will be numerically unstable as it depends on the computation of :math:`L^{-1}` and :math:`U^{-1}`.
2090
+
2091
+ Args:
2092
+ A (Tensor): the tensor to factor of size :math:`(*, m, n)`
2093
+ pivot (bool, optional): controls whether pivoting is done. Default: ``True``
2094
+ get_infos (bool, optional): if set to ``True``, returns an info IntTensor.
2095
+ Default: ``False``
2096
+ out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``,
2097
+ then the elements in the tuple are Tensor, IntTensor,
2098
+ and IntTensor. If :attr:`get_infos` is ``False``, then the
2099
+ elements in the tuple are Tensor, IntTensor. Default: ``None``
2100
+
2101
+ Returns:
2102
+ (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing
2103
+
2104
+ - **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)`
2105
+
2106
+ - **pivots** (*IntTensor*): the pivots of size :math:`(*, \text{min}(m, n))`.
2107
+ ``pivots`` stores all the intermediate transpositions of rows.
2108
+ The final permutation ``perm`` could be reconstructed by
2109
+ applying ``swap(perm[i], perm[pivots[i] - 1])`` for ``i = 0, ..., pivots.size(-1) - 1``,
2110
+ where ``perm`` is initially the identity permutation of :math:`m` elements
2111
+ (essentially this is what :func:`torch.lu_unpack` is doing).
2112
+
2113
+ - **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of
2114
+ size :math:`(*)` where non-zero values indicate whether factorization for the matrix or
2115
+ each minibatch has succeeded or failed
2116
+
2117
+ Example::
2118
+
2119
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
2120
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
2121
+ >>> A = torch.randn(2, 3, 3)
2122
+ >>> A_LU, pivots = torch.lu(A)
2123
+ >>> A_LU
2124
+ tensor([[[ 1.3506, 2.5558, -0.0816],
2125
+ [ 0.1684, 1.1551, 0.1940],
2126
+ [ 0.1193, 0.6189, -0.5497]],
2127
+
2128
+ [[ 0.4526, 1.2526, -0.3285],
2129
+ [-0.7988, 0.7175, -0.9701],
2130
+ [ 0.2634, -0.9255, -0.3459]]])
2131
+ >>> pivots
2132
+ tensor([[ 3, 3, 3],
2133
+ [ 3, 3, 3]], dtype=torch.int32)
2134
+ >>> A_LU, pivots, info = torch.lu(A, get_infos=True)
2135
+ >>> if info.nonzero().size(0) == 0:
2136
+ ... print('LU factorization succeeded for all samples!')
2137
+ LU factorization succeeded for all samples!
2138
+ """
2139
+ # If get_infos is True, then we don't need to check for errors and vice versa
2140
+ return torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos))
2141
+
2142
+
2143
# Type used by ``lu``'s helpers for the ``out=`` sequence. Static checkers see
# the covariant ``Sequence``; at runtime (and for TorchScript) the concrete
# ``List`` is used instead.
if TYPE_CHECKING:
    _ListOrSeq = Sequence[Tensor]
else:
    _ListOrSeq = List[Tensor]
2147
+
2148
+
2149
def _check_list_size(out_len: int, get_infos: bool, out: Sequence[Tensor]) -> None:
    """Validate the user-supplied ``out=`` argument of :func:`torch.lu`.

    Args:
        out_len: number of elements the caller found in ``out``.
        get_infos: whether the ``lu`` call also returns the ``infos`` tensor,
            which makes the expected tuple one element longer (3 vs. 2).
        out: the ``out`` argument itself; must be a tuple or a list.

    Raises:
        TypeError: if ``out`` is not a tuple/list, or has the wrong length.
    """
    # Check the container type first so that a non-sequence ``out`` produces
    # the clear "must be tuple of Tensors" error instead of a length
    # complaint (the original checked length first, masking the real issue).
    if not isinstance(out, (tuple, list)):
        raise TypeError(
            f"argument 'out' must be tuple of Tensors, not {type(out).__name__}"
        )
    if out_len - (1 if get_infos else 0) != 2:
        raise TypeError(
            f"expected tuple of {2 + int(get_infos)} elements but got {out_len}"
        )
2159
+
2160
+
2161
def _lu_with_infos(A, pivot=True, get_infos=False, out=None):
    # type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor, Tensor]]) -> Tuple[Tensor, Tensor, Tensor]
    """``torch.lu`` implementation selected when ``get_infos=True``.

    Returns the full ``(A_LU, pivots, infos)`` triple, or copies it into the
    caller-provided ``out`` tensors when given.
    """
    if has_torch_function_unary(A):
        return handle_torch_function(
            lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out
        )
    result = _lu_impl(A, pivot, get_infos, out)
    if out is None:
        return result  # A_LU, pivots, infos
    # Validate ``out`` and copy each freshly computed tensor into it.
    _check_list_size(len(out), get_infos, out)
    for dst, src in zip(out, result):
        dst.resize_as_(src).copy_(src)
    return out
2175
+
2176
+
2177
def _lu_no_infos(A, pivot=True, get_infos=False, out=None):
    # type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tensor]
    """``torch.lu`` implementation selected when ``get_infos=False``.

    Returns ``(A_LU, pivots)``, dropping the info tensor, or copies the
    results into the caller-provided ``out`` tensors when given.
    """
    # The torch_function check must also happen here (not only in ``lu``) so
    # overrides fire before any computation is performed.
    if has_torch_function_unary(A):
        return handle_torch_function(
            lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out
        )
    result = _lu_impl(A, pivot, get_infos, out)
    if out is None:
        return result[0], result[1]  # A_LU, pivots
    _check_list_size(len(out), get_infos, out)
    # ``out`` holds two tensors here, so ``zip`` copies only A_LU and pivots.
    for dst, src in zip(out, result):
        dst.resize_as_(src).copy_(src)
    return out
2192
+
2193
+
2194
# ``lu`` returns a 2-tuple or a 3-tuple depending on ``get_infos``.
# TorchScript needs a statically known return type, so ``boolean_dispatch``
# routes each literal value of ``get_infos`` to a separately typed
# implementation.
lu = boolean_dispatch(
    arg_name="get_infos",
    arg_index=2,
    default=False,
    if_true=_lu_with_infos,
    if_false=_lu_no_infos,
    module_name=__name__,
    func_name="lu",
)
lu.__doc__ = _lu_impl.__doc__
2206
+
2207
+
2208
def align_tensors(*tensors):
    """Placeholder for named-tensor alignment.

    Raises:
        RuntimeError: unconditionally — this feature is not implemented yet.
    """
    raise RuntimeError("`align_tensors` not yet implemented.")
phi4/lib/python3.10/site-packages/torch/py.typed ADDED
File without changes
phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.83 kB). View file
 
phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite.cpython-310.pyc ADDED
Binary file (1.03 kB). View file
 
phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc ADDED
Binary file (991 Bytes). View file
 
phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/_quantized_conversions.cpython-310.pyc ADDED
Binary file (2.68 kB). View file
 
phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc ADDED
Binary file (705 Bytes). View file
 
phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/observer.cpython-310.pyc ADDED
Binary file (1.36 kB). View file
 
phi4/lib/python3.10/site-packages/torch/quantization/__pycache__/qconfig.cpython-310.pyc ADDED
Binary file (1.17 kB). View file