ZTWHHH commited on
Commit
63e7e04
·
verified ·
1 Parent(s): 56f0cee

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_dimI_native.h +21 -0
  3. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_ops.h +50 -0
  4. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice.h +30 -0
  5. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_copy_compositeexplicitautograd_dispatch.h +24 -0
  6. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense_native.h +22 -0
  7. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_scramble.h +30 -0
  8. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/and.h +35 -0
  9. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_cuda_dispatch.h +25 -0
  10. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_ops.h +105 -0
  11. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/div_ops.h +149 -0
  12. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dropout_ops.h +39 -0
  13. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_ops.h +28 -0
  14. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta.h +27 -0
  15. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/log2_ops.h +50 -0
  16. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/log_native.h +23 -0
  17. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/min_meta_dispatch.h +25 -0
  18. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/min_ops.h +105 -0
  19. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_native.h +22 -0
  20. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_erf_compositeimplicitautograd_dispatch.h +25 -0
  21. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/tril_meta.h +27 -0
  22. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/triplet_margin_loss_ops.h +28 -0
  23. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/triu_indices.h +43 -0
  24. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_cuda_dispatch.h +28 -0
  25. vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/__init__.cpython-310.pyc +0 -0
  26. vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/_comm.cpython-310.pyc +0 -0
  27. vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/_init.cpython-310.pyc +0 -0
  28. vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/_klv_utils.cpython-310.pyc +0 -0
  29. vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/_nccl_comm.cpython-310.pyc +0 -0
  30. vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/_store.cpython-310.pyc +0 -0
  31. vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/_store_actions.cpython-310.pyc +0 -0
  32. vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/__init__.cpython-310.pyc +0 -0
  33. vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_array.cpython-310.pyc +0 -0
  34. vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_chunk.cpython-310.pyc +0 -0
  35. vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_data_transfer.cpython-310.pyc +0 -0
  36. vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_elementwise.cpython-310.pyc +0 -0
  37. vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_index_arith.cpython-310.pyc +0 -0
  38. vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_linalg.cpython-310.pyc +0 -0
  39. vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_modes.cpython-310.pyc +0 -0
  40. vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_reduction.cpython-310.pyc +0 -0
  41. vllm/lib/python3.10/site-packages/cupyx/distributed/array/_array.py +899 -0
  42. vllm/lib/python3.10/site-packages/cupyx/distributed/array/_chunk.py +228 -0
  43. vllm/lib/python3.10/site-packages/cupyx/distributed/array/_data_transfer.py +122 -0
  44. vllm/lib/python3.10/site-packages/cupyx/distributed/array/_reduction.py +90 -0
  45. vllm/lib/python3.10/site-packages/cupyx/jit/__init__.py +36 -0
  46. vllm/lib/python3.10/site-packages/cupyx/jit/__pycache__/_builtin_funcs.cpython-310.pyc +0 -0
  47. vllm/lib/python3.10/site-packages/cupyx/jit/__pycache__/_compile.cpython-310.pyc +0 -0
  48. vllm/lib/python3.10/site-packages/cupyx/jit/__pycache__/_cuda_typerules.cpython-310.pyc +0 -0
  49. vllm/lib/python3.10/site-packages/cupyx/jit/__pycache__/_cuda_types.cpython-310.pyc +0 -0
  50. vllm/lib/python3.10/site-packages/cupyx/jit/__pycache__/_interface.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -1436,3 +1436,5 @@ vllm/lib/python3.10/site-packages/wandb/sdk/__pycache__/wandb_run.cpython-310.py
1436
  parrot/lib/python3.10/site-packages/numpy/fft/_pocketfft_umath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1437
  parrot/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_numeric.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1438
  parrot/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
1436
  parrot/lib/python3.10/site-packages/numpy/fft/_pocketfft_umath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1437
  parrot/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_numeric.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1438
  parrot/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1439
+ vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1440
+ vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_dimI_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API int64_t sparse_dim_sparse(const at::Tensor & self);
20
+ } // namespace native
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _foreach_acos {
18
+ using schema = ::std::vector<at::Tensor> (at::TensorList);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_acos")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_acos(Tensor[] self) -> Tensor[]")
24
+ static ::std::vector<at::Tensor> call(at::TensorList self);
25
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
26
+ };
27
+
28
+ struct TORCH_API _foreach_acos_ {
29
+ using schema = void (at::TensorList);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_acos_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_acos_(Tensor(a!)[] self) -> ()")
35
+ static void call(at::TensorList self);
36
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
37
+ };
38
+
39
+ struct TORCH_API _foreach_acos_out {
40
+ using schema = void (at::TensorList, at::TensorList);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_acos")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
46
+ static void call(at::TensorList self, at::TensorList out);
47
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
48
+ };
49
+
50
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_fused_sdp_choice_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> int
26
+ inline int64_t _fused_sdp_choice(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, c10::optional<double> scale=c10::nullopt) {
27
+ return at::_ops::_fused_sdp_choice::call(query, key, value, attn_mask, dropout_p, is_causal, scale);
28
+ }
29
+
30
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_copy_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _indices_copy_out(at::Tensor & out, const at::Tensor & self);
21
+ TORCH_API at::Tensor & _indices_copy_outf(const at::Tensor & self, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense_native.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Scalar _local_scalar_dense_cpu(const at::Tensor & self);
20
+ TORCH_API at::Scalar _local_scalar_dense_cuda(const at::Tensor & self);
21
+ } // namespace native
22
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_scramble.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_sobol_engine_scramble_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)
26
+ inline at::Tensor & _sobol_engine_scramble_(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
27
+ return at::_ops::_sobol_engine_scramble_::call(self, ltm, dimension);
28
+ }
29
+
30
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/and.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/and_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor
26
+ inline at::Tensor __and__(const at::Tensor & self, const at::Scalar & other) {
27
+ return at::_ops::__and___Scalar::call(self, other);
28
+ }
29
+
30
+ // aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor
31
+ inline at::Tensor __and__(const at::Tensor & self, const at::Tensor & other) {
32
+ return at::_ops::__and___Tensor::call(self, other);
33
+ }
34
+
35
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
21
+ TORCH_API at::Tensor & binary_cross_entropy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
22
+ TORCH_API at::Tensor & binary_cross_entropy_outf(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out);
23
+
24
+ } // namespace cuda
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_xor_ops.h ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API bitwise_xor_Tensor_out {
18
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_xor")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
24
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
25
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
26
+ };
27
+
28
+ struct TORCH_API bitwise_xor_Scalar_out {
29
+ using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_xor")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
37
+ };
38
+
39
+ struct TORCH_API bitwise_xor_Scalar {
40
+ using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_xor")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor")
46
+ static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
47
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
48
+ };
49
+
50
+ struct TORCH_API bitwise_xor_Scalar_Tensor {
51
+ using schema = at::Tensor (const at::Scalar &, const at::Tensor &);
52
+ using ptr_schema = schema*;
53
+ // See Note [static constexpr char* members for windows NVCC]
54
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_xor")
55
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor")
56
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor")
57
+ static at::Tensor call(const at::Scalar & self, const at::Tensor & other);
58
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other);
59
+ };
60
+
61
+ struct TORCH_API bitwise_xor_Tensor {
62
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
63
+ using ptr_schema = schema*;
64
+ // See Note [static constexpr char* members for windows NVCC]
65
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_xor")
66
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
67
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor")
68
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
69
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
70
+ };
71
+
72
+ struct TORCH_API bitwise_xor__Scalar {
73
+ using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
74
+ using ptr_schema = schema*;
75
+ // See Note [static constexpr char* members for windows NVCC]
76
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_xor_")
77
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
78
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
79
+ static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
80
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
81
+ };
82
+
83
+ struct TORCH_API bitwise_xor__Tensor {
84
+ using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
85
+ using ptr_schema = schema*;
86
+ // See Note [static constexpr char* members for windows NVCC]
87
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_xor_")
88
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
89
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
90
+ static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
91
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
92
+ };
93
+
94
+ struct TORCH_API bitwise_xor_Scalar_Tensor_out {
95
+ using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &);
96
+ using ptr_schema = schema*;
97
+ // See Note [static constexpr char* members for windows NVCC]
98
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_xor")
99
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_Tensor_out")
100
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
101
+ static at::Tensor & call(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
102
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
103
+ };
104
+
105
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/div_ops.h ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API div_Tensor {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Tensor(Tensor self, Tensor other) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
26
+ };
27
+
28
+ struct TORCH_API div__Tensor {
29
+ using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
37
+ };
38
+
39
+ struct TORCH_API div_out {
40
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
46
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
48
+ };
49
+
50
+ struct TORCH_API div_Tensor_mode {
51
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<c10::string_view>);
52
+ using ptr_schema = schema*;
53
+ // See Note [static constexpr char* members for windows NVCC]
54
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
55
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_mode")
56
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor")
57
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode);
58
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode);
59
+ };
60
+
61
+ struct TORCH_API div__Tensor_mode {
62
+ using schema = at::Tensor & (at::Tensor &, const at::Tensor &, c10::optional<c10::string_view>);
63
+ using ptr_schema = schema*;
64
+ // See Note [static constexpr char* members for windows NVCC]
65
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div_")
66
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_mode")
67
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)")
68
+ static at::Tensor & call(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode);
69
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode);
70
+ };
71
+
72
+ struct TORCH_API div_out_mode {
73
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional<c10::string_view>, at::Tensor &);
74
+ using ptr_schema = schema*;
75
+ // See Note [static constexpr char* members for windows NVCC]
76
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
77
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out_mode")
78
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)")
79
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out);
80
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out);
81
+ };
82
+
83
+ struct TORCH_API div_Scalar {
84
+ using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
85
+ using ptr_schema = schema*;
86
+ // See Note [static constexpr char* members for windows NVCC]
87
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
88
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
89
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Scalar(Tensor self, Scalar other) -> Tensor")
90
+ static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
91
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
92
+ };
93
+
94
+ struct TORCH_API div__Scalar {
95
+ using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
96
+ using ptr_schema = schema*;
97
+ // See Note [static constexpr char* members for windows NVCC]
98
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div_")
99
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
100
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
101
+ static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
102
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
103
+ };
104
+
105
+ struct TORCH_API div_Scalar_mode {
106
+ using schema = at::Tensor (const at::Tensor &, const at::Scalar &, c10::optional<c10::string_view>);
107
+ using ptr_schema = schema*;
108
+ // See Note [static constexpr char* members for windows NVCC]
109
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
110
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_mode")
111
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor")
112
+ static at::Tensor call(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode);
113
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode);
114
+ };
115
+
116
+ struct TORCH_API div__Scalar_mode {
117
+ using schema = at::Tensor & (at::Tensor &, const at::Scalar &, c10::optional<c10::string_view>);
118
+ using ptr_schema = schema*;
119
+ // See Note [static constexpr char* members for windows NVCC]
120
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div_")
121
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_mode")
122
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)")
123
+ static at::Tensor & call(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode);
124
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode);
125
+ };
126
+
127
+ struct TORCH_API div_Scalar_out {
128
+ using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
129
+ using ptr_schema = schema*;
130
+ // See Note [static constexpr char* members for windows NVCC]
131
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
132
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
133
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
134
+ static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
135
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
136
+ };
137
+
138
+ struct TORCH_API div_Scalar_mode_out {
139
+ using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, c10::optional<c10::string_view>, at::Tensor &);
140
+ using ptr_schema = schema*;
141
+ // See Note [static constexpr char* members for windows NVCC]
142
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::div")
143
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_mode_out")
144
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)")
145
+ static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out);
146
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out);
147
+ };
148
+
149
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dropout_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API dropout {
18
+ using schema = at::Tensor (const at::Tensor &, double, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::dropout")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "dropout(Tensor input, float p, bool train) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & input, double p, bool train);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train);
26
+ };
27
+
28
+ struct TORCH_API dropout_ {
29
+ using schema = at::Tensor & (at::Tensor &, double, bool);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::dropout_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self, double p, bool train);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train);
37
+ };
38
+
39
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API embedding_sparse_backward {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::embedding_sparse_backward")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
26
+ };
27
+
28
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/i0_meta.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ struct TORCH_API structured_i0 : public TensorIteratorBase {
21
+
22
+
23
+ void meta(const at::Tensor & self);
24
+ };
25
+
26
+ } // namespace native
27
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/log2_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API log2 {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log2")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log2(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API log2_ {
29
+ using schema = at::Tensor & (at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log2_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log2_(Tensor(a!) self) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
37
+ };
38
+
39
+ struct TORCH_API log2_out {
40
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log2")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
46
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
48
+ };
49
+
50
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/log_native.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ #include <ATen/ops/log_meta.h>
16
+
17
+ namespace at {
18
+ namespace native {
19
+ struct TORCH_API structured_log_out : public at::meta::structured_log {
20
+ void impl(const at::Tensor & self, const at::Tensor & out);
21
+ };
22
+ } // namespace native
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/min_meta_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> min(const at::Tensor & self, int64_t dim, bool keepdim=false);
21
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> min_out(at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim=false);
22
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> min_outf(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices);
23
+
24
+ } // namespace meta
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/min_ops.h ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API min_dim {
18
+ using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, int64_t, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::min")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)")
24
+ static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, int64_t dim, bool keepdim);
25
+ static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim);
26
+ };
27
+
28
+ struct TORCH_API min_dim_min {
29
+ using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, int64_t, bool, at::Tensor &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::min")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_min")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)")
35
+ static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices);
36
+ static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices);
37
+ };
38
+
39
+ struct TORCH_API min_names_dim {
40
+ using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::Dimname, bool);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::min")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_dim")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)")
46
+ static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::Dimname dim, bool keepdim);
47
+ static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim);
48
+ };
49
+
50
+ struct TORCH_API min_names_dim_min {
51
+ using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::Dimname, bool, at::Tensor &, at::Tensor &);
52
+ using ptr_schema = schema*;
53
+ // See Note [static constexpr char* members for windows NVCC]
54
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::min")
55
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_dim_min")
56
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)")
57
+ static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices);
58
+ static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices);
59
+ };
60
+
61
+ struct TORCH_API min {
62
+ using schema = at::Tensor (const at::Tensor &);
63
+ using ptr_schema = schema*;
64
+ // See Note [static constexpr char* members for windows NVCC]
65
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::min")
66
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
67
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "min(Tensor self) -> Tensor")
68
+ static at::Tensor call(const at::Tensor & self);
69
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
70
+ };
71
+
72
+ struct TORCH_API min_unary_out {
73
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
74
+ using ptr_schema = schema*;
75
+ // See Note [static constexpr char* members for windows NVCC]
76
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::min")
77
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "unary_out")
78
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
79
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
80
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
81
+ };
82
+
83
+ struct TORCH_API min_out {
84
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
85
+ using ptr_schema = schema*;
86
+ // See Note [static constexpr char* members for windows NVCC]
87
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::min")
88
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
89
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
90
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
91
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
92
+ };
93
+
94
+ struct TORCH_API min_other {
95
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
96
+ using ptr_schema = schema*;
97
+ // See Note [static constexpr char* members for windows NVCC]
98
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::min")
99
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "other")
100
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "min.other(Tensor self, Tensor other) -> Tensor")
101
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
102
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
103
+ };
104
+
105
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_native.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor repeat(const at::Tensor & self, at::IntArrayRef repeats);
20
+ TORCH_API at::Tensor & repeat_out_symint(const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out);
21
+ } // namespace native
22
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_erf_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor special_erf(const at::Tensor & self);
21
+ TORCH_API at::Tensor & special_erf_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & special_erf_outf(const at::Tensor & self, at::Tensor & out);
23
+
24
+ } // namespace compositeimplicitautograd
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/tril_meta.h ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/TensorIterator.h>
13
+ #include <ATen/TensorMeta.h>
14
+ #include <tuple>
15
+ #include <vector>
16
+
17
+ namespace at {
18
+ namespace meta {
19
+
20
+ struct TORCH_API structured_tril : public at::impl::MetaBase {
21
+
22
+
23
+ void meta(const at::Tensor & self, int64_t diagonal);
24
+ };
25
+
26
+ } // namespace native
27
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/triplet_margin_loss_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API triplet_margin_loss {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, double, double, bool, int64_t);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::triplet_margin_loss")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction);
26
+ };
27
+
28
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/triu_indices.h ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/triu_indices_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
26
+ inline at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong) {
27
+ return at::_ops::triu_indices::call(row, col, offset, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
28
+ }
29
+ // aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
30
+ inline at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
31
+ return at::_ops::triu_indices::call(row, col, offset, dtype, layout, device, pin_memory);
32
+ }
33
+
34
+ // aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & triu_indices_out(at::Tensor & out, int64_t row, int64_t col, int64_t offset=0) {
36
+ return at::_ops::triu_indices_out::call(row, col, offset, out);
37
+ }
38
+ // aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
39
+ inline at::Tensor & triu_indices_outf(int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
40
+ return at::_ops::triu_indices_out::call(row, col, offset, out);
41
+ }
42
+
43
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_cuda_dispatch.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
21
+ TORCH_API at::Tensor upsample_nearest1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
22
+ TORCH_API at::Tensor & upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
23
+ TORCH_API at::Tensor & upsample_nearest1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input);
24
+ TORCH_API at::Tensor & upsample_nearest1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
25
+ TORCH_API at::Tensor & upsample_nearest1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input);
26
+
27
+ } // namespace cuda
28
+ } // namespace at
vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (297 Bytes). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/_comm.cpython-310.pyc ADDED
Binary file (2.65 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/_init.cpython-310.pyc ADDED
Binary file (3.55 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/_klv_utils.cpython-310.pyc ADDED
Binary file (1.68 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/_nccl_comm.cpython-310.pyc ADDED
Binary file (25.8 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/_store.cpython-310.pyc ADDED
Binary file (5.38 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/__pycache__/_store_actions.cpython-310.pyc ADDED
Binary file (6.72 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (532 Bytes). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_array.cpython-310.pyc ADDED
Binary file (29.8 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_chunk.cpython-310.pyc ADDED
Binary file (6.74 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_data_transfer.cpython-310.pyc ADDED
Binary file (4.26 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_elementwise.cpython-310.pyc ADDED
Binary file (6.81 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_index_arith.cpython-310.pyc ADDED
Binary file (4.13 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_linalg.cpython-310.pyc ADDED
Binary file (11.4 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_modes.cpython-310.pyc ADDED
Binary file (1.85 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/array/__pycache__/_reduction.cpython-310.pyc ADDED
Binary file (2.29 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/distributed/array/_array.py ADDED
@@ -0,0 +1,899 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import chain
2
+ from typing import Any, Callable, Iterable, Optional
3
+
4
+ import numpy
5
+ from numpy.typing import ArrayLike
6
+ from numpy.typing import DTypeLike
7
+
8
+ import cupy
9
+ from cupy._core.core import ndarray
10
+ import cupy._creation.from_data as _creation_from_data
11
+ import cupy._core._routines_math as _math
12
+ import cupy._core._routines_statistics as _statistics
13
+ from cupy.cuda.device import Device
14
+ from cupy.cuda.stream import Stream
15
+ from cupy.cuda.stream import get_current_stream
16
+
17
+ from cupyx.distributed.array import _chunk
18
+ from cupyx.distributed.array._chunk import _Chunk
19
+ from cupyx.distributed.array import _data_transfer
20
+ from cupyx.distributed.array._data_transfer import _Communicator
21
+ from cupyx.distributed.array import _elementwise
22
+ from cupyx.distributed.array import _index_arith
23
+ from cupyx.distributed.array import _modes
24
+ from cupyx.distributed.array import _reduction
25
+ from cupyx.distributed.array import _linalg
26
+
27
+
28
+ class _MultiDeviceDummyMemory(cupy.cuda.memory.Memory):
29
+ pass
30
+
31
+
32
+ class _MultiDeviceDummyPointer(cupy.cuda.memory.MemoryPointer):
33
+ @property
34
+ def device(self) -> Device:
35
+ # This override is needed to assign an invalid device id
36
+ # Since the array is not residing in a single device now
37
+ return Device(-1)
38
+
39
+
40
+ def _make_chunk_async(src_dev, dst_dev, idx, src_array, comms):
41
+ src_stream = get_current_stream(src_dev)
42
+ with src_array.device:
43
+ src_array = _creation_from_data.ascontiguousarray(src_array)
44
+ src_data = _data_transfer._AsyncData(
45
+ src_array, src_stream.record(), prevent_gc=src_array)
46
+ with Device(dst_dev):
47
+ dst_stream = get_current_stream()
48
+ copied = _data_transfer._transfer(
49
+ comms[src_dev], src_stream, src_data,
50
+ comms[dst_dev], dst_stream, dst_dev)
51
+ return _Chunk(copied.array, copied.ready, idx,
52
+ prevent_gc=src_data)
53
+
54
+
55
+ def _make_chunk_sync(src_dev, dst_dev, idx, src_array, comms):
56
+ with Device(dst_dev):
57
+ stream = get_current_stream()
58
+ copied = _creation_from_data.array(src_array)
59
+ return _Chunk(copied, stream.record(), idx, prevent_gc=src_array)
60
+
61
+
62
class DistributedArray(ndarray):
    """
    __init__(self, shape, dtype, chunks_map, mode=REPLICA, comms=None)

    Multi-dimensional array distributed across multiple CUDA devices.

    This class implements some elementary operations that :class:`cupy.ndarray`
    provides. The array content is split into chunks, contiguous arrays
    corresponding to slices of the original array. Note that one device can
    hold multiple chunks.

    This direct constructor is designed for internal calls. Users should create
    distributed arrays using :func:`distributed_array`.

    Args:
        shape (tuple of ints): Shape of created array.
        dtype (dtype_like): Any object that can be interpreted as a numpy data
            type.
        chunks_map (dict from int to list of chunks): Lists of chunk objects
            associated with each device.
        mode (mode object, optional): Mode that determines how overlaps
            of the chunks are interpreted. Defaults to
            ``cupyx.distributed.array.REPLICA``.
        comms (optional): Communicator objects which a distributed array
            hold internally. Sharing them with other distributed arrays can
            save time because their initialization is a costly operation.

    .. seealso::
        :attr:`DistributedArray.mode` for details about modes.
    """

    # Chunks owned by each device id; one device may hold multiple chunks.
    _chunks_map: dict[int, list[_Chunk]]
    # How overlapping chunk regions are interpreted (REPLICA, SUM, MAX, ...).
    _mode: _modes.Mode
    # Per-device streams, created lazily in _prepare_comms_and_streams.
    _streams: dict[int, Stream]
    # Per-device communicators, (re)created lazily when new devices appear.
    _comms: dict[int, _Communicator]

    def __new__(
        cls, shape: tuple[int, ...], dtype: DTypeLike,
        chunks_map: dict[int, list[_Chunk]],
        mode: _modes.Mode = _modes.REPLICA,
        comms: Optional[dict[int, _Communicator]] = None,
    ) -> 'DistributedArray':
        # Back the base ndarray with a dummy zero-size allocation; the real
        # data lives in the per-device chunks.
        mem = _MultiDeviceDummyMemory(0)
        memptr = _MultiDeviceDummyPointer(mem, 0)
        obj = super().__new__(cls, shape, dtype, memptr=memptr)
        obj._chunks_map = chunks_map

        obj._mode = mode

        obj._streams = {}
        obj._comms = comms if comms is not None else {}

        return obj

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def __array_finalize__(self, obj):
        # Disallow implicit creation (views, templates) — distributed arrays
        # must always be built through the explicit constructor so that the
        # chunk metadata is populated.
        if obj is not None:
            raise RuntimeError(
                'Distributed array can only be instantiated by an explicit'
                'constructor call')

    @property
    def mode(self) -> _modes.Mode:
        """Describe how overlaps of the chunks are interpreted.

        In the replica mode, chunks are guaranteed to have identical values on
        their overlapping segments. In other modes, they are not necessarily
        identical and represent the original data as their max, sum, etc.

        :class:`DistributedArray` currently supports
        ``cupyx.distributed.array.REPLICA``, ``cupyx.distributed.array.MIN``,
        ``cupyx.distributed.array.MAX``, ``cupyx.distributed.array.SUM``,
        ``cupyx.distributed.array.PROD`` modes.

        Many operations on distributed arrays including :class:`cupy.ufunc`
        and :func:`~cupyx.distributed.array.matmul` involve changing their mode
        beforehand. These mode conversions are done automatically, so in most
        cases users do not have to manage modes manually.

        Example:
            >>> A = distributed_array(
            ...     cupy.arange(6).reshape(2, 3),
            ...     make_2d_index_map([0, 2], [0, 1, 3],
            ...                       [[{0}, {1, 2}]]))
            >>> B = distributed_array(
            ...     cupy.arange(12).reshape(3, 4),
            ...     make_2d_index_map([0, 1, 3], [0, 2, 4],
            ...                       [[{0}, {0}],
            ...                        [{1}, {2}]]))
            >>> C = A @ B
            >>> C
            array([[20, 23, 26, 29],
                   [56, 68, 80, 92]])
            >>> C.mode
            'sum'
            >>> C.all_chunks()
            {0: [array([[0, 0],
                        [0, 3]]),   # left half
                 array([[0, 0],
                        [6, 9]])],  # right half
             1: [array([[20, 23],
                        [56, 65]])],    # left half
             2: [array([[26, 29],
                        [74, 83]])]}    # right half
            >>> C_replica = C.change_mode('replica')
            >>> C_replica.mode
            'replica'
            >>> C_replica.all_chunks()
            {0: [array([[20, 23],
                        [56, 68]]),     # left half
                 array([[26, 29],
                        [80, 92]])],    # right half
             1: [array([[20, 23],
                        [56, 68]])],    # left half
             2: [array([[26, 29],
                        [80, 92]])]}    # right half
        """
        return self._mode

    @property
    def devices(self) -> Iterable[int]:
        """A collection of device IDs holding part of the data."""
        return self._chunks_map.keys()

    @property
    def index_map(self) -> dict[int, list[tuple[slice, ...]]]:
        """Indices for the chunks that devices with designated IDs own."""
        return {dev: [chunk.index for chunk in chunks]
                for dev, chunks in self._chunks_map.items()}

    def all_chunks(self) -> dict[int, list[ndarray]]:
        """Return the chunks with all buffered data flushed.

        Buffered data are created in situations such as resharding and mode
        changing.
        """
        chunks_map: dict[int, list[ndarray]] = {}
        for dev, chunks in self._chunks_map.items():
            chunks_map[dev] = []
            for chunk in chunks:
                # flush applies pending partial updates in-place.
                chunk.flush(self._mode)
                chunks_map[dev].append(chunk.array)
        return chunks_map

    def _prepare_comms_and_streams(self, devices: Iterable[int]) -> None:
        # Ensure communicators and streams are prepared for communication
        # between `devices` and the devices currently owning chunks
        devices = self._chunks_map.keys() | devices

        # Communicators must cover the full device set; recreate them all
        # if any device is missing.
        if not devices.issubset(self._comms.keys()):
            self._comms = _data_transfer._create_communicators(devices)

        for dev in devices - self._streams.keys():
            with Device(dev):
                self._streams[dev] = Stream()

    def __cupy_override_elementwise_kernel__(self, kernel, *args, **kwargs):
        # This method is called from cupy.ufunc and cupy.ElementwiseKernel
        # to dispatch elementwise operations
        return _elementwise._execute(kernel, args, kwargs)

    def __cupy_override_reduction_kernel__(
            self, kernel, axis, dtype, out, keepdims) -> Any:
        # This method is called from _SimpleReductionKernel and elementary
        # reduction methods of ndarray to dispatch reduction operations
        # TODO: Support user-defined ReductionKernel
        if axis is None:
            raise RuntimeError('axis must be specified')
        if out is not None:
            raise RuntimeError('Argument `out` is not supported')
        if keepdims:
            raise RuntimeError('Argument `keepdims` is not supported')

        return _reduction._execute(self, kernel, axis, dtype)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # Only matmul is routed through __array_ufunc__; everything else is
        # handled by __cupy_override_elementwise_kernel__.
        if ufunc.__name__ == 'matmul' and method == '__call__':
            return _linalg.matmul(*inputs, **kwargs)
        return NotImplemented

    def __matmul__(x, y):
        if isinstance(y, DistributedArray):
            return _linalg.matmul(x, y)
        else:
            return NotImplemented

    def _copy_chunks_map_in_replica_mode(self) -> dict[int, list[_Chunk]]:
        # Return a copy of self.chunks_map in the replica mode
        chunks_map = {}
        for dev, chunks in self._chunks_map.items():
            chunks_map[dev] = [chunk.copy() for chunk in chunks]

        if self._mode is not _modes.REPLICA:
            # Combine overlapping regions across devices so every chunk
            # holds the final replicated values.
            self._prepare_comms_and_streams(self._chunks_map.keys())
            _chunk._all_reduce_intersections(
                self._mode, self.shape, chunks_map, self._comms, self._streams)

        return chunks_map

    def _copy_chunks_map_in_op_mode(
            self, op_mode: _modes._OpMode) -> dict[int, list[_Chunk]]:
        # Return a copy of self.chunks_map in the given op mode
        chunks_map = self._copy_chunks_map_in_replica_mode()

        for chunk in chain.from_iterable(chunks_map.values()):
            chunk.flush(_modes.REPLICA)

        chunks_list = list(chain.from_iterable(chunks_map.values()))
        identity = op_mode.identity_of(self.dtype)

        # In an op mode each element must be counted exactly once, so zero
        # out (set to the op's identity) every overlap on all but one chunk.
        # TODO: Fair distribution of work
        # In the current implementation, devices that appear earlier have to
        # execute set_identity_on_intersection repeatedly, whereas the last
        # device has no work to do
        for i in range(len(chunks_list)):
            a_chunk = chunks_list[i]
            for j in range(i + 1, len(chunks_list)):
                b_chunk = chunks_list[j]
                a_chunk.set_identity_on_intersection(
                    b_chunk.index, self.shape, identity)

        return chunks_map

    def _to_op_mode(self, op_mode: _modes.Mode) -> 'DistributedArray':
        # Return a view or a copy of the chunks_map in the given mode
        if self._mode is op_mode:
            return self

        # A single chunk on a single device has no overlaps, so any mode
        # change is just a relabeling — share the chunks_map.
        if len(self._chunks_map) == 1:
            chunks, = self._chunks_map.values()
            if len(chunks) == 1:
                chunks[0].flush(self._mode)
                return DistributedArray(
                    self.shape, self.dtype, self._chunks_map,
                    op_mode, self._comms)
        if op_mode is _modes.REPLICA:
            chunks_map = self._copy_chunks_map_in_replica_mode()
        else:
            assert op_mode is not None
            chunks_map = self._copy_chunks_map_in_op_mode(op_mode)
        return DistributedArray(
            self.shape, self.dtype, chunks_map, op_mode, self._comms)

    def change_mode(self, mode: _modes.Mode) -> 'DistributedArray':
        """Return a view or a copy in the given mode.

        Args:
            mode (mode Object): How overlaps of
                the chunks are interpreted.

        .. seealso::
            :attr:`DistributedArray.mode` for details about modes.
        """
        return self._to_op_mode(mode)

    def reshard(self, index_map: dict[int, Any]) -> 'DistributedArray':
        """Return a view or a copy having the given index_map.

        Data transfers across devices are done on separate streams created
        internally. To make them asynchronous, transferred data is buffered and
        reflected to the chunks when necessary.

        Args:
            index_map (dict from int to array indices): Indices for the chunks
                that devices with designated IDs own. The current index_map of
                a distributed array can be obtained from
                :attr:`DistributedArray.index_map`.
        """
        new_index_map = _index_arith._normalize_index_map(
            self.shape, index_map)
        if new_index_map == self.index_map:
            return self

        old_chunks_map = self._chunks_map
        new_chunks_map: dict[int, list[_Chunk]] = {}

        # Set up new_chunks_map compatible with new_index_map
        # as placeholders of chunks
        for dev, idxs in new_index_map.items():
            new_chunks_map[dev] = []

            for idx in idxs:
                with Device(dev):
                    dst_shape = _index_arith._shape_after_indexing(
                        self.shape, idx)
                    new_chunk = _Chunk.create_placeholder(dst_shape, dev, idx)
                    new_chunks_map[dev].append(new_chunk)

        self._prepare_comms_and_streams(index_map.keys())

        # Data transfer from old chunks to new chunks
        # TODO: Reorder transfers to minimize latency

        # The current implementation transfers the same data multiple times
        # where chunks overlap. This is particularly problematic when matrix
        # multiplication is involved, where one block tends to be shared
        # between multiple devices
        # TODO: Avoid duplicate data transfers
        for src_chunk in chain.from_iterable(old_chunks_map.values()):
            src_chunk.flush(self._mode)

            if self._mode is not _modes.REPLICA:
                # apply_to mutates the source in op modes; work on a copy so
                # self's chunks stay intact.
                src_chunk = src_chunk.copy()

            for dst_chunk in chain.from_iterable(new_chunks_map.values()):
                src_chunk.apply_to(
                    dst_chunk, self._mode, self.shape,
                    self._comms, self._streams)

        return DistributedArray(
            self.shape, self.dtype, new_chunks_map, self._mode, self._comms)

    def get(
        self, stream=None, order='C', out=None, blocking=True
    ) -> numpy.ndarray:
        """Return a copy of the array on the host memory."""
        if stream is not None:
            raise RuntimeError('Argument `stream` not supported')
        if order != 'C':
            raise RuntimeError('Argument `order` not supported')
        if out is not None:
            raise RuntimeError('Argument `out` not supported')

        for chunk in chain.from_iterable(self._chunks_map.values()):
            chunk.flush(self._mode)

        if self._mode is _modes.REPLICA:
            np_array = numpy.empty(self.shape, dtype=self.dtype)
        else:
            # Op modes combine overlapping chunks with mode.numpy_func, so
            # start from the op's identity element.
            identity = self._mode.identity_of(self.dtype)
            np_array = numpy.full(self.shape, identity, self.dtype)

        # We avoid 0D array because we expect data[idx] to return a view
        np_array = numpy.atleast_1d(np_array)

        for chunk in chain.from_iterable(self._chunks_map.values()):
            chunk.ready.synchronize()
            idx = chunk.index
            if self._mode is _modes.REPLICA:
                np_array[idx] = cupy.asnumpy(chunk.array)
            else:
                self._mode.numpy_func(
                    np_array[idx], cupy.asnumpy(chunk.array), np_array[idx])

        # Undo numpy.atleast_1d
        return np_array.reshape(self.shape)

    # -----------------------------------------------------
    # Overriding unsupported methods inherited from ndarray
    # -----------------------------------------------------

    def __getitem__(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support __getitem__.')

    def __setitem__(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support __setitem__.')

    def __len__(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support __len__.')

    def __iter__(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support __iter__.')

    def __copy__(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support __copy__.')

    def all(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support all.')

    def any(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support any.')

    def argmax(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support argmax.')

    def argmin(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support argmin.')

    def argpartition(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support argpartition.')

    def argsort(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support argsort.')

    def astype(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support astype.')

    def choose(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support choose.')

    def clip(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support clip.')

    def compress(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support compress.')

    def copy(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support copy.')

    def cumprod(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support cumprod.')

    def cumsum(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support cumsum.')

    def diagonal(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support diagonal.')

    def dot(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support dot.')

    def dump(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support dump.')

    def dumps(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support dumps.')

    def fill(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support fill.')

    def flatten(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support flatten.')

    def item(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support item.')

    def max(self, axis=None, out=None, keepdims=False):
        """Return the maximum along a given axis.

        .. note::

            Currently, it only supports non-``None`` values for ``axis`` and
            the default values for ``out`` and ``keepdims``.

        .. seealso::
            :meth:`cupy.ndarray.max`, :meth:`numpy.ndarray.max`
        """
        return self.__cupy_override_reduction_kernel__(
            _statistics.amax, axis, None, out, keepdims)

    def mean(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support mean.')

    def min(self, axis=None, out=None, keepdims=False):
        """Return the minimum along a given axis.

        .. note::

            Currently, it only supports non-``None`` values for ``axis`` and
            the default values for ``out`` and ``keepdims``.

        .. seealso::
            :meth:`cupy.ndarray.min`, :meth:`numpy.ndarray.min`
        """
        return self.__cupy_override_reduction_kernel__(
            _statistics.amin, axis, None, out, keepdims)

    def nonzero(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support nonzero.')

    def partition(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support partition.')

    def prod(self, axis=None, dtype=None, out=None, keepdims=None):
        """Return the product along a given axis.

        .. note::

            Currently, it only supports non-``None`` values for ``axis`` and
            the default values for ``out`` and ``keepdims``.

        .. seealso::
            :meth:`cupy.ndarray.prod`, :meth:`numpy.ndarray.prod`
        """
        # A different kernel is chosen depending on whether the caller pins
        # the dtype or lets the reduction pick one automatically.
        if dtype is None:
            return self.__cupy_override_reduction_kernel__(
                _math.prod_auto_dtype, axis, dtype, out, keepdims)
        else:
            return self.__cupy_override_reduction_kernel__(
                _math.prod_keep_dtype, axis, dtype, out, keepdims)

    def ptp(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support ptp.')

    def put(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support put.')

    def ravel(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support ravel.')

    def reduced_view(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support reduced_view.')

    def repeat(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support repeat.')

    def reshape(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support reshape.')

    def round(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support round.')

    def scatter_add(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support scatter_add.')

    def scatter_max(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support scatter_max.')

    def scatter_min(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support scatter_min.')

    def searchsorted(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support searchsorted.')

    def set(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support set.')

    def sort(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support sort.')

    def squeeze(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support squeeze.')

    def std(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support std.')

    def sum(self, axis=None, dtype=None, out=None, keepdims=False):
        """Return the sum along a given axis.

        .. note::

            Currently, it only supports non-``None`` values for ``axis`` and
            the default values for ``out`` and ``keepdims``.

        .. seealso::
            :meth:`cupy.ndarray.sum`, :meth:`numpy.ndarray.sum`
        """
        if dtype is None:
            return self.__cupy_override_reduction_kernel__(
                _math.sum_auto_dtype, axis, dtype, out, keepdims)
        else:
            return self.__cupy_override_reduction_kernel__(
                _math.sum_keep_dtype, axis, dtype, out, keepdims)

    def swapaxes(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support swapaxes.')

    def take(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support take.')

    def toDlpack(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support toDlpack.')

    def tobytes(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support tobytes.')

    def tofile(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support tofile.')

    def tolist(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support tolist.')

    def trace(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support trace.')

    def transpose(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support transpose.')

    def var(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support var.')

    def view(self, *args, **kwargs):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support view.')

    @property
    def T(self):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support T.')

    @property
    def base(self):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support base.')

    @property
    def cstruct(self):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support cstruct.')

    @property
    def data(self):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support data.')

    @property
    def device(self):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support device.')

    @property
    def flags(self):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support flags.')

    @property
    def flat(self):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support flat.')

    @property
    def imag(self):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support imag.')

    @property
    def real(self):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support real.')

    @property
    def shape(self):
        """Tuple of array dimensions.

        Assignment to this property is currently not supported.

        .. seealso: :attr:`cupy.ndarray.shape`, :attr:`numpy.ndarray.shape`

        """
        return super().shape

    @shape.setter
    def shape(self, newshape):
        raise NotImplementedError(
            'DistributedArray currently does not support assignment to shape.')

    @property
    def strides(self):
        """Not supported."""
        raise NotImplementedError(
            'DistributedArray currently does not support strides.')
819
+
820
+
821
def distributed_array(
    array: ArrayLike,
    index_map: dict[int, Any],
    mode: _modes.Mode = _modes.REPLICA,
) -> DistributedArray:
    """Creates a distributed array from the given data.

    This function does not check if all elements of the given array are stored
    in some of the chunks.

    Args:
        array (array_like): :class:`DistributedArray` object,
            :class:`cupy.ndarray` object or any other object that can be passed
            to :func:`numpy.array`.
        index_map (dict from int to array indices): Indices for the chunks
            that devices with designated IDs own. One device can have multiple
            chunks, which can be specified as a list of array indices.
        mode (mode object, optional): Mode that determines how overlaps
            of the chunks are interpreted. Defaults to
            ``cupyx.distributed.array.REPLICA``.

    .. seealso::
        :attr:`DistributedArray.mode` for details about modes.

    Example:
        >>> array = cupy.arange(9).reshape(3, 3)
        >>> A = distributed_array(
        ...     array,
        ...     {0: [(slice(2), slice(2)),  # array[:2, :2]
        ...          slice(None, None, 2)],  # array[::2]
        ...      1: (slice(1, None), 2)})    # array[1:, 2]
    """
    # Fast path: convert an existing DistributedArray by mode change and/or
    # resharding, then rewrap so the result shares chunks with the input.
    if isinstance(array, DistributedArray):
        if array.mode != mode:
            array = array.change_mode(mode)
        if array.index_map != index_map:
            array = array.reshard(index_map)
        return DistributedArray(
            array.shape, array.dtype, array._chunks_map, array._mode,
            array._comms)

    if isinstance(array, (numpy.ndarray, ndarray)):
        if mode != _modes.REPLICA:
            # Non-replica modes may write identity elements back into the
            # source below, so work on a private copy.
            array = array.copy()
    else:
        array = numpy.array(array)

    index_map = _index_arith._normalize_index_map(array.shape, index_map)
    comms = None

    # Define how to form a chunk from (dev, idx, src_array)
    make_chunk: Callable[
        [int, int, tuple[slice, ...], ndarray, Optional[list[Any]]],
        _Chunk
    ]

    if isinstance(array, ndarray):
        # GPU source: set up communicators covering the source device and
        # all destination devices, and transfer asynchronously.
        src_dev = array.device.id
        devices = index_map.keys() | {array.device.id}
        comms = _data_transfer._create_communicators(devices)
        make_chunk = _make_chunk_async
    else:
        # Host source: plain synchronous copies; no source device.
        src_dev = -1
        make_chunk = _make_chunk_sync

    chunks_map: dict[int, list[_Chunk]] = {}
    for dev, idxs in index_map.items():
        chunks_map[dev] = []

        for idx in idxs:
            chunk_array = array[idx]
            chunk = make_chunk(src_dev, dev, idx, chunk_array, comms)
            chunks_map[dev].append(chunk)
            # For non-idempotent op modes (e.g. SUM), each element must be
            # contributed by exactly one chunk; reset the region already
            # handed out to the op's identity so later overlapping chunks
            # do not double-count it.
            if (mode is not _modes.REPLICA
                    and not mode.idempotent):
                array[idx] = mode.identity_of(array.dtype)

    return DistributedArray(
        array.shape, array.dtype, chunks_map, mode, comms)
vllm/lib/python3.10/site-packages/cupyx/distributed/array/_chunk.py ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ from itertools import chain
3
+ from typing import Any, Iterator, Optional, Union
4
+
5
+ import numpy
6
+
7
+ from cupy._core.core import ndarray
8
+ import cupy._creation.basic as _creation_basic
9
+ import cupy._manipulation.dims as _manipulation_dims
10
+ from cupy.cuda.device import Device
11
+ from cupy.cuda.stream import Event
12
+ from cupy.cuda.stream import Stream
13
+ from cupy.cuda.stream import get_current_stream
14
+
15
+ from cupyx.distributed.array import _modes
16
+ from cupyx.distributed.array import _index_arith
17
+ from cupyx.distributed.array import _data_transfer
18
+ from cupyx.distributed.array._data_transfer import _Communicator
19
+
20
+
21
class _ArrayPlaceholder:
    # Mocks ndarray
    # Eventually overwritten by PartialUpdates entirely, so
    # any operation on _DataPlaceholder can be skipped
    shape: tuple[int, ...]
    device: Device

    def __init__(self, shape: tuple[int, ...], device: Device) -> None:
        self.shape = shape
        self.device = device

    def reshape(self, new_shape: tuple[int, ...]) -> '_ArrayPlaceholder':
        # No data to move — just return a placeholder with the new shape.
        return _ArrayPlaceholder(new_shape, self.device)

    def to_ndarray(
            self, mode: '_modes.Mode', dtype: numpy.dtype) -> ndarray:
        """Materialize the placeholder as a real array on its device.

        Replica mode leaves the contents uninitialized (they will be fully
        overwritten by updates); op modes start from the op's identity so
        updates can be folded in.
        """
        with self.device:
            if mode is _modes.REPLICA:
                data = _creation_basic.empty(self.shape, dtype)
            else:
                value = mode.identity_of(dtype)
                data = _creation_basic.full(self.shape, value, dtype)

            # We avoid 0D array because we expect data[idx] to return a view
            return _manipulation_dims.atleast_1d(data)
+
47
+
48
class _Chunk:
    """Device-local piece of a distributed array plus its pending updates.

    ``array`` is either real data or an ``_ArrayPlaceholder``; ``index``
    is the region of the overall array this chunk covers; ``updates`` is
    a queue of partial updates applied lazily by :meth:`flush`.
    """

    array: Union[ndarray, _ArrayPlaceholder]
    ready: Event  # recorded when ``array`` is safe to read on its device
    index: tuple[slice, ...]
    updates: list[_data_transfer._PartialUpdate]
    prevent_gc: Any = None  # TODO: Release it to avoid OOM

    # Rule: whenever data is DataPlaceholder, ready is empty

    def __init__(
        self, data: Union[ndarray, _ArrayPlaceholder], ready: Event,
        index: tuple[slice, ...],
        updates: Optional[list[_data_transfer._PartialUpdate]] = None,
        prevent_gc: Any = None
    ) -> None:
        self.array = data
        self.ready = ready
        self.index = index
        self.updates = updates if updates is not None else []
        self.prevent_gc = prevent_gc

    @classmethod
    def create_placeholder(
        cls, shape: tuple[int, ...], device: Union[int, Device],
        index: tuple[slice, ...],
        updates: Optional[list[_data_transfer._PartialUpdate]] = None,
    ) -> '_Chunk':
        """Create a chunk backed by an ``_ArrayPlaceholder`` (no data yet)."""
        if isinstance(device, int):
            device = Device(device)

        data = _ArrayPlaceholder(shape, device)
        # The ready event must belong to the chunk's own device.
        with device:
            ready = Event()
        if updates is None:
            updates = []

        return _Chunk(data, ready, index, updates)

    @contextlib.contextmanager
    def on_ready(self) -> Iterator[Stream]:
        """Yield the current stream after making it wait for ``self.ready``."""
        with self.array.device:
            stream = get_current_stream()
            stream.wait_event(self.ready)
            yield stream

    def add_update(
        self, update: _data_transfer._AsyncData, idx: tuple[slice, ...],
    ) -> None:
        # Application is deferred; ``flush`` applies all queued updates.
        self.updates.append((update, idx))

    def copy(self) -> '_Chunk':
        # TODO: Calling flush here would reduce the amount of future copying
        if isinstance(self.array, _ArrayPlaceholder):
            # Placeholders carry no data, so they can be shared as-is.
            data = self.array
            ready = self.ready
        else:
            with self.on_ready() as stream:
                data = self.array.copy()
                ready = stream.record()

        return _Chunk(data, ready, self.index, list(self.updates),
                      prevent_gc=self.prevent_gc)

    def flush(self, mode: '_modes.Mode') -> None:
        """Apply all updates in-place."""
        if len(self.updates) == 0:
            return

        if isinstance(self.array, _ArrayPlaceholder):
            # Materialize the data first; the dtype comes from the updates.
            dtype = self.updates[0][0].array.dtype
            self.array = self.array.to_ndarray(mode, dtype)

        with self.on_ready() as stream:
            for update_data, idx in self.updates:
                stream.wait_event(update_data.ready)
                if mode is _modes.REPLICA:
                    # Replica mode: an update simply overwrites its region.
                    self.array[idx] = update_data.array
                else:
                    # Op mode: combine with the mode's reduction operator.
                    self.array[idx] = mode.func(
                        self.array[idx], update_data.array)

            stream.record(self.ready)
        # Keep references alive until the asynchronous work above finishes.
        self.prevent_gc = (self.prevent_gc, self.updates)
        self.updates = []

    def apply_to(
        self, target: '_Chunk', mode: '_modes.Mode',
        shape: tuple[int, ...],
        comms: dict[int, _data_transfer._Communicator],
        streams: dict[int, Stream],
    ) -> None:
        # Overwrite target with mode.func(self, target) on their overlaps
        # This is just appending part of self to target.updates in the mode
        src_chunk = self
        dst_chunk = target

        assert len(src_chunk.updates) == 0
        assert isinstance(src_chunk.array, ndarray)

        src_dev = src_chunk.array.device.id
        dst_dev = dst_chunk.array.device.id
        src_idx = src_chunk.index
        dst_idx = dst_chunk.index

        intersection = _index_arith._index_intersection(
            src_idx, dst_idx, shape)
        if intersection is None:
            # The chunks do not overlap; nothing to transfer.
            return

        src_new_idx = _index_arith._index_for_subindex(
            src_idx, intersection, shape)
        dst_new_idx = _index_arith._index_for_subindex(
            dst_idx, intersection, shape)

        data_to_transfer = _data_transfer._AsyncData(
            src_chunk.array[src_new_idx], src_chunk.ready,
            src_chunk.prevent_gc)

        if mode is not _modes.REPLICA and not mode.idempotent:
            # A non-idempotent op must not count the same entries twice:
            # transfer a copy, then reset the source region to the identity.
            data_to_transfer = data_to_transfer.copy()

        update = _data_transfer._transfer(
            comms[src_dev], streams[src_dev], data_to_transfer,
            comms[dst_dev], streams[dst_dev], dst_dev)
        dst_chunk.add_update(update, dst_new_idx)

        if mode is not _modes.REPLICA and not mode.idempotent:
            dtype = src_chunk.array.dtype
            with data_to_transfer.on_ready() as stream:
                # Now src data has been copied, so we can write on src_chunk
                src_chunk.array[src_new_idx] = mode.identity_of(dtype)
                stream.record(src_chunk.ready)

    def set_identity_on_intersection(
        self, idx: tuple[slice, ...], shape: tuple[int, ...], identity,
    ) -> None:
        """Write ``identity`` onto the part of this chunk overlapping ``idx``."""
        assert isinstance(self.array, ndarray)

        intersection = _index_arith._index_intersection(self.index, idx, shape)
        if intersection is None:
            return
        self_new_idx = _index_arith._index_for_subindex(
            self.index, intersection, shape)
        with self.on_ready() as stream:
            self.array[self_new_idx] = identity
            stream.record(self.ready)

    def set_identity_on_overwritten_entries(self, identity) -> None:
        """Write ``identity`` onto every region a pending update will cover."""
        if isinstance(self.array, _ArrayPlaceholder):
            # No materialized data to neutralize.
            return

        with self.on_ready() as stream:
            for _, idx in self.updates:
                self.array[idx] = identity
            stream.record(self.ready)
203
+
204
+
205
def _all_reduce_intersections(
    op_mode: '_modes._OpMode', shape: tuple[int, ...],
    chunk_map: dict[int, list[_Chunk]],
    comms: dict[int, _Communicator], streams: dict[int, Stream],
) -> None:
    """Reduce all overlapping chunk regions, then broadcast the results.

    Forward pass: fold every chunk's overlaps into every later chunk with
    ``op_mode``, so the last chunk in the sequence ends up fully reduced.
    Backward pass: propagate the reduced values back to every earlier
    chunk in replica mode.
    """
    all_chunks = list(chain.from_iterable(chunk_map.values()))

    # Forward reduction: later chunks accumulate the earlier ones.
    for pos, reducer in enumerate(all_chunks):
        reducer.flush(op_mode)

        for receiver in all_chunks[pos + 1:]:
            reducer.apply_to(receiver, op_mode, shape, comms, streams)

    # Backward broadcast: copy the final values back to earlier chunks.
    for pos in range(len(all_chunks) - 1, -1, -1):
        source = all_chunks[pos]
        source.flush(_modes.REPLICA)

        for receiver in all_chunks[:pos]:
            source.apply_to(
                receiver, _modes.REPLICA, shape, comms, streams)
vllm/lib/python3.10/site-packages/cupyx/distributed/array/_data_transfer.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import dataclasses
3
+ from typing import Any, Iterable, Iterator
4
+
5
+ from cupy._core.core import ndarray
6
+ import cupy._creation.from_data as _creation_from_data
7
+ import cupy._creation.basic as _creation_basic
8
+ from cupy.cuda.device import Device
9
+ from cupy.cuda.stream import Event
10
+ from cupy.cuda.stream import Stream
11
+ from cupy.cuda.stream import get_current_stream
12
+
13
+ from cupy.cuda import nccl
14
+ from cupyx.distributed._nccl_comm import _get_nccl_dtype_and_count
15
+
16
# Use the real NCCL communicator when NCCL is available; otherwise install
# a do-nothing stand-in so this module can still be imported.
if nccl.available:
    from cupy.cuda.nccl import NcclCommunicator as _Communicator
else:
    class _MockCommunicator:
        # Placeholder communicator used when NCCL is not installed.
        pass

    _Communicator = _MockCommunicator
23
+
24
+
25
@dataclasses.dataclass
class _AsyncData:
    """An ndarray paired with the event that signals it is ready to use."""

    array: ndarray
    ready: Event
    prevent_gc: Any = None  # TODO: Release it to avoid OOM

    def copy(self) -> '_AsyncData':
        """Return an asynchronous copy of this data.

        The stream first waits for ``self.ready`` (via ``on_ready``), then
        copies; ``self.ready`` is re-recorded so consumers of the original
        also wait for the copy, and a fresh event is recorded for the copy.
        """
        with self.on_ready() as stream:
            array = self.array.copy()
            stream.record(self.ready)

            return _AsyncData(array, stream.record(), self.prevent_gc)

    @contextlib.contextmanager
    def on_ready(self) -> Iterator[Stream]:
        """Yield the current stream after making it wait for ``self.ready``."""
        with self.array.device:
            stream = get_current_stream()
            stream.wait_event(self.ready)
            yield stream
44
+
45
+
46
# A pending partial update: incoming data plus the index (within the
# destination chunk) it applies to.
# Overwrite in replica mode, apply in op mode
_PartialUpdate = tuple[_AsyncData, tuple[slice, ...]]
48
+
49
+
50
if nccl.available:
    def _create_communicators(
        devices: Iterable[int],
    ) -> dict[int, _Communicator]:
        """Create one NCCL communicator per device, keyed by device id."""
        comms_list = _Communicator.initAll(list(devices))
        return {comm.device_id(): comm for comm in comms_list}

    def _transfer(
        src_comm: _Communicator, src_stream: Stream, src_data: _AsyncData,
        dst_comm: _Communicator, dst_stream: Stream, dst_dev: int,
    ) -> _AsyncData:
        """Asynchronously send ``src_data`` to device ``dst_dev`` over NCCL.

        Returns the received buffer together with an event recorded on
        ``dst_stream`` marking completion of the receive.
        """
        src_dev = src_data.array.device.id
        if src_dev == dst_dev:
            # Same device: no transfer needed; share the buffer and event.
            return _AsyncData(src_data.array, src_data.ready)

        # Remember the current streams so they can be restored afterwards.
        prev_src_stream = get_current_stream(src_dev)
        prev_dst_stream = get_current_stream(dst_dev)
        try:
            with Device(src_dev):
                src_stream.use()
                src_stream.wait_event(src_data.ready)
                # NCCL needs a contiguous buffer to send from.
                src_array = _creation_from_data.ascontiguousarray(
                    src_data.array)

            with Device(dst_dev):
                dst_stream.use()
                dst_buf = _creation_basic.empty(
                    src_array.shape, src_array.dtype)

            dtype, count = _get_nccl_dtype_and_count(src_array)
            # Group the send and recv so they are issued together.
            nccl.groupStart()

            with Device(src_dev):
                src_comm.send(src_array.data.ptr, count, dtype,
                              dst_comm.rank_id(), src_stream.ptr)

            with Device(dst_dev):
                dst_comm.recv(dst_buf.data.ptr, count, dtype,
                              src_comm.rank_id(), dst_stream.ptr)

                nccl.groupEnd()
                # ``prevent_gc`` keeps src_data alive until the transfer is done.
                return _AsyncData(dst_buf, dst_stream.record(),
                                  prevent_gc=src_data)
        finally:
            # Restore the previously-current stream on both devices.
            with Device(src_dev):
                prev_src_stream.use()
            with Device(dst_dev):
                prev_dst_stream.use()
else:
    def _create_communicators(
        devices: Iterable[int],
    ) -> dict[int, _Communicator]:
        """Create mock communicators (NCCL unavailable)."""
        return {dev: _Communicator() for dev in devices}

    def _transfer(
        src_comm: _Communicator, src_stream: Stream, src_data: _AsyncData,
        dst_comm: _Communicator, dst_stream: Stream, dst_dev: int,
    ) -> _AsyncData:
        """Copy ``src_data`` to device ``dst_dev`` without NCCL.

        Falls back to ``ndarray.copy()`` issued on ``dst_stream`` under the
        destination device.
        """
        src_dev = src_data.array.device.id
        if src_dev == dst_dev:
            return _AsyncData(src_data.array, src_data.ready)

        with Device(dst_dev):
            prev_stream = get_current_stream()
            try:
                dst_stream.use()
                dst_stream.wait_event(src_data.ready)

                dst_array = src_data.array.copy()
                return _AsyncData(
                    dst_array, dst_stream.record(), prevent_gc=src_data.array)
            finally:
                # Restore the previously-current stream on the device.
                prev_stream.use()
vllm/lib/python3.10/site-packages/cupyx/distributed/array/_reduction.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import typing
2
+ from typing import Any
3
+
4
+ from numpy.typing import DTypeLike
5
+
6
+ import cupy._manipulation.dims as _manipulation_dims
7
+ from cupyx.distributed.array import _array
8
+ from cupyx.distributed.array import _chunk
9
+ from cupyx.distributed.array import _data_transfer
10
+ from cupyx.distributed.array import _modes
11
+
12
+
13
def _execute(
    arr: '_array.DistributedArray', kernel, axis: int, dtype: DTypeLike,
) -> Any:
    """Apply a reduction ``kernel`` along ``axis`` of a distributed array.

    Supported kernels are cupy_max / cupy_min / cupy_sum / cupy_prod; each
    maps to the mode the array is converted to before reducing every chunk
    (and every pending update) independently.

    Raises:
        RuntimeError: if ``kernel.name`` is not one of the supported kernels.
    """
    overwrites = False
    mode_overrides = {
        'cupy_max': _modes.MAX,
        'cupy_min': _modes.MIN,
        'cupy_sum': _modes.SUM,
        'cupy_prod': _modes.PROD,
    }
    if kernel.name not in mode_overrides:

        raise RuntimeError(f'Unsupported kernel: {kernel.name}')
    mode = mode_overrides[kernel.name]
    if mode in (_modes.MAX, _modes.MIN):
        if arr._mode is not mode:
            # Reduce from replica mode and remember that entries which
            # pending updates would overwrite must be neutralized below.
            arr = arr._to_op_mode(_modes.REPLICA)
            overwrites = True
    else:
        arr = arr._to_op_mode(mode)

    chunks_map = arr._chunks_map

    if overwrites:
        mode = typing.cast(_modes._OpMode, mode)
        identity = mode.identity_of(arr.dtype)
        for chunks in chunks_map.values():
            for i in range(len(chunks)):
                if len(chunks[i].updates) == 0:
                    continue
                # Copy before mutating so other holders of the chunk are
                # unaffected, then blank out regions updates will overwrite.
                chunks[i] = chunks[i].copy()
                chunks[i].set_identity_on_overwritten_entries(identity)

    # Shape of the reduced array: drop the reduced axis.
    shape = arr.shape[:axis] + arr.shape[axis+1:]
    out_dtype = None
    out_chunks_map: dict[int, list[_chunk._Chunk]] = {}

    for dev, chunks in chunks_map.items():
        out_chunks_map[dev] = []
        for chunk in chunks:
            with chunk.on_ready() as stream:
                out_index = chunk.index[:axis] + chunk.index[axis+1:]

                if isinstance(chunk.array, _chunk._ArrayPlaceholder):
                    # No data yet: the reduced chunk is also a placeholder.
                    old_shape = chunk.array.shape
                    out_shape = old_shape[:axis] + old_shape[axis+1:]
                    out_chunk = _chunk._Chunk.create_placeholder(
                        out_shape, chunk.array.device, out_index)
                else:
                    # We avoid 0D array because
                    # we expect data[idx] to return a view
                    out_array = _manipulation_dims.atleast_1d(
                        kernel(chunk.array, axis=axis, dtype=dtype))

                    out_dtype = out_array.dtype
                    out_chunk = _chunk._Chunk(
                        out_array, stream.record(), out_index,
                        prevent_gc=chunk.prevent_gc)

                out_chunks_map[dev].append(out_chunk)

                if len(chunk.updates) == 0:
                    continue

                # Reduce each pending update as well and re-queue it on
                # the reduced chunk.
                for update, update_index in chunk.updates:
                    stream.wait_event(update.ready)
                    out_update_array = _manipulation_dims.atleast_1d(
                        kernel(update.array, axis=axis, dtype=dtype))
                    out_dtype = out_update_array.dtype

                    out_update = _data_transfer._AsyncData(
                        out_update_array, stream.record(),
                        prevent_gc=update.prevent_gc)
                    out_index = update_index[:axis] + update_index[axis+1:]
                    out_chunk.add_update(out_update, out_index)

    return _array.DistributedArray(
        shape, out_dtype, out_chunks_map, mode, arr._comms)
vllm/lib/python3.10/site-packages/cupyx/jit/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from cupyx.jit._interface import rawkernel # NOQA
2
+
3
+ from cupyx.jit._interface import threadIdx # NOQA
4
+ from cupyx.jit._interface import blockDim # NOQA
5
+ from cupyx.jit._interface import blockIdx # NOQA
6
+ from cupyx.jit._interface import gridDim # NOQA
7
+ from cupyx.jit._interface import warpsize # NOQA
8
+
9
+ from cupyx.jit._builtin_funcs import range_ as range # NOQA
10
+ from cupyx.jit._builtin_funcs import syncthreads # NOQA
11
+ from cupyx.jit._builtin_funcs import syncwarp # NOQA
12
+ from cupyx.jit._builtin_funcs import shared_memory # NOQA
13
+ from cupyx.jit._builtin_funcs import atomic_add # NOQA
14
+ from cupyx.jit._builtin_funcs import atomic_sub # NOQA
15
+ from cupyx.jit._builtin_funcs import atomic_exch # NOQA
16
+ from cupyx.jit._builtin_funcs import atomic_min # NOQA
17
+ from cupyx.jit._builtin_funcs import atomic_max # NOQA
18
+ from cupyx.jit._builtin_funcs import atomic_inc # NOQA
19
+ from cupyx.jit._builtin_funcs import atomic_dec # NOQA
20
+ from cupyx.jit._builtin_funcs import atomic_cas # NOQA
21
+ from cupyx.jit._builtin_funcs import atomic_and # NOQA
22
+ from cupyx.jit._builtin_funcs import atomic_or # NOQA
23
+ from cupyx.jit._builtin_funcs import atomic_xor # NOQA
24
+ from cupyx.jit._builtin_funcs import grid # NOQA
25
+ from cupyx.jit._builtin_funcs import gridsize # NOQA
26
+ from cupyx.jit._builtin_funcs import laneid # NOQA
27
+ from cupyx.jit._builtin_funcs import shfl_sync # NOQA
28
+ from cupyx.jit._builtin_funcs import shfl_up_sync # NOQA
29
+ from cupyx.jit._builtin_funcs import shfl_down_sync # NOQA
30
+ from cupyx.jit._builtin_funcs import shfl_xor_sync # NOQA
31
+
32
+ from cupyx.jit import cg # NOQA
33
+ from cupyx.jit import cub # NOQA
34
+ from cupyx.jit import thrust # NOQA
35
+
36
# NOTE(review): the purpose of this limit is not visible in this file —
# confirm against its users before changing the value.
_n_functions_upperlimit = 100
vllm/lib/python3.10/site-packages/cupyx/jit/__pycache__/_builtin_funcs.cpython-310.pyc ADDED
Binary file (16 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/jit/__pycache__/_compile.cpython-310.pyc ADDED
Binary file (29 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/jit/__pycache__/_cuda_typerules.cpython-310.pyc ADDED
Binary file (4.37 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/jit/__pycache__/_cuda_types.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
vllm/lib/python3.10/site-packages/cupyx/jit/__pycache__/_interface.cpython-310.pyc ADDED
Binary file (6.38 kB). View file