Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +1 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Long_ops.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh.h +44 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc.h +44 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_tensor_quantized_tensor_cpu_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax.h +49 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_meta.h +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cat_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummaxmin_backward.h +30 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_cpu_dispatch.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_ops.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_native.h +21 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cuda_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/nanmean.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_backward_cuda_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sign_meta.h +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_expm1_ops.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_logit_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_softmax_native.h +21 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_meta_dispatch.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/zero_cpu_dispatch.h +23 -0
- vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/INSTALLER +1 -0
- vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/METADATA +246 -0
- vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/RECORD +56 -0
- vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/REQUESTED +0 -0
- vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/licenses/LICENSE +21 -0
- vllm/lib/python3.10/site-packages/click/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tokenizers-0.21.0.dist-info/INSTALLER +1 -0
- vllm/lib/python3.10/site-packages/tokenizers-0.21.0.dist-info/RECORD +46 -0
- vllm/lib/python3.10/site-packages/torchvision/_C.so +3 -0
- vllm/lib/python3.10/site-packages/torchvision/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torchvision/__pycache__/_internally_replaced_utils.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torchvision/__pycache__/_meta_registrations.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torchvision/__pycache__/_utils.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torchvision/__pycache__/extension.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torchvision/__pycache__/version.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_api.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_meta.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_utils.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/convnext.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/efficientnet.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/mobilenetv2.cpython-310.pyc +0 -0
.gitattributes
CHANGED
@@ -1491,3 +1491,4 @@ parrot/lib/python3.10/site-packages/scipy/stats/_biasedurn.cpython-310-x86_64-li
 parrot/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 vllm/lib/python3.10/site-packages/ray/data/__pycache__/dataset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 vllm/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text
+vllm/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Long_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cast_Long {
+  using schema = at::Tensor (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cast_Long")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cast_Long(Tensor self, bool non_blocking=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, bool non_blocking);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking);
+};
+
+}} // namespace at::_ops
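The generated Operator.h structs expose the raw dispatcher entry point for one op apiece. Below is a minimal sketch, not part of the diff, assuming a working libtorch build; `to_long` is a hypothetical helper name:

#include <ATen/ATen.h>

// Routes through the dispatcher exactly as the schema string above describes:
// _cast_Long(Tensor self, bool non_blocking=False) -> Tensor
at::Tensor to_long(const at::Tensor& t) {
  return at::_ops::_cast_Long::call(t, /*non_blocking=*/false);
}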
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh.h
ADDED
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_sinh_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_sinh(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_sinh(at::TensorList self) {
+    return at::_ops::_foreach_sinh::call(self);
+}
+
+// aten::_foreach_sinh_(Tensor(a!)[] self) -> ()
+inline void _foreach_sinh_(at::TensorList self) {
+    return at::_ops::_foreach_sinh_::call(self);
+}
+
+// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_sinh_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_sinh_out::call(self, out);
+}
+// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_sinh_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_sinh_out::call(self, out);
+}
+
+}
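The _foreach_* functions declared above apply one elementwise kernel across a whole list of tensors, the batching pattern used by fused optimizers. A minimal sketch, not part of the diff, assuming a working libtorch build; `foreach_demo` is a hypothetical name:

#include <ATen/ATen.h>
#include <vector>

void foreach_demo() {
  std::vector<at::Tensor> xs = {at::rand({3}), at::rand({4, 2})};
  // Out-of-place: returns a new list with sinh applied to each tensor.
  std::vector<at::Tensor> ys = at::_foreach_sinh(xs);
  // In-place variant mutates every tensor in the list.
  at::_foreach_sinh_(xs);
}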
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_trunc.h
ADDED
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_trunc_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_trunc(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_trunc(at::TensorList self) {
+    return at::_ops::_foreach_trunc::call(self);
+}
+
+// aten::_foreach_trunc_(Tensor(a!)[] self) -> ()
+inline void _foreach_trunc_(at::TensorList self) {
+    return at::_ops::_foreach_trunc_::call(self);
+}
+
+// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_trunc_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_trunc_out::call(self, out);
+}
+// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_trunc_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_trunc_out::call(self, out);
+}
+
+}
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_tensor_quantized_tensor_cpu_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _make_per_tensor_quantized_tensor(const at::Tensor & self, double scale, int64_t zero_point);
+
+} // namespace cpu
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_softmax.h
ADDED
@@ -0,0 +1,49 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sparse_softmax_ops.h>
+
+namespace at {
+
+
+// aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+inline at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+    return at::_ops::_sparse_softmax_int::call(self, dim, dtype);
+}
+
+// aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor _sparse_softmax(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+    return at::_ops::_sparse_softmax_Dimname::call(self, dim, dtype);
+}
+
+// aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+inline at::Tensor _sparse_softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
+    return at::_ops::_sparse_softmax::call(self, dim, half_to_float);
+}
+
+// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
+    return at::_ops::_sparse_softmax_out::call(self, dim, half_to_float, out);
+}
+// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
+    return at::_ops::_sparse_softmax_out::call(self, dim, half_to_float, out);
+}
+
+}
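at::_sparse_softmax is the op behind torch.sparse.softmax: it normalizes over the given dim of a sparse COO tensor. A hedged sketch, not part of the diff, assuming a working libtorch build; `sparse_softmax_demo` is a hypothetical name:

#include <ATen/ATen.h>

at::Tensor sparse_softmax_demo() {
  // Build a sparse COO tensor, then call the .int overload (dtype=None).
  at::Tensor sp = at::rand({4, 4}).to_sparse();
  return at::_sparse_softmax(sp, /*dim=*/1);
}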
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_meta.h
ADDED
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_bitwise_or_Tensor : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self, const at::Tensor & other);
+};
+
+} // namespace native
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cat_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor cat(const at::ITensorListRef & tensors, int64_t dim=0);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummaxmin_backward.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/cummaxmin_backward_ops.h>
+
+namespace at {
+
+
+// aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor
+inline at::Tensor cummaxmin_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
+    return at::_ops::cummaxmin_backward::call(grad, input, indices, dim);
+}
+
+}
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fft_hfft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor fft_hfft_symint(const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_hfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_hfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+TORCH_API at::Tensor & fft_hfft_symint_out(at::Tensor & out, const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_hfft_symint_outf(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
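fft_hfft computes the FFT of a Hermitian-symmetric signal: it takes a complex half-spectrum of length m and returns a real signal whose default length is 2*(m-1). A minimal sketch, not part of the diff, assuming a working libtorch build; `hfft_demo` is a hypothetical name:

#include <ATen/ATen.h>

at::Tensor hfft_demo() {
  // Complex input of length 9 -> real output of length 2*(9-1) = 16.
  at::Tensor half_spectrum = at::rand({9}, at::kComplexFloat);
  return at::fft_hfft(half_spectrum);
}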
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fft_irfft_symint(const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_irfft_symint_out(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+} // namespace native
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fft_rfft_symint(const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_rfft_symint_out(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+} // namespace native
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_cpu_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & floor_divide_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
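floor_divide divides elementwise and rounds the quotient toward negative infinity, which is why floor_divide(-7, 2) gives -4 rather than -3. A small sketch, not part of the diff, assuming a working libtorch build; `floor_divide_demo` is a hypothetical name:

#include <ATen/ATen.h>

void floor_divide_demo() {
  at::Tensor a = at::tensor({7.0, -7.0});
  at::Tensor b = at::tensor({2.0, 2.0});
  // Quotient floored toward -inf: {3., -4.}
  at::Tensor q = at::floor_divide(a, b);
}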
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/indices_copy_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API indices_copy {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::indices_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "indices_copy(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API indices_copy_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::indices_copy")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isclose_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor isclose(const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false);
+} // namespace native
+} // namespace at
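isclose compares elementwise within |self - other| <= atol + rtol * |other|, using the defaults declared above (rtol=1e-05, atol=1e-08). A sketch, not part of the diff, assuming a working libtorch build; `nearly_equal` is a hypothetical helper:

#include <ATen/ATen.h>

bool nearly_equal(const at::Tensor& a, const at::Tensor& b) {
  // all() reduces the boolean mask; item<bool>() extracts the scalar.
  return at::isclose(a, b).all().item<bool>();
}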
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isreal_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor isreal(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/masked_scatter_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor masked_scatter(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source);
+TORCH_API at::Tensor & masked_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source);
+TORCH_API at::Tensor & masked_scatter_outf(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_backward_cuda_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor max_pool3d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices);
+TORCH_API at::Tensor & max_pool3d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices);
+TORCH_API at::Tensor & max_pool3d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_weights_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_outf(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/nanmean.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/nanmean_ops.h>
+
+namespace at {
+
+
+// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor nanmean(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+    return at::_ops::nanmean::call(self, dim, keepdim, dtype);
+}
+
+// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & nanmean_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+    return at::_ops::nanmean_out::call(self, dim, keepdim, dtype, out);
+}
+// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & nanmean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+    return at::_ops::nanmean_out::call(self, dim, keepdim, dtype, out);
+}
+
+}
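nanmean averages only the non-NaN elements along the (optional) dim. A minimal sketch, not part of the diff, assuming a working libtorch build; `nanmean_demo` is a hypothetical name:

#include <ATen/ATen.h>
#include <limits>

at::Tensor nanmean_demo() {
  at::Tensor x = at::tensor({1.0, 2.0, std::numeric_limits<double>::quiet_NaN()});
  // -> 1.5: the NaN is excluded from both the sum and the count.
  return at::nanmean(x);
}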
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/native_batch_norm_backward_cuda_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask);
+
+} // namespace cuda
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sign_meta.h
ADDED
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_sign : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self);
+};
+
+} // namespace native
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/special_bessel_y1_ops.h>
+
+namespace at {
+
+
+// aten::special_bessel_y1(Tensor self) -> Tensor
+inline at::Tensor special_bessel_y1(const at::Tensor & self) {
+    return at::_ops::special_bessel_y1::call(self);
+}
+
+// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_bessel_y1_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::special_bessel_y1_out::call(self, out);
+}
+// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_bessel_y1_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::special_bessel_y1_out::call(self, out);
+}
+
+}
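special_bessel_y1 evaluates the Bessel function of the second kind of order one, elementwise. A short sketch, not part of the diff, assuming a working libtorch build; `bessel_demo` is a hypothetical name:

#include <ATen/ATen.h>

at::Tensor bessel_demo() {
  // y1 diverges to -inf at 0, so sample strictly positive inputs.
  return at::special_bessel_y1(at::linspace(0.5, 10.0, 20));
}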
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_expm1_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_expm1 {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_expm1")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_expm1(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API special_expm1_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_expm1")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_logit_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor special_logit(const at::Tensor & self, c10::optional<double> eps=c10::nullopt);
+TORCH_API at::Tensor & special_logit_out(const at::Tensor & self, c10::optional<double> eps, at::Tensor & out);
+} // namespace native
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_softmax_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor special_softmax(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+} // namespace native
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward_meta_dispatch.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor upsample_nearest3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & upsample_nearest3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/zero_cpu_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & zero_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
+pip
vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Metadata-Version: 2.4
Name: attrs
Version: 24.3.0
Summary: Classes Without Boilerplate
Project-URL: Documentation, https://www.attrs.org/
Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html
Project-URL: GitHub, https://github.com/python-attrs/attrs
Project-URL: Funding, https://github.com/sponsors/hynek
Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi
Author-email: Hynek Schlawack <hs@ox.cx>
License-Expression: MIT
License-File: LICENSE
Keywords: attribute,boilerplate,class
Classifier: Development Status :: 5 - Production/Stable
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Typing :: Typed
Requires-Python: >=3.8
Provides-Extra: benchmark
Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'benchmark'
Requires-Dist: hypothesis; extra == 'benchmark'
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'benchmark'
Requires-Dist: pympler; extra == 'benchmark'
Requires-Dist: pytest-codspeed; extra == 'benchmark'
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'benchmark'
Requires-Dist: pytest-xdist[psutil]; extra == 'benchmark'
Requires-Dist: pytest>=4.3.0; extra == 'benchmark'
Provides-Extra: cov
Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'cov'
Requires-Dist: coverage[toml]>=5.3; extra == 'cov'
Requires-Dist: hypothesis; extra == 'cov'
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'cov'
Requires-Dist: pympler; extra == 'cov'
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'cov'
Requires-Dist: pytest-xdist[psutil]; extra == 'cov'
Requires-Dist: pytest>=4.3.0; extra == 'cov'
Provides-Extra: dev
Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'dev'
Requires-Dist: hypothesis; extra == 'dev'
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'dev'
Requires-Dist: pre-commit-uv; extra == 'dev'
Requires-Dist: pympler; extra == 'dev'
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'dev'
Requires-Dist: pytest-xdist[psutil]; extra == 'dev'
Requires-Dist: pytest>=4.3.0; extra == 'dev'
Provides-Extra: docs
Requires-Dist: cogapp; extra == 'docs'
Requires-Dist: furo; extra == 'docs'
Requires-Dist: myst-parser; extra == 'docs'
Requires-Dist: sphinx; extra == 'docs'
Requires-Dist: sphinx-notfound-page; extra == 'docs'
Requires-Dist: sphinxcontrib-towncrier; extra == 'docs'
Requires-Dist: towncrier<24.7; extra == 'docs'
Provides-Extra: tests
Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'tests'
Requires-Dist: hypothesis; extra == 'tests'
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests'
Requires-Dist: pympler; extra == 'tests'
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests'
Requires-Dist: pytest-xdist[psutil]; extra == 'tests'
Requires-Dist: pytest>=4.3.0; extra == 'tests'
Provides-Extra: tests-mypy
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests-mypy'
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests-mypy'
Description-Content-Type: text/markdown
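The `Requires-Dist` lines above use standard PEP 508 environment markers to scope dependencies to extras and interpreter versions. A minimal sketch of evaluating one such marker, assuming the third-party `packaging` library is available:

```python
from packaging.markers import Marker

# One marker taken from the metadata above; "extra" is not part of the
# default environment, so it must be supplied explicitly.
marker = Marker(
    "(platform_python_implementation == 'CPython' "
    "and python_version >= '3.10') and extra == 'dev'"
)
print(marker.evaluate({"extra": "dev"}))   # True on CPython >= 3.10
print(marker.evaluate({"extra": "docs"}))  # False: wrong extra
```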
<p align="center">
  <a href="https://www.attrs.org/">
    <img src="https://raw.githubusercontent.com/python-attrs/attrs/main/docs/_static/attrs_logo.svg" width="35%" alt="attrs" />
  </a>
</p>


*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)).
[Trusted by NASA](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-profile/customizing-your-profile/personalizing-your-profile#list-of-qualifying-repositories-for-mars-2020-helicopter-contributor-achievement) for Mars missions since 2020!

Its main goal is to help you to write **concise** and **correct** software without slowing down your code.


## Sponsors

*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek).
Especially those generously supporting us at the *The Organization* tier and higher:

<!-- sponsor-break-begin -->

<p align="center">

<!-- [[[cog
import pathlib, tomllib

for sponsor in tomllib.loads(pathlib.Path("pyproject.toml").read_text())["tool"]["sponcon"]["sponsors"]:
    print(f'<a href="{sponsor["url"]}"><img title="{sponsor["title"]}" src="https://www.attrs.org/en/24.3.0/_static/sponsors/{sponsor["img"]}" width="190" /></a>')
]]] -->
<a href="https://www.variomedia.de/"><img title="Variomedia AG" src="https://www.attrs.org/en/24.3.0/_static/sponsors/Variomedia.svg" width="190" /></a>
<a href="https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek"><img title="Tidelift" src="https://www.attrs.org/en/24.3.0/_static/sponsors/Tidelift.svg" width="190" /></a>
<a href="https://klaviyo.com/"><img title="Klaviyo" src="https://www.attrs.org/en/24.3.0/_static/sponsors/Klaviyo.svg" width="190" /></a>
<a href="https://www.emsys-renewables.com/"><img title="emsys renewables" src="https://www.attrs.org/en/24.3.0/_static/sponsors/emsys-renewables.svg" width="190" /></a>
<a href="https://filepreviews.io/"><img title="FilePreviews" src="https://www.attrs.org/en/24.3.0/_static/sponsors/FilePreviews.svg" width="190" /></a>
<a href="https://polar.sh/"><img title="Polar" src="https://www.attrs.org/en/24.3.0/_static/sponsors/Polar.svg" width="190" /></a>
<!-- [[[end]]] -->

</p>

<!-- sponsor-break-end -->

<p align="center">
  <strong>Please consider <a href="https://github.com/sponsors/hynek">joining them</a> to help make <em>attrs</em>’s maintenance more sustainable!</strong>
</p>

<!-- teaser-end -->

## Example

*attrs* gives you a class decorator and a way to declaratively define the attributes on that class:

<!-- code-begin -->

```pycon
>>> from attrs import asdict, define, make_class, Factory

>>> @define
... class SomeClass:
...     a_number: int = 42
...     list_of_numbers: list[int] = Factory(list)
...
...     def hard_math(self, another_number):
...         return self.a_number + sum(self.list_of_numbers) * another_number


>>> sc = SomeClass(1, [1, 2, 3])
>>> sc
SomeClass(a_number=1, list_of_numbers=[1, 2, 3])

>>> sc.hard_math(3)
19
>>> sc == SomeClass(1, [1, 2, 3])
True
>>> sc != SomeClass(2, [3, 2, 1])
True

>>> asdict(sc)
{'a_number': 1, 'list_of_numbers': [1, 2, 3]}

>>> SomeClass()
SomeClass(a_number=42, list_of_numbers=[])

>>> C = make_class("C", ["a", "b"])
>>> C("foo", "bar")
C(a='foo', b='bar')
```

After *declaring* your attributes, *attrs* gives you:

- a concise and explicit overview of the class's attributes,
- a nice human-readable `__repr__`,
- equality-checking methods,
- an initializer,
- and much more,

*without* writing dull boilerplate code again and again and *without* runtime performance penalties.
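To make the saved boilerplate concrete, here is a hand-written sketch of roughly what `@define` generates for the `SomeClass` above. The class name `SomeClassByHand` is hypothetical, and this is simplified: the real generated methods also cover `__ne__`, hashing, slots, and more edge cases.

```python
class SomeClassByHand:
    """Hand-written approximation of the attrs-generated protocol methods."""

    def __init__(self, a_number=42, list_of_numbers=None):
        self.a_number = a_number
        # Factory(list) gives each instance its own fresh list.
        self.list_of_numbers = [] if list_of_numbers is None else list_of_numbers

    def __repr__(self):
        return (f"SomeClassByHand(a_number={self.a_number!r}, "
                f"list_of_numbers={self.list_of_numbers!r})")

    def __eq__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return (self.a_number, self.list_of_numbers) == (
            other.a_number, other.list_of_numbers)

    def hard_math(self, another_number):
        return self.a_number + sum(self.list_of_numbers) * another_number
```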

---

This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0.
The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**.

Check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for an in-depth explanation!


### Hate Type Annotations!?

No problem!
Types are entirely **optional** with *attrs*.
Simply assign `attrs.field()` to the attributes instead of annotating them with types:

```python
from attrs import define, field

@define
class SomeClass:
    a_number = field(default=42)
    list_of_numbers = field(factory=list)
```


## Data Classes

On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*).
In practice it does a lot more and is more flexible.
For instance, it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization), has a replacement for `__init_subclass__`, and allows for stepping through the generated methods using a debugger.

For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes), but generally speaking, we are more likely to commit crimes against nature to make things work that one would expect to work, but that are quite complicated in practice.
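As a taste of that flexibility, the NumPy equality customization linked above can be expressed with `attrs.cmp_using`. A minimal sketch, assuming NumPy is installed:

```python
import numpy as np
from attrs import cmp_using, define, field

@define
class Reading:
    # Compare the array field by value rather than identity;
    # np.array_equal checks both shape and elements.
    samples: np.ndarray = field(eq=cmp_using(eq=np.array_equal))

assert Reading(np.array([1, 2, 3])) == Reading(np.array([1, 2, 3]))
```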


## Project Information

- [**Changelog**](https://www.attrs.org/en/stable/changelog.html)
- [**Documentation**](https://www.attrs.org/)
- [**PyPI**](https://pypi.org/project/attrs/)
- [**Source Code**](https://github.com/python-attrs/attrs)
- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md)
- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs)
- **Get Help**: use the `python-attrs` tag on [Stack Overflow](https://stackoverflow.com/questions/tagged/python-attrs)


### *attrs* for Enterprise

Available as part of the [Tidelift Subscription](https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek).

The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications.
Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use.

## Release Information

### Backwards-incompatible Changes

- Python 3.7 has been dropped.
  [#1340](https://github.com/python-attrs/attrs/issues/1340)


### Changes

- Introduce `attrs.NothingType`, for annotating types consistent with `attrs.NOTHING` (see the sketch after this list).
  [#1358](https://github.com/python-attrs/attrs/issues/1358)
- Allow mutating `__suppress_context__` and `__notes__` on frozen exceptions.
  [#1365](https://github.com/python-attrs/attrs/issues/1365)
- `attrs.converters.optional()` works again when taking `attrs.converters.pipe()` or another `Converter` as its argument.
  [#1372](https://github.com/python-attrs/attrs/issues/1372)
- *attrs* instances now support [`copy.replace()`](https://docs.python.org/3/library/copy.html#copy.replace).
  [#1383](https://github.com/python-attrs/attrs/issues/1383)
- `attrs.validators.instance_of()`'s type hints now allow for union types.
  For example: `instance_of(str | int)` (see the sketch after this list).
  [#1385](https://github.com/python-attrs/attrs/issues/1385)
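A minimal sketch of two of the items above, `attrs.NothingType` (#1358) and union support in `instance_of()` (#1385); it assumes Python 3.10+ for the `X | Y` union syntax:

```python
from attrs import NOTHING, NothingType, define, field, validators

# #1358: the type of the NOTHING sentinel can now be named in annotations.
def describe(value: int | NothingType = NOTHING) -> str:
    return "no value passed" if value is NOTHING else f"got {value}"

# #1385: instance_of()'s type hints accept union types directly.
@define
class Tagged:
    tag: str | int = field(validator=validators.instance_of(str | int))

print(describe())      # no value passed
print(Tagged(7).tag)   # 7
```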


---

[Full changelog →](https://www.attrs.org/en/stable/changelog.html)
vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/RECORD
ADDED @@ -0,0 +1,56 @@
attr/__init__.py,sha256=fOYIvt1eGSqQre4uCS3sJWKZ0mwAuC8UD6qba5OS9_U,2057
attr/__init__.pyi,sha256=QIXnnHPoucmDWkbpNsWTP-cgJ1bn8le7DjyRa_wYdew,11281
attr/__pycache__/__init__.cpython-310.pyc,,
attr/__pycache__/_cmp.cpython-310.pyc,,
attr/__pycache__/_compat.cpython-310.pyc,,
attr/__pycache__/_config.cpython-310.pyc,,
attr/__pycache__/_funcs.cpython-310.pyc,,
attr/__pycache__/_make.cpython-310.pyc,,
attr/__pycache__/_next_gen.cpython-310.pyc,,
attr/__pycache__/_version_info.cpython-310.pyc,,
attr/__pycache__/converters.cpython-310.pyc,,
attr/__pycache__/exceptions.cpython-310.pyc,,
attr/__pycache__/filters.cpython-310.pyc,,
attr/__pycache__/setters.cpython-310.pyc,,
attr/__pycache__/validators.cpython-310.pyc,,
attr/_cmp.py,sha256=3umHiBtgsEYtvNP_8XrQwTCdFoZIX4DEur76N-2a3X8,4123
attr/_cmp.pyi,sha256=U-_RU_UZOyPUEQzXE6RMYQQcjkZRY25wTH99sN0s7MM,368
attr/_compat.py,sha256=4hlXbWhdDjQCDK6FKF1EgnZ3POiHgtpp54qE0nxaGHg,2704
attr/_config.py,sha256=dGq3xR6fgZEF6UBt_L0T-eUHIB4i43kRmH0P28sJVw8,843
attr/_funcs.py,sha256=5-tUKJtp3h5El55EcDl6GWXFp68fT8D8U7uCRN6497I,15854
attr/_make.py,sha256=orKSf6C-B1eZfpat4lbAtxvmSyE_yxlG8zY9115ufWk,94157
attr/_next_gen.py,sha256=7FRkbtl_N017SuBhf_Vw3mw2c2pGZhtCGOzadgz7tp4,24395
attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469
attr/_version_info.py,sha256=exSqb3b5E-fMSsgZAlEw9XcLpEgobPORCZpcaEglAM4,2121
attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209
attr/converters.py,sha256=GlDeOzPeTFgeBBLbj9G57Ez5lAk68uhSALRYJ_exe84,3861
attr/converters.pyi,sha256=orU2bff-VjQa2kMDyvnMQV73oJT2WRyQuw4ZR1ym1bE,643
attr/exceptions.py,sha256=HRFq4iybmv7-DcZwyjl6M1euM2YeJVK_hFxuaBGAngI,1977
attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539
attr/filters.py,sha256=ZBiKWLp3R0LfCZsq7X11pn9WX8NslS2wXM4jsnLOGc8,1795
attr/filters.pyi,sha256=3J5BG-dTxltBk1_-RuNRUHrv2qu1v8v4aDNAQ7_mifA,208
attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
attr/setters.py,sha256=5-dcT63GQK35ONEzSgfXCkbB7pPkaR-qv15mm4PVSzQ,1617
attr/setters.pyi,sha256=NnVkaFU1BB4JB8E4JuXyrzTUgvtMpj8p3wBdJY7uix4,584
attr/validators.py,sha256=WaB1HLAHHqRHWsrv_K9H-sJ7ESil3H3Cmv2d8TtVZx4,20046
attr/validators.pyi,sha256=s2WhKPqskxbsckJfKk8zOuuB088GfgpyxcCYSNFLqNU,2603
attrs-24.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
attrs-24.3.0.dist-info/METADATA,sha256=f9hhECeAUyS7iewHPRuMLDy1tpJ6vyy8R_TKUnCmiA8,11654
attrs-24.3.0.dist-info/RECORD,,
attrs-24.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
attrs-24.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
attrs-24.3.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109
attrs/__init__.py,sha256=qeQJZ4O08yczSn840v9bYOaZyRE81WsVi-QCrY3krCU,1107
attrs/__init__.pyi,sha256=nZmInocjM7tHV4AQw0vxO_fo6oJjL_PonlV9zKKW8DY,7931
attrs/__pycache__/__init__.cpython-310.pyc,,
attrs/__pycache__/converters.cpython-310.pyc,,
attrs/__pycache__/exceptions.cpython-310.pyc,,
attrs/__pycache__/filters.cpython-310.pyc,,
attrs/__pycache__/setters.cpython-310.pyc,,
attrs/__pycache__/validators.cpython-310.pyc,,
attrs/converters.py,sha256=8kQljrVwfSTRu8INwEk8SI0eGrzmWftsT7rM0EqyohM,76
attrs/exceptions.py,sha256=ACCCmg19-vDFaDPY9vFl199SPXCQMN_bENs4DALjzms,76
attrs/filters.py,sha256=VOUMZug9uEU6dUuA0dF1jInUK0PL3fLgP0VBS5d-CDE,73
attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
attrs/setters.py,sha256=eL1YidYQV3T2h9_SYIZSZR1FAcHGb1TuCTy0E0Lv2SU,73
attrs/validators.py,sha256=xcy6wD5TtTkdCG1f4XWbocPSO0faBjk5IfVJfP6SUj0,76
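Each RECORD row above is a CSV triple of `path,hash,size`, where the hash is `sha256=` followed by an unpadded URL-safe base64 digest; rows for `.pyc` files and for RECORD itself leave hash and size empty. A minimal verification sketch under those standard wheel-RECORD conventions (PEP 376/627); the helper name is hypothetical:

```python
import base64
import csv
import hashlib
from pathlib import Path

def verify_record(dist_info: Path) -> list[str]:
    """Return the paths whose current contents do not match RECORD."""
    site_packages = dist_info.parent
    bad = []
    with open(dist_info / "RECORD", newline="") as f:
        for path, hash_spec, size in csv.reader(f):
            if not hash_spec:  # .pyc files and RECORD itself are unhashed
                continue
            data = (site_packages / path).read_bytes()
            algo, _, expected = hash_spec.partition("=")
            digest = base64.urlsafe_b64encode(
                hashlib.new(algo, data).digest()
            ).rstrip(b"=").decode()
            if len(data) != int(size) or digest != expected:
                bad.append(path)
    return bad

# Usage against the listing above:
# verify_record(Path("vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info"))
```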
vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/REQUESTED
ADDED (empty file)

vllm/lib/python3.10/site-packages/attrs-24.3.0.dist-info/licenses/LICENSE
ADDED @@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 Hynek Schlawack and the attrs contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vllm/lib/python3.10/site-packages/click/__pycache__/__init__.cpython-310.pyc
ADDED (binary file, 2.6 kB)

vllm/lib/python3.10/site-packages/tokenizers-0.21.0.dist-info/INSTALLER
ADDED @@ -0,0 +1 @@
pip
vllm/lib/python3.10/site-packages/tokenizers-0.21.0.dist-info/RECORD
ADDED @@ -0,0 +1,46 @@
tokenizers-0.21.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
tokenizers-0.21.0.dist-info/METADATA,sha256=oNldYkLKpnavqOq1XABp8c_yNaR65mGu_qaFlD0St2M,6719
tokenizers-0.21.0.dist-info/RECORD,,
tokenizers-0.21.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
tokenizers-0.21.0.dist-info/WHEEL,sha256=PuLiPGpD0eVcoUkb9lueobt7VbCYShlDtLaTRPpT7Z0,127
tokenizers/__init__.py,sha256=ZE5ZagUvobBScrHBQdEobhx4wqM0bsq9F9aLYkBNjYQ,2615
tokenizers/__init__.pyi,sha256=jw34WZXaYu8NBBJ2_cypfOqJYxI7CXKPzlveisXw4XQ,40182
tokenizers/__pycache__/__init__.cpython-310.pyc,,
tokenizers/decoders/__init__.py,sha256=hfwM6CFUDvlMGGL4-xsaaYz81K9P5rQI5ZL5UHWK8Y4,372
tokenizers/decoders/__init__.pyi,sha256=U0dfPVxoGpb-RmNKzZMZebe0fK2riRMbxQh9yJMHjYE,7378
tokenizers/decoders/__pycache__/__init__.cpython-310.pyc,,
tokenizers/implementations/__init__.py,sha256=VzAsplaIo7rl4AFO8Miu7ig7MfZjvonwVblZw01zR6M,310
tokenizers/implementations/__pycache__/__init__.cpython-310.pyc,,
tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc,,
tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc,,
tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc,,
tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc,,
tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc,,
tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc,,
tokenizers/implementations/base_tokenizer.py,sha256=2TFZhLupaJiMDYGJuUNmxYJv-cnR8bDHmbMzaYpFROs,14206
tokenizers/implementations/bert_wordpiece.py,sha256=sKCum0FKPYdSgJFJN8LDerVBoTDRSqyqSdrcm-lvQqI,5520
tokenizers/implementations/byte_level_bpe.py,sha256=OA_jyy3EQmYTa6hnf-EKwLOFuyroqFYOJz25ysM2BUk,4289
tokenizers/implementations/char_level_bpe.py,sha256=Q2ZEAW0xMQHF7YCUtmplwaxbU-J0P2NK4PJGMxUb-_c,5466
tokenizers/implementations/sentencepiece_bpe.py,sha256=LwrofoohnUfME2lK2lQYoyQIhP84RP0CIlHRaj0hyNs,3738
tokenizers/implementations/sentencepiece_unigram.py,sha256=SYiVXL8ZtqLXKpuqwnwmrfxgGotu8yAkOu7dLztEXIo,7580
tokenizers/models/__init__.py,sha256=eJZ4HTAQZpxnKILNylWaTFqxXy-Ba6OKswWN47feeV8,176
tokenizers/models/__init__.pyi,sha256=clPTwiyjz7FlVdEuwo_3Wa_TmQrbZhW0SGmnNylepnY,16929
tokenizers/models/__pycache__/__init__.cpython-310.pyc,,
tokenizers/normalizers/__init__.py,sha256=_06w4cqRItveEgIddYaLMScgkSOkIAMIzYCesb5AA4U,841
tokenizers/normalizers/__init__.pyi,sha256=dwfVsvg0YbeYoAaBSmKsImqL-tyxiDyHaaTFsZK4aZw,20897
tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc,,
tokenizers/pre_tokenizers/__init__.py,sha256=wd6KYQA_RsGSQK-HeG9opTRhv4ttSRkyno2dk6az-PM,557
tokenizers/pre_tokenizers/__init__.pyi,sha256=dLtaxOgcBa85vQC6byvfKGCOWTEi4c42IcqimfatksQ,23602
tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc,,
tokenizers/processors/__init__.py,sha256=xM2DEKwKtHIumHsszM8AMkq-AlaqvBZFXWgLU8SNhOY,307
tokenizers/processors/__init__.pyi,sha256=hx767ZY8SHhxb_hiXPRxm-f_KcoR4XDx7vfK2c0lR-Q,11357
tokenizers/processors/__pycache__/__init__.cpython-310.pyc,,
tokenizers/tokenizers.abi3.so,sha256=uPe1mVjvIUFcXkPyyik0lcgOjPT3LKlTtAOTuDhZAN0,8942016
tokenizers/tools/__init__.py,sha256=xG8caB9OHC8cbB01S5vYV14HZxhO6eWbLehsb70ppio,55
tokenizers/tools/__pycache__/__init__.cpython-310.pyc,,
tokenizers/tools/__pycache__/visualizer.cpython-310.pyc,,
tokenizers/tools/visualizer-styles.css,sha256=zAydq1oGWD8QEll4-eyL8Llw0B1sty_hpIE3tYxL02k,4850
tokenizers/tools/visualizer.py,sha256=gi-E2NCP7FuG6ujpQOdalSTXUlaV85V6NI-ZPPTvA_4,14625
tokenizers/trainers/__init__.py,sha256=UTu22AGcp76IvpW45xLRbJWET04NxPW6NfCb2YYz0EM,248
tokenizers/trainers/__init__.pyi,sha256=3TwFKts4me7zQfVRcSTmtXYiP4XwcRjfAYtwqoZVtoQ,5382
tokenizers/trainers/__pycache__/__init__.cpython-310.pyc,,
vllm/lib/python3.10/site-packages/torchvision/_C.so
ADDED @@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d19d6a3341eb3c0e248ae08c06bff9ec797d1fca30659dc7ae2e527d55206c2a
size 7746688
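The `_C.so` entry above is stored as a Git LFS pointer rather than the binary itself: a `version` line, the `sha256` object ID, and the object's size in bytes. A minimal sketch of reading such a pointer (the helper is hypothetical, not part of any library here):

```python
def parse_lfs_pointer(text: str) -> dict[str, str]:
    """Parse the 'key value' lines of a Git LFS pointer file into a dict."""
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:d19d6a3341eb3c0e248ae08c06bff9ec797d1fca30659dc7ae2e527d55206c2a\n"
    "size 7746688\n"
)
assert pointer["oid"].startswith("sha256:")
assert int(pointer["size"]) == 7_746_688  # the real shared library is ~7.7 MB
```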
vllm/lib/python3.10/site-packages/torchvision/__pycache__/__init__.cpython-310.pyc
ADDED (binary file, 3.47 kB)

vllm/lib/python3.10/site-packages/torchvision/__pycache__/_internally_replaced_utils.cpython-310.pyc
ADDED (binary file, 1.5 kB)

vllm/lib/python3.10/site-packages/torchvision/__pycache__/_meta_registrations.cpython-310.pyc
ADDED (binary file, 6.78 kB)

vllm/lib/python3.10/site-packages/torchvision/__pycache__/_utils.cpython-310.pyc
ADDED (binary file, 1.44 kB)

vllm/lib/python3.10/site-packages/torchvision/__pycache__/extension.cpython-310.pyc
ADDED (binary file, 2.6 kB)

vllm/lib/python3.10/site-packages/torchvision/__pycache__/version.cpython-310.pyc
ADDED (binary file, 351 Bytes)

vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/__init__.cpython-310.pyc
ADDED (binary file, 807 Bytes)

vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_api.cpython-310.pyc
ADDED (binary file, 9.55 kB)

vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_meta.cpython-310.pyc
ADDED (binary file, 19.4 kB)

vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/_utils.cpython-310.pyc
ADDED (binary file, 9.66 kB)

vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/convnext.cpython-310.pyc
ADDED (binary file, 12.5 kB)

vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/efficientnet.cpython-310.pyc
ADDED (binary file, 29.2 kB)

vllm/lib/python3.10/site-packages/torchvision/models/__pycache__/mobilenetv2.cpython-310.pyc
ADDED (binary file, 7.77 kB)