diff --git a/.gitattributes b/.gitattributes index 731760163f5c7b3ed95222bbc258a087d38cf437..ba29765c594416143d8098c19e4e0a34a7d8113e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1577,3 +1577,4 @@ parrot/lib/python3.10/site-packages/scipy/stats/__pycache__/_distn_infrastructur vllm/lib/python3.10/site-packages/multidict/_multidict.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/torio/lib/_torio_ffmpeg6.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/propcache/_helpers_c.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ed1a0ebdf20ea9deb348f24e2b1fbc1d1f5a29bc --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false); +TORCH_API at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log.h new file mode 100644 index 0000000000000000000000000000000000000000..cfa5e50ea148351730407548a030209f0eac7aa4 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_log(Tensor[] self) -> Tensor[] +inline ::std::vector _foreach_log(at::TensorList self) { + return at::_ops::_foreach_log::call(self); +} + +// aten::_foreach_log_(Tensor(a!)[] self) -> () +inline void _foreach_log_(at::TensorList self) { + return at::_ops::_foreach_log_::call(self); +} + +// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_log_out(at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_log_out::call(self, out); +} +// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_log_outf(at::TensorList self, at::TensorList out) { + return 
at::_ops::_foreach_log_out::call(self, out); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam.h new file mode 100644 index 0000000000000000000000000000000000000000..09a067936d524f0a6fdbd4263c0e935fb346c560 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adam.h @@ -0,0 +1,63 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () +inline void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adam_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); +} + +// aten::_fused_adam_.tensor_lr(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> () +inline void _fused_adam_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adam__tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); +} + +// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () +inline void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); +} +// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> () +inline void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); +} + +// aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) +inline ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); +} + +// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> () +inline void _fused_adam_out(at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adam_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); +} +// aten::_fused_adam.tensor_lr_out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () +inline void _fused_adam_outf(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf, at::TensorList out) { + return at::_ops::_fused_adam_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); +} + +// aten::_fused_adam.tensor_lr(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, Tensor lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) +inline ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adam(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale={}, const c10::optional & found_inf={}) { + return at::_ops::_fused_adam_tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_indices.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_indices.h new file mode 100644 index 0000000000000000000000000000000000000000..27972b4601cfe0dfa8cf712a45ef986c2f0ea45e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_indices.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual.h new file mode 100644 index 0000000000000000000000000000000000000000..1f8e9f83873cbc4ffdd1a394c09400de9ce7529e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + 
+#include + +namespace at { + + +// aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a) +inline at::Tensor _make_dual(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) { + return at::_ops::_make_dual::call(primal, tangent, level); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3dcbe996a58c09c9e9f2cb313e03a7b1c6210884 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _mkldnn_transpose { + using schema = at::Tensor (const at::Tensor &, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_mkldnn_transpose") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim0, int64_t dim1); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1); +}; + +struct TORCH_API _mkldnn_transpose_ { + using schema = at::Tensor & (at::Tensor &, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_mkldnn_transpose_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, int64_t dim0, int64_t dim1); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1); +}; + +struct TORCH_API _mkldnn_transpose_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_mkldnn_transpose") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..9f2a8d8a9cc825541d8b77d8cf7fa17ac0d4c37a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input); + } +} + +// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w, grad_input); + } +} + +// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & _upsample_bicubic2d_aa_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } +} + +// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & _upsample_bicubic2d_aa_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } +} + +// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor +inline at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w); +} +namespace symint { + template ::value>> + at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales_h, scales_w); + } +} + +// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor +inline at::Tensor _upsample_bicubic2d_aa_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w); +} +namespace symint { + template ::value>> + at::Tensor _upsample_bicubic2d_aa_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w); + } +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fb009e3023319aa351391d7172921000c402a4aa --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _upsample_nearest_exact3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor _upsample_nearest_exact3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact3d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out); +TORCH_API at::Tensor & _upsample_nearest_exact3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d=c10::nullopt, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_d, c10::optional scales_h, c10::optional scales_w, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2f9d0441ea8602dd59eb24b5f49a9220a6aad07d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from 
NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API void _validate_sparse_compressed_tensor_args(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..db3a3e51be2c6dffac8566efa76e27e9fda58273 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _weight_norm(const at::Tensor & v, const at::Tensor & g, int64_t dim=0); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..37f7c864c2ae13e0fe3b5d63e3b021b5a2af823f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cross_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cross_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..de9b36bbeda4da429980947514e60ba563d85f46 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cross_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor cross(const at::Tensor & self, const at::Tensor & other, c10::optional dim=c10::nullopt); +TORCH_API at::Tensor & cross_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional dim=c10::nullopt); +TORCH_API at::Tensor & cross_outf(const at::Tensor & self, const at::Tensor & other, c10::optional dim, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..84d1fbe15eb5bacbd3182d684f3a09bd5e40e161 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API crow_indices { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::crow_indices") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "crow_indices(Tensor(a) self) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dequantize_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dequantize_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..25d1ecbc5ef2d2ef01f1ee4bb92081e6ad5532b1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dequantize_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor dequantize(const at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dsplit_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dsplit_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..02b4b6cabe12327f746c6b489dda2470fbb2cbcb --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/dsplit_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::vector dsplit(const at::Tensor & self, int64_t sections); +TORCH_API ::std::vector dsplit(const at::Tensor & self, at::IntArrayRef indices); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/floor_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/floor_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..058fdbb796fc12e1238b3a76ade4575504b0f0f0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/floor_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor floor(const at::Tensor & self); +TORCH_API at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & floor_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/greater_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/greater_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4c0f50753a554030a87c85c4f1549a2a4496c73f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/greater_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor greater(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & greater_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & greater_(at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor greater(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & greater_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & greater_(at::Tensor & self, const at::Tensor & other); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9ea2ee18b4f10495bd44d2f6e581133d190aa61e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by 
torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API hardshrink_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardshrink") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out); +}; + +struct TORCH_API hardshrink { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::hardshrink") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & lambd); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h 
b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7903efd51345cce8adc6ff4af99bfda9969d9a96 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor lift_fresh_copy(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_power_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_power_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1799fe278abc2de3a1ecd8e37a029cad8cd62e03 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_power_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_matrix_power { + using schema = at::Tensor (const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_power") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_power(Tensor self, int n) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t n); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n); +}; + +struct TORCH_API linalg_matrix_power_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_power") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t n, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b1994c71edf67861925b4b07c7ba717b9f2367c3 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor matrix_exp_backward(const at::Tensor & self, const at::Tensor & grad); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mm_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6af961bf25b378abc75e8707b0ca2bc5d2af20a3 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mm_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API mm { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mm(Tensor self, Tensor mat2) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & mat2); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2); +}; + +struct TORCH_API mm_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mm.out(Tensor self, Tensor mat2, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7b5ac4927674537d701a78d24dd45a2f4de98e0f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_meta_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor nll_loss_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor & nll_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor & nll_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input); +TORCH_API at::Tensor & nll_loss_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor & nll_loss_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/ormqr_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/ormqr_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1f9498221406fe9e29bb8ebd9fef9b2fc8a0d64b --- /dev/null +++ 
b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/ormqr_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor ormqr(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false); +TORCH_API at::Tensor & ormqr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false); +TORCH_API at::Tensor & ormqr_outf(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3df082405352489d96a7744348b466a84231a459 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/randint_like_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API randint_like { + using schema = at::Tensor (const at::Tensor &, c10::SymInt, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint_like") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymInt high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); +}; + +struct TORCH_API randint_like_low_dtype { + using schema = at::Tensor (const at::Tensor &, c10::SymInt, c10::SymInt, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint_like") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low_dtype") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); +}; + +struct TORCH_API randint_like_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymInt, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint_like") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymInt high, c10::optional memory_format, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt high, c10::optional memory_format, at::Tensor & out); +}; + +struct TORCH_API randint_like_low_dtype_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymInt, c10::SymInt, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::randint_like") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "low_dtype_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional memory_format, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional memory_format, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3cf750a912c2b966d56e8344e0a7b413ed6625a3 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_backward_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor replication_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor replication_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & replication_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input); +TORCH_API at::Tensor & replication_pad1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad1d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2a1040281a7bcdf2e4b09450ae79c4a6f67e5608 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API sparse_resize_ { + using schema = const at::Tensor & (const at::Tensor &, at::IntArrayRef, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_resize_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)") + static const at::Tensor & call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); + static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); +}; + +struct TORCH_API sparse_resize_out { + using schema = const at::Tensor & (const at::Tensor &, at::IntArrayRef, int64_t, int64_t, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_resize") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) 
out) -> Tensor(a!)") + static const at::Tensor & call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out); + static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out); +}; + +struct TORCH_API sparse_resize { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_resize") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9701de88f63ddf403f675a559fbf8867b802bae1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_bessel_j0(const at::Tensor & self); +TORCH_API at::Tensor & special_bessel_j0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_bessel_j0_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_softmax_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_softmax_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cd9e2631a9abd488377c53455833605c44eb26b1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_softmax_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_log_softmax { + using schema = at::Tensor (const at::Tensor &, int64_t, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_log_softmax") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_log_softmax(Tensor self, int dim, *, ScalarType? 
dtype=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, c10::optional dtype); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_zeta_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_zeta_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4776b381d3d19628c97cbda859864abab59bdf8e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_zeta_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_special_zeta_out : public at::meta::structured_special_zeta { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor special_zeta(const at::Scalar & self, const at::Tensor & other); +TORCH_API at::Tensor & special_zeta_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor special_zeta(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & special_zeta_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..72827b1e7a2cc9c1c4d695035fb622f310f29cdf --- /dev/null +++ 
b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & tril_indices_out(at::Tensor & out, int64_t row, int64_t col, int64_t offset=0); +TORCH_API at::Tensor & tril_indices_outf(int64_t row, int64_t col, int64_t offset, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_copy_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3d2b2d72d1002bb2d6cbc98e11f3d9e0c87eb29c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & unfold_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step); +TORCH_API at::Tensor & unfold_copy_outf(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/dotenv/cli.py b/vllm/lib/python3.10/site-packages/dotenv/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..65ead46155f568a197a16b64c6335f1f28cda9a6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dotenv/cli.py @@ -0,0 +1,199 @@ +import json +import os +import shlex +import sys +from contextlib import contextmanager +from subprocess import Popen +from typing import Any, Dict, IO, Iterator, List + +try: + import click +except ImportError: + sys.stderr.write('It seems python-dotenv is not installed with cli option. \n' + 'Run pip install "python-dotenv[cli]" to fix this.') + sys.exit(1) + +from .main import dotenv_values, set_key, unset_key +from .version import __version__ + + +def enumerate_env(): + """ + Return a path for the ${pwd}/.env file. + + If pwd does not exist, return None. + """ + try: + cwd = os.getcwd() + except FileNotFoundError: + return None + path = os.path.join(cwd, '.env') + return path + + +@click.group() +@click.option('-f', '--file', default=enumerate_env(), + type=click.Path(file_okay=True), + help="Location of the .env file, defaults to .env file in current working directory.") +@click.option('-q', '--quote', default='always', + type=click.Choice(['always', 'never', 'auto']), + help="Whether to quote or not the variable values. Default mode is always. 
This does not affect parsing.") +@click.option('-e', '--export', default=False, + type=click.BOOL, + help="Whether to write the dot file as an executable bash script.") +@click.version_option(version=__version__) +@click.pass_context +def cli(ctx: click.Context, file: Any, quote: Any, export: Any) -> None: + """This script is used to set, get or unset values from a .env file.""" + ctx.obj = {'QUOTE': quote, 'EXPORT': export, 'FILE': file} + + +@contextmanager +def stream_file(path: os.PathLike) -> Iterator[IO[str]]: + """ + Open a file and yield the corresponding (decoded) stream. + + Exits with error code 2 if the file cannot be opened. + """ + + try: + with open(path) as stream: + yield stream + except OSError as exc: + print(f"Error opening env file: {exc}", file=sys.stderr) + exit(2) + + +@cli.command() +@click.pass_context +@click.option('--format', default='simple', + type=click.Choice(['simple', 'json', 'shell', 'export']), + help="The format in which to display the list. Default format is simple, " + "which displays name=value without quotes.") +def list(ctx: click.Context, format: bool) -> None: + """Display all the stored key/value.""" + file = ctx.obj['FILE'] + + with stream_file(file) as stream: + values = dotenv_values(stream=stream) + + if format == 'json': + click.echo(json.dumps(values, indent=2, sort_keys=True)) + else: + prefix = 'export ' if format == 'export' else '' + for k in sorted(values): + v = values[k] + if v is not None: + if format in ('export', 'shell'): + v = shlex.quote(v) + click.echo(f'{prefix}{k}={v}') + + +@cli.command() +@click.pass_context +@click.argument('key', required=True) +@click.argument('value', required=True) +def set(ctx: click.Context, key: Any, value: Any) -> None: + """Store the given key/value.""" + file = ctx.obj['FILE'] + quote = ctx.obj['QUOTE'] + export = ctx.obj['EXPORT'] + success, key, value = set_key(file, key, value, quote, export) + if success: + click.echo(f'{key}={value}') + else: + exit(1) + + 
+@cli.command() +@click.pass_context +@click.argument('key', required=True) +def get(ctx: click.Context, key: Any) -> None: + """Retrieve the value for the given key.""" + file = ctx.obj['FILE'] + + with stream_file(file) as stream: + values = dotenv_values(stream=stream) + + stored_value = values.get(key) + if stored_value: + click.echo(stored_value) + else: + exit(1) + + +@cli.command() +@click.pass_context +@click.argument('key', required=True) +def unset(ctx: click.Context, key: Any) -> None: + """Removes the given key.""" + file = ctx.obj['FILE'] + quote = ctx.obj['QUOTE'] + success, key = unset_key(file, key, quote) + if success: + click.echo(f"Successfully removed {key}") + else: + exit(1) + + +@cli.command(context_settings={'ignore_unknown_options': True}) +@click.pass_context +@click.option( + "--override/--no-override", + default=True, + help="Override variables from the environment file with those from the .env file.", +) +@click.argument('commandline', nargs=-1, type=click.UNPROCESSED) +def run(ctx: click.Context, override: bool, commandline: List[str]) -> None: + """Run command with environment variables present.""" + file = ctx.obj['FILE'] + if not os.path.isfile(file): + raise click.BadParameter( + f'Invalid value for \'-f\' "{file}" does not exist.', + ctx=ctx + ) + dotenv_as_dict = { + k: v + for (k, v) in dotenv_values(file).items() + if v is not None and (override or k not in os.environ) + } + + if not commandline: + click.echo('No command given.') + exit(1) + ret = run_command(commandline, dotenv_as_dict) + exit(ret) + + +def run_command(command: List[str], env: Dict[str, str]) -> int: + """Run command in sub process. + + Runs the command in a sub process with the variables from `env` + added in the current environment variables. 
+ + Parameters + ---------- + command: List[str] + The command and it's parameters + env: Dict + The additional environment variables + + Returns + ------- + int + The return code of the command + + """ + # copy the current environment variables and add the vales from + # `env` + cmd_env = os.environ.copy() + cmd_env.update(env) + + p = Popen(command, + universal_newlines=True, + bufsize=0, + shell=False, + env=cmd_env) + _, _ = p.communicate() + + return p.returncode diff --git a/vllm/lib/python3.10/site-packages/dotenv/variables.py b/vllm/lib/python3.10/site-packages/dotenv/variables.py new file mode 100644 index 0000000000000000000000000000000000000000..667f2f26ff2182ecdfc5b809ba97a6cf1d1be13a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dotenv/variables.py @@ -0,0 +1,86 @@ +import re +from abc import ABCMeta, abstractmethod +from typing import Iterator, Mapping, Optional, Pattern + +_posix_variable: Pattern[str] = re.compile( + r""" + \$\{ + (?P[^\}:]*) + (?::- + (?P[^\}]*) + )? + \} + """, + re.VERBOSE, +) + + +class Atom(metaclass=ABCMeta): + def __ne__(self, other: object) -> bool: + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + return not result + + @abstractmethod + def resolve(self, env: Mapping[str, Optional[str]]) -> str: ... 
+ + +class Literal(Atom): + def __init__(self, value: str) -> None: + self.value = value + + def __repr__(self) -> str: + return f"Literal(value={self.value})" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, self.__class__): + return NotImplemented + return self.value == other.value + + def __hash__(self) -> int: + return hash((self.__class__, self.value)) + + def resolve(self, env: Mapping[str, Optional[str]]) -> str: + return self.value + + +class Variable(Atom): + def __init__(self, name: str, default: Optional[str]) -> None: + self.name = name + self.default = default + + def __repr__(self) -> str: + return f"Variable(name={self.name}, default={self.default})" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, self.__class__): + return NotImplemented + return (self.name, self.default) == (other.name, other.default) + + def __hash__(self) -> int: + return hash((self.__class__, self.name, self.default)) + + def resolve(self, env: Mapping[str, Optional[str]]) -> str: + default = self.default if self.default is not None else "" + result = env.get(self.name, default) + return result if result is not None else "" + + +def parse_variables(value: str) -> Iterator[Atom]: + cursor = 0 + + for match in _posix_variable.finditer(value): + (start, end) = match.span() + name = match["name"] + default = match["default"] + + if start > cursor: + yield Literal(value=value[cursor:start]) + + yield Variable(name=name, default=default) + cursor = end + + length = len(value) + if cursor < length: + yield Literal(value=value[cursor:length]) diff --git a/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/INSTALLER @@ 
-0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/License.txt b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/License.txt new file mode 100644 index 0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/License.txt @@ -0,0 +1,1568 @@ +End User License Agreement +-------------------------- + + +Preface +------- + +The Software License Agreement in Chapter 1 and the Supplement +in Chapter 2 contain license terms and conditions that govern +the use of NVIDIA software. By accepting this agreement, you +agree to comply with all the terms and conditions applicable +to the product(s) included herein. + + +NVIDIA Driver + + +Description + +This package contains the operating system driver and +fundamental system software components for NVIDIA GPUs. + + +NVIDIA CUDA Toolkit + + +Description + +The NVIDIA CUDA Toolkit provides command-line and graphical +tools for building, debugging and optimizing the performance +of applications accelerated by NVIDIA GPUs, runtime and math +libraries, and documentation including programming guides, +user manuals, and API references. + + +Default Install Location of CUDA Toolkit + +Windows platform: + +%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.# + +Linux platform: + +/usr/local/cuda-#.# + +Mac platform: + +/Developer/NVIDIA/CUDA-#.# + + +NVIDIA CUDA Samples + + +Description + +This package includes over 100+ CUDA examples that demonstrate +various CUDA programming principles, and efficient CUDA +implementation of algorithms in specific application domains. 
+ + +Default Install Location of CUDA Samples + +Windows platform: + +%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.# + +Linux platform: + +/usr/local/cuda-#.#/samples + +and + +$HOME/NVIDIA_CUDA-#.#_Samples + +Mac platform: + +/Developer/NVIDIA/CUDA-#.#/samples + + +NVIDIA Nsight Visual Studio Edition (Windows only) + + +Description + +NVIDIA Nsight Development Platform, Visual Studio Edition is a +development environment integrated into Microsoft Visual +Studio that provides tools for debugging, profiling, analyzing +and optimizing your GPU computing and graphics applications. + + +Default Install Location of Nsight Visual Studio Edition + +Windows platform: + +%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.# + + +1. License Agreement for NVIDIA Software Development Kits +--------------------------------------------------------- + + +Release Date: July 26, 2018 +--------------------------- + + +Important NoticeRead before downloading, installing, +copying or using the licensed software: +------------------------------------------------------- + +This license agreement, including exhibits attached +("Agreement”) is a legal agreement between you and NVIDIA +Corporation ("NVIDIA") and governs your use of a NVIDIA +software development kit (“SDK”). + +Each SDK has its own set of software and materials, but here +is a description of the types of items that may be included in +a SDK: source code, header files, APIs, data sets and assets +(examples include images, textures, models, scenes, videos, +native API input/output files), binary software, sample code, +libraries, utility programs, programming code and +documentation. + +This Agreement can be accepted only by an adult of legal age +of majority in the country in which the SDK is used. 
+ +If you are entering into this Agreement on behalf of a company +or other legal entity, you represent that you have the legal +authority to bind the entity to this Agreement, in which case +“you” will mean the entity you represent. + +If you don’t have the required age or authority to accept +this Agreement, or if you don’t accept all the terms and +conditions of this Agreement, do not download, install or use +the SDK. + +You agree to use the SDK only for purposes that are permitted +by (a) this Agreement, and (b) any applicable law, regulation +or generally accepted practices or guidelines in the relevant +jurisdictions. + + +1.1. License + + +1.1.1. License Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants +you a non-exclusive, non-transferable license, without the +right to sublicense (except as expressly provided in this +Agreement) to: + + 1. Install and use the SDK, + + 2. Modify and create derivative works of sample source code + delivered in the SDK, and + + 3. Distribute those portions of the SDK that are identified + in this Agreement as distributable, as incorporated in + object code format into a software application that meets + the distribution requirements indicated in this Agreement. + + +1.1.2. Distribution Requirements + +These are the distribution requirements for you to exercise +the distribution grant: + + 1. Your application must have material additional + functionality, beyond the included portions of the SDK. + + 2. The distributable portions of the SDK shall only be + accessed by your application. + + 3. The following notice shall be included in modifications + and derivative works of sample source code distributed: + “This software contains source code provided by NVIDIA + Corporation.” + + 4. Unless a developer tool is identified in this Agreement + as distributable, it is delivered for your internal use + only. + + 5. 
The terms under which you distribute your application + must be consistent with the terms of this Agreement, + including (without limitation) terms relating to the + license grant and license restrictions and protection of + NVIDIA’s intellectual property rights. Additionally, you + agree that you will protect the privacy, security and + legal rights of your application users. + + 6. You agree to notify NVIDIA in writing of any known or + suspected distribution or use of the SDK not in compliance + with the requirements of this Agreement, and to enforce + the terms of your agreements with respect to distributed + SDK. + + +1.1.3. Authorized Users + +You may allow employees and contractors of your entity or of +your subsidiary(ies) to access and use the SDK from your +secure network to perform work on your behalf. + +If you are an academic institution you may allow users +enrolled or employed by the academic institution to access and +use the SDK from your secure network. + +You are responsible for the compliance with the terms of this +Agreement by your authorized users. If you become aware that +your authorized users didn’t follow the terms of this +Agreement, you agree to take reasonable steps to resolve the +non-compliance and prevent new occurrences. + + +1.1.4. Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or +otherwise as pre-release, may not be fully functional, may +contain errors or design flaws, and may have reduced or +different security, privacy, accessibility, availability, and +reliability standards relative to commercial versions of +NVIDIA software and materials. Use of a pre-release SDK may +result in unexpected results, loss of data, project delays or +other unpredictable damage or loss. + +You may use a pre-release SDK at your own risk, understanding +that pre-release SDKs are not intended for use in production +or business-critical systems. 
+ +NVIDIA may choose not to make available a commercial version +of any pre-release SDK. NVIDIA may also choose to abandon +development and terminate the availability of a pre-release +SDK at any time without liability. + + +1.1.5. Updates + +NVIDIA may, at its option, make available patches, workarounds +or other updates to this SDK. Unless the updates are provided +with their separate governing terms, they are deemed part of +the SDK licensed to you as provided in this Agreement. You +agree that the form and content of the SDK that NVIDIA +provides may change without prior notice to you. While NVIDIA +generally maintains compatibility between versions, NVIDIA may +in some cases make changes that introduce incompatibilities in +future versions of the SDK. + + +1.1.6. Third Party Licenses + +The SDK may come bundled with, or otherwise include or be +distributed with, third party software licensed by a NVIDIA +supplier and/or open source software provided under an open +source license. Use of third party software is subject to the +third-party license terms, or in the absence of third party +terms, the terms of this Agreement. Copyright to third party +software is held by the copyright holders indicated in the +third-party software or license. + + +1.1.7. Reservation of Rights + +NVIDIA reserves all rights, title, and interest in and to the +SDK, not expressly granted to you under this Agreement. + + +1.2. Limitations + +The following license limitations apply to your use of the +SDK: + + 1. You may not reverse engineer, decompile or disassemble, + or remove copyright or other proprietary notices from any + portion of the SDK or copies of the SDK. + + 2. Except as expressly provided in this Agreement, you may + not copy, sell, rent, sublicense, transfer, distribute, + modify, or create derivative works of any portion of the + SDK. For clarity, you may not distribute or sublicense the + SDK as a stand-alone product. + + 3. 
Unless you have an agreement with NVIDIA for this + purpose, you may not indicate that an application created + with the SDK is sponsored or endorsed by NVIDIA. + + 4. You may not bypass, disable, or circumvent any + encryption, security, digital rights management or + authentication mechanism in the SDK. + + 5. You may not use the SDK in any manner that would cause it + to become subject to an open source software license. As + examples, licenses that require as a condition of use, + modification, and/or distribution that the SDK be: + + a. Disclosed or distributed in source code form; + + b. Licensed for the purpose of making derivative works; + or + + c. Redistributable at no charge. + + 6. Unless you have an agreement with NVIDIA for this + purpose, you may not use the SDK with any system or + application where the use or failure of the system or + application can reasonably be expected to threaten or + result in personal injury, death, or catastrophic loss. + Examples include use in avionics, navigation, military, + medical, life support or other life critical applications. + NVIDIA does not design, test or manufacture the SDK for + these critical uses and NVIDIA shall not be liable to you + or any third party, in whole or in part, for any claims or + damages arising from such uses. + + 7. You agree to defend, indemnify and hold harmless NVIDIA + and its affiliates, and their respective employees, + contractors, agents, officers and directors, from and + against any and all claims, damages, obligations, losses, + liabilities, costs or debt, fines, restitutions and + expenses (including but not limited to attorney’s fees + and costs incident to establishing the right of + indemnification) arising out of or related to your use of + the SDK outside of the scope of this Agreement, or not in + compliance with its terms. + + +1.3. Ownership + + 1. 
NVIDIA or its licensors hold all rights, title and + interest in and to the SDK and its modifications and + derivative works, including their respective intellectual + property rights, subject to your rights described in this + section. This SDK may include software and materials from + NVIDIA’s licensors, and these licensors are intended + third party beneficiaries that may enforce this Agreement + with respect to their intellectual property rights. + + 2. You hold all rights, title and interest in and to your + applications and your derivative works of the sample + source code delivered in the SDK, including their + respective intellectual property rights, subject to + NVIDIA’s rights described in this section. + + 3. You may, but don’t have to, provide to NVIDIA + suggestions, feature requests or other feedback regarding + the SDK, including possible enhancements or modifications + to the SDK. For any feedback that you voluntarily provide, + you hereby grant NVIDIA and its affiliates a perpetual, + non-exclusive, worldwide, irrevocable license to use, + reproduce, modify, license, sublicense (through multiple + tiers of sublicensees), and distribute (through multiple + tiers of distributors) it without the payment of any + royalties or fees to you. NVIDIA will use feedback at its + choice. NVIDIA is constantly looking for ways to improve + its products, so you may send feedback to NVIDIA through + the developer portal at https://developer.nvidia.com. + + +1.4. No Warranties + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL +FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND +ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND +OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, +BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE +ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. 
NO +WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF +DEALING OR COURSE OF TRADE. + + +1.5. Limitation of Liability + +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS +AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, +PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS +OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF +PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION +WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, +WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH +OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), +PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF +LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES +TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS +AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE +NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS +LIMIT. + +These exclusions and limitations of liability shall apply +regardless if NVIDIA or its affiliates have been advised of +the possibility of such damages, and regardless of whether a +remedy fails its essential purpose. These exclusions and +limitations of liability form an essential basis of the +bargain between the parties, and, absent any of these +exclusions or limitations of liability, the provisions of this +Agreement, including, without limitation, the economic terms, +would be substantially different. + + +1.6. Termination + + 1. This Agreement will continue to apply until terminated by + either you or NVIDIA as described below. + + 2. If you want to terminate this Agreement, you may do so by + stopping to use the SDK. + + 3. NVIDIA may, at any time, terminate this Agreement if: + + a. (i) you fail to comply with any term of this + Agreement and the non-compliance is not fixed within + thirty (30) days following notice from NVIDIA (or + immediately if you violate NVIDIA’s intellectual + property rights); + + b. 
(ii) you commence or participate in any legal + proceeding against NVIDIA with respect to the SDK; or + + c. (iii) NVIDIA decides to no longer provide the SDK in + a country or, in NVIDIA’s sole discretion, the + continued use of it is no longer commercially viable. + + 4. Upon any termination of this Agreement, you agree to + promptly discontinue use of the SDK and destroy all copies + in your possession or control. Your prior distributions in + accordance with this Agreement are not affected by the + termination of this Agreement. Upon written request, you + will certify in writing that you have complied with your + commitments under this section. Upon any termination of + this Agreement all provisions survive except for the + license grant provisions. + + +1.7. General + +If you wish to assign this Agreement or your rights and +obligations, including by merger, consolidation, dissolution +or operation of law, contact NVIDIA to ask for permission. Any +attempted assignment not approved by NVIDIA in writing shall +be void and of no effect. NVIDIA may assign, delegate or +transfer this Agreement and its rights and obligations, and if +to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably +requested information to verify your compliance with this +Agreement. + +This Agreement will be governed in all respects by the laws of +the United States and of the State of Delaware as those laws +are applied to contracts entered into and performed entirely +within Delaware by Delaware residents, without regard to the +conflicts of laws principles. The United Nations Convention on +Contracts for the International Sale of Goods is specifically +disclaimed. You agree to all terms of this Agreement in the +English language. + +The state or federal courts residing in Santa Clara County, +California shall have exclusive jurisdiction over any dispute +or claim arising out of this Agreement. 
Notwithstanding this, +you agree that NVIDIA shall still be allowed to apply for +injunctive remedies or an equivalent type of urgent legal +relief in any jurisdiction. + +If any court of competent jurisdiction determines that any +provision of this Agreement is illegal, invalid or +unenforceable, such provision will be construed as limited to +the extent necessary to be consistent with and fully +enforceable under the law and the remaining provisions will +remain in full force and effect. Unless otherwise specified, +remedies are cumulative. + +Each party acknowledges and agrees that the other is an +independent contractor in the performance of this Agreement. + +The SDK has been developed entirely at private expense and is +“commercial items” consisting of “commercial computer +software” and “commercial computer software +documentation” provided with RESTRICTED RIGHTS. Use, +duplication or disclosure by the U.S. Government or a U.S. +Government subcontractor is subject to the restrictions in +this Agreement pursuant to DFARS 227.7202-3(a) or as set forth +in subparagraphs (c)(1) and (2) of the Commercial Computer +Software - Restricted Rights clause at FAR 52.227-19, as +applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas +Expressway, Santa Clara, CA 95051. + +The SDK is subject to United States export laws and +regulations. You agree that you will not ship, transfer or +export the SDK into any country, or use the SDK in any manner, +prohibited by the United States Bureau of Industry and +Security or economic sanctions regulations administered by the +U.S. Department of Treasury’s Office of Foreign Assets +Control (OFAC), or any applicable export laws, restrictions or +regulations. These laws include restrictions on destinations, +end users and end use. By accepting this Agreement, you +confirm that you are not a resident or citizen of any country +currently embargoed by the U.S. and that you are not otherwise +prohibited from receiving the SDK. 
+ +Any notice delivered by NVIDIA to you under this Agreement +will be delivered via mail, email or fax. You agree that any +notices that NVIDIA sends you electronically will satisfy any +legal communication requirements. Please direct your legal +notices or other correspondence to NVIDIA Corporation, 2788 +San Tomas Expressway, Santa Clara, California 95051, United +States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this +Agreement constitute the entire agreement of the parties with +respect to the subject matter of this Agreement and supersede +all prior negotiations or documentation exchanged between the +parties relating to this SDK license. Any additional and/or +conflicting terms on documents issued by you are null, void, +and invalid. Any amendment or waiver under this Agreement +shall be in writing and signed by representatives of both +parties. + + +2. CUDA Toolkit Supplement to Software License Agreement for +NVIDIA Software Development Kits +------------------------------------------------------------ + + +Release date: August 16, 2018 +----------------------------- + +The terms in this supplement govern your use of the NVIDIA +CUDA Toolkit SDK under the terms of your license agreement +(“Agreement”) as modified by this supplement. Capitalized +terms used but not defined below have the meaning assigned to +them in the Agreement. + +This supplement is an exhibit to the Agreement and is +incorporated as an integral part of the Agreement. In the +event of conflict between the terms in this supplement and the +terms in the Agreement, the terms in this supplement govern. + + +2.1. License Scope + +The SDK is licensed for you to develop applications only for +use in systems with NVIDIA GPUs. + + +2.2. Distribution + +The portions of the SDK that are distributable under the +Agreement are listed in Attachment A. + + +2.3. 
Operating Systems + +Those portions of the SDK designed exclusively for use on the +Linux or FreeBSD operating systems, or other operating systems +derived from the source code to these operating systems, may +be copied and redistributed for use in accordance with this +Agreement, provided that the object code files are not +modified in any way (except for unzipping of compressed +files). + + +2.4. Audio and Video Encoders and Decoders + +You acknowledge and agree that it is your sole responsibility +to obtain any additional third-party licenses required to +make, have made, use, have used, sell, import, and offer for +sale your products or services that include or incorporate any +third-party software and content relating to audio and/or +video encoders and decoders from, including but not limited +to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., +MPEG-LA, and Coding Technologies. NVIDIA does not grant to you +under this Agreement any necessary patent or other rights with +respect to any audio and/or video encoders and decoders. + + +2.5. Licensing + +If the distribution terms in this Agreement are not suitable +for your organization, or for any questions regarding this +Agreement, please contact NVIDIA at +nvidia-compute-license-questions@nvidia.com. + + +2.6. 
Attachment A + +The following portions of the SDK are distributable under the +Agreement: + +Component + +CUDA Runtime + +Windows + +cudart.dll, cudart_static.lib, cudadevrt.lib + +Mac OSX + +libcudart.dylib, libcudart_static.a, libcudadevrt.a + +Linux + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Android + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Component + +CUDA FFT Library + +Windows + +cufft.dll, cufftw.dll, cufft.lib, cufftw.lib + +Mac OSX + +libcufft.dylib, libcufft_static.a, libcufftw.dylib, +libcufftw_static.a + +Linux + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Android + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Component + +CUDA BLAS Library + +Windows + +cublas.dll, cublasLt.dll + +Mac OSX + +libcublas.dylib, libcublasLt.dylib, libcublas_static.a, +libcublasLt_static.a + +Linux + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Android + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Component + +NVIDIA "Drop-in" BLAS Library + +Windows + +nvblas.dll + +Mac OSX + +libnvblas.dylib + +Linux + +libnvblas.so + +Component + +CUDA Sparse Matrix Library + +Windows + +cusparse.dll, cusparse.lib + +Mac OSX + +libcusparse.dylib, libcusparse_static.a + +Linux + +libcusparse.so, libcusparse_static.a + +Android + +libcusparse.so, libcusparse_static.a + +Component + +CUDA Linear Solver Library + +Windows + +cusolver.dll, cusolver.lib + +Mac OSX + +libcusolver.dylib, libcusolver_static.a + +Linux + +libcusolver.so, libcusolver_static.a + +Android + +libcusolver.so, libcusolver_static.a + +Component + +CUDA Random Number Generation Library + +Windows + +curand.dll, curand.lib + +Mac OSX + +libcurand.dylib, libcurand_static.a + +Linux + +libcurand.so, libcurand_static.a + +Android + +libcurand.so, libcurand_static.a + +Component + +CUDA Accelerated Graph Library + +Component + +NVIDIA Performance Primitives Library + +Windows + +nppc.dll, 
nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
+nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
+nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
+nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
+nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
+
+Mac OSX
+
+libnppc.dylib, libnppc_static.a, libnppial.dylib,
+libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
+libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
+libnppidei_static.a, libnppif.dylib, libnppif_static.a,
+libnppig.dylib, libnppig_static.a, libnppim.dylib,
+libnppim_static.a, libnppist.dylib, libnppist_static.a,
+libnppisu.dylib, libnppisu_static.a, libnppitc.dylib,
+libnppitc_static.a, libnpps.dylib, libnpps_static.a
+
+Linux
+
+libnppc.so, libnppc_static.a, libnppial.so,
+libnppial_static.a, libnppicc.so, libnppicc_static.a,
+libnppicom.so, libnppicom_static.a, libnppidei.so,
+libnppidei_static.a, libnppif.so, libnppif_static.a,
+libnppig.so, libnppig_static.a, libnppim.so,
+libnppim_static.a, libnppist.so, libnppist_static.a,
+libnppisu.so, libnppisu_static.a, libnppitc.so,
+libnppitc_static.a, libnpps.so, libnpps_static.a
+
+Android
+
+libnppc.so, libnppc_static.a, libnppial.so,
+libnppial_static.a, libnppicc.so, libnppicc_static.a,
+libnppicom.so, libnppicom_static.a, libnppidei.so,
+libnppidei_static.a, libnppif.so, libnppif_static.a,
+libnppig.so, libnppig_static.a, libnppim.so,
+libnppim_static.a, libnppist.so, libnppist_static.a,
+libnppisu.so, libnppisu_static.a, libnppitc.so,
+libnppitc_static.a, libnpps.so, libnpps_static.a
+
+Component
+
+NVIDIA JPEG Library
+
+Linux
+
+libnvjpeg.so, libnvjpeg_static.a
+
+Component
+
+Internal common library required for statically linking to
+cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
+
+Mac OSX
+
+libculibos.a
+
+Linux
+
+libculibos.a
+
+Component
+
+NVIDIA Runtime Compilation Library and Header
+
+All
+
+nvrtc.h
+
+Windows
+
+nvrtc.dll, nvrtc-builtins.dll
+
+Mac OSX
+
+libnvrtc.dylib, libnvrtc-builtins.dylib
+
+Linux
+
+libnvrtc.so, libnvrtc-builtins.so
+
+Component
+
+NVIDIA Optimizing 
Compiler Library + +Windows + +nvvm.dll + +Mac OSX + +libnvvm.dylib + +Linux + +libnvvm.so + +Component + +NVIDIA Common Device Math Functions Library + +Windows + +libdevice.10.bc + +Mac OSX + +libdevice.10.bc + +Linux + +libdevice.10.bc + +Component + +CUDA Occupancy Calculation Header Library + +All + +cuda_occupancy.h + +Component + +CUDA Half Precision Headers + +All + +cuda_fp16.h, cuda_fp16.hpp + +Component + +CUDA Profiling Tools Interface (CUPTI) Library + +Windows + +cupti.dll + +Mac OSX + +libcupti.dylib + +Linux + +libcupti.so + +Component + +NVIDIA Tools Extension Library + +Windows + +nvToolsExt.dll, nvToolsExt.lib + +Mac OSX + +libnvToolsExt.dylib + +Linux + +libnvToolsExt.so + +Component + +NVIDIA CUDA Driver Libraries + +Linux + +libcuda.so, libnvidia-fatbinaryloader.so, +libnvidia-ptxjitcompiler.so + +The NVIDIA CUDA Driver Libraries are only distributable in +applications that meet this criteria: + + 1. The application was developed starting from a NVIDIA CUDA + container obtained from Docker Hub or the NVIDIA GPU + Cloud, and + + 2. The resulting application is packaged as a Docker + container and distributed to users on Docker Hub or the + NVIDIA GPU Cloud only. + + +2.7. Attachment B + + +Additional Licensing Obligations + +The following third party components included in the SOFTWARE +are licensed to Licensee pursuant to the following terms and +conditions: + + 1. Licensee's use of the GDB third party component is + subject to the terms and conditions of GNU GPL v3: + + This product includes copyrighted third-party software licensed + under the terms of the GNU General Public License v3 ("GPL v3"). + All third-party software packages are copyright by their respective + authors. 
GPL v3 terms and conditions are hereby incorporated into + the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt + + Consistent with these licensing requirements, the software + listed below is provided under the terms of the specified + open source software licenses. To obtain source code for + software provided under licenses that require + redistribution of source code, including the GNU General + Public License (GPL) and GNU Lesser General Public License + (LGPL), contact oss-requests@nvidia.com. This offer is + valid for a period of three (3) years from the date of the + distribution of this product by NVIDIA CORPORATION. + + Component License + CUDA-GDB GPL v3 + + 2. Licensee represents and warrants that any and all third + party licensing and/or royalty payment obligations in + connection with Licensee's use of the H.264 video codecs + are solely the responsibility of Licensee. + + 3. Licensee's use of the Thrust library is subject to the + terms and conditions of the Apache License Version 2.0. + All third-party software packages are copyright by their + respective authors. Apache License Version 2.0 terms and + conditions are hereby incorporated into the Agreement by + this reference. + http://www.apache.org/licenses/LICENSE-2.0.html + + In addition, Licensee acknowledges the following notice: + Thrust includes source code from the Boost Iterator, + Tuple, System, and Random Number libraries. + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . 
+ + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 4. Licensee's use of the LLVM third party component is + subject to the following terms and conditions: + + ====================================================== + LLVM Release License + ====================================================== + University of Illinois/NCSA + Open Source License + + Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. + All rights reserved. 
+ + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal with the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at Urbana- + Champaign, nor the names of its contributors may be used to endorse or + promote products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS WITH THE SOFTWARE. + + 5. Licensee's use (e.g. nvprof) of the PCRE third party + component is subject to the following terms and + conditions: + + ------------ + PCRE LICENCE + ------------ + PCRE is a library of functions to support regular expressions whose syntax + and semantics are as close as possible to those of the Perl 5 language. 
+
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
+ specified below. The documentation for PCRE, supplied in the "doc"
+ directory, is distributed under the same terms as the software itself. The
+ basic library functions are written in C and are freestanding. Also
+ included in the distribution is a set of C++ wrapper functions, and a just-
+ in-time compiler that can be used to optimize pattern matching. These are
+ both optional features that can be omitted when the library is built.
+
+ THE BASIC LIBRARY FUNCTIONS
+ ---------------------------
+ Written by: Philip Hazel
+ Email local part: ph10
+ Email domain: cam.ac.uk
+ University of Cambridge Computing Service,
+ Cambridge, England.
+ Copyright (c) 1997-2012 University of Cambridge
+ All rights reserved.
+
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
+ -------------------------------------
+ Written by: Zoltan Herczeg
+ Email local part: hzmester
+ Email domain: freemail.hu
+ Copyright(c) 2010-2012 Zoltan Herczeg
+ All rights reserved.
+
+ STACK-LESS JUST-IN-TIME COMPILER
+ --------------------------------
+ Written by: Zoltan Herczeg
+ Email local part: hzmester
+ Email domain: freemail.hu
+ Copyright(c) 2009-2012 Zoltan Herczeg
+ All rights reserved.
+
+ THE C++ WRAPPER FUNCTIONS
+ -------------------------
+ Contributed by: Google Inc.
+ Copyright (c) 2007-2012, Google Inc.
+ All rights reserved.
+
+ THE "BSD" LICENCE
+ -----------------
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution. 
+ + * Neither the name of the University of Cambridge nor the name of Google + Inc. nor the names of their contributors may be used to endorse or + promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 6. Some of the cuBLAS library routines were written by or + derived from code written by Vasily Volkov and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2007-2009, Regents of the University of California + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. 
+ * Neither the name of the University of California, Berkeley nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 7. Some of the cuBLAS library routines were written by or + derived from code written by Davide Barbieri and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 8. Some of the cuBLAS library routines were derived from + code developed by the University of Tennessee and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2010 The University of Tennessee. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer listed in this license in the documentation and/or + other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 9. Some of the cuBLAS library routines were written by or + derived from code written by Jonathan Hogg and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2012, The Science and Technology Facilities Council (STFC). + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the STFC nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE STFC BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 10. Some of the cuBLAS library routines were written by or + derived from code written by Ahmad M. Abdelfattah, David + Keyes, and Hatem Ltaief, and are subject to the Apache + License, Version 2.0, as follows: + + -- (C) Copyright 2013 King Abdullah University of Science and Technology + Authors: + Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa) + David Keyes (david.keyes@kaust.edu.sa) + Hatem Ltaief (hatem.ltaief@kaust.edu.sa) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the King Abdullah University of Science and + Technology nor the names of its contributors may be used to endorse + or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + 11. Some of the cuSPARSE library routines were written by or + derived from code written by Li-Wen Chang and are subject + to the NCSA Open Source License as follows: + + Copyright (c) 2012, University of Illinois. + + All rights reserved. + + Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal with the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials provided + with the distribution. + * Neither the names of IMPACT Group, University of Illinois, nor + the names of its contributors may be used to endorse or promote + products derived from this Software without specific prior + written permission. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + 12. Some of the cuRAND library routines were written by or + derived from code written by Mutsuo Saito and Makoto + Matsumoto and are subject to the following license: + + Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + University. All rights reserved. + + Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + University and University of Tokyo. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the Hiroshima University nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 13. Some of the cuRAND library routines were derived from + code developed by D. E. Shaw Research and are subject to + the following license: + + Copyright 2010-2011, D. E. Shaw Research. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of D. E. Shaw Research nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 14. Some of the Math library routines were written by or + derived from code developed by Norbert Juffa and are + subject to the following license: + + Copyright (c) 2015-2017, Norbert Juffa + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 15. Licensee's use of the lz4 third party component is + subject to the following terms and conditions: + + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 16. The NPP library uses code from the Boost Math Toolkit, + and is subject to the following license: + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 17. Portions of the Nsight Eclipse Edition is subject to the + following license: + + The Eclipse Foundation makes available all content in this plug-in + ("Content"). Unless otherwise indicated below, the Content is provided + to you under the terms and conditions of the Eclipse Public License + Version 1.0 ("EPL"). A copy of the EPL is available at http:// + www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program" + will mean the Content. + + If you did not receive this Content directly from the Eclipse + Foundation, the Content is being redistributed by another party + ("Redistributor") and different terms and conditions may apply to your + use of any object code in the Content. Check the Redistributor's + license that was provided with the Content. If no such license exists, + contact the Redistributor. Unless otherwise indicated below, the terms + and conditions of the EPL still apply to any source code in the + Content and such source code may be obtained at http://www.eclipse.org. + + 18. Some of the cuBLAS library routines uses code from + OpenAI, which is subject to the following license: + + License URL + https://github.com/openai/openai-gemm/blob/master/LICENSE + + License Text + The MIT License + + Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + 19. Licensee's use of the Visual Studio Setup Configuration + Samples is subject to the following license: + + The MIT License (MIT) + Copyright (C) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of the Software, + and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + 20. Licensee's use of linmath.h header for CPU functions for + GL vector/matrix operations from lunarG is subject to the + Apache License Version 2.0. + + 21. The DX12-CUDA sample uses the d3dx12.h header, which is + subject to the MIT license . + +----------------- diff --git a/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/METADATA b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..e8ef8db3f0102373f6e6897f513e5a80fb4e4dbd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/METADATA @@ -0,0 +1,36 @@ +Metadata-Version: 2.1 +Name: nvidia-cusparse-cu12 +Version: 12.3.1.170 +Summary: CUSPARSE native runtime libraries +Home-page: https://developer.nvidia.com/cuda-zone +Author: Nvidia CUDA Installer Team +Author-email: cuda_installer@nvidia.com +License: NVIDIA Proprietary Software +Keywords: cuda,nvidia,runtime,machine learning,deep learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: Other/Proprietary License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: 
Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Requires-Python: >=3 +License-File: License.txt +Requires-Dist: nvidia-nvjitlink-cu12 + +CUSPARSE native runtime libraries diff --git a/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/RECORD b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..707383a6bc6bdc2ca13ca09fcf0ca520cac55bab --- /dev/null +++ b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/RECORD @@ -0,0 +1,18 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/include/__pycache__/__init__.cpython-310.pyc,, +nvidia/cusparse/include/cusparse.h,sha256=g-jyaV5nBdnCExOVQOd7lf4lMBoR3KvriiNHUoXNgXw,295187 +nvidia/cusparse/include/cusparse_v2.h,sha256=jkH2A9hYc-TEF0vuQ_SurbhPNEHkYGUIRuxKXhFAqnw,2587 +nvidia/cusparse/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/cusparse/lib/__pycache__/__init__.cpython-310.pyc,, 
+nvidia/cusparse/lib/libcusparse.so.12,sha256=y0KIyWVFOoAgZTwP2SzfzRK7WmRoQ9Zj6dTl9SD4kVw,281313984 +nvidia_cusparse_cu12-12.3.1.170.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_cusparse_cu12-12.3.1.170.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 +nvidia_cusparse_cu12-12.3.1.170.dist-info/METADATA,sha256=ivWTVzZv8-4bxy3u1Ce2wd3qUZeGJ3jTuH3-8u7AILM,1550 +nvidia_cusparse_cu12-12.3.1.170.dist-info/RECORD,, +nvidia_cusparse_cu12-12.3.1.170.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia_cusparse_cu12-12.3.1.170.dist-info/WHEEL,sha256=XDTs3wIbcE-BcRO08VJlZpA6z9OaC1mOKPCGGGwuM2g,109 +nvidia_cusparse_cu12-12.3.1.170.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..e6c30e957cfb045017a9fef3430bb8ee87c4a074 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-manylinux2014_x86_64 + diff --git a/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/top_level.txt b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/nvidia_cusparse_cu12-12.3.1.170.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia diff --git a/vllm/lib/python3.10/site-packages/propcache/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/propcache/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..484e4afe181f659f2f8cbdb99c9dfa31565505ad Binary files /dev/null and b/vllm/lib/python3.10/site-packages/propcache/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/propcache/__pycache__/_helpers_py.cpython-310.pyc b/vllm/lib/python3.10/site-packages/propcache/__pycache__/_helpers_py.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7db9bb7a4340b6c8a2f563e4dd217e96d311ca6d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/propcache/__pycache__/_helpers_py.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/propcache/_helpers_c.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/propcache/_helpers_c.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..bedfa3bc7d567d7aaf57a103b9c2f2b03250c8ab --- /dev/null +++ b/vllm/lib/python3.10/site-packages/propcache/_helpers_c.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5cf5330adcb20c97093c6ca14e37f6cf7064d369ac908f6255d1ffb840bb26a +size 672736 diff --git a/vllm/lib/python3.10/site-packages/starlette/__pycache__/_exception_handler.cpython-310.pyc b/vllm/lib/python3.10/site-packages/starlette/__pycache__/_exception_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c6d9def4e0de9ddd33caecb6dc938187ffe01d0 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/starlette/__pycache__/_exception_handler.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/starlette/__pycache__/applications.cpython-310.pyc b/vllm/lib/python3.10/site-packages/starlette/__pycache__/applications.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3c6b062f2bcd1c835ae76ad701746f7f7e4b035 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/starlette/__pycache__/applications.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/starlette/__pycache__/exceptions.cpython-310.pyc b/vllm/lib/python3.10/site-packages/starlette/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee6625f0f085d83ffc9391270689388de2493a66 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/starlette/__pycache__/exceptions.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/starlette/__pycache__/types.cpython-310.pyc b/vllm/lib/python3.10/site-packages/starlette/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b965c5588ef5d7006c10821a1b9ae787e01311f1 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/starlette/__pycache__/types.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/starlette/__pycache__/websockets.cpython-310.pyc b/vllm/lib/python3.10/site-packages/starlette/__pycache__/websockets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06be1d8a727429a266a1ddc7cb9bef1fc44cda25 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/starlette/__pycache__/websockets.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c5f7799ca5dacefda0647829ac9e7e8a57597e2 Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/errors.cpython-310.pyc b/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14db3a965133b432ca7f3ac53d71e50d96d1f675 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/errors.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/httpsredirect.cpython-310.pyc b/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/httpsredirect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fd861a38ed2946bd2e05cd55339d199f2198485 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/httpsredirect.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/sessions.cpython-310.pyc b/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/sessions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3d233da304b1c771e453683a3f90db37a3be15f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/sessions.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/wsgi.cpython-310.pyc b/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/wsgi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..989330a43f277db99ada926fc71ca3a789584a5e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/starlette/middleware/__pycache__/wsgi.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wids/__init__.py b/vllm/lib/python3.10/site-packages/wids/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..34208c1e0b6992fd30ce88fc607f7f20149ca1e3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wids/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) 2017-2019 NVIDIA CORPORATION. All rights reserved. +# This file is part of the WebDataset library. +# See the LICENSE file for licensing terms (BSD-style). +# +# flake8: noqa + +from .wids import ( + ChunkedSampler, + DistributedChunkedSampler, + ShardedSampler, + ShardListDataset, +) diff --git a/vllm/lib/python3.10/site-packages/wids/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wids/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0965f68efe4a826eaa713ae83839f5d5b7cca061 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wids/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wids/__pycache__/wids.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ac42adae3691be2da3617c53eff3a1b34b68ff3 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_bench.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_bench.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92822846cee5f3af910034f1786b0e793aed8477 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_bench.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_cleanup.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_cleanup.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5689807951abcd33808637195e364f5b55e33700 Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_cleanup.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_dir.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_dir.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..beb2f00ed6d0c4c62c23bd9c201b8b8a52477842 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_dir.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_dl.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_dl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37f96a2a1e3e6e00f3c08e0a394691a855c86c9b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_dl.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_index.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_index.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6fbca60bd016ffcf15cf9c89637e465ccd766b07 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_index.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_lru.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_lru.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..493949d23ac4dee413cba7cd2e92bce34da34006 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_lru.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_mmtar.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_mmtar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22a63d615bab2c777d812fd245b747445a191eec Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_mmtar.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_specs.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_specs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18522d0d9568d6643b5735e9218223df6934f3e3 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_specs.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_tar.cpython-310.pyc b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_tar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e03e8bc56b6c5294e65e5cd628ceb37f9f405df Binary files /dev/null and b/vllm/lib/python3.10/site-packages/wids/__pycache__/wids_tar.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/wids/wids.py b/vllm/lib/python3.10/site-packages/wids/wids.py new file mode 100644 index 0000000000000000000000000000000000000000..31fbfe04f8640c724cad3a1d168422359f9b2fb3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wids/wids.py @@ -0,0 +1,762 @@ +import base64 +import gzip +import hashlib +import io +import os +import random +import re +import sqlite3 +import sys +import uuid +import warnings +from functools import partial +from typing import Any, BinaryIO, Dict, Optional, TypeVar, Union +from urllib.parse import quote, urlparse + +import numpy as np +import torch.distributed as dist + +from .wids_dl import download_and_open +from .wids_lru import LRUCache +from .wids_mmtar import MMIndexedTar +from .wids_specs import load_dsdesc_and_resolve, urldir +from .wids_tar import TarFileReader, find_index_file + +try: + from torch.utils.data import Dataset, Sampler +except ImportError: + + class Dataset: + pass + + class Sampler: + pass + + +T = TypeVar("T") + +T_co = TypeVar("T_co", covariant=True) + + +def compute_file_md5sum(fname: Union[str, BinaryIO], 
chunksize: int = 1000000) -> str: + """Compute the md5sum of a file in chunks. + + Parameters + ---------- + fname : Union[str, BinaryIO] + Filename or file object + chunksize : int, optional + Chunk size in bytes, by default 1000000 + + Returns + ------- + str + MD5 sum of the file + + Examples + -------- + >>> compute_file_md5sum("test.txt") + 'd41d8cd98f00b204e9800998ecf8427e' + """ + md5 = hashlib.md5() + if isinstance(fname, str): + with open(fname, "rb") as f: + for chunk in iter(lambda: f.read(chunksize), b""): + md5.update(chunk) + else: + fname.seek(0) + for chunk in iter(lambda: fname.read(chunksize), b""): + md5.update(chunk) + return md5.hexdigest() + + +def compute_file_md5sum(fname: Union[str, BinaryIO], chunksize: int = 1000000) -> str: + """Compute the md5sum of a file in chunks.""" + md5 = hashlib.md5() + if isinstance(fname, str): + with open(fname, "rb") as f: + for chunk in iter(lambda: f.read(chunksize), b""): + md5.update(chunk) + else: + fname.seek(0) + for chunk in iter(lambda: fname.read(chunksize), b""): + md5.update(chunk) + return md5.hexdigest() + + +def compute_num_samples(fname): + ds = IndexedTarSamples(path=fname) + return len(ds) + + +def splitname(fname): + """Returns the basename and extension of a filename""" + assert "." in fname, "Filename must have an extension" + basename, extension = re.match(r"^((?:.*/)?.*?)(\..*)$", fname).groups() + return basename, extension + + +def group_by_key(names): + """Group the file names by key. + + Args: + names: A list of file names. + + Returns: + A list of lists of indices, where each sublist contains indices of files + with the same key. + """ + groups = [] + last_key = None + current = [] + for i, fname in enumerate(names): + # Ignore files that are not in a subdirectory. + if "." 
not in fname: + print(f"Warning: Ignoring file {fname} (no '.')") + continue + key, ext = splitname(fname) + if key != last_key: + if current: + groups.append(current) + current = [] + last_key = key + current.append(i) + if current: + groups.append(current) + return groups + + +def default_decoder(sample: Dict[str, Any], format: Optional[Union[bool, str]] = True): + """A default decoder for webdataset. + + This handles common file extensions: .txt, .cls, .cls2, + .jpg, .png, .json, .npy, .mp, .pt, .pth, .pickle, .pkl. + These are the most common extensions used in webdataset. + For other extensions, users can provide their own decoder. + + Args: + sample: sample, modified in place + """ + sample = dict(sample) + for key, stream in sample.items(): + extensions = key.split(".") + if len(extensions) < 1: + continue + extension = extensions[-1] + if extension in ["gz"]: + decompressed = gzip.decompress(stream.read()) + stream = io.BytesIO(decompressed) + if len(extensions) < 2: + sample[key] = stream + continue + extension = extensions[-2] + if key.startswith("__"): + continue + elif extension in ["txt", "text"]: + value = stream.read() + sample[key] = value.decode("utf-8") + elif extension in ["cls", "cls2"]: + value = stream.read() + sample[key] = int(value.decode("utf-8")) + elif extension in ["jpg", "png", "ppm", "pgm", "pbm", "pnm"]: + if format == "PIL": + import PIL.Image + + sample[key] = PIL.Image.open(stream) + elif format == "numpy": + import numpy as np + + sample[key] = np.asarray(PIL.Image.open(stream)) + else: + raise ValueError(f"Unknown format: {format}") + elif extension == "json": + import json + + value = stream.read() + sample[key] = json.loads(value) + elif extension == "npy": + import numpy as np + + sample[key] = np.load(stream) + elif extension == "mp": + import msgpack + + value = stream.read() + sample[key] = msgpack.unpackb(value, raw=False) + elif extension in ["pt", "pth"]: + import torch + + sample[key] = torch.load(stream) + elif 
extension in ["pickle", "pkl"]: + import pickle + + sample[key] = pickle.load(stream) + return sample + + +open_itfs = {} + + +class IndexedTarSamples: + """A class that accesses samples in a tar file. The tar file must follow + WebDataset conventions. The tar file is indexed when the IndexedTarSamples + object is created. The samples are accessed by index using the __getitem__ + method. The __getitem__ method returns a dictionary containing the files + for the sample. The key for each file is the extension of the file name. + The key "__key__" is reserved for the key of the sample (the basename of + each file without the extension). For example, if the tar file contains + the files "sample1.jpg" and "sample1.txt", then the sample with key + "sample1" will be returned as the dictionary {"jpg": ..., "txt": ...}. + """ + + def __init__( + self, + *, + path=None, + stream=None, + md5sum=None, + expected_size=None, + use_mmap=True, + index_file=find_index_file, + ): + assert path is not None or stream is not None + + # Create TarFileReader object to read from tar_file + self.path = path + stream = self.stream = stream or open(path, "rb") + + # verify the MD5 sum + if md5sum is not None: + stream.seek(0) + got = compute_file_md5sum(stream) + assert got == md5sum, f"MD5 sum mismatch: expected {md5sum}, got {got}" + stream.seek(0) + + # use either the mmap or the stream based implementation + if use_mmap: + self.reader = MMIndexedTar(stream) + else: + self.reader = TarFileReader(stream, index_file=index_file) + + # Get list of all files in stream + all_files = self.reader.names() + + # Group files by key into samples + self.samples = group_by_key(all_files) + + # check that the number of samples is correct + if expected_size is not None: + assert ( + len(self) == expected_size + ), f"Expected {expected_size} samples, got {len(self)}" + + self.uuid = str(uuid.uuid4()) + + def close(self): + self.reader.close() + if not self.stream.closed: + self.stream.close() + + def 
__len__(self): + return len(self.samples) + + def __getitem__(self, idx): + # Get indexes of files for the sample at index idx + indexes = self.samples[idx] + sample = {} + key = None + for i in indexes: + # Get filename and data for the file at index i + fname, data = self.reader.get_file(i) + # Split filename into key and extension + k, ext = splitname(fname) + # Make sure all files in sample have same key + key = key or k + assert key == k + sample[ext] = data + # Add key to sample + sample["__key__"] = key + return sample + + def __str__(self): + return f"" + + def __repr__(self): + return str(self) + + +def hash_localname(dldir="/tmp/_wids_cache"): + os.makedirs(dldir, exist_ok=True) + + connection = sqlite3.connect(os.path.join(dldir, "cache.db")) + cursor = connection.cursor() + cursor.execute( + "CREATE TABLE IF NOT EXISTS cache (url TEXT PRIMARY KEY, path TEXT, checksum TEXT)" + ) + connection.commit() + + def f(shard): + """Given a URL, return a local name for the shard.""" + if shard.startswith("pipe:"): + # uuencode the entire URL string + hex32 = base64.urlsafe_b64encode(hashlib.sha256(shard.encode()).digest())[ + :32 + ].decode() + return os.path.join(dldir, "pipe__" + hex32) + else: + # we hash the host and directory components into a 16 character string + dirname = urldir(shard) + hex16 = base64.urlsafe_b64encode(hashlib.sha256(dirname.encode()).digest())[ + :16 + ].decode() + # the cache name is the concatenation of the hex16 string and the file name component of the URL + cachename = "data__" + hex16 + "__" + os.path.basename(urlparse(shard).path) + checksum = None + cursor.execute( + "INSERT OR REPLACE INTO cache VALUES (?, ?, ?)", + (shard, cachename, checksum), + ) + connection.commit() + return os.path.join(dldir, cachename) + + return f + + +def cache_localname(cachedir): + os.makedirs(cachedir, exist_ok=True) + + def f(shard): + """Given a URL, return a local name for the shard.""" + path = urlparse(shard).path + fname = 
os.path.basename(path) + return os.path.join(cachedir, fname) + + return f + + +def default_localname(dldir="/tmp/_wids_cache"): + os.makedirs(dldir, exist_ok=True) + + def f(shard): + """Given a URL, return a local name for the shard.""" + cachename = quote(shard, safe="+-") + return os.path.join(dldir, cachename) + + return f + + +class LRUShards: + """A class that manages a cache of shards. The cache is a LRU cache that + stores the local names of the shards as keys and the downloaded paths as + values. The shards are downloaded to a directory specified by dldir. + The local name of a shard is computed by the localname function, which + takes the shard URL as an argument. If keep is True, the downloaded files + are not deleted when they are no longer needed. + """ + + def __init__(self, lru_size, keep=False, localname=default_localname()): + self.localname = localname + # the cache contains the local name as the key and the downloaded path as the value + self.lru = LRUCache(lru_size, release_handler=self.release_handler) + # keep statistics + self.reset_stats() + + def reset_stats(self): + self.accesses = 0 + self.misses = 0 + + def __len__(self): + return len(self.lru) + + def release_handler(self, key, value): + value.close() + + def clear(self): + self.lru.clear() + + def get_shard(self, url): + assert isinstance(url, str) + self.accesses += 1 + if url not in self.lru: + local = self.localname(url) + with download_and_open(url, local) as stream: + itf = IndexedTarSamples(path=local, stream=stream) + self.lru[url] = itf + self.misses += 1 + self.last_missed = True + else: + self.last_missed = False + return self.lru[url] + + +def interpret_transformations(transformations): + """Interpret the transformations argument. + + This takes care of transformations specified as string shortcuts + and returns a list of callables. 
+ """ + if not isinstance(transformations, list): + transformations = [transformations] + + result = [] + + for transformation in transformations: + if transformation == "PIL": + transformation = partial(default_decoder, format="PIL") + elif transformation == "numpy": + transformation = partial(default_decoder, format="numpy") + else: + assert callable(transformation) + result.append(transformation) + + return result + + +def hash_dataset_name(input_string): + """Compute a hash of the input string and return the first 16 characters of the hash.""" + # Compute SHA256 hash of the input string + hash_object = hashlib.sha256(input_string.encode()) + hash_digest = hash_object.digest() + + # Encode the hash in base64 + base64_encoded_hash = base64.urlsafe_b64encode(hash_digest) + + # Return the first 16 characters of the base64-encoded hash + return base64_encoded_hash[:16].decode("ascii") + + +class ShardListDataset(Dataset[T]): + """An indexable dataset based on a list of shards. + + The dataset is either given as a list of shards with optional options and name, + or as a URL pointing to a JSON descriptor file. + + Datasets can reference other datasets via `source_url`. + + Shard references within a dataset are resolve relative to an explicitly + given `base` property, or relative to the URL from which the dataset + descriptor was loaded. + """ + + def __init__( + self, + shards, + *, + cache_size=int(1e12), + cache_dir=None, + lru_size=10, + dataset_name=None, + localname=None, + transformations="PIL", + keep=False, + base=None, + options=None, + ): + """Create a ShardListDataset. + + Args: + shards: a list of (filename, length) pairs or a URL pointing to a JSON descriptor file + cache_size: the number of shards to keep in the cache + lru_size: the number of shards to keep in the LRU cache + localname: a function that maps URLs to local filenames + + Note that there are two caches: an on-disk directory, and an in-memory LRU cache. 
+ """ + if options is None: + options = {} + super(ShardListDataset, self).__init__() + # shards is a list of (filename, length) pairs. We'll need to + # keep track of the lengths and cumulative lengths to know how + # to map indices to shards and indices within shards. + if isinstance(shards, (str, io.IOBase)): + if base is None and isinstance(shards, str): + base = urldir(shards) + self.base = base + self.spec = load_dsdesc_and_resolve(shards, options=options, base=base) + self.shards = self.spec.get("shardlist", []) + self.dataset_name = self.spec.get("name") or hash_dataset_name(str(shards)) + else: + self.base = None + self.spec = options + self.shards = shards + self.dataset_name = dataset_name or hash_dataset_name(str(shards)) + + self.lengths = [shard["nsamples"] for shard in self.shards] + self.cum_lengths = np.cumsum(self.lengths) + self.total_length = self.cum_lengths[-1] + + if cache_dir is not None: + # when a cache dir is explicitly given, we download files into + # that directory without any changes + self.cache_dir = cache_dir + self.localname = cache_localname(cache_dir) + elif localname is not None: + # when a localname function is given, we use that + self.cache_dir = None + self.localname = localname + else: + # when no cache dir or localname are given, use the cache from the environment + self.cache_dir = os.environ.get("WIDS_CACHE", "/tmp/_wids_cache") + self.localname = default_localname(self.cache_dir) + + if True or int(os.environ.get("WIDS_VERBOSE", 0)): + nbytes = sum(shard.get("filesize", 0) for shard in self.shards) + nsamples = sum(shard["nsamples"] for shard in self.shards) + print( + str(shards)[:50], + "base:", + self.base, + "name:", + self.spec.get("name"), + "nfiles:", + len(self.shards), + "nbytes:", + nbytes, + "samples:", + nsamples, + "cache:", + self.cache_dir, + file=sys.stderr, + ) + self.transformations = interpret_transformations(transformations) + + if lru_size > 200: + warnings.warn( + "LRU size is very large; consider 
reducing it to avoid running out of file descriptors" + ) + self.cache = LRUShards(lru_size, localname=self.localname, keep=keep) + + def add_transform(self, transform): + """Add a transformation to the dataset.""" + self.transformations.append(transform) + return self + + def __len__(self): + """Return the total number of samples in the dataset.""" + return self.total_length + + def get_stats(self): + """Return the number of cache accesses and misses.""" + return self.cache.accesses, self.cache.misses + + def check_cache_misses(self): + """Check if the cache miss rate is too high.""" + accesses, misses = self.get_stats() + if accesses > 100 and misses / accesses > 0.3: + # output a warning only once + self.check_cache_misses = lambda: None + print( + "Warning: ShardListDataset has a cache miss rate of {:.1%}%".format( + misses * 100.0 / accesses + ) + ) + + def get_shard(self, index): + """Get the shard and index within the shard corresponding to the given index.""" + # Find the shard corresponding to the given index. + shard_idx = np.searchsorted(self.cum_lengths, index, side="right") + + # Figure out which index within the shard corresponds to the + # given index. + if shard_idx == 0: + inner_idx = index + else: + inner_idx = index - self.cum_lengths[shard_idx - 1] + + # Get the shard and return the corresponding element. + desc = self.shards[shard_idx] + url = desc["url"] + shard = self.cache.get_shard(url) + return shard, inner_idx, desc + + def __getitem__(self, index): + """Return the sample corresponding to the given index.""" + shard, inner_idx, desc = self.get_shard(index) + sample = shard[inner_idx] + + # Check if we're missing the cache too often. 
+ self.check_cache_misses() + + sample["__dataset__"] = desc.get("dataset") + sample["__index__"] = index + sample["__shard__"] = desc["url"] + sample["__shardindex__"] = inner_idx + + # Apply transformations + for transform in self.transformations: + sample = transform(sample) + + return sample + + def close(self): + """Close the dataset.""" + self.cache.clear() + + +def lengths_to_ranges(lengths): + """Convert a list of lengths to a list of ranges.""" + ranges = [] + start = 0 + for length in lengths: + ranges.append((start, start + length)) + start += length + return ranges + + +def intersect_range(a, b): + """Return the intersection of the two half-open integer intervals.""" + result = max(a[0], b[0]), min(a[1], b[1]) + if result[0] >= result[1]: + return None + return result + + +def intersect_ranges(rangelist, r): + """Return the intersection of the half-open integer interval r with the list of half-open integer intervals.""" + result = [] + for a in rangelist: + x = intersect_range(a, r) + if x is not None: + result.append(x) + return result + + +def iterate_ranges(ranges, rng, indexshuffle=True, shardshuffle=True): + """Iterate over the ranges in a random order.""" + shard_indexes = list(range(len(ranges))) + if shardshuffle: + rng.shuffle(shard_indexes) + for i in shard_indexes: + lo, hi = ranges[i] + sample_indexes = list(range(lo, hi)) + if indexshuffle: + rng.shuffle(sample_indexes) + yield from sample_indexes + + +class ShardListSampler(Sampler): + """A sampler that samples consistent with a ShardListDataset. + + This sampler is used to sample from a ShardListDataset in a way that + preserves locality. + + This returns a permutation of the indexes by shard, then a permutation of + indexes within each shard. This ensures that the data is accessed in a + way that preserves locality. + + Note that how this ends up splitting data between multiple workers ends up + on the details of the DataLoader. 
Generally, it will likely load samples from the + same shard in each worker. + + Other more sophisticated shard-aware samplers are possible and will likely + be added. + """ + + def __init__(self, dataset, *, lengths=None, seed=0, shufflefirst=False): + if lengths is None: + lengths = list(dataset.lengths) + self.ranges = lengths_to_ranges(lengths) + self.seed = seed + self.shufflefirst = shufflefirst + self.epoch = 0 + + def __iter__(self): + self.rng = random.Random(self.seed + 1289738273 * self.epoch) + shardshuffle = self.shufflefirst or self.epoch > 0 + yield from iterate_ranges(self.ranges, self.rng, shardshuffle=shardshuffle) + self.epoch += 1 + + +ShardedSampler = ShardListSampler + + +class ChunkedSampler(Sampler): + """A sampler that samples in chunks and then shuffles the samples within each chunk. + + This preserves locality of reference while still shuffling the data. + """ + + def __init__( + self, + dataset, + *, + num_samples=None, + chunksize=2000, + seed=0, + shuffle=True, + shufflefirst=False, + ): + if isinstance(num_samples, int): + lo, hi = 0, num_samples + elif num_samples is None: + lo, hi = 0, len(dataset) + else: + lo, hi = num_samples + self.ranges = [(i, min(i + chunksize, hi)) for i in range(lo, hi, chunksize)] + self.seed = seed + self.shuffle = shuffle + self.shufflefirst = shufflefirst + self.epoch = 0 + + def set_epoch(self, epoch): + self.epoch = epoch + + def __iter__(self): + self.rng = random.Random(self.seed + 1289738273 * self.epoch) + shardshuffle = self.shufflefirst or self.epoch > 0 + yield from iterate_ranges( + self.ranges, + self.rng, + indexshuffle=self.shuffle, + shardshuffle=(self.shuffle and shardshuffle), + ) + self.epoch += 1 + + +def DistributedChunkedSampler( + dataset: Dataset, + *, + num_replicas: Optional[int] = None, + num_samples: Optional[int] = None, + rank: Optional[int] = None, + shuffle: bool = True, + shufflefirst: bool = False, + seed: int = 0, + drop_last: bool = None, + chunksize: int = 1000000, +) 
-> ChunkedSampler: + """Return a ChunkedSampler for the current worker in distributed training. + + Reverts to a simple ChunkedSampler if not running in distributed mode. + + Since the split among workers takes place before the chunk shuffle, + workers end up with a fixed set of shards they need to download. The + more workers, the fewer shards are used by each worker. + """ + if drop_last is not None: + warnings.warn( + "DistributedChunkedSampler does not support drop_last, thus it will be ignored" + ) + if not dist.is_initialized(): + warnings.warn( + "DistributedChunkedSampler is called without distributed initialized; assuming single process" + ) + num_replicas = 1 + rank = 0 + else: + num_replicas = num_replicas or dist.get_world_size() + rank = rank or dist.get_rank() + assert rank >= 0 and rank < num_replicas + + num_samples = num_samples or len(dataset) + worker_chunk = (num_samples + num_replicas - 1) // num_replicas + worker_start = rank * worker_chunk + worker_end = min(worker_start + worker_chunk, num_samples) + return ChunkedSampler( + dataset, + num_samples=(worker_start, worker_end), + chunksize=chunksize, + seed=seed, + shuffle=shuffle, + shufflefirst=shufflefirst, + ) diff --git a/vllm/lib/python3.10/site-packages/wids/wids_bench.py b/vllm/lib/python3.10/site-packages/wids/wids_bench.py new file mode 100644 index 0000000000000000000000000000000000000000..92044b694bbcba23e0b21a1aafacfd9681806558 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wids/wids_bench.py @@ -0,0 +1,47 @@ +import argparse +import json + +from . 
import wids +from .compat import WebDataset + + +def main_wids(args): + desc = json.load(open(args.dataset)) + files = desc["files"] + dataset = wids.ShardListDataset(files, cache_size=4) + print(len(dataset)) + for i in range(len(dataset)): + print(i, dataset[i]["__key__"]) + dataset.close() + + +def main_wds(args): + desc = json.load(open(args.dataset)) + files = desc["files"] + urls = [f["url"] for f in files] + dataset = WebDataset(urls) + for i, sample in enumerate(dataset): + print(i, sample["__key__"]) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # there are two subcommands: wids and wds + subparsers = parser.add_subparsers(dest="command") + wids_parser = subparsers.add_parser("wids") + wds_parser = subparsers.add_parser("wds") + + # wids subcommand + wids_parser.add_argument("dataset", help="dataset name") + + # wds subcommand + wds_parser.add_argument("dataset", help="dataset name") + + args = parser.parse_args() + + if args.command == "wids": + main_wids(args) + elif args.command == "wds": + main_wds(args) + else: + raise ValueError(f"Unknown command: {args.command}") diff --git a/vllm/lib/python3.10/site-packages/wids/wids_cleanup.py b/vllm/lib/python3.10/site-packages/wids/wids_cleanup.py new file mode 100644 index 0000000000000000000000000000000000000000..9e84597103eea5915837d11a75c622f38bd68f77 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wids/wids_cleanup.py @@ -0,0 +1,95 @@ +""" +This module provides utilities for managing files in a directory. + +It includes a function `keep_most_recent_files` that keeps the most recent +files in a directory, deleting the rest based on the maximum size of the directory +in bytes and the maximum number of files to keep. + +The cleanup job can be run in the background using `create_cleanup_background_process`. 
+""" + +import fcntl +import glob +import os +import time + +import numpy as np + + +def keep_most_recent_files(pattern, maxsize=int(1e12), maxfiles=1000, debug=False): + """Keep the most recent files in a directory, deleting the rest. + + The maxsize is the maximum size of the directory in bytes. The maxfiles is + the maximum number of files to keep. The files are sorted by modification + time, and the most recent files are kept. If the directory is already + smaller than maxsize, then no files are deleted. If there are fewer than + maxfiles, then no files are deleted.""" + + # get the list of files in the directory + fnames = glob.glob(pattern) + # compute a list of (mtime, fname, size) triples + files = [] + for fname in fnames: + try: + s = os.stat(fname) + except FileNotFoundError: + continue + files.append((s.st_mtime, fname, s.st_size)) + # sort the list by mtime, most recent first + files.sort(reverse=True) + # compute an accumulated total of the file sizes in order using np.cumsum + sizes = np.cumsum([size for mtime, fname, size in files]) + # compute a cutoff index based on maxsize + cutoff = np.searchsorted(sizes, maxsize) + # compute a cutoff index based on maxfiles + cutoff = min(cutoff, maxfiles) + # delete the files above the cutoff in reverse order + for mtime, fname, size in files[cutoff:][::-1]: + try: + os.unlink(fname) + except FileNotFoundError: + pass + + +class ExclusiveLock: + """A simple non-blocking exclusive lock using fcntl.""" + + def __init__(self, lockfile): + self.lockfile = lockfile + + def try_lock(self): + try: + self.lock = open(self.lockfile, "w") + fcntl.flock(self.lock.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) + return True + except OSError as e: + if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK): + return False + else: + raise + + def release_lock(self): + self.lock.close() + os.unlink(self.lockfile) + + +def create_cleanup_background_process( + pattern, maxsize=int(1e12), maxfiles=1000, every=60 +): + """Create a background 
process that keeps a directory below a certain size.""" + + def cleanup_worker(every): + # use a lock file to ensure that only one cleanup worker is running + lockfile = os.path.join(os.path.dirname(pattern), ".cleanup.lock") + lock = ExclusiveLock(lockfile) + if not lock.try_lock(): + return + while True: + keep_most_recent_files(pattern, maxsize=maxsize, maxfiles=maxfiles) + time.sleep(every) + + import multiprocessing + + p = multiprocessing.Process(target=cleanup_worker, args=(every,)) + p.start() + return p diff --git a/vllm/lib/python3.10/site-packages/wids/wids_dir.py b/vllm/lib/python3.10/site-packages/wids/wids_dir.py new file mode 100644 index 0000000000000000000000000000000000000000..dd53dac71c5bb4f07bb64bad3d049a2a9f0b95b8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wids/wids_dir.py @@ -0,0 +1,21 @@ +""" +# dynamically create a shard index +class DirectoryDataset(ShardListDataset): + def __init__(self, directory): + pass +""" + +""" +# randomly choose shards from a directory +class DirectoryQueueDataset(IterableDataset): + def __init__(self, directory, strategy="replace", choice="random", downloader=None, transformations="PIL"): + pass + def add_transform(self, transform): + pass + def __iter__(self): + # pick file according to strategy + # rename file to .active + # randomly yield samples from file + # rename file back to its original name or unlink it, according to strategy + pass +""" diff --git a/vllm/lib/python3.10/site-packages/wids/wids_dl.py b/vllm/lib/python3.10/site-packages/wids/wids_dl.py new file mode 100644 index 0000000000000000000000000000000000000000..cb4fbcbe39ea1712c9e453b0555911cc73be9247 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wids/wids_dl.py @@ -0,0 +1,146 @@ +import fcntl +import os +import shutil +import sys +import time +from collections import deque +from datetime import datetime +from urllib.parse import urlparse + +recent_downloads = deque(maxlen=1000) + +open_objects = {} +max_open_objects = 100 + + 
+class ULockFile: + """A simple locking class. We don't need any of the third + party libraries since we rely on POSIX semantics for linking + below anyway.""" + + def __init__(self, path): + self.lockfile_path = path + self.lockfile = None + + def __enter__(self): + self.lockfile = open(self.lockfile_path, "w") + fcntl.flock(self.lockfile.fileno(), fcntl.LOCK_EX) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + fcntl.flock(self.lockfile.fileno(), fcntl.LOCK_UN) + self.lockfile.close() + self.lockfile = None + try: + os.unlink(self.lockfile_path) + except FileNotFoundError: + pass + + +def pipe_download(remote, local): + """Perform a download for a pipe: url.""" + assert remote.startswith("pipe:") + cmd = remote[5:] + cmd = cmd.format(local=local) + assert os.system(cmd) == 0, "Command failed: %s" % cmd + + +def copy_file(remote, local): + remote = urlparse(remote) + assert remote.scheme in ["file", ""] + # use absolute path + remote = os.path.abspath(remote.path) + local = urlparse(local) + assert local.scheme in ["file", ""] + local = os.path.abspath(local.path) + if remote == local: + return + # check if the local file exists + shutil.copyfile(remote, local) + + +verbose_cmd = int(os.environ.get("WIDS_VERBOSE_CMD", "0")) + + +def vcmd(flag, verbose_flag=""): + return verbose_flag if verbose_cmd else flag + + +default_cmds = { + "posixpath": copy_file, + "file": copy_file, + "pipe": pipe_download, + "http": "curl " + vcmd("-s") + " -L {url} -o {local}", + "https": "curl " + vcmd("-s") + " -L {url} -o {local}", + "ftp": "curl " + vcmd("-s") + " -L {url} -o {local}", + "ftps": "curl " + vcmd("-s") + " -L {url} -o {local}", + "gs": "gsutil " + vcmd("-q") + " cp {url} {local}", + "s3": "aws s3 cp {url} {local}", +} + + +def download_file_no_log(remote, local, handlers=default_cmds): + """Download a file from a remote url to a local path. 
+ The remote url can be a pipe: url, in which case the remainder of + the url is treated as a command template that is executed to perform the download. + """ + + if remote.startswith("pipe:"): + schema = "pipe" + else: + schema = urlparse(remote).scheme + if schema is None or schema == "": + schema = "posixpath" + # get the handler + handler = handlers.get(schema) + if handler is None: + raise ValueError("Unknown schema: %s" % schema) + # call the handler + if callable(handler): + handler(remote, local) + else: + assert isinstance(handler, str) + cmd = handler.format(url=remote, local=local) + assert os.system(cmd) == 0, "Command failed: %s" % cmd + return local + + +def download_file(remote, local, handlers=default_cmds, verbose=False): + start = time.time() + try: + return download_file_no_log(remote, local, handlers=handlers) + finally: + recent_downloads.append((remote, local, time.time(), time.time() - start)) + if verbose: + print( + "downloaded", + remote, + "to", + local, + "in", + time.time() - start, + "seconds", + file=sys.stderr, + ) + + +def download_and_open(remote, local, mode="rb", handlers=default_cmds, verbose=False): + with ULockFile(local + ".lock"): + if not os.path.exists(local): + if verbose: + print("downloading", remote, "to", local, file=sys.stderr) + download_file(remote, local, handlers=handlers) + else: + if verbose: + print("using cached", local, file=sys.stderr) + result = open(local, mode) + if open_objects is not None: + for k, v in list(open_objects.items()): + if v.closed: + del open_objects[k] + if len(open_objects) > max_open_objects: + raise RuntimeError("Too many open objects") + current_time = datetime.now().strftime("%Y%m%d%H%M%S") + key = tuple(str(x) for x in [remote, local, mode, current_time]) + open_objects[key] = result + return result diff --git a/vllm/lib/python3.10/site-packages/wids/wids_index.py b/vllm/lib/python3.10/site-packages/wids/wids_index.py new file mode 100644 index 
0000000000000000000000000000000000000000..d9a9b388aa745652beb3b6f273009ec248d2f0da --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wids/wids_index.py @@ -0,0 +1,355 @@ +import argparse +import json +import os +import re +import sys +from urllib.parse import urlparse, urlunparse + +import braceexpand + +from . import wids, wids_dl +from .wids_specs import load_remote_dsdesc_raw + + +def format_with_suffix(num): + suffixes = ["", "k", "M", "G", "T", "E"] + i = 0 + while num >= 1000 and i < len(suffixes) - 1: + num /= 1000.0 + i += 1 + return f"{num:.1f}{suffixes[i]}" + + +class AtomicJsonUpdate: + def __init__(self, filename): + self.filename = filename + self.backup_filename = filename + ".bak" + self.temp_filename = filename + ".temp" + self.data = None + + def __enter__(self): + # Read the original file + with open(self.filename, "r") as file: + self.data = json.load(file) + return self.data + + def __exit__(self, exc_type, exc_value, traceback): + if exc_type is None: + # Write the modified data to the temporary file + with open(self.temp_filename, "w") as file: + json.dump(self.data, file, indent=2) + # Rename the original file to a backup + os.rename(self.filename, self.backup_filename) + # Rename the new file to the original file name + os.rename(self.temp_filename, self.filename) + elif os.path.exists(self.temp_filename): + os.remove(self.temp_filename) + + +def urldir(url): + """Return the directory part of a url.""" + parsed_url = urlparse(url) + path = parsed_url.path + directory = os.path.dirname(path) + return parsed_url._replace(path=directory).geturl() + + +def urlfile(url): + """Return the file part of a url.""" + parsed_url = urlparse(url) + path = parsed_url.path + filename = os.path.basename(path) + return filename + + +def urldirbase(url): + # Parse the URL + parsed_url = urlparse(url) + + # Use 'file' scheme if no scheme is given + scheme = parsed_url.scheme or "file" + + # Handle file URLs and relative paths + if scheme == "file" and not 
parsed_url.netloc: + path = os.path.abspath(parsed_url.path) + else: + path = parsed_url.path + # Get the directory without the filename + path_without_filename = os.path.dirname(path) + + # Reconstruct URL without filename + url_without_filename = urlunparse( + (scheme, parsed_url.netloc, path_without_filename, "", "", "") + ) + + return url_without_filename + + +def shorten_name(s): + l = re.split(r"[^a-zA-Z0-9_]+", s) + found = set() + result = [] + for word in l: + if re.match(r"^[0-9]*$", word): + continue + if word not in found: + result.append(word) + found.add(word) + return "-".join(result) + + +def main_create(args): + """Create a full shard index for a list of files.""" + # set default output file name + if args.output is None: + args.output = "shardindex.json" + + if args.name is None: + first = os.path.splitext(args.files[0])[0] + args.name = shorten_name(first) + print("setting name to", args.name) + + # read the list of files from stdin if there is only one file and it is "-" + if len(args.files) == 1 and args.files[0] == "-": + args.files = [line.strip() for line in sys.stdin] + + # expand any brace expressions in the file names + fnames = [] + for f in args.files: + fnames.extend(braceexpand.braceexpand(f)) + + # create the shard index + files = [] + for fname in fnames: + print(fname) + downloaded = wids_dl.download_file(fname, "/tmp/shard.tar") + md5sum = wids.compute_file_md5sum(downloaded) + nsamples = wids.compute_num_samples(downloaded) + filesize = os.stat(downloaded).st_size + files.append( + dict(url=fname, md5sum=md5sum, nsamples=nsamples, filesize=filesize) + ) + + files = sorted(files, key=lambda x: x["url"]) + + # create the result dictionary + result = dict( + __kind__="wids-shard-index-v1", + wids_version=1, + shardlist=files, + ) + + if args.name != "": + result["name"] = args.name + + if args.base is not None: + result["base"] = args.base + + # add info if it is given + if args.info is not None: + info = open(args.info).read() + 
result["info"] = info + + # write the result + with open(args.output, "w") as f: + json.dump(result, f, indent=2) + + +def main_update(args): + """Update an existing file.""" + with AtomicJsonUpdate(args.filename) as data: + if args.name != "": + data["name"] = args.name + if args.keep: + data["keep"] = True + if args.nokeep: + data["keep"] = False + if args.info != "": + data["info"] = args.info + if args.base != "": + data["base"] = args.base + if args.rebase: + bases = {urldirbase(shard["url"]) for shard in data["shardlist"]} + assert len(bases) == 1, f"multiple/no bases found: {bases}" + base = bases.pop() + print(f"rebasing to {base}") + data["base"] = base + if args.dir != "" or args.nodir or args.rebase: + shardlist = data["shardlist"] + for shard in shardlist: + url = shard["url"] + file = urlfile(url) + if args.nodir: + shard["url"] = file + else: + shard["url"] = os.path.join(args.dir, file) + data["shardlist"] = sorted(shardlist, key=lambda x: x["url"]) + if "name" not in data: + parsed = urlparse(args.filename) + data["name"] = os.path.splitext(os.path.basename(parsed.path))[0] + + +def print_long_info(data, filename): + print(" name:", data.get("name")) + print(" info:", data.get("info")) + print(" base:", data.get("base"), f"(assumed: {urldir(filename)})") + total_size = sum(shard["filesize"] for shard in data["shardlist"]) + total_samples = sum(shard["nsamples"] for shard in data["shardlist"]) + print(" total size:", format_with_suffix(total_size)) + print(" total samples:", format_with_suffix(total_samples)) + print(" avg sample size:", format_with_suffix(int(total_size / total_samples))) + print( + " avg shard size:", + format_with_suffix(int(total_size / len(data["shardlist"]))), + ) + print(" first shard:", data["shardlist"][0]["url"]) + print(" last shard:", data["shardlist"][-1]["url"]) + if len(data.get("datasets", [])) > 0: + print(" datasets:") + for dataset in data.get("datasets", []): + print(" dataset name:", dataset.get("name")) + print( 
+ " dataset url:", + dataset.get("source_url", len(data.get("shardlist", []))), + ) + + +def main_info(args): + """Show info about an index file.""" + if args.table: + print("file\tname\tnbytes\tnsamples\tbase\tlast\tdatasets") + for filename in args.filenames: + data = load_remote_dsdesc_raw(filename) + print( + filename, + data.get("name"), + sum(shard["filesize"] for shard in data["shardlist"]), + sum(shard["nsamples"] for shard in data["shardlist"]), + data.get("base"), + data["shardlist"][-1]["url"], + len(data.get("datasets", [])), + sep="\t", + ) + else: + for filename in args.filenames: + data = load_remote_dsdesc_raw(filename) + print("filename:", filename) + print_long_info(data, filename) + print() + + +def maybe_read(x): + try: + return x.read() + except AttributeError: + return x + + +def maybe_decode(sample): + sample = {k: maybe_read(v) for k, v in sample.items()} + return sample + + +def main_sample(args): + raw = args.raw or args.cat is not None + if raw: + ds = wids.ShardListDataset(args.filename, transformations=[maybe_decode]) + else: + ds = wids.ShardListDataset(args.filename) + print("dataset size:", len(ds), file=sys.stderr) + sample = ds[args.index] + if args.cat is not None: + sys.stdout.buffer.write(sample[args.cat]) + return 0 + mkl = max(len(k) for k in sample.keys()) + for k, v in sorted(sample.items()): + print(k.ljust(mkl), repr(v)[: args.width - mkl - 1]) + + +def main(): + """Commands for manipulating the shard index.""" + # Create the top-level parser + parser = argparse.ArgumentParser( + description="Command line tool with subcommands for file operations." 
+ ) + subparsers = parser.add_subparsers( + dest="command", required=True, help="Subcommands" + ) + + # Create the parser for the "create" command + create_parser = subparsers.add_parser("create", help="Create a new file") + create_parser.add_argument("files", nargs="+", help="files to index") + create_parser.add_argument("--output", "-o", help="output file name") + create_parser.add_argument("--name", "-n", help="name for dataset", default=None) + create_parser.add_argument( + "--info", "-i", help="description for dataset", default=None + ) + create_parser.add_argument("--base", "-b", help="base path", default=None) + + # Create the parser for the "update" command + update_parser = subparsers.add_parser("update", help="Update an existing file") + update_parser.add_argument("filename", type=str, help="Name of the file to update") + update_parser.add_argument("-n", "--name", default="", help="set the dataset name") + update_parser.add_argument( + "-k", "--keep", default="store_true", help="set the keep flag" + ) + update_parser.add_argument( + "-K", "--nokeep", default="store_true", help="clear the keep flag" + ) + update_parser.add_argument("-i", "--info", default="", help="set the dataset info") + update_parser.add_argument( + "-D", "--nodir", action="store_true", help="remove the directory from the URLs" + ) + update_parser.add_argument( + "-d", "--dir", default="", help="set the directory on the URLs" + ) + update_parser.add_argument("-b", "--base", default="", help="set the base") + update_parser.add_argument( + "-B", "--rebase", action="store_true", help="rebase the URLs" + ) + + # Create the parser for the "info" command + info_parser = subparsers.add_parser("info", help="Show info about an index file") + info_parser.add_argument( + "filenames", type=str, nargs="*", help="Name of the file to display" + ) + info_parser.add_argument( + "-t", "--table", action="store_true", help="output in table format" + ) + + # Create the parser for the "sample" command + 
sample_parser = subparsers.add_parser( + "sample", help="Show info about an index file" + ) + sample_parser.add_argument("filename", type=str, help="Name of the file to update") + sample_parser.add_argument( + "index", type=int, default=0, help="Index of the sample to show" + ) + sample_parser.add_argument( + "-p", "--python", action="store_true", help="Show raw sample" + ) + sample_parser.add_argument( + "-r", "--raw", action="store_true", help="Show raw sample" + ) + sample_parser.add_argument( + "-c", "--cat", type=str, default=None, help="Output the bytes for a given key" + ) + sample_parser.add_argument( + "-w", "--width", type=int, default=250, help="Output the bytes for a given key" + ) + + # Parse the arguments + args = ( + parser.parse_args() + ) # Dynamically call the appropriate function based on the subcommand + + try: + func = getattr(sys.modules[__name__], f"main_{args.command}") + except AttributeError: + parser.print_help() + + func(args) + + +if __name__ == "__main__": + main() diff --git a/vllm/lib/python3.10/site-packages/wids/wids_lru.py b/vllm/lib/python3.10/site-packages/wids/wids_lru.py new file mode 100644 index 0000000000000000000000000000000000000000..b288094f3f8e67ebbeedca19f0b6895e22e9dbf6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wids/wids_lru.py @@ -0,0 +1,64 @@ +from collections import OrderedDict + + +class LRUCache: + def __init__(self, capacity: int, release_handler=None): + """Initialize a new LRU cache with the given capacity.""" + self.capacity = capacity + self.cache = OrderedDict() + self.release_handler = release_handler + + def __getitem__(self, key): + """Return the value associated with the given key, or None.""" + if key not in self.cache: + return None + self.cache.move_to_end(key) + return self.cache[key] + + def __setitem__(self, key, value): + """Associate the given value with the given key.""" + if key in self.cache: + self.cache.move_to_end(key) + self.cache[key] = value + if len(self.cache) > 
self.capacity: + key, value = self.cache.popitem(last=False) + if self.release_handler is not None: + self.release_handler(key, value) + + def __delitem__(self, key): + """Remove the given key from the cache.""" + if key in self.cache: + if self.release_handler is not None: + value = self.cache[key] + self.release_handler(key, value) + del self.cache[key] + + def __len__(self): + """Return the number of entries in the cache.""" + return len(self.cache) + + def __contains__(self, key): + """Return whether the cache contains the given key.""" + return key in self.cache + + def items(self): + """Return an iterator over the keys of the cache.""" + return self.cache.items() + + def keys(self): + """Return an iterator over the keys of the cache.""" + return self.cache.keys() + + def values(self): + """Return an iterator over the values of the cache.""" + return self.cache.values() + + def clear(self): + for key in list(self.keys()): + value = self.cache[key] + if self.release_handler is not None: + self.release_handler(key, value) + del self[key] + + def __del__(self): + self.clear() diff --git a/vllm/lib/python3.10/site-packages/wids/wids_mmtar.py b/vllm/lib/python3.10/site-packages/wids/wids_mmtar.py new file mode 100644 index 0000000000000000000000000000000000000000..bf45b52647ea7aaf5cccc9d53a3002340940c453 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wids/wids_mmtar.py @@ -0,0 +1,150 @@ +import collections +import fcntl +import io +import mmap +import os +import struct + +TarHeader = collections.namedtuple( + "TarHeader", + [ + "name", + "mode", + "uid", + "gid", + "size", + "mtime", + "chksum", + "typeflag", + "linkname", + "magic", + "version", + "uname", + "gname", + "devmajor", + "devminor", + "prefix", + ], +) + + +def parse_tar_header(header_bytes): + header = struct.unpack("!100s8s8s8s12s12s8s1s100s6s2s32s32s8s8s155s", header_bytes) + return TarHeader(*header) + + +def next_header(offset, header): + block_size = 512 + size = 
header.size.decode("utf-8").strip("\x00") + if size == "": + return -1 + size = int(size, 8) + # compute the file size rounded up to the next block size if it is a partial block + padded_file_size = (size + block_size - 1) // block_size * block_size + return offset + block_size + padded_file_size + + +class MMIndexedTar: + def __init__(self, fname, index_file=None, verbose=True, cleanup_callback=None): + self.verbose = verbose + self.cleanup_callback = cleanup_callback + if isinstance(fname, str): + self.stream = open(fname, "rb") + self.fname = fname + elif isinstance(fname, io.IOBase): + self.stream = fname + self.fname = None + self.mmapped_file = mmap.mmap(self.stream.fileno(), 0, access=mmap.ACCESS_READ) + if cleanup_callback: + cleanup_callback(fname, self.stream.fileno(), "start") + self._build_index() + + def close(self, dispose=False): + if self.cleanup_callback: + self.cleanup_callback(self.fname, self.stream.fileno(), "end") + self.mmapped_file.close() + self.stream.close() + + def _build_index(self): + self.by_name = {} + self.by_index = [] + offset = 0 + while offset >= 0 and offset < len(self.mmapped_file): + header = parse_tar_header(self.mmapped_file[offset : offset + 500]) + name = header.name.decode("utf-8").strip("\x00") + typeflag = header.typeflag.decode("utf-8").strip("\x00") + if name != "" and name != "././@PaxHeader" and typeflag in ["0", ""]: + try: + size = int(header.size.decode("utf-8")[:-1], 8) + except ValueError as exn: + print(header) + raise exn + self.by_name[name] = offset + self.by_index.append((name, offset, size)) + offset = next_header(offset, header) + + def names(self): + return self.by_name.keys() + + def get_at_offset(self, offset): + header = parse_tar_header(self.mmapped_file[offset : offset + 500]) + name = header.name.decode("utf-8").strip("\x00") + start = offset + 512 + end = start + int(header.size.decode("utf-8")[:-1], 8) + return name, self.mmapped_file[start:end] + + def get_at_index(self, index): + name, 
offset, size = self.by_index[index] + return self.get_at_offset(offset) + + def get_by_name(self, name): + offset = self.by_name[name] + return self.get_at_offset(offset) + + def __iter__(self): + for name, offset, size in self.by_index: + yield name, self.mmapped_file[offset + 512 : offset + 512 + size] + + def __getitem__(self, key): + if isinstance(key, int): + return self.get_at_index(key) + else: + return self.get_by_name(key) + + def __len__(self): + return len(self.by_index) + + def get_file(self, i): + fname, data = self.get_at_index(i) + return fname, io.BytesIO(data) + + +def keep_while_reading(fname, fd, phase, delay=0.0): + """This is a possible cleanup callback for cleanup_callback of MIndexedTar. + + It assumes that as long as there are some readers for a file, + more readers may be trying to open it. + + Note that on Linux, unlinking the file doesn't matter after + it has been mmapped. The contents will only be deleted when + all readers close the file. The unlinking merely makes the file + unavailable to new readers, since the downloader checks first + whether the file exists. 
+ """ + assert delay == 0.0, "delay not implemented" + if fd < 0 or fname is None: + return + if phase == "start": + fcntl.flock(fd, fcntl.LOCK_SH) + elif phase == "end": + try: + fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + os.unlink(fname) + except FileNotFoundError: + # someone else deleted it already + pass + except BlockingIOError: + # we couldn't get an exclusive lock, so someone else is still reading + pass + else: + raise ValueError(f"Unknown phase {phase}") diff --git a/vllm/lib/python3.10/site-packages/wids/wids_specs.py b/vllm/lib/python3.10/site-packages/wids/wids_specs.py new file mode 100644 index 0000000000000000000000000000000000000000..5843bdd773cb3a8c997ee5b0789ba2aee2f03b85 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wids/wids_specs.py @@ -0,0 +1,177 @@ +import io +import json +import os +import tempfile +from urllib.parse import urlparse, urlunparse + +from wids.wids_dl import download_and_open + + +def urldir(url): + """Return the directory part of a url.""" + parsed_url = urlparse(url) + path = parsed_url.path + directory = os.path.dirname(path) + return parsed_url._replace(path=directory).geturl() + + +def urlmerge(base, url): + """Merge a base URL and a relative URL. + + The function fills in any missing part of the url from the base, + except for params, query, and fragment, which are taken only from the 'url'. + For the pathname component, it merges the paths like os.path.join: + an absolute path in 'url' overrides the base path, otherwise the paths are merged. + + Parameters: + base (str): The base URL. + url (str): The URL to merge with the base. + + Returns: + str: The merged URL. 
+ """ + # Parse the base and the relative URL + parsed_base = urlparse(base) + parsed_url = urlparse(url) + + # Merge paths using os.path.join + # If the url path is absolute, it overrides the base path + if parsed_url.path.startswith("/"): + merged_path = parsed_url.path + else: + merged_path = os.path.normpath(os.path.join(parsed_base.path, parsed_url.path)) + + # Construct the merged URL + merged_url = urlunparse( + ( + parsed_url.scheme or parsed_base.scheme, + parsed_url.netloc or parsed_base.netloc, + merged_path, + parsed_url.params, # Use params from the url only + parsed_url.query, # Use query from the url only + parsed_url.fragment, # Use fragment from the url only + ) + ) + + return merged_url + + +def check_shards(l): + """Check that a list of shards is well-formed. + + This checks that the list is a list of dictionaries, and that + each dictionary has a "url" and a "nsamples" key. + """ + assert isinstance(l, list) + for shard in l: + assert isinstance(shard, dict) + assert "url" in shard + assert "nsamples" in shard + return l + + +def set_all(l, k, v): + """Set a key to a value in a list of dictionaries.""" + if v is None: + return + for x in l: + if k not in x: + x[k] = v + + +def load_remote_dsdesc_raw(source): + """Load a remote or local dataset description in JSON format.""" + if isinstance(source, str): + with tempfile.TemporaryDirectory() as tmpdir: + dlname = os.path.join(tmpdir, "dataset.json") + with download_and_open(source, dlname) as f: + dsdesc = json.load(f) + elif isinstance(source, io.IOBase): + dsdesc = json.load(source) + else: + # FIXME: use gopen + import requests + + jsondata = requests.get(source).text + dsdesc = json.loads(jsondata) + return dsdesc + + +def rebase_shardlist(shardlist, base): + """Rebase the URLs in a shardlist.""" + if base is None: + return shardlist + for shard in shardlist: + shard["url"] = urlmerge(base, shard["url"]) + return shardlist + + +def resolve_dsdesc(dsdesc, *, options=None, base=None): + 
"""Resolve a dataset description. + + This rebases the shards as necessary and loads any remote references. + + Dataset descriptions are JSON files. They must have the following format; + + { + "wids_version": 1, + # optional immediate shardlist + "shardlist": [ + {"url": "http://example.com/file.tar", "nsamples": 1000}, + ... + ], + # sub-datasets + "datasets": [ + {"source_url": "http://example.com/dataset.json"}, + {"shardlist": [ + {"url": "http://example.com/file.tar", "nsamples": 1000}, + ... + ]} + ... + ] + } + """ + if options is None: + options = {} + assert isinstance(dsdesc, dict) + dsdesc = dict(dsdesc, **options) + shardlist = rebase_shardlist(dsdesc.get("shardlist", []), base) + assert shardlist is not None + set_all(shardlist, "weight", dsdesc.get("weight")) + set_all(shardlist, "name", dsdesc.get("name")) + check_shards(shardlist) + assert "wids_version" in dsdesc, "No wids_version in dataset description" + assert dsdesc["wids_version"] == 1, "Unknown wids_version" + for component in dsdesc.get("datasets", []): + # we use the weight from the reference to the dataset, + # regardless of remote loading + weight = component.get("weight") + # follow any source_url dsdescs through remote loading + source_url = None + if "source_url" in component: + source_url = component["source_url"] + component = load_remote_dsdesc_raw(source_url) + assert ( + "source_url" not in component + ), "double indirection in dataset description" + assert "shardlist" in component, "no shardlist in dataset description" + # if the component has a base, use it to rebase the shardlist + # otherwise use the base from the source_url, if any + subbase = component.get("base", urldir(source_url) if source_url else None) + if subbase is not None: + rebase_shardlist(component["shardlist"], subbase) + l = check_shards(component["shardlist"]) + set_all(l, "weight", weight) + set_all(l, "source_url", source_url) + set_all(l, "dataset", component.get("name")) + shardlist.extend(l) + assert 
len(shardlist) > 0, "No shards found" + dsdesc["shardlist"] = shardlist + return dsdesc + + +def load_dsdesc_and_resolve(source, *, options=None, base=None): + if options is None: + options = {} + dsdesc = load_remote_dsdesc_raw(source) + return resolve_dsdesc(dsdesc, base=base, options=options) diff --git a/vllm/lib/python3.10/site-packages/wids/wids_tar.py b/vllm/lib/python3.10/site-packages/wids/wids_tar.py new file mode 100644 index 0000000000000000000000000000000000000000..05a5f388301cbed64b24e56cd3c10c38ad192133 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/wids/wids_tar.py @@ -0,0 +1,83 @@ +import io +import os +import os.path +import pickle +import re +import tarfile + +import numpy as np + + +def find_index_file(file): + prefix, last_ext = os.path.splitext(file) + if re.match("._[0-9]+_$", last_ext): + return prefix + ".index" + else: + return file + ".index" + + +class TarFileReader: + def __init__(self, file, index_file=find_index_file, verbose=True): + self.verbose = verbose + if callable(index_file): + index_file = index_file(file) + self.index_file = index_file + + # Open the tar file and keep it open + if isinstance(file, str): + self.tar_file = tarfile.open(file, "r") + else: + self.tar_file = tarfile.open(fileobj=file, mode="r") + + # Create the index + self._create_tar_index() + + def _create_tar_index(self): + if self.index_file is not None and os.path.exists(self.index_file): + if self.verbose: + print("Loading tar index from", self.index_file) + with open(self.index_file, "rb") as stream: + self.fnames, self.index = pickle.load(stream) + return + # Create an empty list for the index + self.fnames = [] + self.index = [] + + if self.verbose: + print("Creating tar index for", self.tar_file.name, "at", self.index_file) + # Iterate over the members of the tar file + for member in self.tar_file: + # If the member is a file, add it to the index + if member.isfile(): + # Get the file's offset + offset = self.tar_file.fileobj.tell() + 
self.fnames.append(member.name) + self.index.append([offset, member.size]) + if self.verbose: + print( + "Done creating tar index for", self.tar_file.name, "at", self.index_file + ) + self.index = np.array(self.index) + if self.index_file is not None: + if os.path.exists(self.index_file + ".temp"): + os.unlink(self.index_file + ".temp") + with open(self.index_file + ".temp", "wb") as stream: + pickle.dump((self.fnames, self.index), stream) + os.rename(self.index_file + ".temp", self.index_file) + + def names(self): + return self.fnames + + def __len__(self): + return len(self.index) + + def get_file(self, i): + name = self.fnames[i] + offset, size = self.index[i] + self.tar_file.fileobj.seek(offset) + file_bytes = self.tar_file.fileobj.read(size) + return name, io.BytesIO(file_bytes) + + def close(self): + # Close the tar file + self.tar_file.close()