Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_backward_ops.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_dense_backward_ops.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_channel_affine_cuda_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div_cpu_dispatch.h +30 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense.h +30 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautograd_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_backward_data.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_meta.h +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod_compositeexplicitautograd_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_compositeexplicitautograd_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_unpack_dual.h +30 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/add_cpu_dispatch.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_cpu_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/det_compositeimplicitautograd_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/diff_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm_meta_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/exponential_cpu_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fmin_native.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_cuda_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_compositeimplicitautograd_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_native.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mish_meta_dispatch.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_compositeimplicitautograd_dispatch.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/polygamma_cpu_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_scales_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_ops.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/resize_as_sparse_ops.h +50 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_compositeexplicitautograd_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_gammaln.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_compositeexplicitautograd_dispatch.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/std.h +86 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/trapezoid_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unflatten_dense_tensors_ops.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/vander.h +30 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/vsplit_native.h +22 -0
- vllm/lib/python3.10/site-packages/dotenv/__init__.py +49 -0
- vllm/lib/python3.10/site-packages/dotenv/__main__.py +6 -0
- vllm/lib/python3.10/site-packages/dotenv/ipython.py +39 -0
- vllm/lib/python3.10/site-packages/dotenv/main.py +392 -0
- vllm/lib/python3.10/site-packages/dotenv/parser.py +175 -0
- vllm/lib/python3.10/site-packages/dotenv/py.typed +1 -0
- vllm/lib/python3.10/site-packages/dotenv/version.py +1 -0
- vllm/lib/python3.10/site-packages/outlines-0.1.11.dist-info/INSTALLER +1 -0
- vllm/lib/python3.10/site-packages/outlines-0.1.11.dist-info/LICENSE +201 -0
- vllm/lib/python3.10/site-packages/outlines-0.1.11.dist-info/METADATA +503 -0
- vllm/lib/python3.10/site-packages/outlines-0.1.11.dist-info/RECORD +101 -0
- vllm/lib/python3.10/site-packages/outlines-0.1.11.dist-info/REQUESTED +0 -0
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_backward_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _backward {
|
| 18 |
+
using schema = void (const at::Tensor &, at::TensorList, const c10::optional<at::Tensor> &, c10::optional<bool>, bool);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_backward")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()")
|
| 24 |
+
static void call(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph);
|
| 25 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_dense_backward_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _embedding_bag_dense_backward {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymInt, bool, int64_t, const c10::optional<at::Tensor> &, int64_t);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_embedding_bag_dense_backward")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _embedding_bag_dense_backward_out {
|
| 29 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymInt, bool, int64_t, const c10::optional<at::Tensor> &, int64_t, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_embedding_bag_dense_backward")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static at::Tensor & call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out);
|
| 36 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_channel_affine_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _fake_quantize_learnable_per_channel_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0);
|
| 21 |
+
|
| 22 |
+
} // namespace cuda
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList self, const at::Scalar & scalar);
|
| 21 |
+
TORCH_API void _foreach_div_(at::TensorList self, const at::Scalar & scalar);
|
| 22 |
+
TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::TensorList other);
|
| 23 |
+
TORCH_API void _foreach_div_(at::TensorList self, at::TensorList other);
|
| 24 |
+
TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
|
| 25 |
+
TORCH_API void _foreach_div_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
|
| 26 |
+
TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList self, const at::Tensor & other);
|
| 27 |
+
TORCH_API void _foreach_div_(at::TensorList self, const at::Tensor & other);
|
| 28 |
+
|
| 29 |
+
} // namespace cpu
|
| 30 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_local_scalar_dense_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_local_scalar_dense(Tensor self) -> Scalar
|
| 26 |
+
inline at::Scalar _local_scalar_dense(const at::Tensor & self) {
|
| 27 |
+
return at::_ops::_local_scalar_dense::call(self);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & _nested_view_from_buffer_copy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets);
|
| 21 |
+
TORCH_API at::Tensor & _nested_view_from_buffer_copy_outf(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets, at::Tensor & out);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_backward_data.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_softmax_backward_data_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
|
| 26 |
+
inline at::Tensor _softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
|
| 27 |
+
return at::_ops::_softmax_backward_data::call(grad_output, output, dim, input_dtype);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & _softmax_backward_data_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
|
| 32 |
+
return at::_ops::_softmax_backward_data_out::call(grad_output, output, dim, input_dtype, grad_input);
|
| 33 |
+
}
|
| 34 |
+
// aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & _softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) {
|
| 36 |
+
return at::_ops::_softmax_backward_data_out::call(grad_output, output, dim, input_dtype, grad_input);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_meta.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeMetaFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/TensorIterator.h>
|
| 13 |
+
#include <ATen/TensorMeta.h>
|
| 14 |
+
#include <tuple>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
struct TORCH_API structured__softmax : public at::impl::MetaBase {
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
void meta(const at::Tensor & self, int64_t dim, bool half_to_float);
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
} // namespace native
|
| 27 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_prod_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & _sparse_csr_prod_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
|
| 21 |
+
TORCH_API at::Tensor & _sparse_csr_prod_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _unique_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, bool sorted=true, bool return_inverse=false);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _unique_outf(const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_unpack_dual.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_unpack_dual_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
|
| 26 |
+
inline ::std::tuple<at::Tensor,at::Tensor> _unpack_dual(const at::Tensor & dual, int64_t level) {
|
| 27 |
+
return at::_ops::_unpack_dual::call(dual, level);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/add_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
|
| 21 |
+
TORCH_API at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
|
| 22 |
+
TORCH_API at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
|
| 24 |
+
|
| 25 |
+
} // namespace cpu
|
| 26 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/chain_matmul.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/chain_matmul_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::chain_matmul(Tensor[] matrices) -> Tensor
|
| 26 |
+
inline at::Tensor chain_matmul(at::TensorList matrices) {
|
| 27 |
+
return at::_ops::chain_matmul::call(matrices);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & chain_matmul_out(at::Tensor & out, at::TensorList matrices) {
|
| 32 |
+
return at::_ops::chain_matmul_out::call(matrices, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & chain_matmul_outf(at::TensorList matrices, at::Tensor & out) {
|
| 36 |
+
return at::_ops::chain_matmul_out::call(matrices, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor channel_shuffle(const at::Tensor & self, int64_t groups);
|
| 21 |
+
TORCH_API at::Tensor channel_shuffle_symint(const at::Tensor & self, c10::SymInt groups);
|
| 22 |
+
|
| 23 |
+
} // namespace cpu
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/det_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor det(const at::Tensor & self);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/diff_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor diff(const at::Tensor & self, int64_t n=1, int64_t dim=-1, const c10::optional<at::Tensor> & prepend={}, const c10::optional<at::Tensor> & append={});
|
| 20 |
+
TORCH_API at::Tensor & diff_out(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append, at::Tensor & out);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm_meta_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & embedding_renorm_(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type);
|
| 21 |
+
|
| 22 |
+
} // namespace meta
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/exponential_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & exponential_(at::Tensor & self, double lambd=1, c10::optional<at::Generator> generator=c10::nullopt);
|
| 21 |
+
|
| 22 |
+
} // namespace cpu
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fmin_native.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
#include <ATen/ops/fmin_meta.h>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
struct TORCH_API structured_fmin_out : public at::meta::structured_fmin {
|
| 20 |
+
void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
|
| 21 |
+
};
|
| 22 |
+
} // namespace native
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> frexp_out(at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> frexp_outf(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent);
|
| 22 |
+
|
| 23 |
+
} // namespace cuda
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/linalg_lu_solve_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
|
| 26 |
+
inline at::Tensor linalg_lu_solve(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false) {
|
| 27 |
+
return at::_ops::linalg_lu_solve::call(LU, pivots, B, left, adjoint);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & linalg_lu_solve_out(at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false) {
|
| 32 |
+
return at::_ops::linalg_lu_solve_out::call(LU, pivots, B, left, adjoint, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & linalg_lu_solve_outf(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) {
|
| 36 |
+
return at::_ops::linalg_lu_solve_out::call(LU, pivots, B, left, adjoint, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor max_pool1d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_native.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
|
| 22 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices);
|
| 23 |
+
} // namespace native
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mish_meta_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor mish(const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & mish_out(at::Tensor & out, const at::Tensor & self);
|
| 22 |
+
TORCH_API at::Tensor & mish_outf(const at::Tensor & self, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & mish_(at::Tensor & self);
|
| 24 |
+
|
| 25 |
+
} // namespace meta
|
| 26 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype);
|
| 21 |
+
TORCH_API at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype);
|
| 22 |
+
TORCH_API at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim=false);
|
| 24 |
+
TORCH_API at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim=false);
|
| 25 |
+
TORCH_API at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out);
|
| 26 |
+
|
| 27 |
+
} // namespace compositeimplicitautograd
|
| 28 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/polygamma_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor polygamma(int64_t n, const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & polygamma_out(at::Tensor & out, int64_t n, const at::Tensor & self);
|
| 22 |
+
TORCH_API at::Tensor & polygamma_outf(int64_t n, const at::Tensor & self, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace cpu
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_scales_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor & q_per_channel_scales_out(const at::Tensor & self, at::Tensor & out);
|
| 20 |
+
TORCH_API at::Tensor q_per_channel_scales(const at::Tensor & self);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reshape_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API reshape {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reshape")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef shape);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef shape);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/resize_as_sparse_ops.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API resize_as_sparse_ {
|
| 18 |
+
using schema = const at::Tensor & (const at::Tensor &, const at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::resize_as_sparse_")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)")
|
| 24 |
+
static const at::Tensor & call(const at::Tensor & self, const at::Tensor & the_template);
|
| 25 |
+
static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API resize_as_sparse_out {
|
| 29 |
+
using schema = const at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::resize_as_sparse")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)")
|
| 35 |
+
static const at::Tensor & call(const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out);
|
| 36 |
+
static const at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API resize_as_sparse {
|
| 40 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::resize_as_sparse")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "resize_as_sparse(Tensor self, Tensor the_template) -> Tensor")
|
| 46 |
+
static at::Tensor call(const at::Tensor & self, const at::Tensor & the_template);
|
| 47 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> rnn_relu(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> rnn_relu(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/segment_reduce_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor & segment_reduce_out(at::Tensor & out, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & indices={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, bool unsafe=false, const c10::optional<at::Scalar> & initial=c10::nullopt);
|
| 21 |
+
TORCH_API at::Tensor & segment_reduce_outf(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out);
|
| 22 |
+
|
| 23 |
+
} // namespace compositeexplicitautograd
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_gammaln.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/special_gammaln_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::special_gammaln(Tensor self) -> Tensor
|
| 26 |
+
inline at::Tensor special_gammaln(const at::Tensor & self) {
|
| 27 |
+
return at::_ops::special_gammaln::call(self);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & special_gammaln_out(at::Tensor & out, const at::Tensor & self) {
|
| 32 |
+
return at::_ops::special_gammaln_out::call(self, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & special_gammaln_outf(const at::Tensor & self, at::Tensor & out) {
|
| 36 |
+
return at::_ops::special_gammaln_out::call(self, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor special_hermite_polynomial_h(const at::Scalar & x, const at::Tensor & n);
|
| 21 |
+
TORCH_API at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n);
|
| 22 |
+
TORCH_API at::Tensor & special_hermite_polynomial_h_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Scalar & n);
|
| 24 |
+
TORCH_API at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n);
|
| 25 |
+
TORCH_API at::Tensor & special_hermite_polynomial_h_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
|
| 26 |
+
|
| 27 |
+
} // namespace compositeexplicitautograd
|
| 28 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/std.h
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/std_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::std(Tensor self, bool unbiased=True) -> Tensor
|
| 26 |
+
inline at::Tensor std(const at::Tensor & self, bool unbiased) {
|
| 27 |
+
return at::_ops::std::call(self, unbiased);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
|
| 31 |
+
inline at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) {
|
| 32 |
+
return at::_ops::std_dim::call(self, dim, unbiased, keepdim);
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
// aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor
|
| 36 |
+
inline at::Tensor std(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false) {
|
| 37 |
+
return at::_ops::std_correction::call(self, dim, correction, keepdim);
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
|
| 41 |
+
inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) {
|
| 42 |
+
return at::_ops::std_out::call(self, dim, unbiased, keepdim, out);
|
| 43 |
+
}
|
| 44 |
+
// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
|
| 45 |
+
inline at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
|
| 46 |
+
return at::_ops::std_out::call(self, dim, unbiased, keepdim, out);
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
// aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
|
| 50 |
+
inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false) {
|
| 51 |
+
return at::_ops::std_correction_out::call(self, dim, correction, keepdim, out);
|
| 52 |
+
}
|
| 53 |
+
// aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
|
| 54 |
+
inline at::Tensor & std_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
|
| 55 |
+
return at::_ops::std_correction_out::call(self, dim, correction, keepdim, out);
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
|
| 59 |
+
inline at::Tensor std(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) {
|
| 60 |
+
return at::_ops::std_names_dim::call(self, dim, unbiased, keepdim);
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
|
| 64 |
+
inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim=false) {
|
| 65 |
+
return at::_ops::std_names_out::call(self, dim, unbiased, keepdim, out);
|
| 66 |
+
}
|
| 67 |
+
// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
|
| 68 |
+
inline at::Tensor & std_outf(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
|
| 69 |
+
return at::_ops::std_names_out::call(self, dim, unbiased, keepdim, out);
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
// aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor
|
| 73 |
+
inline at::Tensor std(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false) {
|
| 74 |
+
return at::_ops::std_correction_names::call(self, dim, correction, keepdim);
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
|
| 78 |
+
inline at::Tensor & std_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false) {
|
| 79 |
+
return at::_ops::std_correction_names_out::call(self, dim, correction, keepdim, out);
|
| 80 |
+
}
|
| 81 |
+
// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
|
| 82 |
+
inline at::Tensor & std_outf(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out) {
|
| 83 |
+
return at::_ops::std_correction_names_out::call(self, dim, correction, keepdim, out);
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/trapezoid_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1);
|
| 20 |
+
TORCH_API at::Tensor trapezoid(const at::Tensor & y, const at::Scalar & dx=1, int64_t dim=-1);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unflatten_dense_tensors_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API unflatten_dense_tensors {
|
| 18 |
+
using schema = ::std::vector<at::Tensor> (const at::Tensor &, at::TensorList);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unflatten_dense_tensors")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]")
|
| 24 |
+
static ::std::vector<at::Tensor> call(const at::Tensor & flat, at::TensorList tensors);
|
| 25 |
+
static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & flat, at::TensorList tensors);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/vander.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/vander_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
|
| 26 |
+
inline at::Tensor vander(const at::Tensor & x, c10::optional<int64_t> N=c10::nullopt, bool increasing=false) {
|
| 27 |
+
return at::_ops::vander::call(x, N, increasing);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/vsplit_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::vector<at::Tensor> vsplit(const at::Tensor & self, int64_t sections);
|
| 20 |
+
TORCH_API ::std::vector<at::Tensor> vsplit(const at::Tensor & self, at::IntArrayRef indices);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
vllm/lib/python3.10/site-packages/dotenv/__init__.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Optional
|
| 2 |
+
|
| 3 |
+
from .main import (dotenv_values, find_dotenv, get_key, load_dotenv, set_key,
|
| 4 |
+
unset_key)
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def load_ipython_extension(ipython: Any) -> None:
|
| 8 |
+
from .ipython import load_ipython_extension
|
| 9 |
+
load_ipython_extension(ipython)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def get_cli_string(
|
| 13 |
+
path: Optional[str] = None,
|
| 14 |
+
action: Optional[str] = None,
|
| 15 |
+
key: Optional[str] = None,
|
| 16 |
+
value: Optional[str] = None,
|
| 17 |
+
quote: Optional[str] = None,
|
| 18 |
+
):
|
| 19 |
+
"""Returns a string suitable for running as a shell script.
|
| 20 |
+
|
| 21 |
+
Useful for converting a arguments passed to a fabric task
|
| 22 |
+
to be passed to a `local` or `run` command.
|
| 23 |
+
"""
|
| 24 |
+
command = ['dotenv']
|
| 25 |
+
if quote:
|
| 26 |
+
command.append(f'-q {quote}')
|
| 27 |
+
if path:
|
| 28 |
+
command.append(f'-f {path}')
|
| 29 |
+
if action:
|
| 30 |
+
command.append(action)
|
| 31 |
+
if key:
|
| 32 |
+
command.append(key)
|
| 33 |
+
if value:
|
| 34 |
+
if ' ' in value:
|
| 35 |
+
command.append(f'"{value}"')
|
| 36 |
+
else:
|
| 37 |
+
command.append(value)
|
| 38 |
+
|
| 39 |
+
return ' '.join(command).strip()
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
__all__ = ['get_cli_string',
|
| 43 |
+
'load_dotenv',
|
| 44 |
+
'dotenv_values',
|
| 45 |
+
'get_key',
|
| 46 |
+
'set_key',
|
| 47 |
+
'unset_key',
|
| 48 |
+
'find_dotenv',
|
| 49 |
+
'load_ipython_extension']
|
vllm/lib/python3.10/site-packages/dotenv/__main__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Entry point for cli, enables execution with `python -m dotenv`"""
|
| 2 |
+
|
| 3 |
+
from .cli import cli
|
| 4 |
+
|
| 5 |
+
if __name__ == "__main__":
|
| 6 |
+
cli()
|
vllm/lib/python3.10/site-packages/dotenv/ipython.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from IPython.core.magic import Magics, line_magic, magics_class # type: ignore
|
| 2 |
+
from IPython.core.magic_arguments import (argument, magic_arguments, # type: ignore
|
| 3 |
+
parse_argstring) # type: ignore
|
| 4 |
+
|
| 5 |
+
from .main import find_dotenv, load_dotenv
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@magics_class
|
| 9 |
+
class IPythonDotEnv(Magics):
|
| 10 |
+
|
| 11 |
+
@magic_arguments()
|
| 12 |
+
@argument(
|
| 13 |
+
'-o', '--override', action='store_true',
|
| 14 |
+
help="Indicate to override existing variables"
|
| 15 |
+
)
|
| 16 |
+
@argument(
|
| 17 |
+
'-v', '--verbose', action='store_true',
|
| 18 |
+
help="Indicate function calls to be verbose"
|
| 19 |
+
)
|
| 20 |
+
@argument('dotenv_path', nargs='?', type=str, default='.env',
|
| 21 |
+
help='Search in increasingly higher folders for the `dotenv_path`')
|
| 22 |
+
@line_magic
|
| 23 |
+
def dotenv(self, line):
|
| 24 |
+
args = parse_argstring(self.dotenv, line)
|
| 25 |
+
# Locate the .env file
|
| 26 |
+
dotenv_path = args.dotenv_path
|
| 27 |
+
try:
|
| 28 |
+
dotenv_path = find_dotenv(dotenv_path, True, True)
|
| 29 |
+
except IOError:
|
| 30 |
+
print("cannot find .env file")
|
| 31 |
+
return
|
| 32 |
+
|
| 33 |
+
# Load the .env file
|
| 34 |
+
load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def load_ipython_extension(ipython):
|
| 38 |
+
"""Register the %dotenv magic."""
|
| 39 |
+
ipython.register_magics(IPythonDotEnv)
|
vllm/lib/python3.10/site-packages/dotenv/main.py
ADDED
|
@@ -0,0 +1,392 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import io
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import pathlib
|
| 5 |
+
import shutil
|
| 6 |
+
import sys
|
| 7 |
+
import tempfile
|
| 8 |
+
from collections import OrderedDict
|
| 9 |
+
from contextlib import contextmanager
|
| 10 |
+
from typing import (IO, Dict, Iterable, Iterator, Mapping, Optional, Tuple,
|
| 11 |
+
Union)
|
| 12 |
+
|
| 13 |
+
from .parser import Binding, parse_stream
|
| 14 |
+
from .variables import parse_variables
|
| 15 |
+
|
| 16 |
+
# A type alias for a string path to be used for the paths in this file.
|
| 17 |
+
# These paths may flow to `open()` and `shutil.move()`; `shutil.move()`
|
| 18 |
+
# only accepts string paths, not byte paths or file descriptors. See
|
| 19 |
+
# https://github.com/python/typeshed/pull/6832.
|
| 20 |
+
StrPath = Union[str, 'os.PathLike[str]']
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def with_warn_for_invalid_lines(mappings: Iterator[Binding]) -> Iterator[Binding]:
|
| 26 |
+
for mapping in mappings:
|
| 27 |
+
if mapping.error:
|
| 28 |
+
logger.warning(
|
| 29 |
+
"Python-dotenv could not parse statement starting at line %s",
|
| 30 |
+
mapping.original.line,
|
| 31 |
+
)
|
| 32 |
+
yield mapping
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class DotEnv:
|
| 36 |
+
def __init__(
|
| 37 |
+
self,
|
| 38 |
+
dotenv_path: Optional[StrPath],
|
| 39 |
+
stream: Optional[IO[str]] = None,
|
| 40 |
+
verbose: bool = False,
|
| 41 |
+
encoding: Optional[str] = None,
|
| 42 |
+
interpolate: bool = True,
|
| 43 |
+
override: bool = True,
|
| 44 |
+
) -> None:
|
| 45 |
+
self.dotenv_path: Optional[StrPath] = dotenv_path
|
| 46 |
+
self.stream: Optional[IO[str]] = stream
|
| 47 |
+
self._dict: Optional[Dict[str, Optional[str]]] = None
|
| 48 |
+
self.verbose: bool = verbose
|
| 49 |
+
self.encoding: Optional[str] = encoding
|
| 50 |
+
self.interpolate: bool = interpolate
|
| 51 |
+
self.override: bool = override
|
| 52 |
+
|
| 53 |
+
@contextmanager
|
| 54 |
+
def _get_stream(self) -> Iterator[IO[str]]:
|
| 55 |
+
if self.dotenv_path and os.path.isfile(self.dotenv_path):
|
| 56 |
+
with open(self.dotenv_path, encoding=self.encoding) as stream:
|
| 57 |
+
yield stream
|
| 58 |
+
elif self.stream is not None:
|
| 59 |
+
yield self.stream
|
| 60 |
+
else:
|
| 61 |
+
if self.verbose:
|
| 62 |
+
logger.info(
|
| 63 |
+
"Python-dotenv could not find configuration file %s.",
|
| 64 |
+
self.dotenv_path or '.env',
|
| 65 |
+
)
|
| 66 |
+
yield io.StringIO('')
|
| 67 |
+
|
| 68 |
+
def dict(self) -> Dict[str, Optional[str]]:
|
| 69 |
+
"""Return dotenv as dict"""
|
| 70 |
+
if self._dict:
|
| 71 |
+
return self._dict
|
| 72 |
+
|
| 73 |
+
raw_values = self.parse()
|
| 74 |
+
|
| 75 |
+
if self.interpolate:
|
| 76 |
+
self._dict = OrderedDict(resolve_variables(raw_values, override=self.override))
|
| 77 |
+
else:
|
| 78 |
+
self._dict = OrderedDict(raw_values)
|
| 79 |
+
|
| 80 |
+
return self._dict
|
| 81 |
+
|
| 82 |
+
def parse(self) -> Iterator[Tuple[str, Optional[str]]]:
|
| 83 |
+
with self._get_stream() as stream:
|
| 84 |
+
for mapping in with_warn_for_invalid_lines(parse_stream(stream)):
|
| 85 |
+
if mapping.key is not None:
|
| 86 |
+
yield mapping.key, mapping.value
|
| 87 |
+
|
| 88 |
+
def set_as_environment_variables(self) -> bool:
|
| 89 |
+
"""
|
| 90 |
+
Load the current dotenv as system environment variable.
|
| 91 |
+
"""
|
| 92 |
+
if not self.dict():
|
| 93 |
+
return False
|
| 94 |
+
|
| 95 |
+
for k, v in self.dict().items():
|
| 96 |
+
if k in os.environ and not self.override:
|
| 97 |
+
continue
|
| 98 |
+
if v is not None:
|
| 99 |
+
os.environ[k] = v
|
| 100 |
+
|
| 101 |
+
return True
|
| 102 |
+
|
| 103 |
+
def get(self, key: str) -> Optional[str]:
|
| 104 |
+
"""
|
| 105 |
+
"""
|
| 106 |
+
data = self.dict()
|
| 107 |
+
|
| 108 |
+
if key in data:
|
| 109 |
+
return data[key]
|
| 110 |
+
|
| 111 |
+
if self.verbose:
|
| 112 |
+
logger.warning("Key %s not found in %s.", key, self.dotenv_path)
|
| 113 |
+
|
| 114 |
+
return None
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def get_key(
|
| 118 |
+
dotenv_path: StrPath,
|
| 119 |
+
key_to_get: str,
|
| 120 |
+
encoding: Optional[str] = "utf-8",
|
| 121 |
+
) -> Optional[str]:
|
| 122 |
+
"""
|
| 123 |
+
Get the value of a given key from the given .env.
|
| 124 |
+
|
| 125 |
+
Returns `None` if the key isn't found or doesn't have a value.
|
| 126 |
+
"""
|
| 127 |
+
return DotEnv(dotenv_path, verbose=True, encoding=encoding).get(key_to_get)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
@contextmanager
|
| 131 |
+
def rewrite(
|
| 132 |
+
path: StrPath,
|
| 133 |
+
encoding: Optional[str],
|
| 134 |
+
) -> Iterator[Tuple[IO[str], IO[str]]]:
|
| 135 |
+
pathlib.Path(path).touch()
|
| 136 |
+
|
| 137 |
+
with tempfile.NamedTemporaryFile(mode="w", encoding=encoding, delete=False) as dest:
|
| 138 |
+
error = None
|
| 139 |
+
try:
|
| 140 |
+
with open(path, encoding=encoding) as source:
|
| 141 |
+
yield (source, dest)
|
| 142 |
+
except BaseException as err:
|
| 143 |
+
error = err
|
| 144 |
+
|
| 145 |
+
if error is None:
|
| 146 |
+
shutil.move(dest.name, path)
|
| 147 |
+
else:
|
| 148 |
+
os.unlink(dest.name)
|
| 149 |
+
raise error from None
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def set_key(
|
| 153 |
+
dotenv_path: StrPath,
|
| 154 |
+
key_to_set: str,
|
| 155 |
+
value_to_set: str,
|
| 156 |
+
quote_mode: str = "always",
|
| 157 |
+
export: bool = False,
|
| 158 |
+
encoding: Optional[str] = "utf-8",
|
| 159 |
+
) -> Tuple[Optional[bool], str, str]:
|
| 160 |
+
"""
|
| 161 |
+
Adds or Updates a key/value to the given .env
|
| 162 |
+
|
| 163 |
+
If the .env path given doesn't exist, fails instead of risking creating
|
| 164 |
+
an orphan .env somewhere in the filesystem
|
| 165 |
+
"""
|
| 166 |
+
if quote_mode not in ("always", "auto", "never"):
|
| 167 |
+
raise ValueError(f"Unknown quote_mode: {quote_mode}")
|
| 168 |
+
|
| 169 |
+
quote = (
|
| 170 |
+
quote_mode == "always"
|
| 171 |
+
or (quote_mode == "auto" and not value_to_set.isalnum())
|
| 172 |
+
)
|
| 173 |
+
|
| 174 |
+
if quote:
|
| 175 |
+
value_out = "'{}'".format(value_to_set.replace("'", "\\'"))
|
| 176 |
+
else:
|
| 177 |
+
value_out = value_to_set
|
| 178 |
+
if export:
|
| 179 |
+
line_out = f'export {key_to_set}={value_out}\n'
|
| 180 |
+
else:
|
| 181 |
+
line_out = f"{key_to_set}={value_out}\n"
|
| 182 |
+
|
| 183 |
+
with rewrite(dotenv_path, encoding=encoding) as (source, dest):
|
| 184 |
+
replaced = False
|
| 185 |
+
missing_newline = False
|
| 186 |
+
for mapping in with_warn_for_invalid_lines(parse_stream(source)):
|
| 187 |
+
if mapping.key == key_to_set:
|
| 188 |
+
dest.write(line_out)
|
| 189 |
+
replaced = True
|
| 190 |
+
else:
|
| 191 |
+
dest.write(mapping.original.string)
|
| 192 |
+
missing_newline = not mapping.original.string.endswith("\n")
|
| 193 |
+
if not replaced:
|
| 194 |
+
if missing_newline:
|
| 195 |
+
dest.write("\n")
|
| 196 |
+
dest.write(line_out)
|
| 197 |
+
|
| 198 |
+
return True, key_to_set, value_to_set
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def unset_key(
|
| 202 |
+
dotenv_path: StrPath,
|
| 203 |
+
key_to_unset: str,
|
| 204 |
+
quote_mode: str = "always",
|
| 205 |
+
encoding: Optional[str] = "utf-8",
|
| 206 |
+
) -> Tuple[Optional[bool], str]:
|
| 207 |
+
"""
|
| 208 |
+
Removes a given key from the given `.env` file.
|
| 209 |
+
|
| 210 |
+
If the .env path given doesn't exist, fails.
|
| 211 |
+
If the given key doesn't exist in the .env, fails.
|
| 212 |
+
"""
|
| 213 |
+
if not os.path.exists(dotenv_path):
|
| 214 |
+
logger.warning("Can't delete from %s - it doesn't exist.", dotenv_path)
|
| 215 |
+
return None, key_to_unset
|
| 216 |
+
|
| 217 |
+
removed = False
|
| 218 |
+
with rewrite(dotenv_path, encoding=encoding) as (source, dest):
|
| 219 |
+
for mapping in with_warn_for_invalid_lines(parse_stream(source)):
|
| 220 |
+
if mapping.key == key_to_unset:
|
| 221 |
+
removed = True
|
| 222 |
+
else:
|
| 223 |
+
dest.write(mapping.original.string)
|
| 224 |
+
|
| 225 |
+
if not removed:
|
| 226 |
+
logger.warning("Key %s not removed from %s - key doesn't exist.", key_to_unset, dotenv_path)
|
| 227 |
+
return None, key_to_unset
|
| 228 |
+
|
| 229 |
+
return removed, key_to_unset
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def resolve_variables(
|
| 233 |
+
values: Iterable[Tuple[str, Optional[str]]],
|
| 234 |
+
override: bool,
|
| 235 |
+
) -> Mapping[str, Optional[str]]:
|
| 236 |
+
new_values: Dict[str, Optional[str]] = {}
|
| 237 |
+
|
| 238 |
+
for (name, value) in values:
|
| 239 |
+
if value is None:
|
| 240 |
+
result = None
|
| 241 |
+
else:
|
| 242 |
+
atoms = parse_variables(value)
|
| 243 |
+
env: Dict[str, Optional[str]] = {}
|
| 244 |
+
if override:
|
| 245 |
+
env.update(os.environ) # type: ignore
|
| 246 |
+
env.update(new_values)
|
| 247 |
+
else:
|
| 248 |
+
env.update(new_values)
|
| 249 |
+
env.update(os.environ) # type: ignore
|
| 250 |
+
result = "".join(atom.resolve(env) for atom in atoms)
|
| 251 |
+
|
| 252 |
+
new_values[name] = result
|
| 253 |
+
|
| 254 |
+
return new_values
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def _walk_to_root(path: str) -> Iterator[str]:
|
| 258 |
+
"""
|
| 259 |
+
Yield directories starting from the given directory up to the root
|
| 260 |
+
"""
|
| 261 |
+
if not os.path.exists(path):
|
| 262 |
+
raise IOError('Starting path not found')
|
| 263 |
+
|
| 264 |
+
if os.path.isfile(path):
|
| 265 |
+
path = os.path.dirname(path)
|
| 266 |
+
|
| 267 |
+
last_dir = None
|
| 268 |
+
current_dir = os.path.abspath(path)
|
| 269 |
+
while last_dir != current_dir:
|
| 270 |
+
yield current_dir
|
| 271 |
+
parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
|
| 272 |
+
last_dir, current_dir = current_dir, parent_dir
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def find_dotenv(
|
| 276 |
+
filename: str = '.env',
|
| 277 |
+
raise_error_if_not_found: bool = False,
|
| 278 |
+
usecwd: bool = False,
|
| 279 |
+
) -> str:
|
| 280 |
+
"""
|
| 281 |
+
Search in increasingly higher folders for the given file
|
| 282 |
+
|
| 283 |
+
Returns path to the file if found, or an empty string otherwise
|
| 284 |
+
"""
|
| 285 |
+
|
| 286 |
+
def _is_interactive():
|
| 287 |
+
""" Decide whether this is running in a REPL or IPython notebook """
|
| 288 |
+
try:
|
| 289 |
+
main = __import__('__main__', None, None, fromlist=['__file__'])
|
| 290 |
+
except ModuleNotFoundError:
|
| 291 |
+
return False
|
| 292 |
+
return not hasattr(main, '__file__')
|
| 293 |
+
|
| 294 |
+
if usecwd or _is_interactive() or getattr(sys, 'frozen', False):
|
| 295 |
+
# Should work without __file__, e.g. in REPL or IPython notebook.
|
| 296 |
+
path = os.getcwd()
|
| 297 |
+
else:
|
| 298 |
+
# will work for .py files
|
| 299 |
+
frame = sys._getframe()
|
| 300 |
+
current_file = __file__
|
| 301 |
+
|
| 302 |
+
while frame.f_code.co_filename == current_file or not os.path.exists(
|
| 303 |
+
frame.f_code.co_filename
|
| 304 |
+
):
|
| 305 |
+
assert frame.f_back is not None
|
| 306 |
+
frame = frame.f_back
|
| 307 |
+
frame_filename = frame.f_code.co_filename
|
| 308 |
+
path = os.path.dirname(os.path.abspath(frame_filename))
|
| 309 |
+
|
| 310 |
+
for dirname in _walk_to_root(path):
|
| 311 |
+
check_path = os.path.join(dirname, filename)
|
| 312 |
+
if os.path.isfile(check_path):
|
| 313 |
+
return check_path
|
| 314 |
+
|
| 315 |
+
if raise_error_if_not_found:
|
| 316 |
+
raise IOError('File not found')
|
| 317 |
+
|
| 318 |
+
return ''
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
def load_dotenv(
|
| 322 |
+
dotenv_path: Optional[StrPath] = None,
|
| 323 |
+
stream: Optional[IO[str]] = None,
|
| 324 |
+
verbose: bool = False,
|
| 325 |
+
override: bool = False,
|
| 326 |
+
interpolate: bool = True,
|
| 327 |
+
encoding: Optional[str] = "utf-8",
|
| 328 |
+
) -> bool:
|
| 329 |
+
"""Parse a .env file and then load all the variables found as environment variables.
|
| 330 |
+
|
| 331 |
+
Parameters:
|
| 332 |
+
dotenv_path: Absolute or relative path to .env file.
|
| 333 |
+
stream: Text stream (such as `io.StringIO`) with .env content, used if
|
| 334 |
+
`dotenv_path` is `None`.
|
| 335 |
+
verbose: Whether to output a warning the .env file is missing.
|
| 336 |
+
override: Whether to override the system environment variables with the variables
|
| 337 |
+
from the `.env` file.
|
| 338 |
+
encoding: Encoding to be used to read the file.
|
| 339 |
+
Returns:
|
| 340 |
+
Bool: True if at least one environment variable is set else False
|
| 341 |
+
|
| 342 |
+
If both `dotenv_path` and `stream` are `None`, `find_dotenv()` is used to find the
|
| 343 |
+
.env file.
|
| 344 |
+
"""
|
| 345 |
+
if dotenv_path is None and stream is None:
|
| 346 |
+
dotenv_path = find_dotenv()
|
| 347 |
+
|
| 348 |
+
dotenv = DotEnv(
|
| 349 |
+
dotenv_path=dotenv_path,
|
| 350 |
+
stream=stream,
|
| 351 |
+
verbose=verbose,
|
| 352 |
+
interpolate=interpolate,
|
| 353 |
+
override=override,
|
| 354 |
+
encoding=encoding,
|
| 355 |
+
)
|
| 356 |
+
return dotenv.set_as_environment_variables()
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def dotenv_values(
|
| 360 |
+
dotenv_path: Optional[StrPath] = None,
|
| 361 |
+
stream: Optional[IO[str]] = None,
|
| 362 |
+
verbose: bool = False,
|
| 363 |
+
interpolate: bool = True,
|
| 364 |
+
encoding: Optional[str] = "utf-8",
|
| 365 |
+
) -> Dict[str, Optional[str]]:
|
| 366 |
+
"""
|
| 367 |
+
Parse a .env file and return its content as a dict.
|
| 368 |
+
|
| 369 |
+
The returned dict will have `None` values for keys without values in the .env file.
|
| 370 |
+
For example, `foo=bar` results in `{"foo": "bar"}` whereas `foo` alone results in
|
| 371 |
+
`{"foo": None}`
|
| 372 |
+
|
| 373 |
+
Parameters:
|
| 374 |
+
dotenv_path: Absolute or relative path to the .env file.
|
| 375 |
+
stream: `StringIO` object with .env content, used if `dotenv_path` is `None`.
|
| 376 |
+
verbose: Whether to output a warning if the .env file is missing.
|
| 377 |
+
encoding: Encoding to be used to read the file.
|
| 378 |
+
|
| 379 |
+
If both `dotenv_path` and `stream` are `None`, `find_dotenv()` is used to find the
|
| 380 |
+
.env file.
|
| 381 |
+
"""
|
| 382 |
+
if dotenv_path is None and stream is None:
|
| 383 |
+
dotenv_path = find_dotenv()
|
| 384 |
+
|
| 385 |
+
return DotEnv(
|
| 386 |
+
dotenv_path=dotenv_path,
|
| 387 |
+
stream=stream,
|
| 388 |
+
verbose=verbose,
|
| 389 |
+
interpolate=interpolate,
|
| 390 |
+
override=True,
|
| 391 |
+
encoding=encoding,
|
| 392 |
+
).dict()
|
vllm/lib/python3.10/site-packages/dotenv/parser.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import codecs
|
| 2 |
+
import re
|
| 3 |
+
from typing import (IO, Iterator, Match, NamedTuple, Optional, # noqa:F401
|
| 4 |
+
Pattern, Sequence, Tuple)
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def make_regex(string: str, extra_flags: int = 0) -> Pattern[str]:
|
| 8 |
+
return re.compile(string, re.UNICODE | extra_flags)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
_newline = make_regex(r"(\r\n|\n|\r)")
|
| 12 |
+
_multiline_whitespace = make_regex(r"\s*", extra_flags=re.MULTILINE)
|
| 13 |
+
_whitespace = make_regex(r"[^\S\r\n]*")
|
| 14 |
+
_export = make_regex(r"(?:export[^\S\r\n]+)?")
|
| 15 |
+
_single_quoted_key = make_regex(r"'([^']+)'")
|
| 16 |
+
_unquoted_key = make_regex(r"([^=\#\s]+)")
|
| 17 |
+
_equal_sign = make_regex(r"(=[^\S\r\n]*)")
|
| 18 |
+
_single_quoted_value = make_regex(r"'((?:\\'|[^'])*)'")
|
| 19 |
+
_double_quoted_value = make_regex(r'"((?:\\"|[^"])*)"')
|
| 20 |
+
_unquoted_value = make_regex(r"([^\r\n]*)")
|
| 21 |
+
_comment = make_regex(r"(?:[^\S\r\n]*#[^\r\n]*)?")
|
| 22 |
+
_end_of_line = make_regex(r"[^\S\r\n]*(?:\r\n|\n|\r|$)")
|
| 23 |
+
_rest_of_line = make_regex(r"[^\r\n]*(?:\r|\n|\r\n)?")
|
| 24 |
+
_double_quote_escapes = make_regex(r"\\[\\'\"abfnrtv]")
|
| 25 |
+
_single_quote_escapes = make_regex(r"\\[\\']")
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class Original(NamedTuple):
|
| 29 |
+
string: str
|
| 30 |
+
line: int
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class Binding(NamedTuple):
|
| 34 |
+
key: Optional[str]
|
| 35 |
+
value: Optional[str]
|
| 36 |
+
original: Original
|
| 37 |
+
error: bool
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class Position:
|
| 41 |
+
def __init__(self, chars: int, line: int) -> None:
|
| 42 |
+
self.chars = chars
|
| 43 |
+
self.line = line
|
| 44 |
+
|
| 45 |
+
@classmethod
|
| 46 |
+
def start(cls) -> "Position":
|
| 47 |
+
return cls(chars=0, line=1)
|
| 48 |
+
|
| 49 |
+
def set(self, other: "Position") -> None:
|
| 50 |
+
self.chars = other.chars
|
| 51 |
+
self.line = other.line
|
| 52 |
+
|
| 53 |
+
def advance(self, string: str) -> None:
|
| 54 |
+
self.chars += len(string)
|
| 55 |
+
self.line += len(re.findall(_newline, string))
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class Error(Exception):
|
| 59 |
+
pass
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class Reader:
|
| 63 |
+
def __init__(self, stream: IO[str]) -> None:
|
| 64 |
+
self.string = stream.read()
|
| 65 |
+
self.position = Position.start()
|
| 66 |
+
self.mark = Position.start()
|
| 67 |
+
|
| 68 |
+
def has_next(self) -> bool:
|
| 69 |
+
return self.position.chars < len(self.string)
|
| 70 |
+
|
| 71 |
+
def set_mark(self) -> None:
|
| 72 |
+
self.mark.set(self.position)
|
| 73 |
+
|
| 74 |
+
def get_marked(self) -> Original:
|
| 75 |
+
return Original(
|
| 76 |
+
string=self.string[self.mark.chars:self.position.chars],
|
| 77 |
+
line=self.mark.line,
|
| 78 |
+
)
|
| 79 |
+
|
| 80 |
+
def peek(self, count: int) -> str:
|
| 81 |
+
return self.string[self.position.chars:self.position.chars + count]
|
| 82 |
+
|
| 83 |
+
def read(self, count: int) -> str:
|
| 84 |
+
result = self.string[self.position.chars:self.position.chars + count]
|
| 85 |
+
if len(result) < count:
|
| 86 |
+
raise Error("read: End of string")
|
| 87 |
+
self.position.advance(result)
|
| 88 |
+
return result
|
| 89 |
+
|
| 90 |
+
def read_regex(self, regex: Pattern[str]) -> Sequence[str]:
|
| 91 |
+
match = regex.match(self.string, self.position.chars)
|
| 92 |
+
if match is None:
|
| 93 |
+
raise Error("read_regex: Pattern not found")
|
| 94 |
+
self.position.advance(self.string[match.start():match.end()])
|
| 95 |
+
return match.groups()
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def decode_escapes(regex: Pattern[str], string: str) -> str:
|
| 99 |
+
def decode_match(match: Match[str]) -> str:
|
| 100 |
+
return codecs.decode(match.group(0), 'unicode-escape') # type: ignore
|
| 101 |
+
|
| 102 |
+
return regex.sub(decode_match, string)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def parse_key(reader: Reader) -> Optional[str]:
|
| 106 |
+
char = reader.peek(1)
|
| 107 |
+
if char == "#":
|
| 108 |
+
return None
|
| 109 |
+
elif char == "'":
|
| 110 |
+
(key,) = reader.read_regex(_single_quoted_key)
|
| 111 |
+
else:
|
| 112 |
+
(key,) = reader.read_regex(_unquoted_key)
|
| 113 |
+
return key
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def parse_unquoted_value(reader: Reader) -> str:
|
| 117 |
+
(part,) = reader.read_regex(_unquoted_value)
|
| 118 |
+
return re.sub(r"\s+#.*", "", part).rstrip()
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def parse_value(reader: Reader) -> str:
|
| 122 |
+
char = reader.peek(1)
|
| 123 |
+
if char == u"'":
|
| 124 |
+
(value,) = reader.read_regex(_single_quoted_value)
|
| 125 |
+
return decode_escapes(_single_quote_escapes, value)
|
| 126 |
+
elif char == u'"':
|
| 127 |
+
(value,) = reader.read_regex(_double_quoted_value)
|
| 128 |
+
return decode_escapes(_double_quote_escapes, value)
|
| 129 |
+
elif char in (u"", u"\n", u"\r"):
|
| 130 |
+
return u""
|
| 131 |
+
else:
|
| 132 |
+
return parse_unquoted_value(reader)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def parse_binding(reader: Reader) -> Binding:
|
| 136 |
+
reader.set_mark()
|
| 137 |
+
try:
|
| 138 |
+
reader.read_regex(_multiline_whitespace)
|
| 139 |
+
if not reader.has_next():
|
| 140 |
+
return Binding(
|
| 141 |
+
key=None,
|
| 142 |
+
value=None,
|
| 143 |
+
original=reader.get_marked(),
|
| 144 |
+
error=False,
|
| 145 |
+
)
|
| 146 |
+
reader.read_regex(_export)
|
| 147 |
+
key = parse_key(reader)
|
| 148 |
+
reader.read_regex(_whitespace)
|
| 149 |
+
if reader.peek(1) == "=":
|
| 150 |
+
reader.read_regex(_equal_sign)
|
| 151 |
+
value: Optional[str] = parse_value(reader)
|
| 152 |
+
else:
|
| 153 |
+
value = None
|
| 154 |
+
reader.read_regex(_comment)
|
| 155 |
+
reader.read_regex(_end_of_line)
|
| 156 |
+
return Binding(
|
| 157 |
+
key=key,
|
| 158 |
+
value=value,
|
| 159 |
+
original=reader.get_marked(),
|
| 160 |
+
error=False,
|
| 161 |
+
)
|
| 162 |
+
except Error:
|
| 163 |
+
reader.read_regex(_rest_of_line)
|
| 164 |
+
return Binding(
|
| 165 |
+
key=None,
|
| 166 |
+
value=None,
|
| 167 |
+
original=reader.get_marked(),
|
| 168 |
+
error=True,
|
| 169 |
+
)
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def parse_stream(stream: IO[str]) -> Iterator[Binding]:
|
| 173 |
+
reader = Reader(stream)
|
| 174 |
+
while reader.has_next():
|
| 175 |
+
yield parse_binding(reader)
|
vllm/lib/python3.10/site-packages/dotenv/py.typed
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Marker file for PEP 561
|
vllm/lib/python3.10/site-packages/dotenv/version.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
__version__ = "1.0.1"
|
vllm/lib/python3.10/site-packages/outlines-0.1.11.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
vllm/lib/python3.10/site-packages/outlines-0.1.11.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright 2023- The Outlines developers
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
vllm/lib/python3.10/site-packages/outlines-0.1.11.dist-info/METADATA
ADDED
|
@@ -0,0 +1,503 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: outlines
|
| 3 |
+
Version: 0.1.11
|
| 4 |
+
Summary: Probabilistic Generative Model Programming
|
| 5 |
+
Author: Outlines Developers
|
| 6 |
+
License: Apache-2.0
|
| 7 |
+
Project-URL: homepage, https://github.com/dottxt-ai/outlines
|
| 8 |
+
Project-URL: documentation, https://dottxt-ai.github.io/outlines/
|
| 9 |
+
Project-URL: repository, https://github.com/dottxt-ai/outlines
|
| 10 |
+
Keywords: machine learning,deep learning,language models,structured generation
|
| 11 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 12 |
+
Classifier: Intended Audience :: Developers
|
| 13 |
+
Classifier: Intended Audience :: Information Technology
|
| 14 |
+
Classifier: Intended Audience :: Science/Research
|
| 15 |
+
Classifier: Operating System :: OS Independent
|
| 16 |
+
Classifier: Programming Language :: Python :: 3
|
| 17 |
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
| 18 |
+
Requires-Python: >=3.9
|
| 19 |
+
Description-Content-Type: text/markdown
|
| 20 |
+
License-File: LICENSE
|
| 21 |
+
Requires-Dist: interegular
|
| 22 |
+
Requires-Dist: jinja2
|
| 23 |
+
Requires-Dist: lark
|
| 24 |
+
Requires-Dist: nest_asyncio
|
| 25 |
+
Requires-Dist: numpy
|
| 26 |
+
Requires-Dist: cloudpickle
|
| 27 |
+
Requires-Dist: diskcache
|
| 28 |
+
Requires-Dist: pydantic>=2.0
|
| 29 |
+
Requires-Dist: referencing
|
| 30 |
+
Requires-Dist: jsonschema
|
| 31 |
+
Requires-Dist: requests
|
| 32 |
+
Requires-Dist: tqdm
|
| 33 |
+
Requires-Dist: typing_extensions
|
| 34 |
+
Requires-Dist: pycountry
|
| 35 |
+
Requires-Dist: airportsdata
|
| 36 |
+
Requires-Dist: torch
|
| 37 |
+
Requires-Dist: outlines_core==0.1.26
|
| 38 |
+
Provides-Extra: vllm
|
| 39 |
+
Requires-Dist: vllm; extra == "vllm"
|
| 40 |
+
Requires-Dist: transformers; extra == "vllm"
|
| 41 |
+
Requires-Dist: numpy<2; extra == "vllm"
|
| 42 |
+
Provides-Extra: transformers
|
| 43 |
+
Requires-Dist: transformers; extra == "transformers"
|
| 44 |
+
Requires-Dist: accelerate; extra == "transformers"
|
| 45 |
+
Requires-Dist: datasets; extra == "transformers"
|
| 46 |
+
Requires-Dist: numpy<2; extra == "transformers"
|
| 47 |
+
Provides-Extra: mlxlm
|
| 48 |
+
Requires-Dist: mlx-lm; extra == "mlxlm"
|
| 49 |
+
Requires-Dist: datasets; extra == "mlxlm"
|
| 50 |
+
Provides-Extra: openai
|
| 51 |
+
Requires-Dist: openai; extra == "openai"
|
| 52 |
+
Provides-Extra: llamacpp
|
| 53 |
+
Requires-Dist: llama-cpp-python; extra == "llamacpp"
|
| 54 |
+
Requires-Dist: transformers; extra == "llamacpp"
|
| 55 |
+
Requires-Dist: datasets; extra == "llamacpp"
|
| 56 |
+
Requires-Dist: numpy<2; extra == "llamacpp"
|
| 57 |
+
Provides-Extra: exllamav2
|
| 58 |
+
Requires-Dist: exllamav2; extra == "exllamav2"
|
| 59 |
+
Provides-Extra: test
|
| 60 |
+
Requires-Dist: pre-commit; extra == "test"
|
| 61 |
+
Requires-Dist: pytest; extra == "test"
|
| 62 |
+
Requires-Dist: pytest-benchmark; extra == "test"
|
| 63 |
+
Requires-Dist: pytest-cov; extra == "test"
|
| 64 |
+
Requires-Dist: pytest-mock; extra == "test"
|
| 65 |
+
Requires-Dist: coverage[toml]>=5.1; extra == "test"
|
| 66 |
+
Requires-Dist: diff-cover; extra == "test"
|
| 67 |
+
Requires-Dist: accelerate; extra == "test"
|
| 68 |
+
Requires-Dist: beartype<0.16.0; extra == "test"
|
| 69 |
+
Requires-Dist: responses; extra == "test"
|
| 70 |
+
Requires-Dist: llama-cpp-python; extra == "test"
|
| 71 |
+
Requires-Dist: mlx-lm>=0.19.2; (platform_machine == "arm64" and sys_platform == "darwin") and extra == "test"
|
| 72 |
+
Requires-Dist: huggingface_hub; extra == "test"
|
| 73 |
+
Requires-Dist: openai>=1.0.0; extra == "test"
|
| 74 |
+
Requires-Dist: datasets; extra == "test"
|
| 75 |
+
Requires-Dist: vllm; sys_platform != "darwin" and extra == "test"
|
| 76 |
+
Requires-Dist: transformers; extra == "test"
|
| 77 |
+
Requires-Dist: pillow; extra == "test"
|
| 78 |
+
Requires-Dist: exllamav2; extra == "test"
|
| 79 |
+
Requires-Dist: jax; extra == "test"
|
| 80 |
+
Provides-Extra: serve
|
| 81 |
+
Requires-Dist: vllm>=0.3.0; extra == "serve"
|
| 82 |
+
Requires-Dist: uvicorn; extra == "serve"
|
| 83 |
+
Requires-Dist: fastapi; extra == "serve"
|
| 84 |
+
Requires-Dist: pydantic>=2.0; extra == "serve"
|
| 85 |
+
|
| 86 |
+
<div align="center" style="margin-bottom: 1em;">
|
| 87 |
+
|
| 88 |
+
<img src="./docs/assets/images/logo.png" alt="Outlines Logo" width=500></img>
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
🗒️ *Make LLMs speak the language of every application.* 🗒️
|
| 92 |
+
|
| 93 |
+
Made with ❤👷️ by the team at [.txt](https://dottxt.co).
|
| 94 |
+
|
| 95 |
+
[![Documentation][documentation-badge]][documentation]
|
| 96 |
+
[![Contributors][contributors-badge]][contributors]
|
| 97 |
+
[![Downloads][downloads-badge]][pypistats]
|
| 98 |
+
[![Discord][discord-badge]][discord]
|
| 99 |
+
|
| 100 |
+
[Youtube channel][youtube-dottxt] | [.txt blog][blog-dottxt] | [Twitter][dottxt-twitter]
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
</div>
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
``` bash
|
| 107 |
+
pip install outlines
|
| 108 |
+
```
|
| 109 |
+
|
| 110 |
+
First time here? Go to our [setup guide](https://dottxt-ai.github.io/outlines/latest/welcome/)
|
| 111 |
+
|
| 112 |
+
## Features
|
| 113 |
+
|
| 114 |
+
- [x] 🤖 [Multiple model integrations](https://dottxt-ai.github.io/outlines/latest/installation): OpenAI, transformers, llama.cpp, exllama2, mamba
|
| 115 |
+
- [x] 🖍️ Simple and powerful prompting primitives based on the [Jinja templating engine](https://jinja.palletsprojects.com/)
|
| 116 |
+
- [x] 🚄 [Multiple choices](#multiple-choices), [type constraints](#type-constraint) and dynamic stopping
|
| 117 |
+
- [x] ⚡ Fast [regex-structured generation](#efficient-regex-structured-generation)
|
| 118 |
+
- [x] 🔥 Fast [JSON generation](#efficient-json-generation-following-a-pydantic-model) following a JSON schema or a Pydantic model
|
| 119 |
+
- [x] 📝 [Grammar-structured generation](#using-context-free-grammars-to-guide-generation)
|
| 120 |
+
- [x] 🐍 Interleave completions with loops, conditionals, and custom Python functions
|
| 121 |
+
- [x] 💾 Caching of generations
|
| 122 |
+
- [x] 🗂️ Batch inference
|
| 123 |
+
- [x] 🎲 Sample with the greedy, multinomial and beam search algorithms (and more to come!)
|
| 124 |
+
- [x] 🚀 [Serve with vLLM](https://dottxt-ai.github.io/outlines/latest/reference/serve/vllm), with official Docker image, [`outlinesdev/outlines`](https://hub.docker.com/r/outlinesdev/outlines)!
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
Outlines has new releases and features coming every week. Make sure to ⭐ star and 👀 watch this repository, follow [@dottxtai][dottxt-twitter] to stay up to date!
|
| 128 |
+
|
| 129 |
+
## Why should I use structured generation?
|
| 130 |
+
|
| 131 |
+
* It doesn't add any overhead during inference (cost-free)
|
| 132 |
+
* It allows Open Source models to beat closed source models ([Mistral](https://x.com/dottxtai/status/1797692104023363765), [GPT-4](https://x.com/dottxtai/status/1798443290913853770))
|
| 133 |
+
* [It speeds up inference](http://blog.dottxt.co/coalescence.html)
|
| 134 |
+
* [It improves the performance of base models (GSM8K)](http://blog.dottxt.co/performance-gsm8k.html)
|
| 135 |
+
* [It improves the performance of finetuned models (CoNNL)](https://predibase.com/blog/lorax-outlines-better-json-extraction-with-structured-generation-and-lora)
|
| 136 |
+
* [It improves model efficiency (less examples needed)](https://huggingface.co/blog/evaluation-structured-outputs)
|
| 137 |
+
|
| 138 |
+
## .txt company
|
| 139 |
+
|
| 140 |
+
<div align="center">
|
| 141 |
+
<img src="./docs/assets/images/dottxt.png" alt="Outlines Logo" width=100></img>
|
| 142 |
+
</div>
|
| 143 |
+
|
| 144 |
+
We started a company to keep pushing the boundaries of structured generation. Learn more about [.txt](https://twitter.com/dottxtai), and [give our .json API a try](https://h1xbpbfsf0w.typeform.com/to/ZgBCvJHF) if you need a hosted solution ✨
|
| 145 |
+
|
| 146 |
+
## Structured generation
|
| 147 |
+
|
| 148 |
+
The first step towards reliability of systems that include large language models
|
| 149 |
+
is to ensure that there is a well-defined interface between their output and
|
| 150 |
+
user-defined code. **Outlines** provides ways to control the generation of
|
| 151 |
+
language models to make their output more predictable.
|
| 152 |
+
|
| 153 |
+
### Multiple choices
|
| 154 |
+
|
| 155 |
+
You can reduce the completion to a choice between multiple possibilities:
|
| 156 |
+
|
| 157 |
+
``` python
|
| 158 |
+
import outlines
|
| 159 |
+
|
| 160 |
+
model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
|
| 161 |
+
|
| 162 |
+
prompt = """You are a sentiment-labelling assistant.
|
| 163 |
+
Is the following review positive or negative?
|
| 164 |
+
|
| 165 |
+
Review: This restaurant is just awesome!
|
| 166 |
+
"""
|
| 167 |
+
|
| 168 |
+
generator = outlines.generate.choice(model, ["Positive", "Negative"])
|
| 169 |
+
answer = generator(prompt)
|
| 170 |
+
```
|
| 171 |
+
|
| 172 |
+
You can also pass these choices through an enum:
|
| 173 |
+
|
| 174 |
+
````python
|
| 175 |
+
from enum import Enum
|
| 176 |
+
|
| 177 |
+
import outlines
|
| 178 |
+
|
| 179 |
+
class Sentiment(str, Enum):
|
| 180 |
+
positive = "Positive"
|
| 181 |
+
negative = "Negative"
|
| 182 |
+
|
| 183 |
+
model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
|
| 184 |
+
|
| 185 |
+
prompt = """You are a sentiment-labelling assistant.
|
| 186 |
+
Is the following review positive or negative?
|
| 187 |
+
|
| 188 |
+
Review: This restaurant is just awesome!
|
| 189 |
+
"""
|
| 190 |
+
|
| 191 |
+
generator = outlines.generate.choice(model, Sentiment)
|
| 192 |
+
answer = generator(prompt)
|
| 193 |
+
````
|
| 194 |
+
|
| 195 |
+
### Type constraint
|
| 196 |
+
|
| 197 |
+
You can instruct the model to only return integers or floats:
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
``` python
|
| 201 |
+
import outlines
|
| 202 |
+
|
| 203 |
+
model = outlines.models.transformers("WizardLM/WizardMath-7B-V1.1")
|
| 204 |
+
|
| 205 |
+
prompt = "<s>result of 9 + 9 = 18</s><s>result of 1 + 2 = "
|
| 206 |
+
answer = outlines.generate.format(model, int)(prompt)
|
| 207 |
+
print(answer)
|
| 208 |
+
# 3
|
| 209 |
+
|
| 210 |
+
prompt = "sqrt(2)="
|
| 211 |
+
generator = outlines.generate.format(model, float)
|
| 212 |
+
answer = generator(prompt, max_tokens=10)
|
| 213 |
+
print(answer)
|
| 214 |
+
# 1.41421356
|
| 215 |
+
```
|
| 216 |
+
|
| 217 |
+
### Efficient regex-structured generation
|
| 218 |
+
|
| 219 |
+
Outlines also comes with fast regex-structured generation. In fact, the `choice` and
|
| 220 |
+
`format` functions above all use regex-structured generation under the
|
| 221 |
+
hood:
|
| 222 |
+
|
| 223 |
+
``` python
|
| 224 |
+
import outlines
|
| 225 |
+
|
| 226 |
+
model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
|
| 227 |
+
|
| 228 |
+
prompt = "What is the IP address of the Google DNS servers? "
|
| 229 |
+
|
| 230 |
+
generator = outlines.generate.text(model)
|
| 231 |
+
unstructured = generator(prompt, max_tokens=30)
|
| 232 |
+
|
| 233 |
+
generator = outlines.generate.regex(
|
| 234 |
+
model,
|
| 235 |
+
r"((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)",
|
| 236 |
+
)
|
| 237 |
+
structured = generator(prompt, max_tokens=30)
|
| 238 |
+
|
| 239 |
+
print(unstructured)
|
| 240 |
+
# What is the IP address of the Google DNS servers?
|
| 241 |
+
#
|
| 242 |
+
# Passive DNS servers are at DNS servers that are private.
|
| 243 |
+
# In other words, both IP servers are private. The database
|
| 244 |
+
# does not contain Chelsea Manning
|
| 245 |
+
|
| 246 |
+
print(structured)
|
| 247 |
+
# What is the IP address of the Google DNS servers?
|
| 248 |
+
# 2.2.6.1
|
| 249 |
+
```
|
| 250 |
+
|
| 251 |
+
Unlike other libraries, regex-structured generation in Outlines is almost as fast
|
| 252 |
+
as non-structured generation.
|
| 253 |
+
|
| 254 |
+
### Efficient JSON generation following a Pydantic model
|
| 255 |
+
|
| 256 |
+
Outlines allows you to guide the generation process so the output is *guaranteed* to follow a [JSON schema](https://json-schema.org/) or [Pydantic model](https://docs.pydantic.dev/latest/):
|
| 257 |
+
|
| 258 |
+
```python
|
| 259 |
+
from enum import Enum
|
| 260 |
+
from pydantic import BaseModel, constr
|
| 261 |
+
|
| 262 |
+
import outlines
|
| 263 |
+
import torch
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
class Weapon(str, Enum):
|
| 267 |
+
sword = "sword"
|
| 268 |
+
axe = "axe"
|
| 269 |
+
mace = "mace"
|
| 270 |
+
spear = "spear"
|
| 271 |
+
bow = "bow"
|
| 272 |
+
crossbow = "crossbow"
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
class Armor(str, Enum):
|
| 276 |
+
leather = "leather"
|
| 277 |
+
chainmail = "chainmail"
|
| 278 |
+
plate = "plate"
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
class Character(BaseModel):
|
| 282 |
+
name: constr(max_length=10)
|
| 283 |
+
age: int
|
| 284 |
+
armor: Armor
|
| 285 |
+
weapon: Weapon
|
| 286 |
+
strength: int
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
|
| 290 |
+
|
| 291 |
+
# Construct structured sequence generator
|
| 292 |
+
generator = outlines.generate.json(model, Character)
|
| 293 |
+
|
| 294 |
+
# Draw a sample
|
| 295 |
+
seed = 789001
|
| 296 |
+
|
| 297 |
+
character = generator("Give me a character description", seed=seed)
|
| 298 |
+
|
| 299 |
+
print(repr(character))
|
| 300 |
+
# Character(name='Anderson', age=28, armor=<Armor.chainmail: 'chainmail'>, weapon=<Weapon.sword: 'sword'>, strength=8)
|
| 301 |
+
|
| 302 |
+
character = generator("Give me an interesting character description")
|
| 303 |
+
|
| 304 |
+
print(repr(character))
|
| 305 |
+
# Character(name='Vivian Thr', age=44, armor=<Armor.plate: 'plate'>, weapon=<Weapon.crossbow: 'crossbow'>, strength=125)
|
| 306 |
+
```
|
| 307 |
+
|
| 308 |
+
The method works with union types, optional types, arrays, nested schemas, etc. Some field constraints are [not supported yet](https://github.com/dottxt-ai/outlines/issues/215), but everything else should work.
|
| 309 |
+
|
| 310 |
+
### Efficient JSON generation following a JSON Schema
|
| 311 |
+
|
| 312 |
+
Sometimes you just want to be able to pass a JSON Schema instead of a Pydantic model. We've got you covered:
|
| 313 |
+
|
| 314 |
+
``` python
|
| 315 |
+
import outlines
|
| 316 |
+
|
| 317 |
+
schema = '''{
|
| 318 |
+
"title": "Character",
|
| 319 |
+
"type": "object",
|
| 320 |
+
"properties": {
|
| 321 |
+
"name": {
|
| 322 |
+
"title": "Name",
|
| 323 |
+
"maxLength": 10,
|
| 324 |
+
"type": "string"
|
| 325 |
+
},
|
| 326 |
+
"age": {
|
| 327 |
+
"title": "Age",
|
| 328 |
+
"type": "integer"
|
| 329 |
+
},
|
| 330 |
+
"armor": {"$ref": "#/definitions/Armor"},
|
| 331 |
+
"weapon": {"$ref": "#/definitions/Weapon"},
|
| 332 |
+
"strength": {
|
| 333 |
+
"title": "Strength",
|
| 334 |
+
"type": "integer"
|
| 335 |
+
}
|
| 336 |
+
},
|
| 337 |
+
"required": ["name", "age", "armor", "weapon", "strength"],
|
| 338 |
+
"definitions": {
|
| 339 |
+
"Armor": {
|
| 340 |
+
"title": "Armor",
|
| 341 |
+
"description": "An enumeration.",
|
| 342 |
+
"enum": ["leather", "chainmail", "plate"],
|
| 343 |
+
"type": "string"
|
| 344 |
+
},
|
| 345 |
+
"Weapon": {
|
| 346 |
+
"title": "Weapon",
|
| 347 |
+
"description": "An enumeration.",
|
| 348 |
+
"enum": ["sword", "axe", "mace", "spear", "bow", "crossbow"],
|
| 349 |
+
"type": "string"
|
| 350 |
+
}
|
| 351 |
+
}
|
| 352 |
+
}'''
|
| 353 |
+
|
| 354 |
+
model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
|
| 355 |
+
generator = outlines.generate.json(model, schema)
|
| 356 |
+
character = generator("Give me a character description")
|
| 357 |
+
```
|
| 358 |
+
|
| 359 |
+
### Using context-free grammars to guide generation
|
| 360 |
+
|
| 361 |
+
Formal grammars rule the world, and Outlines makes them rule LLMs too. You can pass any context-free grammar in the EBNF format and Outlines will generate an output that is valid according to this grammar:
|
| 362 |
+
|
| 363 |
+
``` python
|
| 364 |
+
import outlines
|
| 365 |
+
|
| 366 |
+
arithmetic_grammar = """
|
| 367 |
+
?start: expression
|
| 368 |
+
|
| 369 |
+
?expression: term (("+" | "-") term)*
|
| 370 |
+
|
| 371 |
+
?term: factor (("*" | "/") factor)*
|
| 372 |
+
|
| 373 |
+
?factor: NUMBER
|
| 374 |
+
| "-" factor
|
| 375 |
+
| "(" expression ")"
|
| 376 |
+
|
| 377 |
+
%import common.NUMBER
|
| 378 |
+
"""
|
| 379 |
+
|
| 380 |
+
model = outlines.models.transformers("WizardLM/WizardMath-7B-V1.1")
|
| 381 |
+
generator = outlines.generate.cfg(model, arithmetic_grammar)
|
| 382 |
+
sequence = generator("Alice had 4 apples and Bob ate 2. Write an expression for Alice's apples:")
|
| 383 |
+
|
| 384 |
+
print(sequence)
|
| 385 |
+
# (8-2)
|
| 386 |
+
```
|
| 387 |
+
|
| 388 |
+
This was a very simple grammar, and you can use `outlines.generate.cfg` to generate syntactically valid Python, SQL, and much more than this. Any kind of structured text, really. All you have to do is search for "X EBNF grammar" on the web, and take a look at the [Outlines `grammars` module](https://github.com/dottxt-ai/outlines/tree/main/outlines/grammars).
|
| 389 |
+
|
| 390 |
+
### Open functions
|
| 391 |
+
|
| 392 |
+
Outlines can infer the structure of the output from the signature of a function. The result is a dictionary, and can be passed directly to the function using the usual dictionary expansion syntax `**`:
|
| 393 |
+
|
| 394 |
+
```python
|
| 395 |
+
import outlines
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def add(a: int, b: int):
|
| 399 |
+
return a + b
|
| 400 |
+
|
| 401 |
+
model = outlines.models.transformers("WizardLM/WizardMath-7B-V1.1")
|
| 402 |
+
generator = outlines.generate.json(model, add)
|
| 403 |
+
result = generator("Return json with two integers named a and b respectively. a is odd and b even.")
|
| 404 |
+
|
| 405 |
+
print(add(**result))
|
| 406 |
+
# 3
|
| 407 |
+
```
|
| 408 |
+
|
| 409 |
+
A great advantage of passing functions directly to specify the structure is that the structure of the LLM's output will change with the function's definition. No need to change the code in several places!
|
| 410 |
+
|
| 411 |
+
You can also embed various functions into an enum to generate params:
|
| 412 |
+
|
| 413 |
+
```python
|
| 414 |
+
from enum import Enum
|
| 415 |
+
from functools import partial
|
| 416 |
+
|
| 417 |
+
import outlines
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
def add(a: int, b: int) -> int:
|
| 421 |
+
return a + b
|
| 422 |
+
|
| 423 |
+
def mul(c: float, d: float) -> float:
|
| 424 |
+
return c * d
|
| 425 |
+
|
| 426 |
+
class Operation(Enum):
|
| 427 |
+
add = partial(add)
|
| 428 |
+
mul = partial(mul)
|
| 429 |
+
|
| 430 |
+
model = outlines.models.transformers("WizardLM/WizardMath-7B-V1.1")
|
| 431 |
+
generator = outlines.generate.json(model, mul)
|
| 432 |
+
result = generator("Return json with two floats named c and d respectively. c is negative and d greater than 1.0.")
|
| 433 |
+
|
| 434 |
+
print(result)
|
| 435 |
+
# {'c': -3.14, 'd': 1.5}
|
| 436 |
+
```
|
| 437 |
+
|
| 438 |
+
## Prompting
|
| 439 |
+
|
| 440 |
+
Building prompts can get messy. **Outlines** makes it easier to write and manage
|
| 441 |
+
prompts by encapsulating templates inside "template functions".
|
| 442 |
+
|
| 443 |
+
These functions make it possible to neatly separate the prompt logic from the
|
| 444 |
+
general program logic; they can be imported from other modules and libraries.
|
| 445 |
+
|
| 446 |
+
Template functions require no superfluous abstraction, they use the Jinja2
|
| 447 |
+
templating engine to help build complex prompts in a concise manner:
|
| 448 |
+
|
| 449 |
+
``` python
|
| 450 |
+
import outlines
|
| 451 |
+
|
| 452 |
+
examples = [
|
| 453 |
+
("The food was disgusting", "Negative"),
|
| 454 |
+
("We had a fantastic night", "Positive"),
|
| 455 |
+
("Recommended", "Positive"),
|
| 456 |
+
("The waiter was rude", "Negative")
|
| 457 |
+
]
|
| 458 |
+
|
| 459 |
+
@outlines.prompt
|
| 460 |
+
def labelling(to_label, examples):
|
| 461 |
+
"""You are a sentiment-labelling assistant.
|
| 462 |
+
|
| 463 |
+
{% for example in examples %}
|
| 464 |
+
{{ example[0] }} // {{ example[1] }}
|
| 465 |
+
{% endfor %}
|
| 466 |
+
{{ to_label }} //
|
| 467 |
+
"""
|
| 468 |
+
|
| 469 |
+
model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
|
| 470 |
+
prompt = labelling("Just awesome", examples)
|
| 471 |
+
answer = outlines.generate.text(model)(prompt, max_tokens=100)
|
| 472 |
+
```
|
| 473 |
+
|
| 474 |
+
## Join us
|
| 475 |
+
|
| 476 |
+
- 💡 **Have an idea?** Come chat with us on [Discord][discord]
|
| 477 |
+
- 🔨 **Want to contribute?** Consult our [contribution guide](https://dottxt-ai.github.io/outlines/latest/community/contribute/).
|
| 478 |
+
- 🐞 **Found a bug?** Open an [issue](https://github.com/dottxt-ai/outlines/issues)
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
## Cite Outlines
|
| 482 |
+
|
| 483 |
+
```
|
| 484 |
+
@article{willard2023efficient,
|
| 485 |
+
title={Efficient Guided Generation for LLMs},
|
| 486 |
+
author={Willard, Brandon T and Louf, R{\'e}mi},
|
| 487 |
+
journal={arXiv preprint arXiv:2307.09702},
|
| 488 |
+
year={2023}
|
| 489 |
+
}
|
| 490 |
+
```
|
| 491 |
+
|
| 492 |
+
[documentation]: https://dottxt-ai.github.io/outlines/latest/welcome/
|
| 493 |
+
[documentation-badge]: https://img.shields.io/readthedocs/outlines
|
| 494 |
+
[contributors]: https://github.com/dottxt-ai/outlines/graphs/contributors
|
| 495 |
+
[contributors-badge]: https://img.shields.io/github/contributors/dottxt-ai/outlines?style=flat-square&logo=github&logoColor=white&color=ECEFF4
|
| 496 |
+
[dottxt-twitter]: https://twitter.com/dottxtai
|
| 497 |
+
[discord]: https://discord.gg/R9DSu34mGd
|
| 498 |
+
[discord-badge]: https://img.shields.io/discord/1182316225284554793?color=81A1C1&logo=discord&logoColor=white&style=flat-square
|
| 499 |
+
[downloads-badge]: https://img.shields.io/pypi/dm/outlines?color=89AC6B&logo=python&logoColor=white&style=flat-square
|
| 500 |
+
[pypistats]: https://pypistats.org/packages/outlines
|
| 501 |
+
[dottxt-twitter-badge]: https://img.shields.io/twitter/follow/dottxtai?style=social
|
| 502 |
+
[youtube-dottxt]: https://www.youtube.com/@dottxt-ai
|
| 503 |
+
[blog-dottxt]: https://blog.dottxt.co/
|
vllm/lib/python3.10/site-packages/outlines-0.1.11.dist-info/RECORD
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
outlines-0.1.11.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
outlines-0.1.11.dist-info/LICENSE,sha256=9xB47oqqPVZwSIdW8Zk7neOuZMlUagIy67vdWVxTddc,11354
|
| 3 |
+
outlines-0.1.11.dist-info/METADATA,sha256=90I6ySed9yjWM_A0cZZ7kYaG6CSh1DiTnGq-Q1s_jeM,17137
|
| 4 |
+
outlines-0.1.11.dist-info/RECORD,,
|
| 5 |
+
outlines-0.1.11.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
outlines-0.1.11.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
|
| 7 |
+
outlines-0.1.11.dist-info/top_level.txt,sha256=DRbCwvEBUKClPATvDaHzpX6gD7LgECM9WVYkEq0NHpY,9
|
| 8 |
+
outlines/__init__.py,sha256=sYuMGn7xxyuPhwq-M3M2WKjwGqFwEXG0xyJw6lw31Ng,495
|
| 9 |
+
outlines/__pycache__/__init__.cpython-310.pyc,,
|
| 10 |
+
outlines/__pycache__/_version.cpython-310.pyc,,
|
| 11 |
+
outlines/__pycache__/base.cpython-310.pyc,,
|
| 12 |
+
outlines/__pycache__/caching.cpython-310.pyc,,
|
| 13 |
+
outlines/__pycache__/function.cpython-310.pyc,,
|
| 14 |
+
outlines/__pycache__/grammars.cpython-310.pyc,,
|
| 15 |
+
outlines/__pycache__/prompts.cpython-310.pyc,,
|
| 16 |
+
outlines/__pycache__/samplers.cpython-310.pyc,,
|
| 17 |
+
outlines/_version.py,sha256=HreDwlLXV189L3kiBj3huM_kqWD1usijlC8LN1YXcCM,413
|
| 18 |
+
outlines/base.py,sha256=InRqZU2VeNPjpkb3wfCDnYZ5xW1wxSYeCNXCHTLz_Vg,10501
|
| 19 |
+
outlines/caching.py,sha256=WxfFldbINw0MBtsHhHI51nugsgH7dDpYyPf07A6Yv2E,5337
|
| 20 |
+
outlines/fsm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 21 |
+
outlines/fsm/__pycache__/__init__.cpython-310.pyc,,
|
| 22 |
+
outlines/fsm/__pycache__/guide.cpython-310.pyc,,
|
| 23 |
+
outlines/fsm/__pycache__/json_schema.cpython-310.pyc,,
|
| 24 |
+
outlines/fsm/__pycache__/parsing.cpython-310.pyc,,
|
| 25 |
+
outlines/fsm/__pycache__/types.cpython-310.pyc,,
|
| 26 |
+
outlines/fsm/guide.py,sha256=0DZwVei2g-3kA9Cn5NECwDalWB2ufKTwxJVvdXOVGQ0,8953
|
| 27 |
+
outlines/fsm/json_schema.py,sha256=eB0fMz3UKI-pHOsuYdVQZmsm2Jr1QIw_6DzkC83mB6Y,2535
|
| 28 |
+
outlines/fsm/parsing.py,sha256=ypJ52to1umo2wItiUqhxXDGW4fQf731mq5cGLrQAOeI,39516
|
| 29 |
+
outlines/fsm/types.py,sha256=XEhFaGaM6rrFKsXNXnGmvk1_5Jfht8nkqCcKBk2piDQ,2493
|
| 30 |
+
outlines/function.py,sha256=kN22C9c5IBoQ3KR5GwCFR0gyPzG2Ke5k6ZAPb6pF55U,3707
|
| 31 |
+
outlines/generate/__init__.py,sha256=aQs6Ga6r0n_KMzAY-d1NQhnGkQSWGdQXNCdJzMcbeGo,202
|
| 32 |
+
outlines/generate/__pycache__/__init__.cpython-310.pyc,,
|
| 33 |
+
outlines/generate/__pycache__/api.cpython-310.pyc,,
|
| 34 |
+
outlines/generate/__pycache__/cfg.cpython-310.pyc,,
|
| 35 |
+
outlines/generate/__pycache__/choice.cpython-310.pyc,,
|
| 36 |
+
outlines/generate/__pycache__/format.cpython-310.pyc,,
|
| 37 |
+
outlines/generate/__pycache__/fsm.cpython-310.pyc,,
|
| 38 |
+
outlines/generate/__pycache__/generator.cpython-310.pyc,,
|
| 39 |
+
outlines/generate/__pycache__/json.cpython-310.pyc,,
|
| 40 |
+
outlines/generate/__pycache__/regex.cpython-310.pyc,,
|
| 41 |
+
outlines/generate/__pycache__/text.cpython-310.pyc,,
|
| 42 |
+
outlines/generate/api.py,sha256=54ww0C759h2A6COktBcJeLPDXPH1Nn4l0Iv2i-gLH84,20666
|
| 43 |
+
outlines/generate/cfg.py,sha256=giAHsT-TAi4OnO_d3U15JJX1X194SKQrBqYgdxnFEw4,1686
|
| 44 |
+
outlines/generate/choice.py,sha256=MNJZ0Ig-ZvW_Ci1IazrMqJNkuqnYU7H0R7cvic9YbPc,1752
|
| 45 |
+
outlines/generate/format.py,sha256=d0tEbpdImunihJorf4cYc3KK3aeFrjuWI6G3KoO8Dqg,1435
|
| 46 |
+
outlines/generate/fsm.py,sha256=N7M6BUmEoN02gcVijV3kPUa3Bk9S_sGfFGt1I-lvCeY,1111
|
| 47 |
+
outlines/generate/generator.py,sha256=-EnFq8pb7fbfLPmqRFvMeXN-kA1l_mhwrGvDoRxKWx0,8811
|
| 48 |
+
outlines/generate/json.py,sha256=cFHVogIC_ltTjoPURCP2WaQjuqslRuzcR7GLy3dlgjA,4309
|
| 49 |
+
outlines/generate/regex.py,sha256=3PhYSiR2tpDLj3ty_fvjv7vMcU28Y9dgYiGsfRFOe8Q,1715
|
| 50 |
+
outlines/generate/text.py,sha256=8-DcHDtV4imaqKfG_f4hhYQ_wbPwhhCdjuPmHG_HVo4,1409
|
| 51 |
+
outlines/grammars.py,sha256=OXxQyKvthoQCfrwQuCHSSi4VYcb3GMAOYudC2DmvquU,396
|
| 52 |
+
outlines/grammars/arithmetic.lark,sha256=4aWsZ_IkS9nP7NGihdgPf0wWaP2tn0xb_jhFNF5ws50,293
|
| 53 |
+
outlines/grammars/common.lark,sha256=h6mPVV0vitrbCSVDUnL_GvQriCfwrN8EtWLFiss3K9Q,2243
|
| 54 |
+
outlines/grammars/json.lark,sha256=6d6owpAzgVkAOUSsINg6MLu81VV_HQknRsMsSXHYB-k,373
|
| 55 |
+
outlines/models/__init__.py,sha256=8vIXGlkrjOIeBYx21Uo0-3U6A4UyOBOMf9iK4Wswvcw,701
|
| 56 |
+
outlines/models/__pycache__/__init__.cpython-310.pyc,,
|
| 57 |
+
outlines/models/__pycache__/exllamav2.cpython-310.pyc,,
|
| 58 |
+
outlines/models/__pycache__/llamacpp.cpython-310.pyc,,
|
| 59 |
+
outlines/models/__pycache__/mlxlm.cpython-310.pyc,,
|
| 60 |
+
outlines/models/__pycache__/openai.cpython-310.pyc,,
|
| 61 |
+
outlines/models/__pycache__/tokenizer.cpython-310.pyc,,
|
| 62 |
+
outlines/models/__pycache__/transformers.cpython-310.pyc,,
|
| 63 |
+
outlines/models/__pycache__/transformers_vision.cpython-310.pyc,,
|
| 64 |
+
outlines/models/__pycache__/vllm.cpython-310.pyc,,
|
| 65 |
+
outlines/models/exllamav2.py,sha256=Mo8gpuQI7KQe77T-BZHXHOV3Kkucgvkqo7-TjJcpzV0,13295
|
| 66 |
+
outlines/models/llamacpp.py,sha256=mI_xD-DqfcADl9asF554qOKxpusekx65GEl1Ja-C-xY,14662
|
| 67 |
+
outlines/models/mlxlm.py,sha256=ieim5QadwNQXM6311RBXOoYh52EnRcJZSvPiEfLpxbU,8588
|
| 68 |
+
outlines/models/openai.py,sha256=Oa-HiCUf5tk8HL_UCMI9FJ4tz4F0gAnQgggE1EB28QU,9009
|
| 69 |
+
outlines/models/tokenizer.py,sha256=x6228TFhbcGe-XssA4SAAjaOBEZoAvFciQUpK22Y28U,996
|
| 70 |
+
outlines/models/transformers.py,sha256=xJblsZB8FoXfDxrhvJ7pW0Hj8HSLT9FndURPrZ7kO2M,15337
|
| 71 |
+
outlines/models/transformers_vision.py,sha256=t77kgdRa5DIRiPis126AOfTnKl3PswL3klouUlFR9Jk,5069
|
| 72 |
+
outlines/models/vllm.py,sha256=BRvkrYAC2gTMZ3vhcETXJYf_mlO1U49m3bMArGymyDU,7769
|
| 73 |
+
outlines/processors/__init__.py,sha256=fDMQ-pyBPaDB7Eb8pgwJ16eTUbPAm-w2Wf-Vn8BuCGY,158
|
| 74 |
+
outlines/processors/__pycache__/__init__.cpython-310.pyc,,
|
| 75 |
+
outlines/processors/__pycache__/base_logits_processor.cpython-310.pyc,,
|
| 76 |
+
outlines/processors/__pycache__/structured.cpython-310.pyc,,
|
| 77 |
+
outlines/processors/base_logits_processor.py,sha256=vFM2p65Mstk4YkO2ZC1xOON3YGj4KgWgjj_iFnROSQQ,5354
|
| 78 |
+
outlines/processors/structured.py,sha256=XOZ3hq_B9BbD6nRuOjdZYQvXYRIYY1s6PJFYzdwtV-c,8240
|
| 79 |
+
outlines/prompts.py,sha256=By6LodDBBDeh9xhCXqkxQqnD1pGNStK7JNJDmMylBMg,10071
|
| 80 |
+
outlines/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 81 |
+
outlines/samplers.py,sha256=aQqVwEqgCoAVjr2qDkSk28hJXf4CQ8DT0LEJv73vQC4,10646
|
| 82 |
+
outlines/serve/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 83 |
+
outlines/serve/__pycache__/__init__.cpython-310.pyc,,
|
| 84 |
+
outlines/serve/__pycache__/serve.cpython-310.pyc,,
|
| 85 |
+
outlines/serve/serve.py,sha256=xZnXnos-mB7xurY_y2zQIRkUi9508QNxZERZTfbxosw,4940
|
| 86 |
+
outlines/types/__init__.py,sha256=0ZVfLELb_CZ6P9RTete561Uja8bgoGZ4S2shDy-iNhg,110
|
| 87 |
+
outlines/types/__pycache__/__init__.cpython-310.pyc,,
|
| 88 |
+
outlines/types/__pycache__/airports.cpython-310.pyc,,
|
| 89 |
+
outlines/types/__pycache__/countries.cpython-310.pyc,,
|
| 90 |
+
outlines/types/__pycache__/email.cpython-310.pyc,,
|
| 91 |
+
outlines/types/__pycache__/isbn.cpython-310.pyc,,
|
| 92 |
+
outlines/types/__pycache__/locales.cpython-310.pyc,,
|
| 93 |
+
outlines/types/__pycache__/phone_numbers.cpython-310.pyc,,
|
| 94 |
+
outlines/types/__pycache__/zip_codes.cpython-310.pyc,,
|
| 95 |
+
outlines/types/airports.py,sha256=L2rBblU02mkiXrQfm35XS-r4h0L8OySZ-rEpJJvw75s,241
|
| 96 |
+
outlines/types/countries.py,sha256=XWjvIEXkKNwHSdG4TILxfpSU3xHNJnTeMhvVLp1n_S4,748
|
| 97 |
+
outlines/types/email.py,sha256=aOc004pbeIY4p_Ssj5kWBYXfwAukHxVVY10lTj77byY,739
|
| 98 |
+
outlines/types/isbn.py,sha256=2HtRGX-eoOvGImOI0WL2LUAa7IuvJmGgr1Xb7JZOwi8,761
|
| 99 |
+
outlines/types/locales.py,sha256=rKj2OfDIgY4akyjMWOCWF7jB93kv3NzdQcihM4ojh-s,530
|
| 100 |
+
outlines/types/phone_numbers.py,sha256=l8MSwbzsQ2qjGzKN0vVH546IdaHTuT9OD9XzZE4zAp8,435
|
| 101 |
+
outlines/types/zip_codes.py,sha256=lGj2OBwX3LwLk7agw396WK17Aky4a5fZpLeZsNPkjAg,300
|
vllm/lib/python3.10/site-packages/outlines-0.1.11.dist-info/REQUESTED
ADDED
|
File without changes
|